hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4628a354b0cddbcb048a1d50ce815aaa040404 | 277 | py | Python | dev_global/dev_global/env.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | 1 | 2021-07-12T11:20:58.000Z | 2021-07-12T11:20:58.000Z | dev_global/dev_global/env.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | null | null | null | dev_global/dev_global/env.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
"""
global environment varibles
"""
# Target interpreter version for this project.
PYTHON_VERSION = 3.8
# Human-readable time-zone label ('Beijing' is not an IANA tz name — TODO confirm consumers expect this form).
LOCAL_TIME_ZONE = 'Beijing'
# Application name.
PROG_NAME = 'Neutrino'
# strftime/strptime pattern for date-only values.
TIME_FMT = '%Y-%m-%d'
# strftime/strptime pattern for full timestamps (presumably for log records — verify against logger setup).
LOG_TIME_FMT = "%Y-%m-%d %H:%M:%S"
# Project repository home page.
GITHUB_URL = "https://github.com/FrederichRiver/neutrino3"
# Maintainer contact address.
EMAIL = "hezhiyuan_tju@163.com"
| 19.785714 | 58 | 0.696751 |
PYTHON_VERSION = 3.8
LOCAL_TIME_ZONE = 'Beijing'
PROG_NAME = 'Neutrino'
TIME_FMT = '%Y-%m-%d'
LOG_TIME_FMT = "%Y-%m-%d %H:%M:%S"
GITHUB_URL = "https://github.com/FrederichRiver/neutrino3"
EMAIL = "hezhiyuan_tju@163.com"
| true | true |
1c462bb178d3b38b6a5d6e1fcb701ca8021f18d6 | 4,671 | py | Python | src/djangoSrc/app_api/settings.py | dighr/nethope_audio | 8571bd6f621920f3fea085be3879cab15ccfc1e6 | [
"MIT"
] | null | null | null | src/djangoSrc/app_api/settings.py | dighr/nethope_audio | 8571bd6f621920f3fea085be3879cab15ccfc1e6 | [
"MIT"
] | 9 | 2021-03-09T21:01:14.000Z | 2022-03-02T06:01:00.000Z | src/djangoSrc/app_api/settings.py | nethopeorg/nethope_audio | 8571bd6f621920f3fea085be3879cab15ccfc1e6 | [
"MIT"
] | null | null | null | """
Django settings for app_api project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to version control; it
# should be rotated and loaded from an environment variable or secret store.
SECRET_KEY = '6xxk%%z1ii*9%j(a-8p63(l&v$fb2de1w2fl24b(@rxzgcpk-8'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is unconditionally True, including for the App Engine
# host listed in ALLOWED_HOSTS below — confirm this is intentional.
DEBUG = True
ALLOWED_HOSTS = ['nethope-pr-assessment.appspot.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'audio_transcription',
    'rest_framework',
    'dropbox_listener'
]
# NOTE(review): CORS is fully open (all origins accepted by the
# corsheaders.middleware.CorsMiddleware configured below).
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated'
    ]
}
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app_api.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Templates live one directory above this settings package.
        'DIRS': [os.path.join(os.path.dirname(__file__), '..//', 'templates').replace('\\', '/')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = './static'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# NOTE(review): both branches below read os.environ["username"]/['password']
# and will raise KeyError at import time if those variables are unset.
if os.getenv('GAE_APPLICATION', None):
    # Running on production App Engine, so connect to Google Cloud SQL using
    # the unix socket at /cloudsql/<your-cloudsql-connection string>
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': '/cloudsql/nethope-pr-assessment:us-central1:nethopemysql',
            'USER': os.environ["username"],
            'PASSWORD': os.environ['password'],
            'NAME': 'audio_transcription',
        }
    }
else:
    # Running locally so connect to either a local MySQL instance or connect to
    # Cloud SQL via the proxy. To start the proxy via command line:
    #
    #     $ cloud_sql_proxy -instances=[INSTANCE_CONNECTION_NAME]=tcp:3306
    #
    # See https://cloud.google.com/sql/docs/mysql-connect-proxy
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': '127.0.0.1',
            'PORT': '5505',
            'USER': os.environ["username"],
            'PASSWORD': os.environ['password'],
            'NAME': 'audio_transcription',
        }
    }
| 28.309091 | 98 | 0.673946 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '6xxk%%z1ii*9%j(a-8p63(l&v$fb2de1w2fl24b(@rxzgcpk-8'
DEBUG = True
ALLOWED_HOSTS = ['nethope-pr-assessment.appspot.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'audio_transcription',
'rest_framework',
'dropbox_listener'
]
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), '..//', 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app_api.wsgi.application'
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = './static'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
if os.getenv('GAE_APPLICATION', None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '/cloudsql/nethope-pr-assessment:us-central1:nethopemysql',
'USER': os.environ["username"],
'PASSWORD': os.environ['password'],
'NAME': 'audio_transcription',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': '5505',
'USER': os.environ["username"],
'PASSWORD': os.environ['password'],
'NAME': 'audio_transcription',
}
}
| true | true |
1c462c16ea8f0a10483f7cda6cfdbfbea0e74394 | 615 | py | Python | themes/solarized-light.py | ruturajv/powerline-shell | 2c30b504bc1da01e8f0a2bc2723ad5cc70662ec8 | [
"MIT"
] | null | null | null | themes/solarized-light.py | ruturajv/powerline-shell | 2c30b504bc1da01e8f0a2bc2723ad5cc70662ec8 | [
"MIT"
] | null | null | null | themes/solarized-light.py | ruturajv/powerline-shell | 2c30b504bc1da01e8f0a2bc2723ad5cc70662ec8 | [
"MIT"
] | null | null | null | class Color(DefaultColor):
USERNAME_FG = 15
USERNAME_BG = 4
USERNAME_ROOT_BG = 1
HOSTNAME_FG = 15
HOSTNAME_BG = 10
HOME_SPECIAL_DISPLAY = False
PATH_FG = 10
PATH_BG = 7
CWD_FG = 0
SEPARATOR_FG = 14
READONLY_BG = 1
READONLY_FG = 7
REPO_CLEAN_FG = 0
REPO_CLEAN_BG = 15
REPO_DIRTY_FG = 1
REPO_DIRTY_BG = 15
JOBS_FG = 4
JOBS_BG = 7
CMD_PASSED_FG = 15
CMD_PASSED_BG = 2
CMD_FAILED_FG = 15
CMD_FAILED_BG = 1
SVN_CHANGES_FG = REPO_DIRTY_FG
SVN_CHANGES_BG = REPO_DIRTY_BG
VIRTUAL_ENV_BG = 15
VIRTUAL_ENV_FG = 2
| 17.083333 | 34 | 0.64065 | class Color(DefaultColor):
USERNAME_FG = 15
USERNAME_BG = 4
USERNAME_ROOT_BG = 1
HOSTNAME_FG = 15
HOSTNAME_BG = 10
HOME_SPECIAL_DISPLAY = False
PATH_FG = 10
PATH_BG = 7
CWD_FG = 0
SEPARATOR_FG = 14
READONLY_BG = 1
READONLY_FG = 7
REPO_CLEAN_FG = 0
REPO_CLEAN_BG = 15
REPO_DIRTY_FG = 1
REPO_DIRTY_BG = 15
JOBS_FG = 4
JOBS_BG = 7
CMD_PASSED_FG = 15
CMD_PASSED_BG = 2
CMD_FAILED_FG = 15
CMD_FAILED_BG = 1
SVN_CHANGES_FG = REPO_DIRTY_FG
SVN_CHANGES_BG = REPO_DIRTY_BG
VIRTUAL_ENV_BG = 15
VIRTUAL_ENV_FG = 2
| true | true |
1c462cdb35036a78451f4184c423dcc60ac9ac47 | 7,497 | py | Python | poptorch/toolbox/Dataloader_h5.py | balewski/neuron_inverter_benchmark | 4ad8a03c07e174728ccea2bc5f24d1ae620966a8 | [
"MIT"
] | null | null | null | poptorch/toolbox/Dataloader_h5.py | balewski/neuron_inverter_benchmark | 4ad8a03c07e174728ccea2bc5f24d1ae620966a8 | [
"MIT"
] | null | null | null | poptorch/toolbox/Dataloader_h5.py | balewski/neuron_inverter_benchmark | 4ad8a03c07e174728ccea2bc5f24d1ae620966a8 | [
"MIT"
] | 1 | 2022-01-14T22:25:20.000Z | 2022-01-14T22:25:20.000Z | __author__ = "Jan Balewski"
__email__ = "janstar1122@gmail.com"
'''
this data loader reads all data upon start, there is no distributed sampler
reads all data at once and serves them from RAM
- optimized for mult-GPU training
- only used block of data from each H5-file
- reads data from common file for all ranks
- allows for in-fly transformation
Shuffle: only all samples after read is compleated
'''
import time, os
import random
import h5py
import numpy as np
from pprint import pprint
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import logging
import poptorch
#...!...!..................
def get_data_loader(params, inpMD,domain,popopts, verb=1):
    """Build an async poptorch.DataLoader for one domain ('train'/'val').

    params : experiment config dict; mutated as a side effect — this function
        writes '<domain>_steps_per_epoch', params['model']['inputShape'] and
        params['model']['outputSize'] back into it.
    inpMD  : input meta-data dict providing 'h5nameTemplate' and 'numFeature'.
    domain : which HDF5 arrays to serve ('train' or 'val').
    popopts: poptorch.Options describing the IPU session.
    """
    conf=copy.deepcopy(params) # the input is reused later in the upper level code
    #print('\n\nGDL:',domain)
    conf['domain']=domain
    conf['h5name']=params['data_path']+inpMD['h5nameTemplate'].replace('*',params['cell_name'])
    if params['num_inp_chan']!=None: #user wants a change
        assert params['num_inp_chan']>0
        assert params['num_inp_chan']<=inpMD['numFeature']
        conf['numInpChan']=params['num_inp_chan']
    else: # just copy the meta-data value
        conf['numInpChan']=inpMD['numFeature']
    conf['doAux']=False #legacy switch never used
    #pprint(conf)
    dataset= Dataset_h5_neuronInverter(conf,verb)
    if 'max_samples_per_epoch' in params:
        max_samp= params['max_samples_per_epoch']
        print('GDL: WARN, shorter %s max_samples=%d from %d'%(domain,max_samp,dataset.numLocFrames))
        dataset.numLocFrames=min(max_samp,dataset.numLocFrames)
    #print('bb',len(dataset),dataset.sanity())
    # GC-speciffic constraint:
    # at least one full device iteration must fit into the local shard
    assert len(dataset)//conf['local_batch_size']//conf['gc_m2000']['replica_steps_per_iter']>0
    params[domain+'_steps_per_epoch']=dataset.sanity()
    params['model']['inputShape']=list(dataset.data_frames.shape[1:])
    params['model']['outputSize']=dataset.data_parU.shape[1]
    #shuffle=domain=='train' # use False only for reproducibility
    shuffle=True # both: train & val
    # Graphcore speciffic
    dataloader = poptorch.DataLoader(popopts,dataset,
                    batch_size=conf['local_batch_size'],
                    num_workers=conf['num_data_workers'],
                    shuffle=shuffle,
                    persistent_workers=True,
                    mode=poptorch.DataLoaderMode.Async,
                    async_options={
                        "sharing_strategy":
                        poptorch.SharingStrategy.SharedMemory,
                        "early_preload": True,
                        "buffer_size": conf['num_data_workers'],
                        "load_indefinitely": True,
                        "miss_sleep_time_in_ms": 0
                    },
                    auto_distributed_partitioning=False, #to serve all data
    )
    # attach the effective config so callers can inspect what was used
    dataloader.conf=conf
    #print('cc',len(dataloader))
    return dataloader
#-------------------
#-------------------
#-------------------
class Dataset_h5_neuronInverter(Dataset):
    """Torch Dataset serving neuron-inverter samples entirely from RAM.

    openH5() reads one rank-dependent, batch-aligned shard of the
    '<domain>_frames' (input traces) and '<domain>_unitStar_par' (target
    parameters) arrays from an HDF5 file shared by all ranks, then samples
    are served from memory. Shuffling is left to the DataLoader.
    """

    def __init__(self, conf, verb=1):
        """conf: dict providing h5name, domain, numInpChan, local_batch_size,
        world_rank, world_size, doAux (and 'name' for error messages).
        verb: verbosity level (0 silences logging)."""
        self.conf = conf
        self.verb = verb
        self.openH5()  # populates data_frames, data_parU, numLocFrames
        assert self.numLocFrames > 0
        assert self.conf['world_rank'] >= 0
        if self.verb:
            logging.info(' DS:load-end %s locSamp=%d, X.shape: %s type: %s' % (
                self.conf['domain'], self.numLocFrames,
                str(self.data_frames.shape), self.data_frames.dtype))

    #...!...!..................
    def sanity(self):
        """Return the number of full local batches per epoch; abort (exit 67)
        when not even one batch fits."""
        stepPerEpoch = int(np.floor(self.numLocFrames / self.conf['local_batch_size']))
        if stepPerEpoch < 1:
            # BUG FIX: the original referenced an undefined name 'localBS'
            # here, so this diagnostic raised NameError instead of printing.
            print('\nDS:ABORT, Have you requested too few samples per rank?, numLocFrames=%d, BS=%d name=%s' % (
                self.numLocFrames, self.conf['local_batch_size'], self.conf['name']))
            exit(67)
        # all looks good
        return stepPerEpoch

    #...!...!..................
    def openH5(self):
        """Read this rank's shard of the HDF5 file into numpy arrays in RAM."""
        cf = self.conf
        inpF = cf['h5name']
        inpFeat = cf['numInpChan']  # number of input channels the user wants
        dom = cf['domain']
        if self.verb > 0:
            logging.info('DS:fileH5 %s rank %d of %d ' % (inpF, cf['world_rank'], cf['world_size']))
        if not os.path.exists(inpF):
            print('FAILD, missing HD5', inpF)
            exit(22)

        startTm0 = time.time()
        # = = = READING HD5 start
        h5f = h5py.File(inpF, 'r')
        Xshape = h5f[dom + '_frames'].shape
        totSamp = Xshape[0]
        # shard size rounded down to a whole number of local batches
        locStep = int(totSamp / cf['world_size'] / cf['local_batch_size'])
        locSamp = locStep * cf['local_batch_size']
        assert locStep > 0
        maxShard = totSamp // locSamp
        assert maxShard >= cf['world_size']
        # chosen shard is rank dependent, wraps up if not sufficient number of ranks
        myShard = self.conf['world_rank'] % maxShard
        sampIdxOff = myShard * locSamp
        if self.verb:
            logging.info('DS:file dom=%s myShard=%d, maxShard=%d, sampIdxOff=%d allXshape=%s inpFeat=%d' % (
                cf['domain'], myShard, maxShard, sampIdxOff, str(Xshape), inpFeat))

        # data reading starts ....
        assert inpFeat <= Xshape[2]
        if inpFeat == Xshape[2]:  # full read is cheaper when no channel slicing is needed
            self.data_frames = h5f[dom + '_frames'][sampIdxOff:sampIdxOff + locSamp]
        else:
            self.data_frames = h5f[dom + '_frames'][sampIdxOff:sampIdxOff + locSamp, :, :inpFeat]
        self.data_parU = h5f[dom + '_unitStar_par'][sampIdxOff:sampIdxOff + locSamp]
        if cf['doAux']:  # legacy switch, never enabled by get_data_loader
            self.data_parP = h5f[dom + '_phys_par'][sampIdxOff:sampIdxOff + locSamp]
        h5f.close()
        # = = = READING HD5 done
        if self.verb > 0:
            logging.info('DS: hd5 read time=%.2f(sec) dom=%s ' % (time.time() - startTm0, dom))
        # .......................................................
        #.... in-fly data embeddings/transformations would go here ....
        # .......................................................
        self.numLocFrames = self.data_frames.shape[0]

    def __len__(self):
        return self.numLocFrames

    def __getitem__(self, idx):
        """Return one (input frames, unit-star parameters) sample pair."""
        assert 0 <= idx < self.numLocFrames
        # NOTE: unreachable legacy code after this return (x_y_aux branch,
        # referencing undefined pCnt/bs) has been removed; behavior unchanged.
        return (self.data_frames[idx], self.data_parU[idx])
| 38.25 | 246 | 0.577431 | __author__ = "Jan Balewski"
__email__ = "janstar1122@gmail.com"
import time, os
import random
import h5py
import numpy as np
from pprint import pprint
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import logging
import poptorch
def get_data_loader(params, inpMD,domain,popopts, verb=1):
conf=copy.deepcopy(params)
conf['domain']=domain
conf['h5name']=params['data_path']+inpMD['h5nameTemplate'].replace('*',params['cell_name'])
if params['num_inp_chan']!=None:
assert params['num_inp_chan']>0
assert params['num_inp_chan']<=inpMD['numFeature']
conf['numInpChan']=params['num_inp_chan']
else:
conf['numInpChan']=inpMD['numFeature']
conf['doAux']=False
dataset= Dataset_h5_neuronInverter(conf,verb)
if 'max_samples_per_epoch' in params:
max_samp= params['max_samples_per_epoch']
print('GDL: WARN, shorter %s max_samples=%d from %d'%(domain,max_samp,dataset.numLocFrames))
dataset.numLocFrames=min(max_samp,dataset.numLocFrames)
assert len(dataset)//conf['local_batch_size']//conf['gc_m2000']['replica_steps_per_iter']>0
params[domain+'_steps_per_epoch']=dataset.sanity()
params['model']['inputShape']=list(dataset.data_frames.shape[1:])
params['model']['outputSize']=dataset.data_parU.shape[1]
der = poptorch.DataLoader(popopts,dataset,
batch_size=conf['local_batch_size'],
num_workers=conf['num_data_workers'],
shuffle=shuffle,
persistent_workers=True,
mode=poptorch.DataLoaderMode.Async,
async_options={
"sharing_strategy":
poptorch.SharingStrategy.SharedMemory,
"early_preload": True,
"buffer_size": conf['num_data_workers'],
"load_indefinitely": True,
"miss_sleep_time_in_ms": 0
},
auto_distributed_partitioning=False,
)
dataloader.conf=conf
return dataloader
class Dataset_h5_neuronInverter(Dataset):
def __init__(self, conf,verb=1):
self.conf=conf
self.verb=verb
self.openH5()
if self.verb and 0:
print('\nDS-cnst name=%s shuffle=%r BS=%d steps=%d myRank=%d numSampl/hd5=%d'%(self.conf['name'],self.conf['shuffle'],self.localBS,self.__len__(),self.conf['world_rank'],self.conf['numSamplesPerH5']),'H5-path=',self.conf['dataPath'])
assert self.numLocFrames>0
assert self.conf['world_rank']>=0
if self.verb :
logging.info(' DS:load-end %s locSamp=%d, X.shape: %s type: %s'%(self.conf['domain'],self.numLocFrames,str(self.data_frames.shape),self.data_frames.dtype))
def sanity(self):
stepPerEpoch=int(np.floor( self.numLocFrames/ self.conf['local_batch_size']))
if stepPerEpoch <1:
print('\nDS:ABORT, Have you requested too few samples per rank?, numLocFrames=%d, BS=%d name=%s'%(self.numLocFrames, localBS,self.conf['name']))
exit(67)
return stepPerEpoch
def openH5(self):
cf=self.conf
inpF=cf['h5name']
inpFeat=cf['numInpChan']
dom=cf['domain']
if self.verb>0 : logging.info('DS:fileH5 %s rank %d of %d '%(inpF,cf['world_rank'],cf['world_size']))
if not os.path.exists(inpF):
print('FAILD, missing HD5',inpF)
exit(22)
startTm0 = time.time()
h5f = h5py.File(inpF, 'r')
Xshape=h5f[dom+'_frames'].shape
totSamp=Xshape[0]
locStep=int(totSamp/cf['world_size']/cf['local_batch_size'])
locSamp=locStep*cf['local_batch_size']
assert locStep>0
maxShard= totSamp// locSamp
assert maxShard>=cf['world_size']
myShard=self.conf['world_rank'] %maxShard
sampIdxOff=myShard*locSamp
if self.verb: logging.info('DS:file dom=%s myShard=%d, maxShard=%d, sampIdxOff=%d allXshape=%s inpFeat=%d'%(cf['domain'],myShard,maxShard,sampIdxOff,str(Xshape),inpFeat))
assert inpFeat<=Xshape[2]
if inpFeat==Xshape[2]:
self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp]
else:
self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp,:,:inpFeat]
self.data_parU=h5f[dom+'_unitStar_par'][sampIdxOff:sampIdxOff+locSamp]
if cf['doAux']:
self.data_parP=h5f[dom+'_phys_par'][sampIdxOff:sampIdxOff+locSamp]
h5f.close()
if self.verb>0 :
startTm1 = time.time()
if self.verb: logging.info('DS: hd5 read time=%.2f(sec) dom=%s '%(startTm1 - startTm0,dom))
if 0:
xm=np.mean(self.data_frames)
xs=np.std(self.data_frames)
print('xm',xm,xs,myShard,cf['domain'])
ok99
self.numLocFrames=self.data_frames.shape[0]
return self.numLocFrames
def __getitem__(self, idx):
assert idx>=0
assert idx< self.numLocFrames
X=self.data_frames[idx]
Y=self.data_parU[idx]
return (X,Y)
if self.conf['x_y_aux']:
AUX=self.data_parP[pCnt:pCnt+bs]
return (X,Y,AUX)
| true | true |
1c462d72ef28053c69095bed607d4c067e869b96 | 3,358 | py | Python | expression_evaluation.py | mengguoru/expression_evaluation | a2e4dd45611e4577c38b40de3a718ecd5f77c5ae | [
"MIT"
] | null | null | null | expression_evaluation.py | mengguoru/expression_evaluation | a2e4dd45611e4577c38b40de3a718ecd5f77c5ae | [
"MIT"
] | null | null | null | expression_evaluation.py | mengguoru/expression_evaluation | a2e4dd45611e4577c38b40de3a718ecd5f77c5ae | [
"MIT"
] | null | null | null | '''
expression evaluation
author : mengguoru
date : 2016/03/27
'''
import re
class Expression:
    """Infix arithmetic expression evaluator for +, -, *, / and parentheses.

    Pipeline: split() tokenizes the string, infix_to_suffix() converts the
    token list to postfix (RPN) with the shunting-yard algorithm, and
    evaluate_suffix() computes the numeric result.
    """

    def split(self, expr):
        '''Strip all whitespace, then split *expr* into number and operator
        tokens; returns the token list (empty fragments removed).'''
        temp = re.split(r"(\+|\-|\*|\/|\(|\))", re.sub(r"\s+", '', expr))
        # re.split leaves '' around adjacent delimiters; drop those
        return [tok for tok in temp if tok != '']

    def infix_to_suffix(self, expr):
        '''Shunting-yard: convert an infix token list to postfix (RPN).

        Fixes two bugs of the previous version: equal-precedence operators
        are now popped (left associativity, so 8/4/2 means (8/4)/2), and an
        operator is no longer emitted twice when the stack empties mid-pop.
        '''
        stack_out = []
        stack_operator = []
        for tok in expr:
            if tok.isdigit():  # handles multi-digit numbers correctly
                stack_out.append(tok)
            elif tok == '(':
                stack_operator.append(tok)
            elif tok == ')':
                # pop down to, and discard, the matching '('
                while stack_operator:
                    top = stack_operator.pop()
                    if top == '(':
                        break
                    stack_out.append(top)
            else:
                # pop operators of higher-or-equal precedence (left-assoc)
                while stack_operator and stack_operator[-1] != '(' \
                        and not self.cmp_Precedence(tok, stack_operator[-1]):
                    stack_out.append(stack_operator.pop())
                stack_operator.append(tok)
        while stack_operator:
            stack_out.append(stack_operator.pop())
        return stack_out

    def cmp_Precedence(self, op1, op2):
        '''Return True when the incoming *op1* may be stacked on top of *op2*
        without popping it: op2 is '(' or op1 binds strictly tighter.
        (Fixed: equal precedence now returns False, giving the standard
        left-associative behavior for chains like 5-2+1.)'''
        if op2 == '(':
            return True
        prec = {'+': 1, '-': 1, '*': 2, '/': 2}
        return prec[op1] > prec[op2]

    def evaluate_suffix(self, expr):
        '''Evaluate a postfix (Reverse Polish Notation) token list.'''
        stack = []
        for tok in expr:
            if str(tok).isdigit():
                stack.append(int(tok))
            else:
                right = stack.pop()  # first pop is the right-hand operand
                left = stack.pop()
                stack.append(self.calculate_2_param(tok, right, left))
        return stack.pop()

    def calculate_2_param(self, oper, num1, num2):
        '''Apply the binary operator *oper*; num1 is the right-hand operand
        (popped first from the RPN stack), num2 the left-hand one.
        (Fixed: division was num1/num2, i.e. the operands were swapped; the
        old dict-literal dispatch also evaluated every branch eagerly, so
        any operation raised ZeroDivisionError when the left operand was 0.)'''
        if oper == '+':
            return num2 + num1
        if oper == '-':
            return num2 - num1
        if oper == '*':
            return num2 * num1
        if oper == '/':
            return num2 / num1
        raise ValueError('unknown operator: %r' % oper)

    def evaluate(self, expr=None):
        '''Evaluate an infix expression string end-to-end and return the
        result. Backward compatible with the original stub: calling with no
        argument still returns None.'''
        if expr is None:
            return None
        return self.evaluate_suffix(self.infix_to_suffix(self.split(expr)))
if __name__ == '__main__':
    # Demo / smoke test of the Expression pipeline.
    '''
    5 + ((1 + 2) * 4) − 3转成 [5,1,2,'+',4,'*','+',3,'-']
    '''
    a = Expression()
    b = a.split("5 + ((1 + 2) * 4)-3")
    print(b) # output: ['5', '+', '(', '(', '1', '+', '2', ')', '*', '4', ')', '-', '3'],test pass
    # postfix form is "5 1 2 + 4 * + 3 -"; evaluating it should give 14
    temp = ['5','1','2','+','4','*','+','3','-']
    print(a.evaluate_suffix(temp)) # output: 14, test pass
    # expected postfix output: 5 1 2 + 4 * + 3 -
print(a.infix_to_suffix(b)) | 38.159091 | 98 | 0.432102 | import re
class Expression:
def split(self,expr):
temp = re.split(r"(\+|\-|\*|\/|\(|\))",re.sub(r"\s+",'',expr))
temp2 = []
for i in range(len(temp)):
if temp[i] != '':
temp2.append(temp[i])
return temp2
def infix_to_suffix(self,expr):
stack_out = []
stack_operator = []
for i in range(len(expr)):
if str(expr[i]) >= '0' and str(expr[i]) <= '9':
stack_out.append(expr[i])
else:
if(len(stack_operator) == 0):
stack_operator.append(expr[i])
else:
if str(expr[i]) == ')':
while len(stack_operator) > 0:
temp = stack_operator.pop()
if temp != '(':
stack_out.append(temp)
else:
break
elif expr[i] == '(':
stack_operator.append(expr[i])
else:
temp = stack_operator.pop()
while self.cmp_Precedence(expr[i],temp) == False:
stack_out.append(temp)
if len(stack_operator) > 0:
temp = stack_operator.pop()
else:
break
stack_operator.append(temp)
stack_operator.append(expr[i])
while len(stack_operator) > 0:
stack_out.append(stack_operator.pop())
return stack_out
def cmp_Precedence(self,op1,op2):
if(op1 == '*'or op1 == '/') and (op2 == '+'or op2 == '-'):
return True
elif(op1 == '*'or op1 == '/') and (op2 == '*'or op2=='/'):
return True
elif(op1=='+'or op1=='-')and(op2=='+'or op2=='-'):
return True
elif op2=='(':
return True
else:
return False
def evaluate_suffix(self,expr):
stack = []
for i in range(len(expr)):
if str(expr[i]) >= '0' and str(expr[i]) <='9':
stack.append(int(expr[i]))
else:
stack.append(self.calculate_2_param(expr[i],stack.pop(),stack.pop()))
return stack.pop()
def calculate_2_param(self,oper,num1,num2):
return {'+':num1+num2,'-':num2-num1,'*':num1*num2,'/':num1/num2}[oper]
def evaluate(self):
pass
if __name__ == '__main__':
a = Expression()
b = a.split("5 + ((1 + 2) * 4)-3")
print(b)
temp = ['5','1','2','+','4','*','+','3','-']
print(a.evaluate_suffix(temp))
print(a.infix_to_suffix(b)) | true | true |
1c4630086ef30c6136a9edabe95d3911ecb465d4 | 13,194 | py | Python | lambda_function.py | rubrikinc/aws-native-secrets-rotation | c1488cc1b6fc2b89d32c83bd220678ee3bebfdbd | [
"MIT"
] | 1 | 2019-12-20T13:35:34.000Z | 2019-12-20T13:35:34.000Z | lambda_function.py | rubrikinc/aws-native-secrets-rotation | c1488cc1b6fc2b89d32c83bd220678ee3bebfdbd | [
"MIT"
] | null | null | null | lambda_function.py | rubrikinc/aws-native-secrets-rotation | c1488cc1b6fc2b89d32c83bd220678ee3bebfdbd | [
"MIT"
] | 2 | 2019-04-01T22:18:58.000Z | 2020-03-13T15:08:26.000Z | #!/usr/local/bin/python3
import boto3
import logging
import os
import ast
import json
import rubrik_cdm
from copy import deepcopy
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
    """Secrets Manager Rotation Template
    This is a template for creating an AWS Secrets Manager rotation lambda
    Args:
        event (dict): Lambda dictionary of event parameters. These keys must include the following:
            - SecretId: The secret ARN or identifier
            - ClientRequestToken: The ClientRequestToken of the secret version
            - Step: The rotation step (one of createSecret, setSecret, testSecret, or finishSecret)
        context (LambdaContext): The Lambda runtime information
    Raises:
        ResourceNotFoundException: If the secret with the specified arn and stage does not exist
        ValueError: If the secret is not properly configured for rotation
        KeyError: If the event parameters do not contain the expected keys
    """
    arn = event['SecretId']
    token = event['ClientRequestToken']
    step = event['Step']
    # Setup the local secret manager client
    secret_service_client = boto3.client('secretsmanager')
    # Make sure the version is staged correctly
    metadata = secret_service_client.describe_secret(SecretId=arn)
    if not metadata['RotationEnabled']:
        logger.error("Secret %s is not enabled for rotation" % arn)
        raise ValueError("Secret %s is not enabled for rotation" % arn)
    versions = metadata['VersionIdsToStages']
    if token not in versions:
        logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
        raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
    if "AWSCURRENT" in versions[token]:
        # rotation already finished for this version; nothing to do
        logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
        return
    elif "AWSPENDING" not in versions[token]:
        logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
        raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
    # retrieve current secret
    # NOTE(review): ast.literal_eval works only while the stored SecretString
    # is a Python-literal-compatible dict; json.loads would be the
    # conventional parser for a JSON secret — confirm the stored format.
    current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
    # if the secret is for the account this function is executing in, use this function's role to talk to IAM
    # (account id is field 5 of this Lambda's own function ARN)
    if current_secret['accountid'] == context.invoked_function_arn.split(":")[4]:
        iam_service_client = boto3.client('iam')
    # otherwise, attempt to assume a role into the target account
    else:
        iam_service_client = assume_role(role_arn=current_secret['rolearn'], session_name=current_secret['accountid']+'_session').client('iam')
    # dispatch to the handler for the requested rotation step
    if step == "createSecret":
        create_secret(secret_service_client, arn, token, iam_service_client, current_secret)
    elif step == "setSecret":
        set_secret(secret_service_client, arn, token)
    elif step == "testSecret":
        test_secret(secret_service_client, arn, token)
    elif step == "finishSecret":
        finish_secret(secret_service_client, arn, token, iam_service_client)
    else:
        raise ValueError("Invalid step parameter")
def assume_role(role_arn=None, session_name='my_session'):
    """Return a boto3 Session.

    When *role_arn* is given, the role is assumed via STS and a session
    backed by the temporary credentials is returned; otherwise a default
    session for the current IAM user/role is returned.
    """
    if not role_arn:
        return boto3.Session()
    sts_client = boto3.client('sts')
    creds = sts_client.assume_role(RoleArn=role_arn,
                                   RoleSessionName=session_name)['Credentials']
    return boto3.Session(
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'])
def create_secret(secret_service_client, arn, token, iam_service_client, current_secret):
    """Create the secret
    This method first checks for the existence of a secret for the passed in token. If one does not exist, it will generate a
    new secret and put it with the passed in token.
    Args:
        secret_service_client (client): The secrets manager service client
        arn (string): The secret ARN or other identifier
        token (string): The ClientRequestToken associated with the secret version
        iam_service_client (client): IAM client for the account owning the rotated user
        current_secret (dict): parsed AWSCURRENT secret; must contain 'iamuser'
    Raises:
        ResourceNotFoundException: If the secret with the specified arn and stage does not exist
        ValueError: If the IAM user already has two access keys (AWS limit), so no new key can be created
    """
    # Make sure the current secret exists
    secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")
    # Now try to get the secret version, if that fails, put a new secret
    try:
        secret_service_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage="AWSPENDING")
        logger.info("createSecret: Successfully retrieved secret for %s." % arn)
    except secret_service_client.exceptions.ResourceNotFoundException:
        # Generate new IAM credentials for this secret, fail if too many keys already exist
        # (IAM allows at most two access keys per user, and rotation needs a free slot)
        if len(iam_service_client.list_access_keys(UserName=current_secret['iamuser'])['AccessKeyMetadata']) > 1:
            # NOTE(review): "definied" typo below is in a runtime message; left
            # unchanged here to keep behavior byte-identical.
            logger.error("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
            raise ValueError("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
        else:
            new_access_keys = iam_service_client.create_access_key(UserName=current_secret['iamuser'])
            # Create new secret string: copy the current secret and swap in the new key pair
            new_secret = deepcopy(current_secret)
            new_secret['iamaccesskey'] = new_access_keys['AccessKey']['AccessKeyId']
            new_secret['iamsecretkey'] = new_access_keys['AccessKey']['SecretAccessKey']
            new_secret_json = json.dumps(new_secret)
            # Put the secret
            secret_service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=new_secret_json, VersionStages=['AWSPENDING'])
            logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(secret_service_client, arn, token):
"""Set the secret
This method should set the AWSPENDING secret in the service that the secret belongs to. For example, if the secret is a database
credential, this method should take the value of the AWSPENDING secret and set the user's password to this value in the database.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
"""
# Retrieve secrets
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
# connect to rubrik api
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find cloud native source, generate config for update operation
cloud_sources = rubrik.get('internal', '/aws/account', timeout=15, authentication=True)['data']
logger.info('attempting to get current cloud source detail from rubrik...')
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=15, authentication=True)
logger.info('got cloud source detail for %s' % source['id'])
logger.info(source_detail)
logger.info('checking if source detail access key %s matches current access key %s' % (source_detail['accessKey'], current_secret['iamaccesskey']))
if source_detail['accessKey'] == current_secret['iamaccesskey']:
logger.info('found match!')
source_update_detail = deepcopy(source_detail)
source_update_detail['secretKey'] = pending_secret['iamsecretkey']
source_update_detail['accessKey'] = pending_secret['iamaccesskey']
details_to_remove = ('configuredSlaDomainName', 'primaryClusterId', 'id', 'configuredSlaDomainId')
for key in details_to_remove:
source_update_detail.pop(key, None)
else:
logger.info('no match found')
# if we found a matching Cloud Source, rotate the access key
if source_update_detail:
rubrik.update_aws_native_account(source_update_detail['name'], source_update_detail, timeout=30)
else:
logger.error("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
raise ValueError("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
def test_secret(secret_service_client, arn, token):
"""Test the secret
This method should validate that the AWSPENDING secret works in the service that the secret belongs to. For example, if the secret
is a database credential, this method should validate that the user can login with the password in AWSPENDING and that the user has
all of the expected permissions against the database.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
"""
# retrieve pending secret
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
# connect to rubrik api
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find relevant cloud source
cloud_sources = rubrik.get('internal', '/aws/account', timeout=60, authentication=True)['data']
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=60, authentication=True)
if source_detail['accessKey'] == pending_secret['iamaccesskey']:
source_id = source_detail['id']
# check if the cloud source can iterate subnets in us-east-1
try:
rubrik.get('internal', '/aws/account/%s/subnet?region=us-east-1' % (source_id), timeout=60, authentication=True)
except:
logger.error("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
raise ValueError("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
logger.info("testSecret: Successfully tested %s with new access keys" % source_id)
def finish_secret(secret_service_client, arn, token, iam_service_client):
"""Finish the secret
This method finalizes the rotation process by marking the secret version passed in as the AWSCURRENT secret.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn does not exist
"""
# Get info about the depricated access key for deletion
depricated_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
# First describe the secret to get the current version
metadata = secret_service_client.describe_secret(SecretId=arn)
current_version = None
for version in metadata["VersionIdsToStages"]:
if "AWSCURRENT" in metadata["VersionIdsToStages"][version]:
if version == token:
# The correct version is already marked as current, return
logger.info("finishSecret: Version %s already marked as AWSCURRENT for %s" % (version, arn))
return
current_version = version
break
# Finalize by staging the secret version current
secret_service_client.update_secret_version_stage(SecretId=arn, VersionStage="AWSCURRENT", MoveToVersionId=token, RemoveFromVersionId=current_version)
logger.info("finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s." % (version, arn))
# Delete the depricated access key
iam_service_client.delete_access_key(UserName=depricated_secret['iamuser'], AccessKeyId=depricated_secret['iamaccesskey'])
logger.info("Deleted depricated access key %s" % depricated_secret['iamaccesskey']) | 53.417004 | 167 | 0.719948 |
import boto3
import logging
import os
import ast
import json
import rubrik_cdm
from copy import deepcopy
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
arn = event['SecretId']
token = event['ClientRequestToken']
step = event['Step']
secret_service_client = boto3.client('secretsmanager')
metadata = secret_service_client.describe_secret(SecretId=arn)
if not metadata['RotationEnabled']:
logger.error("Secret %s is not enabled for rotation" % arn)
raise ValueError("Secret %s is not enabled for rotation" % arn)
versions = metadata['VersionIdsToStages']
if token not in versions:
logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
if "AWSCURRENT" in versions[token]:
logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
return
elif "AWSPENDING" not in versions[token]:
logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
if current_secret['accountid'] == context.invoked_function_arn.split(":")[4]:
iam_service_client = boto3.client('iam')
# otherwise, attempt to assume a role into the target account
else:
iam_service_client = assume_role(role_arn=current_secret['rolearn'], session_name=current_secret['accountid']+'_session').client('iam')
if step == "createSecret":
create_secret(secret_service_client, arn, token, iam_service_client, current_secret)
elif step == "setSecret":
set_secret(secret_service_client, arn, token)
elif step == "testSecret":
test_secret(secret_service_client, arn, token)
elif step == "finishSecret":
finish_secret(secret_service_client, arn, token, iam_service_client)
else:
raise ValueError("Invalid step parameter")
def assume_role(role_arn=None, session_name='my_session'):
if role_arn:
client = boto3.client('sts')
response = client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)
session = boto3.Session(
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken'])
return session
else:
return boto3.Session()
def create_secret(secret_service_client, arn, token, iam_service_client, current_secret):
# Make sure the current secret exists
secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")
# Now try to get the secret version, if that fails, put a new secret
try:
secret_service_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage="AWSPENDING")
logger.info("createSecret: Successfully retrieved secret for %s." % arn)
except secret_service_client.exceptions.ResourceNotFoundException:
# Generate new IAM credentials for this secret, fail if too many keys already exist
if len(iam_service_client.list_access_keys(UserName=current_secret['iamuser'])['AccessKeyMetadata']) > 1:
logger.error("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
raise ValueError("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
else:
new_access_keys = iam_service_client.create_access_key(UserName=current_secret['iamuser'])
# Create new secret string
new_secret = deepcopy(current_secret)
new_secret['iamaccesskey'] = new_access_keys['AccessKey']['AccessKeyId']
new_secret['iamsecretkey'] = new_access_keys['AccessKey']['SecretAccessKey']
new_secret_json = json.dumps(new_secret)
# Put the secret
secret_service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=new_secret_json, VersionStages=['AWSPENDING'])
logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(secret_service_client, arn, token):
# Retrieve secrets
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
# connect to rubrik api
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find cloud native source, generate config for update operation
cloud_sources = rubrik.get('internal', '/aws/account', timeout=15, authentication=True)['data']
logger.info('attempting to get current cloud source detail from rubrik...')
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=15, authentication=True)
logger.info('got cloud source detail for %s' % source['id'])
logger.info(source_detail)
logger.info('checking if source detail access key %s matches current access key %s' % (source_detail['accessKey'], current_secret['iamaccesskey']))
if source_detail['accessKey'] == current_secret['iamaccesskey']:
logger.info('found match!')
source_update_detail = deepcopy(source_detail)
source_update_detail['secretKey'] = pending_secret['iamsecretkey']
source_update_detail['accessKey'] = pending_secret['iamaccesskey']
details_to_remove = ('configuredSlaDomainName', 'primaryClusterId', 'id', 'configuredSlaDomainId')
for key in details_to_remove:
source_update_detail.pop(key, None)
else:
logger.info('no match found')
# if we found a matching Cloud Source, rotate the access key
if source_update_detail:
rubrik.update_aws_native_account(source_update_detail['name'], source_update_detail, timeout=30)
else:
logger.error("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
raise ValueError("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
def test_secret(secret_service_client, arn, token):
# retrieve pending secret
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
# connect to rubrik api
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find relevant cloud source
cloud_sources = rubrik.get('internal', '/aws/account', timeout=60, authentication=True)['data']
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=60, authentication=True)
if source_detail['accessKey'] == pending_secret['iamaccesskey']:
source_id = source_detail['id']
# check if the cloud source can iterate subnets in us-east-1
try:
rubrik.get('internal', '/aws/account/%s/subnet?region=us-east-1' % (source_id), timeout=60, authentication=True)
except:
logger.error("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
raise ValueError("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
logger.info("testSecret: Successfully tested %s with new access keys" % source_id)
def finish_secret(secret_service_client, arn, token, iam_service_client):
# Get info about the depricated access key for deletion
depricated_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
# First describe the secret to get the current version
metadata = secret_service_client.describe_secret(SecretId=arn)
current_version = None
for version in metadata["VersionIdsToStages"]:
if "AWSCURRENT" in metadata["VersionIdsToStages"][version]:
if version == token:
# The correct version is already marked as current, return
logger.info("finishSecret: Version %s already marked as AWSCURRENT for %s" % (version, arn))
return
current_version = version
break
# Finalize by staging the secret version current
secret_service_client.update_secret_version_stage(SecretId=arn, VersionStage="AWSCURRENT", MoveToVersionId=token, RemoveFromVersionId=current_version)
logger.info("finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s." % (version, arn))
# Delete the depricated access key
iam_service_client.delete_access_key(UserName=depricated_secret['iamuser'], AccessKeyId=depricated_secret['iamaccesskey'])
logger.info("Deleted depricated access key %s" % depricated_secret['iamaccesskey']) | true | true |
1c4630ae0f50b4044900f2782a2b8d3bff5fdc1e | 401 | py | Python | task_manager_api/task_manager_api/urls.py | LsbProxy/task_manager_api | b014d74aa3cd5bc9952ac04548350d3a08836c8f | [
"MIT"
] | null | null | null | task_manager_api/task_manager_api/urls.py | LsbProxy/task_manager_api | b014d74aa3cd5bc9952ac04548350d3a08836c8f | [
"MIT"
] | null | null | null | task_manager_api/task_manager_api/urls.py | LsbProxy/task_manager_api | b014d74aa3cd5bc9952ac04548350d3a08836c8f | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('auth.urls')),
path('', include('api.urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 26.733333 | 60 | 0.680798 | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('auth.urls')),
path('', include('api.urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| true | true |
1c4631519443af09252e50a84ea2e878f561085d | 20,551 | py | Python | flux_combined_high_binding/model_857.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_combined_high_binding/model_857.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_combined_high_binding/model_857.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 100000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 170000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# Initial conditions: each species pattern (with all binding sites unbound)
# is assigned its corresponding *_0 amount parameter defined above.
# NOTE(review): this model looks auto-generated from a PySB export — edit the
# generator rather than these lines.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.143519 | 798 | 0.804146 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 100000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 170000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
1c4631a70ee71cb407b8c93a4400df836801fe55 | 1,365 | py | Python | examples/cellular_example.py | timhunderwood/numpy-to-stl | eea305ae30bb4aa5882d7c66edebe76173da8b06 | [
"MIT"
] | 1 | 2020-12-29T08:56:48.000Z | 2020-12-29T08:56:48.000Z | examples/cellular_example.py | timhunderwood/numpy-to-stl | eea305ae30bb4aa5882d7c66edebe76173da8b06 | [
"MIT"
] | null | null | null | examples/cellular_example.py | timhunderwood/numpy-to-stl | eea305ae30bb4aa5882d7c66edebe76173da8b06 | [
"MIT"
] | 1 | 2021-06-16T02:06:40.000Z | 2021-06-16T02:06:40.000Z | import cellular
import numpy
import mpl_toolkits.mplot3d
import matplotlib.pyplot as plt
import numpy_to_stl
def get_simulated_world(cells_per_day, rule, number_of_days):
    """Run a cellular-automaton simulation and return its stacked state.

    :param cells_per_day: number of cells in each generation (world width)
    :param rule: update rule applied by the simulation
    :param number_of_days: number of generations to simulate
    :return: the world state rows stacked into a single 2D numpy array
    """
    simulated_world = cellular.World(cells_per_day, rule, ones=False)
    simulated_world.simulate(number_of_days)
    # Side effect: renders the simulated world (landscape orientation).
    simulated_world.display(landscape=True)
    return numpy.vstack(simulated_world.state)
def create_mesh_of_world(
    cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=100
):
    """Simulate a cellular-automaton world and build a surface mesh from it.

    :param cells_per_day: number of cells in each generation (world width)
    :param rule: update rule applied by the simulation
    :param number_of_days: number of generations to simulate
    :return: surface mesh built from the simulated world array
    """
    world_state = get_simulated_world(cells_per_day, rule, number_of_days)
    return numpy_to_stl.create_surface_mesh_from_array(world_state, base_height=1)
def plot_stl_world(cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=200):
    """Simulate a cellular-automaton world, plot its surface mesh in 3D and
    save the mesh to ``small_cellular_example.stl``.

    :param cells_per_day: number of cells in each generation (world width)
    :param rule: update rule applied by the simulation
    :param number_of_days: number of generations to simulate
    """
    world_mesh = create_mesh_of_world(cells_per_day, rule, number_of_days)

    figure = plt.figure()
    axes = mpl_toolkits.mplot3d.Axes3D(figure)

    # Add the mesh triangles to the 3D axes.
    axes.add_collection3d(
        mpl_toolkits.mplot3d.art3d.Poly3DCollection(
            world_mesh.vectors, facecolor="red", edgecolor="black"
        )
    )

    # Auto-scale the axes to the mesh extent. ndarray.flatten only accepts an
    # order flag ('C', 'F', 'A', 'K'); the previous flatten(-1) raises on
    # current numpy versions, so use the default C-order flattening.
    scale = world_mesh.points.flatten()
    axes.auto_scale_xyz(scale, scale, scale)

    # Show the plot on screen, then persist the mesh to disk.
    plt.show()
    world_mesh.save("small_cellular_example.stl")
# Script entry point: simulate 200 generations of a 100-cell world, display
# the resulting mesh and write it to small_cellular_example.stl.
if __name__ == "__main__":
    plot_stl_world(cells_per_day=100, number_of_days=200)
| 29.673913 | 88 | 0.745788 | import cellular
import numpy
import mpl_toolkits.mplot3d
import matplotlib.pyplot as plt
import numpy_to_stl
def get_simulated_world(cells_per_day, rule, number_of_days):
world = cellular.World(cells_per_day, rule, ones=False)
world.simulate(number_of_days)
world.display(landscape=True)
return numpy.vstack(world.state)
def create_mesh_of_world(
cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=100
):
array = get_simulated_world(cells_per_day, rule, number_of_days)
return numpy_to_stl.create_surface_mesh_from_array(array, base_height=1)
def plot_stl_world(cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=200):
world_mesh = create_mesh_of_world(cells_per_day, rule, number_of_days)
figure = plt.figure()
axes = mpl_toolkits.mplot3d.Axes3D(figure)
ot3d.art3d.Poly3DCollection(
world_mesh.vectors, facecolor="red", edgecolor="black"
)
)
scale = world_mesh.points.flatten(-1)
axes.auto_scale_xyz(scale, scale, scale)
plt.show()
world_mesh.save("small_cellular_example.stl")
if __name__ == "__main__":
plot_stl_world(cells_per_day=100, number_of_days=200)
| true | true |
1c463309478ab2730838c468b3402f7a8124d47e | 3,752 | py | Python | elasticlogger/hooks/elasticsearch/elasticsearch.py | danteay/elasticlogger | 3182e3d1d34564a5e95aaef3c10239d162eb691a | [
"MIT"
] | 1 | 2021-06-27T10:17:16.000Z | 2021-06-27T10:17:16.000Z | elasticlogger/hooks/elasticsearch/elasticsearch.py | danteay/elasticlogger | 3182e3d1d34564a5e95aaef3c10239d162eb691a | [
"MIT"
] | 4 | 2021-06-29T19:41:39.000Z | 2021-09-23T21:47:22.000Z | elasticlogger/hooks/elasticsearch/elasticsearch.py | danteay/elasticlogger | 3182e3d1d34564a5e95aaef3c10239d162eb691a | [
"MIT"
] | 1 | 2022-03-14T18:27:42.000Z | 2022-03-14T18:27:42.000Z | """Elastic search hook function."""
import os
import re
from datetime import datetime
from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING
from typing import Any, AnyStr, Dict, NoReturn, Optional
from elasticsearch import Elasticsearch
from elasticlogger import utils
from elasticlogger.hooks import HookContext
from elasticlogger.ports.elasticsearch import get_instance
from .errors import ESConfigurationError, ESEmptyIndexError, ESEmptyUrlError
class ElasticSearch:
    """Hook that streams log records to an Elasticsearch index.

    :type url: str
    :param url: Elasticsearch cluster endpoint; falls back to the
        ``ELASTICSEARCH_URL`` environment variable when omitted
    :type index: str
    :param index: Index where the log documents are stored; falls back to the
        ``ELASTICSEARCH_INDEX`` environment variable when omitted
    :param **kwargs: Extra options forwarded to the Elasticsearch client
    """

    def __init__(self, url: Optional[AnyStr] = None, index: Optional[AnyStr] = None, **kwargs: Dict[AnyStr, Any]):
        # Explicit arguments win; empty/None values fall back to environment.
        self.__url: AnyStr = url or os.getenv('ELASTICSEARCH_URL', None)
        self.__index: AnyStr = index or os.getenv('ELASTICSEARCH_INDEX', None)
        self.__kwargs: Dict[AnyStr, Any] = kwargs
        self.__client: Elasticsearch = self.__init_client()

    def __call__(self, context: HookContext) -> NoReturn:
        """Index the given log record if its level passes the logger level.

        :param context: Current log record context
        """
        if not self.__check_level(context.level, context.logger_level):
            return

        # NOTE(review): naive local timestamp — confirm UTC is not expected.
        body = {
            "@timestamp": datetime.now(),
            "@message": context.message,
            "level": utils.get_level_name(context.level),
            "name": context.logger_name,
        }
        body.update(context.extra_data)

        self.__client.index(index=self.__index, body=self.__clean_metadata_keys(body))

    def __init_client(self) -> Elasticsearch:
        """Build the Elasticsearch client used to stream logs.

        :return Elasticsearch: New client instance
        :raises ESEmptyUrlError: when no endpoint was configured
        :raises ESEmptyIndexError: when no index was configured
        :raises ESConfigurationError: when the client cannot be created
        """
        if self.__url is None:
            raise ESEmptyUrlError('Empty Elasticsearch server.')

        if self.__index is None:
            raise ESEmptyIndexError('Empty Elasticsearch index.')

        try:
            return get_instance(self.__url, **self.__kwargs)
        except Exception as error:
            raise ESConfigurationError('Error creating Elasticsearch client instance') from error

    @staticmethod
    def __clean_metadata_keys(document: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
        """Drop keys that start with an underscore so they cannot be confused
        with Elasticsearch metadata fields.

        :param document: Full document data
        :return Dict[AnyStr, Any]: Document without underscore-prefixed keys
        """
        return {key: value for key, value in document.items() if not key.startswith("_")}

    @staticmethod
    def __check_level(log_level: int, logger_level: int) -> bool:
        """Tell whether a record at ``log_level`` should be streamed given the
        configured ``logger_level``.

        :param log_level: Level of the document being logged
        :param logger_level: Global logger level
        :return bool: Boolean assertion
        """
        # For each standard log level, the set of logger levels that let the
        # record through; any non-standard level is rejected.
        allowed_logger_levels = {
            DEBUG: {DEBUG},
            INFO: {DEBUG, INFO},
            WARNING: {DEBUG, INFO, WARNING},
            ERROR: {DEBUG, INFO, WARNING, ERROR},
            CRITICAL: {DEBUG, INFO, WARNING, ERROR, CRITICAL},
        }
        return logger_level in allowed_logger_levels.get(log_level, ())
| 32.068376 | 114 | 0.661247 |
import os
import re
from datetime import datetime
from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING
from typing import Any, AnyStr, Dict, NoReturn, Optional
from elasticsearch import Elasticsearch
from elasticlogger import utils
from elasticlogger.hooks import HookContext
from elasticlogger.ports.elasticsearch import get_instance
from .errors import ESConfigurationError, ESEmptyIndexError, ESEmptyUrlError
class ElasticSearch:
def __init__(self, url: Optional[AnyStr] = None, index: Optional[AnyStr] = None, **kwargs: Dict[AnyStr, Any]):
self.__url: AnyStr = url if url else os.getenv('ELASTICSEARCH_URL', None)
self.__index: AnyStr = index if index else os.getenv('ELASTICSEARCH_INDEX', None)
self.__kwargs: Dict[AnyStr, Any] = kwargs
self.__client: Elasticsearch = self.__init_client()
def __call__(self, context: HookContext) -> NoReturn:
if not self.__check_level(context.level, context.logger_level):
return
document = {
"@timestamp": datetime.now(),
"@message": context.message,
"level": utils.get_level_name(context.level),
"name": context.logger_name,
}
document.update(context.extra_data)
document = self.__clean_metadata_keys(document)
self.__client.index(index=self.__index, body=document)
def __init_client(self) -> Elasticsearch:
if self.__url is None:
raise ESEmptyUrlError('Empty Elasticsearch server.')
if self.__index is None:
raise ESEmptyIndexError('Empty Elasticsearch index.')
try:
return get_instance(self.__url, **self.__kwargs)
except Exception as error:
raise ESConfigurationError('Error creating Elasticsearch client instance') from error
@staticmethod
def __clean_metadata_keys(document: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
new_document = document.copy()
for key in document.keys():
if re.search("^_", key) is not None:
del new_document[key]
return new_document
@staticmethod
def __check_level(log_level: int, logger_level: int) -> bool:
if log_level == DEBUG and logger_level == DEBUG:
return True
if log_level == INFO and logger_level in {DEBUG, INFO}:
return True
if log_level == WARNING and logger_level in {DEBUG, INFO, WARNING}:
return True
if log_level == ERROR and logger_level in {DEBUG, INFO, WARNING, ERROR}:
return True
if log_level == CRITICAL and logger_level in {DEBUG, INFO, WARNING, ERROR, CRITICAL}:
return True
return False
| true | true |
1c4633fe467d2a4c8b937c02025f2e49b2342f56 | 420 | py | Python | instapics/forms.py | UMULISA12/Instagram_Ip | 169c9326ef247c85808d9b7b8989c59740887615 | [
"MIT"
] | null | null | null | instapics/forms.py | UMULISA12/Instagram_Ip | 169c9326ef247c85808d9b7b8989c59740887615 | [
"MIT"
] | null | null | null | instapics/forms.py | UMULISA12/Instagram_Ip | 169c9326ef247c85808d9b7b8989c59740887615 | [
"MIT"
] | null | null | null | from .models import Image,Profile,Comment
from django import forms
class NewImageForm(forms.ModelForm):
class Meta:
model=Image
exclude=['profile','pub_date','name','likes','comments']
class NewProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user']
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['commenter'] | 24.705882 | 64 | 0.666667 | from .models import Image,Profile,Comment
from django import forms
class NewImageForm(forms.ModelForm):
class Meta:
model=Image
exclude=['profile','pub_date','name','likes','comments']
class NewProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user']
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['commenter'] | true | true |
1c4634872f7d494377366f5d864db3ecea175182 | 1,794 | py | Python | dataset/dataset_test.py | Beta3-Data/FacialLandmark-Live-Training | 10b2b464f1deb015a7f152bb14f120f0dc6f9de2 | [
"MIT"
] | null | null | null | dataset/dataset_test.py | Beta3-Data/FacialLandmark-Live-Training | 10b2b464f1deb015a7f152bb14f120f0dc6f9de2 | [
"MIT"
] | null | null | null | dataset/dataset_test.py | Beta3-Data/FacialLandmark-Live-Training | 10b2b464f1deb015a7f152bb14f120f0dc6f9de2 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from FaceLandmarksDataset import FaceLandmarksDataset
from FaceLandmarksDataset import SmartRandomCrop
from FaceLandmarksDataset import Rescale
# Ignore warnings
def show_landmarks(image, landmarks):
"""Show image with landmarks"""
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
plt.pause(0.001) # pause a bit so that plots are updated
landmarks_frame = pd.read_csv('face_landmarks.csv')
n = 65
img_name = landmarks_frame.ix[n, 0]
landmarks = landmarks_frame.ix[n, 1:].as_matrix().astype('float')
landmarks = landmarks.reshape(-1, 2)
max_xy = np.max(landmarks,axis=0)
min_xy = np.min(landmarks,axis=0)
print(max_xy)
print(min_xy)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
face_dataset = FaceLandmarksDataset(csv_file='face_landmarks.csv',
root_dir='data/image/')
fig = plt.figure()
crop = SmartRandomCrop()
scale = Rescale((256,256))
composed = transforms.Compose([SmartRandomCrop(),])
for i in range(len(face_dataset)):
sample = face_dataset[i]
sample = crop(sample)
sample = scale(sample)
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
break
| 30.40678 | 75 | 0.682832 | from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from FaceLandmarksDataset import FaceLandmarksDataset
from FaceLandmarksDataset import SmartRandomCrop
from FaceLandmarksDataset import Rescale
def show_landmarks(image, landmarks):
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
plt.pause(0.001)
landmarks_frame = pd.read_csv('face_landmarks.csv')
n = 65
img_name = landmarks_frame.ix[n, 0]
landmarks = landmarks_frame.ix[n, 1:].as_matrix().astype('float')
landmarks = landmarks.reshape(-1, 2)
max_xy = np.max(landmarks,axis=0)
min_xy = np.min(landmarks,axis=0)
print(max_xy)
print(min_xy)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
face_dataset = FaceLandmarksDataset(csv_file='face_landmarks.csv',
root_dir='data/image/')
fig = plt.figure()
crop = SmartRandomCrop()
scale = Rescale((256,256))
composed = transforms.Compose([SmartRandomCrop(),])
for i in range(len(face_dataset)):
sample = face_dataset[i]
sample = crop(sample)
sample = scale(sample)
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
break
| true | true |
1c4634bf1a119368bd2b1ab2cfa1775e8ec4d0ce | 9,856 | py | Python | pyzoo/test/zoo/automl/model/test_Seq2Seq.py | Wesley-Du/analytics-zoo | e4ca11b219a43bceec99aba39cf30c8aa368e8b3 | [
"Apache-2.0"
] | 35 | 2020-07-03T06:31:12.000Z | 2020-07-12T08:38:10.000Z | pyzoo/test/zoo/automl/model/test_Seq2Seq.py | Angelina319/analytics-zoo | 439f2c99d657fb20a5ff4bf510869616402ba0cf | [
"Apache-2.0"
] | 2 | 2018-10-31T01:20:05.000Z | 2018-11-02T06:06:35.000Z | pyzoo/test/zoo/automl/model/test_Seq2Seq.py | Angelina319/analytics-zoo | 439f2c99d657fb20a5ff4bf510869616402ba0cf | [
"Apache-2.0"
] | 4 | 2019-02-25T03:26:56.000Z | 2019-03-06T04:41:31.000Z | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
import tempfile
import pytest
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.automl.model.Seq2Seq import *
from zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer
from numpy.testing import assert_array_almost_equal
class TestSeq2Seq(ZooTestCase):
def setup_method(self, method):
# super().setup_method(method)
self.train_data = pd.DataFrame(data=np.random.randn(64, 4))
self.val_data = pd.DataFrame(data=np.random.randn(16, 4))
self.test_data = pd.DataFrame(data=np.random.randn(16, 4))
self.past_seq_len = 6
self.future_seq_len_1 = 1
self.future_seq_len_2 = 2
# use roll method in time_sequence
self.feat = TimeSequenceFeatureTransformer()
self.config = {
'batch_size': 32,
'epochs': 1
}
self.model_1 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_1)
self.model_2 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_2)
self.fitted = False
self.predict_1 = None
self.predict_2 = None
def teardown_method(self, method):
pass
def test_fit_eval_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
print("fit_eval_future_seq_len_1:",
self.model_1.fit_eval(x_train_1, y_train_1, **self.config))
assert self.model_1.past_seq_len == 6
assert self.model_1.feature_num == 4
assert self.model_1.future_seq_len == 1
assert self.model_1.target_col_num == 1
def test_fit_eval_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
print("fit_eval_future_seq_len_2:",
self.model_2.fit_eval(x_train_2, y_train_2, **self.config))
assert self.model_2.future_seq_len == 2
self.fitted = True
def test_evaluate_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_val_1, y_val_1 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
print("evaluate_future_seq_len_1:", self.model_1.evaluate(x_val_1,
y_val_1,
metric=['mse',
'r2']))
def test_evaluate_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_val_2, y_val_2 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
print("evaluate_future_seq_len_2:", self.model_2.evaluate(x_val_2,
y_val_2,
metric=['mse',
'r2']))
def test_predict_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1 = self.model_1.predict(x_test_1)
assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)
def test_predict_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2 = self.model_2.predict(x_test_2)
assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)
def test_save_restore_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1_before = self.model_1.predict(x_test_1)
new_model_1 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_1)
restore(dirname, model=new_model_1, config=self.config)
predict_1_after = new_model_1.predict(x_test_1)
assert_array_almost_equal(predict_1_before, predict_1_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_1_before,
predict_1_after)
new_config = {'epochs': 1}
new_model_1.fit_eval(x_train_1, y_train_1, **new_config)
finally:
shutil.rmtree(dirname)
def test_save_restore_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2_before = self.model_2.predict(x_test_2)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
predict_2_after = new_model_2.predict(x_test_2)
assert_array_almost_equal(predict_2_before, predict_2_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_2_before,
predict_2_after)
new_config = {'epochs': 2}
new_model_2.fit_eval(x_train_2, y_train_2, **new_config)
finally:
shutil.rmtree(dirname)
def test_predict_with_uncertainty(self,):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, mc=True, **self.config)
prediction, uncertainty = self.model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
prediction, uncertainty = new_model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
finally:
shutil.rmtree(dirname)
if __name__ == '__main__':
pytest.main([__file__])
| 48.078049 | 94 | 0.581879 |
import shutil
import tempfile
import pytest
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.automl.model.Seq2Seq import *
from zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer
from numpy.testing import assert_array_almost_equal
class TestSeq2Seq(ZooTestCase):
def setup_method(self, method):
self.train_data = pd.DataFrame(data=np.random.randn(64, 4))
self.val_data = pd.DataFrame(data=np.random.randn(16, 4))
self.test_data = pd.DataFrame(data=np.random.randn(16, 4))
self.past_seq_len = 6
self.future_seq_len_1 = 1
self.future_seq_len_2 = 2
self.feat = TimeSequenceFeatureTransformer()
self.config = {
'batch_size': 32,
'epochs': 1
}
self.model_1 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_1)
self.model_2 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_2)
self.fitted = False
self.predict_1 = None
self.predict_2 = None
def teardown_method(self, method):
pass
def test_fit_eval_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
print("fit_eval_future_seq_len_1:",
self.model_1.fit_eval(x_train_1, y_train_1, **self.config))
assert self.model_1.past_seq_len == 6
assert self.model_1.feature_num == 4
assert self.model_1.future_seq_len == 1
assert self.model_1.target_col_num == 1
def test_fit_eval_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
print("fit_eval_future_seq_len_2:",
self.model_2.fit_eval(x_train_2, y_train_2, **self.config))
assert self.model_2.future_seq_len == 2
self.fitted = True
def test_evaluate_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_val_1, y_val_1 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
print("evaluate_future_seq_len_1:", self.model_1.evaluate(x_val_1,
y_val_1,
metric=['mse',
'r2']))
def test_evaluate_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_val_2, y_val_2 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
print("evaluate_future_seq_len_2:", self.model_2.evaluate(x_val_2,
y_val_2,
metric=['mse',
'r2']))
def test_predict_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1 = self.model_1.predict(x_test_1)
assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)
def test_predict_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2 = self.model_2.predict(x_test_2)
assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)
def test_save_restore_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1_before = self.model_1.predict(x_test_1)
new_model_1 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_1)
restore(dirname, model=new_model_1, config=self.config)
predict_1_after = new_model_1.predict(x_test_1)
assert_array_almost_equal(predict_1_before, predict_1_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_1_before,
predict_1_after)
new_config = {'epochs': 1}
new_model_1.fit_eval(x_train_1, y_train_1, **new_config)
finally:
shutil.rmtree(dirname)
def test_save_restore_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2_before = self.model_2.predict(x_test_2)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
predict_2_after = new_model_2.predict(x_test_2)
assert_array_almost_equal(predict_2_before, predict_2_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_2_before,
predict_2_after)
new_config = {'epochs': 2}
new_model_2.fit_eval(x_train_2, y_train_2, **new_config)
finally:
shutil.rmtree(dirname)
def test_predict_with_uncertainty(self,):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, mc=True, **self.config)
prediction, uncertainty = self.model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
prediction, uncertainty = new_model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
finally:
shutil.rmtree(dirname)
if __name__ == '__main__':
pytest.main([__file__])
| true | true |
1c46378d907548f7177d7694871d9e0601053adf | 61,104 | py | Python | python/ccxt/bitfinex2.py | Jsn2win/ccxt | fff369de2192a3b7c71ab1d29d0923db8d5af913 | [
"MIT"
] | null | null | null | python/ccxt/bitfinex2.py | Jsn2win/ccxt | fff369de2192a3b7c71ab1d29d0923db8d5af913 | [
"MIT"
] | null | null | null | python/ccxt/bitfinex2.py | Jsn2win/ccxt | fff369de2192a3b7c71ab1d29d0923db8d5af913 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.bitfinex import bitfinex
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
class bitfinex2(bitfinex):
def describe(self):
return self.deep_extend(super(bitfinex2, self).describe(), {
'id': 'bitfinex2',
'name': 'Bitfinex',
'countries': ['VG'],
'version': 'v2',
'certified': False,
'pro': False,
# new metainfo interface
'has': {
'CORS': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createDepositAddress': True,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': False,
'fetchBalance': True,
'fetchClosedOrder': True,
'fetchClosedOrders': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchFundingFees': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': True,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOrderTrades': True,
'fetchStatus': True,
'fetchTickers': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'rateLimit': 1500,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': {
'v1': 'https://api.bitfinex.com',
'public': 'https://api-pub.bitfinex.com',
'private': 'https://api.bitfinex.com',
},
'www': 'https://www.bitfinex.com',
'doc': [
'https://docs.bitfinex.com/v2/docs/',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
'fees': 'https://www.bitfinex.com/fees',
},
'api': {
'v1': {
'get': [
'symbols',
'symbols_details',
],
},
'public': {
'get': [
'conf/{config}',
'conf/pub:{action}:{object}',
'conf/pub:{action}:{object}:{detail}',
'conf/pub:map:{object}',
'conf/pub:map:{object}:{detail}',
'conf/pub:map:currency:{detail}',
'conf/pub:map:currency:sym', # maps symbols to their API symbols, BAB > BCH
'conf/pub:map:currency:label', # verbose friendly names, BNT > Bancor
'conf/pub:map:currency:unit', # maps symbols to unit of measure where applicable
'conf/pub:map:currency:undl', # maps derivatives symbols to their underlying currency
'conf/pub:map:currency:pool', # maps symbols to underlying network/protocol they operate on
'conf/pub:map:currency:explorer', # maps symbols to their recognised block explorer URLs
'conf/pub:map:currency:tx:fee', # maps currencies to their withdrawal fees https://github.com/ccxt/ccxt/issues/7745
'conf/pub:map:tx:method',
'conf/pub:list:{object}',
'conf/pub:list:{object}:{detail}',
'conf/pub:list:currency',
'conf/pub:list:pair:exchange',
'conf/pub:list:pair:margin',
'conf/pub:list:pair:futures',
'conf/pub:list:competitions',
'conf/pub:info:{object}',
'conf/pub:info:{object}:{detail}',
'conf/pub:info:pair',
'conf/pub:info:tx:status', # [deposit, withdrawal] statuses 1 = active, 0 = maintenance
'conf/pub:fees',
'platform/status',
'tickers',
'ticker/{symbol}',
'trades/{symbol}/hist',
'book/{symbol}/{precision}',
'book/{symbol}/P0',
'book/{symbol}/P1',
'book/{symbol}/P2',
'book/{symbol}/P3',
'book/{symbol}/R0',
'stats1/{key}:{size}:{symbol}:{side}/{section}',
'stats1/{key}:{size}:{symbol}:{side}/last',
'stats1/{key}:{size}:{symbol}:{side}/hist',
'stats1/{key}:{size}:{symbol}/{section}',
'stats1/{key}:{size}:{symbol}/last',
'stats1/{key}:{size}:{symbol}/hist',
'stats1/{key}:{size}:{symbol}:long/last',
'stats1/{key}:{size}:{symbol}:long/hist',
'stats1/{key}:{size}:{symbol}:short/last',
'stats1/{key}:{size}:{symbol}:short/hist',
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
'status/{type}',
'status/deriv',
'liquidations/hist',
'rankings/{key}:{timeframe}:{symbol}/{section}',
'rankings/{key}:{timeframe}:{symbol}/hist',
],
'post': [
'calc/trade/avg',
'calc/fx',
],
},
'private': {
'post': [
# 'auth/r/orders/{symbol}/new', # outdated
# 'auth/r/stats/perf:{timeframe}/hist', # outdated
'auth/r/wallets',
'auth/r/wallets/hist',
'auth/r/orders',
'auth/r/orders/{symbol}',
'auth/w/order/submit',
'auth/w/order/update',
'auth/w/order/cancel',
'auth/w/order/multi',
'auth/w/order/cancel/multi',
'auth/r/orders/{symbol}/hist',
'auth/r/orders/hist',
'auth/r/order/{symbol}:{id}/trades',
'auth/r/trades/{symbol}/hist',
'auth/r/trades/hist',
'auth/r/ledgers/{currency}/hist',
'auth/r/ledgers/hist',
'auth/r/info/margin/{key}',
'auth/r/info/margin/base',
'auth/r/info/margin/sym_all',
'auth/r/positions',
'auth/w/position/claim',
'auth/r/positions/hist',
'auth/r/positions/audit',
'auth/r/positions/snap',
'auth/w/deriv/collateral/set',
'auth/w/deriv/collateral/limits',
'auth/r/funding/offers',
'auth/r/funding/offers/{symbol}',
'auth/w/funding/offer/submit',
'auth/w/funding/offer/cancel',
'auth/w/funding/offer/cancel/all',
'auth/w/funding/close',
'auth/w/funding/auto',
'auth/w/funding/keep',
'auth/r/funding/offers/{symbol}/hist',
'auth/r/funding/offers/hist',
'auth/r/funding/loans',
'auth/r/funding/loans/hist',
'auth/r/funding/loans/{symbol}',
'auth/r/funding/loans/{symbol}/hist',
'auth/r/funding/credits',
'auth/r/funding/credits/hist',
'auth/r/funding/credits/{symbol}',
'auth/r/funding/credits/{symbol}/hist',
'auth/r/funding/trades/{symbol}/hist',
'auth/r/funding/trades/hist',
'auth/r/info/funding/{key}',
'auth/r/info/user',
'auth/r/logins/hist',
'auth/w/transfer',
'auth/w/deposit/address',
'auth/w/deposit/invoice',
'auth/w/withdraw',
'auth/r/movements/{currency}/hist',
'auth/r/movements/hist',
'auth/r/alerts',
'auth/w/alert/set',
'auth/w/alert/price:{symbol}:{price}/del',
'auth/w/alert/{type}:{symbol}:{price}/del',
'auth/calc/order/avail',
'auth/w/settings/set',
'auth/r/settings',
'auth/w/settings/del',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.2 / 100,
},
'funding': {
'withdraw': {
'BTC': 0.0004,
'BCH': 0.0001,
'ETH': 0.00135,
'EOS': 0.0,
'LTC': 0.001,
'OMG': 0.15097,
'IOT': 0.0,
'NEO': 0.0,
'ETC': 0.01,
'XRP': 0.02,
'ETP': 0.01,
'ZEC': 0.001,
'BTG': 0.0,
'DASH': 0.01,
'XMR': 0.0001,
'QTM': 0.01,
'EDO': 0.23687,
'DAT': 9.8858,
'AVT': 1.1251,
'SAN': 0.35977,
'USDT': 5.0,
'SPK': 16.971,
'BAT': 1.1209,
'GNT': 2.8789,
'SNT': 9.0848,
'QASH': 1.726,
'YYW': 7.9464,
},
},
},
'options': {
'precision': 'R0', # P0, P1, P2, P3, P4, R0
# convert 'EXCHANGE MARKET' to lowercase 'market'
# convert 'EXCHANGE LIMIT' to lowercase 'limit'
# everything else remains uppercase
'exchangeTypes': {
# 'MARKET': None,
'EXCHANGE MARKET': 'market',
# 'LIMIT': None,
'EXCHANGE LIMIT': 'limit',
# 'STOP': None,
# 'EXCHANGE STOP': None,
# 'TRAILING STOP': None,
# 'EXCHANGE TRAILING STOP': None,
# 'FOK': None,
# 'EXCHANGE FOK': None,
# 'STOP LIMIT': None,
# 'EXCHANGE STOP LIMIT': None,
# 'IOC': None,
# 'EXCHANGE IOC': None,
},
# convert 'market' to 'EXCHANGE MARKET'
# convert 'limit' 'EXCHANGE LIMIT'
# everything else remains as is
'orderTypes': {
'market': 'EXCHANGE MARKET',
'limit': 'EXCHANGE LIMIT',
},
'fiat': {
'USD': 'USD',
'EUR': 'EUR',
'JPY': 'JPY',
'GBP': 'GBP',
},
},
'exceptions': {
'exact': {
'10020': BadRequest,
'10100': AuthenticationError,
'10114': InvalidNonce,
'20060': OnMaintenance,
},
'broad': {
'address': InvalidAddress,
'available balance is only': InsufficientFunds,
'not enough exchange balance': InsufficientFunds,
'Order not found': OrderNotFound,
'symbol: invalid': BadSymbol,
'Invalid order': InvalidOrder,
},
},
})
def is_fiat(self, code):
return(code in self.options['fiat'])
def get_currency_id(self, code):
return 'f' + code
def fetch_status(self, params={}):
#
# [1] # operative
# [0] # maintenance
#
response = self.publicGetPlatformStatus(params)
status = self.safe_value(response, 0)
formattedStatus = 'ok' if (status == 1) else 'maintenance'
self.status = self.extend(self.status, {
'status': formattedStatus,
'updated': self.milliseconds(),
})
return self.status
def fetch_markets(self, params={}):
# todo drop v1 in favor of v2 configs
# pub:list:pair:exchange,pub:list:pair:margin,pub:list:pair:futures,pub:info:pair
v2response = self.publicGetConfPubListPairFutures(params)
v1response = self.v1GetSymbolsDetails(params)
futuresMarketIds = self.safe_value(v2response, 0, [])
result = []
for i in range(0, len(v1response)):
market = v1response[i]
id = self.safe_string_upper(market, 'pair')
spot = True
if self.in_array(id, futuresMarketIds):
spot = False
futures = not spot
type = 'spot' if spot else 'futures'
baseId = None
quoteId = None
if id.find(':') >= 0:
parts = id.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
id = 't' + id
baseId = self.get_currency_id(baseId)
quoteId = self.get_currency_id(quoteId)
precision = {
'price': self.safe_integer(market, 'price_precision'),
'amount': 8, # https://github.com/ccxt/ccxt/issues/7310
}
limits = {
'amount': {
'min': self.safe_float(market, 'minimum_order_size'),
'max': self.safe_float(market, 'maximum_order_size'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
}
limits['cost'] = {
'min': limits['amount']['min'] * limits['price']['min'],
'max': None,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
'type': type,
'swap': False,
'spot': spot,
'futures': futures,
})
return result
    def fetch_currencies(self, params={}):
        """Fetch all currencies supported by the exchange.

        Issues a single conf/{config} request whose response is a list of
        lists, positionally matching the requested labels below; each mapping
        list is then indexed by its first element (the currency id).

        :param dict params: extra parameters forwarded to the API call
        :returns dict: unified currency structures keyed by currency code
        """
        labels = [
            'pub:list:currency',
            'pub:map:currency:sym',  # maps symbols to their API symbols, BAB > BCH
            'pub:map:currency:label',  # verbose friendly names, BNT > Bancor
            'pub:map:currency:unit',  # maps symbols to unit of measure where applicable
            'pub:map:currency:undl',  # maps derivatives symbols to their underlying currency
            'pub:map:currency:pool',  # maps symbols to underlying network/protocol they operate on
            'pub:map:currency:explorer',  # maps symbols to their recognised block explorer URLs
            'pub:map:currency:tx:fee',  # maps currencies to their withdrawal fees https://github.com/ccxt/ccxt/issues/7745
        ]
        config = ','.join(labels)
        request = {
            'config': config,
        }
        response = self.publicGetConfConfig(self.extend(request, params))
        #
        #     [
        #
        #         a list of symbols
        #         ["AAA","ABS","ADA"],
        #
        #         # sym
        #         # maps symbols to their API symbols, BAB > BCH
        #         [
        #             ['BAB', 'BCH'],
        #             ['CNHT', 'CNHt'],
        #             ['DSH', 'DASH'],
        #             ['IOT', 'IOTA'],
        #             ['LES', 'LEO-EOS'],
        #             ['LET', 'LEO-ERC20'],
        #             ['STJ', 'STORJ'],
        #             ['TSD', 'TUSD'],
        #             ['UDC', 'USDC'],
        #             ['USK', 'USDK'],
        #             ['UST', 'USDt'],
        #             ['USTF0', 'USDt0'],
        #             ['XCH', 'XCHF'],
        #             ['YYW', 'YOYOW'],
        #             # ...
        #         ],
        #         # label
        #         # verbose friendly names, BNT > Bancor
        #         [
        #             ['BAB', 'Bitcoin Cash'],
        #             ['BCH', 'Bitcoin Cash'],
        #             ['LEO', 'Unus Sed LEO'],
        #             ['LES', 'Unus Sed LEO(EOS)'],
        #             ['LET', 'Unus Sed LEO(ERC20)'],
        #             # ...
        #         ],
        #         # unit
        #         # maps symbols to unit of measure where applicable
        #         [
        #             ['IOT', 'Mi|MegaIOTA'],
        #         ],
        #         # undl
        #         # maps derivatives symbols to their underlying currency
        #         [
        #             ['USTF0', 'UST'],
        #             ['BTCF0', 'BTC'],
        #             ['ETHF0', 'ETH'],
        #         ],
        #         # pool
        #         # maps symbols to underlying network/protocol they operate on
        #         [
        #             ['SAN', 'ETH'], ['OMG', 'ETH'], ['AVT', 'ETH'], ['EDO', 'ETH'],
        #             ['ESS', 'ETH'], ['ATD', 'EOS'], ['ADD', 'EOS'], ['MTO', 'EOS'],
        #             ['PNK', 'ETH'], ['BAB', 'BCH'], ['WLO', 'XLM'], ['VLD', 'ETH'],
        #             ['BTT', 'TRX'], ['IMP', 'ETH'], ['SCR', 'ETH'], ['GNO', 'ETH'],
        #             # ...
        #         ],
        #         # explorer
        #         # maps symbols to their recognised block explorer URLs
        #         [
        #             [
        #                 'AIO',
        #                 [
        #                     "https://mainnet.aion.network",
        #                     "https://mainnet.aion.network/#/account/VAL",
        #                     "https://mainnet.aion.network/#/transaction/VAL"
        #                 ]
        #             ],
        #             # ...
        #         ],
        #         # fee
        #         # maps currencies to their withdrawal fees
        #         [
        #             ["AAA",[0,0]],
        #             ["ABS",[0,131.3]],
        #             ["ADA",[0,0.3]],
        #         ],
        #     ]
        #
        # index each positional mapping list by its first element(the currency id)
        indexed = {
            'sym': self.index_by(self.safe_value(response, 1, []), 0),
            'label': self.index_by(self.safe_value(response, 2, []), 0),
            'unit': self.index_by(self.safe_value(response, 3, []), 0),
            'undl': self.index_by(self.safe_value(response, 4, []), 0),
            'pool': self.index_by(self.safe_value(response, 5, []), 0),
            'explorer': self.index_by(self.safe_value(response, 6, []), 0),
            'fees': self.index_by(self.safe_value(response, 7, []), 0),
        }
        ids = self.safe_value(response, 0, [])
        result = {}
        for i in range(0, len(ids)):
            id = ids[i]
            code = self.safe_currency_code(id)
            label = self.safe_value(indexed['label'], id, [])
            name = self.safe_string(label, 1)
            pool = self.safe_value(indexed['pool'], id, [])
            type = self.safe_string(pool, 1)
            feeValues = self.safe_value(indexed['fees'], id, [])
            fees = self.safe_value(feeValues, 1, [])
            # fees entry is [deposit_fee, withdrawal_fee] - take the withdrawal fee
            fee = self.safe_float(fees, 1)
            precision = 8  # default precision, todo: fix "magic constants"
            # currency ids carry an 'f' prefix in the v2 API
            id = 'f' + id
            result[code] = {
                'id': id,
                'code': code,
                'info': [id, label, pool, feeValues],
                'type': type,
                'name': name,
                'active': True,
                'fee': fee,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': 1 / math.pow(10, precision),
                        'max': None,
                    },
                    'price': {
                        'min': 1 / math.pow(10, precision),
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': fee,
                        'max': None,
                    },
                },
            }
        return result
    def fetch_balance(self, params={}):
        """Fetch the account balance for one wallet type.

        Each raw balance entry is a list:
        [WALLET_TYPE, CURRENCY, BALANCE, UNSETTLED_INTEREST, AVAILABLE_BALANCE, ...]
        Only entries matching the requested wallet type('exchange' by default,
        overridable via params['type']) are included.
        """
        # self api call does not return the 'used' amount - use the v1 version instead(which also returns zero balances)
        self.load_markets()
        response = self.privatePostAuthRWallets(params)
        balanceType = self.safe_string(params, 'type', 'exchange')
        result = {'info': response}
        for b in range(0, len(response)):
            balance = response[b]
            accountType = balance[0]
            currency = balance[1]
            total = balance[2]
            available = balance[4]
            if accountType == balanceType:
                # derivatives currency ids carry a 't' prefix - strip it
                if currency[0] == 't':
                    currency = currency[1:]
                code = self.safe_currency_code(currency)
                account = self.account()
                # do not fill in zeroes and missing values in the parser
                # rewrite and unify the following to use the unified parseBalance
                account['total'] = total
                if not available:
                    # 'available' is falsy: either explicitly zero(fully used)
                    # or missing(None) - in the latter case assume all is free
                    if available == 0:
                        account['free'] = 0
                        account['used'] = total
                    else:
                        account['free'] = total
                else:
                    account['free'] = available
                    account['used'] = account['total'] - account['free']
                result[code] = account
        return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
precision = self.safe_value(self.options, 'precision', 'R0')
request = {
'symbol': self.market_id(symbol),
'precision': precision,
}
if limit is not None:
request['len'] = limit # 25 or 100
fullRequest = self.extend(request, params)
orderbook = self.publicGetBookSymbolPrecision(fullRequest)
timestamp = self.milliseconds()
result = {
'bids': [],
'asks': [],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0
for i in range(0, len(orderbook)):
order = orderbook[i]
price = order[priceIndex]
amount = abs(order[2])
side = 'bids' if (order[2] > 0) else 'asks'
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
length = len(ticker)
last = ticker[length - 4]
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker[length - 2],
'low': ticker[length - 1],
'bid': ticker[length - 10],
'bidVolume': None,
'ask': ticker[length - 8],
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': ticker[length - 6],
'percentage': ticker[length - 5] * 100,
'average': None,
'baseVolume': ticker[length - 3],
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
else:
request['symbols'] = 'ALL'
tickers = self.publicGetTickers(self.extend(request, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker[0]
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTickerSymbol(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_symbol(self, marketId):
if marketId is None:
return marketId
marketId = marketId.replace('t', '')
baseId = None
quoteId = None
if marketId.find(':') >= 0:
parts = marketId.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = marketId[0:3]
quoteId = marketId[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
    def parse_trade(self, trade, market=None):
        """Parse a raw trade list into a unified trade structure.

        Handles both the short public format and the longer private format -
        the two are distinguished purely by list length(see below).
        """
        #
        # fetchTrades(public)
        #
        #     [
        #         ID,
        #         MTS, # timestamp
        #         AMOUNT,
        #         PRICE
        #     ]
        #
        # fetchMyTrades(private)
        #
        #     [
        #         ID,
        #         PAIR,
        #         MTS_CREATE,
        #         ORDER_ID,
        #         EXEC_AMOUNT,
        #         EXEC_PRICE,
        #         ORDER_TYPE,
        #         ORDER_PRICE,
        #         MAKER,
        #         FEE,
        #         FEE_CURRENCY,
        #         ...
        #     ]
        #
        # public trades have 4 elements, private ones have more
        tradeLength = len(trade)
        isPrivate = (tradeLength > 5)
        id = str(trade[0])
        amountIndex = 4 if isPrivate else 2
        amount = trade[amountIndex]
        cost = None
        priceIndex = 5 if isPrivate else 3
        price = trade[priceIndex]
        side = None
        orderId = None
        takerOrMaker = None
        type = None
        fee = None
        symbol = None
        timestampIndex = 2 if isPrivate else 1
        timestamp = trade[timestampIndex]
        if isPrivate:
            marketId = trade[1]
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
                symbol = market['symbol']
            else:
                symbol = self.parse_symbol(marketId)
            orderId = str(trade[3])
            takerOrMaker = 'maker' if (trade[8] == 1) else 'taker'
            feeCost = trade[9]
            feeCurrency = self.safe_currency_code(trade[10])
            if feeCost is not None:
                # fees are reported as negative numbers - flip the sign
                feeCost = -feeCost
                if symbol in self.markets:
                    feeCost = self.fee_to_precision(symbol, feeCost)
                else:
                    currencyId = 'f' + feeCurrency
                    if currencyId in self.currencies_by_id:
                        currency = self.currencies_by_id[currencyId]
                        feeCost = self.currency_to_precision(currency['code'], feeCost)
                fee = {
                    'cost': float(feeCost),
                    'currency': feeCurrency,
                }
            orderType = trade[6]
            type = self.safe_string(self.options['exchangeTypes'], orderType)
        if symbol is None:
            if market is not None:
                symbol = market['symbol']
        if amount is not None:
            # a negative amount denotes a sell
            side = 'sell' if (amount < 0) else 'buy'
            amount = abs(amount)
            if cost is None:
                if price is not None:
                    cost = amount * price
        return {
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': orderId,
            'side': side,
            'type': type,
            'takerOrMaker': takerOrMaker,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
            'info': trade,
        }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
sort = '-1'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
sort = '1'
if limit is not None:
request['limit'] = limit # default 120, max 5000
request['sort'] = sort
response = self.publicGetTradesSymbolHist(self.extend(request, params))
#
# [
# [
# ID,
# MTS, # timestamp
# AMOUNT,
# PRICE
# ]
# ]
#
trades = self.sort_by(response, 1)
return self.parse_trades(trades, market, None, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=100, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100 # default 100, max 5000
if since is None:
since = self.milliseconds() - self.parse_timeframe(timeframe) * limit * 1000
request = {
'symbol': market['id'],
'timeframe': self.timeframes[timeframe],
'sort': 1,
'start': since,
'limit': limit,
}
response = self.publicGetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
#
# [
# [1591503840000,0.025069,0.025068,0.025069,0.025068,1.97828998],
# [1591504500000,0.025065,0.025065,0.025065,0.025065,1.0164],
# [1591504620000,0.025062,0.025062,0.025062,0.025062,0.5],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
if status is None:
return status
parts = status.split(' ')
state = self.safe_string(parts, 0)
statuses = {
'ACTIVE': 'open',
'PARTIALLY': 'open',
'EXECUTED': 'closed',
'CANCELED': 'canceled',
'INSUFFICIENT': 'canceled',
'RSN_DUST': 'rejected',
'RSN_PAUSE': 'rejected',
}
return self.safe_string(statuses, state, status)
    def parse_order(self, order, market=None):
        """Parse a raw order list into a unified order structure.

        Raw order fields are addressed positionally: 0=id, 2=cid, 3=symbol,
        5=mts_update, 6=amount(remaining), 7=amount_orig, 8=type, 13=status,
        16=price, 17=price_avg.
        """
        id = self.safe_string(order, 0)
        symbol = None
        marketId = self.safe_string(order, 3)
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
        else:
            symbol = self.parse_symbol(marketId)
        if (symbol is None) and (market is not None):
            symbol = market['symbol']
        # https://github.com/ccxt/ccxt/issues/6686
        # timestamp = self.safe_timestamp(order, 5)
        timestamp = self.safe_integer(order, 5)
        remaining = abs(self.safe_float(order, 6))
        amount = abs(self.safe_float(order, 7))
        filled = amount - remaining
        # negative original amount denotes a sell
        side = 'sell' if (order[7] < 0) else 'buy'
        orderType = self.safe_string(order, 8)
        type = self.safe_string(self.safe_value(self.options, 'exchangeTypes'), orderType)
        status = None
        statusString = self.safe_string(order, 13)
        if statusString is not None:
            # the status may carry a trailing detail after ' @ '
            parts = statusString.split(' @ ')
            status = self.parse_order_status(self.safe_string(parts, 0))
        price = self.safe_float(order, 16)
        average = self.safe_float(order, 17)
        cost = price * filled
        clientOrderId = self.safe_string(order, 2)
        return {
            'info': order,
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'amount': amount,
            'cost': cost,
            'average': average,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': None,
            'trades': None,
        }
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create an order.

        The sign of 'amount' encodes the side(negative = sell). Price-related
        request fields depend on the resolved order type: LIMIT-like types send
        'price', STOP types send the stop price as 'price', STOP LIMIT types
        send both 'price'(stop) and 'price_aux_limit'(limit), TRAILING STOP
        types send 'price_trailing', FOK/IOC send 'price'.

        :param str symbol: unified market symbol
        :param str type: unified or exchange-specific order type
        :param str side: 'buy' or 'sell'
        :param float amount: order amount(always positive, sign added here)
        :param float price: limit/stop price depending on type
        :param dict params: may include 'stopPrice', 'price_aux_limit',
            'price_trailing', and 'cid'/'clientOrderId'
        :raises ArgumentsRequired: for STOP LIMIT orders without a stop price
        :raises ExchangeError: when the exchange reports a non-SUCCESS status
        """
        self.load_markets()
        market = self.market(symbol)
        orderTypes = self.safe_value(self.options, 'orderTypes', {})
        orderType = self.safe_string_upper(orderTypes, type, type)
        amount = -amount if (side == 'sell') else amount
        request = {
            'symbol': market['id'],
            'type': orderType,
            'amount': self.number_to_string(amount),
        }
        if (orderType == 'LIMIT') or (orderType == 'EXCHANGE LIMIT'):
            request['price'] = self.number_to_string(price)
        elif (orderType == 'STOP') or (orderType == 'EXCHANGE STOP'):
            stopPrice = self.safe_float(params, 'stopPrice', price)
            request['price'] = self.number_to_string(stopPrice)
        elif (orderType == 'STOP LIMIT') or (orderType == 'EXCHANGE STOP LIMIT'):
            priceAuxLimit = self.safe_float(params, 'price_aux_limit')
            stopPrice = self.safe_float(params, 'stopPrice')
            if priceAuxLimit is None:
                if stopPrice is None:
                    raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter or a price_aux_limit parameter for a ' + orderType + ' order')
                else:
                    # no aux limit price given - use 'price' as the limit price
                    request['price_aux_limit'] = self.number_to_string(price)
            else:
                request['price_aux_limit'] = self.number_to_string(priceAuxLimit)
                if stopPrice is None:
                    stopPrice = price
            request['price'] = self.number_to_string(stopPrice)
        elif (orderType == 'TRAILING STOP') or (orderType == 'EXCHANGE TRAILING STOP'):
            priceTrailing = self.safe_float(params, 'price_trailing')
            request['price_trailing'] = self.number_to_string(priceTrailing)
            stopPrice = self.safe_float(params, 'stopPrice', price)
            request['price'] = self.number_to_string(stopPrice)
        elif (orderType == 'FOK') or (orderType == 'EXCHANGE FOK') or (orderType == 'IOC') or (orderType == 'EXCHANGE IOC'):
            request['price'] = self.number_to_string(price)
        params = self.omit(params, ['stopPrice', 'price_aux_limit', 'price_trailing'])
        clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')
        if clientOrderId is not None:
            request['cid'] = clientOrderId
            params = self.omit(params, ['cid', 'clientOrderId'])
        response = self.privatePostAuthWOrderSubmit(self.extend(request, params))
        #
        #     [
        #         1578784364.748,    # Millisecond Time Stamp of the update
        #         "on-req",          # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')
        #         null,              # Unique ID of the message
        #         null,              # Ignore
        #         [
        #             [
        #                 37271830598,            # Order ID
        #                 null,                   # Group ID
        #                 1578784364748,          # Client Order ID
        #                 "tBTCUST",              # Pair
        #                 1578784364748,          # Millisecond timestamp of creation
        #                 1578784364748,          # Millisecond timestamp of update
        #                 -0.005,                 # Positive means buy, negative means sell
        #                 -0.005,                 # Original amount
        #                 "EXCHANGE LIMIT",       # Order type(LIMIT, MARKET, STOP, TRAILING STOP, EXCHANGE MARKET, EXCHANGE LIMIT, EXCHANGE STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC)
        #                 null,                   # Previous order type
        #                 null,                   # Millisecond timestamp of Time-In-Force: automatic order cancellation
        #                 null,                   # Ignore
        #                 0,                      # Flags(see https://docs.bitfinex.com/docs/flag-values)
        #                 "ACTIVE",               # Order Status
        #                 null,                   # Ignore
        #                 null,                   # Ignore
        #                 20000,                  # Price
        #                 0,                      # Average price
        #                 0,                      # The trailing price
        #                 0,                      # Auxiliary Limit price(for STOP LIMIT)
        #                 null,                   # Ignore
        #                 null,                   # Ignore
        #                 null,                   # Ignore
        #                 0,                      # 1 - hidden order
        #                 null,                   # If another order caused self order to be placed(OCO) self will be that other order's ID
        #                 null,                   # Ignore
        #                 null,                   # Ignore
        #                 null,                   # Ignore
        #                 "API>BFX",              # Origin of action: BFX, ETHFX, API>BFX, API>ETHFX
        #                 null,                   # Ignore
        #                 null,                   # Ignore
        #                 null                    # Meta
        #             ]
        #         ],
        #         null,              # Error code
        #         "SUCCESS",         # Status(SUCCESS, ERROR, FAILURE, ...)
        #         "Submitting 1 orders."  # Text of the notification
        #     ]
        #
        status = self.safe_string(response, 6)
        if status != 'SUCCESS':
            errorCode = response[5]
            errorText = response[7]
            raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')
        orders = self.safe_value(response, 4, [])
        order = self.safe_value(orders, 0)
        return self.parse_order(order, market)
def cancel_all_orders(self, symbol=None, params={}):
request = {
'all': 1,
}
response = self.privatePostAuthWOrderCancelMulti(self.extend(request, params))
orders = self.safe_value(response, 4, [])
return self.parse_orders(orders)
def cancel_order(self, id, symbol=None, params={}):
cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id
request = None
if cid is not None:
cidDate = self.safe_value(params, 'cidDate') # client order id date
if cidDate is None:
raise InvalidOrder(self.id + " canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')")
request = {
'cid': cid,
'cid_date': cidDate,
}
params = self.omit(params, ['cid', 'clientOrderId'])
else:
request = {
'id': int(id),
}
response = self.privatePostAuthWOrderCancel(self.extend(request, params))
order = self.safe_value(response, 4)
return self.parse_order(order)
def fetch_open_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_open_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_closed_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_closed_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrders(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbol(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# returns the most recent closed or canceled orders up to circa two weeks ago
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrdersHist(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbolHist(self.extend(request, params))
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 2500
return self.parse_orders(response, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
orderId = int(id)
request = {
'id': orderId,
'symbol': market['id'],
}
# valid for trades upto 10 days old
response = self.privatePostAuthROrderSymbolIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'end': self.milliseconds(),
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 1000
method = 'privatePostAuthRTradesHist'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'privatePostAuthRTradesSymbolHist'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_deposit_address(self, code, params={}):
self.load_markets()
request = {
'op_renew': 1,
}
response = self.fetch_deposit_address(code, self.extend(request, params))
return response
    def fetch_deposit_address(self, code, params={}):
        """Fetch(or, with op_renew=1, regenerate) the deposit address for a currency.

        For pool-based currencies the pool address becomes the address and the
        per-user address becomes the tag(e.g. XRP-style destination tags).
        """
        self.load_markets()
        # todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
        name = self.getCurrencyName(code)
        request = {
            'method': name,
            'wallet': 'exchange',  # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
            'op_renew': 0,  # a value of 1 will generate a new address
        }
        response = self.privatePostAuthWDepositAddress(self.extend(request, params))
        #
        #     [
        #         1582269616687,  # MTS Millisecond Time Stamp of the update
        #         'acc_dep',  # TYPE Purpose of notification 'acc_dep' for account deposit
        #         null,  # MESSAGE_ID unique ID of the message
        #         null,  # not documented
        #         [
        #             null,  # PLACEHOLDER
        #             'BITCOIN',  # METHOD Method of deposit
        #             'BTC',  # CURRENCY_CODE Currency code of new address
        #             null,  # PLACEHOLDER
        #             '1BC9PZqpUmjyEB54uggn8TFKj49zSDYzqG',  # ADDRESS
        #             null,  # POOL_ADDRESS
        #         ],
        #         null,  # CODE null or integer work in progress
        #         'SUCCESS',  # STATUS Status of the notification, SUCCESS, ERROR, FAILURE
        #         'success',  # TEXT Text of the notification
        #     ]
        #
        result = self.safe_value(response, 4, [])
        poolAddress = self.safe_string(result, 5)
        # when a pool address exists, it is the on-chain address and the
        # account-specific value at index 4 acts as the tag/memo
        address = self.safe_string(result, 4) if (poolAddress is None) else poolAddress
        tag = None if (poolAddress is None) else self.safe_string(result, 4)
        self.check_address(address)
        return {
            'currency': code,
            'address': address,
            'tag': tag,
            'info': response,
        }
def parse_transaction_status(self, status):
statuses = {
'SUCCESS': 'ok',
'ERROR': 'failed',
'FAILURE': 'failed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """Parse a raw transaction into a unified transaction structure.

        Two positional formats are handled, distinguished by list length:
        a short withdraw-notification(< 9 elements, nested data at index 4)
        and the longer fetchTransactions movement record.
        """
        #
        # withdraw
        #
        #     [
        #         1582271520931,  # MTS Millisecond Time Stamp of the update
        #         "acc_wd-req",  # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
        #         null,  # MESSAGE_ID unique ID of the message
        #         null,  # not documented
        #         [
        #             0,  # WITHDRAWAL_ID Unique Withdrawal ID
        #             null,  # PLACEHOLDER
        #             "bitcoin",  # METHOD Method of withdrawal
        #             null,  # PAYMENT_ID Payment ID if relevant
        #             "exchange",  # WALLET Sending wallet
        #             1,  # AMOUNT Amount of Withdrawal less fee
        #             null,  # PLACEHOLDER
        #             null,  # PLACEHOLDER
        #             0.0004,  # WITHDRAWAL_FEE Fee on withdrawal
        #         ],
        #         null,  # CODE null or integer Work in progress
        #         "SUCCESS",  # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
        #         "Invalid bitcoin address(abcdef)",  # TEXT Text of the notification
        #     ]
        #
        # fetchTransactions
        #
        #     [
        #         13293039,  # ID
        #         'ETH',  # CURRENCY
        #         'ETHEREUM',  # CURRENCY_NAME
        #         null,
        #         null,
        #         1574175052000,  # MTS_STARTED
        #         1574181326000,  # MTS_UPDATED
        #         null,
        #         null,
        #         'CANCELED',  # STATUS
        #         null,
        #         null,
        #         -0.24,  # AMOUNT, negative for withdrawals
        #         -0.00135,  # FEES
        #         null,
        #         null,
        #         'DESTINATION_ADDRESS',
        #         null,
        #         null,
        #         null,
        #         'TRANSACTION_ID',
        #         "Purchase of 100 pizzas",  # WITHDRAW_TRANSACTION_NOTE
        #     ]
        #
        transactionLength = len(transaction)
        timestamp = None
        updated = None
        code = None
        amount = None
        id = None
        status = None
        tag = None
        type = None
        feeCost = None
        txid = None
        addressTo = None
        if transactionLength < 9:
            # short format: a withdraw notification with nested data at index 4
            data = self.safe_value(transaction, 4, [])
            timestamp = self.safe_integer(transaction, 0)
            if currency is not None:
                code = currency['code']
            feeCost = self.safe_float(data, 8)
            if feeCost is not None:
                feeCost = -feeCost
            amount = self.safe_float(data, 5)
            id = self.safe_value(data, 0)
            status = 'ok'
            # a zero withdrawal id signals a failed request
            if id == 0:
                id = None
                status = 'failed'
            tag = self.safe_string(data, 3)
            type = 'withdrawal'
        else:
            # long format: a movement record from fetchTransactions
            id = self.safe_string(transaction, 0)
            timestamp = self.safe_integer(transaction, 5)
            updated = self.safe_integer(transaction, 6)
            status = self.parse_transaction_status(self.safe_string(transaction, 9))
            amount = self.safe_float(transaction, 12)
            if amount is not None:
                # the sign of the amount distinguishes withdrawals from deposits
                if amount < 0:
                    type = 'withdrawal'
                else:
                    type = 'deposit'
            feeCost = self.safe_float(transaction, 13)
            if feeCost is not None:
                feeCost = -feeCost
            addressTo = self.safe_string(transaction, 16)
            txid = self.safe_string(transaction, 20)
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'addressFrom': None,
            'address': addressTo,  # self is actually the tag for XRP transfers(the address is missing)
            'addressTo': addressTo,
            'tagFrom': None,
            'tag': tag,  # refix it properly for the tag from description
            'tagTo': tag,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': {
                'currency': code,
                'cost': feeCost,
                'rate': None,
            },
        }
    def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch deposits and withdrawals (movements), optionally for one currency.

        :param str code: unified currency code, or None for all currencies
        :param int since: earliest movement timestamp in milliseconds
        :param int limit: max number of movements(max 1000)
        :param dict params: extra parameters forwarded to the API call
        :returns list: unified transaction structures
        """
        self.load_markets()
        currency = None
        request = {}
        method = 'privatePostAuthRMovementsHist'
        if code is not None:
            currency = self.currency(code)
            request['currency'] = currency['id']
            method = 'privatePostAuthRMovementsCurrencyHist'
        if since is not None:
            request['start'] = since
        if limit is not None:
            request['limit'] = limit  # max 1000
        response = getattr(self, method)(self.extend(request, params))
        #
        #     [
        #         [
        #             13293039,  # ID
        #             'ETH',  # CURRENCY
        #             'ETHEREUM',  # CURRENCY_NAME
        #             null,
        #             null,
        #             1574175052000,  # MTS_STARTED
        #             1574181326000,  # MTS_UPDATED
        #             null,
        #             null,
        #             'CANCELED',  # STATUS
        #             null,
        #             null,
        #             -0.24,  # AMOUNT, negative for withdrawals
        #             -0.00135,  # FEES
        #             null,
        #             null,
        #             'DESTINATION_ADDRESS',
        #             null,
        #             null,
        #             null,
        #             'TRANSACTION_ID',
        #             "Purchase of 100 pizzas",  # WITHDRAW_TRANSACTION_NOTE
        #         ]
        #     ]
        #
        return self.parse_transactions(response, currency, since, limit)
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Withdraw funds to an external address.

        :param str code: unified currency code
        :param float amount: amount to withdraw
        :param str address: destination address
        :param str tag: payment id / memo where the network requires one
        :param dict params: extra parameters forwarded to the API call
        :raises ExchangeError: when the notification text is not 'success'
        :returns dict: unified transaction structure
        """
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        # todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
        name = self.getCurrencyName(code)
        request = {
            'method': name,
            'wallet': 'exchange',  # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
            'amount': self.number_to_string(amount),
            'address': address,
        }
        if tag is not None:
            request['payment_id'] = tag
        response = self.privatePostAuthWWithdraw(self.extend(request, params))
        #
        #     [
        #         1582271520931,  # MTS Millisecond Time Stamp of the update
        #         "acc_wd-req",  # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
        #         null,  # MESSAGE_ID unique ID of the message
        #         null,  # not documented
        #         [
        #             0,  # WITHDRAWAL_ID Unique Withdrawal ID
        #             null,  # PLACEHOLDER
        #             "bitcoin",  # METHOD Method of withdrawal
        #             null,  # PAYMENT_ID Payment ID if relevant
        #             "exchange",  # WALLET Sending wallet
        #             1,  # AMOUNT Amount of Withdrawal less fee
        #             null,  # PLACEHOLDER
        #             null,  # PLACEHOLDER
        #             0.0004,  # WITHDRAWAL_FEE Fee on withdrawal
        #         ],
        #         null,  # CODE null or integer Work in progress
        #         "SUCCESS",  # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
        #         "Invalid bitcoin address(abcdef)",  # TEXT Text of the notification
        #     ]
        #
        # the error details live in the notification text, not the status field
        text = self.safe_string(response, 7)
        if text != 'success':
            self.throw_broadly_matched_exception(self.exceptions['broad'], text, text)
        transaction = self.parse_transaction(response, currency)
        return self.extend(transaction, {
            'address': address,
        })
    def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
        """Fetch open margin positions.

        Returns the raw response unparsed - the 'symbols', 'since' and
        'limit' arguments are currently ignored(see the todo below).
        """
        self.load_markets()
        response = self.privatePostPositions(params)
        #
        #     [
        #         [
        #             "tBTCUSD",  # SYMBOL
        #             "ACTIVE",  # STATUS
        #             0.0195,  # AMOUNT
        #             8565.0267019,  # BASE_PRICE
        #             0,  # MARGIN_FUNDING
        #             0,  # MARGIN_FUNDING_TYPE
        #             -0.33455568705000516,  # PL
        #             -0.0003117550117425625,  # PL_PERC
        #             7045.876419249083,  # PRICE_LIQ
        #             3.0673001895895604,  # LEVERAGE
        #             null,  # _PLACEHOLDER
        #             142355652,  # POSITION_ID
        #             1574002216000,  # MTS_CREATE
        #             1574002216000,  # MTS_UPDATE
        #             null,  # _PLACEHOLDER
        #             0,  # TYPE
        #             null,  # _PLACEHOLDER
        #             0,  # COLLATERAL
        #             0,  # COLLATERAL_MIN
        #             # META
        #             {
        #                 "reason":"TRADE",
        #                 "order_id":34271018124,
        #                 "liq_stage":null,
        #                 "trade_price":"8565.0267019",
        #                 "trade_amount":"0.0195",
        #                 "order_id_oppo":34277498022
        #             }
        #         ]
        #     ]
        #
        # todo unify parsePosition/parsePositions
        return response
    def nonce(self):
        """Return a monotonically increasing nonce (the millisecond timestamp)."""
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, body and headers for a request.

        Public requests put leftover params in the query string; private
        requests JSON-encode them in the body and sign
        '/api/' + path + nonce + body with HMAC-SHA384.
        """
        request = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'v1':
            request = api + request
        else:
            request = self.version + request
        url = self.urls['api'][api] + '/' + request
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            body = self.json(query)
            # the signature covers the path, the nonce and the raw JSON body
            auth = '/api/' + request + nonce + body
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha384)
            headers = {
                'bfx-nonce': nonce,
                'bfx-apikey': self.apiKey,
                'bfx-signature': signature,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Send a request and convert error payloads into exceptions.

        The v2 API answers with lists on success; a dict carrying a 'message'
        key indicates an error and always raises.
        """
        response = self.fetch2(path, api, method, params, headers, body)
        if response:
            if 'message' in response:
                if response['message'].find('not enough exchange balance') >= 0:
                    raise InsufficientFunds(self.id + ' ' + self.json(response))
                raise ExchangeError(self.id + ' ' + self.json(response))
            return response
        elif response == '':
            raise ExchangeError(self.id + ' returned empty response')
        return response
    def handle_errors(self, statusCode, statusText, url, method, responseHeaders, responseBody, response, requestHeaders, requestBody):
        """Map HTTP 500 error payloads ([code, errorCode, errorText]) onto unified exceptions.

        Tries exact matches on the error code and text first, then broad
        matches on the text, and falls back to a generic ExchangeError.
        """
        if statusCode == 500:
            # See https://docs.bitfinex.com/docs/abbreviations-glossary#section-errorinfo-codes
            errorCode = self.number_to_string(response[1])
            errorText = response[2]
            feedback = self.id + ' ' + errorText
            self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
            self.throw_exactly_matched_exception(self.exceptions['exact'], errorText, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], errorText, feedback)
            raise ExchangeError(self.id + ' ' + errorText + '(#' + errorCode + ')')
| 41.539089 | 207 | 0.45961 |
from ccxt.bitfinex import bitfinex
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
class bitfinex2(bitfinex):
    def describe(self):
        """Return the static exchange metadata: capabilities, timeframes, URLs,
        endpoint lists, fee schedule, options, and error-to-exception maps."""
        return self.deep_extend(super(bitfinex2, self).describe(), {
            'id': 'bitfinex2',
            'name': 'Bitfinex',
            'countries': ['VG'],
            'version': 'v2',
            'certified': False,
            'pro': False,
            # unified-method capability flags
            'has': {
                'CORS': False,
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createLimitOrder': True,
                'createMarketOrder': True,
                'createOrder': True,
                'deposit': False,
                'editOrder': False,
                'fetchBalance': True,
                'fetchClosedOrder': True,
                'fetchClosedOrders': False,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchFundingFees': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrder': True,
                'fetchOpenOrders': True,
                'fetchOrder': False,
                'fetchOrderTrades': True,
                'fetchStatus': True,
                'fetchTickers': True,
                'fetchTradingFee': False,
                'fetchTradingFees': False,
                'fetchTransactions': True,
                'withdraw': True,
            },
            # unified timeframe -> exchange-specific timeframe code
            'timeframes': {
                '1m': '1m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '3h': '3h',
                '4h': '4h',
                '6h': '6h',
                '12h': '12h',
                '1d': '1D',
                '1w': '7D',
                '2w': '14D',
                '1M': '1M',
            },
            'rateLimit': 1500,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
                'api': {
                    'v1': 'https://api.bitfinex.com',
                    'public': 'https://api-pub.bitfinex.com',
                    'private': 'https://api.bitfinex.com',
                },
                'www': 'https://www.bitfinex.com',
                'doc': [
                    'https://docs.bitfinex.com/v2/docs/',
                    'https://github.com/bitfinexcom/bitfinex-api-node',
                ],
                'fees': 'https://www.bitfinex.com/fees',
            },
            # endpoint path templates, grouped by access level
            'api': {
                'v1': {
                    'get': [
                        'symbols',
                        'symbols_details',
                    ],
                },
                'public': {
                    'get': [
                        'conf/{config}',
                        'conf/pub:{action}:{object}',
                        'conf/pub:{action}:{object}:{detail}',
                        'conf/pub:map:{object}',
                        'conf/pub:map:{object}:{detail}',
                        'conf/pub:map:currency:{detail}',
                        'conf/pub:map:currency:sym',
                        'conf/pub:map:currency:label',
                        'conf/pub:map:currency:unit',
                        'conf/pub:map:currency:undl',
                        'conf/pub:map:currency:pool',
                        'conf/pub:map:currency:explorer',
                        'conf/pub:map:currency:tx:fee',
                        'conf/pub:map:tx:method',
                        'conf/pub:list:{object}',
                        'conf/pub:list:{object}:{detail}',
                        'conf/pub:list:currency',
                        'conf/pub:list:pair:exchange',
                        'conf/pub:list:pair:margin',
                        'conf/pub:list:pair:futures',
                        'conf/pub:list:competitions',
                        'conf/pub:info:{object}',
                        'conf/pub:info:{object}:{detail}',
                        'conf/pub:info:pair',
                        'conf/pub:info:tx:status',
                        'conf/pub:fees',
                        'platform/status',
                        'tickers',
                        'ticker/{symbol}',
                        'trades/{symbol}/hist',
                        'book/{symbol}/{precision}',
                        'book/{symbol}/P0',
                        'book/{symbol}/P1',
                        'book/{symbol}/P2',
                        'book/{symbol}/P3',
                        'book/{symbol}/R0',
                        'stats1/{key}:{size}:{symbol}:{side}/{section}',
                        'stats1/{key}:{size}:{symbol}:{side}/last',
                        'stats1/{key}:{size}:{symbol}:{side}/hist',
                        'stats1/{key}:{size}:{symbol}/{section}',
                        'stats1/{key}:{size}:{symbol}/last',
                        'stats1/{key}:{size}:{symbol}/hist',
                        'stats1/{key}:{size}:{symbol}:long/last',
                        'stats1/{key}:{size}:{symbol}:long/hist',
                        'stats1/{key}:{size}:{symbol}:short/last',
                        'stats1/{key}:{size}:{symbol}:short/hist',
                        'candles/trade:{timeframe}:{symbol}/{section}',
                        'candles/trade:{timeframe}:{symbol}/last',
                        'candles/trade:{timeframe}:{symbol}/hist',
                        'status/{type}',
                        'status/deriv',
                        'liquidations/hist',
                        'rankings/{key}:{timeframe}:{symbol}/{section}',
                        'rankings/{key}:{timeframe}:{symbol}/hist',
                    ],
                    'post': [
                        'calc/trade/avg',
                        'calc/fx',
                    ],
                },
                'private': {
                    'post': [
                        'auth/r/wallets',
                        'auth/r/wallets/hist',
                        'auth/r/orders',
                        'auth/r/orders/{symbol}',
                        'auth/w/order/submit',
                        'auth/w/order/update',
                        'auth/w/order/cancel',
                        'auth/w/order/multi',
                        'auth/w/order/cancel/multi',
                        'auth/r/orders/{symbol}/hist',
                        'auth/r/orders/hist',
                        'auth/r/order/{symbol}:{id}/trades',
                        'auth/r/trades/{symbol}/hist',
                        'auth/r/trades/hist',
                        'auth/r/ledgers/{currency}/hist',
                        'auth/r/ledgers/hist',
                        'auth/r/info/margin/{key}',
                        'auth/r/info/margin/base',
                        'auth/r/info/margin/sym_all',
                        'auth/r/positions',
                        'auth/w/position/claim',
                        'auth/r/positions/hist',
                        'auth/r/positions/audit',
                        'auth/r/positions/snap',
                        'auth/w/deriv/collateral/set',
                        'auth/w/deriv/collateral/limits',
                        'auth/r/funding/offers',
                        'auth/r/funding/offers/{symbol}',
                        'auth/w/funding/offer/submit',
                        'auth/w/funding/offer/cancel',
                        'auth/w/funding/offer/cancel/all',
                        'auth/w/funding/close',
                        'auth/w/funding/auto',
                        'auth/w/funding/keep',
                        'auth/r/funding/offers/{symbol}/hist',
                        'auth/r/funding/offers/hist',
                        'auth/r/funding/loans',
                        'auth/r/funding/loans/hist',
                        'auth/r/funding/loans/{symbol}',
                        'auth/r/funding/loans/{symbol}/hist',
                        'auth/r/funding/credits',
                        'auth/r/funding/credits/hist',
                        'auth/r/funding/credits/{symbol}',
                        'auth/r/funding/credits/{symbol}/hist',
                        'auth/r/funding/trades/{symbol}/hist',
                        'auth/r/funding/trades/hist',
                        'auth/r/info/funding/{key}',
                        'auth/r/info/user',
                        'auth/r/logins/hist',
                        'auth/w/transfer',
                        'auth/w/deposit/address',
                        'auth/w/deposit/invoice',
                        'auth/w/withdraw',
                        'auth/r/movements/{currency}/hist',
                        'auth/r/movements/hist',
                        'auth/r/alerts',
                        'auth/w/alert/set',
                        'auth/w/alert/price:{symbol}:{price}/del',
                        'auth/w/alert/{type}:{symbol}:{price}/del',
                        'auth/calc/order/avail',
                        'auth/w/settings/set',
                        'auth/r/settings',
                        'auth/w/settings/del',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.1 / 100,
                    'taker': 0.2 / 100,
                },
                'funding': {
                    # flat withdrawal fees per currency
                    'withdraw': {
                        'BTC': 0.0004,
                        'BCH': 0.0001,
                        'ETH': 0.00135,
                        'EOS': 0.0,
                        'LTC': 0.001,
                        'OMG': 0.15097,
                        'IOT': 0.0,
                        'NEO': 0.0,
                        'ETC': 0.01,
                        'XRP': 0.02,
                        'ETP': 0.01,
                        'ZEC': 0.001,
                        'BTG': 0.0,
                        'DASH': 0.01,
                        'XMR': 0.0001,
                        'QTM': 0.01,
                        'EDO': 0.23687,
                        'DAT': 9.8858,
                        'AVT': 1.1251,
                        'SAN': 0.35977,
                        'USDT': 5.0,
                        'SPK': 16.971,
                        'BAT': 1.1209,
                        'GNT': 2.8789,
                        'SNT': 9.0848,
                        'QASH': 1.726,
                        'YYW': 7.9464,
                    },
                },
            },
            'options': {
                # default order book precision level (R0 = raw book)
                'precision': 'R0',
                # raw order type <-> unified type maps used by parse_trade/parse_order/create_order
                'exchangeTypes': {
                    'EXCHANGE MARKET': 'market',
                    'EXCHANGE LIMIT': 'limit',
                },
                'orderTypes': {
                    'market': 'EXCHANGE MARKET',
                    'limit': 'EXCHANGE LIMIT',
                },
                'fiat': {
                    'USD': 'USD',
                    'EUR': 'EUR',
                    'JPY': 'JPY',
                    'GBP': 'GBP',
                },
            },
            # raw error code/text -> exception class, consumed by handle_errors/withdraw/request
            'exceptions': {
                'exact': {
                    '10020': BadRequest,
                    '10100': AuthenticationError,
                    '10114': InvalidNonce,
                    '20060': OnMaintenance,
                },
                'broad': {
                    'address': InvalidAddress,
                    'available balance is only': InsufficientFunds,
                    'not enough exchange balance': InsufficientFunds,
                    'Order not found': OrderNotFound,
                    'symbol: invalid': BadSymbol,
                    'Invalid order': InvalidOrder,
                },
            },
        })
def is_fiat(self, code):
return(code in self.options['fiat'])
def get_currency_id(self, code):
return 'f' + code
def fetch_status(self, params={}):
response = self.publicGetPlatformStatus(params)
status = self.safe_value(response, 0)
formattedStatus = 'ok' if (status == 1) else 'maintenance'
self.status = self.extend(self.status, {
'status': formattedStatus,
'updated': self.milliseconds(),
})
return self.status
    def fetch_markets(self, params={}):
        """Fetch all markets from the v1 symbols_details endpoint, using the v2
        pub:list:pair:futures config to flag which pairs are futures."""
        v2response = self.publicGetConfPubListPairFutures(params)
        v1response = self.v1GetSymbolsDetails(params)
        futuresMarketIds = self.safe_value(v2response, 0, [])
        result = []
        for i in range(0, len(v1response)):
            market = v1response[i]
            id = self.safe_string_upper(market, 'pair')
            spot = True
            if self.in_array(id, futuresMarketIds):
                spot = False
            futures = not spot
            type = 'spot' if spot else 'futures'
            baseId = None
            quoteId = None
            # some ids embed an explicit ':' separator between base and quote,
            # otherwise the id is split as a 3+3 character pair
            if id.find(':') >= 0:
                parts = id.split(':')
                baseId = parts[0]
                quoteId = parts[1]
            else:
                baseId = id[0:3]
                quoteId = id[3:6]
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            id = 't' + id  # v2 trading pair ids carry a 't' prefix
            baseId = self.get_currency_id(baseId)
            quoteId = self.get_currency_id(quoteId)
            precision = {
                'price': self.safe_integer(market, 'price_precision'),
                'amount': 8,
            }
            limits = {
                'amount': {
                    'min': self.safe_float(market, 'minimum_order_size'),
                    'max': self.safe_float(market, 'maximum_order_size'),
                },
                'price': {
                    'min': math.pow(10, -precision['price']),
                    'max': math.pow(10, precision['price']),
                },
            }
            limits['cost'] = {
                'min': limits['amount']['min'] * limits['price']['min'],
                'max': None,
            }
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': True,
                'precision': precision,
                'limits': limits,
                'info': market,
                'type': type,
                'swap': False,
                'spot': spot,
                'futures': futures,
            })
        return result
    def fetch_currencies(self, params={}):
        """Fetch all currencies via one batched conf request; the response array
        is positional — response[i] corresponds to labels[i]."""
        labels = [
            'pub:list:currency',
            'pub:map:currency:sym',
            'pub:map:currency:label',
            'pub:map:currency:unit',
            'pub:map:currency:undl',
            'pub:map:currency:pool',
            'pub:map:currency:explorer',
            'pub:map:currency:tx:fee',
        ]
        config = ','.join(labels)
        request = {
            'config': config,
        }
        response = self.publicGetConfConfig(self.extend(request, params))
        # index each mapping by its currency id (element 0 of each row)
        indexed = {
            'sym': self.index_by(self.safe_value(response, 1, []), 0),
            'label': self.index_by(self.safe_value(response, 2, []), 0),
            'unit': self.index_by(self.safe_value(response, 3, []), 0),
            'undl': self.index_by(self.safe_value(response, 4, []), 0),
            'pool': self.index_by(self.safe_value(response, 5, []), 0),
            'explorer': self.index_by(self.safe_value(response, 6, []), 0),
            'fees': self.index_by(self.safe_value(response, 7, []), 0),
        }
        ids = self.safe_value(response, 0, [])
        result = {}
        for i in range(0, len(ids)):
            id = ids[i]
            code = self.safe_currency_code(id)
            label = self.safe_value(indexed['label'], id, [])
            name = self.safe_string(label, 1)
            pool = self.safe_value(indexed['pool'], id, [])
            type = self.safe_string(pool, 1)
            feeValues = self.safe_value(indexed['fees'], id, [])
            fees = self.safe_value(feeValues, 1, [])
            fee = self.safe_float(fees, 1)
            precision = 8  # fixed default precision
            id = 'f' + id  # funding currency ids carry an 'f' prefix
            result[code] = {
                'id': id,
                'code': code,
                'info': [id, label, pool, feeValues],
                'type': type,
                'name': name,
                'active': True,
                'fee': fee,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': 1 / math.pow(10, precision),
                        'max': None,
                    },
                    'price': {
                        'min': 1 / math.pow(10, precision),
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': fee,
                        'max': None,
                    },
                },
            }
        return result
    def fetch_balance(self, params={}):
        """Fetch wallet balances for one wallet type(params['type'], 'exchange' by default)."""
        self.load_markets()
        response = self.privatePostAuthRWallets(params)
        balanceType = self.safe_string(params, 'type', 'exchange')
        result = {'info': response}
        for b in range(0, len(response)):
            balance = response[b]
            # row layout per the indices used below: [type, currency, total, ..., available]
            accountType = balance[0]
            currency = balance[1]
            total = balance[2]
            available = balance[4]
            if accountType == balanceType:
                if currency[0] == 't':
                    currency = currency[1:]
                code = self.safe_currency_code(currency)
                account = self.account()
                account['total'] = total
                if not available:
                    # available is falsy: 0 means everything is in use,
                    # None presumably means "not yet calculated" — treat as all free
                    if available == 0:
                        account['free'] = 0
                        account['used'] = total
                    else:
                        account['free'] = total
                else:
                    account['free'] = available
                account['used'] = account['total'] - account['free']
                result[code] = account
        return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
precision = self.safe_value(self.options, 'precision', 'R0')
request = {
'symbol': self.market_id(symbol),
'precision': precision,
}
if limit is not None:
request['len'] = limit
fullRequest = self.extend(request, params)
orderbook = self.publicGetBookSymbolPrecision(fullRequest)
timestamp = self.milliseconds()
result = {
'bids': [],
'asks': [],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0
for i in range(0, len(orderbook)):
order = orderbook[i]
price = order[priceIndex]
amount = abs(order[2])
side = 'bids' if (order[2] > 0) else 'asks'
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
    def parse_ticker(self, ticker, market=None):
        """Parse a raw ticker array into a unified ticker; fields are addressed
        relative to the END of the array(presumably to tolerate raw tickers of
        differing lengths — TODO confirm against the v2 tickers endpoint)."""
        timestamp = self.milliseconds()
        symbol = None
        if market is not None:
            symbol = market['symbol']
        length = len(ticker)
        last = ticker[length - 4]
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': ticker[length - 2],
            'low': ticker[length - 1],
            'bid': ticker[length - 10],
            'bidVolume': None,
            'ask': ticker[length - 8],
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': ticker[length - 6],
            'percentage': ticker[length - 5] * 100,
            'average': None,
            'baseVolume': ticker[length - 3],
            'quoteVolume': None,
            'info': ticker,
        }
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
else:
request['symbols'] = 'ALL'
tickers = self.publicGetTickers(self.extend(request, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker[0]
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTickerSymbol(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_symbol(self, marketId):
if marketId is None:
return marketId
marketId = marketId.replace('t', '')
baseId = None
quoteId = None
if marketId.find(':') >= 0:
parts = marketId.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = marketId[0:3]
quoteId = marketId[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
def parse_trade(self, trade, market=None):
tradeLength = len(trade)
isPrivate = (tradeLength > 5)
id = str(trade[0])
amountIndex = 4 if isPrivate else 2
amount = trade[amountIndex]
cost = None
priceIndex = 5 if isPrivate else 3
price = trade[priceIndex]
side = None
orderId = None
takerOrMaker = None
type = None
fee = None
symbol = None
timestampIndex = 2 if isPrivate else 1
timestamp = trade[timestampIndex]
if isPrivate:
marketId = trade[1]
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = self.parse_symbol(marketId)
orderId = str(trade[3])
takerOrMaker = 'maker' if (trade[8] == 1) else 'taker'
feeCost = trade[9]
feeCurrency = self.safe_currency_code(trade[10])
if feeCost is not None:
feeCost = -feeCost
if symbol in self.markets:
feeCost = self.fee_to_precision(symbol, feeCost)
else:
currencyId = 'f' + feeCurrency
if currencyId in self.currencies_by_id:
currency = self.currencies_by_id[currencyId]
feeCost = self.currency_to_precision(currency['code'], feeCost)
fee = {
'cost': float(feeCost),
'currency': feeCurrency,
}
orderType = trade[6]
type = self.safe_string(self.options['exchangeTypes'], orderType)
if symbol is None:
if market is not None:
symbol = market['symbol']
if amount is not None:
side = 'sell' if (amount < 0) else 'buy'
amount = abs(amount)
if cost is None:
if price is not None:
cost = amount * price
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'side': side,
'type': type,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
sort = '-1'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
sort = '1'
if limit is not None:
request['limit'] = limit
request['sort'] = sort
response = self.publicGetTradesSymbolHist(self.extend(request, params))
trades = self.sort_by(response, 1)
return self.parse_trades(trades, market, None, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=100, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100
if since is None:
since = self.milliseconds() - self.parse_timeframe(timeframe) * limit * 1000
request = {
'symbol': market['id'],
'timeframe': self.timeframes[timeframe],
'sort': 1,
'start': since,
'limit': limit,
}
response = self.publicGetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
if status is None:
return status
parts = status.split(' ')
state = self.safe_string(parts, 0)
statuses = {
'ACTIVE': 'open',
'PARTIALLY': 'open',
'EXECUTED': 'closed',
'CANCELED': 'canceled',
'INSUFFICIENT': 'canceled',
'RSN_DUST': 'rejected',
'RSN_PAUSE': 'rejected',
}
return self.safe_string(statuses, state, status)
    def parse_order(self, order, market=None):
        """Parse a raw positional order array into a unified order structure.
        Index meanings used here: 0=id, 2=cid, 3=symbol, 5=mts_create,
        6=amount(remaining), 7=amount_orig, 8=type, 13=status, 16=price, 17=price_avg."""
        id = self.safe_string(order, 0)
        symbol = None
        marketId = self.safe_string(order, 3)
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
        else:
            symbol = self.parse_symbol(marketId)
        if (symbol is None) and (market is not None):
            symbol = market['symbol']
        timestamp = self.safe_integer(order, 5)
        remaining = abs(self.safe_float(order, 6))
        amount = abs(self.safe_float(order, 7))
        filled = amount - remaining
        # the sign of the original amount encodes the side
        side = 'sell' if (order[7] < 0) else 'buy'
        orderType = self.safe_string(order, 8)
        type = self.safe_string(self.safe_value(self.options, 'exchangeTypes'), orderType)
        status = None
        statusString = self.safe_string(order, 13)
        if statusString is not None:
            # status strings may carry extra detail after ' @ '
            parts = statusString.split(' @ ')
            status = self.parse_order_status(self.safe_string(parts, 0))
        price = self.safe_float(order, 16)
        average = self.safe_float(order, 17)
        cost = price * filled
        clientOrderId = self.safe_string(order, 2)
        return {
            'info': order,
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'amount': amount,
            'cost': cost,
            'average': average,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': None,
            'trades': None,
        }
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderTypes = self.safe_value(self.options, 'orderTypes', {})
orderType = self.safe_string_upper(orderTypes, type, type)
amount = -amount if (side == 'sell') else amount
request = {
'symbol': market['id'],
'type': orderType,
'amount': self.number_to_string(amount),
}
if (orderType == 'LIMIT') or (orderType == 'EXCHANGE LIMIT'):
request['price'] = self.number_to_string(price)
elif (orderType == 'STOP') or (orderType == 'EXCHANGE STOP'):
stopPrice = self.safe_float(params, 'stopPrice', price)
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'STOP LIMIT') or (orderType == 'EXCHANGE STOP LIMIT'):
priceAuxLimit = self.safe_float(params, 'price_aux_limit')
stopPrice = self.safe_float(params, 'stopPrice')
if priceAuxLimit is None:
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter or a price_aux_limit parameter for a ' + orderType + ' order')
else:
request['price_aux_limit'] = self.number_to_string(price)
else:
request['price_aux_limit'] = self.number_to_string(priceAuxLimit)
if stopPrice is None:
stopPrice = price
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'TRAILING STOP') or (orderType == 'EXCHANGE TRAILING STOP'):
priceTrailing = self.safe_float(params, 'price_trailing')
request['price_trailing'] = self.number_to_string(priceTrailing)
stopPrice = self.safe_float(params, 'stopPrice', price)
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'FOK') or (orderType == 'EXCHANGE FOK') or (orderType == 'IOC') or (orderType == 'EXCHANGE IOC'):
request['price'] = self.number_to_string(price)
params = self.omit(params, ['stopPrice', 'price_aux_limit', 'price_trailing'])
clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')
if clientOrderId is not None:
request['cid'] = clientOrderId
params = self.omit(params, ['cid', 'clientOrderId'])
response = self.privatePostAuthWOrderSubmit(self.extend(request, params))
CESS, ERROR, FAILURE, ...)
# "Submitting 1 orders." # Text of the notification
# ]
#
status = self.safe_string(response, 6)
if status != 'SUCCESS':
errorCode = response[5]
errorText = response[7]
raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(
orders = self.safe_value(response, 4, [])
order = self.safe_value(orders, 0)
return self.parse_order(order, market)
def cancel_all_orders(self, symbol=None, params={}):
request = {
'all': 1,
}
response = self.privatePostAuthWOrderCancelMulti(self.extend(request, params))
orders = self.safe_value(response, 4, [])
return self.parse_orders(orders)
def cancel_order(self, id, symbol=None, params={}):
cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id
request = None
if cid is not None:
cidDate = self.safe_value(params, 'cidDate') # client order id date
if cidDate is None:
raise InvalidOrder(self.id + " canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')")
request = {
'cid': cid,
'cid_date': cidDate,
}
params = self.omit(params, ['cid', 'clientOrderId'])
else:
request = {
'id': int(id),
}
response = self.privatePostAuthWOrderCancel(self.extend(request, params))
order = self.safe_value(response, 4)
return self.parse_order(order)
def fetch_open_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_open_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_closed_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_closed_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrders(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbol(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# returns the most recent closed or canceled orders up to circa two weeks ago
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrdersHist(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbolHist(self.extend(request, params))
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 2500
return self.parse_orders(response, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
orderId = int(id)
request = {
'id': orderId,
'symbol': market['id'],
}
# valid for trades upto 10 days old
response = self.privatePostAuthROrderSymbolIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'end': self.milliseconds(),
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 1000
method = 'privatePostAuthRTradesHist'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'privatePostAuthRTradesSymbolHist'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_deposit_address(self, code, params={}):
self.load_markets()
request = {
'op_renew': 1,
}
response = self.fetch_deposit_address(code, self.extend(request, params))
return response
    def fetch_deposit_address(self, code, params={}):
        """Fetch the deposit address for *code*(createDepositAddress forces a renewal)."""
        self.load_markets()
        # todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
        name = self.getCurrencyName(code)
        request = {
            'method': name,
            'wallet': 'exchange',  # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
            'op_renew': 0,  # a value of 1 will generate a new address
        }
        response = self.privatePostAuthWDepositAddress(self.extend(request, params))
        # response is a v2 'acc_dep' notification; the payload at index 4 is
        # [None, METHOD, CURRENCY_CODE, None, ADDRESS, POOL_ADDRESS]
        result = self.safe_value(response, 4, [])
        poolAddress = self.safe_string(result, 5)
        # when a pool address is present it becomes the address and the plain
        # address field becomes the tag — presumably for tag-based currencies
        address = self.safe_string(result, 4) if (poolAddress is None) else poolAddress
        tag = None if (poolAddress is None) else self.safe_string(result, 4)
        self.check_address(address)
        return {
            'currency': code,
            'address': address,
            'tag': tag,
            'info': response,
        }
def parse_transaction_status(self, status):
statuses = {
'SUCCESS': 'ok',
'ERROR': 'failed',
'FAILURE': 'failed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """Parse either kind of raw transaction into a unified transaction.

        Two layouts are distinguished by length:
        - withdraw() notification(length < 9): payload array at index 4 holds
          [WITHDRAWAL_ID, None, METHOD, PAYMENT_ID, WALLET, AMOUNT, None, None, FEE]
        - fetchTransactions movement row: [ID, CURRENCY, CURRENCY_NAME, ..., MTS_STARTED,
          MTS_UPDATED, ..., STATUS, ..., AMOUNT(negative for withdrawals), FEES, ...,
          DESTINATION_ADDRESS, ..., TRANSACTION_ID, WITHDRAW_TRANSACTION_NOTE]
        """
        transactionLength = len(transaction)
        timestamp = None
        updated = None
        code = None
        amount = None
        id = None
        status = None
        tag = None
        type = None
        feeCost = None
        txid = None
        addressTo = None
        if transactionLength < 9:
            # withdraw() notification array
            data = self.safe_value(transaction, 4, [])
            timestamp = self.safe_integer(transaction, 0)
            if currency is not None:
                code = currency['code']
            feeCost = self.safe_float(data, 8)
            if feeCost is not None:
                feeCost = -feeCost
            amount = self.safe_float(data, 5)
            id = self.safe_value(data, 0)
            status = 'ok'
            if id == 0:
                # a zero withdrawal id means the request was rejected
                id = None
                status = 'failed'
            tag = self.safe_string(data, 3)
            type = 'withdrawal'
        else:
            # movement history row
            id = self.safe_string(transaction, 0)
            timestamp = self.safe_integer(transaction, 5)
            updated = self.safe_integer(transaction, 6)
            status = self.parse_transaction_status(self.safe_string(transaction, 9))
            amount = self.safe_float(transaction, 12)
            if amount is not None:
                if amount < 0:
                    type = 'withdrawal'
                else:
                    type = 'deposit'
            feeCost = self.safe_float(transaction, 13)
            if feeCost is not None:
                feeCost = -feeCost
            addressTo = self.safe_string(transaction, 16)
            txid = self.safe_string(transaction, 20)
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'addressFrom': None,
            'address': addressTo,  # self is actually the tag for XRP transfers(the address is missing)
            'addressTo': addressTo,
            'tagFrom': None,
            'tag': tag,  # refix it properly for the tag from description
            'tagTo': tag,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': {
                'currency': code,
                'cost': feeCost,
                'rate': None,
            },
        }
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {}
method = 'privatePostAuthRMovementsHist'
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method = 'privatePostAuthRMovementsCurrencyHist'
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # max 1000
response = getattr(self, method)(self.extend(request, params))
#
# [
# [
# 13293039, # ID
# 'ETH', # CURRENCY
# 'ETHEREUM', # CURRENCY_NAME
# null,
# null,
# 1574175052000, # MTS_STARTED
# 1574181326000, # MTS_UPDATED
# null,
# null,
# 'CANCELED', # STATUS
# null,
# null,
# -0.24, # AMOUNT, negative for withdrawals
# -0.00135, # FEES
# null,
# null,
# 'DESTINATION_ADDRESS',
# null,
# null,
# null,
# 'TRANSACTION_ID',
# "Purchase of 100 pizzas", # WITHDRAW_TRANSACTION_NOTE
# ]
# ]
#
return self.parse_transactions(response, currency, since, limit)
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Request a withdrawal of *amount* units of *code* to *address*
        (with an optional payment id *tag*); returns a unified transaction."""
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        # todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
        name = self.getCurrencyName(code)
        request = {
            'method': name,
            'wallet': 'exchange',  # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
            'amount': self.number_to_string(amount),
            'address': address,
        }
        if tag is not None:
            request['payment_id'] = tag
        response = self.privatePostAuthWWithdraw(self.extend(request, params))
        # response is a v2 'acc_wd-req' notification:
        # [MTS, TYPE, MESSAGE_ID, None,
        #  [WITHDRAWAL_ID, None, METHOD, PAYMENT_ID, WALLET, AMOUNT, None, None, WITHDRAWAL_FEE],
        #  CODE, STATUS, TEXT]
        text = self.safe_string(response, 7)
        if text != 'success':
            # map known error texts(e.g. invalid address) to typed exceptions
            self.throw_broadly_matched_exception(self.exceptions['broad'], text, text)
        transaction = self.parse_transaction(response, currency)
        return self.extend(transaction, {
            'address': address,
        })
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privatePostPositions(params)
#
# [
# [
# "tBTCUSD", # SYMBOL
# "ACTIVE", # STATUS
# 0.0195, # AMOUNT
# 8565.0267019, # BASE_PRICE
# 0, # MARGIN_FUNDING
# 0, # MARGIN_FUNDING_TYPE
# -0.33455568705000516, # PL
# -0.0003117550117425625, # PL_PERC
# 7045.876419249083, # PRICE_LIQ
# 3.0673001895895604, # LEVERAGE
# null, # _PLACEHOLDER
# 142355652, # POSITION_ID
# 1574002216000, # MTS_CREATE
# 1574002216000, # MTS_UPDATE
# null, # _PLACEHOLDER
# 0, # TYPE
# null, # _PLACEHOLDER
# 0, # COLLATERAL
# 0, # COLLATERAL_MIN
# # META
# {
# "reason":"TRADE",
# "order_id":34271018124,
# "liq_stage":null,
# "trade_price":"8565.0267019",
# "trade_amount":"0.0195",
# "order_id_oppo":34277498022
# }
# ]
# ]
#
# todo unify parsePosition/parsePositions
return response
    def nonce(self):
        # Use the current millisecond timestamp as a monotonically increasing nonce.
        return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the request descriptor (url/method/body/headers) for an API call.

    :param path: endpoint path template; interpolated with *params*
    :param api: 'public', 'private', or 'v1' (selects base url and version prefix)
    :param method: HTTP verb
    :param params: request parameters; path params are consumed, the rest become
                   the query string (public) or the JSON body (private)
    :param headers: passed through; replaced with auth headers for private calls
    :param body: passed through; replaced with the JSON payload for private calls
    :returns: dict with 'url', 'method', 'body' and 'headers'
    """
    endpoint = '/' + self.implode_params(path, params)
    remaining = self.omit(params, self.extract_params(path))
    # 'v1' endpoints carry their own version literal; everything else uses
    # the exchange's configured API version.
    prefix = api if api == 'v1' else self.version
    endpoint = prefix + endpoint
    url = self.urls['api'][api] + '/' + endpoint
    if api == 'public' and remaining:
        url += '?' + self.urlencode(remaining)
    if api == 'private':
        self.check_required_credentials()
        # HMAC-SHA384 over '/api/' + endpoint + nonce + json body
        nonce = str(self.nonce())
        body = self.json(remaining)
        payload = '/api/' + endpoint + nonce + body
        signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha384)
        headers = {
            'bfx-nonce': nonce,
            'bfx-apikey': self.apiKey,
            'bfx-signature': signature,
            'Content-Type': 'application/json',
        }
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Issue a request and translate error payloads into typed exceptions.

    Raises InsufficientFunds for balance errors, ExchangeError for any other
    payload carrying a 'message' key, and ExchangeError for an empty-string
    response; otherwise returns the response unchanged.
    """
    response = self.fetch2(path, api, method, params, headers, body)
    if not response:
        # Falsy responses: an empty string is an error, anything else
        # (None, {}, []) is handed back to the caller untouched.
        if response == '':
            raise ExchangeError(self.id + ' returned empty response')
        return response
    if 'message' in response:
        if response['message'].find('not enough exchange balance') >= 0:
            raise InsufficientFunds(self.id + ' ' + self.json(response))
        raise ExchangeError(self.id + ' ' + self.json(response))
    return response
def handle_errors(self, statusCode, statusText, url, method, responseHeaders, responseBody, response, requestHeaders, requestBody):
if statusCode == 500:
# See https://docs.bitfinex.com/docs/abbreviations-glossary#section-errorinfo-codes
errorCode = self.number_to_string(response[1])
errorText = response[2]
feedback = self.id + ' ' + errorText
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorText, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], errorText, feedback)
raise ExchangeError(self.id + ' ' + errorText + '(
| true | true |
1c46380a9d4866b842ff9a1c2591e0d1ba1f4588 | 11,345 | py | Python | nuitka/tree/ReformulationDictionaryCreation.py | em3ndez/Nuitka | a5a036a94c1842d1cd72f27c0c67461798fdf977 | [
"Apache-2.0"
] | 1 | 2019-09-09T19:27:43.000Z | 2019-09-09T19:27:43.000Z | nuitka/tree/ReformulationDictionaryCreation.py | em3ndez/Nuitka | a5a036a94c1842d1cd72f27c0c67461798fdf977 | [
"Apache-2.0"
] | 1 | 2019-02-21T13:05:17.000Z | 2019-02-21T13:05:17.000Z | nuitka/tree/ReformulationDictionaryCreation.py | em3ndez/Nuitka | a5a036a94c1842d1cd72f27c0c67461798fdf977 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of dictionary creations.
Dictionary creations might be directly translated to constants, or they might
become nodes that build dictionaries.
For Python3.5, unpacking can happen while creating dictionaries, these are
being re-formulated to an internal function.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable,
StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import ExpressionAttributeLookup
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionKeyValuePair,
StatementDictOperationUpdate,
makeExpressionMakeDict,
makeExpressionMakeDictOrConstant,
makeExpressionPairs,
)
from nuitka.nodes.ExceptionNodes import (
ExpressionBuiltinMakeException,
StatementRaiseException,
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec
from .InternalModule import (
internal_source_ref,
makeInternalHelperFunctionBody,
once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
buildNodeList,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
)
def buildDictionaryNode(provider, node, source_ref):
    """Build the node tree for a ``{...}`` dictionary literal.

    On Python 3.5+ a ``None`` key marks a ``**value`` unpacking item, which
    forces the reformulation through the internal unpacking helper; plain
    literals become a (possibly constant-folded) dict-making expression.
    """
    if python_version >= 350:
        has_star_unpacking = any(key is None for key in node.keys)
        if has_star_unpacking:
            return buildDictionaryUnpacking(
                provider=provider, node=node, source_ref=source_ref
            )
    return makeExpressionMakeDictOrConstant(
        pairs=makeExpressionPairs(
            keys=buildNodeList(provider, node.keys, source_ref),
            values=buildNodeList(provider, node.values, source_ref),
        ),
        user_provided=True,
        source_ref=source_ref,
    )
@once_decorator
def getDictUnpackingHelper():
    """Build (once) the internal helper function body for dict unpacking.

    The constructed node tree corresponds to this Python source::

        def _unpack_dict(*args):
            result = {}
            for item in args:
                result.update(item)   # AttributeError here means "not a mapping"
            return result

    where a failing ``update`` is converted into
    ``TypeError("'%s' object is not a mapping" % type(item).__name__)``.

    :returns: the internal helper function body node
    """
    helper_name = "_unpack_dict"
    result = makeInternalHelperFunctionBody(
        name=helper_name,
        parameters=ParameterSpec(
            ps_name=helper_name,
            ps_normal_args=(),
            # The helper takes only a star-list argument "args".
            ps_list_star_arg="args",
            ps_dict_star_arg=None,
            ps_default_count=0,
            ps_kw_only_args=(),
            ps_pos_only_args=(),
        ),
    )
    temp_scope = None
    # "dict" accumulates the merged result, "iter" walks the args tuple,
    # "keys" holds the current item taken from the iterator.
    tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
    tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
    tmp_item_variable = result.allocateTempVariable(temp_scope, "keys")
    # Loop body: fetch the next item (break on StopIteration), then merge it
    # into the result dict, mapping AttributeError to a TypeError.
    loop_body = makeStatementsSequenceFromStatements(
        makeTryExceptSingleHandlerNode(
            tried=StatementAssignmentVariable(
                variable=tmp_item_variable,
                source=ExpressionBuiltinNext1(
                    value=ExpressionTempVariableRef(
                        variable=tmp_iter_variable, source_ref=internal_source_ref
                    ),
                    source_ref=internal_source_ref,
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="StopIteration",
            handler_body=StatementLoopBreak(source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        makeTryExceptSingleHandlerNode(
            tried=StatementDictOperationUpdate(
                dict_arg=ExpressionTempVariableRef(
                    variable=tmp_result_variable, source_ref=internal_source_ref
                ),
                value=ExpressionTempVariableRef(
                    variable=tmp_item_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="AttributeError",
            handler_body=StatementRaiseException(
                exception_type=ExpressionBuiltinMakeException(
                    exception_name="TypeError",
                    args=(
                        # "'%s' object is not a mapping" % type(item).__name__
                        makeBinaryOperationNode(
                            operator="Mod",
                            left=makeConstantRefNode(
                                constant="""\
'%s' object is not a mapping""",
                                source_ref=internal_source_ref,
                                user_provided=True,
                            ),
                            right=makeExpressionMakeTuple(
                                elements=(
                                    ExpressionAttributeLookup(
                                        expression=ExpressionBuiltinType1(
                                            value=ExpressionTempVariableRef(
                                                variable=tmp_item_variable,
                                                source_ref=internal_source_ref,
                                            ),
                                            source_ref=internal_source_ref,
                                        ),
                                        attribute_name="__name__",
                                        source_ref=internal_source_ref,
                                    ),
                                ),
                                source_ref=internal_source_ref,
                            ),
                            source_ref=internal_source_ref,
                        ),
                    ),
                    source_ref=internal_source_ref,
                ),
                exception_value=None,
                exception_trace=None,
                exception_cause=None,
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
    )
    args_variable = result.getVariableForAssignment(variable_name="args")
    # Cleanup that runs in the finally handler of the helper body.
    final = (
        StatementReleaseVariable(
            variable=tmp_result_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_iter_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_item_variable, source_ref=internal_source_ref
        ),
        # We get handed our args responsibility.
        StatementDelVariable(
            variable=args_variable, tolerant=False, source_ref=internal_source_ref
        ),
    )
    # Main body: iter(args); result = {}; loop; return result.
    tried = makeStatementsSequenceFromStatements(
        StatementAssignmentVariable(
            variable=tmp_iter_variable,
            source=ExpressionBuiltinIter1(
                value=ExpressionVariableRef(
                    variable=args_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
        StatementAssignmentVariable(
            variable=tmp_result_variable,
            source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        StatementLoop(body=loop_body, source_ref=internal_source_ref),
        StatementReturn(
            expression=ExpressionTempVariableRef(
                variable=tmp_result_variable, source_ref=internal_source_ref
            ),
            source_ref=internal_source_ref,
        ),
    )
    result.setBody(
        makeStatementsSequenceFromStatement(
            makeTryFinallyStatement(
                provider=result,
                tried=tried,
                final=final,
                source_ref=internal_source_ref,
            )
        )
    )
    return result
def buildDictionaryUnpackingArgs(provider, keys, values, source_ref):
    """Create the argument expressions for the dict unpacking helper call.

    A ``None`` key denotes a ``**value`` item whose expression is passed
    through directly; every other key/value pair is wrapped into a one-entry
    dict-making expression so the helper can merge all arguments uniformly.
    """
    # TODO: We could be a lot cleverer about the dictionaries for non-starred
    # arguments, but lets get this to work first.
    args = []
    for key, value in zip(keys, values):
        if key is None:
            args.append(buildNode(provider, value, source_ref))
            continue
        if type(key) is str:
            key_node = makeConstantRefNode(constant=key, source_ref=source_ref)
        else:
            key_node = buildNode(provider, key, source_ref)
        single_pair = ExpressionKeyValuePair(
            key=key_node,
            value=buildNode(provider, value, source_ref),
            source_ref=source_ref,
        )
        args.append(
            makeExpressionMakeDict(pairs=(single_pair,), source_ref=source_ref)
        )
    return args
def buildDictionaryUnpacking(provider, node, source_ref):
    """Reformulate a dict literal containing ``**`` unpacking.

    Emits a call to the shared internal helper, passing all items as a single
    tuple argument that the helper merges into one dictionary.
    """
    helper_args = buildDictionaryUnpackingArgs(
        provider, node.keys, node.values, source_ref
    )
    helper_creation = ExpressionFunctionCreation(
        function_ref=ExpressionFunctionRef(
            function_body=getDictUnpackingHelper(), source_ref=source_ref
        ),
        defaults=(),
        kw_defaults=None,
        annotations=None,
        source_ref=source_ref,
    )
    call = ExpressionFunctionCall(
        function=helper_creation,
        values=(makeExpressionMakeTuple(helper_args, source_ref),),
        source_ref=source_ref,
    )
    call.setCompatibleSourceReference(helper_args[-1].getCompatibleSourceReference())
    return call
| 36.362179 | 87 | 0.603967 |
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable,
StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import ExpressionAttributeLookup
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionKeyValuePair,
StatementDictOperationUpdate,
makeExpressionMakeDict,
makeExpressionMakeDictOrConstant,
makeExpressionPairs,
)
from nuitka.nodes.ExceptionNodes import (
ExpressionBuiltinMakeException,
StatementRaiseException,
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec
from .InternalModule import (
internal_source_ref,
makeInternalHelperFunctionBody,
once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
buildNodeList,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
)
def buildDictionaryNode(provider, node, source_ref):
    """Build the node tree for a ``{...}`` dictionary literal.

    On Python 3.5+ a ``None`` key marks a ``**value`` unpacking item and
    forces the reformulation through the internal unpacking helper;
    otherwise a (possibly constant-folded) dict-making expression is built.
    """
    if python_version >= 350:
        for key in node.keys:
            if key is None:
                return buildDictionaryUnpacking(
                    provider=provider, node=node, source_ref=source_ref
                )
    return makeExpressionMakeDictOrConstant(
        pairs=makeExpressionPairs(
            keys=buildNodeList(provider, node.keys, source_ref),
            values=buildNodeList(provider, node.values, source_ref),
        ),
        user_provided=True,
        source_ref=source_ref,
    )
@once_decorator
def getDictUnpackingHelper():
    """Build (once) the internal helper function body for dict unpacking.

    The constructed node tree corresponds to this Python source::

        def _unpack_dict(*args):
            result = {}
            for item in args:
                result.update(item)   # AttributeError here means "not a mapping"
            return result

    where a failing ``update`` is converted into
    ``TypeError("'%s' object is not a mapping" % type(item).__name__)``.

    :returns: the internal helper function body node
    """
    helper_name = "_unpack_dict"
    result = makeInternalHelperFunctionBody(
        name=helper_name,
        parameters=ParameterSpec(
            ps_name=helper_name,
            ps_normal_args=(),
            # The helper takes only a star-list argument "args".
            ps_list_star_arg="args",
            ps_dict_star_arg=None,
            ps_default_count=0,
            ps_kw_only_args=(),
            ps_pos_only_args=(),
        ),
    )
    temp_scope = None
    # "dict" accumulates the merged result, "iter" walks the args tuple,
    # "keys" holds the current item taken from the iterator.
    tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
    tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
    tmp_item_variable = result.allocateTempVariable(temp_scope, "keys")
    # Loop body: fetch the next item (break on StopIteration), then merge it
    # into the result dict, mapping AttributeError to a TypeError.
    loop_body = makeStatementsSequenceFromStatements(
        makeTryExceptSingleHandlerNode(
            tried=StatementAssignmentVariable(
                variable=tmp_item_variable,
                source=ExpressionBuiltinNext1(
                    value=ExpressionTempVariableRef(
                        variable=tmp_iter_variable, source_ref=internal_source_ref
                    ),
                    source_ref=internal_source_ref,
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="StopIteration",
            handler_body=StatementLoopBreak(source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        makeTryExceptSingleHandlerNode(
            tried=StatementDictOperationUpdate(
                dict_arg=ExpressionTempVariableRef(
                    variable=tmp_result_variable, source_ref=internal_source_ref
                ),
                value=ExpressionTempVariableRef(
                    variable=tmp_item_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="AttributeError",
            handler_body=StatementRaiseException(
                exception_type=ExpressionBuiltinMakeException(
                    exception_name="TypeError",
                    args=(
                        # "'%s' object is not a mapping" % type(item).__name__
                        makeBinaryOperationNode(
                            operator="Mod",
                            left=makeConstantRefNode(
                                constant="""\
'%s' object is not a mapping""",
                                source_ref=internal_source_ref,
                                user_provided=True,
                            ),
                            right=makeExpressionMakeTuple(
                                elements=(
                                    ExpressionAttributeLookup(
                                        expression=ExpressionBuiltinType1(
                                            value=ExpressionTempVariableRef(
                                                variable=tmp_item_variable,
                                                source_ref=internal_source_ref,
                                            ),
                                            source_ref=internal_source_ref,
                                        ),
                                        attribute_name="__name__",
                                        source_ref=internal_source_ref,
                                    ),
                                ),
                                source_ref=internal_source_ref,
                            ),
                            source_ref=internal_source_ref,
                        ),
                    ),
                    source_ref=internal_source_ref,
                ),
                exception_value=None,
                exception_trace=None,
                exception_cause=None,
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
    )
    args_variable = result.getVariableForAssignment(variable_name="args")
    # Cleanup that runs in the finally handler of the helper body; the "args"
    # variable is deleted here because responsibility for it is handed over.
    final = (
        StatementReleaseVariable(
            variable=tmp_result_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_iter_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_item_variable, source_ref=internal_source_ref
        ),
        StatementDelVariable(
            variable=args_variable, tolerant=False, source_ref=internal_source_ref
        ),
    )
    # Main body: iter(args); result = {}; loop; return result.
    tried = makeStatementsSequenceFromStatements(
        StatementAssignmentVariable(
            variable=tmp_iter_variable,
            source=ExpressionBuiltinIter1(
                value=ExpressionVariableRef(
                    variable=args_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
        StatementAssignmentVariable(
            variable=tmp_result_variable,
            source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        StatementLoop(body=loop_body, source_ref=internal_source_ref),
        StatementReturn(
            expression=ExpressionTempVariableRef(
                variable=tmp_result_variable, source_ref=internal_source_ref
            ),
            source_ref=internal_source_ref,
        ),
    )
    result.setBody(
        makeStatementsSequenceFromStatement(
            makeTryFinallyStatement(
                provider=result,
                tried=tried,
                final=final,
                source_ref=internal_source_ref,
            )
        )
    )
    return result
def buildDictionaryUnpackingArgs(provider, keys, values, source_ref):
    """Create the argument expressions for the dict unpacking helper call.

    A ``None`` key denotes a ``**value`` item whose expression is passed
    through directly; every other key/value pair is wrapped into a one-entry
    dict-making expression so the helper can merge all arguments uniformly.
    """
    result = []
    for key, value in zip(keys, values):
        if key is None:
            # A "**value" item: pass the mapping expression straight through.
            result.append(buildNode(provider, value, source_ref))
        elif type(key) is str:
            # Plain string key: wrap as a constant-keyed one-entry dict.
            result.append(
                makeExpressionMakeDict(
                    pairs=(
                        ExpressionKeyValuePair(
                            key=makeConstantRefNode(
                                constant=key, source_ref=source_ref
                            ),
                            value=buildNode(provider, value, source_ref),
                            source_ref=source_ref,
                        ),
                    ),
                    source_ref=source_ref,
                )
            )
        else:
            # Computed key: build both key and value expressions.
            result.append(
                makeExpressionMakeDict(
                    pairs=(
                        ExpressionKeyValuePair(
                            key=buildNode(provider, key, source_ref),
                            value=buildNode(provider, value, source_ref),
                            source_ref=source_ref,
                        ),
                    ),
                    source_ref=source_ref,
                )
            )
    return result
def buildDictionaryUnpacking(provider, node, source_ref):
    """Reformulate a dict literal containing ``**`` unpacking.

    Emits a call to the shared internal helper, passing all items as a single
    tuple argument that the helper merges into one dictionary.
    """
    helper_args = buildDictionaryUnpackingArgs(
        provider, node.keys, node.values, source_ref
    )
    result = ExpressionFunctionCall(
        function=ExpressionFunctionCreation(
            function_ref=ExpressionFunctionRef(
                function_body=getDictUnpackingHelper(), source_ref=source_ref
            ),
            defaults=(),
            kw_defaults=None,
            annotations=None,
            source_ref=source_ref,
        ),
        values=(makeExpressionMakeTuple(helper_args, source_ref),),
        source_ref=source_ref,
    )
    # Point the call at the last argument's location for compatible tracebacks.
    result.setCompatibleSourceReference(helper_args[-1].getCompatibleSourceReference())
    return result
| true | true |
1c46385a5596d5e0bd98b187dbff517c3d1d3c1c | 20,549 | py | Python | flux_combined_high_binding/model_741.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_combined_high_binding/model_741.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_combined_high_binding/model_741.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 87500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 60000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.134259 | 798 | 0.804127 |
# Auto-generated PySB model (extrinsic apoptosis pathway). The declarations
# below attach to an implicit global Model(); this file is presumably emitted
# by a model exporter and should be regenerated, not hand-edited -- TODO
# confirm the generating tool.
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
# Monomer declarations: each names a molecular species and its binding sites.
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA has two self-association sites (BaxA_1/BaxA_2) used by the pore rules.
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 87500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 60000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
# Observables: one per monomer, matching every state of that species
# (the empty pattern, e.g. Ligand(), matches any bound/unbound form).
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# Initial conditions: each species starts fully unbound (all sites None)
# at the amount given by the corresponding *_0 parameter above.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
1c463a5d7cdbdef19a2f8ee060198069e58e05e9 | 549 | py | Python | core/utils.py | matiaspacheco/cms_wehaa | 999f49344c453afd1cf8f11f36ac6b56b2b7f130 | [
"MIT"
] | null | null | null | core/utils.py | matiaspacheco/cms_wehaa | 999f49344c453afd1cf8f11f36ac6b56b2b7f130 | [
"MIT"
] | null | null | null | core/utils.py | matiaspacheco/cms_wehaa | 999f49344c453afd1cf8f11f36ac6b56b2b7f130 | [
"MIT"
] | null | null | null | # Standard Python library imports.
import math
import re
# Core Django imports.
from django.utils.html import strip_tags
def count_words(html_string):
    """Return the number of words in *html_string*, ignoring HTML markup.

    Markup is removed with Django's ``strip_tags``; a "word" is a maximal
    run of word characters, so punctuation and whitespace are not counted.
    """
    word_string = strip_tags(html_string)
    # \w+ = letters, digits and underscores; one findall match per word.
    return len(re.findall(r'\w+', word_string))
def read_time(html_string):
    """Estimated reading time of *html_string* in whole minutes.

    Rounds up, so any non-empty text reports at least one minute.
    """
    count = count_words(html_string)
    read_time_min = math.ceil(count/200.0)  # assumes a 200 words-per-minute reading speed
    return int(read_time_min) | 26.142857 | 67 | 0.714026 |
import math
import re
from django.utils.html import strip_tags
def count_words(html_string):
    """Return the number of words in *html_string* after stripping HTML tags."""
    word_string = strip_tags(html_string)
    # A "word" is a maximal run of word characters (\w+).
    matching_words = re.findall(r'\w+', word_string)
    count = len(matching_words)
    return count
def read_time(html_string):
    """Estimated reading time in whole minutes, assuming 200 words per minute."""
    count = count_words(html_string)
    read_time_min = math.ceil(count/200.0)  # ceil: round up to the next whole minute
    return int(read_time_min) | true | true |
1c463a5ecbad7e73fb57009519d7ca474d07af2c | 2,566 | py | Python | web-api/favorites/views.py | Egor4ik325/anyberry | 87787f82f1cec0f32d9d7c7384e7b2771f34af3c | [
"MIT"
] | 1 | 2021-09-12T16:28:52.000Z | 2021-09-12T16:28:52.000Z | web-api/favorites/views.py | Egor4ik325/anyberry | 87787f82f1cec0f32d9d7c7384e7b2771f34af3c | [
"MIT"
] | 2 | 2021-09-06T08:31:56.000Z | 2021-09-06T08:35:25.000Z | web-api/favorites/views.py | Egor4ik325/anyberry | 87787f82f1cec0f32d9d7c7384e7b2771f34af3c | [
"MIT"
] | null | null | null | from berries.models import Berry
from berries.serializers import BerrySerializer
from django.core.cache import cache
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from favorites import serializers
from favorites.serializers import FavoriteDeserializer
class FavoriteViewSet(ViewSet):
    """List, add, remove and clear berries on the user's favorite list.

    The list is stored as a set of berry ids in the cache backend
    (Redis), keyed per authenticated user, so entries are unique.
    """
    # Cache key template, formatted with the requesting user's id.
    cache_format = "favorite_{user}"
    authentication_classes = [SessionAuthentication]
    permission_classes = [IsAuthenticated]

    def list(self, *args, **kwargs):
        """Return the favorite berry ids (unique, unordered)."""
        berries = cache.get(self.get_cache_key(), default=set())
        return Response(list(berries))

    def clear(self, *args, **kwargs):
        """Delete the whole favorite list."""
        cache.delete(self.get_cache_key())
        return Response(status=status.HTTP_204_NO_CONTENT)

    def add(self, *args, **kwargs):
        """Add a berry to the favorite list.

        Adding an already-present berry has no effect (set semantics).
        """
        serializer = FavoriteDeserializer(data=self.request.data)
        serializer.is_valid(raise_exception=True)
        berry = serializer.save()
        key = self.get_cache_key()
        berries: set = cache.get(key, default=set())
        berries.add(berry.id)
        # timeout=None: persist until explicitly cleared.
        cache.set(key, berries, timeout=None)
        return Response(status=status.HTTP_201_CREATED)

    def remove(self, *args, **kwargs):
        """Remove a berry from the favorite list (no-op if not present)."""
        berry_id = self.kwargs["berry_id"]
        # Fetching validates the id; an unknown id raises Berry.DoesNotExist,
        # which propagates uncaught -- NOTE(review): map to 404 if desired.
        berry = Berry.berries.get(id=berry_id)
        key = self.get_cache_key()
        berries: set = cache.get(key, default=set())
        # discard() ignores a missing element, replacing the previous
        # try/remove/except KeyError/pass construct.
        berries.discard(berry.id)
        cache.set(key, berries, timeout=None)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def get_cache_key(self):
        """Cache key for the requesting user's favorite set."""
        return self.cache_format.format(user=self.request.user.id)
# Concrete views for the URLconf: map HTTP verbs to ViewSet actions.
favorite_list_view = FavoriteViewSet.as_view(
    {"get": "list", "post": "add", "delete": "clear"})
favorite_detail_view = FavoriteViewSet.as_view(
    {"delete": "remove"})
| 32.897436 | 69 | 0.687062 | from berries.models import Berry
from berries.serializers import BerrySerializer
from django.core.cache import cache
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from favorites import serializers
from favorites.serializers import FavoriteDeserializer
class FavoriteViewSet(ViewSet):
    """List, add, remove and clear berries on the user's favorite list.

    The list is stored as a set of berry ids in the cache backend,
    keyed per authenticated user, so entries are unique.
    """
    # Cache key template, formatted with the requesting user's id.
    cache_format = "favorite_{user}"
    authentication_classes = [SessionAuthentication]
    permission_classes = [IsAuthenticated]
    def list(self, *args, **kwargs):
        """Return the favorite berry ids (unique, unordered)."""
        berries = cache.get(self.get_cache_key(), default=set())
        data = list(berries)
        return Response(data)
    def clear(self, *args, **kwargs):
        """Delete the whole favorite list."""
        cache.delete(self.get_cache_key())
        return Response(status=status.HTTP_204_NO_CONTENT)
    def add(self, *args, **kwargs):
        """Add a berry to the favorite list (set semantics: no duplicates)."""
        serializer = FavoriteDeserializer(data=self.request.data)
        serializer.is_valid(raise_exception=True)
        berry = serializer.save()
        berries: set = cache.get(self.get_cache_key(), default=set())
        berries.add(berry.id)
        # timeout=None: persist until explicitly cleared.
        cache.set(self.get_cache_key(), berries, timeout=None)
        return Response(status=status.HTTP_201_CREATED)
    def remove(self, *args, **kwargs):
        """Remove a berry from the favorite list (no-op if not present)."""
        berry_id = self.kwargs["berry_id"]
        # Fetching validates the id; an unknown id raises Berry.DoesNotExist,
        # which propagates uncaught -- NOTE(review): map to 404 if desired.
        berry = Berry.berries.get(id=berry_id)
        berries: set = cache.get(self.get_cache_key(), default=set())
        try:
            berries.remove(berry.id)
        except KeyError:
            # Berry was not in the set; removal is a no-op.
            pass
        cache.set(self.get_cache_key(), berries, timeout=None)
        return Response(status=status.HTTP_204_NO_CONTENT)
    def get_cache_key(self):
        """Cache key for the requesting user's favorite set."""
        return self.cache_format.format(user=self.request.user.id)
favorite_list_view = FavoriteViewSet.as_view(
{"get": "list", "post": "add", "delete": "clear"})
favorite_detail_view = FavoriteViewSet.as_view(
{"delete": "remove"})
| true | true |
1c463a9792c032fff33e3627f78835a1ae9c2a50 | 4,436 | py | Python | elastalert/kibana_external_url_formatter.py | buratinopy/elastalert2 | 27deb8a61dd48798c4686ec95d3e48909903a694 | [
"Apache-2.0"
] | null | null | null | elastalert/kibana_external_url_formatter.py | buratinopy/elastalert2 | 27deb8a61dd48798c4686ec95d3e48909903a694 | [
"Apache-2.0"
] | null | null | null | elastalert/kibana_external_url_formatter.py | buratinopy/elastalert2 | 27deb8a61dd48798c4686ec95d3e48909903a694 | [
"Apache-2.0"
] | null | null | null | import boto3
import os
from urllib.parse import parse_qsl, urlencode, urljoin, urlparse, urlsplit, urlunsplit
import requests
from requests import RequestException
from requests.auth import AuthBase, HTTPBasicAuth
from elastalert.auth import RefeshableAWSRequestsAuth
from elastalert.util import EAException
def append_security_tenant(url, security_tenant):
'''Appends the security_tenant query string parameter to the url'''
parsed = urlsplit(url)
if parsed.query:
qs = parse_qsl(parsed.query, keep_blank_values=True, strict_parsing=True)
else:
qs = []
qs.append(('security_tenant', security_tenant))
new_query = urlencode(qs)
new_args = parsed._replace(query=new_query)
new_url = urlunsplit(new_args)
return new_url
class KibanaExternalUrlFormatter:
'''Interface for formatting external Kibana urls'''
def format(self, relative_url: str) -> str:
raise NotImplementedError()
class AbsoluteKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
'''Formats absolute external Kibana urls'''
def __init__(self, base_url: str, security_tenant: str) -> None:
self.base_url = base_url
self.security_tenant = security_tenant
def format(self, relative_url: str) -> str:
url = urljoin(self.base_url, relative_url)
if self.security_tenant:
url = append_security_tenant(url, self.security_tenant)
return url
class ShortKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
'''Formats external urls using the Kibana Shorten URL API'''
def __init__(self, base_url: str, auth: AuthBase, security_tenant: str) -> None:
self.auth = auth
self.security_tenant = security_tenant
self.goto_url = urljoin(base_url, 'goto/')
shorten_url = urljoin(base_url, 'api/shorten_url')
if security_tenant:
shorten_url = append_security_tenant(shorten_url, security_tenant)
self.shorten_url = shorten_url
def format(self, relative_url: str) -> str:
# join with '/' to ensure relative to root of app
long_url = urljoin('/', relative_url)
if self.security_tenant:
long_url = append_security_tenant(long_url, self.security_tenant)
try:
response = requests.post(
url=self.shorten_url,
auth=self.auth,
headers={
'kbn-xsrf': 'elastalert',
'osd-xsrf': 'elastalert'
},
json={
'url': long_url
}
)
response.raise_for_status()
except RequestException as e:
raise EAException("Failed to invoke Kibana Shorten URL API: %s" % e)
response_body = response.json()
url_id = response_body.get('urlId')
goto_url = urljoin(self.goto_url, url_id)
if self.security_tenant:
goto_url = append_security_tenant(goto_url, self.security_tenant)
return goto_url
def create_kibana_auth(kibana_url, rule) -> AuthBase:
'''Creates a Kibana http authentication for use by requests'''
# Basic
username = rule.get('kibana_username')
password = rule.get('kibana_password')
if username and password:
return HTTPBasicAuth(username, password)
# AWS SigV4
aws_region = rule.get('aws_region')
if not aws_region:
aws_region = os.environ.get('AWS_DEFAULT_REGION')
if aws_region:
aws_profile = rule.get('profile')
session = boto3.session.Session(
profile_name=aws_profile,
region_name=aws_region
)
credentials = session.get_credentials()
kibana_host = urlparse(kibana_url).hostname
return RefeshableAWSRequestsAuth(
refreshable_credential=credentials,
aws_host=kibana_host,
aws_region=aws_region,
aws_service='es'
)
# Unauthenticated
return None
def create_kibana_external_url_formatter(
rule,
shorten: bool,
security_tenant: str
) -> KibanaExternalUrlFormatter:
'''Creates a Kibana external url formatter'''
base_url = rule.get('kibana_url')
if shorten:
auth = create_kibana_auth(base_url, rule)
return ShortKibanaExternalUrlFormatter(base_url, auth, security_tenant)
return AbsoluteKibanaExternalUrlFormatter(base_url, security_tenant)
| 31.913669 | 86 | 0.665014 | import boto3
import os
from urllib.parse import parse_qsl, urlencode, urljoin, urlparse, urlsplit, urlunsplit
import requests
from requests import RequestException
from requests.auth import AuthBase, HTTPBasicAuth
from elastalert.auth import RefeshableAWSRequestsAuth
from elastalert.util import EAException
def append_security_tenant(url, security_tenant):
parsed = urlsplit(url)
if parsed.query:
qs = parse_qsl(parsed.query, keep_blank_values=True, strict_parsing=True)
else:
qs = []
qs.append(('security_tenant', security_tenant))
new_query = urlencode(qs)
new_args = parsed._replace(query=new_query)
new_url = urlunsplit(new_args)
return new_url
class KibanaExternalUrlFormatter:
def format(self, relative_url: str) -> str:
raise NotImplementedError()
class AbsoluteKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
def __init__(self, base_url: str, security_tenant: str) -> None:
self.base_url = base_url
self.security_tenant = security_tenant
def format(self, relative_url: str) -> str:
url = urljoin(self.base_url, relative_url)
if self.security_tenant:
url = append_security_tenant(url, self.security_tenant)
return url
class ShortKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
def __init__(self, base_url: str, auth: AuthBase, security_tenant: str) -> None:
self.auth = auth
self.security_tenant = security_tenant
self.goto_url = urljoin(base_url, 'goto/')
shorten_url = urljoin(base_url, 'api/shorten_url')
if security_tenant:
shorten_url = append_security_tenant(shorten_url, security_tenant)
self.shorten_url = shorten_url
def format(self, relative_url: str) -> str:
long_url = urljoin('/', relative_url)
if self.security_tenant:
long_url = append_security_tenant(long_url, self.security_tenant)
try:
response = requests.post(
url=self.shorten_url,
auth=self.auth,
headers={
'kbn-xsrf': 'elastalert',
'osd-xsrf': 'elastalert'
},
json={
'url': long_url
}
)
response.raise_for_status()
except RequestException as e:
raise EAException("Failed to invoke Kibana Shorten URL API: %s" % e)
response_body = response.json()
url_id = response_body.get('urlId')
goto_url = urljoin(self.goto_url, url_id)
if self.security_tenant:
goto_url = append_security_tenant(goto_url, self.security_tenant)
return goto_url
def create_kibana_auth(kibana_url, rule) -> AuthBase:
username = rule.get('kibana_username')
password = rule.get('kibana_password')
if username and password:
return HTTPBasicAuth(username, password)
aws_region = rule.get('aws_region')
if not aws_region:
aws_region = os.environ.get('AWS_DEFAULT_REGION')
if aws_region:
aws_profile = rule.get('profile')
session = boto3.session.Session(
profile_name=aws_profile,
region_name=aws_region
)
credentials = session.get_credentials()
kibana_host = urlparse(kibana_url).hostname
return RefeshableAWSRequestsAuth(
refreshable_credential=credentials,
aws_host=kibana_host,
aws_region=aws_region,
aws_service='es'
)
return None
def create_kibana_external_url_formatter(
rule,
shorten: bool,
security_tenant: str
) -> KibanaExternalUrlFormatter:
base_url = rule.get('kibana_url')
if shorten:
auth = create_kibana_auth(base_url, rule)
return ShortKibanaExternalUrlFormatter(base_url, auth, security_tenant)
return AbsoluteKibanaExternalUrlFormatter(base_url, security_tenant)
| true | true |
1c463b00bcc93f690abe0126cebd12479e2b2c5d | 1,568 | py | Python | cirq/optimizers/drop_negligible.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | 1 | 2020-05-20T00:08:33.000Z | 2020-05-20T00:08:33.000Z | cirq/optimizers/drop_negligible.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | null | null | null | cirq/optimizers/drop_negligible.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that removes operations with tiny effects."""
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
"""An optimization pass that removes operations with tiny effects."""
def __init__(self, tolerance: float = 1e-8) -> None:
self.tolerance = tolerance
def optimize_circuit(self, circuit: _circuit.Circuit) -> None:
deletions = [] # type: List[Tuple[int, ops.Operation]]
for moment_index, moment in enumerate(circuit):
for op in moment.operations:
if (op is not None and
protocols.trace_distance_bound(op) <= self.tolerance):
deletions.append((moment_index, op))
circuit.batch_remove(deletions)
| 37.333333 | 78 | 0.714286 |
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
def __init__(self, tolerance: float = 1e-8) -> None:
self.tolerance = tolerance
def optimize_circuit(self, circuit: _circuit.Circuit) -> None:
deletions = []
for moment_index, moment in enumerate(circuit):
for op in moment.operations:
if (op is not None and
protocols.trace_distance_bound(op) <= self.tolerance):
deletions.append((moment_index, op))
circuit.batch_remove(deletions)
| true | true |
1c463cf1cadf9635379497394d42b7e870640036 | 5,782 | py | Python | egs/wenetspeech/ASR/local/text2token.py | zhu-han/icefall | 9f6c748b3098e3e32c704c27c40ec31f2e9d376c | [
"Apache-2.0"
] | null | null | null | egs/wenetspeech/ASR/local/text2token.py | zhu-han/icefall | 9f6c748b3098e3e32c704c27c40ec31f2e9d376c | [
"Apache-2.0"
] | null | null | null | egs/wenetspeech/ASR/local/text2token.py | zhu-han/icefall | 9f6c748b3098e3e32c704c27c40ec31f2e9d376c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (authors: Shinji Watanabe)
# 2022 Xiaomi Corp. (authors: Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import re
import sys
from typing import List
from pypinyin import lazy_pinyin, pinyin
is_python2 = sys.version_info[0] == 2
def exist_or_not(i, match_pos):
start_pos = None
end_pos = None
for pos in match_pos:
if pos[0] <= i < pos[1]:
start_pos = pos[0]
end_pos = pos[1]
break
return start_pos, end_pos
def get_parser():
parser = argparse.ArgumentParser(
description="convert raw text to tokenized text",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--nchar",
"-n",
default=1,
type=int,
help="number of characters to split, i.e., \
aabb -> a a b b with -n 1 and aa bb with -n 2",
)
parser.add_argument(
"--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
)
parser.add_argument(
"--space", default="<space>", type=str, help="space symbol"
)
parser.add_argument(
"--non-lang-syms",
"-l",
default=None,
type=str,
help="list of non-linguistic symobles, e.g., <NOISE> etc.",
)
parser.add_argument(
"text", type=str, default=False, nargs="?", help="input text"
)
parser.add_argument(
"--trans_type",
"-t",
type=str,
default="char",
choices=["char", "pinyin", "lazy_pinyin"],
help="""Transcript type. char/pinyin/lazy_pinyin""",
)
return parser
def token2id(
texts, token_table, token_type: str = "lazy_pinyin", oov: str = "<unk>"
) -> List[List[int]]:
"""Convert token to id.
Args:
texts:
The input texts, it refers to the chinese text here.
token_table:
The token table is built based on "data/lang_xxx/token.txt"
token_type:
The type of token, such as "pinyin" and "lazy_pinyin".
oov:
Out of vocabulary token. When a word(token) in the transcript
does not exist in the token list, it is replaced with `oov`.
Returns:
The list of ids for the input texts.
"""
if texts is None:
raise ValueError("texts can't be None!")
else:
oov_id = token_table[oov]
ids: List[List[int]] = []
for text in texts:
chars_list = list(str(text))
if token_type == "lazy_pinyin":
text = lazy_pinyin(chars_list)
sub_ids = [
token_table[txt] if txt in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
else: # token_type = "pinyin"
text = pinyin(chars_list)
sub_ids = [
token_table[txt[0]] if txt[0] in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
return ids
def main():
parser = get_parser()
args = parser.parse_args()
rs = []
if args.non_lang_syms is not None:
with codecs.open(args.non_lang_syms, "r", encoding="utf-8") as f:
nls = [x.rstrip() for x in f.readlines()]
rs = [re.compile(re.escape(x)) for x in nls]
if args.text:
f = codecs.open(args.text, encoding="utf-8")
else:
f = codecs.getreader("utf-8")(
sys.stdin if is_python2 else sys.stdin.buffer
)
sys.stdout = codecs.getwriter("utf-8")(
sys.stdout if is_python2 else sys.stdout.buffer
)
line = f.readline()
n = args.nchar
while line:
x = line.split()
print(" ".join(x[: args.skip_ncols]), end=" ")
a = " ".join(x[args.skip_ncols :]) # noqa E203
# get all matched positions
match_pos = []
for r in rs:
i = 0
while i >= 0:
m = r.search(a, i)
if m:
match_pos.append([m.start(), m.end()])
i = m.end()
else:
break
if len(match_pos) > 0:
chars = []
i = 0
while i < len(a):
start_pos, end_pos = exist_or_not(i, match_pos)
if start_pos is not None:
chars.append(a[start_pos:end_pos])
i = end_pos
else:
chars.append(a[i])
i += 1
a = chars
if args.trans_type == "pinyin":
a = pinyin(list(str(a)))
a = [one[0] for one in a]
if args.trans_type == "lazy_pinyin":
a = lazy_pinyin(list(str(a)))
a = [a[j : j + n] for j in range(0, len(a), n)] # noqa E203
a_flat = []
for z in a:
a_flat.append("".join(z))
a_chars = [z.replace(" ", args.space) for z in a_flat]
print("".join(a_chars))
line = f.readline()
if __name__ == "__main__":
main()
| 29.350254 | 78 | 0.5422 |
import argparse
import codecs
import re
import sys
from typing import List
from pypinyin import lazy_pinyin, pinyin
is_python2 = sys.version_info[0] == 2
def exist_or_not(i, match_pos):
start_pos = None
end_pos = None
for pos in match_pos:
if pos[0] <= i < pos[1]:
start_pos = pos[0]
end_pos = pos[1]
break
return start_pos, end_pos
def get_parser():
parser = argparse.ArgumentParser(
description="convert raw text to tokenized text",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--nchar",
"-n",
default=1,
type=int,
help="number of characters to split, i.e., \
aabb -> a a b b with -n 1 and aa bb with -n 2",
)
parser.add_argument(
"--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
)
parser.add_argument(
"--space", default="<space>", type=str, help="space symbol"
)
parser.add_argument(
"--non-lang-syms",
"-l",
default=None,
type=str,
help="list of non-linguistic symobles, e.g., <NOISE> etc.",
)
parser.add_argument(
"text", type=str, default=False, nargs="?", help="input text"
)
parser.add_argument(
"--trans_type",
"-t",
type=str,
default="char",
choices=["char", "pinyin", "lazy_pinyin"],
help="""Transcript type. char/pinyin/lazy_pinyin""",
)
return parser
def token2id(
texts, token_table, token_type: str = "lazy_pinyin", oov: str = "<unk>"
) -> List[List[int]]:
if texts is None:
raise ValueError("texts can't be None!")
else:
oov_id = token_table[oov]
ids: List[List[int]] = []
for text in texts:
chars_list = list(str(text))
if token_type == "lazy_pinyin":
text = lazy_pinyin(chars_list)
sub_ids = [
token_table[txt] if txt in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
else: # token_type = "pinyin"
text = pinyin(chars_list)
sub_ids = [
token_table[txt[0]] if txt[0] in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
return ids
def main():
parser = get_parser()
args = parser.parse_args()
rs = []
if args.non_lang_syms is not None:
with codecs.open(args.non_lang_syms, "r", encoding="utf-8") as f:
nls = [x.rstrip() for x in f.readlines()]
rs = [re.compile(re.escape(x)) for x in nls]
if args.text:
f = codecs.open(args.text, encoding="utf-8")
else:
f = codecs.getreader("utf-8")(
sys.stdin if is_python2 else sys.stdin.buffer
)
sys.stdout = codecs.getwriter("utf-8")(
sys.stdout if is_python2 else sys.stdout.buffer
)
line = f.readline()
n = args.nchar
while line:
x = line.split()
print(" ".join(x[: args.skip_ncols]), end=" ")
a = " ".join(x[args.skip_ncols :]) # noqa E203
# get all matched positions
match_pos = []
for r in rs:
i = 0
while i >= 0:
m = r.search(a, i)
if m:
match_pos.append([m.start(), m.end()])
i = m.end()
else:
break
if len(match_pos) > 0:
chars = []
i = 0
while i < len(a):
start_pos, end_pos = exist_or_not(i, match_pos)
if start_pos is not None:
chars.append(a[start_pos:end_pos])
i = end_pos
else:
chars.append(a[i])
i += 1
a = chars
if args.trans_type == "pinyin":
a = pinyin(list(str(a)))
a = [one[0] for one in a]
if args.trans_type == "lazy_pinyin":
a = lazy_pinyin(list(str(a)))
a = [a[j : j + n] for j in range(0, len(a), n)] # noqa E203
a_flat = []
for z in a:
a_flat.append("".join(z))
a_chars = [z.replace(" ", args.space) for z in a_flat]
print("".join(a_chars))
line = f.readline()
if __name__ == "__main__":
main()
| true | true |
1c463d061e46a0550d594d6f027f9723b5d225f9 | 43 | py | Python | streams/rewinder/__init__.py | adrn/streams | 6478d37309ba1dff4e13e8e46b93eafb4ef36431 | [
"MIT"
] | null | null | null | streams/rewinder/__init__.py | adrn/streams | 6478d37309ba1dff4e13e8e46b93eafb4ef36431 | [
"MIT"
] | null | null | null | streams/rewinder/__init__.py | adrn/streams | 6478d37309ba1dff4e13e8e46b93eafb4ef36431 | [
"MIT"
] | null | null | null | from .core import *
from .sampler import *
| 14.333333 | 22 | 0.72093 | from .core import *
from .sampler import *
| true | true |
1c463e35fe5e172b70142ced199c9afc204daeb5 | 662 | py | Python | main.py | wang-h/backend-app-fastapi-sqlite | c155229e7187e381457730a40a9d660c0e98440d | [
"MIT"
] | null | null | null | main.py | wang-h/backend-app-fastapi-sqlite | c155229e7187e381457730a40a9d660c0e98440d | [
"MIT"
] | null | null | null | main.py | wang-h/backend-app-fastapi-sqlite | c155229e7187e381457730a40a9d660c0e98440d | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI(
title=settings.PROJECT_NAME,
openapi_url="{}/openapi.json".format(settings.API_V1_STR)
)
# 设置跨域请求允许来源
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin)
for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=settings.API_V1_STR)
| 26.48 | 68 | 0.712991 | from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI(
title=settings.PROJECT_NAME,
openapi_url="{}/openapi.json".format(settings.API_V1_STR)
)
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin)
for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=settings.API_V1_STR)
| true | true |
1c463e704757405d935040c3c1db9e5051f1a01b | 3,160 | py | Python | src/python/WMCore/WMRuntime/ScriptInvoke.py | vkuznet/WMCore | 001cc51651052405a7ecd811cde91da611b1dc57 | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/WMRuntime/ScriptInvoke.py | vkuznet/WMCore | 001cc51651052405a7ecd811cde91da611b1dc57 | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/WMRuntime/ScriptInvoke.py | vkuznet/WMCore | 001cc51651052405a7ecd811cde91da611b1dc57 | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | #!/usr/bin/env python
"""
_ScriptInvoker_
Util to invoke a Runtime Script and provide it with access to the
various bits of the job that it will need to access via the WMTaskSpace
library
This script will be invoked at runtime from the directory & subshell
environment in which the Runtime Script implementation needs to be called.
"""
from __future__ import print_function
from builtins import object
import logging
import os
import sys
import traceback
import WMCore.WMRuntime.Bootstrap as Bootstrap
from WMCore.WMRuntime.ScriptFactory import getScript
class ScriptInvoke(object):
"""
_ScriptInvoke_
Ctor takes two arguments:
- module name of step module in WMTaskSpace
- module name of the Script implementation to be invoked
"""
def __init__(self, stepModule, scriptModule):
self.stepModule = stepModule
self.module = scriptModule
self.exitCode = 0
self.stepSpace = None
self.script = None
self.step = None
self.task = None
self.job = None
currentDir = os.getcwd()
Bootstrap.setupLogging(currentDir, useStdout=True)
logging.info("Invoking scripts in current directory: %s", currentDir)
def boot(self):
"""
_boot_
Import the Step Module & get the stepSpace object from it.
Get an instance of the Script from the Script Factory
"""
self.job = Bootstrap.loadJobDefinition()
self.task = Bootstrap.loadTask(self.job)
stepSpaceMod = __import__(self.stepModule,
globals(), locals(), ['stepSpace'], 0)
self.stepSpace = stepSpaceMod.stepSpace
self.step = self.task.getStep(self.stepSpace.stepName)
self.script = getScript(scriptModule)
self.script.task = self.task
self.script.step = self.step
self.script.job = self.job
self.script.stepSpace = self.stepSpace
def invoke(self):
"""
_invoke_
call the Script implementation
"""
self.exitCode = self.script()
def exit(self):
return self.exitCode
if __name__ == '__main__':
try:
stepModule = sys.argv[1]
scriptModule = sys.argv[2]
except Exception as ex:
msg = "Usage: ScriptInvoke.py <Step Module> <Script Module>"
raise RuntimeError(msg)
invoker = ScriptInvoke(stepModule, scriptModule)
try:
invoker.boot()
except Exception as ex:
msg = "Error booting script invoker for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
try:
invoker.invoke()
except Exception as ex:
msg = "Error invoking script for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
sys.exit(invoker.exit())
| 26.115702 | 77 | 0.631013 |
from __future__ import print_function
from builtins import object
import logging
import os
import sys
import traceback
import WMCore.WMRuntime.Bootstrap as Bootstrap
from WMCore.WMRuntime.ScriptFactory import getScript
class ScriptInvoke(object):
def __init__(self, stepModule, scriptModule):
self.stepModule = stepModule
self.module = scriptModule
self.exitCode = 0
self.stepSpace = None
self.script = None
self.step = None
self.task = None
self.job = None
currentDir = os.getcwd()
Bootstrap.setupLogging(currentDir, useStdout=True)
logging.info("Invoking scripts in current directory: %s", currentDir)
def boot(self):
self.job = Bootstrap.loadJobDefinition()
self.task = Bootstrap.loadTask(self.job)
stepSpaceMod = __import__(self.stepModule,
globals(), locals(), ['stepSpace'], 0)
self.stepSpace = stepSpaceMod.stepSpace
self.step = self.task.getStep(self.stepSpace.stepName)
self.script = getScript(scriptModule)
self.script.task = self.task
self.script.step = self.step
self.script.job = self.job
self.script.stepSpace = self.stepSpace
def invoke(self):
self.exitCode = self.script()
def exit(self):
return self.exitCode
if __name__ == '__main__':
try:
stepModule = sys.argv[1]
scriptModule = sys.argv[2]
except Exception as ex:
msg = "Usage: ScriptInvoke.py <Step Module> <Script Module>"
raise RuntimeError(msg)
invoker = ScriptInvoke(stepModule, scriptModule)
try:
invoker.boot()
except Exception as ex:
msg = "Error booting script invoker for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
try:
invoker.invoke()
except Exception as ex:
msg = "Error invoking script for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
sys.exit(invoker.exit())
| true | true |
1c463f729b93fc1c6c11094196c373a30b659ac2 | 15,805 | py | Python | libs/remote_exec.py | dedwards-tech/fio-tools | 7d328cff9ba3c5e67c3df23ffb064361e64eac06 | [
"BSD-2-Clause"
] | null | null | null | libs/remote_exec.py | dedwards-tech/fio-tools | 7d328cff9ba3c5e67c3df23ffb064361e64eac06 | [
"BSD-2-Clause"
] | null | null | null | libs/remote_exec.py | dedwards-tech/fio-tools | 7d328cff9ba3c5e67c3df23ffb064361e64eac06 | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/python
# This is a script to simplify the creation of and use of an SSH connection between
# a proxyvm (Linux) and an ESXi host (SSH shell). It allows for command line execution
# and parsing of output without depending on the use of SSH key exchange.
#
# The parmiko library provides the helpers to process SSH connections and requests, and
# this script provides a wrapper for command line usage as well as library usage.
#
# There are also ESXi shell command helpers for doing common things like getting and setting
# kernel or advanced host settings. It is expected that this list grows over time to make
# it easier to share common ESXi shell command protocols.
#
"""
Utilities for makeing ssh connections to an ESXi host.
"""
import argparse
import paramiko
import yaml
import string
import os, sys
from time import sleep
from time import time
from scp import SCPClient
import threading;
# Defaults can be overridden via the command line.
CFG_DEF_TARGET_USER = "root";
CFG_DEF_TARGET_PWD = "pass!Q@W#E";
def AddArgs(parser_obj):
parser_obj.add_argument('-s', '--server', dest='CfgHostAddr', action='store', nargs='*', required=True, help='Remote host name or IP address; or list of hosts separated by spaces.');
parser_obj.add_argument('-u', '--user', dest='CfgUserName', action='store', required=False, default=CFG_DEF_TARGET_USER, help='ESXi host (SSH) user name (root).');
parser_obj.add_argument('-p', '--pwd', dest='CfgUserPwd', action='store', required=False, default=CFG_DEF_TARGET_PWD, help='ESXi (SSH) user password (root).');
def GetArgs():
""";
Supports the command-line arguments listed below.;
""";
# create the top-level parser
parser = argparse.ArgumentParser(description='Remote execution library - input arguments.');
AddArgs(parser);
# parse the args and call whatever function was selected
args = parser.parse_args();
return args;
#############################################
class SvrRemoteControl:
def __init__(self, host_name, user_name, password, option=None, auto_connect=True, exit_on_error=True):
self.HostName = host_name;
self.UserName = user_name;
self.Password = password;
# temp dae: I don't see a local use for this, why is this here?
self.Option = None;
#end dae
self.ConnectOnline = False;
# Create SSH connection to target_ip...
self.Client = paramiko.SSHClient();
self.ScpConn = None;
self.Client.set_missing_host_key_policy(paramiko.AutoAddPolicy());
self.Client.load_system_host_keys();
if (auto_connect):
self.connect(exit_on_error=exit_on_error);
def __ssh_exec_cmd__(self, cmd_str):
if (self.ConnectOnline):
try:
stdin, stdout, stderr = self.Client.exec_command(cmd_str);
stdin.close();
chk_err = string.join(stderr.readlines(), "");
if (len(chk_err) > 0):
return [chk_err, 1]
except:
# Failed to execute request, return error
str_out = "ERR: failed to execute SSH request:\n%s" % (cmd_str);
return [str_out, 2];
else:
print "ERR: no valid connection to host %s" % (self.HostName);
# Success
str_out = string.join(stdout.readlines(), "");
return [str_out, 0];
# connect - initial connection request to a specified host.
#
# Inputs:
# - exit_on_error will raise an exception if set to true, return
# 1 error code by default.
#
def connect(self, exit_on_error=False):
self.ConnectOnline = False;
try:
self.Client.connect(self.HostName, username=self.UserName, password=self.Password);
self.ScpConn = SCPClient(self.Client.get_transport())
self.ConnectOnline = True;
return 0; # success
except:
if (exit_on_error):
print "ERR: cannot connect to %s, exiting!" % (self.HostName);
raise SystemExit(1);
return 1; # failure
# connect_retry - connect with retry if a timeout occurs.
# Inputs:
# - retry_count (disabled default) - number of times to retry connection
# before failing.
# - retry_delay (30s default) - delay in seconds between retries.
# - signal "exit" with code 1 (enabled by default).
#
def connect_retry(self, retry_count=0, retry_delay=30, exit_on_error=False):
num_retries = 0;
for ii in range(0, retry_count + 1):
if (self.connect(exit_on_error) == 0):
return 0; # success
else:
num_retries += 1;
if (ii <= retry_count):
# sleep between retries.
sleep(retry_delay);
self.ConnectOnline = False;
if (exit_on_error):
print "ERR: Timeout, max retries %s, attempting to reconnect to host %s" % (num_retries, self.HostName);
raise SystemExit(1); # failure, raise exit exception
return 1; # timeout waiting
# rexec - remotely execute the command.
#
def rexec(self, cmd_str):
if (True):
out_str, err_code = self.__ssh_exec_cmd__(cmd_str);
else:
print "Connection to host %s not established." % (self.HostName);
err_code = 0;
out_str = "";
return [ err_code, out_str ];
# rexec_v - (verbose) remotely execute the command by displaying response text.
#
def rexec_v(self, cmd_str):
if (True):
out_str, err_code = self.__ssh_exec_cmd__(cmd_str);
else:
err_code = 0;
out_str = "";
if (out_str != ""):
print cmd_str + "\nReturned:\n" + out_str;
else:
print cmd_str + "\n";
return [ err_code, out_str ];
    def put_file(self, local_file, remote_file):
        # Upload 'local_file' to 'remote_file' on the remote host via SCP.
        # Returns 0 on success, 1 when no SCP connection is established.
        e_code = 1;
        print "Copy %s to remote host at %s" % (local_file, remote_file);
        if (self.ScpConn is not None):
            # scp the requested 'local_file' to the remote host file...
            self.ScpConn.put(local_file, remote_file);
            sys.stdout.flush();
            e_code = 0;
        else:
            print "ERR: uninitialized SCP connection with host %s" % (self.HostName);
        return e_code;
def get_file(self, remote_file, local_file):
print "Download %s from remote host at %s" % (remote_file, local_file);
if (self.ScpConn is not None):
# scp the requested 'local_file' to the remote host file...
self.ScpConn.get(remote_file, local_file);
sys.stdout.flush();
# waitfor_shutdown - does NOT send shutdown command, rather does some magic to wait
# for a host to stop responding to an execution request on a valid
# and active connection.
#
    def waitfor_shutdown(self, quiet=False):
        # Poll the host with a trivial remote command until it stops
        # responding, i.e. until a shutdown initiated elsewhere has
        # actually taken effect. Always returns 0.
        now = start = time();
        while True:
            e_code, out_str = self.rexec("ping -c 1 localhost");
            if (e_code != 0):
                # exit while loop
                break;
            # Delay for 10 seconds, then try again.
            sleep(10);
            now = time() - start;
            if (not quiet):
                print "\r  *waiting for shutdown, %d(s)..." % (now);
                sys.stdout.flush();
        now = time() - start;
        print "\r  host shutdown took %d seconds." % (now);
        sys.stdout.flush();
        return 0;
# waitfor_bootup - assumes the system is actually booting, i.e. not supporting
# connection attempts. will wait for a host connection to
# be established to assume it has actually completed boot.
#
    def waitfor_bootup(self, quiet=False):
        # Wait for a rebooting host to come back by retrying the SSH
        # connection; returns the connect_retry() result (0 on success).
        print "Waiting for host bootup...";
        start = time();
        # There doesn't appear to be a timeout for waiting for a connection...
        e_code = self.connect_retry(retry_count=10);
        now = time() - start;
        if ((e_code == 0) and (not quiet)):
            print "\r  host boot took %d minutes." % (now / 60);
            sys.stdout.flush();
        else:
            print "\r  ERR: connection to host during boot failed, timeout perhaps?";
        return e_code;
# reboot - for a connected host, issue reboot command, wait for the host to
# stop responding to new commands (so we know reboot has actually
# initiated), then wait for the host to boot back up.
#
    def reboot(self):
        # Issue "reboot" on the connected host, wait for it to go down,
        # then block until it is reachable again. Returns the error code
        # from the remote "reboot" command.
        print"Sending request for reboot...";
        e_code, out_str = self.rexec("reboot");
        sys.stdout.flush();
        if (e_code == 0):
            self.waitfor_shutdown();
            # Give the system a little longer to ensure the shell doesn't re-connect
            # too quickly following the actual shutdown.
            print "  delaying 30s to ensure shutdown is well underway...";
            sys.stdout.flush();
            sleep(30);
            self.waitfor_bootup();
        else:
            print "ERR: could not reboot host, something is wrong!";
            sys.stdout.flush();
        return e_code;
# reboot_async - for a connected host, issue reboot command, wait for the host to
# stop responding to new commands (so we know reboot has actually
# initiated), then exit.
#
    def reboot_async(self):
        # Like reboot(), but returns once the host has gone down instead of
        # waiting for it to come back up.
        print"Sending request for reboot (async)...";
        e_code, out_str = self.rexec("reboot");
        sys.stdout.flush();
        if (e_code == 0):
            self.waitfor_shutdown();
            # Give the system a little longer to ensure the shell doesn't re-connect
            # too quickly following the actual shutdown.
            print "  delaying 30s to ensure shutdown is well underway...";
            sys.stdout.flush();
        else:
            print "ERR: could not reboot host, something is wrong!";
        return e_code;
def is_connected(self):
return (self.ConnectOnline == True);
    def close(self):
        # Tear down the SSH session (and drop the SCP channel) if one is
        # active, then mark the object as offline.
        if (self.ConnectOnline):
            self.Client.close();
            self.ScpConn = None;
        self.ConnectOnline = False;
#############################################
# SvrRemoteThread - allows executing of a remote command in a thread. This class will
# always create a new connection per thread!
#
class SvrRemoteThread(threading.Thread):
    # Executes 'thread_fn(rc, params)' on its own SSH connection; results
    # are published via self.RetCode / self.OutStr after run() completes.
    def __init__(self, host_name, user_name, password, thread_fn):
        threading.Thread.__init__(self);
        self.RetCode = -1;
        self.OutStr = '<not started>';
        if (thread_fn is None):
            # NOTE(review): bailing out here leaves ThreadFn/RC undefined,
            # so a subsequent run() would raise AttributeError -- confirm
            # callers never start a thread constructed without thread_fn.
            print "ERR: no thread function specified in SvrRemoteThread init.";
            return;
        self.ThreadFn = thread_fn;
        self.Parameters = {};
        self.RC = SvrRemoteControl(host_name, user_name, password, auto_connect=True, exit_on_error=False);
        if (not self.RC.is_connected()):
            print "ERR: could not establish a connection with host %s" % (host_name);
    def setParams(self, params_list):
        # Optional parameters handed to thread_fn when the thread runs.
        self.Parameters = params_list;
    def run(self):
        if ((self.ThreadFn is None) or (not self.RC.is_connected())):
            return;
        self.RetCode = 0;
        self.OutStr = '<started>';
        # Execute thread functionality
        self.RetCode, self.OutStr = self.ThreadFn(self.RC, self.Parameters);
    def disconnect(self):
        # close the connection and exit.
        self.RC.close();
# SvrRemoteThreadBase - setup the context for passing to a thread when it executes.
#
class SvrRemoteThreadBase(threading.Thread):
def __init__(self, host_name, user_name, user_pwd):
threading.Thread.__init__(self);
self.RetCode = -1;
self.OutStr = '<not started>';
self.RC = SvrRemoteControl(host_name, user_name, user_pwd, auto_connect=True, exit_on_error=False);
if (not self.RC.is_connected()):
print "ERR: could not establish a connection with host %s" % (host_name);
# create the context to pass to the thread_fn upon start.
self.Context = {};
self.Context['rc'] = self.RC;
self.Context['host'] = host_name;
self.Context['user_name'] = user_name;
self.Context['user_pwd'] = user_pwd;
self.Context['params'] = {};
@classmethod
def ThreadFn(self, context):
return [ 0, "(not implemented)" ];
def setParams(params_list):
self.Context['params'] = params_list;
def run(self):
if ((self.ThreadFn is None) or (not self.RC.is_connected())):
return;
self.RetCode = 0;
self.OutStr = '<started>';
# Execute thread functionality
self.RetCode, self.OutStr = self.ThreadFn(self.Context);
def disconnect(self):
# close the connection and exit.
self.RC.close();
#############################################
class ExecAdvancedSetting:
    # Renders "esxcli system settings advanced" command lines for a given
    # advanced-option path (e.g. "/Disk/QFullSampleSize").
    _BASE = "esxcli system settings advanced"

    def __init__(self, option_path):
        self.OptionPath = option_path

    def SetInt(self, option_value):
        # Command that assigns an integer value to the option.
        return "%s set -o %s --int-value %s" % (self._BASE, self.OptionPath, option_value)

    def SetStr(self, option_value):
        # Command that assigns a string value to the option.
        return "%s set -o %s --string-value %s" % (self._BASE, self.OptionPath, option_value)

    def Get(self):
        # Command that lists the current value of the option.
        return "%s list -o %s" % (self._BASE, self.OptionPath)
class ExecKernelSetting:
    # Renders "esxcli system settings kernel" command lines for a given
    # kernel option path (e.g. "enablePCIEHotplug").
    def __init__(self, option_path):
        self.OptionPath = option_path

    def SetValue(self, option_value):
        # Command that assigns 'option_value' to the kernel option.
        return "esxcli system settings kernel set -o %s -v %s" % (
            self.OptionPath,
            option_value,
        )

    def Get(self):
        # Command that lists the current value of the kernel option.
        return "esxcli system settings kernel list -o %s" % (self.OptionPath,)
#############################################
# Determine how we were instantiated (command line, or included)
# Unit-test entry point: this tail only runs when the module is executed
# directly from the command line, not when it is imported as a library.
CFG_FROM_CMD_LINE = False;
if (sys.argv[0] == __file__):
    CFG_FROM_CMD_LINE = True;
if (CFG_FROM_CMD_LINE):
    # We were launched from the command line so execute a test workload, only on the first
    # host in the list; this could easily be adapted to work on each host in the list but is
    # not necessary for the "unit test" purpose of this basic functionality.
    args = GetArgs();
    # Test non-threaded class
    rc = SvrRemoteControl(args.CfgHostAddr[0], args.CfgUserName, args.CfgUserPwd);
    if (rc is not None):
        e_code, out_str = rc.rexec("ping -c 1 localhost");
        if (e_code == 0):
            print "SvrRemoteControl: unit test successful";
            #print "ls:\n%s\n" % (out_str);
        else:
            print "ERR: failed to execute simple remote command in SvrRemoteControl unit test.";
            print out_str;
    else:
        e_code = 1;
        print "ERR: failed to instantiage SvrRemoteControl class on server %s" % (args.CfgHostAddr[0]);
    if (e_code == 0):
        # Exercise the esxcli command builders against the live host.
        cmd_str = ExecKernelSetting("enablePCIEHotplug").Get();
        e_code, out_str = rc.rexec(cmd_str);
        print "  ~ # %s\n%s" % (cmd_str, out_str);
        cmd_str = ExecAdvancedSetting("/Disk/QFullSampleSize").Get();
        e_code, out_str = rc.rexec(cmd_str);
        print "  ~ # %s\n%s" % (cmd_str, out_str);
        cmd_str = ExecAdvancedSetting("/Disk/QFullThreshold").Get();
        e_code, out_str = rc.rexec(cmd_str);
        print "  ~ # %s\n%s" % (cmd_str, out_str);
        cmd_str = ExecAdvancedSetting("/Disk/SchedNumReqOutstanding").Get();
        e_code, out_str = rc.rexec(cmd_str);
        print "  ~ # %s\n%s" % (cmd_str, out_str);
        # Test file put operations
        # - first test changing name of local file to a folder + file name.
        # - second test copying a local file to a folder.
        rc.put_file(__file__, '/scratch/delme.txt');
        rc.rexec_v("cat /scratch/delme.txt");
        rc.put_file(__file__, '/scratch/');
        rc.rexec_v("cat /scratch/%s" % (__file__));
        # Test file get operation
        rc.get_file("/scratch/delme.txt", "/tmp/");
        # remove misc target files for next run...
        rc.rexec("rm /scratch/delme.txt");
        rc.rexec("rm /scratch/%s" % (__file__));
        # System is connected and powered on, test reboot.
        #rc.reboot();
    rc.close();
    raise SystemExit(e_code);
| 37.188235 | 186 | 0.624739 |
"""
Utilities for makeing ssh connections to an ESXi host.
"""
import argparse
import paramiko
import yaml
import string
import os, sys
from time import sleep
from time import time
from scp import SCPClient
import threading;
CFG_DEF_TARGET_USER = "root";
CFG_DEF_TARGET_PWD = "pass!Q@W#E";
def AddArgs(parser_obj):
parser_obj.add_argument('-s', '--server', dest='CfgHostAddr', action='store', nargs='*', required=True, help='Remote host name or IP address; or list of hosts separated by spaces.');
parser_obj.add_argument('-u', '--user', dest='CfgUserName', action='store', required=False, default=CFG_DEF_TARGET_USER, help='ESXi host (SSH) user name (root).');
parser_obj.add_argument('-p', '--pwd', dest='CfgUserPwd', action='store', required=False, default=CFG_DEF_TARGET_PWD, help='ESXi (SSH) user password (root).');
def GetArgs():
""";
Supports the command-line arguments listed below.;
""";
parser = argparse.ArgumentParser(description='Remote execution library - input arguments.');
AddArgs(parser);
args = parser.parse_args();
return args;
return error
str_out = "ERR: failed to execute SSH request:\n%s" % (cmd_str);
return [str_out, 2];
else:
print "ERR: no valid connection to host %s" % (self.HostName);
# Success
str_out = string.join(stdout.readlines(), "");
return [str_out, 0];
# connect - initial connection request to a specified host.
#
# Inputs:
# - exit_on_error will raise an exception if set to true, return
# 1 error code by default.
#
def connect(self, exit_on_error=False):
self.ConnectOnline = False;
try:
self.Client.connect(self.HostName, username=self.UserName, password=self.Password);
self.ScpConn = SCPClient(self.Client.get_transport())
self.ConnectOnline = True;
return 0; # success
except:
if (exit_on_error):
print "ERR: cannot connect to %s, exiting!" % (self.HostName);
raise SystemExit(1);
return 1; # failure
# connect_retry - connect with retry if a timeout occurs.
# Inputs:
# - retry_count (disabled default) - number of times to retry connection
# before failing.
# - retry_delay (30s default) - delay in seconds between retries.
# - signal "exit" with code 1 (enabled by default).
#
def connect_retry(self, retry_count=0, retry_delay=30, exit_on_error=False):
num_retries = 0;
for ii in range(0, retry_count + 1):
if (self.connect(exit_on_error) == 0):
return 0; # success
else:
num_retries += 1;
if (ii <= retry_count):
# sleep between retries.
sleep(retry_delay);
self.ConnectOnline = False;
if (exit_on_error):
print "ERR: Timeout, max retries %s, attempting to reconnect to host %s" % (num_retries, self.HostName);
raise SystemExit(1); # failure, raise exit exception
return 1; # timeout waiting
# rexec - remotely execute the command.
#
def rexec(self, cmd_str):
if (True):
out_str, err_code = self.__ssh_exec_cmd__(cmd_str);
else:
print "Connection to host %s not established." % (self.HostName);
err_code = 0;
out_str = "";
return [ err_code, out_str ];
# rexec_v - (verbose) remotely execute the command by displaying response text.
#
def rexec_v(self, cmd_str):
if (True):
out_str, err_code = self.__ssh_exec_cmd__(cmd_str);
else:
err_code = 0;
out_str = "";
if (out_str != ""):
print cmd_str + "\nReturned:\n" + out_str;
else:
print cmd_str + "\n";
return [ err_code, out_str ];
def put_file(self, local_file, remote_file):
e_code = 1;
print "Copy %s to remote host at %s" % (local_file, remote_file);
if (self.ScpConn is not None):
# scp the requested 'local_file' to the remote host file...
self.ScpConn.put(local_file, remote_file);
sys.stdout.flush();
e_code = 0;
else:
print "ERR: uninitialized SCP connection with host %s" % (self.HostName);
return e_code;
def get_file(self, remote_file, local_file):
print "Download %s from remote host at %s" % (remote_file, local_file);
if (self.ScpConn is not None):
# scp the requested 'local_file' to the remote host file...
self.ScpConn.get(remote_file, local_file);
sys.stdout.flush();
# waitfor_shutdown - does NOT send shutdown command, rather does some magic to wait
# for a host to stop responding to an execution request on a valid
# and active connection.
#
def waitfor_shutdown(self, quiet=False):
now = start = time();
while True:
e_code, out_str = self.rexec("ping -c 1 localhost");
if (e_code != 0):
# exit while loop
break;
# Delay for 10 seconds, then try again.
sleep(10);
now = time() - start;
if (not quiet):
print "\r *waiting for shutdown, %d(s)..." % (now);
sys.stdout.flush();
now = time() - start;
print "\r host shutdown took %d seconds." % (now);
sys.stdout.flush();
return 0;
# waitfor_bootup - assumes the system is actually booting, i.e. not supporting
# connection attempts. will wait for a host connection to
# be established to assume it has actually completed boot.
#
def waitfor_bootup(self, quiet=False):
print "Waiting for host bootup...";
start = time();
# There doesn't appear to be a timeout for waiting for a connection...
e_code = self.connect_retry(retry_count=10);
now = time() - start;
if ((e_code == 0) and (not quiet)):
print "\r host boot took %d minutes." % (now / 60);
sys.stdout.flush();
else:
print "\r ERR: connection to host during boot failed, timeout perhaps?";
return e_code;
def reboot(self):
print"Sending request for reboot...";
e_code, out_str = self.rexec("reboot");
sys.stdout.flush();
if (e_code == 0):
self.waitfor_shutdown();
# too quickly following the actual shutdown.
print " delaying 30s to ensure shutdown is well underway...";
sys.stdout.flush();
sleep(30);
self.waitfor_bootup();
else:
print "ERR: could not reboot host, something is wrong!";
sys.stdout.flush();
return e_code;
# reboot_async - for a connected host, issue reboot command, wait for the host to
# stop responding to new commands (so we know reboot has actually
# initiated), then exit.
#
def reboot_async(self):
print"Sending request for reboot (async)...";
e_code, out_str = self.rexec("reboot");
sys.stdout.flush();
if (e_code == 0):
self.waitfor_shutdown();
# Give the system a little longer to ensure the shell doesn't re-connect
print " delaying 30s to ensure shutdown is well underway...";
sys.stdout.flush();
else:
print "ERR: could not reboot host, something is wrong!";
return e_code;
def is_connected(self):
return (self.ConnectOnline == True);
def close(self):
if (self.ConnectOnline):
self.Client.close();
self.ScpConn = None;
self.ConnectOnline = False;
self.RC.close();
class SvrRemoteThreadBase(threading.Thread):
def __init__(self, host_name, user_name, user_pwd):
threading.Thread.__init__(self);
self.RetCode = -1;
self.OutStr = '<not started>';
self.RC = SvrRemoteControl(host_name, user_name, user_pwd, auto_connect=True, exit_on_error=False);
if (not self.RC.is_connected()):
print "ERR: could not establish a connection with host %s" % (host_name);
self.Context = {};
self.Context['rc'] = self.RC;
self.Context['host'] = host_name;
self.Context['user_name'] = user_name;
self.Context['user_pwd'] = user_pwd;
self.Context['params'] = {};
@classmethod
def ThreadFn(self, context):
return [ 0, "(not implemented)" ];
def setParams(params_list):
self.Context['params'] = params_list;
def run(self):
if ((self.ThreadFn is None) or (not self.RC.is_connected())):
return;
self.RetCode = 0;
self.OutStr = '<started>';
self.RetCode, self.OutStr = self.ThreadFn(self.Context);
def disconnect(self):
self.RC.close();
cmd_str = ExecAdvancedSetting("/Disk/SchedNumReqOutstanding").Get();
e_code, out_str = rc.rexec(cmd_str);
print " ~ # %s\n%s" % (cmd_str, out_str);
rc.put_file(__file__, '/scratch/delme.txt');
rc.rexec_v("cat /scratch/delme.txt");
rc.put_file(__file__, '/scratch/');
rc.rexec_v("cat /scratch/%s" % (__file__));
rc.get_file("/scratch/delme.txt", "/tmp/");
rc.rexec("rm /scratch/delme.txt");
rc.rexec("rm /scratch/%s" % (__file__));
rc.close();
raise SystemExit(e_code);
| false | true |
1c4640c71ced2b43dbfbe2cdd9de56a41d3e64a9 | 100,233 | py | Python | superset/views/core.py | Altizon/incubator-superset | e55fe43ca67a29518674a1a2137a3dbd4f166864 | [
"Apache-2.0"
] | null | null | null | superset/views/core.py | Altizon/incubator-superset | e55fe43ca67a29518674a1a2137a3dbd4f166864 | [
"Apache-2.0"
] | 5 | 2021-02-02T22:53:35.000Z | 2022-03-29T22:28:22.000Z | superset/views/core.py | mhassant/apache-superset-multi-tenancy | e55fe43ca67a29518674a1a2137a3dbd4f166864 | [
"Apache-2.0"
] | 2 | 2017-12-20T02:44:05.000Z | 2018-02-09T07:19:49.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import logging
import re
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, cast, Dict, List, Optional, Union
from urllib import parse
import backoff
import msgpack
import pandas as pd
import pyarrow as pa
import simplejson as json
from flask import abort, flash, g, Markup, redirect, render_template, request, Response
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_appbuilder.security.sqla import models as ab_models
from flask_babel import gettext as __, lazy_gettext as _
from sqlalchemy import and_, Integer, or_, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.session import Session
from werkzeug.urls import Href
import superset.models.core as models
from superset import (
app,
appbuilder,
cache,
conf,
dataframe,
db,
event_logger,
get_feature_flags,
is_feature_enabled,
result_set,
results_backend,
results_backend_use_msgpack,
security_manager,
sql_lab,
talisman,
viz,
)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource
from superset.constants import RouteMethod
from superset.exceptions import (
DatabaseNotFound,
SupersetException,
SupersetSecurityException,
SupersetTimeoutException,
)
from superset.jinja_context import get_template_processor
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query, TabState
from superset.models.user_attributes import UserAttribute
from superset.sql_parse import ParsedQuery
from superset.sql_validators import get_validator_by_name
from superset.utils import core as utils, dashboard_import_export
from superset.utils.dates import now_as_float
from superset.utils.decorators import etag_cache, stats_timing
from superset.views.database.filters import DatabaseFilter
from .base import (
api,
BaseSupersetView,
check_ownership,
common_bootstrap_payload,
CsvResponse,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
SupersetModelView,
)
from .utils import (
apply_display_max_row_limit,
bootstrap_user_data,
get_datasource_info,
get_form_data,
get_viz,
)
# Module-level shortcuts into the Flask application configuration.
config = app.config
CACHE_DEFAULT_TIMEOUT = config["CACHE_DEFAULT_TIMEOUT"]
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"]
stats_logger = config["STATS_LOGGER"]
# Short aliases used throughout this module.
DAR = DatasourceAccessRequest
QueryStatus = utils.QueryStatus
logger = logging.getLogger(__name__)
# Database attributes exposed to the frontend.
DATABASE_KEYS = [
    "allow_csv_upload",
    "allow_ctas",
    "allow_dml",
    "allow_multi_schema_metadata_fetch",
    "allow_run_async",
    "allows_subquery",
    "backend",
    "database_name",
    "expose_in_sqllab",
    "force_ctas_schema",
    "id",
]
# Localized error messages shared by several view handlers below.
ALL_DATASOURCE_ACCESS_ERR = __(
    "This endpoint requires the `all_datasource_access` permission"
)
DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted")
ACCESS_REQUEST_MISSING_ERR = __("The access requests seem to have been deleted")
USER_MISSING_ERR = __("The user seems to have been deleted")
# When JavaScript controls are disabled, these form-data keys are stripped
# so user-supplied JS never reaches the chart renderer.
FORM_DATA_KEY_BLACKLIST: List[str] = []
if not config["ENABLE_JAVASCRIPT_CONTROLS"]:
    FORM_DATA_KEY_BLACKLIST = ["js_tooltip", "js_onclick_href", "js_data_mutator"]
def get_database_access_error_msg(database_name):
    """Return the localized error shown when access to a database is missing."""
    message = (
        "This view requires the database %(name)s or "
        "`all_datasource_access` permission"
    )
    return __(message, name=database_name)
def is_owner(obj, user):
    """ Check if user is owner of the slice """
    if not obj:
        # Preserve the original short-circuit semantics: a falsy object
        # (e.g. None) is returned as-is rather than coerced to False.
        return obj
    return user in obj.owners
def check_datasource_perms(
    self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> None:
    """
    Check if user can access a cached response from explore_json.

    This function takes `self` since it must have the same signature as the
    the decorated method.

    :param datasource_type: The datasource type, i.e., 'druid' or 'table'
    :param datasource_id: The datasource ID
    :raises SupersetSecurityException: If the user cannot access the resource
    """
    form_data = get_form_data()[0]
    try:
        # The submitted form data may supply/override the datasource
        # coordinates passed in the URL.
        datasource_id, datasource_type = get_datasource_info(
            datasource_id, datasource_type, form_data
        )
    except SupersetException as e:
        # Re-raise lookup failures as security errors so the caller denies
        # access instead of serving a cached payload.
        raise SupersetSecurityException(str(e))
    viz_obj = get_viz(
        datasource_type=datasource_type,
        datasource_id=datasource_id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_viz_permission(viz_obj)
def check_slice_perms(self, slice_id):
    """
    Check if user can access a cached response from slice_json.

    This function takes `self` since it must have the same signature as the
    the decorated method.
    """
    form_data, slc = get_form_data(slice_id, use_slice_data=True)
    datasource = slc.datasource
    # Build the viz for the slice's datasource and assert access on it.
    security_manager.assert_viz_permission(
        get_viz(
            datasource_type=datasource.type,
            datasource_id=datasource.id,
            form_data=form_data,
            force=False,
        )
    )
def _deserialize_results_payload(
    payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False
) -> dict:
    """Deserialize a SQL Lab results-backend payload into a dict.

    When ``use_msgpack`` is set, the payload is msgpack-encoded with the
    tabular data serialized as a PyArrow table; otherwise it is plain JSON.
    """
    logger.debug(f"Deserializing from msgpack: {use_msgpack}")
    if use_msgpack:
        with stats_timing(
            "sqllab.query.results_backend_msgpack_deserialize", stats_logger
        ):
            ds_payload = msgpack.loads(payload, raw=False)

        with stats_timing("sqllab.query.results_backend_pa_deserialize", stats_logger):
            pa_table = pa.deserialize(ds_payload["data"])

        df = result_set.SupersetResultSet.convert_table_to_df(pa_table)
        ds_payload["data"] = dataframe.df_to_records(df) or []

        # Expand nested/structured columns per the database engine's rules.
        db_engine_spec = query.database.db_engine_spec
        all_columns, data, expanded_columns = db_engine_spec.expand_data(
            ds_payload["selected_columns"], ds_payload["data"]
        )
        ds_payload.update(
            {"data": data, "columns": all_columns, "expanded_columns": expanded_columns}
        )

        return ds_payload
    else:
        with stats_timing(
            "sqllab.query.results_backend_json_deserialize", stats_logger
        ):
            return json.loads(payload)  # type: ignore
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
    """CRUD view listing pending datasource access requests."""

    datamodel = SQLAInterface(DAR)
    include_route_methods = RouteMethod.CRUD_SET
    list_columns = [
        "username",
        "user_roles",
        "datasource_link",
        "roles_with_datasource",
        "created_on",
    ]
    order_columns = ["created_on"]
    base_order = ("changed_on", "desc")
    label_columns = {
        "username": _("User"),
        "user_roles": _("User Roles"),
        "database": _("Database URL"),
        "datasource_link": _("Datasource"),
        "roles_with_datasource": _("Roles to grant"),
        "created_on": _("Created On"),
    }
@talisman(force_https=False)
@app.route("/health")
def health():
    """Liveness probe endpoint; always returns plain 'OK'."""
    return "OK"
@talisman(force_https=False)
@app.route("/healthcheck")
def healthcheck():
    """Alternate liveness probe endpoint; always returns plain 'OK'."""
    return "OK"
@talisman(force_https=False)
@app.route("/ping")
def ping():
    """Alternate liveness probe endpoint; always returns plain 'OK'."""
    return "OK"
class KV(BaseSupersetView):

    """Used for storing and retrieving key value pairs"""

    @event_logger.log_this
    @has_access_api
    @expose("/store/", methods=["POST"])
    def store(self):
        # Persist the posted "data" form field as a new KeyValue row and
        # return its generated id as JSON.
        try:
            value = request.form.get("data")
            obj = models.KeyValue(value=value)
            db.session.add(obj)
            db.session.commit()
        except Exception as e:
            return json_error_response(e)
        return Response(json.dumps({"id": obj.id}), status=200)

    @event_logger.log_this
    @has_access_api
    @expose("/<key_id>/", methods=["GET"])
    def get_value(self, key_id):
        # Fetch the stored value for key_id; 404 when the key is unknown.
        try:
            kv = db.session.query(models.KeyValue).filter_by(id=key_id).scalar()
            if not kv:
                return Response(status=404, content_type="text/plain")
        except Exception as e:
            return json_error_response(e)
        return Response(kv.value, status=200, content_type="text/plain")
class R(BaseSupersetView):

    """used for short urls"""

    @event_logger.log_this
    @expose("/<url_id>")
    def index(self, url_id):
        # Resolve a short-url id to its stored target and redirect there.
        url = db.session.query(models.Url).get(url_id)
        if url and url.url:
            explore_url = "//superset/explore/?"
            if url.url.startswith(explore_url):
                # For explore links, redirect with a compact "r=<id>" param
                # instead of replaying the full stored query string.
                explore_url += f"r={url_id}"
                return redirect(explore_url[1:])
            else:
                return redirect(url.url[1:])
        else:
            flash("URL to nowhere...", "danger")
            return redirect("/")

    @event_logger.log_this
    @has_access_api
    @expose("/shortner/", methods=["POST"])
    def shortner(self):
        # Store the posted URL and return the absolute short link for it.
        url = request.form.get("data")
        obj = models.Url(url=url)
        db.session.add(obj)
        db.session.commit()
        return Response(
            "{scheme}://{request.headers[Host]}/r/{obj.id}".format(
                scheme=request.scheme, request=request, obj=obj
            ),
            mimetype="text/plain",
        )
class Superset(BaseSupersetView):
"""The base views for Superset!"""
logger = logging.getLogger(__name__)
@has_access_api
@expose("/datasources/")
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources if o.short_data.get("name")]
datasources = sorted(datasources, key=lambda o: o["name"])
return self.json_response(datasources)
    @has_access_api
    @expose("/override_role_permissions/", methods=["POST"])
    def override_role_permissions(self):
        """Updates the role with the give datasource permissions.

          Permissions not in the request will be revoked. This endpoint should
          be available to admins only. Expects JSON in the format:
           {
            'role_name': '{role_name}',
            'database': [{
                'datasource_type': '{table|druid}',
                'name': '{database_name}',
                'schema': [{
                    'name': '{schema_name}',
                    'datasources': ['{datasource name}, {datasource name}']
                }]
            }]
        }
        """
        data = request.get_json(force=True)
        role_name = data["role_name"]
        databases = data["database"]

        db_ds_names = set()
        # Build the set of fully-qualified datasource names requested.
        for dbs in databases:
            for schema in dbs["schema"]:
                for ds_name in schema["datasources"]:
                    fullname = utils.get_datasource_full_name(
                        dbs["name"], ds_name, schema=schema["name"]
                    )
                    db_ds_names.add(fullname)

        existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
        datasources = [d for d in existing_datasources if d.full_name in db_ds_names]
        role = security_manager.find_role(role_name)
        # remove all permissions
        role.permissions = []
        # grant permissions to the list of datasources
        granted_perms = []
        for datasource in datasources:
            view_menu_perm = security_manager.find_permission_view_menu(
                view_menu_name=datasource.perm, permission_name="datasource_access"
            )
            # prevent creating empty permissions
            if view_menu_perm and view_menu_perm.view_menu:
                role.permissions.append(view_menu_perm)
                granted_perms.append(view_menu_perm.view_menu.name)
        db.session.commit()
        return self.json_response(
            {"granted": granted_perms, "requested": list(db_ds_names)}, status=201
        )
    @event_logger.log_this
    @has_access
    @expose("/request_access/")
    def request_access(self):
        """Render (or act on) the datasource access-request page.

        Collects the datasources referenced by the request -- either all of
        a dashboard's datasources or one specific datasource -- and either
        redirects (already accessible), files access requests ("go"), or
        renders the confirmation template.
        """
        datasources = set()
        dashboard_id = request.args.get("dashboard_id")
        if dashboard_id:
            dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()
            datasources |= dash.datasources
        datasource_id = request.args.get("datasource_id")
        datasource_type = request.args.get("datasource_type")
        if datasource_id:
            ds_class = ConnectorRegistry.sources.get(datasource_type)
            datasource = (
                db.session.query(ds_class).filter_by(id=int(datasource_id)).one()
            )
            datasources.add(datasource)

        # True when the user can already access every referenced datasource.
        has_access = all(
            (
                datasource and security_manager.datasource_access(datasource)
                for datasource in datasources
            )
        )
        if has_access:
            return redirect("/superset/dashboard/{}".format(dashboard_id))

        if request.args.get("action") == "go":
            # File one access request per datasource on the user's behalf.
            for datasource in datasources:
                access_request = DAR(
                    datasource_id=datasource.id, datasource_type=datasource.type
                )
                db.session.add(access_request)
                db.session.commit()
            flash(__("Access was requested"), "info")
            return redirect("/")

        return self.render_template(
            "superset/request_access.html",
            datasources=datasources,
            datasource_names=", ".join([o.name for o in datasources]),
        )
    @event_logger.log_this
    @has_access
    @expose("/approve")
    def approve(self):
        """Approve a datasource access request.

        Grants ``role_to_grant`` to the requesting user and/or extends
        ``role_to_extend`` with the datasource permission, notifies the
        requester by email, then deletes the fulfilled request rows.
        """

        def clean_fulfilled_requests(session):
            # Drop any request whose datasource is gone or is now
            # accessible to its requester.
            for r in session.query(DAR).all():
                datasource = ConnectorRegistry.get_datasource(
                    r.datasource_type, r.datasource_id, session
                )
                if not datasource or security_manager.datasource_access(datasource):
                    # datasource does not exist anymore
                    session.delete(r)
            session.commit()

        datasource_type = request.args.get("datasource_type")
        datasource_id = request.args.get("datasource_id")
        created_by_username = request.args.get("created_by")
        role_to_grant = request.args.get("role_to_grant")
        role_to_extend = request.args.get("role_to_extend")

        session = db.session
        datasource = ConnectorRegistry.get_datasource(
            datasource_type, datasource_id, session
        )

        if not datasource:
            flash(DATASOURCE_MISSING_ERR, "alert")
            return json_error_response(DATASOURCE_MISSING_ERR)

        requested_by = security_manager.find_user(username=created_by_username)
        if not requested_by:
            flash(USER_MISSING_ERR, "alert")
            return json_error_response(USER_MISSING_ERR)

        requests = (
            session.query(DAR)
            .filter(
                DAR.datasource_id == datasource_id,
                DAR.datasource_type == datasource_type,
                DAR.created_by_fk == requested_by.id,
            )
            .all()
        )

        if not requests:
            flash(ACCESS_REQUEST_MISSING_ERR, "alert")
            return json_error_response(ACCESS_REQUEST_MISSING_ERR)

        # check if you can approve
        if security_manager.all_datasource_access() or check_ownership(
            datasource, raise_if_false=False
        ):
            # can by done by admin only
            if role_to_grant:
                role = security_manager.find_role(role_to_grant)
                requested_by.roles.append(role)
                msg = __(
                    "%(user)s was granted the role %(role)s that gives access "
                    "to the %(datasource)s",
                    user=requested_by.username,
                    role=role_to_grant,
                    datasource=datasource.full_name,
                )
                utils.notify_user_about_perm_udate(
                    g.user,
                    requested_by,
                    role,
                    datasource,
                    "email/role_granted.txt",
                    app.config,
                )
                flash(msg, "info")

            if role_to_extend:
                perm_view = security_manager.find_permission_view_menu(
                    "email/datasource_access", datasource.perm
                )
                role = security_manager.find_role(role_to_extend)
                security_manager.add_permission_role(role, perm_view)
                msg = __(
                    "Role %(r)s was extended to provide the access to "
                    "the datasource %(ds)s",
                    r=role_to_extend,
                    ds=datasource.full_name,
                )
                utils.notify_user_about_perm_udate(
                    g.user,
                    requested_by,
                    role,
                    datasource,
                    "email/role_extended.txt",
                    app.config,
                )
                flash(msg, "info")
            clean_fulfilled_requests(session)
        else:
            flash(__("You have no permission to approve this request"), "danger")
            return redirect("/accessrequestsmodelview/list/")
        for r in requests:
            session.delete(r)
        session.commit()
        return redirect("/accessrequestsmodelview/list/")
    def get_viz(
        self,
        slice_id=None,
        form_data=None,
        datasource_type=None,
        datasource_id=None,
        force=False,
    ):
        # Build a viz object either from a stored slice (when slice_id is
        # given, its own saved configuration wins) or directly from
        # form_data plus explicit datasource coordinates.
        if slice_id:
            slc = db.session.query(Slice).filter_by(id=slice_id).one()
            return slc.get_viz()
        else:
            viz_type = form_data.get("viz_type", "table")
            datasource = ConnectorRegistry.get_datasource(
                datasource_type, datasource_id, db.session
            )
            viz_obj = viz.viz_types[viz_type](
                datasource, form_data=form_data, force=force
            )
            return viz_obj
@has_access
@expose("/slice/<slice_id>/")
def slice(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
abort(404)
endpoint = "/superset/explore/?form_data={}".format(
parse.quote(json.dumps({"slice_id": slice_id}))
)
param = utils.ReservedUrlParameters.STANDALONE.value
if request.args.get(param) == "true":
endpoint += f"&{param}=true"
return redirect(endpoint)
def get_query_string_response(self, viz_obj):
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as e:
logger.exception(e)
return json_error_response(e)
if not query:
query = "No query."
return self.json_response(
{"query": query, "language": viz_obj.datasource.query_language}
)
def get_raw_results(self, viz_obj):
return self.json_response(
{"data": viz_obj.get_df_payload()["df"].to_dict("records")}
)
def get_samples(self, viz_obj):
return self.json_response({"data": viz_obj.get_samples()})
def generate_json(
self, viz_obj, csv=False, query=False, results=False, samples=False
):
if csv:
return CsvResponse(
viz_obj.get_csv(),
status=200,
headers=generate_download_headers("csv"),
mimetype="application/csv",
)
if query:
return self.get_query_string_response(viz_obj)
if results:
return self.get_raw_results(viz_obj)
if samples:
return self.get_samples(viz_obj)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
@event_logger.log_this
@api
@has_access_api
@expose("/slice_json/<slice_id>")
@etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms)
def slice_json(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
return self.generate_json(viz_obj)
@event_logger.log_this
@api
@has_access_api
@expose("/annotation_json/<layer_id>")
def annotation_json(self, layer_id):
form_data = get_form_data()[0]
form_data["layer_id"] = layer_id
form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=False)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
EXPLORE_JSON_METHODS = ["POST"]
if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"):
EXPLORE_JSON_METHODS.append("GET")
@event_logger.log_this
@api
@has_access_api
@handle_api_exception
@expose(
"/explore_json/<datasource_type>/<datasource_id>/", methods=EXPLORE_JSON_METHODS
)
@expose("/explore_json/", methods=EXPLORE_JSON_METHODS)
@etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms)
def explore_json(self, datasource_type=None, datasource_id=None):
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
csv = request.args.get("csv") == "true"
query = request.args.get("query") == "true"
results = request.args.get("results") == "true"
samples = request.args.get("samples") == "true"
force = request.args.get("force") == "true"
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException as e:
return json_error_response(utils.error_msg_from_exception(e))
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(
viz_obj, csv=csv, query=query, results=results, samples=samples
)
@event_logger.log_this
@has_access
@expose("/import_dashboards", methods=["GET", "POST"])
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get("file")
if request.method == "POST" and f:
try:
dashboard_import_export.import_dashboards(db.session, f.stream)
except DatabaseNotFound as e:
flash(
_(
"Cannot import dashboard: %(db_error)s.\n"
"Make sure to create the database before "
"importing the dashboard.",
db_error=e,
),
"danger",
)
except Exception as e:
logger.exception(e)
flash(
_(
"An unknown error occurred. "
"Please contact your Superset administrator"
),
"danger",
)
return redirect("/dashboard/list/")
return self.render_template("superset/import_dashboards.html")
    @event_logger.log_this
    @has_access
    @expose("/explore/<datasource_type>/<datasource_id>/", methods=["GET", "POST"])
    @expose("/explore/", methods=["GET", "POST"])
    def explore(self, datasource_type=None, datasource_id=None):
        """Render the chart explorer, or dispatch a save/overwrite action.

        The datasource may be supplied in the URL or embedded in the
        posted form_data. Performs access checks, shows the SIP-15
        time-range migration toast where applicable, and delegates
        "saveas"/"overwrite" actions to save_or_overwrite_slice().
        """
        user_id = g.user.get_id() if g.user else None
        form_data, slc = get_form_data(use_slice_data=True)
        # Flash the SIP-15 message if the slice is owned by the current user and has not
        # been updated, i.e., is not using the [start, end) interval.
        if (
            config["SIP_15_ENABLED"]
            and slc
            and g.user in slc.owners
            and (
                not form_data.get("time_range_endpoints")
                or form_data["time_range_endpoints"]
                != (
                    utils.TimeRangeEndpoint.INCLUSIVE,
                    utils.TimeRangeEndpoint.EXCLUSIVE,
                )
            )
        ):
            # Link that reopens the chart with the new endpoints pre-set.
            url = Href("/superset/explore/")(
                {
                    "form_data": json.dumps(
                        {
                            "slice_id": slc.id,
                            "time_range_endpoints": (
                                utils.TimeRangeEndpoint.INCLUSIVE.value,
                                utils.TimeRangeEndpoint.EXCLUSIVE.value,
                            ),
                        }
                    )
                }
            )
            flash(Markup(config["SIP_15_TOAST_MESSAGE"].format(url=url)))
        error_redirect = "/chart/list/"
        try:
            datasource_id, datasource_type = get_datasource_info(
                datasource_id, datasource_type, form_data
            )
        except SupersetException:
            return redirect(error_redirect)
        datasource = ConnectorRegistry.get_datasource(
            datasource_type, datasource_id, db.session
        )
        if not datasource:
            flash(DATASOURCE_MISSING_ERR, "danger")
            return redirect(error_redirect)
        # Optionally route users lacking access through the access-request flow.
        if config["ENABLE_ACCESS_REQUEST"] and (
            not security_manager.datasource_access(datasource)
        ):
            flash(
                __(security_manager.get_datasource_access_error_msg(datasource)),
                "danger",
            )
            return redirect(
                "superset/request_access/?"
                f"datasource_type={datasource_type}&"
                f"datasource_id={datasource_id}&"
            )
        viz_type = form_data.get("viz_type")
        if not viz_type and datasource.default_endpoint:
            return redirect(datasource.default_endpoint)
        # slc perms
        slice_add_perm = security_manager.can_access("can_add", "SliceModelView")
        slice_overwrite_perm = is_owner(slc, g.user)
        slice_download_perm = security_manager.can_access(
            "can_download", "SliceModelView"
        )
        form_data["datasource"] = str(datasource_id) + "__" + datasource_type
        # On explore, merge legacy and extra filters into the form data
        utils.convert_legacy_filters_into_adhoc(form_data)
        utils.merge_extra_filters(form_data)
        # merge request url params
        if request.method == "GET":
            utils.merge_request_params(form_data, request.args)
        # handle save or overwrite
        action = request.args.get("action")
        if action == "overwrite" and not slice_overwrite_perm:
            return json_error_response(
                _("You don't have the rights to ") + _("alter this ") + _("chart"),
                status=400,
            )
        if action == "saveas" and not slice_add_perm:
            return json_error_response(
                _("You don't have the rights to ") + _("create a ") + _("chart"),
                status=400,
            )
        if action in ("saveas", "overwrite"):
            return self.save_or_overwrite_slice(
                request.args,
                slc,
                slice_add_perm,
                slice_overwrite_perm,
                slice_download_perm,
                datasource_id,
                datasource_type,
                datasource.name,
            )
        standalone = (
            request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
        )
        # Payload consumed by the explore frontend app.
        bootstrap_data = {
            "can_add": slice_add_perm,
            "can_download": slice_download_perm,
            "can_overwrite": slice_overwrite_perm,
            "datasource": datasource.data,
            "form_data": form_data,
            "datasource_id": datasource_id,
            "datasource_type": datasource_type,
            "slice": slc.data if slc else None,
            "standalone": standalone,
            "user_id": user_id,
            "forced_height": request.args.get("height"),
            "common": common_bootstrap_payload(),
        }
        table_name = (
            datasource.table_name
            if datasource_type == "table"
            else datasource.datasource_name
        )
        if slc:
            title = slc.slice_name
        else:
            title = _("Explore - %(table)s", table=table_name)
        return self.render_template(
            "superset/basic.html",
            bootstrap_data=json.dumps(
                bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
            ),
            entry="explore",
            title=title,
            standalone_mode=standalone,
        )
@api
@handle_api_exception
@has_access_api
@expose("/filter/<datasource_type>/<datasource_id>/<column>/")
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
payload = json.dumps(
datasource.values_for_column(column, config["FILTER_SELECT_ROW_LIMIT"]),
default=utils.json_int_dttm_ser,
)
return json_success(payload)
def save_or_overwrite_slice(
self,
args,
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource_name,
):
"""Save or overwrite a slice"""
slice_name = args.get("slice_name")
action = args.get("action")
form_data = get_form_data()[0]
if action in ("saveas"):
if "slice_id" in form_data:
form_data.pop("slice_id") # don't save old slice_id
slc = Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data["viz_type"]
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ("saveas") and slice_add_perm:
self.save_slice(slc)
elif action == "overwrite" and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get("add_to_dash") == "existing":
dash = (
db.session.query(Dashboard)
.filter_by(id=int(request.args.get("save_to_dashboard_id")))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("alter this ")
+ _("dashboard"),
status=400,
)
flash(
_("Chart [{}] was added to dashboard [{}]").format(
slc.slice_name, dash.dashboard_title
),
"info",
)
elif request.args.get("add_to_dash") == "new":
# check create dashboard permissions
dash_add_perm = security_manager.can_access("can_add", "DashboardModelView")
if not dash_add_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("create a ")
+ _("dashboard"),
status=400,
)
dash = Dashboard(
dashboard_title=request.args.get("new_dashboard_name"),
owners=[g.user] if g.user else [],
)
flash(
_(
"Dashboard [{}] just got created and chart [{}] was added " "to it"
).format(dash.dashboard_title, slc.slice_name),
"info",
)
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": is_owner(slc, g.user),
"form_data": slc.form_data,
"slice": slc.data,
"dashboard_id": dash.id if dash else None,
}
if request.args.get("goto_dash") == "true":
response.update({"dashboard": dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = _("Chart [{}] has been saved").format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, "info")
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = _("Chart [{}] has been overwritten").format(slc.slice_name)
flash(msg, "info")
@api
@has_access_api
@expose("/schemas/<db_id>/")
@expose("/schemas/<db_id>/<force_refresh>/")
def schemas(self, db_id, force_refresh="false"):
db_id = int(db_id)
force_refresh = force_refresh.lower() == "true"
database = db.session.query(models.Database).get(db_id)
if database:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=force_refresh,
)
schemas = security_manager.schemas_accessible_by_user(database, schemas)
else:
schemas = []
return Response(json.dumps({"schemas": schemas}), mimetype="application/json")
    @api
    @has_access_api
    @expose("/tables/<int:db_id>/<schema>/<substr>/")
    @expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/")
    def tables(
        self, db_id: int, schema: str, substr: str, force_refresh: str = "false"
    ):
        """Endpoint to fetch the list of tables for given database

        :param db_id: database id
        :param schema: schema name from the URL; "undefined"/"null" are
            treated as absent by parse_js_uri_path_item
        :param substr: substring to filter labels by (same absent handling)
        :param force_refresh: "true" bypasses the table-name cache
        :return: JSON with the total count and select2-style options
        """
        # Guarantees database filtering by security access
        query = db.session.query(models.Database)
        query = DatabaseFilter("id", SQLAInterface(models.Database, db.session)).apply(
            query, None
        )
        database = query.filter_by(id=db_id).one_or_none()
        if not database:
            return json_error_response("Not found", 404)
        force_refresh_parsed = force_refresh.lower() == "true"
        schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)
        substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)
        if schema_parsed:
            # Scoped to one schema: honor the per-database cache settings.
            tables = (
                database.get_all_table_names_in_schema(
                    schema=schema_parsed,
                    force=force_refresh_parsed,
                    cache=database.table_cache_enabled,
                    cache_timeout=database.table_cache_timeout,
                )
                or []
            )
            views = (
                database.get_all_view_names_in_schema(
                    schema=schema_parsed,
                    force=force_refresh_parsed,
                    cache=database.table_cache_enabled,
                    cache_timeout=database.table_cache_timeout,
                )
                or []
            )
        else:
            # Whole database: always served from a long-lived (24h) cache.
            tables = database.get_all_table_names_in_database(
                cache=True, force=False, cache_timeout=24 * 60 * 60
            )
            views = database.get_all_view_names_in_database(
                cache=True, force=False, cache_timeout=24 * 60 * 60
            )
        tables = security_manager.get_datasources_accessible_by_user(
            database, tables, schema_parsed
        )
        views = security_manager.get_datasources_accessible_by_user(
            database, views, schema_parsed
        )

        def get_datasource_label(ds_name: utils.DatasourceName) -> str:
            # Qualify with the schema only when no single schema was requested.
            return (
                ds_name.table if schema_parsed else f"{ds_name.schema}.{ds_name.table}"
            )

        if substr_parsed:
            tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]
            views = [vn for vn in views if substr_parsed in get_datasource_label(vn)]
        if not schema_parsed and database.default_schemas:
            # Restrict to the configured default schemas plus the user's own
            # schema (derived from the email local-part).
            user_schema = g.user.email.split("@")[0]
            valid_schemas = set(database.default_schemas + [user_schema])
            tables = [tn for tn in tables if tn.schema in valid_schemas]
            views = [vn for vn in views if vn.schema in valid_schemas]
        # Trim tables and views proportionally so their total stays within
        # MAX_TABLE_NAMES (only when a substring filter was applied).
        max_items = config["MAX_TABLE_NAMES"] or len(tables)
        total_items = len(tables) + len(views)
        max_tables = len(tables)
        max_views = len(views)
        if total_items and substr_parsed:
            max_tables = max_items * len(tables) // total_items
            max_views = max_items * len(views) // total_items
        table_options = [
            {
                "value": tn.table,
                "schema": tn.schema,
                "label": get_datasource_label(tn),
                "title": get_datasource_label(tn),
                "type": "table",
            }
            for tn in tables[:max_tables]
        ]
        table_options.extend(
            [
                {
                    "value": vn.table,
                    "schema": vn.schema,
                    "label": get_datasource_label(vn),
                    "title": get_datasource_label(vn),
                    "type": "view",
                }
                for vn in views[:max_views]
            ]
        )
        table_options.sort(key=lambda value: value["label"])
        payload = {"tableLength": len(tables) + len(views), "options": table_options}
        return json_success(json.dumps(payload))
    @api
    @has_access_api
    @expose("/copy_dash/<dashboard_id>/", methods=["GET", "POST"])
    def copy_dash(self, dashboard_id):
        """Copy dashboard

        Expects a JSON "data" form field with the new dashboard_title, a
        duplicate_slices flag and the layout positions. When
        duplicate_slices is set, each slice is cloned and the layout's
        chart ids are remapped to the clones; otherwise the copy shares
        the original's slices.
        """
        session = db.session()
        data = json.loads(request.form.get("data"))
        dash = models.Dashboard()
        original_dash = session.query(Dashboard).get(dashboard_id)
        dash.owners = [g.user] if g.user else []
        dash.dashboard_title = data["dashboard_title"]
        if data["duplicate_slices"]:
            # Duplicating slices as well, mapping old ids to new ones
            old_to_new_sliceids = {}
            for slc in original_dash.slices:
                new_slice = slc.clone()
                new_slice.owners = [g.user] if g.user else []
                # flush() assigns the new slice's id before it is mapped below.
                session.add(new_slice)
                session.flush()
                new_slice.dashboards.append(dash)
                old_to_new_sliceids["{}".format(slc.id)] = "{}".format(new_slice.id)
            # update chartId of layout entities
            # in v2_dash positions json data, chartId should be integer,
            # while in older version slice_id is string type
            for value in data["positions"].values():
                if (
                    isinstance(value, dict)
                    and value.get("meta")
                    and value.get("meta").get("chartId")
                ):
                    old_id = "{}".format(value.get("meta").get("chartId"))
                    new_id = int(old_to_new_sliceids[old_id])
                    value["meta"]["chartId"] = new_id
        else:
            dash.slices = original_dash.slices
            dash.params = original_dash.params
        self._set_dash_metadata(dash, data)
        session.add(dash)
        session.commit()
        dash_json = json.dumps(dash.data)
        session.close()
        return json_success(dash_json)
@api
@has_access_api
@expose("/save_dash/<dashboard_id>/", methods=["GET", "POST"])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get("data"))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({"status": "SUCCESS"}))
    @staticmethod
    def _set_dash_metadata(dashboard, data):
        """Apply client-posted layout and metadata onto *dashboard*.

        Mutates the model in place (slices, position_json, css, title and
        json_metadata); the caller is responsible for committing.

        :param dashboard: Dashboard model instance to mutate
        :param data: dict with "positions" (layout), "dashboard_title" and
            optional "css", "filter_scopes", "expanded_slices",
            "refresh_frequency", "default_filters" and color settings
        """
        positions = data["positions"]
        # find slices in the position data
        slice_ids = []
        slice_id_to_name = {}
        for value in positions.values():
            if isinstance(value, dict):
                try:
                    slice_id = value["meta"]["chartId"]
                    slice_ids.append(slice_id)
                    slice_id_to_name[slice_id] = value["meta"]["sliceName"]
                except KeyError:
                    # Non-chart layout entities have no chartId/sliceName.
                    pass
        session = db.session()
        current_slices = session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
        dashboard.slices = current_slices
        # update slice names. this assumes user has permissions to update the slice
        # we allow user set slice name be empty string
        for slc in dashboard.slices:
            try:
                new_name = slice_id_to_name[slc.id]
                if slc.slice_name != new_name:
                    slc.slice_name = new_name
                    session.merge(slc)
                    session.flush()
            except KeyError:
                pass
        # remove leading and trailing white spaces in the dumped json
        dashboard.position_json = json.dumps(
            positions, indent=None, separators=(",", ":"), sort_keys=True
        )
        md = dashboard.params_dict
        dashboard.css = data.get("css")
        dashboard.dashboard_title = data["dashboard_title"]
        if "timed_refresh_immune_slices" not in md:
            md["timed_refresh_immune_slices"] = []
        if "filter_scopes" in data:
            md["filter_scopes"] = json.loads(data["filter_scopes"] or "{}")
        md["expanded_slices"] = data["expanded_slices"]
        md["refresh_frequency"] = data.get("refresh_frequency", 0)
        # Keep only default filters that target slices still on the dashboard.
        default_filters_data = json.loads(data.get("default_filters", "{}"))
        applicable_filters = {
            key: v for key, v in default_filters_data.items() if int(key) in slice_ids
        }
        md["default_filters"] = json.dumps(applicable_filters)
        if data.get("color_namespace"):
            md["color_namespace"] = data.get("color_namespace")
        if data.get("color_scheme"):
            md["color_scheme"] = data.get("color_scheme")
        if data.get("label_colors"):
            md["label_colors"] = data.get("label_colors")
        dashboard.json_metadata = json.dumps(md)
@api
@has_access_api
@expose("/add_slices/<dashboard_id>/", methods=["POST"])
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get("data"))
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"]))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return "SLICES ADDED"
    @api
    @has_access_api
    @expose("/testconn", methods=["POST", "GET"])
    def testconn(self):
        """Tests a sqla connection

        Expects a JSON body with "uri" and optionally "name" (an existing
        database name), "extras", "encrypted_extra" and "impersonate_user".
        Returns '"OK"' on success, or a 400 with the driver error message.
        """
        try:
            db_name = request.json.get("name")
            uri = request.json.get("uri")
            # if the database already exists in the database, only its safe (password-masked) URI
            # would be shown in the UI and would be passed in the form data.
            # so if the database already exists and the form was submitted with the safe URI,
            # we assume we should retrieve the decrypted URI to test the connection.
            if db_name:
                existing_database = (
                    db.session.query(models.Database)
                    .filter_by(database_name=db_name)
                    .one_or_none()
                )
                if existing_database and uri == existing_database.safe_sqlalchemy_uri():
                    uri = existing_database.sqlalchemy_uri_decrypted
            # this is the database instance that will be tested
            database = models.Database(
                # extras is sent as json, but required to be a string in the Database model
                extra=json.dumps(request.json.get("extras", {})),
                impersonate_user=request.json.get("impersonate_user"),
                encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})),
            )
            database.set_sqlalchemy_uri(uri)
            username = g.user.username if g.user is not None else None
            engine = database.get_sqla_engine(user_name=username)
            # SELECT 1 round-trip proves the connection works; closing()
            # guarantees the connection is released either way.
            with closing(engine.connect()) as conn:
                conn.scalar(select([1]))
                return json_success('"OK"')
        except Exception as e:
            logger.exception(e)
            return json_error_response(
                "Connection failed!\n\n" f"The error message returned was:\n{e}", 400
            )
@api
@has_access_api
@expose("/recent_activity/<user_id>/", methods=["GET"])
def recent_activity(self, user_id):
"""Recent activity (actions) for a given user"""
M = models
if request.args.get("limit"):
limit = int(request.args.get("limit"))
else:
limit = 1000
qry = (
db.session.query(M.Log, M.Dashboard, Slice)
.outerjoin(M.Dashboard, M.Dashboard.id == M.Log.dashboard_id)
.outerjoin(Slice, Slice.id == M.Log.slice_id)
.filter(
and_(
~M.Log.action.in_(("queries", "shortner", "sql_json")),
M.Log.user_id == user_id,
)
)
.order_by(M.Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
if log.Dashboard:
item_url = log.Dashboard.url
item_title = log.Dashboard.dashboard_title
elif log.Slice:
item_url = log.Slice.slice_url
item_title = log.Slice.slice_name
payload.append(
{
"action": log.Log.action,
"item_url": item_url,
"item_title": item_title,
"time": log.Log.dttm,
}
)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/csrf_token/", methods=["GET"])
def csrf_token(self):
return Response(
self.render_template("superset/csrf_token.json"), mimetype="text/json"
)
@api
@has_access_api
@expose("/available_domains/", methods=["GET"])
def available_domains(self):
"""
Returns the list of available Superset Webserver domains (if any)
defined in config. This enables charts embedded in other apps to
leverage domain sharding if appropriately configured.
"""
return Response(
json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json"
)
@api
@has_access_api
@expose("/fave_dashboards_by_username/<username>/", methods=["GET"])
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@expose("/fave_dashboards/<user_id>/", methods=["GET"])
def fave_dashboards(self, user_id):
qry = (
db.session.query(Dashboard, models.FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "Dashboard",
Dashboard.id == models.FavStar.obj_id,
),
)
.order_by(models.FavStar.dttm.desc())
)
payload = []
for o in qry.all():
d = {
"id": o.Dashboard.id,
"dashboard": o.Dashboard.dashboard_link(),
"title": o.Dashboard.dashboard_title,
"url": o.Dashboard.url,
"dttm": o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
d["creator"] = str(user)
d["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(d)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_dashboards/<user_id>/", methods=["GET"])
def created_dashboards(self, user_id):
Dash = Dashboard
qry = (
db.session.query(Dash)
.filter(or_(Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id))
.order_by(Dash.changed_on.desc())
)
payload = [
{
"id": o.id,
"dashboard": o.dashboard_link(),
"title": o.dashboard_title,
"url": o.url,
"dttm": o.changed_on,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/user_slices", methods=["GET"])
@expose("/user_slices/<user_id>/", methods=["GET"])
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
FavStar = models.FavStar
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "slice",
Slice.id == models.FavStar.obj_id,
),
isouter=True,
)
.filter(
or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
)
)
.order_by(Slice.slice_name.asc())
)
payload = [
{
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"data": o.Slice.form_data,
"dttm": o.dttm if o.dttm else o.Slice.changed_on,
"viz_type": o.Slice.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_slices", methods=["GET"])
@expose("/created_slices/<user_id>/", methods=["GET"])
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice)
.filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))
.order_by(Slice.changed_on.desc())
)
payload = [
{
"id": o.id,
"title": o.slice_name,
"url": o.slice_url,
"dttm": o.changed_on,
"viz_type": o.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/fave_slices", methods=["GET"])
@expose("/fave_slices/<user_id>/", methods=["GET"])
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice, models.FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "slice",
Slice.id == models.FavStar.obj_id,
),
)
.order_by(models.FavStar.dttm.desc())
)
payload = []
for o in qry.all():
d = {
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"dttm": o.dttm,
"viz_type": o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d["creator"] = str(user)
d["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(d)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/warm_up_cache/", methods=["GET"])
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get("slice_id")
table_name = request.args.get("table_name")
db_name = request.args.get("db_name")
if not slice_id and not (table_name and db_name):
return json_error_response(
__(
"Malformed request. slice_id or table_name and db_name "
"arguments are expected"
),
status=400,
)
if slice_id:
slices = session.query(Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(
__("Chart %(id)s not found", id=slice_id), status=404
)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources["table"]
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
models.Database.database_name == db_name
or SqlaTable.table_name == table_name
)
).one_or_none()
if not table:
return json_error_response(
__(
"Table %(t)s wasn't found in the database %(d)s",
t=table_name,
s=db_name,
),
status=404,
)
slices = (
session.query(Slice)
.filter_by(datasource_id=table.id, datasource_type=table.type)
.all()
)
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
obj.get_json()
except Exception as e:
logger.exception("Failed to warm up cache")
return json_error_response(utils.error_msg_from_exception(e))
return json_success(
json.dumps(
[{"slice_id": slc.id, "slice_name": slc.slice_name} for slc in slices]
)
)
@has_access_api
@expose("/favstar/<class_name>/<obj_id>/<action>/")
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar
count = 0
favs = (
session.query(FavStar)
.filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())
.all()
)
if action == "select":
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
)
)
count = 1
elif action == "unselect":
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({"count": count}))
@api
@has_access_api
@expose("/dashboard/<dashboard_id>/published/", methods=("GET", "POST"))
def publish(self, dashboard_id):
"""Gets and toggles published status on dashboards"""
logger.warning(
"This API endpoint is deprecated and will be removed in version 1.0.0"
)
session = db.session()
Role = ab_models.Role
dash = (
session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()
)
admin_role = session.query(Role).filter(Role.name == "Admin").one_or_none()
if request.method == "GET":
if dash:
return json_success(json.dumps({"published": dash.published}))
else:
return json_error_response(
f"ERROR: cannot find dashboard {dashboard_id}", status=404
)
else:
edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()
if not edit_perm:
return json_error_response(
f'ERROR: "{g.user.username}" cannot alter dashboard "{dash.dashboard_title}"',
status=403,
)
dash.published = str(request.form["published"]).lower() == "true"
session.commit()
return json_success(json.dumps({"published": dash.published}))
    @has_access
    @expose("/dashboard/<dashboard_id>/")
    def dashboard(self, dashboard_id):
        """Server side rendering for a dashboard

        :param dashboard_id: numeric id or slug of the dashboard
        """
        session = db.session()
        qry = session.query(Dashboard)
        # The URL segment may be a numeric id or a slug.
        if dashboard_id.isdigit():
            qry = qry.filter_by(id=int(dashboard_id))
        else:
            qry = qry.filter_by(slug=dashboard_id)
        dash = qry.one_or_none()
        if not dash:
            abort(404)
        # Collect the distinct datasources backing the dashboard's charts.
        datasources = set()
        for slc in dash.slices:
            datasource = slc.datasource
            if datasource:
                datasources.add(datasource)
        if config["ENABLE_ACCESS_REQUEST"]:
            # Route users without access to every datasource through the
            # access-request flow instead of rendering the dashboard.
            for datasource in datasources:
                if datasource and not security_manager.datasource_access(datasource):
                    flash(
                        __(
                            security_manager.get_datasource_access_error_msg(datasource)
                        ),
                        "danger",
                    )
                    return redirect(
                        "superset/request_access/?" f"dashboard_id={dash.id}&"
                    )
        # Permission flags consumed by the frontend to enable/disable actions.
        dash_edit_perm = check_ownership(
            dash, raise_if_false=False
        ) and security_manager.can_access("can_save_dash", "Superset")
        dash_save_perm = security_manager.can_access("can_save_dash", "Superset")
        superset_can_explore = security_manager.can_access("can_explore", "Superset")
        superset_can_csv = security_manager.can_access("can_csv", "Superset")
        slice_can_edit = security_manager.can_access("can_edit", "SliceModelView")
        standalone_mode = (
            request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
        )
        edit_mode = (
            request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true"
        )

        # Hack to log the dashboard_id properly, even when getting a slug
        @event_logger.log_this
        def dashboard(**kwargs):
            pass

        dashboard(
            dashboard_id=dash.id,
            dashboard_version="v2",
            dash_edit_perm=dash_edit_perm,
            edit_mode=edit_mode,
        )
        dashboard_data = dash.data
        dashboard_data.update(
            {
                "standalone_mode": standalone_mode,
                "dash_save_perm": dash_save_perm,
                "dash_edit_perm": dash_edit_perm,
                "superset_can_explore": superset_can_explore,
                "superset_can_csv": superset_can_csv,
                "slice_can_edit": slice_can_edit,
            }
        )
        # Forward the remaining (non-reserved) URL params to the frontend.
        url_params = {
            key: value
            for key, value in request.args.items()
            if key not in [param.value for param in utils.ReservedUrlParameters]
        }
        bootstrap_data = {
            "user_id": g.user.get_id(),
            "dashboard_data": dashboard_data,
            "datasources": {ds.uid: ds.data for ds in datasources},
            "common": common_bootstrap_payload(),
            "editMode": edit_mode,
            "urlParams": url_params,
        }
        # ?json=true returns the bootstrap payload instead of rendered HTML.
        if request.args.get("json") == "true":
            return json_success(
                json.dumps(bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser)
            )
        return self.render_template(
            "superset/dashboard.html",
            entry="dashboard",
            standalone_mode=standalone_mode,
            title=dash.dashboard_title,
            bootstrap_data=json.dumps(
                bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
            ),
        )
    @api
    @event_logger.log_this
    @expose("/log/", methods=["POST"])
    def log(self):
        """Sink endpoint for frontend log events.

        The actual logging is performed by the ``@event_logger.log_this``
        decorator; the body only acknowledges receipt.
        """
        return Response(status=200)
    @has_access
    @expose("/sync_druid/", methods=["POST"])
    @event_logger.log_this
    def sync_druid_source(self):
        """Syncs the druid datasource in main db with the provided config.

        The endpoint takes 3 arguments:
            user - user name to perform the operation as
            cluster - name of the druid cluster
            config - configuration stored in json that contains:
                name: druid datasource name
                dimensions: list of the dimensions, they become druid columns
                    with the type STRING
                metrics_spec: list of metrics (dictionary). Metric consists of
                    2 attributes: type and name. Type can be count,
                    etc. `count` type is stored internally as longSum
                    other fields will be ignored.
            Example: {
                'name': 'test_click',
                'metrics_spec': [{'type': 'count', 'name': 'count'}],
                'dimensions': ['affiliate_id', 'campaign', 'first_seen']
            }
        """
        payload = request.get_json(force=True)
        druid_config = payload["config"]
        user_name = payload["user"]
        cluster_name = payload["cluster"]
        user = security_manager.find_user(username=user_name)
        # Druid connector classes are resolved dynamically via the registry.
        DruidDatasource = ConnectorRegistry.sources["druid"]
        DruidCluster = DruidDatasource.cluster_class
        if not user:
            err_msg = __(
                "Can't find User '%(name)s', please ask your admin " "to create one.",
                name=user_name,
            )
            logger.error(err_msg)
            return json_error_response(err_msg)
        cluster = (
            db.session.query(DruidCluster)
            .filter_by(cluster_name=cluster_name)
            .one_or_none()
        )
        if not cluster:
            err_msg = __(
                "Can't find DruidCluster with cluster_name = " "'%(name)s'",
                name=cluster_name,
            )
            logger.error(err_msg)
            return json_error_response(err_msg)
        try:
            DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)
        except Exception as e:
            # Surface the underlying sync error to the caller as JSON.
            logger.exception(utils.error_msg_from_exception(e))
            return json_error_response(utils.error_msg_from_exception(e))
        # 201: the datasource was created/updated.
        return Response(status=201)
@has_access
@expose("/sqllab_viz/", methods=["POST"])
@event_logger.log_this
def sqllab_viz(self):
SqlaTable = ConnectorRegistry.sources["table"]
data = json.loads(request.form.get("data"))
table_name = data.get("datasourceName")
database_id = data.get("dbId")
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database_id = database_id
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
table.is_sqllab_view = True
q = ParsedQuery(data.get("sql"))
table.sql = q.stripped()
db.session.add(table)
cols = []
for config in data.get("columns"):
column_name = config.get("name")
SqlaTable = ConnectorRegistry.sources["table"]
TableColumn = SqlaTable.column_class
SqlMetric = SqlaTable.metric_class
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config.get("is_date", False),
type=config.get("type", False),
)
cols.append(col)
table.columns = cols
table.metrics = [SqlMetric(metric_name="count", expression="count(*)")]
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
@has_access
@expose("/extra_table_metadata/<database_id>/<table_name>/<schema>/")
@event_logger.log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(mydb, table_name, schema)
return json_success(json.dumps(payload))
@has_access
@expose("/select_star/<database_id>/<table_name>")
@expose("/select_star/<database_id>/<table_name>/<schema>")
@event_logger.log_this
def select_star(self, database_id, table_name, schema=None):
logging.warning(
f"{self.__class__.__name__}.select_star "
"This API endpoint is deprecated and will be removed in version 1.0.0"
)
stats_logger.incr(f"{self.__class__.__name__}.select_star.init")
database = db.session.query(models.Database).get(database_id)
if not database:
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.database_not_found"
)
return json_error_response("Not found", 404)
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
# Check that the user can access the datasource
if not self.appbuilder.sm.can_access_datasource(database, table_name, schema):
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.permission_denied"
)
logging.warning(
f"Permission denied for user {g.user} on table: {table_name} "
f"schema: {schema}"
)
return json_error_response("Not found", 404)
stats_logger.incr(f"deprecated.{self.__class__.__name__}.select_star.success")
return json_success(
database.select_star(
table_name, schema, latest_partition=True, show_cols=True
)
)
    @has_access_api
    @expose("/estimate_query_cost/<database_id>/", methods=["POST"])
    @expose("/estimate_query_cost/<database_id>/<schema>/", methods=["POST"])
    @event_logger.log_this
    def estimate_query_cost(
        self, database_id: int, schema: Optional[str] = None
    ) -> Response:
        """Estimate the cost of a SQL Lab query before it is run.

        Renders Jinja templates first, runs the engine-specific estimator
        under a timeout, then formats the raw cost via the engine's (or a
        feature-flag-configured) cost formatter.
        """
        mydb = db.session.query(models.Database).get(database_id)
        sql = json.loads(request.form.get("sql", '""'))
        template_params = json.loads(request.form.get("templateParams") or "{}")
        if template_params:
            template_processor = get_template_processor(mydb)
            sql = template_processor.process_template(sql, **template_params)
        timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT
        timeout_msg = f"The estimation exceeded the {timeout} seconds timeout."
        try:
            with utils.timeout(seconds=timeout, error_message=timeout_msg):
                cost = mydb.db_engine_spec.estimate_query_cost(
                    mydb, schema, sql, utils.sources.get("sql_lab")
                )
        except SupersetTimeoutException as e:
            logger.exception(e)
            return json_error_response(timeout_msg)
        except Exception as e:
            return json_error_response(str(e))
        spec = mydb.db_engine_spec
        # Deployments may override the formatter per engine via the
        # QUERY_COST_FORMATTERS_BY_ENGINE feature flag.
        query_cost_formatters = get_feature_flags().get(
            "QUERY_COST_FORMATTERS_BY_ENGINE", {}
        )
        query_cost_formatter = query_cost_formatters.get(
            spec.engine, spec.query_cost_formatter
        )
        cost = query_cost_formatter(cost)
        return json_success(json.dumps(cost))
    @expose("/theme/")
    def theme(self):
        """Render the theme preview template."""
        return self.render_template("superset/theme.html")
    @has_access_api
    @expose("/results/<key>/")
    @event_logger.log_this
    def results(self, key):
        """Thin route wrapper; see ``results_exec`` for the implementation."""
        return self.results_exec(key)
    def results_exec(self, key: str):
        """Serves a key off of the results backend

        It is possible to pass the `rows` query argument to limit the number
        of rows returned.
        """
        if not results_backend:
            return json_error_response("Results backend isn't configured")
        # Time the backend read for the sqllab read-latency metric.
        read_from_results_backend_start = now_as_float()
        blob = results_backend.get(key)
        stats_logger.timing(
            "sqllab.query.results_backend_read",
            now_as_float() - read_from_results_backend_start,
        )
        if not blob:
            # 410 Gone: the cached result has expired from the backend.
            return json_error_response(
                "Data could not be retrieved. " "You may want to re-run the query.",
                status=410,
            )
        query = db.session.query(Query).filter_by(results_key=key).one_or_none()
        if query is None:
            return json_error_response(
                "Data could not be retrieved. You may want to re-run the query.",
                status=404,
            )
        # Re-check table-level access at read time, not just at run time.
        rejected_tables = security_manager.rejected_tables(
            query.sql, query.database, query.schema
        )
        if rejected_tables:
            return json_error_response(
                security_manager.get_table_access_error_msg(rejected_tables), status=403
            )
        payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
        obj: dict = _deserialize_results_payload(
            payload, query, cast(bool, results_backend_use_msgpack)
        )
        # Optional `rows` query arg truncates the returned data client-side.
        if "rows" in request.args:
            try:
                rows = int(request.args["rows"])
            except ValueError:
                return json_error_response("Invalid `rows` argument", status=400)
            obj = apply_display_max_row_limit(obj, rows)
        return json_success(
            json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True)
        )
    @has_access_api
    @expose("/stop_query/", methods=["POST"])
    @event_logger.log_this
    # Retry up to 5 times (1s apart) on any error, rolling back the session
    # between attempts so a stale/failed transaction doesn't poison retries.
    @backoff.on_exception(
        backoff.constant,
        Exception,
        interval=1,
        on_backoff=lambda details: db.session.rollback(),
        on_giveup=lambda details: db.session.rollback(),
        max_tries=5,
    )
    def stop_query(self):
        """Mark a running SQL Lab query as STOPPED, identified by client_id."""
        client_id = request.form.get("client_id")
        query = db.session.query(Query).filter_by(client_id=client_id).one()
        # Terminal states cannot be stopped; still answer OK to the client.
        if query.status in [
            QueryStatus.FAILED,
            QueryStatus.SUCCESS,
            QueryStatus.TIMED_OUT,
        ]:
            logger.error(
                f"Query with client_id {client_id} could not be stopped: query already complete"
            )
            return self.json_response("OK")
        query.status = QueryStatus.STOPPED
        db.session.commit()
        return self.json_response("OK")
    @has_access_api
    @expose("/validate_sql_json/", methods=["POST", "GET"])
    @event_logger.log_this
    def validate_sql_json(self):
        """Validates that arbitrary sql is acceptable for the given database.

        Returns a list of error/warning annotations as json.
        """
        sql = request.form.get("sql")
        database_id = request.form.get("database_id")
        schema = request.form.get("schema") or None
        template_params = json.loads(request.form.get("templateParams") or "{}")
        if len(template_params) > 0:
            # TODO: factor the Database object out of template rendering
            # or provide it as mydb so we can render template params
            # without having to also persist a Query ORM object.
            return json_error_response(
                "SQL validation does not support template parameters", status=400
            )
        session = db.session()
        mydb = session.query(models.Database).filter_by(id=database_id).one_or_none()
        if not mydb:
            return json_error_response(
                "Database with id {} is missing.".format(database_id), status=400
            )
        # A validator must be configured per engine via feature flags.
        spec = mydb.db_engine_spec
        validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE")
        if not validators_by_engine or spec.engine not in validators_by_engine:
            return json_error_response(
                "no SQL validator is configured for {}".format(spec.engine), status=400
            )
        validator_name = validators_by_engine[spec.engine]
        validator = get_validator_by_name(validator_name)
        if not validator:
            return json_error_response(
                "No validator named {} found (configured for the {} engine)".format(
                    validator_name, spec.engine
                )
            )
        try:
            # Validation runs against the live database, so bound its runtime.
            timeout = config["SQLLAB_VALIDATION_TIMEOUT"]
            timeout_msg = f"The query exceeded the {timeout} seconds timeout."
            with utils.timeout(seconds=timeout, error_message=timeout_msg):
                errors = validator.validate(sql, schema, mydb)
            payload = json.dumps(
                [err.to_dict() for err in errors],
                default=utils.pessimistic_json_iso_dttm_ser,
                ignore_nan=True,
                encoding=None,
            )
            return json_success(payload)
        except Exception as e:
            logger.exception(e)
            msg = _(
                f"{validator.name} was unable to check your query.\n"
                "Please recheck your query.\n"
                f"Exception: {e}"
            )
            # Return as a 400 if the database error message says we got a 4xx error
            if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(e)):
                return json_error_response(f"{msg}", status=400)
            else:
                return json_error_response(f"{msg}")
    def _sql_json_async(
        self,
        session: Session,
        rendered_query: str,
        query: Query,
        expand_data: bool,
        log_params: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Send SQL JSON query to celery workers

        :param session: SQLAlchemy session object
        :param rendered_query: the rendered query to perform by workers
        :param query: The query (SQLAlchemy) object
        :param expand_data: whether to expand Presto rows/arrays in results
        :param log_params: extra fields to attach to event logs
        :return: String JSON response
        """
        logger.info(f"Query {query.id}: Running query on a Celery worker")
        # Ignore the celery future object and the request may time out.
        try:
            sql_lab.get_sql_results.delay(
                query.id,
                rendered_query,
                return_results=False,
                # CTA queries write into a table, so nothing to persist.
                store_results=not query.select_as_cta,
                user_name=g.user.username if g.user else None,
                start_time=now_as_float(),
                expand_data=expand_data,
                log_params=log_params,
            )
        except Exception as e:
            # Broker/queue unavailable: fail the query record and tell the user.
            logger.exception(f"Query {query.id}: {e}")
            msg = _(
                "Failed to start remote query on a worker. "
                "Tell your administrator to verify the availability of "
                "the message queue."
            )
            query.status = QueryStatus.FAILED
            query.error_message = msg
            session.commit()
            return json_error_response("{}".format(msg))
        # 202 Accepted: the query is queued, results arrive asynchronously.
        resp = json_success(
            json.dumps(
                {"query": query.to_dict()},
                default=utils.json_int_dttm_ser,
                ignore_nan=True,
            ),
            status=202,
        )
        session.commit()
        return resp
def _sql_json_sync(
self,
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> str:
"""
Execute SQL query (sql json)
:param rendered_query: The rendered query (included templates)
:param query: The query SQL (SQLAlchemy) object
:return: String JSON response
"""
try:
timeout = config["SQLLAB_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
store_results = (
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE")
and not query.select_as_cta
)
with utils.timeout(seconds=timeout, error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query.id,
rendered_query,
return_results=True,
store_results=store_results,
user_name=g.user.username if g.user else None,
expand_data=expand_data,
log_params=log_params,
)
payload = json.dumps(
apply_display_max_row_limit(data),
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logger.exception(f"Query {query.id}: {e}")
return json_error_response(f"{{e}}")
if data.get("status") == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access_api
@expose("/sql_json/", methods=["POST"])
@event_logger.log_this
def sql_json(self):
log_params = {
"user_agent": cast(Optional[str], request.headers.get("USER_AGENT"))
}
return self.sql_json_exec(request.json, log_params)
    def sql_json_exec(
        self, query_params: dict, log_params: Optional[Dict[str, Any]] = None
    ):
        """Runs arbitrary sql and returns data as json

        Creates a Query ORM record, enforces table-level access, renders
        Jinja templates, applies the row limit, then dispatches either to a
        Celery worker (async) or runs inline (sync).
        """
        # Collect Values
        database_id: int = cast(int, query_params.get("database_id"))
        schema: str = cast(str, query_params.get("schema"))
        sql: str = cast(str, query_params.get("sql"))
        try:
            template_params: dict = json.loads(
                query_params.get("templateParams") or "{}"
            )
        except json.JSONDecodeError:
            # Malformed template params are ignored rather than failing the run.
            logger.warning(
                f"Invalid template parameter {query_params.get('templateParams')}"
                " specified. Defaulting to empty dict"
            )
            template_params = {}
        limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"]
        async_flag: bool = cast(bool, query_params.get("runAsync"))
        if limit < 0:
            # 0 means "no explicit limit" downstream.
            logger.warning(
                f"Invalid limit of {limit} specified. Defaulting to max limit."
            )
            limit = 0
        select_as_cta: bool = cast(bool, query_params.get("select_as_cta"))
        tmp_table_name: str = cast(str, query_params.get("tmp_table_name"))
        client_id: str = cast(
            str, query_params.get("client_id") or utils.shortid()[:10]
        )
        sql_editor_id: str = cast(str, query_params.get("sql_editor_id"))
        tab_name: str = cast(str, query_params.get("tab"))
        status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING
        session = db.session()
        mydb = session.query(models.Database).get(database_id)
        if not mydb:
            return json_error_response(f"Database with id {database_id} is missing.")
        # Set tmp_table_name for CTA
        if select_as_cta and mydb.force_ctas_schema:
            tmp_table_name = f"{mydb.force_ctas_schema}.{tmp_table_name}"
        # Save current query
        query = Query(
            database_id=database_id,
            sql=sql,
            schema=schema,
            select_as_cta=select_as_cta,
            start_time=now_as_float(),
            tab_name=tab_name,
            status=status,
            sql_editor_id=sql_editor_id,
            tmp_table_name=tmp_table_name,
            user_id=g.user.get_id() if g.user else None,
            client_id=client_id,
        )
        try:
            session.add(query)
            session.flush()
            query_id = query.id
            session.commit()  # shouldn't be necessary
        except SQLAlchemyError as e:
            logger.error(f"Errors saving query details {e}")
            session.rollback()
            raise Exception(_("Query record was not created as expected."))
        if not query_id:
            raise Exception(_("Query record was not created as expected."))
        logger.info(f"Triggering query_id: {query_id}")
        # Enforce table-level access before anything is executed.
        rejected_tables = security_manager.rejected_tables(sql, mydb, schema)
        if rejected_tables:
            query.status = QueryStatus.FAILED
            session.commit()
            return json_error_response(
                security_manager.get_table_access_error_msg(rejected_tables),
                link=security_manager.get_table_access_link(rejected_tables),
                status=403,
            )
        try:
            template_processor = get_template_processor(
                database=query.database, query=query
            )
            rendered_query = template_processor.process_template(
                query.sql, **template_params
            )
        except Exception as e:
            error_msg = utils.error_msg_from_exception(e)
            return json_error_response(
                f"Query {query_id}: Template rendering failed: {error_msg}"
            )
        # set LIMIT after template processing
        limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
        query.limit = min(lim for lim in limits if lim is not None)
        # Flag for whether or not to expand data
        # (feature that will expand Presto row objects and arrays)
        expand_data: bool = cast(
            bool,
            is_feature_enabled("PRESTO_EXPAND_DATA")
            and query_params.get("expand_data"),
        )
        # Async request.
        if async_flag:
            return self._sql_json_async(
                session, rendered_query, query, expand_data, log_params
            )
        # Sync request.
        return self._sql_json_sync(
            session, rendered_query, query, expand_data, log_params
        )
    @has_access
    @expose("/csv/<client_id>")
    @event_logger.log_this
    def csv(self, client_id):
        """Download the query results as csv."""
        logger.info("Exporting CSV file [{}]".format(client_id))
        query = db.session.query(Query).filter_by(client_id=client_id).one()
        # Re-check table access at download time.
        rejected_tables = security_manager.rejected_tables(
            query.sql, query.database, query.schema
        )
        if rejected_tables:
            flash(security_manager.get_table_access_error_msg(rejected_tables))
            return redirect("/")
        blob = None
        if results_backend and query.results_key:
            logger.info(
                "Fetching CSV from results backend " "[{}]".format(query.results_key)
            )
            blob = results_backend.get(query.results_key)
        if blob:
            # Cached results: decompress, deserialize, and convert via pandas.
            logger.info("Decompressing")
            payload = utils.zlib_decompress(
                blob, decode=not results_backend_use_msgpack
            )
            obj = _deserialize_results_payload(
                payload, query, results_backend_use_msgpack
            )
            columns = [c["name"] for c in obj["columns"]]
            df = pd.DataFrame.from_records(obj["data"], columns=columns)
            logger.info("Using pandas to convert to CSV")
            csv = df.to_csv(index=False, **config["CSV_EXPORT"])
        else:
            # No cached results: re-run the query against the database.
            logger.info("Running a query to turn into CSV")
            sql = query.select_sql or query.executed_sql
            df = query.database.get_df(sql, query.schema)
            # TODO(bkyryliuk): add compression=gzip for big files.
            csv = df.to_csv(index=False, **config["CSV_EXPORT"])
        response = Response(csv, mimetype="text/csv")
        response.headers[
            "Content-Disposition"
        ] = f"attachment; filename={query.name}.csv"
        # Structured event for auditing data exports.
        event_info = {
            "event_type": "data_export",
            "client_id": client_id,
            "row_count": len(df.index),
            "database": query.database.name,
            "schema": query.schema,
            "sql": query.sql,
            "exported_format": "csv",
        }
        logger.info(
            f"CSV exported: {repr(event_info)}", extra={"superset_event": event_info}
        )
        return response
    @api
    @handle_api_exception
    @has_access
    @expose("/fetch_datasource_metadata")
    @event_logger.log_this
    def fetch_datasource_metadata(self):
        """Return the serialized metadata for a datasource.

        ``datasourceKey`` is expected in the form ``"<id>__<type>"``.
        """
        datasource_id, datasource_type = request.args.get("datasourceKey").split("__")
        datasource = ConnectorRegistry.get_datasource(
            datasource_type, datasource_id, db.session
        )
        # Check if datasource exists
        if not datasource:
            return json_error_response(DATASOURCE_MISSING_ERR)
        # Check permission for datasource
        security_manager.assert_datasource_permission(datasource)
        return json_success(json.dumps(datasource.data))
@has_access_api
@expose("/queries/<last_updated_ms>")
def queries(self, last_updated_ms):
"""
Get the updated queries.
:param last_updated_ms: unix time, milliseconds
"""
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
return self.queries_exec(last_updated_ms_int)
    def queries_exec(self, last_updated_ms_int: int):
        """Return the current user's queries changed since the cutoff as JSON,
        keyed by client_id."""
        stats_logger.incr("queries")
        if not g.user.get_id():
            return json_error_response(
                "Please login to access the queries.", status=403
            )
        # UTC date time, same that is stored in the DB.
        last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
        sql_queries = (
            db.session.query(Query)
            .filter(
                Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt
            )
            .all()
        )
        dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
        return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))
    @has_access
    @expose("/search_queries")
    @event_logger.log_this
    def search_queries(self) -> Response:
        """
        Search for previously run sqllab queries. Used for Sqllab Query Search
        page /superset/sqllab#search.

        Custom permission can_only_search_queries_owned restricts queries
        to only queries run by current user.
        :returns: Response with list of sql query dicts
        """
        query = db.session.query(Query)
        if security_manager.can_access_all_queries():
            search_user_id = request.args.get("user_id")
        elif (
            request.args.get("user_id") is not None
            and request.args.get("user_id") != g.user.get_user_id()
        ):
            # Asking for someone else's queries without the global permission.
            return Response(status=403, mimetype="application/json")
        else:
            search_user_id = g.user.get_user_id()
        database_id = request.args.get("database_id")
        search_text = request.args.get("search_text")
        status = request.args.get("status")
        # From and To time stamp should be Epoch timestamp in seconds
        from_time = request.args.get("from")
        to_time = request.args.get("to")
        if search_user_id:
            # Filter on user_id
            query = query.filter(Query.user_id == search_user_id)
        if database_id:
            # Filter on db Id
            query = query.filter(Query.database_id == database_id)
        if status:
            # Filter on status
            query = query.filter(Query.status == status)
        if search_text:
            # Filter on search text
            query = query.filter(Query.sql.like("%{}%".format(search_text)))
        if from_time:
            query = query.filter(Query.start_time > int(from_time))
        if to_time:
            query = query.filter(Query.start_time < int(to_time))
        query_limit = config["QUERY_SEARCH_LIMIT"]
        sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()
        dict_queries = [q.to_dict() for q in sql_queries]
        return Response(
            json.dumps(dict_queries, default=utils.json_int_dttm_ser),
            status=200,
            mimetype="application/json",
        )
    @app.errorhandler(500)
    def show_traceback(self):
        """Render the generic traceback page for unhandled server errors."""
        return (
            render_template("superset/traceback.html", error_msg=get_error_msg()),
            500,
        )
    @expose("/welcome")
    def welcome(self):
        """Personalized welcome page"""
        if not g.user or not g.user.get_id():
            return redirect(appbuilder.get_url_for_login)
        # Users may pin a dashboard as their landing page via UserAttribute.
        welcome_dashboard_id = (
            db.session.query(UserAttribute.welcome_dashboard_id)
            .filter_by(user_id=g.user.get_id())
            .scalar()
        )
        if welcome_dashboard_id:
            return self.dashboard(str(welcome_dashboard_id))
        payload = {
            "user": bootstrap_user_data(g.user),
            "common": common_bootstrap_payload(),
        }
        return self.render_template(
            "superset/welcome.html",
            entry="welcome",
            bootstrap_data=json.dumps(
                payload, default=utils.pessimistic_json_iso_dttm_ser
            ),
        )
    @has_access
    @expose("/profile/<username>/")
    def profile(self, username):
        """User profile page"""
        # Fall back to the logged-in user when no username is given.
        if not username and g.user:
            username = g.user.username
        user = (
            db.session.query(ab_models.User).filter_by(username=username).one_or_none()
        )
        if not user:
            abort(404, description=f"User: {username} does not exist.")
        payload = {
            "user": bootstrap_user_data(user, include_perms=True),
            "common": common_bootstrap_payload(),
        }
        return self.render_template(
            "superset/basic.html",
            title=_("%(user)s's profile", user=username),
            entry="profile",
            bootstrap_data=json.dumps(
                payload, default=utils.pessimistic_json_iso_dttm_ser
            ),
        )
    @staticmethod
    def _get_sqllab_payload(user_id: int) -> Dict[str, Any]:
        """Build the bootstrap payload for the SQL Lab page for one user."""
        # send list of tab state ids
        tabs_state = (
            db.session.query(TabState.id, TabState.label)
            .filter_by(user_id=user_id)
            .all()
        )
        tab_state_ids = [tab_state[0] for tab_state in tabs_state]
        # return first active tab, or fallback to another one if no tab is active
        active_tab = (
            db.session.query(TabState)
            .filter_by(user_id=user_id)
            .order_by(TabState.active.desc())
            .first()
        )
        databases: Dict[int, Any] = {}
        queries: Dict[str, Any] = {}
        # These are unnecessary if sqllab backend persistence is disabled
        if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"):
            # Only expose the whitelisted DATABASE_KEYS fields per database.
            databases = {
                database.id: {
                    k: v for k, v in database.to_json().items() if k in DATABASE_KEYS
                }
                for database in db.session.query(models.Database).all()
            }
            # return all user queries associated with existing SQL editors
            user_queries = (
                db.session.query(Query)
                .filter_by(user_id=user_id)
                .filter(Query.sql_editor_id.cast(Integer).in_(tab_state_ids))
                .all()
            )
            queries = {
                query.client_id: {k: v for k, v in query.to_dict().items()}
                for query in user_queries
            }
        return {
            "defaultDbId": config["SQLLAB_DEFAULT_DBID"],
            "common": common_bootstrap_payload(),
            "tab_state_ids": tabs_state,
            "active_tab": active_tab.to_dict() if active_tab else None,
            "databases": databases,
            "queries": queries,
        }
@has_access
@expose("/sqllab")
def sqllab(self):
"""SQL Editor"""
payload = self._get_sqllab_payload(g.user.get_id())
bootstrap_data = json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
)
return self.render_template(
"superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data
)
    @api
    @handle_api_exception
    @has_access_api
    @expose("/slice_query/<slice_id>/")
    def slice_query(self, slice_id):
        """
        This method exposes an API endpoint to
        get the database query string for this slice
        """
        viz_obj = get_viz(slice_id)
        # Permission is checked against the rendered viz, not just the slice.
        security_manager.assert_viz_permission(viz_obj)
        return self.get_query_string_response(viz_obj)
    @api
    @has_access_api
    @expose("/schemas_access_for_csv_upload")
    def schemas_access_for_csv_upload(self):
        """
        This method exposes an API endpoint to
        get the schema access control settings for csv upload in this database
        """
        if not request.args.get("db_id"):
            return json_error_response("No database is allowed for your csv upload")
        db_id = int(request.args.get("db_id"))
        database = db.session.query(models.Database).filter_by(id=db_id).one()
        try:
            schemas_allowed = database.get_schema_access_for_csv_upload()
            # Users with database-wide or global access see every allowed schema.
            if (
                security_manager.database_access(database)
                or security_manager.all_datasource_access()
            ):
                return self.json_response(schemas_allowed)
            # the list schemas_allowed should not be empty here
            # and the list schemas_allowed_processed returned from security_manager
            # should not be empty either,
            # otherwise the database should have been filtered out
            # in CsvToDatabaseForm
            schemas_allowed_processed = security_manager.schemas_accessible_by_user(
                database, schemas_allowed, False
            )
            return self.json_response(schemas_allowed_processed)
        except Exception as e:
            logger.exception(e)
            return json_error_response(
                "Failed to fetch schemas allowed for csv upload in this database! "
                "Please contact your Superset Admin!"
            )
class CssTemplateModelView(SupersetModelView, DeleteMixin):
    """CRUD views for stored CSS templates."""

    datamodel = SQLAInterface(models.CssTemplate)
    include_route_methods = RouteMethod.CRUD_SET
    list_title = _("CSS Templates")
    show_title = _("Show CSS Template")
    add_title = _("Add CSS Template")
    edit_title = _("Edit CSS Template")
    list_columns = ["template_name"]
    edit_columns = ["template_name", "css"]
    add_columns = edit_columns
    label_columns = {"template_name": _("Template Name")}
class CssTemplateAsyncModelView(CssTemplateModelView):
    """Variant of ``CssTemplateModelView`` exposing only the read API."""

    include_route_methods = {RouteMethod.API_READ}
    list_columns = ["template_name", "css"]
@app.after_request
def apply_http_headers(response: Response):
    """Applies the configuration's http headers to all responses"""
    # HTTP_HEADERS is deprecated; merging it after OVERRIDE_HTTP_HEADERS
    # preserves backwards compatibility.
    merged_headers = {**config["OVERRIDE_HTTP_HEADERS"], **config["HTTP_HEADERS"]}
    response.headers.extend(merged_headers)
    # Defaults never clobber headers that are already present.
    for header_name, header_value in config["DEFAULT_HTTP_HEADERS"].items():
        response.headers.setdefault(header_name, header_value)
    return response
| 36.382214 | 98 | 0.582992 |
import logging
import re
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, cast, Dict, List, Optional, Union
from urllib import parse
import backoff
import msgpack
import pandas as pd
import pyarrow as pa
import simplejson as json
from flask import abort, flash, g, Markup, redirect, render_template, request, Response
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_appbuilder.security.sqla import models as ab_models
from flask_babel import gettext as __, lazy_gettext as _
from sqlalchemy import and_, Integer, or_, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.session import Session
from werkzeug.urls import Href
import superset.models.core as models
from superset import (
app,
appbuilder,
cache,
conf,
dataframe,
db,
event_logger,
get_feature_flags,
is_feature_enabled,
result_set,
results_backend,
results_backend_use_msgpack,
security_manager,
sql_lab,
talisman,
viz,
)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource
from superset.constants import RouteMethod
from superset.exceptions import (
DatabaseNotFound,
SupersetException,
SupersetSecurityException,
SupersetTimeoutException,
)
from superset.jinja_context import get_template_processor
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query, TabState
from superset.models.user_attributes import UserAttribute
from superset.sql_parse import ParsedQuery
from superset.sql_validators import get_validator_by_name
from superset.utils import core as utils, dashboard_import_export
from superset.utils.dates import now_as_float
from superset.utils.decorators import etag_cache, stats_timing
from superset.views.database.filters import DatabaseFilter
from .base import (
api,
BaseSupersetView,
check_ownership,
common_bootstrap_payload,
CsvResponse,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
SupersetModelView,
)
from .utils import (
apply_display_max_row_limit,
bootstrap_user_data,
get_datasource_info,
get_form_data,
get_viz,
)
# Module-level shorthands pulled from the Flask app config.
config = app.config
CACHE_DEFAULT_TIMEOUT = config["CACHE_DEFAULT_TIMEOUT"]
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"]
stats_logger = config["STATS_LOGGER"]
# Short aliases used throughout this module.
DAR = DatasourceAccessRequest
QueryStatus = utils.QueryStatus
logger = logging.getLogger(__name__)
# Database fields exposed in the SQL Lab bootstrap payload (see
# _get_sqllab_payload); everything else from Database.to_json is dropped.
DATABASE_KEYS = [
    "allow_csv_upload",
    "allow_ctas",
    "allow_dml",
    "allow_multi_schema_metadata_fetch",
    "allow_run_async",
    "allows_subquery",
    "backend",
    "database_name",
    "expose_in_sqllab",
    "force_ctas_schema",
    "id",
]
# Canned, translatable error messages.
ALL_DATASOURCE_ACCESS_ERR = __(
    "This endpoint requires the `all_datasource_access` permission"
)
DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted")
ACCESS_REQUEST_MISSING_ERR = __("The access requests seem to have been deleted")
USER_MISSING_ERR = __("The user seems to have been deleted")
# When JavaScript controls are disabled, these JS-bearing keys are stripped
# from submitted form data.
FORM_DATA_KEY_BLACKLIST: List[str] = []
if not config["ENABLE_JAVASCRIPT_CONTROLS"]:
    FORM_DATA_KEY_BLACKLIST = ["js_tooltip", "js_onclick_href", "js_data_mutator"]
def get_database_access_error_msg(database_name):
    """Return a translated error message for missing database access."""
    return __(
        "This view requires the database %(name)s or "
        "`all_datasource_access` permission",
        name=database_name,
    )
def is_owner(obj, user):
    """Return whether ``user`` appears in ``obj.owners``.

    Mirrors the original short-circuit semantics: a falsy ``obj`` is
    returned unchanged (e.g. ``None``); otherwise a boolean.
    """
    if not obj:
        return obj
    return user in obj.owners
def check_datasource_perms(
    self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> None:
    """Raise if the current user may not access the target datasource.

    Receives the calling view as ``self`` (it is not a bound method).  The
    datasource type/id may come from the arguments or be resolved from the
    submitted form data.

    :raises SupersetSecurityException: if the datasource cannot be resolved
        or the user lacks permission on the rendered viz.
    """
    form_data = get_form_data()[0]
    try:
        datasource_id, datasource_type = get_datasource_info(
            datasource_id, datasource_type, form_data
        )
    except SupersetException as e:
        # Normalize resolution failures into a security exception.
        raise SupersetSecurityException(str(e))
    viz_obj = get_viz(
        datasource_type=datasource_type,
        datasource_id=datasource_id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_viz_permission(viz_obj)
def check_slice_perms(self, slice_id):
    """Raise if the current user may not access the given slice's viz.

    Receives the calling view as ``self`` (it is not a bound method).
    """
    form_data, slc = get_form_data(slice_id, use_slice_data=True)
    viz_obj = get_viz(
        datasource_type=slc.datasource.type,
        datasource_id=slc.datasource.id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_viz_permission(viz_obj)
def _deserialize_results_payload(
    payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False
) -> dict:
    """Deserialize a results-backend payload into the SQL Lab results dict.

    :param payload: raw payload from the results backend — msgpack bytes when
        ``use_msgpack`` is truthy, otherwise a JSON string.
    :param query: the Query model row the payload belongs to; its database's
        engine spec is used to expand nested columns (msgpack path only).
    :param use_msgpack: whether the payload was serialized with msgpack/pyarrow.
    :returns: the deserialized results payload.
    """
    # Lazy %-style args avoid paying the formatting cost when DEBUG logging
    # is disabled (the original used an eagerly-evaluated f-string).
    logger.debug("Deserializing from msgpack: %s", use_msgpack)
    if use_msgpack:
        with stats_timing(
            "sqllab.query.results_backend_msgpack_deserialize", stats_logger
        ):
            ds_payload = msgpack.loads(payload, raw=False)
        with stats_timing("sqllab.query.results_backend_pa_deserialize", stats_logger):
            pa_table = pa.deserialize(ds_payload["data"])
        df = result_set.SupersetResultSet.convert_table_to_df(pa_table)
        ds_payload["data"] = dataframe.df_to_records(df) or []
        db_engine_spec = query.database.db_engine_spec
        all_columns, data, expanded_columns = db_engine_spec.expand_data(
            ds_payload["selected_columns"], ds_payload["data"]
        )
        ds_payload.update(
            {"data": data, "columns": all_columns, "expanded_columns": expanded_columns}
        )
        return ds_payload
    else:
        with stats_timing(
            "sqllab.query.results_backend_json_deserialize", stats_logger
        ):
            return json.loads(payload)
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
    """CRUD list view over pending datasource access requests (DAR rows)."""
    datamodel = SQLAInterface(DAR)
    include_route_methods = RouteMethod.CRUD_SET
    list_columns = [
        "username",
        "user_roles",
        "datasource_link",
        "roles_with_datasource",
        "created_on",
    ]
    order_columns = ["created_on"]
    base_order = ("changed_on", "desc")
    # Translated column headers shown in the list view.
    label_columns = {
        "username": _("User"),
        "user_roles": _("User Roles"),
        "database": _("Database URL"),
        "datasource_link": _("Datasource"),
        "roles_with_datasource": _("Roles to grant"),
        "created_on": _("Created On"),
    }
@talisman(force_https=False)
@app.route("/health")
def health():
    """Lightweight health-probe endpoint; always returns plain "OK"."""
    return "OK"
@talisman(force_https=False)
@app.route("/healthcheck")
def healthcheck():
    """Alias of /health for load balancers expecting /healthcheck."""
    return "OK"
@talisman(force_https=False)
@app.route("/ping")
def ping():
    """Alias of /health for monitors expecting /ping."""
    return "OK"
class KV(BaseSupersetView):
    """Simple key/value store exposed over HTTP."""

    @event_logger.log_this
    @has_access_api
    @expose("/store/", methods=["POST"])
    def store(self):
        """Persist the posted ``data`` field and return the new row id."""
        try:
            entry = models.KeyValue(value=request.form.get("data"))
            db.session.add(entry)
            db.session.commit()
        except Exception as e:
            return json_error_response(e)
        return Response(json.dumps({"id": entry.id}), status=200)

    @event_logger.log_this
    @has_access_api
    @expose("/<key_id>/", methods=["GET"])
    def get_value(self, key_id):
        """Look up a stored value by id; 404 when the key is absent."""
        try:
            entry = db.session.query(models.KeyValue).filter_by(id=key_id).scalar()
        except Exception as e:
            return json_error_response(e)
        if not entry:
            return Response(status=404, content_type="text/plain")
        return Response(entry.value, status=200, content_type="text/plain")
class R(BaseSupersetView):
    """Short-URL redirector: resolves /r/<id> links and mints new ones."""
    @event_logger.log_this
    @expose("/<url_id>")
    def index(self, url_id):
        """Redirect to the stored URL for *url_id*, or home with a flash."""
        url = db.session.query(models.Url).get(url_id)
        if url and url.url:
            explore_url = "//superset/explore/?"
            if url.url.startswith(explore_url):
                # Rebuild explore links to go through the saved r= parameter.
                explore_url += f"r={url_id}"
                # [1:] drops one leading slash, yielding an absolute path.
                return redirect(explore_url[1:])
            else:
                return redirect(url.url[1:])
        else:
            flash("URL to nowhere...", "danger")
            return redirect("/")
    @event_logger.log_this
    @has_access_api
    @expose("/shortner/", methods=["POST"])
    def shortner(self):
        """Store the posted URL and return the short /r/<id> link as text."""
        url = request.form.get("data")
        obj = models.Url(url=url)
        db.session.add(obj)
        db.session.commit()
        return Response(
            "{scheme}://{request.headers[Host]}/r/{obj.id}".format(
                scheme=request.scheme, request=request, obj=obj
            ),
            mimetype="text/plain",
        )
class Superset(BaseSupersetView):
logger = logging.getLogger(__name__)
@has_access_api
@expose("/datasources/")
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources if o.short_data.get("name")]
datasources = sorted(datasources, key=lambda o: o["name"])
return self.json_response(datasources)
    @has_access_api
    @expose("/override_role_permissions/", methods=["POST"])
    def override_role_permissions(self):
        """Replace a role's permissions with datasource_access grants for the
        datasources named in the posted JSON (any other permissions on the
        role are revoked)."""
        data = request.get_json(force=True)
        role_name = data["role_name"]
        databases = data["database"]
        # Collect the fully-qualified names of every requested datasource.
        db_ds_names = set()
        for dbs in databases:
            for schema in dbs["schema"]:
                for ds_name in schema["datasources"]:
                    fullname = utils.get_datasource_full_name(
                        dbs["name"], ds_name, schema=schema["name"]
                    )
                    db_ds_names.add(fullname)
        existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
        datasources = [d for d in existing_datasources if d.full_name in db_ds_names]
        role = security_manager.find_role(role_name)
        # Wipe the role and rebuild it from the requested datasources only.
        role.permissions = []
        granted_perms = []
        for datasource in datasources:
            view_menu_perm = security_manager.find_permission_view_menu(
                view_menu_name=datasource.perm, permission_name="datasource_access"
            )
            if view_menu_perm and view_menu_perm.view_menu:
                role.permissions.append(view_menu_perm)
                granted_perms.append(view_menu_perm.view_menu.name)
        db.session.commit()
        return self.json_response(
            {"granted": granted_perms, "requested": list(db_ds_names)}, status=201
        )
    @event_logger.log_this
    @has_access
    @expose("/request_access/")
    def request_access(self):
        """Collect the datasources behind a dashboard/datasource and either
        redirect (access already granted), file DAR rows (action=go), or
        render the access-request form."""
        datasources = set()
        dashboard_id = request.args.get("dashboard_id")
        if dashboard_id:
            dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()
            datasources |= dash.datasources
        datasource_id = request.args.get("datasource_id")
        datasource_type = request.args.get("datasource_type")
        if datasource_id:
            ds_class = ConnectorRegistry.sources.get(datasource_type)
            datasource = (
                db.session.query(ds_class).filter_by(id=int(datasource_id)).one()
            )
            datasources.add(datasource)
        # NOTE(review): this local shadows the imported `has_access` decorator
        # within this function's scope (harmless here, but confusing).
        has_access = all(
            (
                datasource and security_manager.datasource_access(datasource)
                for datasource in datasources
            )
        )
        if has_access:
            return redirect("/superset/dashboard/{}".format(dashboard_id))
        if request.args.get("action") == "go":
            # File one access request per datasource.
            for datasource in datasources:
                access_request = DAR(
                    datasource_id=datasource.id, datasource_type=datasource.type
                )
                db.session.add(access_request)
                db.session.commit()
            flash(__("Access was requested"), "info")
            return redirect("/")
        return self.render_template(
            "superset/request_access.html",
            datasources=datasources,
            datasource_names=", ".join([o.name for o in datasources]),
        )
    @event_logger.log_this
    @has_access
    @expose("/approve")
    def approve(self):
        """Approve a pending access request by granting a role to the
        requester and/or extending a role with datasource access, then
        delete the fulfilled request rows."""
        def clean_fulfilled_requests(session):
            # Drop any request that is now satisfied, or whose datasource
            # no longer exists.
            for r in session.query(DAR).all():
                datasource = ConnectorRegistry.get_datasource(
                    r.datasource_type, r.datasource_id, session
                )
                if not datasource or security_manager.datasource_access(datasource):
                    session.delete(r)
            session.commit()
        datasource_type = request.args.get("datasource_type")
        datasource_id = request.args.get("datasource_id")
        created_by_username = request.args.get("created_by")
        role_to_grant = request.args.get("role_to_grant")
        role_to_extend = request.args.get("role_to_extend")
        session = db.session
        datasource = ConnectorRegistry.get_datasource(
            datasource_type, datasource_id, session
        )
        if not datasource:
            flash(DATASOURCE_MISSING_ERR, "alert")
            return json_error_response(DATASOURCE_MISSING_ERR)
        requested_by = security_manager.find_user(username=created_by_username)
        if not requested_by:
            flash(USER_MISSING_ERR, "alert")
            return json_error_response(USER_MISSING_ERR)
        requests = (
            session.query(DAR)
            .filter(
                DAR.datasource_id == datasource_id,
                DAR.datasource_type == datasource_type,
                DAR.created_by_fk == requested_by.id,
            )
            .all()
        )
        if not requests:
            flash(ACCESS_REQUEST_MISSING_ERR, "alert")
            return json_error_response(ACCESS_REQUEST_MISSING_ERR)
        # The approver must own the datasource or hold all_datasource_access.
        if security_manager.all_datasource_access() or check_ownership(
            datasource, raise_if_false=False
        ):
            # Option 1: grant an entire existing role to the requester.
            if role_to_grant:
                role = security_manager.find_role(role_to_grant)
                requested_by.roles.append(role)
                msg = __(
                    "%(user)s was granted the role %(role)s that gives access "
                    "to the %(datasource)s",
                    user=requested_by.username,
                    role=role_to_grant,
                    datasource=datasource.full_name,
                )
                utils.notify_user_about_perm_udate(
                    g.user,
                    requested_by,
                    role,
                    datasource,
                    "email/role_granted.txt",
                    app.config,
                )
                flash(msg, "info")
            # Option 2: extend a role with access to this one datasource.
            if role_to_extend:
                perm_view = security_manager.find_permission_view_menu(
                    "email/datasource_access", datasource.perm
                )
                role = security_manager.find_role(role_to_extend)
                security_manager.add_permission_role(role, perm_view)
                msg = __(
                    "Role %(r)s was extended to provide the access to "
                    "the datasource %(ds)s",
                    r=role_to_extend,
                    ds=datasource.full_name,
                )
                utils.notify_user_about_perm_udate(
                    g.user,
                    requested_by,
                    role,
                    datasource,
                    "email/role_extended.txt",
                    app.config,
                )
                flash(msg, "info")
            clean_fulfilled_requests(session)
        else:
            flash(__("You have no permission to approve this request"), "danger")
            return redirect("/accessrequestsmodelview/list/")
        for r in requests:
            session.delete(r)
        session.commit()
        return redirect("/accessrequestsmodelview/list/")
    def get_viz(
        self,
        slice_id=None,
        form_data=None,
        datasource_type=None,
        datasource_id=None,
        force=False,
    ):
        """Build a viz object either from a saved slice (when *slice_id* is
        given) or from raw form data plus a datasource reference."""
        if slice_id:
            slc = db.session.query(Slice).filter_by(id=slice_id).one()
            return slc.get_viz()
        else:
            viz_type = form_data.get("viz_type", "table")
            datasource = ConnectorRegistry.get_datasource(
                datasource_type, datasource_id, db.session
            )
            viz_obj = viz.viz_types[viz_type](
                datasource, form_data=form_data, force=force
            )
            return viz_obj
    @has_access
    @expose("/slice/<slice_id>/")
    def slice(self, slice_id):
        """Redirect /slice/<id>/ to the explore view for that slice."""
        form_data, slc = get_form_data(slice_id, use_slice_data=True)
        if not slc:
            abort(404)
        endpoint = "/superset/explore/?form_data={}".format(
            parse.quote(json.dumps({"slice_id": slice_id}))
        )
        # Propagate standalone mode through the redirect.
        param = utils.ReservedUrlParameters.STANDALONE.value
        if request.args.get(param) == "true":
            endpoint += f"&{param}=true"
        return redirect(endpoint)
    def get_query_string_response(self, viz_obj):
        """Return the SQL (or equivalent) query string behind a viz as JSON."""
        query = None
        try:
            query_obj = viz_obj.query_obj()
            if query_obj:
                query = viz_obj.datasource.get_query_str(query_obj)
        except Exception as e:
            logger.exception(e)
            return json_error_response(e)
        if not query:
            query = "No query."
        return self.json_response(
            {"query": query, "language": viz_obj.datasource.query_language}
        )
    def get_raw_results(self, viz_obj):
        """Return the viz's raw dataframe as a list of record dicts."""
        return self.json_response(
            {"data": viz_obj.get_df_payload()["df"].to_dict("records")}
        )
    def get_samples(self, viz_obj):
        """Return sample rows from the viz's datasource as JSON."""
        return self.json_response({"data": viz_obj.get_samples()})
    def generate_json(
        self, viz_obj, csv=False, query=False, results=False, samples=False
    ):
        """Serve a viz as CSV, its query string, raw results, samples, or
        (default) the full chart payload, depending on the flags."""
        if csv:
            return CsvResponse(
                viz_obj.get_csv(),
                status=200,
                headers=generate_download_headers("csv"),
                mimetype="application/csv",
            )
        if query:
            return self.get_query_string_response(viz_obj)
        if results:
            return self.get_raw_results(viz_obj)
        if samples:
            return self.get_samples(viz_obj)
        payload = viz_obj.get_payload()
        return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
    @event_logger.log_this
    @api
    @has_access_api
    @expose("/slice_json/<slice_id>")
    @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms)
    def slice_json(self, slice_id):
        """Return the full JSON payload for a saved slice."""
        form_data, slc = get_form_data(slice_id, use_slice_data=True)
        datasource_type = slc.datasource.type
        datasource_id = slc.datasource.id
        viz_obj = get_viz(
            datasource_type=datasource_type,
            datasource_id=datasource_id,
            form_data=form_data,
            force=False,
        )
        return self.generate_json(viz_obj)
    @event_logger.log_this
    @api
    @has_access_api
    @expose("/annotation_json/<layer_id>")
    def annotation_json(self, layer_id):
        """Return the annotations of a given layer as a table-viz payload."""
        form_data = get_form_data()[0]
        form_data["layer_id"] = layer_id
        # Constrain the annotation datasource to the requested layer.
        form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}]
        datasource = AnnotationDatasource()
        viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=False)
        payload = viz_obj.get_payload()
        return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
    # GET access to explore_json is only allowed when the CSRF-protection
    # feature flag for that endpoint is disabled.
    EXPLORE_JSON_METHODS = ["POST"]
    if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"):
        EXPLORE_JSON_METHODS.append("GET")
    @event_logger.log_this
    @api
    @has_access_api
    @handle_api_exception
    @expose(
        "/explore_json/<datasource_type>/<datasource_id>/", methods=EXPLORE_JSON_METHODS
    )
    @expose("/explore_json/", methods=EXPLORE_JSON_METHODS)
    @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms)
    def explore_json(self, datasource_type=None, datasource_id=None):
        """Serve chart data for the explore view; `csv`/`query`/`results`/
        `samples` query flags select an alternate representation and `force`
        bypasses the cache."""
        csv = request.args.get("csv") == "true"
        query = request.args.get("query") == "true"
        results = request.args.get("results") == "true"
        samples = request.args.get("samples") == "true"
        force = request.args.get("force") == "true"
        form_data = get_form_data()[0]
        try:
            datasource_id, datasource_type = get_datasource_info(
                datasource_id, datasource_type, form_data
            )
        except SupersetException as e:
            return json_error_response(utils.error_msg_from_exception(e))
        viz_obj = get_viz(
            datasource_type=datasource_type,
            datasource_id=datasource_id,
            form_data=form_data,
            force=force,
        )
        return self.generate_json(
            viz_obj, csv=csv, query=query, results=results, samples=samples
        )
    @event_logger.log_this
    @has_access
    @expose("/import_dashboards", methods=["GET", "POST"])
    def import_dashboards(self):
        """Overrides the dashboards using json instances from the file."""
        f = request.files.get("file")
        if request.method == "POST" and f:
            try:
                dashboard_import_export.import_dashboards(db.session, f.stream)
            except DatabaseNotFound as e:
                flash(
                    _(
                        "Cannot import dashboard: %(db_error)s.\n"
                        "Make sure to create the database before "
                        "importing the dashboard.",
                        db_error=e,
                    ),
                    "danger",
                )
            except Exception as e:
                # Unknown failure: log it and show a generic message.
                logger.exception(e)
                flash(
                    _(
                        "An unknown error occurred. "
                        "Please contact your Superset administrator"
                    ),
                    "danger",
                )
            return redirect("/dashboard/list/")
        # GET (or no file): render the upload form.
        return self.render_template("superset/import_dashboards.html")
    @event_logger.log_this
    @has_access
    @expose("/explore/<datasource_type>/<datasource_id>/", methods=["GET", "POST"])
    @expose("/explore/", methods=["GET", "POST"])
    def explore(self, datasource_type=None, datasource_id=None):
        """Render the explore view for a datasource, or handle the
        saveas/overwrite actions submitted from it."""
        user_id = g.user.get_id() if g.user else None
        form_data, slc = get_form_data(use_slice_data=True)
        # SIP-15: nudge owners of charts still using the legacy time-range
        # endpoints to re-save with the inclusive/exclusive pair.
        if (
            config["SIP_15_ENABLED"]
            and slc
            and g.user in slc.owners
            and (
                not form_data.get("time_range_endpoints")
                or form_data["time_range_endpoints"]
                != (
                    utils.TimeRangeEndpoint.INCLUSIVE,
                    utils.TimeRangeEndpoint.EXCLUSIVE,
                )
            )
        ):
            url = Href("/superset/explore/")(
                {
                    "form_data": json.dumps(
                        {
                            "slice_id": slc.id,
                            "time_range_endpoints": (
                                utils.TimeRangeEndpoint.INCLUSIVE.value,
                                utils.TimeRangeEndpoint.EXCLUSIVE.value,
                            ),
                        }
                    )
                }
            )
            flash(Markup(config["SIP_15_TOAST_MESSAGE"].format(url=url)))
        error_redirect = "/chart/list/"
        try:
            datasource_id, datasource_type = get_datasource_info(
                datasource_id, datasource_type, form_data
            )
        except SupersetException:
            return redirect(error_redirect)
        datasource = ConnectorRegistry.get_datasource(
            datasource_type, datasource_id, db.session
        )
        if not datasource:
            flash(DATASOURCE_MISSING_ERR, "danger")
            return redirect(error_redirect)
        # With ENABLE_ACCESS_REQUEST on, send users without access to the
        # request-access flow instead of an error page.
        if config["ENABLE_ACCESS_REQUEST"] and (
            not security_manager.datasource_access(datasource)
        ):
            flash(
                __(security_manager.get_datasource_access_error_msg(datasource)),
                "danger",
            )
            return redirect(
                "superset/request_access/?"
                f"datasource_type={datasource_type}&"
                f"datasource_id={datasource_id}&"
            )
        viz_type = form_data.get("viz_type")
        if not viz_type and datasource.default_endpoint:
            return redirect(datasource.default_endpoint)
        # slice perms
        slice_add_perm = security_manager.can_access("can_add", "SliceModelView")
        slice_overwrite_perm = is_owner(slc, g.user)
        slice_download_perm = security_manager.can_access(
            "can_download", "SliceModelView"
        )
        form_data["datasource"] = str(datasource_id) + "__" + datasource_type
        # On explore, merge legacy/extra filters into the form data.
        utils.convert_legacy_filters_into_adhoc(form_data)
        utils.merge_extra_filters(form_data)
        # merge request url params
        if request.method == "GET":
            utils.merge_request_params(form_data, request.args)
        # handle save or overwrite
        action = request.args.get("action")
        if action == "overwrite" and not slice_overwrite_perm:
            return json_error_response(
                _("You don't have the rights to ") + _("alter this ") + _("chart"),
                status=400,
            )
        if action == "saveas" and not slice_add_perm:
            return json_error_response(
                _("You don't have the rights to ") + _("create a ") + _("chart"),
                status=400,
            )
        if action in ("saveas", "overwrite"):
            return self.save_or_overwrite_slice(
                request.args,
                slc,
                slice_add_perm,
                slice_overwrite_perm,
                slice_download_perm,
                datasource_id,
                datasource_type,
                datasource.name,
            )
        standalone = (
            request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
        )
        # Payload handed to the frontend explore app.
        bootstrap_data = {
            "can_add": slice_add_perm,
            "can_download": slice_download_perm,
            "can_overwrite": slice_overwrite_perm,
            "datasource": datasource.data,
            "form_data": form_data,
            "datasource_id": datasource_id,
            "datasource_type": datasource_type,
            "slice": slc.data if slc else None,
            "standalone": standalone,
            "user_id": user_id,
            "forced_height": request.args.get("height"),
            "common": common_bootstrap_payload(),
        }
        table_name = (
            datasource.table_name
            if datasource_type == "table"
            else datasource.datasource_name
        )
        if slc:
            title = slc.slice_name
        else:
            title = _("Explore - %(table)s", table=table_name)
        return self.render_template(
            "superset/basic.html",
            bootstrap_data=json.dumps(
                bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
            ),
            entry="explore",
            title=title,
            standalone_mode=standalone,
        )
    @api
    @handle_api_exception
    @has_access_api
    @expose("/filter/<datasource_type>/<datasource_id>/<column>/")
    def filter(self, datasource_type, datasource_id, column):
        """
        Endpoint to retrieve values for a specified column.

        :param datasource_type: Type of datasource e.g. table
        :param datasource_id: Datasource id
        :param column: Column name to retrieve values for
        :returns: JSON-encoded list of distinct column values
        """
        datasource = ConnectorRegistry.get_datasource(
            datasource_type, datasource_id, db.session
        )
        if not datasource:
            return json_error_response(DATASOURCE_MISSING_ERR)
        security_manager.assert_datasource_permission(datasource)
        payload = json.dumps(
            datasource.values_for_column(column, config["FILTER_SELECT_ROW_LIMIT"]),
            default=utils.json_int_dttm_ser,
        )
        return json_success(payload)
def save_or_overwrite_slice(
self,
args,
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource_name,
):
slice_name = args.get("slice_name")
action = args.get("action")
form_data = get_form_data()[0]
if action in ("saveas"):
if "slice_id" in form_data:
form_data.pop("slice_id")
slc = Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data["viz_type"]
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ("saveas") and slice_add_perm:
self.save_slice(slc)
elif action == "overwrite" and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get("add_to_dash") == "existing":
dash = (
db.session.query(Dashboard)
.filter_by(id=int(request.args.get("save_to_dashboard_id")))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("alter this ")
+ _("dashboard"),
status=400,
)
flash(
_("Chart [{}] was added to dashboard [{}]").format(
slc.slice_name, dash.dashboard_title
),
"info",
)
elif request.args.get("add_to_dash") == "new":
dash_add_perm = security_manager.can_access("can_add", "DashboardModelView")
if not dash_add_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("create a ")
+ _("dashboard"),
status=400,
)
dash = Dashboard(
dashboard_title=request.args.get("new_dashboard_name"),
owners=[g.user] if g.user else [],
)
flash(
_(
"Dashboard [{}] just got created and chart [{}] was added " "to it"
).format(dash.dashboard_title, slc.slice_name),
"info",
)
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": is_owner(slc, g.user),
"form_data": slc.form_data,
"slice": slc.data,
"dashboard_id": dash.id if dash else None,
}
if request.args.get("goto_dash") == "true":
response.update({"dashboard": dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = _("Chart [{}] has been saved").format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, "info")
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = _("Chart [{}] has been overwritten").format(slc.slice_name)
flash(msg, "info")
    @api
    @has_access_api
    @expose("/schemas/<db_id>/")
    @expose("/schemas/<db_id>/<force_refresh>/")
    def schemas(self, db_id, force_refresh="false"):
        """Return the schemas of a database the user may access, as JSON."""
        db_id = int(db_id)
        force_refresh = force_refresh.lower() == "true"
        database = db.session.query(models.Database).get(db_id)
        if database:
            schemas = database.get_all_schema_names(
                cache=database.schema_cache_enabled,
                cache_timeout=database.schema_cache_timeout,
                force=force_refresh,
            )
            # Filter down to schemas the current user can see.
            schemas = security_manager.schemas_accessible_by_user(database, schemas)
        else:
            schemas = []
        return Response(json.dumps({"schemas": schemas}), mimetype="application/json")
    @api
    @has_access_api
    @expose("/tables/<int:db_id>/<schema>/<substr>/")
    @expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/")
    def tables(
        self, db_id: int, schema: str, substr: str, force_refresh: str = "false"
    ):
        """Endpoint to fetch the list of tables for given database."""
        # Guarantees database filtering by security access
        query = db.session.query(models.Database)
        query = DatabaseFilter("id", SQLAInterface(models.Database, db.session)).apply(
            query, None
        )
        database = query.filter_by(id=db_id).one_or_none()
        if not database:
            return json_error_response("Not found", 404)
        force_refresh_parsed = force_refresh.lower() == "true"
        schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)
        substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)
        if schema_parsed:
            tables = (
                database.get_all_table_names_in_schema(
                    schema=schema_parsed,
                    force=force_refresh_parsed,
                    cache=database.table_cache_enabled,
                    cache_timeout=database.table_cache_timeout,
                )
                or []
            )
            views = (
                database.get_all_view_names_in_schema(
                    schema=schema_parsed,
                    force=force_refresh_parsed,
                    cache=database.table_cache_enabled,
                    cache_timeout=database.table_cache_timeout,
                )
                or []
            )
        else:
            tables = database.get_all_table_names_in_database(
                cache=True, force=False, cache_timeout=24 * 60 * 60
            )
            views = database.get_all_view_names_in_database(
                cache=True, force=False, cache_timeout=24 * 60 * 60
            )
        # Keep only the datasources the current user may access.
        tables = security_manager.get_datasources_accessible_by_user(
            database, tables, schema_parsed
        )
        views = security_manager.get_datasources_accessible_by_user(
            database, views, schema_parsed
        )
        def get_datasource_label(ds_name: utils.DatasourceName) -> str:
            # Qualify with the schema only when no schema filter was given.
            return (
                ds_name.table if schema_parsed else f"{ds_name.schema}.{ds_name.table}"
            )
        if substr_parsed:
            tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]
            views = [vn for vn in views if substr_parsed in get_datasource_label(vn)]
        if not schema_parsed and database.default_schemas:
            user_schema = g.user.email.split("@")[0]
            valid_schemas = set(database.default_schemas + [user_schema])
            tables = [tn for tn in tables if tn.schema in valid_schemas]
            views = [vn for vn in views if vn.schema in valid_schemas]
        max_items = config["MAX_TABLE_NAMES"] or len(tables)
        total_items = len(tables) + len(views)
        max_tables = len(tables)
        max_views = len(views)
        if total_items and substr_parsed:
            # Prorate the display cap between tables and views.
            max_tables = max_items * len(tables) // total_items
            max_views = max_items * len(views) // total_items
        table_options = [
            {
                "value": tn.table,
                "schema": tn.schema,
                "label": get_datasource_label(tn),
                "title": get_datasource_label(tn),
                "type": "table",
            }
            for tn in tables[:max_tables]
        ]
        table_options.extend(
            [
                {
                    "value": vn.table,
                    "schema": vn.schema,
                    "label": get_datasource_label(vn),
                    "title": get_datasource_label(vn),
                    "type": "view",
                }
                for vn in views[:max_views]
            ]
        )
        table_options.sort(key=lambda value: value["label"])
        payload = {"tableLength": len(tables) + len(views), "options": table_options}
        return json_success(json.dumps(payload))
    @api
    @has_access_api
    @expose("/copy_dash/<dashboard_id>/", methods=["GET", "POST"])
    def copy_dash(self, dashboard_id):
        """Copy a dashboard, optionally duplicating its slices as well."""
        session = db.session()
        data = json.loads(request.form.get("data"))
        dash = models.Dashboard()
        original_dash = session.query(Dashboard).get(dashboard_id)
        dash.owners = [g.user] if g.user else []
        dash.dashboard_title = data["dashboard_title"]
        if data["duplicate_slices"]:
            # Duplicating slices as well, mapping old ids to new ones
            old_to_new_sliceids = {}
            for slc in original_dash.slices:
                new_slice = slc.clone()
                new_slice.owners = [g.user] if g.user else []
                session.add(new_slice)
                session.flush()
                new_slice.dashboards.append(dash)
                old_to_new_sliceids["{}".format(slc.id)] = "{}".format(new_slice.id)
            # update chartId of layout entities
            # in v2_dash positions json data, chartId should be integer,
            # while in older version slice_id is string type
            for value in data["positions"].values():
                if (
                    isinstance(value, dict)
                    and value.get("meta")
                    and value.get("meta").get("chartId")
                ):
                    old_id = "{}".format(value.get("meta").get("chartId"))
                    new_id = int(old_to_new_sliceids[old_id])
                    value["meta"]["chartId"] = new_id
        else:
            # Shallow copy: new dashboard references the original slices.
            dash.slices = original_dash.slices
            dash.params = original_dash.params
        self._set_dash_metadata(dash, data)
        session.add(dash)
        session.commit()
        dash_json = json.dumps(dash.data)
        session.close()
        return json_success(dash_json)
    @api
    @has_access_api
    @expose("/save_dash/<dashboard_id>/", methods=["GET", "POST"])
    def save_dash(self, dashboard_id):
        """Save a dashboard's layout and metadata from the posted data."""
        session = db.session()
        dash = session.query(Dashboard).get(dashboard_id)
        # Raises when the current user may not edit this dashboard.
        check_ownership(dash, raise_if_false=True)
        data = json.loads(request.form.get("data"))
        self._set_dash_metadata(dash, data)
        session.merge(dash)
        session.commit()
        session.close()
        return json_success(json.dumps({"status": "SUCCESS"}))
    @staticmethod
    def _set_dash_metadata(dashboard, data):
        """Apply layout positions and JSON metadata from *data* onto
        *dashboard* (slices, slice names, position_json, json_metadata)."""
        positions = data["positions"]
        # find slices in the position data
        slice_ids = []
        slice_id_to_name = {}
        for value in positions.values():
            if isinstance(value, dict):
                try:
                    slice_id = value["meta"]["chartId"]
                    slice_ids.append(slice_id)
                    slice_id_to_name[slice_id] = value["meta"]["sliceName"]
                except KeyError:
                    pass
        session = db.session()
        current_slices = session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
        dashboard.slices = current_slices
        # update slice names. this assumes user has permissions to update the slice
        # we allow user set slice name be empty string
        for slc in dashboard.slices:
            try:
                new_name = slice_id_to_name[slc.id]
                if slc.slice_name != new_name:
                    slc.slice_name = new_name
                    session.merge(slc)
                    session.flush()
            except KeyError:
                pass
        # remove leading and trailing white spaces in the dumped json
        dashboard.position_json = json.dumps(
            positions, indent=None, separators=(",", ":"), sort_keys=True
        )
        md = dashboard.params_dict
        dashboard.css = data.get("css")
        dashboard.dashboard_title = data["dashboard_title"]
        if "timed_refresh_immune_slices" not in md:
            md["timed_refresh_immune_slices"] = []
        if "filter_scopes" in data:
            md["filter_scopes"] = json.loads(data["filter_scopes"] or "{}")
        md["expanded_slices"] = data["expanded_slices"]
        md["refresh_frequency"] = data.get("refresh_frequency", 0)
        # Keep only default filters that reference slices on this dashboard.
        default_filters_data = json.loads(data.get("default_filters", "{}"))
        applicable_filters = {
            key: v for key, v in default_filters_data.items() if int(key) in slice_ids
        }
        md["default_filters"] = json.dumps(applicable_filters)
        if data.get("color_namespace"):
            md["color_namespace"] = data.get("color_namespace")
        if data.get("color_scheme"):
            md["color_scheme"] = data.get("color_scheme")
        if data.get("label_colors"):
            md["label_colors"] = data.get("label_colors")
        dashboard.json_metadata = json.dumps(md)
    @api
    @has_access_api
    @expose("/add_slices/<dashboard_id>/", methods=["POST"])
    def add_slices(self, dashboard_id):
        """Add and save slices to a dashboard."""
        data = json.loads(request.form.get("data"))
        session = db.session()
        dash = session.query(Dashboard).get(dashboard_id)
        # Raises when the current user may not edit this dashboard.
        check_ownership(dash, raise_if_false=True)
        new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"]))
        dash.slices += new_slices
        session.merge(dash)
        session.commit()
        session.close()
        return "SLICES ADDED"
    @api
    @has_access_api
    @expose("/testconn", methods=["POST", "GET"])
    def testconn(self):
        """Tests a sqla connection by issuing SELECT 1 against it."""
        try:
            db_name = request.json.get("name")
            uri = request.json.get("uri")
            # if the database already exists in the database, only its safe (password-masked) URI
            # would be shown in the UI and would be passed in the form data.
            # so if the database already exists and the form was submitted with the safe URI,
            # we assume we should retrieve the decrypted URI to test the connection.
            if db_name:
                existing_database = (
                    db.session.query(models.Database)
                    .filter_by(database_name=db_name)
                    .one_or_none()
                )
                if existing_database and uri == existing_database.safe_sqlalchemy_uri():
                    uri = existing_database.sqlalchemy_uri_decrypted
            # this is the database instance that will be tested
            database = models.Database(
                # extras is sent as json, but required to be a string in the Database model
                extra=json.dumps(request.json.get("extras", {})),
                impersonate_user=request.json.get("impersonate_user"),
                encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})),
            )
            database.set_sqlalchemy_uri(uri)
            username = g.user.username if g.user is not None else None
            engine = database.get_sqla_engine(user_name=username)
            # `closing` guarantees the connection is released either way.
            with closing(engine.connect()) as conn:
                conn.scalar(select([1]))
            return json_success('"OK"')
        except Exception as e:
            logger.exception(e)
            return json_error_response(
                "Connection failed!\n\n" f"The error message returned was:\n{e}", 400
            )
    @api
    @has_access_api
    @expose("/recent_activity/<user_id>/", methods=["GET"])
    def recent_activity(self, user_id):
        """Recent activity (dashboard/slice views) for a user, as JSON."""
        M = models
        if request.args.get("limit"):
            limit = int(request.args.get("limit"))
        else:
            limit = 1000
        qry = (
            db.session.query(M.Log, M.Dashboard, Slice)
            .outerjoin(M.Dashboard, M.Dashboard.id == M.Log.dashboard_id)
            .outerjoin(Slice, Slice.id == M.Log.slice_id)
            .filter(
                and_(
                    # Exclude noisy, non-navigational actions.
                    ~M.Log.action.in_(("queries", "shortner", "sql_json")),
                    M.Log.user_id == user_id,
                )
            )
            .order_by(M.Log.dttm.desc())
            .limit(limit)
        )
        payload = []
        for log in qry.all():
            item_url = None
            item_title = None
            if log.Dashboard:
                item_url = log.Dashboard.url
                item_title = log.Dashboard.dashboard_title
            elif log.Slice:
                item_url = log.Slice.slice_url
                item_title = log.Slice.slice_name
            payload.append(
                {
                    "action": log.Log.action,
                    "item_url": item_url,
                    "item_title": item_title,
                    "time": log.Log.dttm,
                }
            )
        return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
    @api
    @has_access_api
    @expose("/csrf_token/", methods=["GET"])
    def csrf_token(self):
        """Return a fresh CSRF token for the frontend, rendered as JSON."""
        return Response(
            self.render_template("superset/csrf_token.json"), mimetype="text/json"
        )
    @api
    @has_access_api
    @expose("/available_domains/", methods=["GET"])
    def available_domains(self):
        """Return the configured webserver domains (for domain sharding)."""
        return Response(
            json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json"
        )
    @api
    @has_access_api
    @expose("/fave_dashboards_by_username/<username>/", methods=["GET"])
    def fave_dashboards_by_username(self, username):
        """Resolve a username to a user id and delegate to fave_dashboards."""
        user = security_manager.find_user(username=username)
        return self.fave_dashboards(user.get_id())
    @api
    @has_access_api
    @expose("/fave_dashboards/<user_id>/", methods=["GET"])
    def fave_dashboards(self, user_id):
        """Return the dashboards a user has favorited, newest first, as JSON."""
        qry = (
            db.session.query(Dashboard, models.FavStar.dttm)
            .join(
                models.FavStar,
                and_(
                    models.FavStar.user_id == int(user_id),
                    models.FavStar.class_name == "Dashboard",
                    Dashboard.id == models.FavStar.obj_id,
                ),
            )
            .order_by(models.FavStar.dttm.desc())
        )
        payload = []
        for o in qry.all():
            d = {
                "id": o.Dashboard.id,
                "dashboard": o.Dashboard.dashboard_link(),
                "title": o.Dashboard.dashboard_title,
                "url": o.Dashboard.url,
                "dttm": o.dttm,
            }
            if o.Dashboard.created_by:
                user = o.Dashboard.created_by
                d["creator"] = str(user)
                d["creator_url"] = "/superset/profile/{}/".format(user.username)
            payload.append(d)
        return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
    @api
    @has_access_api
    @expose("/created_dashboards/<user_id>/", methods=["GET"])
    def created_dashboards(self, user_id):
        """Return dashboards created or changed by a user, newest first."""
        Dash = Dashboard
        qry = (
            db.session.query(Dash)
            .filter(or_(Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id))
            .order_by(Dash.changed_on.desc())
        )
        payload = [
            {
                "id": o.id,
                "dashboard": o.dashboard_link(),
                "title": o.dashboard_title,
                "url": o.url,
                "dttm": o.changed_on,
            }
            for o in qry.all()
        ]
        return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
    @api
    @has_access_api
    @expose("/user_slices", methods=["GET"])
    @expose("/user_slices/<user_id>/", methods=["GET"])
    def user_slices(self, user_id=None):
        """List of slices a user created, changed, or favorited."""
        if not user_id:
            user_id = g.user.id
        FavStar = models.FavStar
        qry = (
            db.session.query(Slice, FavStar.dttm)
            .join(
                models.FavStar,
                and_(
                    models.FavStar.user_id == int(user_id),
                    models.FavStar.class_name == "slice",
                    Slice.id == models.FavStar.obj_id,
                ),
                # Outer join so non-favorited slices still appear.
                isouter=True,
            )
            .filter(
                or_(
                    Slice.created_by_fk == user_id,
                    Slice.changed_by_fk == user_id,
                    FavStar.user_id == user_id,
                )
            )
            .order_by(Slice.slice_name.asc())
        )
        payload = [
            {
                "id": o.Slice.id,
                "title": o.Slice.slice_name,
                "url": o.Slice.slice_url,
                "data": o.Slice.form_data,
                "dttm": o.dttm if o.dttm else o.Slice.changed_on,
                "viz_type": o.Slice.viz_type,
            }
            for o in qry.all()
        ]
        return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_slices", methods=["GET"])
@expose("/created_slices/<user_id>/", methods=["GET"])
def created_slices(self, user_id=None):
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice)
.filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))
.order_by(Slice.changed_on.desc())
)
payload = [
{
"id": o.id,
"title": o.slice_name,
"url": o.slice_url,
"dttm": o.changed_on,
"viz_type": o.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
    @api
    @has_access_api
    @expose("/fave_slices", methods=["GET"])
    @expose("/fave_slices/<user_id>/", methods=["GET"])
    def fave_slices(self, user_id=None):
        """List the charts favorited by a user, newest favorite first.

        Defaults to the logged-in user when *user_id* is omitted.
        """
        if not user_id:
            user_id = g.user.id
        # Inner join on FavStar restricts results to favorited charts only.
        qry = (
            db.session.query(Slice, models.FavStar.dttm)
            .join(
                models.FavStar,
                and_(
                    models.FavStar.user_id == int(user_id),
                    models.FavStar.class_name == "slice",
                    Slice.id == models.FavStar.obj_id,
                ),
            )
            .order_by(models.FavStar.dttm.desc())
        )
        payload = []
        for o in qry.all():
            d = {
                "id": o.Slice.id,
                "title": o.Slice.slice_name,
                "url": o.Slice.slice_url,
                "dttm": o.dttm,
                "viz_type": o.Slice.viz_type,
            }
            # Creator info is optional; only attach it when the FK is set.
            if o.Slice.created_by:
                user = o.Slice.created_by
                d["creator"] = str(user)
                d["creator_url"] = "/superset/profile/{}/".format(user.username)
            payload.append(d)
        return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/warm_up_cache/", methods=["GET"])
def warm_up_cache(self):
slices = None
session = db.session()
slice_id = request.args.get("slice_id")
table_name = request.args.get("table_name")
db_name = request.args.get("db_name")
if not slice_id and not (table_name and db_name):
return json_error_response(
__(
"Malformed request. slice_id or table_name and db_name "
"arguments are expected"
),
status=400,
)
if slice_id:
slices = session.query(Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(
__("Chart %(id)s not found", id=slice_id), status=404
)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources["table"]
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
models.Database.database_name == db_name
or SqlaTable.table_name == table_name
)
).one_or_none()
if not table:
return json_error_response(
__(
"Table %(t)s wasn't found in the database %(d)s",
t=table_name,
s=db_name,
),
status=404,
)
slices = (
session.query(Slice)
.filter_by(datasource_id=table.id, datasource_type=table.type)
.all()
)
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
obj.get_json()
except Exception as e:
logger.exception("Failed to warm up cache")
return json_error_response(utils.error_msg_from_exception(e))
return json_success(
json.dumps(
[{"slice_id": slc.id, "slice_name": slc.slice_name} for slc in slices]
)
)
    @has_access_api
    @expose("/favstar/<class_name>/<obj_id>/<action>/")
    def favstar(self, class_name, obj_id, action):
        """Toggle favorite stars on charts and dashboards for the current user.

        ``action`` is "select" (add a star if missing), "unselect" (delete
        this user's stars on the object) or anything else to just report
        the current count.  Returns ``{"count": n}`` as JSON.
        """
        session = db.session()
        FavStar = models.FavStar
        count = 0
        favs = (
            session.query(FavStar)
            .filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())
            .all()
        )
        if action == "select":
            # Only insert when no star exists yet; count is 1 either way.
            if not favs:
                session.add(
                    FavStar(
                        class_name=class_name,
                        obj_id=obj_id,
                        user_id=g.user.get_id(),
                        dttm=datetime.now(),
                    )
                )
            count = 1
        elif action == "unselect":
            for fav in favs:
                session.delete(fav)
        else:
            count = len(favs)
        session.commit()
        return json_success(json.dumps({"count": count}))
    @api
    @has_access_api
    @expose("/dashboard/<dashboard_id>/published/", methods=("GET", "POST"))
    def publish(self, dashboard_id):
        """Get or set a dashboard's published flag (deprecated endpoint).

        GET returns the flag; POST sets it from the ``published`` form
        field, allowed only for the dashboard owner or the Admin role.
        """
        logger.warning(
            "This API endpoint is deprecated and will be removed in version 1.0.0"
        )
        session = db.session()
        Role = ab_models.Role
        dash = (
            session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()
        )
        admin_role = session.query(Role).filter(Role.name == "Admin").one_or_none()
        if request.method == "GET":
            if dash:
                return json_success(json.dumps({"published": dash.published}))
            else:
                return json_error_response(
                    f"ERROR: cannot find dashboard {dashboard_id}", status=404
                )
        else:
            edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()
            if not edit_perm:
                return json_error_response(
                    f'ERROR: "{g.user.username}" cannot alter dashboard "{dash.dashboard_title}"',
                    status=403,
                )
            # Any value other than "true" (case-insensitive) unpublishes.
            dash.published = str(request.form["published"]).lower() == "true"
            session.commit()
            return json_success(json.dumps({"published": dash.published}))
    @has_access
    @expose("/dashboard/<dashboard_id>/")
    def dashboard(self, dashboard_id):
        """Server-side entry point for a dashboard page.

        ``dashboard_id`` may be a numeric id or a slug.  Gathers the
        dashboard's datasources, enforces access when
        ENABLE_ACCESS_REQUEST is on, computes the caller's permissions and
        renders the page (or returns the bootstrap payload when
        ``?json=true``).
        """
        session = db.session()
        qry = session.query(Dashboard)
        if dashboard_id.isdigit():
            qry = qry.filter_by(id=int(dashboard_id))
        else:
            qry = qry.filter_by(slug=dashboard_id)
        dash = qry.one_or_none()
        if not dash:
            abort(404)
        # Collect the distinct datasources backing the dashboard's charts.
        datasources = set()
        for slc in dash.slices:
            datasource = slc.datasource
            if datasource:
                datasources.add(datasource)
        if config["ENABLE_ACCESS_REQUEST"]:
            for datasource in datasources:
                if datasource and not security_manager.datasource_access(datasource):
                    flash(
                        __(
                            security_manager.get_datasource_access_error_msg(datasource)
                        ),
                        "danger",
                    )
                    return redirect(
                        "superset/request_access/?" f"dashboard_id={dash.id}&"
                    )
        dash_edit_perm = check_ownership(
            dash, raise_if_false=False
        ) and security_manager.can_access("can_save_dash", "Superset")
        dash_save_perm = security_manager.can_access("can_save_dash", "Superset")
        superset_can_explore = security_manager.can_access("can_explore", "Superset")
        superset_can_csv = security_manager.can_access("can_csv", "Superset")
        slice_can_edit = security_manager.can_access("can_edit", "SliceModelView")
        standalone_mode = (
            request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
        )
        edit_mode = (
            request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true"
        )
        # Inner function exists only so @event_logger.log_this can record
        # the dashboard view with these keyword arguments.
        @event_logger.log_this
        def dashboard(**kwargs):
            pass
        dashboard(
            dashboard_id=dash.id,
            dashboard_version="v2",
            dash_edit_perm=dash_edit_perm,
            edit_mode=edit_mode,
        )
        dashboard_data = dash.data
        dashboard_data.update(
            {
                "standalone_mode": standalone_mode,
                "dash_save_perm": dash_save_perm,
                "dash_edit_perm": dash_edit_perm,
                "superset_can_explore": superset_can_explore,
                "superset_can_csv": superset_can_csv,
                "slice_can_edit": slice_can_edit,
            }
        )
        # Forward any non-reserved query-string params to the frontend.
        url_params = {
            key: value
            for key, value in request.args.items()
            if key not in [param.value for param in utils.ReservedUrlParameters]
        }
        bootstrap_data = {
            "user_id": g.user.get_id(),
            "dashboard_data": dashboard_data,
            "datasources": {ds.uid: ds.data for ds in datasources},
            "common": common_bootstrap_payload(),
            "editMode": edit_mode,
            "urlParams": url_params,
        }
        if request.args.get("json") == "true":
            return json_success(
                json.dumps(bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser)
            )
        return self.render_template(
            "superset/dashboard.html",
            entry="dashboard",
            standalone_mode=standalone_mode,
            title=dash.dashboard_title,
            bootstrap_data=json.dumps(
                bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
            ),
        )
    @api
    @event_logger.log_this
    @expose("/log/", methods=["POST"])
    def log(self):
        """Sink for frontend logging events; @event_logger.log_this records them."""
        return Response(status=200)
    @has_access
    @expose("/sync_druid/", methods=["POST"])
    @event_logger.log_this
    def sync_druid_source(self):
        """Sync a Druid datasource definition into the metadata database.

        Expects a JSON body with ``config`` (the datasource spec), ``user``
        (owner username) and ``cluster`` (Druid cluster name).  Returns 201
        on success, or a JSON error when the user/cluster is unknown or
        the sync raises.
        """
        payload = request.get_json(force=True)
        druid_config = payload["config"]
        user_name = payload["user"]
        cluster_name = payload["cluster"]
        user = security_manager.find_user(username=user_name)
        DruidDatasource = ConnectorRegistry.sources["druid"]
        DruidCluster = DruidDatasource.cluster_class
        if not user:
            err_msg = __(
                "Can't find User '%(name)s', please ask your admin " "to create one.",
                name=user_name,
            )
            logger.error(err_msg)
            return json_error_response(err_msg)
        cluster = (
            db.session.query(DruidCluster)
            .filter_by(cluster_name=cluster_name)
            .one_or_none()
        )
        if not cluster:
            err_msg = __(
                "Can't find DruidCluster with cluster_name = " "'%(name)s'",
                name=cluster_name,
            )
            logger.error(err_msg)
            return json_error_response(err_msg)
        try:
            DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)
        except Exception as e:
            logger.exception(utils.error_msg_from_exception(e))
            return json_error_response(utils.error_msg_from_exception(e))
        return Response(status=201)
    @has_access
    @expose("/sqllab_viz/", methods=["POST"])
    @event_logger.log_this
    def sqllab_viz(self):
        """Create or update a SqlaTable datasource from a SQL Lab query.

        The posted ``data`` JSON carries the datasource name, database id,
        schema, template params, the SQL itself and the column list; a
        default count(*) metric is attached.  Returns the table id as JSON.
        """
        SqlaTable = ConnectorRegistry.sources["table"]
        data = json.loads(request.form.get("data"))
        table_name = data.get("datasourceName")
        database_id = data.get("dbId")
        table = (
            db.session.query(SqlaTable)
            .filter_by(database_id=database_id, table_name=table_name)
            .one_or_none()
        )
        if not table:
            table = SqlaTable(table_name=table_name, owners=[g.user])
        table.database_id = database_id
        table.schema = data.get("schema")
        table.template_params = data.get("templateParams")
        table.is_sqllab_view = True
        q = ParsedQuery(data.get("sql"))
        # Store the query stripped of comments/trailing semicolons.
        table.sql = q.stripped()
        db.session.add(table)
        cols = []
        for config in data.get("columns"):
            column_name = config.get("name")
            # NOTE(review): re-fetching SqlaTable on every iteration is
            # redundant but harmless; kept as-is.
            SqlaTable = ConnectorRegistry.sources["table"]
            TableColumn = SqlaTable.column_class
            SqlMetric = SqlaTable.metric_class
            col = TableColumn(
                column_name=column_name,
                filterable=True,
                groupby=True,
                is_dttm=config.get("is_date", False),
                type=config.get("type", False),
            )
            cols.append(col)
        table.columns = cols
        table.metrics = [SqlMetric(metric_name="count", expression="count(*)")]
        db.session.commit()
        return json_success(json.dumps({"table_id": table.id}))
@has_access
@expose("/extra_table_metadata/<database_id>/<table_name>/<schema>/")
@event_logger.log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(mydb, table_name, schema)
return json_success(json.dumps(payload))
    @has_access
    @expose("/select_star/<database_id>/<table_name>")
    @expose("/select_star/<database_id>/<table_name>/<schema>")
    @event_logger.log_this
    def select_star(self, database_id, table_name, schema=None):
        """Return a SELECT * statement for a table (deprecated endpoint).

        Responds 404 both when the database is missing and when the user
        lacks access, so table existence is not leaked.
        """
        logging.warning(
            f"{self.__class__.__name__}.select_star "
            "This API endpoint is deprecated and will be removed in version 1.0.0"
        )
        stats_logger.incr(f"{self.__class__.__name__}.select_star.init")
        database = db.session.query(models.Database).get(database_id)
        if not database:
            stats_logger.incr(
                f"deprecated.{self.__class__.__name__}.select_star.database_not_found"
            )
            return json_error_response("Not found", 404)
        schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
        table_name = utils.parse_js_uri_path_item(table_name)
        if not self.appbuilder.sm.can_access_datasource(database, table_name, schema):
            stats_logger.incr(
                f"deprecated.{self.__class__.__name__}.select_star.permission_denied"
            )
            logging.warning(
                f"Permission denied for user {g.user} on table: {table_name} "
                f"schema: {schema}"
            )
            return json_error_response("Not found", 404)
        stats_logger.incr(f"deprecated.{self.__class__.__name__}.select_star.success")
        return json_success(
            database.select_star(
                table_name, schema, latest_partition=True, show_cols=True
            )
        )
    @has_access_api
    @expose("/estimate_query_cost/<database_id>/", methods=["POST"])
    @expose("/estimate_query_cost/<database_id>/<schema>/", methods=["POST"])
    @event_logger.log_this
    def estimate_query_cost(
        self, database_id: int, schema: Optional[str] = None
    ) -> Response:
        """Estimate the cost of a SQL query before running it.

        Renders Jinja template params into the SQL, asks the database's
        engine spec for a cost estimate under a timeout, then formats it
        with an engine-specific formatter when one is configured.
        """
        mydb = db.session.query(models.Database).get(database_id)
        sql = json.loads(request.form.get("sql", '""'))
        template_params = json.loads(request.form.get("templateParams") or "{}")
        if template_params:
            template_processor = get_template_processor(mydb)
            sql = template_processor.process_template(sql, **template_params)
        timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT
        timeout_msg = f"The estimation exceeded the {timeout} seconds timeout."
        try:
            with utils.timeout(seconds=timeout, error_message=timeout_msg):
                cost = mydb.db_engine_spec.estimate_query_cost(
                    mydb, schema, sql, utils.sources.get("sql_lab")
                )
        except SupersetTimeoutException as e:
            logger.exception(e)
            return json_error_response(timeout_msg)
        except Exception as e:
            return json_error_response(str(e))
        spec = mydb.db_engine_spec
        # Prefer a feature-flag-configured formatter over the spec default.
        query_cost_formatters = get_feature_flags().get(
            "QUERY_COST_FORMATTERS_BY_ENGINE", {}
        )
        query_cost_formatter = query_cost_formatters.get(
            spec.engine, spec.query_cost_formatter
        )
        cost = query_cost_formatter(cost)
        return json_success(json.dumps(cost))
    @expose("/theme/")
    def theme(self):
        """Render the theme preview page."""
        return self.render_template("superset/theme.html")
    @has_access_api
    @expose("/results/<key>/")
    @event_logger.log_this
    def results(self, key):
        """Serve a cached query result identified by its results-backend key."""
        return self.results_exec(key)
    def results_exec(self, key: str):
        """Fetch a query result payload from the results backend.

        Enforces table-level access on the original query's SQL, supports
        an optional ``rows`` arg to cap the returned row count, and maps
        missing data to 410/404 as appropriate.
        """
        if not results_backend:
            return json_error_response("Results backend isn't configured")
        read_from_results_backend_start = now_as_float()
        blob = results_backend.get(key)
        stats_logger.timing(
            "sqllab.query.results_backend_read",
            now_as_float() - read_from_results_backend_start,
        )
        if not blob:
            # Results expired or were evicted from the backend.
            return json_error_response(
                "Data could not be retrieved. " "You may want to re-run the query.",
                status=410,
            )
        query = db.session.query(Query).filter_by(results_key=key).one_or_none()
        if query is None:
            return json_error_response(
                "Data could not be retrieved. You may want to re-run the query.",
                status=404,
            )
        rejected_tables = security_manager.rejected_tables(
            query.sql, query.database, query.schema
        )
        if rejected_tables:
            return json_error_response(
                security_manager.get_table_access_error_msg(rejected_tables), status=403
            )
        payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
        obj: dict = _deserialize_results_payload(
            payload, query, cast(bool, results_backend_use_msgpack)
        )
        if "rows" in request.args:
            try:
                rows = int(request.args["rows"])
            except ValueError:
                return json_error_response("Invalid `rows` argument", status=400)
            obj = apply_display_max_row_limit(obj, rows)
        return json_success(
            json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True)
        )
    @has_access_api
    @expose("/stop_query/", methods=["POST"])
    @event_logger.log_this
    @backoff.on_exception(
        backoff.constant,
        Exception,
        interval=1,
        on_backoff=lambda details: db.session.rollback(),
        on_giveup=lambda details: db.session.rollback(),
        max_tries=5,
    )
    def stop_query(self):
        """Mark a running query as stopped.

        Retried up to 5 times with a session rollback between attempts
        (backoff decorator) to ride out transient DB errors.  Queries that
        already reached a terminal state are left untouched.
        """
        client_id = request.form.get("client_id")
        query = db.session.query(Query).filter_by(client_id=client_id).one()
        if query.status in [
            QueryStatus.FAILED,
            QueryStatus.SUCCESS,
            QueryStatus.TIMED_OUT,
        ]:
            logger.error(
                f"Query with client_id {client_id} could not be stopped: query already complete"
            )
            return self.json_response("OK")
        query.status = QueryStatus.STOPPED
        db.session.commit()
        return self.json_response("OK")
    @has_access_api
    @expose("/validate_sql_json/", methods=["POST", "GET"])
    @event_logger.log_this
    def validate_sql_json(self):
        """Validate arbitrary SQL with the engine's configured validator.

        Returns a JSON list of validation errors (empty when the SQL is
        clean).  Template parameters are rejected because validation does
        not render Jinja.
        """
        sql = request.form.get("sql")
        database_id = request.form.get("database_id")
        schema = request.form.get("schema") or None
        template_params = json.loads(request.form.get("templateParams") or "{}")
        if len(template_params) > 0:
            # TODO: factor the Database object out of template rendering
            # or provide it as mydb so we can render template params
            # without having to also persist a Query ORM object.
            return json_error_response(
                "SQL validation does not support template parameters", status=400
            )
        session = db.session()
        mydb = session.query(models.Database).filter_by(id=database_id).one_or_none()
        if not mydb:
            return json_error_response(
                "Database with id {} is missing.".format(database_id), status=400
            )
        spec = mydb.db_engine_spec
        validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE")
        if not validators_by_engine or spec.engine not in validators_by_engine:
            return json_error_response(
                "no SQL validator is configured for {}".format(spec.engine), status=400
            )
        validator_name = validators_by_engine[spec.engine]
        validator = get_validator_by_name(validator_name)
        if not validator:
            return json_error_response(
                "No validator named {} found (configured for the {} engine)".format(
                    validator_name, spec.engine
                )
            )
        try:
            timeout = config["SQLLAB_VALIDATION_TIMEOUT"]
            timeout_msg = f"The query exceeded the {timeout} seconds timeout."
            with utils.timeout(seconds=timeout, error_message=timeout_msg):
                errors = validator.validate(sql, schema, mydb)
            payload = json.dumps(
                [err.to_dict() for err in errors],
                default=utils.pessimistic_json_iso_dttm_ser,
                ignore_nan=True,
                encoding=None,
            )
            return json_success(payload)
        except Exception as e:
            logger.exception(e)
            msg = _(
                f"{validator.name} was unable to check your query.\n"
                "Please recheck your query.\n"
                f"Exception: {e}"
            )
            # Return as a 400 if the database error message says we got a 4xx error
            if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(e)):
                return json_error_response(f"{msg}", status=400)
            else:
                return json_error_response(f"{msg}")
    def _sql_json_async(
        self,
        session: Session,
        rendered_query: str,
        query: Query,
        expand_data: bool,
        log_params: Optional[Dict[str, Any]] = None,
    ) -> str:
        """Hand a SQL Lab query off to a Celery worker and return 202.

        On dispatch failure the query is marked FAILED and a JSON error is
        returned; otherwise the serialized query (status PENDING) is
        returned immediately for the client to poll.
        """
        logger.info(f"Query {query.id}: Running query on a Celery worker")
        # Ignore the celery future object and the request may time out.
        try:
            sql_lab.get_sql_results.delay(
                query.id,
                rendered_query,
                return_results=False,
                store_results=not query.select_as_cta,
                user_name=g.user.username if g.user else None,
                start_time=now_as_float(),
                expand_data=expand_data,
                log_params=log_params,
            )
        except Exception as e:
            logger.exception(f"Query {query.id}: {e}")
            msg = _(
                "Failed to start remote query on a worker. "
                "Tell your administrator to verify the availability of "
                "the message queue."
            )
            query.status = QueryStatus.FAILED
            query.error_message = msg
            session.commit()
            return json_error_response("{}".format(msg))
        resp = json_success(
            json.dumps(
                {"query": query.to_dict()},
                default=utils.json_int_dttm_ser,
                ignore_nan=True,
            ),
            status=202,
        )
        session.commit()
        return resp
def _sql_json_sync(
self,
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> str:
try:
timeout = config["SQLLAB_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
store_results = (
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE")
and not query.select_as_cta
)
with utils.timeout(seconds=timeout, error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query.id,
rendered_query,
return_results=True,
store_results=store_results,
user_name=g.user.username if g.user else None,
expand_data=expand_data,
log_params=log_params,
)
payload = json.dumps(
apply_display_max_row_limit(data),
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logger.exception(f"Query {query.id}: {e}")
return json_error_response(f"{{e}}")
if data.get("status") == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access_api
@expose("/sql_json/", methods=["POST"])
@event_logger.log_this
def sql_json(self):
log_params = {
"user_agent": cast(Optional[str], request.headers.get("USER_AGENT"))
}
return self.sql_json_exec(request.json, log_params)
    def sql_json_exec(
        self, query_params: dict, log_params: Optional[Dict[str, Any]] = None
    ):
        """Run a SQL Lab query described by *query_params*.

        Persists a Query record, enforces table-level access, renders Jinja
        templates, applies the row limit, then dispatches either to a
        Celery worker (``runAsync``) or to synchronous execution.
        """
        # Collect Values
        database_id: int = cast(int, query_params.get("database_id"))
        schema: str = cast(str, query_params.get("schema"))
        sql: str = cast(str, query_params.get("sql"))
        try:
            template_params: dict = json.loads(
                query_params.get("templateParams") or "{}"
            )
        except json.JSONDecodeError:
            logger.warning(
                f"Invalid template parameter {query_params.get('templateParams')}"
                " specified. Defaulting to empty dict"
            )
            template_params = {}
        limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"]
        async_flag: bool = cast(bool, query_params.get("runAsync"))
        if limit < 0:
            logger.warning(
                f"Invalid limit of {limit} specified. Defaulting to max limit."
            )
            limit = 0
        select_as_cta: bool = cast(bool, query_params.get("select_as_cta"))
        tmp_table_name: str = cast(str, query_params.get("tmp_table_name"))
        client_id: str = cast(
            str, query_params.get("client_id") or utils.shortid()[:10]
        )
        sql_editor_id: str = cast(str, query_params.get("sql_editor_id"))
        tab_name: str = cast(str, query_params.get("tab"))
        status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING
        session = db.session()
        mydb = session.query(models.Database).get(database_id)
        if not mydb:
            return json_error_response(f"Database with id {database_id} is missing.")
        # Set tmp_table_name for CTA
        if select_as_cta and mydb.force_ctas_schema:
            tmp_table_name = f"{mydb.force_ctas_schema}.{tmp_table_name}"
        # Save current query
        query = Query(
            database_id=database_id,
            sql=sql,
            schema=schema,
            select_as_cta=select_as_cta,
            start_time=now_as_float(),
            tab_name=tab_name,
            status=status,
            sql_editor_id=sql_editor_id,
            tmp_table_name=tmp_table_name,
            user_id=g.user.get_id() if g.user else None,
            client_id=client_id,
        )
        try:
            session.add(query)
            session.flush()
            query_id = query.id
            session.commit()  # shouldn't be necessary
        except SQLAlchemyError as e:
            logger.error(f"Errors saving query details {e}")
            session.rollback()
            raise Exception(_("Query record was not created as expected."))
        if not query_id:
            raise Exception(_("Query record was not created as expected."))
        logger.info(f"Triggering query_id: {query_id}")
        rejected_tables = security_manager.rejected_tables(sql, mydb, schema)
        if rejected_tables:
            query.status = QueryStatus.FAILED
            session.commit()
            return json_error_response(
                security_manager.get_table_access_error_msg(rejected_tables),
                link=security_manager.get_table_access_link(rejected_tables),
                status=403,
            )
        try:
            template_processor = get_template_processor(
                database=query.database, query=query
            )
            rendered_query = template_processor.process_template(
                query.sql, **template_params
            )
        except Exception as e:
            error_msg = utils.error_msg_from_exception(e)
            return json_error_response(
                f"Query {query_id}: Template rendering failed: {error_msg}"
            )
        # Effective limit is the smaller of the SQL's own LIMIT (if any)
        # and the requested/configured limit.
        limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
        query.limit = min(lim for lim in limits if lim is not None)
        expand_data: bool = cast(
            bool,
            is_feature_enabled("PRESTO_EXPAND_DATA")
            and query_params.get("expand_data"),
        )
        if async_flag:
            return self._sql_json_async(
                session, rendered_query, query, expand_data, log_params
            )
        return self._sql_json_sync(
            session, rendered_query, query, expand_data, log_params
        )
    @has_access
    @expose("/csv/<client_id>")
    @event_logger.log_this
    def csv(self, client_id):
        """Download a query's results as a CSV attachment.

        Prefers the cached blob from the results backend; when absent,
        re-runs the stored SELECT against the database.  Table-level
        access on the query's SQL is enforced first.
        """
        logger.info("Exporting CSV file [{}]".format(client_id))
        query = db.session.query(Query).filter_by(client_id=client_id).one()
        rejected_tables = security_manager.rejected_tables(
            query.sql, query.database, query.schema
        )
        if rejected_tables:
            flash(security_manager.get_table_access_error_msg(rejected_tables))
            return redirect("/")
        blob = None
        if results_backend and query.results_key:
            logger.info(
                "Fetching CSV from results backend " "[{}]".format(query.results_key)
            )
            blob = results_backend.get(query.results_key)
        if blob:
            logger.info("Decompressing")
            payload = utils.zlib_decompress(
                blob, decode=not results_backend_use_msgpack
            )
            obj = _deserialize_results_payload(
                payload, query, results_backend_use_msgpack
            )
            columns = [c["name"] for c in obj["columns"]]
            df = pd.DataFrame.from_records(obj["data"], columns=columns)
            logger.info("Using pandas to convert to CSV")
            csv = df.to_csv(index=False, **config["CSV_EXPORT"])
        else:
            logger.info("Running a query to turn into CSV")
            sql = query.select_sql or query.executed_sql
            df = query.database.get_df(sql, query.schema)
            csv = df.to_csv(index=False, **config["CSV_EXPORT"])
        response = Response(csv, mimetype="text/csv")
        response.headers[
            "Content-Disposition"
        ] = f"attachment; filename={query.name}.csv"
        event_info = {
            "event_type": "data_export",
            "client_id": client_id,
            "row_count": len(df.index),
            "database": query.database.name,
            "schema": query.schema,
            "sql": query.sql,
            "exported_format": "csv",
        }
        logger.info(
            f"CSV exported: {repr(event_info)}", extra={"superset_event": event_info}
        )
        return response
@api
@handle_api_exception
@has_access
@expose("/fetch_datasource_metadata")
@event_logger.log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = request.args.get("datasourceKey").split("__")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
return json_success(json.dumps(datasource.data))
@has_access_api
@expose("/queries/<last_updated_ms>")
def queries(self, last_updated_ms):
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
return self.queries_exec(last_updated_ms_int)
def queries_exec(self, last_updated_ms_int: int):
stats_logger.incr("queries")
if not g.user.get_id():
return json_error_response(
"Please login to access the queries.", status=403
)
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))
    @has_access
    @expose("/search_queries")
    @event_logger.log_this
    def search_queries(self) -> Response:
        """Search SQL Lab query history.

        Users who cannot access all queries may only search their own;
        optional filters cover database, status, SQL substring and a
        start-time window.  Results are capped by QUERY_SEARCH_LIMIT.
        """
        query = db.session.query(Query)
        if security_manager.can_access_all_queries():
            search_user_id = request.args.get("user_id")
        elif (
            request.args.get("user_id") is not None
            and request.args.get("user_id") != g.user.get_user_id()
        ):
            # Non-privileged user asking for someone else's queries.
            return Response(status=403, mimetype="application/json")
        else:
            search_user_id = g.user.get_user_id()
        database_id = request.args.get("database_id")
        search_text = request.args.get("search_text")
        status = request.args.get("status")
        from_time = request.args.get("from")
        to_time = request.args.get("to")
        if search_user_id:
            query = query.filter(Query.user_id == search_user_id)
        if database_id:
            query = query.filter(Query.database_id == database_id)
        if status:
            query = query.filter(Query.status == status)
        if search_text:
            query = query.filter(Query.sql.like("%{}%".format(search_text)))
        if from_time:
            query = query.filter(Query.start_time > int(from_time))
        if to_time:
            query = query.filter(Query.start_time < int(to_time))
        query_limit = config["QUERY_SEARCH_LIMIT"]
        sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()
        dict_queries = [q.to_dict() for q in sql_queries]
        return Response(
            json.dumps(dict_queries, default=utils.json_int_dttm_ser),
            status=200,
            mimetype="application/json",
        )
@app.errorhandler(500)
def show_traceback(self):
return (
render_template("superset/traceback.html", error_msg=get_error_msg()),
500,
)
    @expose("/welcome")
    def welcome(self):
        """Personalized welcome page.

        Anonymous users are redirected to login; users with a configured
        welcome dashboard are shown that dashboard instead of the default
        welcome view.
        """
        if not g.user or not g.user.get_id():
            return redirect(appbuilder.get_url_for_login)
        welcome_dashboard_id = (
            db.session.query(UserAttribute.welcome_dashboard_id)
            .filter_by(user_id=g.user.get_id())
            .scalar()
        )
        if welcome_dashboard_id:
            return self.dashboard(str(welcome_dashboard_id))
        payload = {
            "user": bootstrap_user_data(g.user),
            "common": common_bootstrap_payload(),
        }
        return self.render_template(
            "superset/welcome.html",
            entry="welcome",
            bootstrap_data=json.dumps(
                payload, default=utils.pessimistic_json_iso_dttm_ser
            ),
        )
    @has_access
    @expose("/profile/<username>/")
    def profile(self, username):
        """User profile page for *username*; 404 when the user is unknown."""
        # Fall back to the logged-in user when the path segment is empty.
        if not username and g.user:
            username = g.user.username
        user = (
            db.session.query(ab_models.User).filter_by(username=username).one_or_none()
        )
        if not user:
            abort(404, description=f"User: {username} does not exist.")
        payload = {
            "user": bootstrap_user_data(user, include_perms=True),
            "common": common_bootstrap_payload(),
        }
        return self.render_template(
            "superset/basic.html",
            title=_("%(user)s's profile", user=username),
            entry="profile",
            bootstrap_data=json.dumps(
                payload, default=utils.pessimistic_json_iso_dttm_ser
            ),
        )
    @staticmethod
    def _get_sqllab_payload(user_id: int) -> Dict[str, Any]:
        """Assemble the bootstrap payload for the SQL Lab page.

        Tab state is always loaded; the database list and per-tab queries
        are only populated when SQLLAB_BACKEND_PERSISTENCE is enabled.
        """
        # send list of tab state ids
        tabs_state = (
            db.session.query(TabState.id, TabState.label)
            .filter_by(user_id=user_id)
            .all()
        )
        tab_state_ids = [tab_state[0] for tab_state in tabs_state]
        # return first active tab, or fallback to another one if no tab is active
        active_tab = (
            db.session.query(TabState)
            .filter_by(user_id=user_id)
            .order_by(TabState.active.desc())
            .first()
        )
        databases: Dict[int, Any] = {}
        queries: Dict[str, Any] = {}
        # These are unnecessary if sqllab backend persistence is disabled
        if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"):
            databases = {
                database.id: {
                    k: v for k, v in database.to_json().items() if k in DATABASE_KEYS
                }
                for database in db.session.query(models.Database).all()
            }
            # return all user queries associated with existing SQL editors
            user_queries = (
                db.session.query(Query)
                .filter_by(user_id=user_id)
                .filter(Query.sql_editor_id.cast(Integer).in_(tab_state_ids))
                .all()
            )
            queries = {
                query.client_id: {k: v for k, v in query.to_dict().items()}
                for query in user_queries
            }
        return {
            "defaultDbId": config["SQLLAB_DEFAULT_DBID"],
            "common": common_bootstrap_payload(),
            "tab_state_ids": tabs_state,
            "active_tab": active_tab.to_dict() if active_tab else None,
            "databases": databases,
            "queries": queries,
        }
@has_access
@expose("/sqllab")
def sqllab(self):
payload = self._get_sqllab_payload(g.user.get_id())
bootstrap_data = json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
)
return self.render_template(
"superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data
)
    @api
    @handle_api_exception
    @has_access_api
    @expose("/slice_query/<slice_id>/")
    def slice_query(self, slice_id):
        """Return the database query string generated for a chart."""
        viz_obj = get_viz(slice_id)
        # Raises when the caller may not view this chart's datasource.
        security_manager.assert_viz_permission(viz_obj)
        return self.get_query_string_response(viz_obj)
    @api
    @has_access_api
    @expose("/schemas_access_for_csv_upload")
    def schemas_access_for_csv_upload(self):
        """List the schemas of a database the user may CSV-upload into.

        Users with full database or all-datasource access get the raw
        allowed list; otherwise the list is intersected with the schemas
        the user can actually reach.
        """
        if not request.args.get("db_id"):
            return json_error_response("No database is allowed for your csv upload")
        db_id = int(request.args.get("db_id"))
        database = db.session.query(models.Database).filter_by(id=db_id).one()
        try:
            schemas_allowed = database.get_schema_access_for_csv_upload()
            if (
                security_manager.database_access(database)
                or security_manager.all_datasource_access()
            ):
                return self.json_response(schemas_allowed)
            # the list schemas_allowed should not be empty here
            # and the list schemas_allowed_processed returned from security_manager
            # should not be empty either,
            # otherwise the database should have been filtered out
            # in CsvToDatabaseForm
            schemas_allowed_processed = security_manager.schemas_accessible_by_user(
                database, schemas_allowed, False
            )
            return self.json_response(schemas_allowed_processed)
        except Exception as e:
            logger.exception(e)
            return json_error_response(
                "Failed to fetch schemas allowed for csv upload in this database! "
                "Please contact your Superset Admin!"
            )
class CssTemplateModelView(SupersetModelView, DeleteMixin):
    """CRUD model view for reusable CSS templates."""
    datamodel = SQLAInterface(models.CssTemplate)
    include_route_methods = RouteMethod.CRUD_SET
    list_title = _("CSS Templates")
    show_title = _("Show CSS Template")
    add_title = _("Add CSS Template")
    edit_title = _("Edit CSS Template")
    # The list shows only the name; the CSS body is exposed in add/edit.
    list_columns = ["template_name"]
    edit_columns = ["template_name", "css"]
    add_columns = edit_columns
    label_columns = {"template_name": _("Template Name")}
class CssTemplateAsyncModelView(CssTemplateModelView):
    """Read-only API variant of the CSS template view, exposing the CSS body."""
    include_route_methods = {RouteMethod.API_READ}
    list_columns = ["template_name", "css"]
@app.after_request
def apply_http_headers(response: Response):
    """Apply the configured HTTP headers to every outgoing response."""
    # HTTP_HEADERS is deprecated; merging both dicts keeps old configs working.
    merged = {**config["OVERRIDE_HTTP_HEADERS"], **config["HTTP_HEADERS"]}
    response.headers.extend(merged)
    # Defaults never clobber headers that were already set upstream.
    for header_name, header_value in config["DEFAULT_HTTP_HEADERS"].items():
        if header_name not in response.headers:
            response.headers[header_name] = header_value
    return response
| true | true |
1c4641077fa1b4a1700437711e9267173cfd5410 | 160 | py | Python | lights/gridlight_off.py | bprevost/brad_demo | 7c071709f763627d870e2b9e55be332e6af5f4c3 | [
"MIT"
] | null | null | null | lights/gridlight_off.py | bprevost/brad_demo | 7c071709f763627d870e2b9e55be332e6af5f4c3 | [
"MIT"
] | null | null | null | lights/gridlight_off.py | bprevost/brad_demo | 7c071709f763627d870e2b9e55be332e6af5f4c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import board
import neopixel
NUMPIXELS = 50
pixels = neopixel.NeoPixel(board.D21, NUMPIXELS)
pixels.fill((0, 0, 0)) # Turn off pixels
| 16 | 48 | 0.73125 |
import board
import neopixel
NUMPIXELS = 50
pixels = neopixel.NeoPixel(board.D21, NUMPIXELS)
pixels.fill((0, 0, 0))
| true | true |
1c4641397d7bb7c30bef7cad7ee43801ba62d268 | 2,251 | py | Python | dmgui_au/utilities/find_dropbox.py | Swanson-Hysell-Group/demag_gui_au | d1a233a82ec52dd5907bfee6885668a8c84ae892 | [
"BSD-3-Clause"
] | null | null | null | dmgui_au/utilities/find_dropbox.py | Swanson-Hysell-Group/demag_gui_au | d1a233a82ec52dd5907bfee6885668a8c84ae892 | [
"BSD-3-Clause"
] | null | null | null | dmgui_au/utilities/find_dropbox.py | Swanson-Hysell-Group/demag_gui_au | d1a233a82ec52dd5907bfee6885668a8c84ae892 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
try:
import json
except:
pass
def find_dropbox():
"""
Attempts to find local Dropbox folder using json file that Dropbox writes to
users' home directory. Will additionally search for `Hargraves_Data` folder
in the top directory (UC Berkeley Pmag Lab).
Returns
-------
string
Absolute path to Dropbox folder or subfolder, or another path given by
user input. If
"""
if os.path.isfile(os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))):
drpbx_info_file = os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))
drpbx_info = open(drpbx_info_file, 'r')
drpbx_json = drpbx_info.read()
drpbx_info.close()
try:
drpbx_dict = json.loads(drpbx_json)
except:
drpbx_dict = dict(eval(drpbx_json.replace('false','False').replace('true','True')))
finally:
drpbx_acts = list(drpbx_dict.keys())
if len(drpbx_acts)>1:
print("Found multiple Dropbox accounts:")
for i,j in enumerate(drpbx_acts):
print("[", i,"]", j)
n = input("Which account to use? [index number]: ")
drpbx_dict = drpbx_dict[drpbx_acts[n]]
else:
drpbx_dict = drpbx_dict[drpbx_acts[0]]
drpbx_path = os.path.abspath(drpbx_dict['path'])
else:
drpbx_path = ''
print("-W- There was a problem finding your Dropbox folder.")
return drpbx_path
# while not os.path.isdir(drpbx_path):
# drpbx_path = input("Please provide the path to your Dropbox, "
# "or press [Enter] to skip and provide a d.\n> ")
# if not drpbx_path:
# print("-E- Failed to find Dropbox folder")
# return drpbx_path
# elif os.path.isdir(os.path.realpath(os.path.expanduser(drpbx_path))):
# for UC Berkeley lab
if os.path.isdir(os.path.join(drpbx_path,"Hargraves_Data")):
drpbx_path = os.path.join(drpbx_path,"Hargraves_Data")
return drpbx_path
if __name__ == "__main__":
find_dropbox()
| 34.106061 | 95 | 0.58685 |
import os
import sys
import shutil
try:
import json
except:
pass
def find_dropbox():
if os.path.isfile(os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))):
drpbx_info_file = os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))
drpbx_info = open(drpbx_info_file, 'r')
drpbx_json = drpbx_info.read()
drpbx_info.close()
try:
drpbx_dict = json.loads(drpbx_json)
except:
drpbx_dict = dict(eval(drpbx_json.replace('false','False').replace('true','True')))
finally:
drpbx_acts = list(drpbx_dict.keys())
if len(drpbx_acts)>1:
print("Found multiple Dropbox accounts:")
for i,j in enumerate(drpbx_acts):
print("[", i,"]", j)
n = input("Which account to use? [index number]: ")
drpbx_dict = drpbx_dict[drpbx_acts[n]]
else:
drpbx_dict = drpbx_dict[drpbx_acts[0]]
drpbx_path = os.path.abspath(drpbx_dict['path'])
else:
drpbx_path = ''
print("-W- There was a problem finding your Dropbox folder.")
return drpbx_path
if os.path.isdir(os.path.join(drpbx_path,"Hargraves_Data")):
drpbx_path = os.path.join(drpbx_path,"Hargraves_Data")
return drpbx_path
if __name__ == "__main__":
find_dropbox()
| true | true |
1c46440c615bf74cb4301b1593107054081dbfd6 | 440 | py | Python | venv/Scripts/easy_install-script.py | TG-Techie/HackUMass0111 | 603344064605979b85a2e142caf7a2a7439d60f5 | [
"MIT"
] | null | null | null | venv/Scripts/easy_install-script.py | TG-Techie/HackUMass0111 | 603344064605979b85a2e142caf7a2a7439d60f5 | [
"MIT"
] | 1 | 2019-10-19T09:24:56.000Z | 2019-10-20T05:37:06.000Z | venv/Scripts/easy_install-script.py | TG-Techie/HackUMass0111 | 603344064605979b85a2e142caf7a2a7439d60f5 | [
"MIT"
] | 1 | 2019-10-18T14:18:28.000Z | 2019-10-18T14:18:28.000Z | #!C:\Users\danhi\hackumass0111\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| 33.846154 | 83 | 0.693182 |
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| true | true |
1c464563ae1c020a956ead49bce39b9e88737950 | 223 | py | Python | cannes_accomodation/tests/test_accomodation.py | Xogiga/CPOA_INEC_SAVIGNY_VALADE | f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a | [
"MIT"
] | null | null | null | cannes_accomodation/tests/test_accomodation.py | Xogiga/CPOA_INEC_SAVIGNY_VALADE | f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a | [
"MIT"
] | null | null | null | cannes_accomodation/tests/test_accomodation.py | Xogiga/CPOA_INEC_SAVIGNY_VALADE | f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a | [
"MIT"
] | null | null | null | class TestAccomodation:
def test_list_accomodation(self, client):
response = client.get('/accomodation')
assert response.status_code == 200
def test_update_accomodation(client):
pass
| 27.875 | 47 | 0.672646 | class TestAccomodation:
def test_list_accomodation(self, client):
response = client.get('/accomodation')
assert response.status_code == 200
def test_update_accomodation(client):
pass
| true | true |
1c4645726b27358a00869176f220b50c08f8f957 | 7,264 | py | Python | client/log.py | diophung/pyre-check | a488698d86b06b550c0e6e133009c1f396925af2 | [
"MIT"
] | null | null | null | client/log.py | diophung/pyre-check | a488698d86b06b550c0e6e133009c1f396925af2 | [
"MIT"
] | null | null | null | client/log.py | diophung/pyre-check | a488698d86b06b550c0e6e133009c1f396925af2 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List # noqa
LOG = logging.getLogger(__name__)
PERFORMANCE = 15
PROMPT = 50
SUCCESS = 60
stdout = io.StringIO()
class Color:
YELLOW = "\033[33m"
RED = "\033[31m"
class Format:
BOLD = "\033[1m"
CLEAR_LINE = "\x1b[0G\x1b[K"
CLEAR = "\033[0m"
TRUNCATE_OVERFLOW = "\033[?7l"
WRAP_OVERFLOW = "\033[?7h"
NEWLINE = "\n"
CURSOR_UP_LINE = "\x1b[1A"
HIDE_CURSOR = "\x1b[?25l"
SHOW_CURSOR = "\x1b[?25h"
class Character:
LAMBDA = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record):
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"]
_terminate = False # type: bool
_last_update = 0.0 # type: float
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = ""
self.setLevel(logging.INFO)
self._record = None
self._last_record = None
self._active_lines = 0
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self):
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record, age=None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler] # type: List[logging.Handler]
if not arguments.noninteractive:
try:
os.mkdir(".pyre")
except FileExistsError:
pass
file_handler = logging.FileHandler(".pyre/pyre.stderr")
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1
_flushed = False # type: bool
def __init__(self, section, data) -> None:
self._section = section
self._data = data
self._lock = threading.RLock()
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
| 27.938462 | 87 | 0.58549 |
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List
LOG = logging.getLogger(__name__)
PERFORMANCE = 15
PROMPT = 50
SUCCESS = 60
stdout = io.StringIO()
class Color:
YELLOW = "\033[33m"
RED = "\033[31m"
class Format:
BOLD = "\033[1m"
CLEAR_LINE = "\x1b[0G\x1b[K"
CLEAR = "\033[0m"
TRUNCATE_OVERFLOW = "\033[?7l"
WRAP_OVERFLOW = "\033[?7h"
NEWLINE = "\n"
CURSOR_UP_LINE = "\x1b[1A"
HIDE_CURSOR = "\x1b[?25l"
SHOW_CURSOR = "\x1b[?25h"
class Character:
LAMBDA = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record):
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"]
_terminate = False
_last_update = 0.0
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = ""
self.setLevel(logging.INFO)
self._record = None
self._last_record = None
self._active_lines = 0
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self):
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record, age=None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler]
if not arguments.noninteractive:
try:
os.mkdir(".pyre")
except FileExistsError:
pass
file_handler = logging.FileHandler(".pyre/pyre.stderr")
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1
_flushed = False
def __init__(self, section, data) -> None:
self._section = section
self._data = data
self._lock = threading.RLock()
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
| true | true |
1c4645c2167dbdc5384e8afc9db3098d68ffbf3f | 3,666 | py | Python | scripts/launch_test.py | amarildolikmeta/oac-explore | e3d63992a4ff33c8df593941f498457e94f81eb8 | [
"MIT"
] | null | null | null | scripts/launch_test.py | amarildolikmeta/oac-explore | e3d63992a4ff33c8df593941f498457e94f81eb8 | [
"MIT"
] | null | null | null | scripts/launch_test.py | amarildolikmeta/oac-explore | e3d63992a4ff33c8df593941f498457e94f81eb8 | [
"MIT"
] | 1 | 2021-12-13T15:38:41.000Z | 2021-12-13T15:38:41.000Z | import json
import sys
sys.path.append("../")
from trainer.particle_trainer import ParticleTrainer
from trainer.gaussian_trainer import GaussianTrainer
from trainer.trainer import SACTrainer
import numpy as np
import torch
from main import env_producer, get_policy_producer, get_q_producer
from utils.pythonplusplus import load_gzip_pickle
ts = '1584884279.5007188'
ts = '1589352957.4422379'
iter = 190
path = '../data/point/sac_/' + ts
ts = '1590677750.0582957'
path = '../data/point/mean_update_counts/p-oac_/' + ts
ts = '1595343877.9346888'
path = '../data/point/hard/terminal/ddpgcounts/p-oac_/no_bias/' + ts
restore = True
variant = json.load(open(path + '/variant.json', 'r'))
domain = variant['domain']
seed = variant['seed']
r_max = variant['r_max']
ensemble = variant['ensemble']
delta = variant['delta']
n_estimators = variant['n_estimators']
if seed == 0:
np.random.seed()
seed = np.random.randint(0, 1000000)
torch.manual_seed(seed)
np.random.seed(seed)
env_args = {}
if domain in ['riverswim']:
env_args['dim'] = variant['dim']
if domain in ['point']:
env_args['difficulty'] = variant['difficulty']
env_args['max_state'] = variant['max_state']
env_args['clip_state'] = variant['clip_state']
env_args['terminal'] = variant['terminal']
expl_env = env_producer(domain, seed, **env_args)
eval_env = env_producer(domain, seed * 10 + 1, **env_args)
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
# Get producer function for policy and value functions
M = variant['layer_size']
N = variant['num_layers']
alg = variant['alg']
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac'] and variant['share_layers']:
output_size = n_estimators
n_estimators = 1
else:
output_size = 1
ob = expl_env.reset()
print(ob)
q_producer = get_q_producer(obs_dim, action_dim, hidden_sizes=[M] * N, output_size=output_size)
policy_producer = get_policy_producer(
obs_dim, action_dim, hidden_sizes=[M] * N)
q_min = variant['r_min'] / (1 - variant['trainer_kwargs']['discount'])
q_max = variant['r_max'] / (1 - variant['trainer_kwargs']['discount'])
alg_to_trainer = {
'sac': SACTrainer,
'oac': SACTrainer,
'p-oac': ParticleTrainer,
'g-oac': GaussianTrainer
}
trainer = alg_to_trainer[variant['alg']]
kwargs ={ }
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac']:
n_estimators = variant['n_estimators']
kwargs = dict(
n_estimators=n_estimators,
delta=variant['delta'],
q_min=q_min,
q_max=q_max,
ensemble=variant['ensemble'],
n_policies=variant['n_policies'],
)
kwargs.update(dict(
policy_producer=policy_producer,
q_producer=q_producer,
action_space=expl_env.action_space,
))
print(kwargs)
kwargs.update(variant['trainer_kwargs'])
trainer = trainer(**kwargs)
# try:
# experiment = path + '/best.zip_pkl'
# exp = load_gzip_pickle(experiment)
# print(exp['epoch'])
# trainer.restore_from_snapshot(exp['trainer'])
# except:
experiment = path + '/params.zip_pkl'
exp = load_gzip_pickle(experiment)
print(exp['epoch'])
trainer.restore_from_snapshot(exp['trainer'])
for i in range(10):
s = expl_env.reset()
done = False
ret = 0
t = 0
while not done and t < 400:
expl_env.render()
if hasattr(trainer, 'target_policy'):
a, agent_info = trainer.target_policy.get_action(s, deterministic=True)
else:
a, agent_info = trainer.policy.get_action(s, deterministic=True)
s, r, done, _ = expl_env.step(a)
t += 1
ret += r
expl_env.render()
print("Return: ", ret)
input() | 30.04918 | 95 | 0.678396 | import json
import sys
sys.path.append("../")
from trainer.particle_trainer import ParticleTrainer
from trainer.gaussian_trainer import GaussianTrainer
from trainer.trainer import SACTrainer
import numpy as np
import torch
from main import env_producer, get_policy_producer, get_q_producer
from utils.pythonplusplus import load_gzip_pickle
ts = '1584884279.5007188'
ts = '1589352957.4422379'
iter = 190
path = '../data/point/sac_/' + ts
ts = '1590677750.0582957'
path = '../data/point/mean_update_counts/p-oac_/' + ts
ts = '1595343877.9346888'
path = '../data/point/hard/terminal/ddpgcounts/p-oac_/no_bias/' + ts
restore = True
variant = json.load(open(path + '/variant.json', 'r'))
domain = variant['domain']
seed = variant['seed']
r_max = variant['r_max']
ensemble = variant['ensemble']
delta = variant['delta']
n_estimators = variant['n_estimators']
if seed == 0:
np.random.seed()
seed = np.random.randint(0, 1000000)
torch.manual_seed(seed)
np.random.seed(seed)
env_args = {}
if domain in ['riverswim']:
env_args['dim'] = variant['dim']
if domain in ['point']:
env_args['difficulty'] = variant['difficulty']
env_args['max_state'] = variant['max_state']
env_args['clip_state'] = variant['clip_state']
env_args['terminal'] = variant['terminal']
expl_env = env_producer(domain, seed, **env_args)
eval_env = env_producer(domain, seed * 10 + 1, **env_args)
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
M = variant['layer_size']
N = variant['num_layers']
alg = variant['alg']
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac'] and variant['share_layers']:
output_size = n_estimators
n_estimators = 1
else:
output_size = 1
ob = expl_env.reset()
print(ob)
q_producer = get_q_producer(obs_dim, action_dim, hidden_sizes=[M] * N, output_size=output_size)
policy_producer = get_policy_producer(
obs_dim, action_dim, hidden_sizes=[M] * N)
q_min = variant['r_min'] / (1 - variant['trainer_kwargs']['discount'])
q_max = variant['r_max'] / (1 - variant['trainer_kwargs']['discount'])
alg_to_trainer = {
'sac': SACTrainer,
'oac': SACTrainer,
'p-oac': ParticleTrainer,
'g-oac': GaussianTrainer
}
trainer = alg_to_trainer[variant['alg']]
kwargs ={ }
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac']:
n_estimators = variant['n_estimators']
kwargs = dict(
n_estimators=n_estimators,
delta=variant['delta'],
q_min=q_min,
q_max=q_max,
ensemble=variant['ensemble'],
n_policies=variant['n_policies'],
)
kwargs.update(dict(
policy_producer=policy_producer,
q_producer=q_producer,
action_space=expl_env.action_space,
))
print(kwargs)
kwargs.update(variant['trainer_kwargs'])
trainer = trainer(**kwargs)
experiment = path + '/params.zip_pkl'
exp = load_gzip_pickle(experiment)
print(exp['epoch'])
trainer.restore_from_snapshot(exp['trainer'])
for i in range(10):
s = expl_env.reset()
done = False
ret = 0
t = 0
while not done and t < 400:
expl_env.render()
if hasattr(trainer, 'target_policy'):
a, agent_info = trainer.target_policy.get_action(s, deterministic=True)
else:
a, agent_info = trainer.policy.get_action(s, deterministic=True)
s, r, done, _ = expl_env.step(a)
t += 1
ret += r
expl_env.render()
print("Return: ", ret)
input() | true | true |
1c464629dbe7ff667eaf19f42c16ee577f2ed4fd | 1,277 | py | Python | Echoo/echoo.py | UsedToBe97/Echoo | b08069170bf470415b9fd91fcb943214b69805b8 | [
"MIT"
] | null | null | null | Echoo/echoo.py | UsedToBe97/Echoo | b08069170bf470415b9fd91fcb943214b69805b8 | [
"MIT"
] | null | null | null | Echoo/echoo.py | UsedToBe97/Echoo | b08069170bf470415b9fd91fcb943214b69805b8 | [
"MIT"
] | null | null | null | # import logging
# logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# level=logging.INFO)
import os
import argparse
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
def main(token, chat_id, msg):
bot = telegram.Bot(token=token)
bot.send_message(chat_id=chat_id, text=msg)
def run():
parser = argparse.ArgumentParser(description=r'''Echoo:: A tool let's your program echo.''')
parser.add_argument("-t", "--token", default=None, type=str, help="Token for your bot.")
parser.add_argument("-id", "--chat_id", default=None, type=str, help="Chat_id of your audience.")
parser.add_argument("msg", default="Are u ok?", type=str, help="Message to send")
args = parser.parse_args()
if args.token is None:
try:
args.token = os.environ["TG_TOKEN"]
except KeyError:
raise KeyError("Neither --token nor TG_TOKEN is set.")
if args.chat_id is None:
try:
args.chat_id = os.environ["TG_CHAT_ID"]
except KeyError:
raise KeyError("Neither --chat_id nor TG_CHAT_ID is set.")
main(token=args.token, chat_id=args.chat_id, msg=args.msg)
if __name__ == '__main__':
run()
| 30.404762 | 101 | 0.648395 |
import os
import argparse
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
def main(token, chat_id, msg):
bot = telegram.Bot(token=token)
bot.send_message(chat_id=chat_id, text=msg)
def run():
parser = argparse.ArgumentParser(description=r'''Echoo:: A tool let's your program echo.''')
parser.add_argument("-t", "--token", default=None, type=str, help="Token for your bot.")
parser.add_argument("-id", "--chat_id", default=None, type=str, help="Chat_id of your audience.")
parser.add_argument("msg", default="Are u ok?", type=str, help="Message to send")
args = parser.parse_args()
if args.token is None:
try:
args.token = os.environ["TG_TOKEN"]
except KeyError:
raise KeyError("Neither --token nor TG_TOKEN is set.")
if args.chat_id is None:
try:
args.chat_id = os.environ["TG_CHAT_ID"]
except KeyError:
raise KeyError("Neither --chat_id nor TG_CHAT_ID is set.")
main(token=args.token, chat_id=args.chat_id, msg=args.msg)
if __name__ == '__main__':
run()
| true | true |
1c464866312c86c67ec166f6a47982af30b5e1bc | 9,752 | py | Python | src/v5.1/resources/swagger_client/models/tpdm_teacher_candidate_academic_record_reference.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 2 | 2021-04-27T17:18:17.000Z | 2021-04-27T19:14:39.000Z | src/v5.1/resources/swagger_client/models/tpdm_teacher_candidate_academic_record_reference.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | null | null | null | src/v5.1/resources/swagger_client/models/tpdm_teacher_candidate_academic_record_reference.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 1 | 2022-01-06T09:43:11.000Z | 2022-01-06T09:43:11.000Z | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class TpdmTeacherCandidateAcademicRecordReference(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'education_organization_id': 'int',
'school_year': 'int',
'teacher_candidate_identifier': 'str',
'term_descriptor': 'str',
'link': 'Link'
}
attribute_map = {
'education_organization_id': 'educationOrganizationId',
'school_year': 'schoolYear',
'teacher_candidate_identifier': 'teacherCandidateIdentifier',
'term_descriptor': 'termDescriptor',
'link': 'link'
}
def __init__(self, education_organization_id=None, school_year=None, teacher_candidate_identifier=None, term_descriptor=None, link=None, _configuration=None): # noqa: E501
"""TpdmTeacherCandidateAcademicRecordReference - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._education_organization_id = None
self._school_year = None
self._teacher_candidate_identifier = None
self._term_descriptor = None
self._link = None
self.discriminator = None
self.education_organization_id = education_organization_id
self.school_year = school_year
self.teacher_candidate_identifier = teacher_candidate_identifier
self.term_descriptor = term_descriptor
if link is not None:
self.link = link
@property
def education_organization_id(self):
"""Gets the education_organization_id of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
The identifier assigned to an education organization. # noqa: E501
:return: The education_organization_id of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: int
"""
return self._education_organization_id
@education_organization_id.setter
def education_organization_id(self, education_organization_id):
"""Sets the education_organization_id of this TpdmTeacherCandidateAcademicRecordReference.
The identifier assigned to an education organization. # noqa: E501
:param education_organization_id: The education_organization_id of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and education_organization_id is None:
raise ValueError("Invalid value for `education_organization_id`, must not be `None`") # noqa: E501
self._education_organization_id = education_organization_id
@property
def school_year(self):
"""Gets the school_year of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
The identifier for the school year. # noqa: E501
:return: The school_year of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: int
"""
return self._school_year
@school_year.setter
def school_year(self, school_year):
"""Sets the school_year of this TpdmTeacherCandidateAcademicRecordReference.
The identifier for the school year. # noqa: E501
:param school_year: The school_year of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and school_year is None:
raise ValueError("Invalid value for `school_year`, must not be `None`") # noqa: E501
self._school_year = school_year
@property
def teacher_candidate_identifier(self):
"""Gets the teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
A unique alphanumeric code assigned to a teacher candidate. # noqa: E501
:return: The teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: str
"""
return self._teacher_candidate_identifier
@teacher_candidate_identifier.setter
def teacher_candidate_identifier(self, teacher_candidate_identifier):
"""Sets the teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference.
A unique alphanumeric code assigned to a teacher candidate. # noqa: E501
:param teacher_candidate_identifier: The teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and teacher_candidate_identifier is None:
raise ValueError("Invalid value for `teacher_candidate_identifier`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
teacher_candidate_identifier is not None and len(teacher_candidate_identifier) > 32):
raise ValueError("Invalid value for `teacher_candidate_identifier`, length must be less than or equal to `32`") # noqa: E501
self._teacher_candidate_identifier = teacher_candidate_identifier
@property
def term_descriptor(self):
"""Gets the term_descriptor of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
The term for the session during the school year. # noqa: E501
:return: The term_descriptor of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: str
"""
return self._term_descriptor
@term_descriptor.setter
def term_descriptor(self, term_descriptor):
"""Sets the term_descriptor of this TpdmTeacherCandidateAcademicRecordReference.
The term for the session during the school year. # noqa: E501
:param term_descriptor: The term_descriptor of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and term_descriptor is None:
raise ValueError("Invalid value for `term_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
term_descriptor is not None and len(term_descriptor) > 306):
raise ValueError("Invalid value for `term_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._term_descriptor = term_descriptor
@property
def link(self):
"""Gets the link of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:return: The link of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: Link
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this TpdmTeacherCandidateAcademicRecordReference.
:param link: The link of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: Link
"""
self._link = link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmTeacherCandidateAcademicRecordReference, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return True
return self.to_dict() != other.to_dict()
| 39.642276 | 482 | 0.675759 |
import pprint
import re
import six
from swagger_client.configuration import Configuration
class TpdmTeacherCandidateAcademicRecordReference(object):
swagger_types = {
'education_organization_id': 'int',
'school_year': 'int',
'teacher_candidate_identifier': 'str',
'term_descriptor': 'str',
'link': 'Link'
}
attribute_map = {
'education_organization_id': 'educationOrganizationId',
'school_year': 'schoolYear',
'teacher_candidate_identifier': 'teacherCandidateIdentifier',
'term_descriptor': 'termDescriptor',
'link': 'link'
}
def __init__(self, education_organization_id=None, school_year=None, teacher_candidate_identifier=None, term_descriptor=None, link=None, _configuration=None):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._education_organization_id = None
self._school_year = None
self._teacher_candidate_identifier = None
self._term_descriptor = None
self._link = None
self.discriminator = None
self.education_organization_id = education_organization_id
self.school_year = school_year
self.teacher_candidate_identifier = teacher_candidate_identifier
self.term_descriptor = term_descriptor
if link is not None:
self.link = link
@property
def education_organization_id(self):
return self._education_organization_id
@education_organization_id.setter
def education_organization_id(self, education_organization_id):
if self._configuration.client_side_validation and education_organization_id is None:
raise ValueError("Invalid value for `education_organization_id`, must not be `None`")
self._education_organization_id = education_organization_id
@property
def school_year(self):
return self._school_year
@school_year.setter
def school_year(self, school_year):
if self._configuration.client_side_validation and school_year is None:
raise ValueError("Invalid value for `school_year`, must not be `None`")
self._school_year = school_year
@property
def teacher_candidate_identifier(self):
return self._teacher_candidate_identifier
@teacher_candidate_identifier.setter
def teacher_candidate_identifier(self, teacher_candidate_identifier):
if self._configuration.client_side_validation and teacher_candidate_identifier is None:
raise ValueError("Invalid value for `teacher_candidate_identifier`, must not be `None`")
if (self._configuration.client_side_validation and
teacher_candidate_identifier is not None and len(teacher_candidate_identifier) > 32):
raise ValueError("Invalid value for `teacher_candidate_identifier`, length must be less than or equal to `32`")
self._teacher_candidate_identifier = teacher_candidate_identifier
@property
def term_descriptor(self):
return self._term_descriptor
@term_descriptor.setter
def term_descriptor(self, term_descriptor):
if self._configuration.client_side_validation and term_descriptor is None:
raise ValueError("Invalid value for `term_descriptor`, must not be `None`")
if (self._configuration.client_side_validation and
term_descriptor is not None and len(term_descriptor) > 306):
raise ValueError("Invalid value for `term_descriptor`, length must be less than or equal to `306`")
self._term_descriptor = term_descriptor
@property
def link(self):
return self._link
@link.setter
def link(self, link):
self._link = link
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmTeacherCandidateAcademicRecordReference, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return True
return self.to_dict() != other.to_dict()
| true | true |
1c464912ecb97ea85dc0a43a1776142eb3f9360b | 1,613 | py | Python | sip/examples/flask_processing_controller/app/api/subarray_list.py | SKA-ScienceDataProcessor/integration-prototype | 5875dc0489f707232534ce75daf3707f909bcd15 | [
"BSD-3-Clause"
] | 3 | 2016-11-08T02:27:05.000Z | 2018-01-22T13:26:11.000Z | sip/examples/flask_processing_controller/app/api/subarray_list.py | SKA-ScienceDataProcessor/integration-prototype | 5875dc0489f707232534ce75daf3707f909bcd15 | [
"BSD-3-Clause"
] | 87 | 2016-11-24T11:09:01.000Z | 2021-03-25T22:23:59.000Z | sip/examples/flask_processing_controller/app/api/subarray_list.py | SKA-ScienceDataProcessor/integration-prototype | 5875dc0489f707232534ce75daf3707f909bcd15 | [
"BSD-3-Clause"
] | 10 | 2016-05-18T09:41:36.000Z | 2019-07-04T10:19:24.000Z | # -*- coding: utf-8 -*-
"""Sub array route"""
import logging
from flask import Blueprint, request
from flask_api import status
from .utils import get_root_url, missing_db_response
from config_db import SchedulingBlockDbClient
BP = Blueprint('subarray_list:', __name__)
DB = SchedulingBlockDbClient()
LOG = logging.getLogger('SIP.EC.PCI')
@BP.route('/subarrays', methods=['GET'])
@missing_db_response
def get():
"""Subarray list.
This method will list all sub-arrays known to SDP.
"""
_url = get_root_url()
LOG.debug('GET Sub array list')
sub_array_ids = sorted(DB.get_sub_array_ids())
response = dict(sub_arrays=[])
for array_id in sub_array_ids:
array_summary = dict(sub_arrary_id=array_id)
block_ids = DB.get_sub_array_sbi_ids(array_id)
LOG.debug('Subarray IDs: %s', array_id)
LOG.debug('SBI IDs: %s', block_ids)
array_summary['num_scheduling_blocks'] = len(block_ids)
array_summary['links'] = {
'detail': '{}/sub-array/{}'.format(_url, array_id)
}
response['sub_arrays'].append(array_summary)
response['links'] = dict(self=request.url, home=_url)
return response, status.HTTP_200_OK
@BP.route('/subarrays/schedule', methods=['POST'])
@missing_db_response
def post():
"""Generate a SBI."""
_url = get_root_url()
LOG.debug("POST subarray SBI.")
# TODO(BM) generate sbi_config .. see report ...
# ... will need to add this as a util function on the db...
sbi_config = {}
DB.add_sbi(sbi_config)
response = dict()
return response, status.HTTP_200_OK
| 27.810345 | 63 | 0.67018 |
import logging
from flask import Blueprint, request
from flask_api import status
from .utils import get_root_url, missing_db_response
from config_db import SchedulingBlockDbClient
BP = Blueprint('subarray_list:', __name__)
DB = SchedulingBlockDbClient()
LOG = logging.getLogger('SIP.EC.PCI')
@BP.route('/subarrays', methods=['GET'])
@missing_db_response
def get():
_url = get_root_url()
LOG.debug('GET Sub array list')
sub_array_ids = sorted(DB.get_sub_array_ids())
response = dict(sub_arrays=[])
for array_id in sub_array_ids:
array_summary = dict(sub_arrary_id=array_id)
block_ids = DB.get_sub_array_sbi_ids(array_id)
LOG.debug('Subarray IDs: %s', array_id)
LOG.debug('SBI IDs: %s', block_ids)
array_summary['num_scheduling_blocks'] = len(block_ids)
array_summary['links'] = {
'detail': '{}/sub-array/{}'.format(_url, array_id)
}
response['sub_arrays'].append(array_summary)
response['links'] = dict(self=request.url, home=_url)
return response, status.HTTP_200_OK
@BP.route('/subarrays/schedule', methods=['POST'])
@missing_db_response
def post():
_url = get_root_url()
LOG.debug("POST subarray SBI.")
sbi_config = {}
DB.add_sbi(sbi_config)
response = dict()
return response, status.HTTP_200_OK
| true | true |
1c46491789a2b206ec7a467f93eaa6eeb029b3c1 | 4,899 | py | Python | train_face_recognition.py | JustinWingChungHui/okkindred_facial_recognition | e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1 | [
"MIT"
] | null | null | null | train_face_recognition.py | JustinWingChungHui/okkindred_facial_recognition | e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1 | [
"MIT"
] | 5 | 2019-10-21T20:33:13.000Z | 2022-03-12T00:00:19.000Z | train_face_recognition.py | JustinWingChungHui/okkindred_facial_recognition | e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1 | [
"MIT"
] | null | null | null | # https://github.com/ageitgey/face_recognition/blob/master/examples/face_recognition_knn.py
import math
import os
import pickle
from PIL import Image as PilImage
from sklearn import neighbors
from models import Person, Image, Tag, FaceModel
from secrets import TRAIN_FACE_RECOGNITION_TEMP_DIR
from file_downloader import download_file, clear_directory
import face_recognition
def get_file_for_tag(tag, image, session, dir_name):
'''
Gets file for tag and image
'''
print(' = Processing Tag and Image =')
print(' tag.id: {}'.format(tag.id))
print(' image.id: {}'.format(image.id))
file = download_file(dir_name, image.large_thumbnail)
print(' Opening Image')
original = PilImage.open(file)
print(' Cropping image')
left = tag.x1 * image.large_thumbnail_width
right = tag.x2 * image.large_thumbnail_width
top = tag.y1 * image.large_thumbnail_height
bottom = tag.y2 * image.large_thumbnail_height
cropped = original.crop((left, top, right, bottom))
cropped.save(file)
return file
def process_person(person, session, X, y):
'''
Processes images for one person
'''
print(' == Processing person name: {0} id: {1} =='.format(person.name, person.id))
dir_name = os.path.join(TRAIN_FACE_RECOGNITION_TEMP_DIR, str(person.id))
print(' Creating directory {}'.format(dir_name))
os.mkdir(dir_name)
files = []
if person.large_thumbnail:
print(' Getting profile photo'.format(dir_name))
files.append(download_file(dir_name, person.large_thumbnail))
print(' Get all face detected tags for person')
tags_and_images = session.query(Tag, Image). \
filter(Tag.person_id == person.id). \
filter(Tag.face_detected == True). \
filter(Tag.image_id == Image.id).all()
print(' Total number of tags: {}'.format(len(tags_and_images)))
for tag, image in tags_and_images:
files.append(get_file_for_tag(tag, image, session, dir_name))
print(' Process Images')
for file in files:
process_file(file, X, y, person.id)
def process_file(file, X, y, person_id):
print(' Creating face encoding for {}'.format(file))
im = face_recognition.load_image_file(file)
face_bounding_boxes = face_recognition.face_locations(im)
# Add face encoding for current image to the training set
if len(face_bounding_boxes) == 1:
print(' Adding face to model')
X.append(face_recognition.face_encodings(im, known_face_locations=face_bounding_boxes)[0])
y.append(person_id)
else:
print(' XXX No Face Found!!! XXX')
def process_family(family_id, session):
'''
Creates a K Nearest neighbour model for a family
'''
print('')
print('===== Processing Family_id: {} ====='.format(family_id))
print('Clearing working directory')
clear_directory(TRAIN_FACE_RECOGNITION_TEMP_DIR)
face_model = FaceModel(family_id = family_id)
print('Get all people for family')
people = session.query(Person).filter(Person.family_id == family_id).all()
print('Total number of people: {}'.format(len(people)))
X = []
y = []
for person in people:
process_person(person, session, X, y)
if (len(X) > 0):
n_neighbors = int(round(math.sqrt(len(X))))
print('Setting n_neighbors to {}'.format(n_neighbors))
print('Creating and training the KNN classifier')
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='ball_tree', weights='distance')
knn_clf.fit(X, y)
print('y:')
print(y)
print('Pickling and saving to db')
face_model.fit_data_faces = pickle.dumps(X)
face_model.fit_data_person_ids = pickle.dumps(y)
face_model.n_neighbors = n_neighbors
face_model.trained_knn_model = pickle.dumps(knn_clf)
session.add(face_model)
session.commit()
else:
print('Not enough data to create model')
#print('#############################################')
#print('')
#print('Connecting to db')
# mysql+mysqldb://<user>:<password>@<host>/<dbname>
#connection_string = 'mysql+mysqldb://{0}:{1}@{2}/{3}'.format(DATABASE['USER'],
# DATABASE['PASSWORD'],
# DATABASE['HOST'],
# DATABASE['NAME'])
#engine = create_engine(connection_string)
#Base.metadata.bind = engine
#DBSession = sessionmaker()
#DBSession.bind = engine
#session = DBSession()
#print('Get all families')
#families = session.query(Family).all()
#print('Total number of families: {}'.format(len(families)))
#for family in families:
# process_family(family.id, session)
| 31.203822 | 116 | 0.633395 |
import math
import os
import pickle
from PIL import Image as PilImage
from sklearn import neighbors
from models import Person, Image, Tag, FaceModel
from secrets import TRAIN_FACE_RECOGNITION_TEMP_DIR
from file_downloader import download_file, clear_directory
import face_recognition
def get_file_for_tag(tag, image, session, dir_name):
print(' = Processing Tag and Image =')
print(' tag.id: {}'.format(tag.id))
print(' image.id: {}'.format(image.id))
file = download_file(dir_name, image.large_thumbnail)
print(' Opening Image')
original = PilImage.open(file)
print(' Cropping image')
left = tag.x1 * image.large_thumbnail_width
right = tag.x2 * image.large_thumbnail_width
top = tag.y1 * image.large_thumbnail_height
bottom = tag.y2 * image.large_thumbnail_height
cropped = original.crop((left, top, right, bottom))
cropped.save(file)
return file
def process_person(person, session, X, y):
print(' == Processing person name: {0} id: {1} =='.format(person.name, person.id))
dir_name = os.path.join(TRAIN_FACE_RECOGNITION_TEMP_DIR, str(person.id))
print(' Creating directory {}'.format(dir_name))
os.mkdir(dir_name)
files = []
if person.large_thumbnail:
print(' Getting profile photo'.format(dir_name))
files.append(download_file(dir_name, person.large_thumbnail))
print(' Get all face detected tags for person')
tags_and_images = session.query(Tag, Image). \
filter(Tag.person_id == person.id). \
filter(Tag.face_detected == True). \
filter(Tag.image_id == Image.id).all()
print(' Total number of tags: {}'.format(len(tags_and_images)))
for tag, image in tags_and_images:
files.append(get_file_for_tag(tag, image, session, dir_name))
print(' Process Images')
for file in files:
process_file(file, X, y, person.id)
def process_file(file, X, y, person_id):
print(' Creating face encoding for {}'.format(file))
im = face_recognition.load_image_file(file)
face_bounding_boxes = face_recognition.face_locations(im)
if len(face_bounding_boxes) == 1:
print(' Adding face to model')
X.append(face_recognition.face_encodings(im, known_face_locations=face_bounding_boxes)[0])
y.append(person_id)
else:
print(' XXX No Face Found!!! XXX')
def process_family(family_id, session):
print('')
print('===== Processing Family_id: {} ====='.format(family_id))
print('Clearing working directory')
clear_directory(TRAIN_FACE_RECOGNITION_TEMP_DIR)
face_model = FaceModel(family_id = family_id)
print('Get all people for family')
people = session.query(Person).filter(Person.family_id == family_id).all()
print('Total number of people: {}'.format(len(people)))
X = []
y = []
for person in people:
process_person(person, session, X, y)
if (len(X) > 0):
n_neighbors = int(round(math.sqrt(len(X))))
print('Setting n_neighbors to {}'.format(n_neighbors))
print('Creating and training the KNN classifier')
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='ball_tree', weights='distance')
knn_clf.fit(X, y)
print('y:')
print(y)
print('Pickling and saving to db')
face_model.fit_data_faces = pickle.dumps(X)
face_model.fit_data_person_ids = pickle.dumps(y)
face_model.n_neighbors = n_neighbors
face_model.trained_knn_model = pickle.dumps(knn_clf)
session.add(face_model)
session.commit()
else:
print('Not enough data to create model')
| true | true |
1c4649bc75a615ae3b5e27abb7216ac014db4166 | 36,743 | py | Python | sklearn_extensions/model_selection/_search.py | ruppinlab/tcga-microbiome-prediction | e7923b94738f9bd1b7862bb109002554430d9ace | [
"BSD-3-Clause"
] | 3 | 2022-01-11T08:40:37.000Z | 2022-01-28T08:00:39.000Z | sklearn_extensions/model_selection/_search.py | ruppinlab/tcga-microbiome-prediction | e7923b94738f9bd1b7862bb109002554430d9ace | [
"BSD-3-Clause"
] | null | null | null | sklearn_extensions/model_selection/_search.py | ruppinlab/tcga-microbiome-prediction | e7923b94738f9bd1b7862bb109002554430d9ace | [
"BSD-3-Clause"
] | 1 | 2022-01-11T08:44:08.000Z | 2022-01-11T08:44:08.000Z | """
The :mod:`sklearn_extensions.model_selection._search` includes utilities to
fine-tune the parameters of an estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# Leandro Cruz Hermida <hermidal@cs.umd.edu>
# License: BSD 3 clause
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from itertools import product
import numbers
import time
import warnings
import numpy as np
from joblib import Parallel, delayed
from scipy.stats import rankdata
from sklearn.base import is_classifier, clone
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import _aggregate_score_dicts
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import (indexable, check_is_fitted,
_check_fit_params)
from ..metrics._scorer import _check_multimetric_scoring
from ..utils.metaestimators import check_routing
from ._validation import _fit_and_score
__all__ = ['ExtendedGridSearchCV']
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 2:
raise ValueError("Parameter array should be one- or "
"two-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class ExtendedBaseSearchCV(BaseSearchCV):
"""Abstract base class for hyper parameter search with cross-validation.
"""
@abstractmethod
def __init__(self, estimator, scoring=None, n_jobs=None, iid='deprecated',
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=True,
param_routing=None):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
self.param_routing = param_routing
self.router = check_routing(
self.param_routing, ['estimator', 'cv', 'scoring'],
{'cv': {'groups': 'groups', 'weights': 'group_weights'},
'estimator': ['-groups', '-group_weights']})
    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator's type tag (e.g. "classifier",
        # "regressor") so sklearn utilities treat the search object alike.
        return self.estimator._estimator_type
@property
def _pairwise(self):
# allows cross-validation to see 'precomputed' metrics
return getattr(self.estimator, '_pairwise', False)
def set_params(self, **params):
super().set_params(**params)
if 'param_routing' in params:
self.router = check_routing(
self.param_routing, ['estimator', 'cv', 'scoring'],
{'cv': {'groups': 'groups', 'weights': 'group_weights'},
'estimator': ['-groups', '-group_weights']})
return self
def score(self, X, y=None, **score_params):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
return score(self.best_estimator_, X, y, **score_params)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X, **predict_params):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X, **predict_params):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X, **predict_params):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X, **predict_params):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X, **transform_params):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X, **transform_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt, **transform_params):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt, **transform_params)
    @property
    def classes_(self):
        # Class labels of the refitted best estimator; only meaningful for
        # classifiers and only after refitting.
        self._check_is_fitted("classes_")
        return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
This method, implemented in sub-classes, makes it possible to
customize the the scheduling of evaluations: GridSearchCV and
RandomizedSearchCV schedule evaluations for their whole parameter
search space at once but other more sequential approaches are also
possible: for instance is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
Parameters
----------
evaluate_candidates : callable
This callback accepts a list of candidates, where each candidate is
a dict of parameter settings. It returns a dict of all results so
far, formatted like ``cv_results_``.
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
def fit(self, X, y=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, str) or
# This will work for both dict / list (tuple)
self.refit not in scorers) and not callable(self.refit):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed."
% self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
# so feature metadata/properties can work
feature_params = {k: v for k, v in fit_params.items()
if k == 'feature_meta'}
fit_params = {k: v for k, v in fit_params.items()
if k != 'feature_meta'}
X, y, *fit_params_values = indexable(X, y, *fit_params.values())
fit_params = dict(zip(fit_params.keys(), fit_params_values))
fit_params = _check_fit_params(X, fit_params)
(fit_params, cv_params, score_params), remainder = (
self.router(fit_params))
if remainder:
raise TypeError('fit() got unexpected keyword arguments %r'
% sorted(remainder))
n_splits = cv.get_n_splits(X, y, **cv_params)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
score_params=score_params,
feature_params=feature_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score)(clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
**fit_and_score_kwargs)
for parameters, (train, test)
in product(candidate_params,
cv.split(X, y, **cv_params)))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, scorers, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params, **feature_params)
else:
self.best_estimator_.fit(X, **fit_params, **feature_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, scorers, n_splits, out):
n_candidates = len(candidate_params)
# if one choose to see train score, "out" will contain train score info
if self.return_train_score:
(train_score_dicts, test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
else:
(test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
# test_score_dicts and train_score dicts are lists of dictionaries and
# we make them into dict of lists
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', fit_time)
_store('score_time', score_time)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
if self.iid != 'deprecated':
warnings.warn(
"The parameter 'iid' is deprecated in 0.22 and will be "
"removed in 0.24.", FutureWarning
)
iid = self.iid
else:
iid = False
for scorer_name in scorers.keys():
# Computed the (weighted) mean and std for test scores alone
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True,
weights=test_sample_counts if iid else None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores[scorer_name],
splits=True)
return results
class ExtendedGridSearchCV(ExtendedBaseSearchCV, GridSearchCV):
    """Exhaustive search over specified parameter values for an estimator.

    Behaves like :class:`sklearn.model_selection.GridSearchCV` -- it
    implements ``fit`` and ``score``, plus ``predict``, ``predict_proba``,
    ``decision_function``, ``transform`` and ``inverse_transform`` when the
    underlying estimator provides them -- while adding the ``param_routing``
    machinery of :class:`ExtendedBaseSearchCV`, which dispatches extra
    ``fit`` keyword arguments to the estimator, the CV splitter or the
    scorer. The parameters of the wrapped estimator are optimized by
    cross-validated grid-search over a parameter grid.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    estimator : estimator object
        Assumed to implement the scikit-learn estimator interface. Must
        either provide a ``score`` function, or ``scoring`` must be passed.
    param_grid : dict or list of dictionaries
        Dictionary with parameter names (string) as keys and lists of
        settings to try as values, or a list of such dictionaries, in which
        case the grids spanned by each dictionary are all explored.
    scoring : string, callable, list/tuple, dict or None, default: None
        Single-metric (string or callable) or multi-metric (list of unique
        strings, or dict of name -> callable) evaluation specification.
        Custom scorers must each return a single value. If None, the
        estimator's score method is used.
    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context; ``-1`` uses all processors.
    iid : boolean, default=False
        If True, test scores are weighted by fold size.

        .. deprecated:: 0.22
            Parameter ``iid`` is deprecated in 0.22 and removed in 0.24.
    refit : boolean, string, or callable, default=True
        Refit an estimator using the best found parameters on the whole
        dataset. For multi-metric evaluation this must name the scorer used
        to pick the best parameters, or be a callable returning
        ``best_index_`` given ``cv_results_``. The refitted estimator is
        made available as ``best_estimator_``.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy; None means 5-fold.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    pre_dispatch : int, or string, optional
        Number of jobs dispatched during parallel execution (e.g.
        ``'2*n_jobs'``); lowering it limits memory consumption.
    error_score : 'raise' or numeric
        Value assigned to the score when estimator fitting fails. Does not
        affect the refit step, which always raises. Default ``np.nan``.
    return_train_score : boolean, default=False
        If True, ``cv_results_`` also includes training scores.
    param_routing : routing specification, optional
        Declares how extra ``fit`` keyword arguments are routed to the
        estimator, the CV splitter and the scorer.

    Attributes
    ----------
    cv_results_ : dict of numpy (masked) ndarrays
        Tabular record of every candidate's parameters, per-split and
        aggregate test (and optionally train) scores, ranks and timings;
        importable into a pandas ``DataFrame``.
    best_estimator_ : estimator
        Estimator chosen by the search; absent if ``refit=False``.
    best_score_ : float
        Mean cross-validated score of the best estimator (not available
        when ``refit`` is a callable).
    best_params_ : dict
        Parameter setting that gave the best hold-out results.
    best_index_ : int
        Index into the ``cv_results_`` arrays for the best candidate.
    scorer_ : function or a dict
        Scorer function(s) used on the held-out data.
    n_splits_ : int
        The number of cross-validation splits (folds/iterations).
    refit_time_ : float
        Seconds used for refitting the best model on the whole dataset;
        present only if ``refit`` is not False.

    See Also
    --------
    :class:`ParameterGrid`, :func:`sklearn.metrics.make_scorer`,
    :func:`sklearn.model_selection.train_test_split`
    """
    _required_parameters = ["estimator", "param_grid"]

    def __init__(self, estimator, param_grid, scoring=None, n_jobs=None,
                 iid='deprecated', refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score=np.nan,
                 return_train_score=False, param_routing=None):
        super().__init__(
            estimator=estimator, scoring=scoring, n_jobs=n_jobs, iid=iid,
            refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch,
            error_score=error_score, return_train_score=return_train_score,
            param_routing=param_routing)
        self.param_grid = param_grid
        # Fail fast on malformed grids at construction time.
        _check_param_grid(param_grid)

    def _run_search(self, evaluate_candidates):
        """Search all candidates in param_grid"""
        evaluate_candidates(ParameterGrid(self.param_grid))
| 42.924065 | 82 | 0.590398 |
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from itertools import product
import numbers
import time
import warnings
import numpy as np
from joblib import Parallel, delayed
from scipy.stats import rankdata
from sklearn.base import is_classifier, clone
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import _aggregate_score_dicts
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import (indexable, check_is_fitted,
_check_fit_params)
from ..metrics._scorer import _check_multimetric_scoring
from ..utils.metaestimators import check_routing
from ._validation import _fit_and_score
__all__ = ['ExtendedGridSearchCV']
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 2:
raise ValueError("Parameter array should be one- or "
"two-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class ExtendedBaseSearchCV(BaseSearchCV):
    """Abstract base class for hyper-parameter search with parameter routing.

    Extends :class:`sklearn.model_selection._search.BaseSearchCV` with:

    * ``param_routing`` -- declares how extra ``fit`` keyword arguments are
      dispatched to the estimator, the CV splitter and the scorer (via
      ``check_routing``); by default ``groups``/``group_weights`` go to the
      splitter only.
    * a special ``feature_meta`` fit parameter that bypasses routing and
      sample-splitting and is forwarded unchanged to every fit/score call.
    """

    @abstractmethod
    def __init__(self, estimator, scoring=None, n_jobs=None, iid='deprecated',
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score=np.nan, return_train_score=True,
                 param_routing=None):
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
        self.return_train_score = return_train_score
        self.param_routing = param_routing
        # Router splits incoming fit kwargs into (estimator, cv, scoring)
        # buckets; 'groups'/'group_weights' are reserved for the splitter.
        self.router = check_routing(
            self.param_routing, ['estimator', 'cv', 'scoring'],
            {'cv': {'groups': 'groups', 'weights': 'group_weights'},
             'estimator': ['-groups', '-group_weights']})

    @property
    def _estimator_type(self):
        # Delegate classifier/regressor tagging to the wrapped estimator.
        return self.estimator._estimator_type

    @property
    def _pairwise(self):
        # True when the wrapped estimator expects precomputed kernels/distances.
        return getattr(self.estimator, '_pairwise', False)

    def set_params(self, **params):
        """Set parameters, rebuilding the router if ``param_routing`` changed."""
        super().set_params(**params)
        if 'param_routing' in params:
            self.router = check_routing(
                self.param_routing, ['estimator', 'cv', 'scoring'],
                {'cv': {'groups': 'groups', 'weights': 'group_weights'},
                 'estimator': ['-groups', '-group_weights']})
        return self

    def score(self, X, y=None, **score_params):
        """Score ``X``/``y`` on ``best_estimator_`` with the refit scorer."""
        self._check_is_fitted('score')
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
        return score(self.best_estimator_, X, y, **score_params)

    def _check_is_fitted(self, method_name):
        """Raise ``NotFittedError`` unless a refit best estimator exists."""
        if not self.refit:
            raise NotFittedError('This %s instance was initialized '
                                 'with refit=False. %s is '
                                 'available only after refitting on the best '
                                 'parameters. You can refit an estimator '
                                 'manually using the ``best_params_`` '
                                 'attribute'
                                 % (type(self).__name__, method_name))
        else:
            check_is_fitted(self)

    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict(self, X, **predict_params):
        """Call ``predict`` on the refit best estimator."""
        self._check_is_fitted('predict')
        return self.best_estimator_.predict(X, **predict_params)

    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict_proba(self, X, **predict_params):
        """Call ``predict_proba`` on the refit best estimator."""
        self._check_is_fitted('predict_proba')
        return self.best_estimator_.predict_proba(X, **predict_params)

    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict_log_proba(self, X, **predict_params):
        """Call ``predict_log_proba`` on the refit best estimator."""
        self._check_is_fitted('predict_log_proba')
        return self.best_estimator_.predict_log_proba(X, **predict_params)

    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def decision_function(self, X, **predict_params):
        """Call ``decision_function`` on the refit best estimator."""
        self._check_is_fitted('decision_function')
        return self.best_estimator_.decision_function(X, **predict_params)

    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def transform(self, X, **transform_params):
        """Call ``transform`` on the refit best estimator."""
        self._check_is_fitted('transform')
        return self.best_estimator_.transform(X, **transform_params)

    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def inverse_transform(self, Xt, **transform_params):
        """Call ``inverse_transform`` on the refit best estimator."""
        self._check_is_fitted('inverse_transform')
        return self.best_estimator_.inverse_transform(Xt, **transform_params)

    @property
    def classes_(self):
        self._check_is_fitted("classes_")
        return self.best_estimator_.classes_

    def _run_search(self, evaluate_candidates):
        """Subclasses generate candidates and call ``evaluate_candidates``."""
        raise NotImplementedError("_run_search not implemented.")

    def fit(self, X, y=None, **fit_params):
        """Run the search over all candidates, optionally refitting the best.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,) or (n_samples, n_output), optional
            Target values.
        **fit_params : dict of string -> object
            Extra parameters, dispatched to estimator/cv/scorer according to
            ``param_routing``.  The special key ``feature_meta`` bypasses
            routing and sample-splitting entirely.

        Returns
        -------
        self
        """
        estimator = self.estimator
        cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
        scorers, self.multimetric_ = _check_multimetric_scoring(
            self.estimator, scoring=self.scoring)
        if self.multimetric_:
            if self.refit is not False and (
                    not isinstance(self.refit, str) or
                    # This will work for both dict / list (tuple)
                    self.refit not in scorers) and not callable(self.refit):
                raise ValueError("For multi-metric scoring, the parameter "
                                 "refit must be set to a scorer key or a "
                                 "callable to refit an estimator with the "
                                 "best parameter setting on the whole "
                                 "data and make the best_* attributes "
                                 "available for that metric. If this is "
                                 "not needed, refit should be set to "
                                 "False explicitly. %r was passed."
                                 % self.refit)
            else:
                refit_metric = self.refit
        else:
            refit_metric = 'score'
        # so feature metadata/properties can work
        feature_params = {k: v for k, v in fit_params.items()
                          if k == 'feature_meta'}
        fit_params = {k: v for k, v in fit_params.items()
                      if k != 'feature_meta'}
        X, y, *fit_params_values = indexable(X, y, *fit_params.values())
        fit_params = dict(zip(fit_params.keys(), fit_params_values))
        fit_params = _check_fit_params(X, fit_params)
        # Route the remaining kwargs; anything unclaimed is an error.
        (fit_params, cv_params, score_params), remainder = (
            self.router(fit_params))
        if remainder:
            raise TypeError('fit() got unexpected keyword arguments %r'
                            % sorted(remainder))
        n_splits = cv.get_n_splits(X, y, **cv_params)
        base_estimator = clone(self.estimator)
        parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                            pre_dispatch=self.pre_dispatch)
        fit_and_score_kwargs = dict(scorer=scorers,
                                    fit_params=fit_params,
                                    score_params=score_params,
                                    feature_params=feature_params,
                                    return_train_score=self.return_train_score,
                                    return_n_test_samples=True,
                                    return_times=True,
                                    return_parameters=False,
                                    error_score=self.error_score,
                                    verbose=self.verbose)
        results = {}
        with parallel:
            all_candidate_params = []
            all_out = []

            def evaluate_candidates(candidate_params):
                """Fit/score every (candidate, split) pair; refresh results."""
                candidate_params = list(candidate_params)
                n_candidates = len(candidate_params)
                if self.verbose > 0:
                    print("Fitting {0} folds for each of {1} candidates,"
                          " totalling {2} fits".format(
                              n_splits, n_candidates, n_candidates * n_splits))
                out = parallel(delayed(_fit_and_score)(clone(base_estimator),
                                                       X, y,
                                                       train=train, test=test,
                                                       parameters=parameters,
                                                       **fit_and_score_kwargs)
                               for parameters, (train, test)
                               in product(candidate_params,
                                          cv.split(X, y, **cv_params)))
                if len(out) < 1:
                    raise ValueError('No fits were performed. '
                                     'Was the CV iterator empty? '
                                     'Were there no candidates?')
                elif len(out) != n_candidates * n_splits:
                    raise ValueError('cv.split and cv.get_n_splits returned '
                                     'inconsistent results. Expected {} '
                                     'splits, got {}'
                                     .format(n_splits,
                                             len(out) // n_candidates))
                all_candidate_params.extend(candidate_params)
                all_out.extend(out)
                nonlocal results
                results = self._format_results(
                    all_candidate_params, scorers, n_splits, all_out)
                return results
            self._run_search(evaluate_candidates)
        # For multi-metric evaluation, store the best_index_, best_params_ and
        # best_score_ iff refit is one of the scorer names
        # In single metric evaluation, refit_metric is "score"
        if self.refit or not self.multimetric_:
            # If callable, refit is expected to return the index of the best
            # parameter set.
            if callable(self.refit):
                self.best_index_ = self.refit(results)
                if not isinstance(self.best_index_, numbers.Integral):
                    raise TypeError('best_index_ returned is not an integer')
                if (self.best_index_ < 0 or
                        self.best_index_ >= len(results["params"])):
                    raise IndexError('best_index_ index out of range')
            else:
                self.best_index_ = results["rank_test_%s"
                                           % refit_metric].argmin()
                self.best_score_ = results["mean_test_%s" % refit_metric][
                    self.best_index_]
            self.best_params_ = results["params"][self.best_index_]
        if self.refit:
            # we clone again after setting params in case some
            # of the params are estimators as well.
            self.best_estimator_ = clone(clone(base_estimator).set_params(
                **self.best_params_))
            refit_start_time = time.time()
            if y is not None:
                self.best_estimator_.fit(X, y, **fit_params, **feature_params)
            else:
                self.best_estimator_.fit(X, **fit_params, **feature_params)
            refit_end_time = time.time()
            self.refit_time_ = refit_end_time - refit_start_time
        # Store the only scorer not as a dict for single metric evaluation
        self.scorer_ = scorers if self.multimetric_ else scorers['score']
        self.cv_results_ = results
        self.n_splits_ = n_splits
        return self

    def _format_results(self, candidate_params, scorers, n_splits, out):
        """Collate raw ``_fit_and_score`` output into the ``cv_results_`` dict."""
        n_candidates = len(candidate_params)
        # if one choose to see train score, "out" will contain train score info
        if self.return_train_score:
            (train_score_dicts, test_score_dicts, test_sample_counts, fit_time,
             score_time) = zip(*out)
        else:
            (test_score_dicts, test_sample_counts, fit_time,
             score_time) = zip(*out)
        # test_score_dicts and train_score dicts are lists of dictionaries and
        # we make them into dict of lists
        test_scores = _aggregate_score_dicts(test_score_dicts)
        if self.return_train_score:
            train_scores = _aggregate_score_dicts(train_score_dicts)
        results = {}
        def _store(key_name, array, weights=None, splits=False, rank=False):
            """A small helper to store the scores/times to the cv_results_"""
            # When iterated first by splits, then by parameters
            # We want `array` to have `n_candidates` rows and `n_splits` cols.
            array = np.array(array, dtype=np.float64).reshape(n_candidates,
                                                              n_splits)
            if splits:
                for split_i in range(n_splits):
                    # Uses closure to alter the results
                    results["split%d_%s"
                            % (split_i, key_name)] = array[:, split_i]
            array_means = np.average(array, axis=1, weights=weights)
            results['mean_%s' % key_name] = array_means
            # Weighted std is not directly available in numpy
            array_stds = np.sqrt(np.average((array -
                                             array_means[:, np.newaxis]) ** 2,
                                            axis=1, weights=weights))
            results['std_%s' % key_name] = array_stds
            if rank:
                results["rank_%s" % key_name] = np.asarray(
                    rankdata(-array_means, method='min'), dtype=np.int32)
        _store('fit_time', fit_time)
        _store('score_time', score_time)
        # Use one MaskedArray and mask all the places where the param is not
        # applicable for that candidate. Use defaultdict as each candidate may
        # not contain all the params
        param_results = defaultdict(partial(MaskedArray,
                                            np.empty(n_candidates,),
                                            mask=True,
                                            dtype=object))
        for cand_i, params in enumerate(candidate_params):
            for name, value in params.items():
                # An all masked empty array gets created for the key
                # `"param_%s" % name` at the first occurrence of `name`.
                # Setting the value at an index also unmasks that index
                param_results["param_%s" % name][cand_i] = value
        results.update(param_results)
        # Store a list of param dicts at the key 'params'
        results['params'] = candidate_params
        # NOTE test_sample counts (weights) remain the same for all candidates
        # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin ``int`` is what the alias always resolved to.
        test_sample_counts = np.array(test_sample_counts[:n_splits],
                                      dtype=int)
        if self.iid != 'deprecated':
            warnings.warn(
                "The parameter 'iid' is deprecated in 0.22 and will be "
                "removed in 0.24.", FutureWarning
            )
            iid = self.iid
        else:
            iid = False
        for scorer_name in scorers.keys():
            # Computed the (weighted) mean and std for test scores alone
            _store('test_%s' % scorer_name, test_scores[scorer_name],
                   splits=True, rank=True,
                   weights=test_sample_counts if iid else None)
            if self.return_train_score:
                _store('train_%s' % scorer_name, train_scores[scorer_name],
                       splits=True)
        return results
class ExtendedGridSearchCV(ExtendedBaseSearchCV, GridSearchCV):
    """Exhaustive grid search with parameter-routing support.

    Behaves like :class:`sklearn.model_selection.GridSearchCV` but inherits
    the ``param_routing`` machinery of :class:`ExtendedBaseSearchCV`.
    """
    _required_parameters = ["estimator", "param_grid"]

    def __init__(self, estimator, param_grid, scoring=None, n_jobs=None,
                 iid='deprecated', refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score=np.nan,
                 return_train_score=False, param_routing=None):
        super().__init__(
            estimator=estimator, scoring=scoring, n_jobs=n_jobs, iid=iid,
            refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch,
            error_score=error_score, return_train_score=return_train_score,
            param_routing=param_routing)
        self.param_grid = param_grid
        # Fail fast on malformed grids at construction time.
        _check_param_grid(param_grid)

    def _run_search(self, evaluate_candidates):
        """Evaluate every parameter combination in ``param_grid``."""
        evaluate_candidates(ParameterGrid(self.param_grid))
| true | true |
1c464a27e4586e149240c4356a12128973601b60 | 6,714 | py | Python | fw_neopixel_pride.py | tammymakesthings/fw_neopixel_pride | 3d8df503f7161a23b11d9298c62d45b2e6c17d60 | [
"MIT"
] | 2 | 2019-06-09T19:19:34.000Z | 2021-06-02T20:40:21.000Z | fw_neopixel_pride.py | tammymakesthings/fw_neopixel_pride | 3d8df503f7161a23b11d9298c62d45b2e6c17d60 | [
"MIT"
] | null | null | null | fw_neopixel_pride.py | tammymakesthings/fw_neopixel_pride | 3d8df503f7161a23b11d9298c62d45b2e6c17d60 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Pride Flag NeoPixel Badge
Displays a bunch of different Pride flags on a NeoPixel grid. Designed for use
with the Adafruit Feather M0 Express and NeoPixel FeatherWing.
Full details at <http://github.com/tammymakesthings/cpy_neopixel_pride>
@author: tammy.cravit
@license: MIT
"""
import sys
from time import sleep
# Delay in seconds between frames of the animation.
ANIMATION_SPEED = 0.3
# Time in seconds to hold each flag on screen before switching.
SHOW_PATTERN_DELAY = 15
# Intensity (0-1) of the NeoPixels. Higher intensity is brighter but draws
# more current.
PATTERN_INTENSITY = 0.3
# The number of rows in the NeoPixel grid (the NeoPixel FeatherWing is 4x8).
NUM_ROWS = 4
# The number of columns in the NeoPixel grid.
NUM_COLS = 8
# Board pin to which the NeoPixel is connected.  Left as None off-hardware so
# the module can still be imported (e.g. for testing) on regular CPython.
neopixel_pin = None
# The NeoPixel object controlling the pixels (None when not on hardware).
pixels = None
# Do the hardware setup if we're running on CircuitPython.
if sys.implementation.name == "circuitpython":
    import time
    # NOTE(review): ``time`` appears unused in this setup block -- presumably
    # used further down the file; confirm before removing.
    import board
    import neopixel
    # Control pin defaults to #6
    neopixel_pin = board.D6
    pixels = neopixel.NeoPixel(neopixel_pin, (NUM_ROWS * NUM_COLS),
                               brightness=PATTERN_INTENSITY, auto_write=False)
############################################################################
# Define all of the flag color palettes
############################################################################
flag_colors = {
    "-": (0, 0, 0),  # Black
    # LGBT Flag
    'A': (231, 0, 0),  # Electric Red
    'B': (224, 89, 17),  # Dark Orange
    'C': (255, 239, 0),  # Canary Yellow
    'D': (0, 129, 31),  # La Salle Green
    'E': (0, 68, 255),  # Blue (RYB)
    'F': (118, 0, 137),  # Patriarch
    # Trans Flag
    'G': (65, 175, 222),  # Maya Blue
    'H': (255, 255, 255),  # White
    'I': (217, 148, 144),  # Amaranth Pink
    # Bi Pride Flag
    'J': (215, 2, 112),  # Magenta
    'K': (115, 79, 150),  # Deep Lavender
    'L': (0, 56, 168),  # Royal
    # Nonbinary Flag
    'M': (255, 239, 0),  # Yellow
    'N': (230, 230, 230),  # White
    'O': (255, 20, 140),  # Lavender -- NOTE(review): value looks pink; confirm against NB palette
    # Pansexual Flag
    'P': (255, 20, 140),  # Deep Pink
    'Q': (255, 218, 0),  # Sizzling Sunrise
    'R': (5, 174, 255)  # Blue Bolt
}
############################################################################
# Define the actual flag patterns. Each pattern must reference colors defined
# in the associated color map. The pattern contains one letter per column of
# the display.
############################################################################
patterns = {
    'pride_flag': {'pattern': '-ABCDEF-', 'colors': flag_colors},
    # BUGFIX: trans_flag previously used the bi palette letters ('-JKLKJ--');
    # the trans colors G (blue), H (white), I (pink) were defined but unused.
    'trans_flag': {'pattern': '-GIHIG--', 'colors': flag_colors},
    'bi_flag'   : {'pattern': '--JJKLL-', 'colors': flag_colors},
    'nb_flag'   : {'pattern': 'MMNNOO--', 'colors': flag_colors},
    'pan_flag'  : {'pattern': '-PPQQRR-', 'colors': flag_colors},
}
############################################################################
# Helper functions
############################################################################
def clear_pixels(rows=NUM_ROWS, cols=NUM_COLS):
    """
    .. function:: clear_pixels([rows, cols])

    Clear the entire pixel array.

    Sets all of the pixels in the NeoPixel array to black and then writes
    the values to the array. Has no effect if not running on a CircuitPython
    device (``pixels`` is None there).

    :param rows: number of rows in the array (defaults to value of NUM_ROWS)
    :param cols: number of cols in the array (defaults to value of NUM_COLS)
    :rtype: None
    """
    print("inside clearPixels({0}, {1})".format(rows, cols))
    if pixels is not None:
        # BUGFIX: NeoPixel.fill() takes a single color value (an (R, G, B)
        # tuple), not three positional ints; the original fill(0, 0, 0)
        # raised TypeError on hardware.
        pixels.fill((0, 0, 0))
        pixels.show()
def set_column(display_column, rgb_value):
    """Paint every row of one display column with a single color.

    No-op (beyond the trace print) when not running on hardware, i.e.
    when ``pixels`` is None.

    :param display_column: zero-based column index on the grid
    :param rgb_value: (R, G, B) tuple to write into each pixel
    :rtype: None
    """
    print('Called set_column({0}, {1})'.format(display_column, rgb_value))
    if pixels is None:
        return
    # The FeatherWing grid is row-major: NUM_COLS pixels per row.
    for row in range(NUM_ROWS):
        pixels[row * NUM_COLS + display_column] = rgb_value
def slide_in_animation(the_pattern, color_map, animation_speed=ANIMATION_SPEED):
    """
    .. function:: slide_in_animation(the_pattern, color_map, animation_speed)

    Render the slide-in animation for a single flag: each frame shifts the
    pattern one column further left until it fills the display.

    :param the_pattern: The flag pattern, rendered as a string. Each character
    in the string should match a color in the color map.
    :param color_map: The color map. The keys of the dictionary should be a
    character from the pattern. The value of the dictionary entries should be
    3-tuples with the R, G, B values for the specified color.
    :type color_map: dict
    :param animation_speed: The time (in seconds) to sleep between frames of
    the animation.
    :rtype: None
    """
    print("inside slideInAnimation({0}, {1}, {2})".format(the_pattern, color_map, animation_speed))
    # Frame i places the pattern's first letter at column (len - i - 1), so
    # the flag appears from the right edge and slides toward column 0.
    for i in range(0, len(the_pattern)):
        starting_column = len(the_pattern) - i - 1
        ending_column = len(the_pattern)
        which_letter = 0
        print("Animation: Repetition {0}, starting column={1}".format(i+1, starting_column))
        # Columns to the left of the pattern are blanked (black).
        for j in range(0, starting_column):
            set_column(j, (0,0,0))
            print("-", sep='', end='')
        # Remaining columns show as much of the pattern as fits this frame;
        # note only the leading letters of the pattern are visible until the
        # final frame.
        for j in range(starting_column, ending_column):
            print(the_pattern[which_letter], sep='', end='')
            set_column(j, color_map[the_pattern[which_letter]])
            which_letter += 1
        print('\n')
        # Push the frame to the physical pixels only on real hardware.
        if sys.implementation.name == "circuitpython":
            pixels.show()
        sleep(animation_speed)
def renderAllPatterns(the_patterns):
    """Render every flag in *the_patterns* once, pausing between flags.

    :param the_patterns: mapping of flag name -> {'pattern': str,
        'colors': dict}, as in the module-level ``patterns`` dict.
    :rtype: None
    """
    for flag_name in the_patterns:
        print("renderAllPatterns(): rendering flag: {0}".format(flag_name))
        entry = the_patterns[flag_name]
        slide_in_animation(entry['pattern'], entry['colors'])
        # Hold the finished flag on screen before moving to the next one.
        sleep(SHOW_PATTERN_DELAY)
############################################################################
# Main execution loop
############################################################################
if __name__=="__main__":
    # Cycle through every configured flag forever; each pass renders all
    # flags with their slide-in animation and hold delay.
    while True:
        renderAllPatterns(patterns)
import sys
from time import sleep
ANIMATION_SPEED = 0.3
SHOW_PATTERN_DELAY = 15
PATTERN_INTENSITY = 0.3
NUM_ROWS = 4
NUM_COLS = 8
neopixel_pin = None
pixels = None
if sys.implementation.name == "circuitpython":
import time
import board
import neopixel
# Control pin defaults to #6
neopixel_pin = board.D6
pixels = neopixel.NeoPixel(neopixel_pin, (NUM_ROWS * NUM_COLS),
brightness=PATTERN_INTENSITY, auto_write=False)
############################################################################
# Define all of the flag color palettes
############################################################################
flag_colors = {
"-": (0, 0, 0), # Black
# LGBT Flag
'A': (231, 0, 0), # Electric Red
'B': (224, 89, 17), # Dark Orange
'C': (255, 239, 0), # Canary Yellow
'D': (0, 129, 31), # La Salle Green
'E': (0, 68, 255), # Blue (RYB)
'F': (118, 0, 137), # Patriarch
# Trans Flag
'G': (65, 175, 222), # Maya Blue
'H': (255, 255, 255), # White
'I': (217, 148, 144), # Amaranth Pink
# Bi Pride Flag
'J': (215, 2, 112), # Magenta
'K': (115, 79, 150), # Deep Lavender
'L': (0, 56, 168), # Royal
# Nonbinary Flag
'M': (255, 239, 0), # Yellow
'N': (230, 230, 230), # White
'O': (255, 20, 140), # Lavender
# Pansexual Flag
'P': (255, 20, 140), # Deep Pink
'Q': (255, 218, 0), # Sizzling Sunrise
'R': (5, 174, 255) # Blue Bolt
}
############################################################################
# Define the actual flag patterns. Each pattern must refernece colors defined
# in the associated color map. The pattern contains one letter per column of
# the display.
############################################################################
patterns = {
'pride_flag': {'pattern': '-ABCDEF-', 'colors': flag_colors},
'trans_flag': {'pattern': '-JKLKJ--', 'colors': flag_colors},
'bi_flag' : {'pattern': '--JJKLL-', 'colors': flag_colors},
'nb_flag' : {'pattern': 'MMNNOO--', 'colors': flag_colors},
'pan_flag' : {'pattern': '-PPQQRR-', 'colors': flag_colors},
}
############################################################################
# Helper functions
############################################################################
def clear_pixels(rows=NUM_ROWS, cols=NUM_COLS):
print("inside clearPixels({0}, {1})".format(rows, cols))
if pixels is not None:
pixels.fill(0, 0, 0)
pixels.show()
def set_column(display_column, rgb_value):
print('Called set_column({0}, {1})'.format(display_column, rgb_value))
if pixels is not None:
for i in range(0, NUM_ROWS):
which_pixel = (i * NUM_COLS) + display_column
pixels[which_pixel] = rgb_value
def slide_in_animation(the_pattern, color_map, animation_speed=ANIMATION_SPEED):
print("inside slideInAnimation({0}, {1}, {2})".format(the_pattern, color_map, animation_speed))
for i in range(0, len(the_pattern)):
starting_column = len(the_pattern) - i - 1
ending_column = len(the_pattern)
which_letter = 0
print("Animation: Repetition {0}, starting column={1}".format(i+1, starting_column))
for j in range(0, starting_column):
set_column(j, (0,0,0))
print("-", sep='', end='')
for j in range(starting_column, ending_column):
print(the_pattern[which_letter], sep='', end='')
set_column(j, color_map[the_pattern[which_letter]])
which_letter += 1
print('\n')
if sys.implementation.name == "circuitpython":
pixels.show()
sleep(animation_speed)
def renderAllPatterns(the_patterns):
for pattern_name, pattern_data in the_patterns.items():
print("renderAllPatterns(): rendering flag: {0}".format(pattern_name))
the_pattern = pattern_data['pattern']
color_map = pattern_data['colors']
slide_in_animation(the_pattern, color_map)
sleep(SHOW_PATTERN_DELAY)
############################################################################
# Main execution loop
############################################################################
if __name__=="__main__":
while True:
renderAllPatterns(patterns) | true | true |
1c464c7f4975739b483955b49e931f3e73459cb0 | 966 | py | Python | pietoolbelt/augmentations/segmentation.py | kitkat52/pietoolbelt | 0e0b5859662fcb43b008218746cc3e76cc66b6b8 | [
"MIT"
] | 1 | 2021-05-30T08:21:12.000Z | 2021-05-30T08:21:12.000Z | pietoolbelt/augmentations/segmentation.py | kitkat52/pietoolbelt | 0e0b5859662fcb43b008218746cc3e76cc66b6b8 | [
"MIT"
] | 7 | 2020-07-07T21:04:08.000Z | 2021-12-13T10:08:17.000Z | pietoolbelt/augmentations/segmentation.py | kitkat52/pietoolbelt | 0e0b5859662fcb43b008218746cc3e76cc66b6b8 | [
"MIT"
] | 1 | 2021-06-17T09:21:39.000Z | 2021-06-17T09:21:39.000Z | import numpy as np
import torch
from .common import BaseAugmentations
__all__ = ['SegmentationAugmentations']
class SegmentationAugmentations(BaseAugmentations):
    """Augmentation pipeline for segmentation: jointly transforms an image
    and its mask, optionally converting both to PyTorch tensors."""

    def __init__(self, is_train: bool, to_pytorch: bool, preprocess: callable):
        super().__init__(is_train, to_pytorch, preprocess)

    def augmentation(self, data: dict) -> dict:
        image, target = data['data'], data['target']
        # Normalize the mask into [0, 1]; the epsilon guards against an
        # all-zero mask (max() == 0).
        normalized_mask = target / (target.max() + 1e-7)
        result = self._aug(image=image, mask=normalized_mask)
        out_image = result['image']
        out_mask = result['mask']
        if self._need_to_pytorch:
            out_image = self.img_to_pytorch(out_image)
            out_mask = self.mask_to_pytorch(out_mask)
        return {'data': out_image, 'target': out_mask}

    @staticmethod
    def img_to_pytorch(image):
        # HWC -> CHW, scale [0, 255] -> [-1, 1), then add a batch dimension.
        chw = np.moveaxis(image, -1, 0).astype(np.float32) / 128 - 1
        return torch.from_numpy(np.expand_dims(chw, axis=0))

    @staticmethod
    def mask_to_pytorch(mask):
        # Add a leading channel/batch dimension to the float mask.
        return torch.from_numpy(np.expand_dims(mask.astype(np.float32), axis=0))
| 33.310345 | 111 | 0.68323 | import numpy as np
import torch
from .common import BaseAugmentations
__all__ = ['SegmentationAugmentations']
class SegmentationAugmentations(BaseAugmentations):
def __init__(self, is_train: bool, to_pytorch: bool, preprocess: callable):
super().__init__(is_train, to_pytorch, preprocess)
def augmentation(self, data: dict) -> dict:
augmented = self._aug(image=data['data'], mask=data['target'] / (data['target'].max() + 1e-7))
img, mask = augmented['image'], augmented['mask']
if self._need_to_pytorch:
img, mask = self.img_to_pytorch(img), self.mask_to_pytorch(mask)
return {'data': img, 'target': mask}
@staticmethod
def img_to_pytorch(image):
return torch.from_numpy(np.expand_dims(np.moveaxis(image, -1, 0).astype(np.float32) / 128 - 1, axis=0))
@staticmethod
def mask_to_pytorch(mask):
return torch.from_numpy(np.expand_dims(mask.astype(np.float32), axis=0))
| true | true |
1c464c918d295b7c3a348cdb1a566cf0a3e06af7 | 5,165 | py | Python | starfish/core/experiment/builder/test/factories/all_purpose.py | kne42/starfish | 78b348c9756f367221dcca725cfa5107e5520b33 | [
"MIT"
] | null | null | null | starfish/core/experiment/builder/test/factories/all_purpose.py | kne42/starfish | 78b348c9756f367221dcca725cfa5107e5520b33 | [
"MIT"
] | null | null | null | starfish/core/experiment/builder/test/factories/all_purpose.py | kne42/starfish | 78b348c9756f367221dcca725cfa5107e5520b33 | [
"MIT"
] | null | null | null | from abc import ABCMeta
from typing import Callable, cast, Collection, Mapping, Sequence, Type, Union
import numpy as np
import slicedimage
from starfish.core.experiment.builder import (
build_irregular_image,
FetchedTile,
tile_fetcher_factory,
TileFetcher,
TileIdentifier,
)
from starfish.core.types import Axes, Coordinates, CoordinateValue
class LocationAwareFetchedTile(FetchedTile, metaclass=ABCMeta):
    """This is the base class for tiles that are aware of their location in the 5D tensor.

    Each instance records its own position (fov/round/ch/zplane labels), the
    full label space of the experiment, and the tile's pixel dimensions.
    """
    def __init__(
            self,
            # these are the arguments passed in as a result of tile_fetcher_factory's
            # pass_tile_indices parameter.
            fov_id: int, round_label: int, ch_label: int, zplane_label: int,
            # these are the arguments we are passing through tile_fetcher_factory.
            fovs: Sequence[int], rounds: Sequence[int], chs: Sequence[int], zplanes: Sequence[int],
            tile_height: int, tile_width: int,
    ) -> None:
        super().__init__()
        # This tile's coordinates within the 5D tensor.
        self.fov_id = fov_id
        self.round_label = round_label
        self.ch_label = ch_label
        self.zplane_label = zplane_label
        # The complete label space, so a subclass can locate itself relative
        # to every other tile in the experiment.
        self.fovs = fovs
        self.rounds = rounds
        self.chs = chs
        self.zplanes = zplanes
        # Pixel dimensions of this tile.
        self.tile_height = tile_height
        self.tile_width = tile_width
def _apply_coords_range_fetcher(
        backing_tile_fetcher: TileFetcher,
        tile_coordinates_callback: Callable[
            [TileIdentifier], Mapping[Coordinates, CoordinateValue]],
) -> TileFetcher:
    """Given a :py:class:`TileFetcher`, intercept all the returned :py:class:`FetchedTile` instances
    and replace the coordinates using the coordinates from `tile_coordinates_callback`."""
    class ModifiedTile(FetchedTile):
        # Wraps a FetchedTile: shape and pixel data are delegated to the
        # wrapped tile, but coordinates come from the callback instead.
        def __init__(
                self,
                backing_tile: FetchedTile,
                tile_identifier: TileIdentifier,
                *args, **kwargs
        ):
            super().__init__(*args, **kwargs)
            self.backing_tile = backing_tile
            self.tile_identifier = tile_identifier

        @property
        def shape(self) -> Mapping[Axes, int]:
            # Unchanged — only coordinates are overridden.
            return self.backing_tile.shape

        @property
        def coordinates(self) -> Mapping[Union[str, Coordinates], CoordinateValue]:
            # cast() only informs the type checker; no runtime conversion.
            return cast(
                Mapping[Union[str, Coordinates], CoordinateValue],
                tile_coordinates_callback(self.tile_identifier))

        def tile_data(self) -> np.ndarray:
            return self.backing_tile.tile_data()

    class ModifiedTileFetcher(TileFetcher):
        # Fetch from the backing fetcher, then wrap each result so its
        # coordinates are replaced.
        def get_tile(
                self, fov_id: int, round_label: int, ch_label: int, zplane_label: int,
        ) -> FetchedTile:
            original_fetched_tile = backing_tile_fetcher.get_tile(
                fov_id, round_label, ch_label, zplane_label)
            tile_identifier = TileIdentifier(fov_id, round_label, ch_label, zplane_label)
            return ModifiedTile(original_fetched_tile, tile_identifier)

    return ModifiedTileFetcher()
def collection_factory(
        fetched_tile_cls: Type[LocationAwareFetchedTile],
        tile_identifiers: Collection[TileIdentifier],
        tile_coordinates_callback: Callable[
            [TileIdentifier], Mapping[Coordinates, CoordinateValue]],
        tile_height: int,
        tile_width: int,
) -> slicedimage.Collection:
    """Produce a slicedimage Collection holding the tiles in `tile_identifiers`,
    built from a :py:class:`LocationAwareFetchedTile` subclass. Each tile's
    coordinates are obtained by invoking `tile_coordinates_callback` with its
    TileIdentifier.

    Parameters
    ----------
    fetched_tile_cls : Type[LocationAwareFetchedTile]
        The class of the FetchedTile.
    tile_identifiers : Collection[TileIdentifier]
        TileIdentifiers for each of the tiles in the collection.
    tile_coordinates_callback : Callable[[TileIdentifier], Mapping[Coordinates, CoordinatesValue]]
        A callable that returns the coordinates for a given tile's TileIdentifier.
    tile_height : int
        Height of each tile, in pixels.
    tile_width : int
        Width of each tile, in pixels.
    """
    def _sorted_labels(extract):
        # Unique labels across every tile identifier, in ascending order.
        return sorted({extract(tid) for tid in tile_identifiers})

    fov_ids = _sorted_labels(lambda tid: tid.fov_id)
    round_labels = _sorted_labels(lambda tid: tid.round_label)
    ch_labels = _sorted_labels(lambda tid: tid.ch_label)
    zplane_labels = _sorted_labels(lambda tid: tid.zplane_label)

    base_fetcher = tile_fetcher_factory(
        fetched_tile_cls, True,
        fov_ids, round_labels, ch_labels, zplane_labels,
        tile_height, tile_width,
    )
    fetcher = _apply_coords_range_fetcher(base_fetcher, tile_coordinates_callback)
    return build_irregular_image(
        tile_identifiers,
        fetcher,
        default_shape={Axes.Y: tile_height, Axes.X: tile_width}
    )
| 39.128788 | 100 | 0.684802 | from abc import ABCMeta
from typing import Callable, cast, Collection, Mapping, Sequence, Type, Union
import numpy as np
import slicedimage
from starfish.core.experiment.builder import (
build_irregular_image,
FetchedTile,
tile_fetcher_factory,
TileFetcher,
TileIdentifier,
)
from starfish.core.types import Axes, Coordinates, CoordinateValue
class LocationAwareFetchedTile(FetchedTile, metaclass=ABCMeta):
def __init__(
self,
# pass_tile_indices parameter.
fov_id: int, round_label: int, ch_label: int, zplane_label: int,
# these are the arguments we are passing through tile_fetcher_factory.
fovs: Sequence[int], rounds: Sequence[int], chs: Sequence[int], zplanes: Sequence[int],
tile_height: int, tile_width: int,
) -> None:
super().__init__()
self.fov_id = fov_id
self.round_label = round_label
self.ch_label = ch_label
self.zplane_label = zplane_label
self.fovs = fovs
self.rounds = rounds
self.chs = chs
self.zplanes = zplanes
self.tile_height = tile_height
self.tile_width = tile_width
def _apply_coords_range_fetcher(
backing_tile_fetcher: TileFetcher,
tile_coordinates_callback: Callable[
[TileIdentifier], Mapping[Coordinates, CoordinateValue]],
) -> TileFetcher:
class ModifiedTile(FetchedTile):
def __init__(
self,
backing_tile: FetchedTile,
tile_identifier: TileIdentifier,
*args, **kwargs
):
super().__init__(*args, **kwargs)
self.backing_tile = backing_tile
self.tile_identifier = tile_identifier
@property
def shape(self) -> Mapping[Axes, int]:
return self.backing_tile.shape
@property
def coordinates(self) -> Mapping[Union[str, Coordinates], CoordinateValue]:
return cast(
Mapping[Union[str, Coordinates], CoordinateValue],
tile_coordinates_callback(self.tile_identifier))
def tile_data(self) -> np.ndarray:
return self.backing_tile.tile_data()
class ModifiedTileFetcher(TileFetcher):
def get_tile(
self, fov_id: int, round_label: int, ch_label: int, zplane_label: int,
) -> FetchedTile:
original_fetched_tile = backing_tile_fetcher.get_tile(
fov_id, round_label, ch_label, zplane_label)
tile_identifier = TileIdentifier(fov_id, round_label, ch_label, zplane_label)
return ModifiedTile(original_fetched_tile, tile_identifier)
return ModifiedTileFetcher()
def collection_factory(
fetched_tile_cls: Type[LocationAwareFetchedTile],
tile_identifiers: Collection[TileIdentifier],
tile_coordinates_callback: Callable[
[TileIdentifier], Mapping[Coordinates, CoordinateValue]],
tile_height: int,
tile_width: int,
) -> slicedimage.Collection:
all_fov_ids = sorted(set(
tile_identifier.fov_id for tile_identifier in tile_identifiers))
all_round_labels = sorted(set(
tile_identifier.round_label for tile_identifier in tile_identifiers))
all_ch_labels = sorted(set(
tile_identifier.ch_label for tile_identifier in tile_identifiers))
all_zplane_labels = sorted(set(
tile_identifier.zplane_label for tile_identifier in tile_identifiers))
original_tile_fetcher = tile_fetcher_factory(
fetched_tile_cls, True,
all_fov_ids, all_round_labels, all_ch_labels, all_zplane_labels,
tile_height, tile_width,
)
modified_tile_fetcher = _apply_coords_range_fetcher(
original_tile_fetcher, tile_coordinates_callback)
return build_irregular_image(
tile_identifiers,
modified_tile_fetcher,
default_shape={Axes.Y: tile_height, Axes.X: tile_width}
)
| true | true |
1c464cd9a94ce016f5a29a3b4da763617bf225a8 | 75,458 | py | Python | simulator/config_pb2.py | googleinterns/cluster-resource-forecast | 48b67346160e4f9c010552b9b20b8bace1a321ad | [
"Apache-2.0"
] | 25 | 2020-05-06T21:29:04.000Z | 2022-02-17T05:25:25.000Z | simulator/config_pb2.py | touchuyht/cluster-resource-forecast | 48b67346160e4f9c010552b9b20b8bace1a321ad | [
"Apache-2.0"
] | 3 | 2020-06-09T04:14:08.000Z | 2021-04-25T07:30:38.000Z | simulator/config_pb2.py | touchuyht/cluster-resource-forecast | 48b67346160e4f9c010552b9b20b8bace1a321ad | [
"Apache-2.0"
] | 12 | 2020-06-05T00:52:01.000Z | 2021-12-17T06:55:30.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: simulator/config.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="simulator/config.proto",
package="",
syntax="proto2",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16simulator/config.proto"6\n\nInt64Range\x12\x13\n\x0blower_bound\x18\x01 \x01(\x03\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x03".\n\x0c\x44\x61taLocation\x12\x0f\n\x07\x64\x61taset\x18\x01 \x01(\t\x12\r\n\x05table\x18\x02 \x01(\t"\xb8\x01\n\x08VMFilter\x12\x12\n\nstart_time\x18\x01 \x01(\x03\x12\x10\n\x08\x65nd_time\x18\x02 \x01(\x03\x12 \n\x18remove_non_top_level_vms\x18\x03 \x01(\x08\x12#\n\x0epriority_range\x18\x04 \x01(\x0b\x32\x0b.Int64Range\x12+\n\x16scheduling_class_range\x18\x05 \x01(\x0b\x32\x0b.Int64Range\x12\x12\n\nmachine_id\x18\x06 \x03(\x03"_\n\x0bLoadOrWrite\x12\x1e\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocationH\x00\x12\x1f\n\x06output\x18\x02 \x01(\x0b\x32\r.DataLocationH\x00\x42\x0f\n\rload_or_write"\xac\x01\n\x16\x41\x62stractMetricSelector\x12\x1a\n\x10max_memory_usage\x18\x01 \x01(\x08H\x00\x12\x1e\n\x14\x63pu_usage_percentile\x18\x02 \x01(\x03H\x00\x12\x17\n\ravg_cpu_usage\x18\x03 \x01(\x08H\x00\x12\x1a\n\x10\x61vg_memory_usage\x18\x04 \x01(\x08H\x00\x12\x17\n\rmax_cpu_usage\x18\x05 \x01(\x08H\x00\x42\x08\n\x06metric"\\\n\rResetAndShift\x12\x1a\n\x12reset_time_to_zero\x18\x01 \x01(\x08\x12!\n\x0crandom_shift\x18\x02 \x01(\x0b\x32\x0b.Int64Range\x12\x0c\n\x04seed\x18\x03 \x01(\x03"\xa6\x01\n\tScheduler\x12(\n\tat_random\x18\x01 \x01(\x0b\x32\x13.Scheduler.AtRandomH\x00\x12\x17\n\rby_machine_id\x18\x02 \x01(\x08H\x00\x12\x19\n\x0f\x62y_vm_unique_id\x18\x03 \x01(\x08H\x00\x1a.\n\x08\x41tRandom\x12\x14\n\x0cnum_machines\x18\x01 \x01(\x03\x12\x0c\n\x04seed\x18\x02 \x01(\x03\x42\x0b\n\tscheduler"\xbc\t\n\x0fPredictorConfig\x12.\n\x14\x64\x65\x63orated_predictors\x18\n \x03(\x0b\x32\x10.PredictorConfig\x12<\n\ravg_predictor\x18\x01 \x01(\x0b\x32#.PredictorConfig.AvgPredictorConfigH\x00\x12<\n\rmax_predictor\x18\x02 \x01(\x0b\x32#.PredictorConfig.MaxPredictorConfigH\x00\x12<\n\ravg_decorator\x18\x03 \x01(\x0b\x32#.PredictorConfig.AvgDecoratorConfigH\x00\x12<\n\rmax_decorator\x18\x04 
\x01(\x0b\x32#.PredictorConfig.MaxDecoratorConfigH\x00\x12M\n\x1bper_vm_percentile_predictor\x18\x05 \x01(\x0b\x32&.PredictorConfig.PerVMPercentileConfigH\x00\x12:\n\x11n_sigma_predictor\x18\x06 \x01(\x0b\x32\x1d.PredictorConfig.NSigmaConfigH\x00\x12@\n\x0flimit_predictor\x18\x07 \x01(\x0b\x32%.PredictorConfig.LimitPredictorConfigH\x00\x12W\n per_machine_percentile_predictor\x18\x08 \x01(\x0b\x32+.PredictorConfig.PerMachinePercentileConfigH\x00\x1a\x43\n\x12\x41vgPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a/\n\x14LimitPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x1a\x43\n\x12MaxPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a|\n\x15PerVMPercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x81\x01\n\x1aPerMachinePercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x65\n\x0cNSigmaConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x1b\n\x13num_history_samples\x18\x03 \x01(\x03\x12\t\n\x01n\x18\x04 \x01(\x03\x1a\x14\n\x12\x41vgDecoratorConfig\x1a\x14\n\x12MaxDecoratorConfigB\x0b\n\tpredictor"\xfa\x01\n\x13\x46ortuneTellerConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csave_samples\x18\x02 \x01(\x08\x12\x33\n\x06oracle\x18\x03 \x01(\x0b\x32!.FortuneTellerConfig.OracleConfigH\x00\x12%\n\tpredictor\x18\x04 \x01(\x0b\x32\x10.PredictorConfigH\x00\x1aY\n\x0cOracleConfig\x12\x1a\n\x12horizon_in_seconds\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 
\x01(\x03:\x03\x31\x30\x30\x42\x08\n\x06teller"\xfa\x03\n\x10SimulationConfig\x12\x1c\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocation\x12\x19\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\t.VMFilter\x12&\n\x10\x66iltered_samples\x18\x03 \x01(\x0b\x32\x0c.LoadOrWrite\x12*\n\x14time_aligned_samples\x18\x04 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x06metric\x18\x05 \x01(\x0b\x32\x17.AbstractMetricSelector\x12\x33\n\x1dsamples_with_abstract_metrics\x18\x06 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x0freset_and_shift\x18\x07 \x01(\x0b\x32\x0e.ResetAndShift\x12\x32\n\x1csamples_with_reset_and_shift\x18\x08 \x01(\x0b\x32\x0c.LoadOrWrite\x12\x1d\n\tscheduler\x18\t \x01(\x0b\x32\n.Scheduler\x12\'\n\x11scheduled_samples\x18\n \x01(\x0b\x32\x0c.LoadOrWrite\x12,\n\x0e\x66ortune_teller\x18\x0b \x03(\x0b\x32\x14.FortuneTellerConfig\x12(\n\x11simulation_result\x18\x0c \x01(\x0b\x32\r.DataLocation',
)
_INT64RANGE = _descriptor.Descriptor(
name="Int64Range",
full_name="Int64Range",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="lower_bound",
full_name="Int64Range.lower_bound",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="upper_bound",
full_name="Int64Range.upper_bound",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=26,
serialized_end=80,
)
_DATALOCATION = _descriptor.Descriptor(
name="DataLocation",
full_name="DataLocation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="dataset",
full_name="DataLocation.dataset",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="table",
full_name="DataLocation.table",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=82,
serialized_end=128,
)
_VMFILTER = _descriptor.Descriptor(
name="VMFilter",
full_name="VMFilter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="VMFilter.start_time",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="VMFilter.end_time",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="remove_non_top_level_vms",
full_name="VMFilter.remove_non_top_level_vms",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="priority_range",
full_name="VMFilter.priority_range",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduling_class_range",
full_name="VMFilter.scheduling_class_range",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="machine_id",
full_name="VMFilter.machine_id",
index=5,
number=6,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=131,
serialized_end=315,
)
_LOADORWRITE = _descriptor.Descriptor(
name="LoadOrWrite",
full_name="LoadOrWrite",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="LoadOrWrite.input",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output",
full_name="LoadOrWrite.output",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="load_or_write",
full_name="LoadOrWrite.load_or_write",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=317,
serialized_end=412,
)
# Descriptor for the AbstractMetricSelector message (protoc-generated; do not
# hand-edit). Selects exactly one metric via the "metric" oneof: four boolean
# flags plus an int64 cpu_usage_percentile. The oneof membership is wired up
# below after all descriptors exist.
_ABSTRACTMETRICSELECTOR = _descriptor.Descriptor(
    name="AbstractMetricSelector",
    full_name="AbstractMetricSelector",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="max_memory_usage",
            full_name="AbstractMetricSelector.max_memory_usage",
            index=0,
            number=1,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cpu_usage_percentile",
            full_name="AbstractMetricSelector.cpu_usage_percentile",
            index=1,
            number=2,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="avg_cpu_usage",
            full_name="AbstractMetricSelector.avg_cpu_usage",
            index=2,
            number=3,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="avg_memory_usage",
            full_name="AbstractMetricSelector.avg_memory_usage",
            index=3,
            number=4,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="max_cpu_usage",
            full_name="AbstractMetricSelector.max_cpu_usage",
            index=4,
            number=5,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    # All five fields above belong to this oneof; membership is appended below.
    oneofs=[
        _descriptor.OneofDescriptor(
            name="metric",
            full_name="AbstractMetricSelector.metric",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=415,
    serialized_end=587,
)
# Descriptor for the ResetAndShift message (protoc-generated; do not hand-edit).
# Fields: reset_time_to_zero (bool), random_shift (message — linked to
# _INT64RANGE in the wiring section below), and seed (int64).
_RESETANDSHIFT = _descriptor.Descriptor(
    name="ResetAndShift",
    full_name="ResetAndShift",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="reset_time_to_zero",
            full_name="ResetAndShift.reset_time_to_zero",
            index=0,
            number=1,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="random_shift",
            full_name="ResetAndShift.random_shift",
            index=1,
            number=2,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="seed",
            full_name="ResetAndShift.seed",
            index=2,
            number=3,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=589,
    serialized_end=681,
)
# Descriptor for Scheduler.AtRandom, a message nested inside Scheduler
# (protoc-generated; do not hand-edit). Its containing_type is linked to
# _SCHEDULER in the wiring section below. Fields: num_machines and seed,
# both int64.
_SCHEDULER_ATRANDOM = _descriptor.Descriptor(
    name="AtRandom",
    full_name="Scheduler.AtRandom",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="num_machines",
            full_name="Scheduler.AtRandom.num_machines",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="seed",
            full_name="Scheduler.AtRandom.seed",
            index=1,
            number=2,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=791,
    serialized_end=837,
)
# Descriptor for the Scheduler message (protoc-generated; do not hand-edit).
# A "scheduler" oneof selects one of: at_random (Scheduler.AtRandom message),
# by_machine_id (bool), or by_vm_unique_id (bool). Oneof membership and the
# at_random message link are wired up below.
_SCHEDULER = _descriptor.Descriptor(
    name="Scheduler",
    full_name="Scheduler",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="at_random",
            full_name="Scheduler.at_random",
            index=0,
            number=1,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="by_machine_id",
            full_name="Scheduler.by_machine_id",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="by_vm_unique_id",
            full_name="Scheduler.by_vm_unique_id",
            index=2,
            number=3,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_SCHEDULER_ATRANDOM,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="scheduler",
            full_name="Scheduler.scheduler",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=684,
    serialized_end=850,
)
# Descriptor for PredictorConfig.AvgPredictorConfig, nested inside
# PredictorConfig (protoc-generated; do not hand-edit).
# Fields: min_num_samples (int64) and cap_to_limit (bool).
_PREDICTORCONFIG_AVGPREDICTORCONFIG = _descriptor.Descriptor(
    name="AvgPredictorConfig",
    full_name="PredictorConfig.AvgPredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.AvgPredictorConfig.min_num_samples",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.AvgPredictorConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1462,
    serialized_end=1529,
)
# Descriptor for PredictorConfig.LimitPredictorConfig, nested inside
# PredictorConfig (protoc-generated; do not hand-edit).
# Single field: min_num_samples (int64).
_PREDICTORCONFIG_LIMITPREDICTORCONFIG = _descriptor.Descriptor(
    name="LimitPredictorConfig",
    full_name="PredictorConfig.LimitPredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.LimitPredictorConfig.min_num_samples",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1531,
    serialized_end=1578,
)
# Descriptor for PredictorConfig.MaxPredictorConfig, nested inside
# PredictorConfig (protoc-generated; do not hand-edit).
# Fields: min_num_samples (int64) and cap_to_limit (bool) — same layout as
# AvgPredictorConfig.
_PREDICTORCONFIG_MAXPREDICTORCONFIG = _descriptor.Descriptor(
    name="MaxPredictorConfig",
    full_name="PredictorConfig.MaxPredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.MaxPredictorConfig.min_num_samples",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.MaxPredictorConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1580,
    serialized_end=1647,
)
# Descriptor for PredictorConfig.PerVMPercentileConfig, nested inside
# PredictorConfig (protoc-generated; do not hand-edit). Fields:
# min_num_samples (int64), cap_to_limit (bool), percentile (double,
# default 100.0), num_history_samples (int64).
_PREDICTORCONFIG_PERVMPERCENTILECONFIG = _descriptor.Descriptor(
    name="PerVMPercentileConfig",
    full_name="PredictorConfig.PerVMPercentileConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.PerVMPercentileConfig.min_num_samples",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.PerVMPercentileConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="percentile",
            full_name="PredictorConfig.PerVMPercentileConfig.percentile",
            index=2,
            number=3,
            type=1,  # TYPE_DOUBLE
            cpp_type=5,
            label=1,
            has_default_value=True,
            default_value=float(100),  # explicit proto default: 100.0
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="num_history_samples",
            full_name="PredictorConfig.PerVMPercentileConfig.num_history_samples",
            index=3,
            number=4,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1649,
    serialized_end=1773,
)
# Descriptor for PredictorConfig.PerMachinePercentileConfig, nested inside
# PredictorConfig (protoc-generated; do not hand-edit). Same field layout as
# PerVMPercentileConfig: min_num_samples (int64), cap_to_limit (bool),
# percentile (double, default 100.0), num_history_samples (int64).
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG = _descriptor.Descriptor(
    name="PerMachinePercentileConfig",
    full_name="PredictorConfig.PerMachinePercentileConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.PerMachinePercentileConfig.min_num_samples",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.PerMachinePercentileConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="percentile",
            full_name="PredictorConfig.PerMachinePercentileConfig.percentile",
            index=2,
            number=3,
            type=1,  # TYPE_DOUBLE
            cpp_type=5,
            label=1,
            has_default_value=True,
            default_value=float(100),  # explicit proto default: 100.0
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="num_history_samples",
            full_name="PredictorConfig.PerMachinePercentileConfig.num_history_samples",
            index=3,
            number=4,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1776,
    serialized_end=1905,
)
# Descriptor for PredictorConfig.NSigmaConfig, nested inside PredictorConfig
# (protoc-generated; do not hand-edit). Fields: min_num_samples (int64),
# cap_to_limit (bool), num_history_samples (int64), n (int64).
_PREDICTORCONFIG_NSIGMACONFIG = _descriptor.Descriptor(
    name="NSigmaConfig",
    full_name="PredictorConfig.NSigmaConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.NSigmaConfig.min_num_samples",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.NSigmaConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="num_history_samples",
            full_name="PredictorConfig.NSigmaConfig.num_history_samples",
            index=2,
            number=3,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="n",
            full_name="PredictorConfig.NSigmaConfig.n",
            index=3,
            number=4,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1907,
    serialized_end=2008,
)
# Descriptor for PredictorConfig.AvgDecoratorConfig, an empty (marker) message
# nested inside PredictorConfig (protoc-generated; do not hand-edit).
_PREDICTORCONFIG_AVGDECORATORCONFIG = _descriptor.Descriptor(
    name="AvgDecoratorConfig",
    full_name="PredictorConfig.AvgDecoratorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2010,
    serialized_end=2030,
)
# Descriptor for PredictorConfig.MaxDecoratorConfig, an empty (marker) message
# nested inside PredictorConfig (protoc-generated; do not hand-edit).
_PREDICTORCONFIG_MAXDECORATORCONFIG = _descriptor.Descriptor(
    name="MaxDecoratorConfig",
    full_name="PredictorConfig.MaxDecoratorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2032,
    serialized_end=2052,
)
# Descriptor for the PredictorConfig message (protoc-generated; do not
# hand-edit). Layout: a repeated self-referential "decorated_predictors"
# field (field 10), plus a "predictor" oneof over eight config sub-messages
# (avg/max predictors, avg/max decorators, per-VM/per-machine percentile,
# n-sigma, limit). Message links and oneof membership are wired up below.
_PREDICTORCONFIG = _descriptor.Descriptor(
    name="PredictorConfig",
    full_name="PredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="decorated_predictors",
            full_name="PredictorConfig.decorated_predictors",
            index=0,
            number=10,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=3,  # LABEL_REPEATED
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="avg_predictor",
            full_name="PredictorConfig.avg_predictor",
            index=1,
            number=1,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="max_predictor",
            full_name="PredictorConfig.max_predictor",
            index=2,
            number=2,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="avg_decorator",
            full_name="PredictorConfig.avg_decorator",
            index=3,
            number=3,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="max_decorator",
            full_name="PredictorConfig.max_decorator",
            index=4,
            number=4,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="per_vm_percentile_predictor",
            full_name="PredictorConfig.per_vm_percentile_predictor",
            index=5,
            number=5,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="n_sigma_predictor",
            full_name="PredictorConfig.n_sigma_predictor",
            index=6,
            number=6,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="limit_predictor",
            full_name="PredictorConfig.limit_predictor",
            index=7,
            number=7,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="per_machine_percentile_predictor",
            full_name="PredictorConfig.per_machine_percentile_predictor",
            index=8,
            number=8,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[
        _PREDICTORCONFIG_AVGPREDICTORCONFIG,
        _PREDICTORCONFIG_LIMITPREDICTORCONFIG,
        _PREDICTORCONFIG_MAXPREDICTORCONFIG,
        _PREDICTORCONFIG_PERVMPERCENTILECONFIG,
        _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
        _PREDICTORCONFIG_NSIGMACONFIG,
        _PREDICTORCONFIG_AVGDECORATORCONFIG,
        _PREDICTORCONFIG_MAXDECORATORCONFIG,
    ],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="predictor",
            full_name="PredictorConfig.predictor",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=853,
    serialized_end=2065,
)
# Descriptor for FortuneTellerConfig.OracleConfig, nested inside
# FortuneTellerConfig (protoc-generated; do not hand-edit). Fields:
# horizon_in_seconds (int64), cap_to_limit (bool), percentile (int64,
# default 100 — note: int64 here, unlike the double percentile fields of the
# PredictorConfig percentile sub-messages).
_FORTUNETELLERCONFIG_ORACLECONFIG = _descriptor.Descriptor(
    name="OracleConfig",
    full_name="FortuneTellerConfig.OracleConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="horizon_in_seconds",
            full_name="FortuneTellerConfig.OracleConfig.horizon_in_seconds",
            index=0,
            number=1,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="FortuneTellerConfig.OracleConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="percentile",
            full_name="FortuneTellerConfig.OracleConfig.percentile",
            index=2,
            number=3,
            type=3,  # TYPE_INT64
            cpp_type=2,
            label=1,
            has_default_value=True,
            default_value=100,  # explicit proto default
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2219,
    serialized_end=2308,
)
# Descriptor for the FortuneTellerConfig message (protoc-generated; do not
# hand-edit). Fields: name (string), save_samples (bool), and a "teller"
# oneof whose member fields ("oracle" — OracleConfig, and "predictor" —
# presumably PredictorConfig; the message links are assigned outside this
# block) are wired up after all descriptors exist.
_FORTUNETELLERCONFIG = _descriptor.Descriptor(
    name="FortuneTellerConfig",
    full_name="FortuneTellerConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="FortuneTellerConfig.name",
            index=0,
            number=1,
            type=9,  # TYPE_STRING
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="save_samples",
            full_name="FortuneTellerConfig.save_samples",
            index=1,
            number=2,
            type=8,  # TYPE_BOOL
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="oracle",
            full_name="FortuneTellerConfig.oracle",
            index=2,
            number=3,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="predictor",
            full_name="FortuneTellerConfig.predictor",
            index=3,
            number=4,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_FORTUNETELLERCONFIG_ORACLECONFIG,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="teller",
            full_name="FortuneTellerConfig.teller",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=2068,
    serialized_end=2318,
)
# Descriptor for the top-level SimulationConfig message (protoc-generated;
# do not hand-edit). Describes a pipeline-style config: eleven optional
# message fields (input, filter, intermediate sample stages, metric selector,
# reset_and_shift, scheduler, simulation_result) plus a repeated
# fortune_teller field. Message-type links are assigned outside this block.
_SIMULATIONCONFIG = _descriptor.Descriptor(
    name="SimulationConfig",
    full_name="SimulationConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="input",
            full_name="SimulationConfig.input",
            index=0,
            number=1,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="filter",
            full_name="SimulationConfig.filter",
            index=1,
            number=2,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="filtered_samples",
            full_name="SimulationConfig.filtered_samples",
            index=2,
            number=3,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="time_aligned_samples",
            full_name="SimulationConfig.time_aligned_samples",
            index=3,
            number=4,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="metric",
            full_name="SimulationConfig.metric",
            index=4,
            number=5,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="samples_with_abstract_metrics",
            full_name="SimulationConfig.samples_with_abstract_metrics",
            index=5,
            number=6,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="reset_and_shift",
            full_name="SimulationConfig.reset_and_shift",
            index=6,
            number=7,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="samples_with_reset_and_shift",
            full_name="SimulationConfig.samples_with_reset_and_shift",
            index=7,
            number=8,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="scheduler",
            full_name="SimulationConfig.scheduler",
            index=8,
            number=9,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="scheduled_samples",
            full_name="SimulationConfig.scheduled_samples",
            index=9,
            number=10,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="fortune_teller",
            full_name="SimulationConfig.fortune_teller",
            index=10,
            number=11,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=3,  # LABEL_REPEATED
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="simulation_result",
            full_name="SimulationConfig.simulation_result",
            index=11,
            number=12,
            type=11,  # TYPE_MESSAGE
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2321,
    serialized_end=2827,
)
# --- Descriptor cross-linking (protoc-generated; do not hand-edit) ---
# Now that all Descriptor objects exist, resolve the forward references that
# could not be set at construction time:
#   * message-typed fields get their .message_type pointer,
#   * nested types get their .containing_type,
#   * each oneof's fields list is populated and every member field gets its
#     .containing_oneof back-link.
_VMFILTER.fields_by_name["priority_range"].message_type = _INT64RANGE
_VMFILTER.fields_by_name["scheduling_class_range"].message_type = _INT64RANGE
_LOADORWRITE.fields_by_name["input"].message_type = _DATALOCATION
_LOADORWRITE.fields_by_name["output"].message_type = _DATALOCATION
# LoadOrWrite.load_or_write oneof: input | output.
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
    _LOADORWRITE.fields_by_name["input"]
)
_LOADORWRITE.fields_by_name["input"].containing_oneof = _LOADORWRITE.oneofs_by_name[
    "load_or_write"
]
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
    _LOADORWRITE.fields_by_name["output"]
)
_LOADORWRITE.fields_by_name["output"].containing_oneof = _LOADORWRITE.oneofs_by_name[
    "load_or_write"
]
# AbstractMetricSelector.metric oneof: all five metric fields.
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["max_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "max_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["cpu_usage_percentile"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "cpu_usage_percentile"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["avg_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "avg_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["avg_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "avg_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["max_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "max_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_RESETANDSHIFT.fields_by_name["random_shift"].message_type = _INT64RANGE
_SCHEDULER_ATRANDOM.containing_type = _SCHEDULER
_SCHEDULER.fields_by_name["at_random"].message_type = _SCHEDULER_ATRANDOM
# Scheduler.scheduler oneof: at_random | by_machine_id | by_vm_unique_id.
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
    _SCHEDULER.fields_by_name["at_random"]
)
_SCHEDULER.fields_by_name["at_random"].containing_oneof = _SCHEDULER.oneofs_by_name[
    "scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
    _SCHEDULER.fields_by_name["by_machine_id"]
)
_SCHEDULER.fields_by_name["by_machine_id"].containing_oneof = _SCHEDULER.oneofs_by_name[
    "scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
    _SCHEDULER.fields_by_name["by_vm_unique_id"]
)
_SCHEDULER.fields_by_name[
    "by_vm_unique_id"
].containing_oneof = _SCHEDULER.oneofs_by_name["scheduler"]
# PredictorConfig nested types and message-typed field links.
_PREDICTORCONFIG_AVGPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_LIMITPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERVMPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_NSIGMACONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_AVGDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXDECORATORCONFIG.containing_type = _PREDICTORCONFIG
# Self-referential: decorated_predictors is a repeated PredictorConfig.
_PREDICTORCONFIG.fields_by_name["decorated_predictors"].message_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "avg_predictor"
].message_type = _PREDICTORCONFIG_AVGPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "max_predictor"
].message_type = _PREDICTORCONFIG_MAXPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "avg_decorator"
].message_type = _PREDICTORCONFIG_AVGDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "max_decorator"
].message_type = _PREDICTORCONFIG_MAXDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "per_vm_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERVMPERCENTILECONFIG
_PREDICTORCONFIG.fields_by_name[
    "n_sigma_predictor"
].message_type = _PREDICTORCONFIG_NSIGMACONFIG
_PREDICTORCONFIG.fields_by_name[
    "limit_predictor"
].message_type = _PREDICTORCONFIG_LIMITPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "per_machine_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG
# PredictorConfig.predictor oneof membership (continues past this section).
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["avg_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "avg_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["max_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"max_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["avg_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
"avg_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["max_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
"max_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["per_vm_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"per_vm_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["n_sigma_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"n_sigma_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["limit_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"limit_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["per_machine_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"per_machine_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_FORTUNETELLERCONFIG_ORACLECONFIG.containing_type = _FORTUNETELLERCONFIG
_FORTUNETELLERCONFIG.fields_by_name[
"oracle"
].message_type = _FORTUNETELLERCONFIG_ORACLECONFIG
_FORTUNETELLERCONFIG.fields_by_name["predictor"].message_type = _PREDICTORCONFIG
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
_FORTUNETELLERCONFIG.fields_by_name["oracle"]
)
_FORTUNETELLERCONFIG.fields_by_name[
"oracle"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
_FORTUNETELLERCONFIG.fields_by_name["predictor"]
)
_FORTUNETELLERCONFIG.fields_by_name[
"predictor"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_SIMULATIONCONFIG.fields_by_name["input"].message_type = _DATALOCATION
_SIMULATIONCONFIG.fields_by_name["filter"].message_type = _VMFILTER
_SIMULATIONCONFIG.fields_by_name["filtered_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["time_aligned_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["metric"].message_type = _ABSTRACTMETRICSELECTOR
_SIMULATIONCONFIG.fields_by_name[
"samples_with_abstract_metrics"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["reset_and_shift"].message_type = _RESETANDSHIFT
_SIMULATIONCONFIG.fields_by_name[
"samples_with_reset_and_shift"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["scheduler"].message_type = _SCHEDULER
_SIMULATIONCONFIG.fields_by_name["scheduled_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["fortune_teller"].message_type = _FORTUNETELLERCONFIG
_SIMULATIONCONFIG.fields_by_name["simulation_result"].message_type = _DATALOCATION
DESCRIPTOR.message_types_by_name["Int64Range"] = _INT64RANGE
DESCRIPTOR.message_types_by_name["DataLocation"] = _DATALOCATION
DESCRIPTOR.message_types_by_name["VMFilter"] = _VMFILTER
DESCRIPTOR.message_types_by_name["LoadOrWrite"] = _LOADORWRITE
DESCRIPTOR.message_types_by_name["AbstractMetricSelector"] = _ABSTRACTMETRICSELECTOR
DESCRIPTOR.message_types_by_name["ResetAndShift"] = _RESETANDSHIFT
DESCRIPTOR.message_types_by_name["Scheduler"] = _SCHEDULER
DESCRIPTOR.message_types_by_name["PredictorConfig"] = _PREDICTORCONFIG
DESCRIPTOR.message_types_by_name["FortuneTellerConfig"] = _FORTUNETELLERCONFIG
DESCRIPTOR.message_types_by_name["SimulationConfig"] = _SIMULATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Int64Range = _reflection.GeneratedProtocolMessageType(
"Int64Range",
(_message.Message,),
{
"DESCRIPTOR": _INT64RANGE,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Int64Range)
},
)
_sym_db.RegisterMessage(Int64Range)
DataLocation = _reflection.GeneratedProtocolMessageType(
"DataLocation",
(_message.Message,),
{
"DESCRIPTOR": _DATALOCATION,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:DataLocation)
},
)
_sym_db.RegisterMessage(DataLocation)
VMFilter = _reflection.GeneratedProtocolMessageType(
"VMFilter",
(_message.Message,),
{
"DESCRIPTOR": _VMFILTER,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:VMFilter)
},
)
_sym_db.RegisterMessage(VMFilter)
LoadOrWrite = _reflection.GeneratedProtocolMessageType(
"LoadOrWrite",
(_message.Message,),
{
"DESCRIPTOR": _LOADORWRITE,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:LoadOrWrite)
},
)
_sym_db.RegisterMessage(LoadOrWrite)
AbstractMetricSelector = _reflection.GeneratedProtocolMessageType(
"AbstractMetricSelector",
(_message.Message,),
{
"DESCRIPTOR": _ABSTRACTMETRICSELECTOR,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:AbstractMetricSelector)
},
)
_sym_db.RegisterMessage(AbstractMetricSelector)
ResetAndShift = _reflection.GeneratedProtocolMessageType(
"ResetAndShift",
(_message.Message,),
{
"DESCRIPTOR": _RESETANDSHIFT,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:ResetAndShift)
},
)
_sym_db.RegisterMessage(ResetAndShift)
Scheduler = _reflection.GeneratedProtocolMessageType(
"Scheduler",
(_message.Message,),
{
"AtRandom": _reflection.GeneratedProtocolMessageType(
"AtRandom",
(_message.Message,),
{
"DESCRIPTOR": _SCHEDULER_ATRANDOM,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Scheduler.AtRandom)
},
),
"DESCRIPTOR": _SCHEDULER,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Scheduler)
},
)
_sym_db.RegisterMessage(Scheduler)
_sym_db.RegisterMessage(Scheduler.AtRandom)
PredictorConfig = _reflection.GeneratedProtocolMessageType(
"PredictorConfig",
(_message.Message,),
{
"AvgPredictorConfig": _reflection.GeneratedProtocolMessageType(
"AvgPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgPredictorConfig)
},
),
"LimitPredictorConfig": _reflection.GeneratedProtocolMessageType(
"LimitPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_LIMITPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.LimitPredictorConfig)
},
),
"MaxPredictorConfig": _reflection.GeneratedProtocolMessageType(
"MaxPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxPredictorConfig)
},
),
"PerVMPercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerVMPercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERVMPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerVMPercentileConfig)
},
),
"PerMachinePercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerMachinePercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerMachinePercentileConfig)
},
),
"NSigmaConfig": _reflection.GeneratedProtocolMessageType(
"NSigmaConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_NSIGMACONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.NSigmaConfig)
},
),
"AvgDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"AvgDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgDecoratorConfig)
},
),
"MaxDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"MaxDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxDecoratorConfig)
},
),
"DESCRIPTOR": _PREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig)
},
)
_sym_db.RegisterMessage(PredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.LimitPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.PerVMPercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.PerMachinePercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.NSigmaConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgDecoratorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxDecoratorConfig)
FortuneTellerConfig = _reflection.GeneratedProtocolMessageType(
"FortuneTellerConfig",
(_message.Message,),
{
"OracleConfig": _reflection.GeneratedProtocolMessageType(
"OracleConfig",
(_message.Message,),
{
"DESCRIPTOR": _FORTUNETELLERCONFIG_ORACLECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig.OracleConfig)
},
),
"DESCRIPTOR": _FORTUNETELLERCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig)
},
)
_sym_db.RegisterMessage(FortuneTellerConfig)
_sym_db.RegisterMessage(FortuneTellerConfig.OracleConfig)
SimulationConfig = _reflection.GeneratedProtocolMessageType(
"SimulationConfig",
(_message.Message,),
{
"DESCRIPTOR": _SIMULATIONCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:SimulationConfig)
},
)
_sym_db.RegisterMessage(SimulationConfig)
# @@protoc_insertion_point(module_scope)
| 33.686607 | 4,812 | 0.620279 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="simulator/config.proto",
package="",
syntax="proto2",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16simulator/config.proto"6\n\nInt64Range\x12\x13\n\x0blower_bound\x18\x01 \x01(\x03\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x03".\n\x0c\x44\x61taLocation\x12\x0f\n\x07\x64\x61taset\x18\x01 \x01(\t\x12\r\n\x05table\x18\x02 \x01(\t"\xb8\x01\n\x08VMFilter\x12\x12\n\nstart_time\x18\x01 \x01(\x03\x12\x10\n\x08\x65nd_time\x18\x02 \x01(\x03\x12 \n\x18remove_non_top_level_vms\x18\x03 \x01(\x08\x12#\n\x0epriority_range\x18\x04 \x01(\x0b\x32\x0b.Int64Range\x12+\n\x16scheduling_class_range\x18\x05 \x01(\x0b\x32\x0b.Int64Range\x12\x12\n\nmachine_id\x18\x06 \x03(\x03"_\n\x0bLoadOrWrite\x12\x1e\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocationH\x00\x12\x1f\n\x06output\x18\x02 \x01(\x0b\x32\r.DataLocationH\x00\x42\x0f\n\rload_or_write"\xac\x01\n\x16\x41\x62stractMetricSelector\x12\x1a\n\x10max_memory_usage\x18\x01 \x01(\x08H\x00\x12\x1e\n\x14\x63pu_usage_percentile\x18\x02 \x01(\x03H\x00\x12\x17\n\ravg_cpu_usage\x18\x03 \x01(\x08H\x00\x12\x1a\n\x10\x61vg_memory_usage\x18\x04 \x01(\x08H\x00\x12\x17\n\rmax_cpu_usage\x18\x05 \x01(\x08H\x00\x42\x08\n\x06metric"\\\n\rResetAndShift\x12\x1a\n\x12reset_time_to_zero\x18\x01 \x01(\x08\x12!\n\x0crandom_shift\x18\x02 \x01(\x0b\x32\x0b.Int64Range\x12\x0c\n\x04seed\x18\x03 \x01(\x03"\xa6\x01\n\tScheduler\x12(\n\tat_random\x18\x01 \x01(\x0b\x32\x13.Scheduler.AtRandomH\x00\x12\x17\n\rby_machine_id\x18\x02 \x01(\x08H\x00\x12\x19\n\x0f\x62y_vm_unique_id\x18\x03 \x01(\x08H\x00\x1a.\n\x08\x41tRandom\x12\x14\n\x0cnum_machines\x18\x01 \x01(\x03\x12\x0c\n\x04seed\x18\x02 \x01(\x03\x42\x0b\n\tscheduler"\xbc\t\n\x0fPredictorConfig\x12.\n\x14\x64\x65\x63orated_predictors\x18\n \x03(\x0b\x32\x10.PredictorConfig\x12<\n\ravg_predictor\x18\x01 \x01(\x0b\x32#.PredictorConfig.AvgPredictorConfigH\x00\x12<\n\rmax_predictor\x18\x02 \x01(\x0b\x32#.PredictorConfig.MaxPredictorConfigH\x00\x12<\n\ravg_decorator\x18\x03 \x01(\x0b\x32#.PredictorConfig.AvgDecoratorConfigH\x00\x12<\n\rmax_decorator\x18\x04 
\x01(\x0b\x32#.PredictorConfig.MaxDecoratorConfigH\x00\x12M\n\x1bper_vm_percentile_predictor\x18\x05 \x01(\x0b\x32&.PredictorConfig.PerVMPercentileConfigH\x00\x12:\n\x11n_sigma_predictor\x18\x06 \x01(\x0b\x32\x1d.PredictorConfig.NSigmaConfigH\x00\x12@\n\x0flimit_predictor\x18\x07 \x01(\x0b\x32%.PredictorConfig.LimitPredictorConfigH\x00\x12W\n per_machine_percentile_predictor\x18\x08 \x01(\x0b\x32+.PredictorConfig.PerMachinePercentileConfigH\x00\x1a\x43\n\x12\x41vgPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a/\n\x14LimitPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x1a\x43\n\x12MaxPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a|\n\x15PerVMPercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x81\x01\n\x1aPerMachinePercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x65\n\x0cNSigmaConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x1b\n\x13num_history_samples\x18\x03 \x01(\x03\x12\t\n\x01n\x18\x04 \x01(\x03\x1a\x14\n\x12\x41vgDecoratorConfig\x1a\x14\n\x12MaxDecoratorConfigB\x0b\n\tpredictor"\xfa\x01\n\x13\x46ortuneTellerConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csave_samples\x18\x02 \x01(\x08\x12\x33\n\x06oracle\x18\x03 \x01(\x0b\x32!.FortuneTellerConfig.OracleConfigH\x00\x12%\n\tpredictor\x18\x04 \x01(\x0b\x32\x10.PredictorConfigH\x00\x1aY\n\x0cOracleConfig\x12\x1a\n\x12horizon_in_seconds\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 
\x01(\x03:\x03\x31\x30\x30\x42\x08\n\x06teller"\xfa\x03\n\x10SimulationConfig\x12\x1c\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocation\x12\x19\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\t.VMFilter\x12&\n\x10\x66iltered_samples\x18\x03 \x01(\x0b\x32\x0c.LoadOrWrite\x12*\n\x14time_aligned_samples\x18\x04 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x06metric\x18\x05 \x01(\x0b\x32\x17.AbstractMetricSelector\x12\x33\n\x1dsamples_with_abstract_metrics\x18\x06 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x0freset_and_shift\x18\x07 \x01(\x0b\x32\x0e.ResetAndShift\x12\x32\n\x1csamples_with_reset_and_shift\x18\x08 \x01(\x0b\x32\x0c.LoadOrWrite\x12\x1d\n\tscheduler\x18\t \x01(\x0b\x32\n.Scheduler\x12\'\n\x11scheduled_samples\x18\n \x01(\x0b\x32\x0c.LoadOrWrite\x12,\n\x0e\x66ortune_teller\x18\x0b \x03(\x0b\x32\x14.FortuneTellerConfig\x12(\n\x11simulation_result\x18\x0c \x01(\x0b\x32\r.DataLocation',
)
_INT64RANGE = _descriptor.Descriptor(
name="Int64Range",
full_name="Int64Range",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="lower_bound",
full_name="Int64Range.lower_bound",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="upper_bound",
full_name="Int64Range.upper_bound",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=26,
serialized_end=80,
)
_DATALOCATION = _descriptor.Descriptor(
name="DataLocation",
full_name="DataLocation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="dataset",
full_name="DataLocation.dataset",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="table",
full_name="DataLocation.table",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=82,
serialized_end=128,
)
_VMFILTER = _descriptor.Descriptor(
name="VMFilter",
full_name="VMFilter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="VMFilter.start_time",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="VMFilter.end_time",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="remove_non_top_level_vms",
full_name="VMFilter.remove_non_top_level_vms",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="priority_range",
full_name="VMFilter.priority_range",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduling_class_range",
full_name="VMFilter.scheduling_class_range",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="machine_id",
full_name="VMFilter.machine_id",
index=5,
number=6,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=131,
serialized_end=315,
)
_LOADORWRITE = _descriptor.Descriptor(
name="LoadOrWrite",
full_name="LoadOrWrite",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="LoadOrWrite.input",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output",
full_name="LoadOrWrite.output",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="load_or_write",
full_name="LoadOrWrite.load_or_write",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=317,
serialized_end=412,
)
_ABSTRACTMETRICSELECTOR = _descriptor.Descriptor(
name="AbstractMetricSelector",
full_name="AbstractMetricSelector",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="max_memory_usage",
full_name="AbstractMetricSelector.max_memory_usage",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cpu_usage_percentile",
full_name="AbstractMetricSelector.cpu_usage_percentile",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_cpu_usage",
full_name="AbstractMetricSelector.avg_cpu_usage",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_memory_usage",
full_name="AbstractMetricSelector.avg_memory_usage",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_cpu_usage",
full_name="AbstractMetricSelector.max_cpu_usage",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="metric",
full_name="AbstractMetricSelector.metric",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=415,
serialized_end=587,
)
_RESETANDSHIFT = _descriptor.Descriptor(
name="ResetAndShift",
full_name="ResetAndShift",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="reset_time_to_zero",
full_name="ResetAndShift.reset_time_to_zero",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="random_shift",
full_name="ResetAndShift.random_shift",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed",
full_name="ResetAndShift.seed",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=589,
serialized_end=681,
)
_SCHEDULER_ATRANDOM = _descriptor.Descriptor(
name="AtRandom",
full_name="Scheduler.AtRandom",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="num_machines",
full_name="Scheduler.AtRandom.num_machines",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed",
full_name="Scheduler.AtRandom.seed",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=791,
serialized_end=837,
)
_SCHEDULER = _descriptor.Descriptor(
name="Scheduler",
full_name="Scheduler",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="at_random",
full_name="Scheduler.at_random",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="by_machine_id",
full_name="Scheduler.by_machine_id",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="by_vm_unique_id",
full_name="Scheduler.by_vm_unique_id",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_SCHEDULER_ATRANDOM,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="scheduler",
full_name="Scheduler.scheduler",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=684,
serialized_end=850,
)
_PREDICTORCONFIG_AVGPREDICTORCONFIG = _descriptor.Descriptor(
name="AvgPredictorConfig",
full_name="PredictorConfig.AvgPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.AvgPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.AvgPredictorConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1462,
serialized_end=1529,
)
# Descriptor for the nested message PredictorConfig.LimitPredictorConfig.
# Single field: min_num_samples (type=3/int64).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_LIMITPREDICTORCONFIG = _descriptor.Descriptor(
    name="LimitPredictorConfig",
    full_name="PredictorConfig.LimitPredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.LimitPredictorConfig.min_num_samples",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1531,
    serialized_end=1578,
)
# Descriptor for the nested message PredictorConfig.MaxPredictorConfig.
# Same field layout as AvgPredictorConfig: min_num_samples (int64),
# cap_to_limit (bool).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_MAXPREDICTORCONFIG = _descriptor.Descriptor(
    name="MaxPredictorConfig",
    full_name="PredictorConfig.MaxPredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.MaxPredictorConfig.min_num_samples",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.MaxPredictorConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1580,
    serialized_end=1647,
)
# Descriptor for the nested message PredictorConfig.PerVMPercentileConfig.
# Fields: min_num_samples (int64), cap_to_limit (bool),
# percentile (type=1/double, explicit default 100.0),
# num_history_samples (int64).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_PERVMPERCENTILECONFIG = _descriptor.Descriptor(
    name="PerVMPercentileConfig",
    full_name="PredictorConfig.PerVMPercentileConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.PerVMPercentileConfig.min_num_samples",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.PerVMPercentileConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="percentile",
            full_name="PredictorConfig.PerVMPercentileConfig.percentile",
            index=2,
            number=3,
            type=1,
            cpp_type=5,
            label=1,
            has_default_value=True,
            default_value=float(100),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="num_history_samples",
            full_name="PredictorConfig.PerVMPercentileConfig.num_history_samples",
            index=3,
            number=4,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1649,
    serialized_end=1773,
)
# Descriptor for the nested message PredictorConfig.PerMachinePercentileConfig.
# Field-for-field parallel to PerVMPercentileConfig: min_num_samples (int64),
# cap_to_limit (bool), percentile (double, default 100.0),
# num_history_samples (int64).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG = _descriptor.Descriptor(
    name="PerMachinePercentileConfig",
    full_name="PredictorConfig.PerMachinePercentileConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.PerMachinePercentileConfig.min_num_samples",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.PerMachinePercentileConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="percentile",
            full_name="PredictorConfig.PerMachinePercentileConfig.percentile",
            index=2,
            number=3,
            type=1,
            cpp_type=5,
            label=1,
            has_default_value=True,
            default_value=float(100),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="num_history_samples",
            full_name="PredictorConfig.PerMachinePercentileConfig.num_history_samples",
            index=3,
            number=4,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1776,
    serialized_end=1905,
)
# Descriptor for the nested message PredictorConfig.NSigmaConfig.
# Fields: min_num_samples (int64), cap_to_limit (bool),
# num_history_samples (int64), n (int64).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_NSIGMACONFIG = _descriptor.Descriptor(
    name="NSigmaConfig",
    full_name="PredictorConfig.NSigmaConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="min_num_samples",
            full_name="PredictorConfig.NSigmaConfig.min_num_samples",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="PredictorConfig.NSigmaConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="num_history_samples",
            full_name="PredictorConfig.NSigmaConfig.num_history_samples",
            index=2,
            number=3,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="n",
            full_name="PredictorConfig.NSigmaConfig.n",
            index=3,
            number=4,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1907,
    serialized_end=2008,
)
# Descriptor for the empty nested message PredictorConfig.AvgDecoratorConfig
# (no fields; acts as a marker/option message).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_AVGDECORATORCONFIG = _descriptor.Descriptor(
    name="AvgDecoratorConfig",
    full_name="PredictorConfig.AvgDecoratorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2010,
    serialized_end=2030,
)
# Descriptor for the empty nested message PredictorConfig.MaxDecoratorConfig
# (no fields; mirrors AvgDecoratorConfig).
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG_MAXDECORATORCONFIG = _descriptor.Descriptor(
    name="MaxDecoratorConfig",
    full_name="PredictorConfig.MaxDecoratorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2032,
    serialized_end=2052,
)
# Descriptor for the top-level PredictorConfig message.
# It holds a repeated (label=3) self-typed field `decorated_predictors`
# plus eight optional message fields (type=11) that are later wired into
# the `predictor` oneof (see the cross-wiring statements after the
# descriptor definitions). The eight nested_types are the config messages
# defined above.
# NOTE: protoc-generated; do not edit by hand.
_PREDICTORCONFIG = _descriptor.Descriptor(
    name="PredictorConfig",
    full_name="PredictorConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="decorated_predictors",
            full_name="PredictorConfig.decorated_predictors",
            index=0,
            number=10,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="avg_predictor",
            full_name="PredictorConfig.avg_predictor",
            index=1,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="max_predictor",
            full_name="PredictorConfig.max_predictor",
            index=2,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="avg_decorator",
            full_name="PredictorConfig.avg_decorator",
            index=3,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="max_decorator",
            full_name="PredictorConfig.max_decorator",
            index=4,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="per_vm_percentile_predictor",
            full_name="PredictorConfig.per_vm_percentile_predictor",
            index=5,
            number=5,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="n_sigma_predictor",
            full_name="PredictorConfig.n_sigma_predictor",
            index=6,
            number=6,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="limit_predictor",
            full_name="PredictorConfig.limit_predictor",
            index=7,
            number=7,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="per_machine_percentile_predictor",
            full_name="PredictorConfig.per_machine_percentile_predictor",
            index=8,
            number=8,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[
        _PREDICTORCONFIG_AVGPREDICTORCONFIG,
        _PREDICTORCONFIG_LIMITPREDICTORCONFIG,
        _PREDICTORCONFIG_MAXPREDICTORCONFIG,
        _PREDICTORCONFIG_PERVMPERCENTILECONFIG,
        _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
        _PREDICTORCONFIG_NSIGMACONFIG,
        _PREDICTORCONFIG_AVGDECORATORCONFIG,
        _PREDICTORCONFIG_MAXDECORATORCONFIG,
    ],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="predictor",
            full_name="PredictorConfig.predictor",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=853,
    serialized_end=2065,
)
# Descriptor for the nested message FortuneTellerConfig.OracleConfig.
# Fields: horizon_in_seconds (int64), cap_to_limit (bool),
# percentile (type=3/int64 here — unlike the double percentile in the
# percentile-predictor configs — with explicit default 100).
# NOTE: protoc-generated; do not edit by hand.
_FORTUNETELLERCONFIG_ORACLECONFIG = _descriptor.Descriptor(
    name="OracleConfig",
    full_name="FortuneTellerConfig.OracleConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="horizon_in_seconds",
            full_name="FortuneTellerConfig.OracleConfig.horizon_in_seconds",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="cap_to_limit",
            full_name="FortuneTellerConfig.OracleConfig.cap_to_limit",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="percentile",
            full_name="FortuneTellerConfig.OracleConfig.percentile",
            index=2,
            number=3,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=True,
            default_value=100,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2219,
    serialized_end=2308,
)
# Descriptor for the top-level FortuneTellerConfig message.
# Fields: name (type=9/string), save_samples (bool), and two message
# fields — oracle (OracleConfig) and predictor (PredictorConfig) — that
# are later wired into the `teller` oneof.
# NOTE: protoc-generated; do not edit by hand.
_FORTUNETELLERCONFIG = _descriptor.Descriptor(
    name="FortuneTellerConfig",
    full_name="FortuneTellerConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="FortuneTellerConfig.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="save_samples",
            full_name="FortuneTellerConfig.save_samples",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="oracle",
            full_name="FortuneTellerConfig.oracle",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="predictor",
            full_name="FortuneTellerConfig.predictor",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_FORTUNETELLERCONFIG_ORACLECONFIG,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="teller",
            full_name="FortuneTellerConfig.teller",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=2068,
    serialized_end=2318,
)
# Descriptor for the top-level SimulationConfig message — the pipeline
# configuration. All fields are messages (type=11): input/simulation_result
# (DataLocation), filter (VMFilter), several LoadOrWrite checkpoints
# (filtered_samples, time_aligned_samples, samples_with_abstract_metrics,
# samples_with_reset_and_shift, scheduled_samples), metric
# (AbstractMetricSelector), reset_and_shift, scheduler, and a repeated
# (label=3) fortune_teller (FortuneTellerConfig). The message types are
# attached in the wiring section below the descriptors.
# NOTE: protoc-generated; do not edit by hand.
_SIMULATIONCONFIG = _descriptor.Descriptor(
    name="SimulationConfig",
    full_name="SimulationConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="input",
            full_name="SimulationConfig.input",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="filter",
            full_name="SimulationConfig.filter",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="filtered_samples",
            full_name="SimulationConfig.filtered_samples",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="time_aligned_samples",
            full_name="SimulationConfig.time_aligned_samples",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="metric",
            full_name="SimulationConfig.metric",
            index=4,
            number=5,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="samples_with_abstract_metrics",
            full_name="SimulationConfig.samples_with_abstract_metrics",
            index=5,
            number=6,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="reset_and_shift",
            full_name="SimulationConfig.reset_and_shift",
            index=6,
            number=7,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="samples_with_reset_and_shift",
            full_name="SimulationConfig.samples_with_reset_and_shift",
            index=7,
            number=8,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="scheduler",
            full_name="SimulationConfig.scheduler",
            index=8,
            number=9,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="scheduled_samples",
            full_name="SimulationConfig.scheduled_samples",
            index=9,
            number=10,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="fortune_teller",
            full_name="SimulationConfig.fortune_teller",
            index=10,
            number=11,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="simulation_result",
            full_name="SimulationConfig.simulation_result",
            index=11,
            number=12,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2321,
    serialized_end=2827,
)
# ---------------------------------------------------------------------------
# Cross-wiring of the descriptors defined above: attach message types to
# message-typed fields, populate each oneof's field list (and the reverse
# containing_oneof links), set nested-type containment, register the
# message types on the file DESCRIPTOR, and finally register the file
# with the symbol database. Statement order follows protoc's output;
# do not edit or reorder by hand.
# ---------------------------------------------------------------------------
_VMFILTER.fields_by_name["priority_range"].message_type = _INT64RANGE
_VMFILTER.fields_by_name["scheduling_class_range"].message_type = _INT64RANGE
_LOADORWRITE.fields_by_name["input"].message_type = _DATALOCATION
_LOADORWRITE.fields_by_name["output"].message_type = _DATALOCATION
# LoadOrWrite.load_or_write oneof: input | output.
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
    _LOADORWRITE.fields_by_name["input"]
)
_LOADORWRITE.fields_by_name["input"].containing_oneof = _LOADORWRITE.oneofs_by_name[
    "load_or_write"
]
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
    _LOADORWRITE.fields_by_name["output"]
)
_LOADORWRITE.fields_by_name["output"].containing_oneof = _LOADORWRITE.oneofs_by_name[
    "load_or_write"
]
# AbstractMetricSelector.metric oneof: the five usage-metric selectors.
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["max_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "max_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["cpu_usage_percentile"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "cpu_usage_percentile"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["avg_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "avg_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["avg_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "avg_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
    _ABSTRACTMETRICSELECTOR.fields_by_name["max_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
    "max_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_RESETANDSHIFT.fields_by_name["random_shift"].message_type = _INT64RANGE
# Scheduler.scheduler oneof: at_random | by_machine_id | by_vm_unique_id.
_SCHEDULER_ATRANDOM.containing_type = _SCHEDULER
_SCHEDULER.fields_by_name["at_random"].message_type = _SCHEDULER_ATRANDOM
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
    _SCHEDULER.fields_by_name["at_random"]
)
_SCHEDULER.fields_by_name["at_random"].containing_oneof = _SCHEDULER.oneofs_by_name[
    "scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
    _SCHEDULER.fields_by_name["by_machine_id"]
)
_SCHEDULER.fields_by_name["by_machine_id"].containing_oneof = _SCHEDULER.oneofs_by_name[
    "scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
    _SCHEDULER.fields_by_name["by_vm_unique_id"]
)
_SCHEDULER.fields_by_name[
    "by_vm_unique_id"
].containing_oneof = _SCHEDULER.oneofs_by_name["scheduler"]
# PredictorConfig: nested-type containment and field message types.
_PREDICTORCONFIG_AVGPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_LIMITPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERVMPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_NSIGMACONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_AVGDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name["decorated_predictors"].message_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "avg_predictor"
].message_type = _PREDICTORCONFIG_AVGPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "max_predictor"
].message_type = _PREDICTORCONFIG_MAXPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "avg_decorator"
].message_type = _PREDICTORCONFIG_AVGDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "max_decorator"
].message_type = _PREDICTORCONFIG_MAXDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "per_vm_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERVMPERCENTILECONFIG
_PREDICTORCONFIG.fields_by_name[
    "n_sigma_predictor"
].message_type = _PREDICTORCONFIG_NSIGMACONFIG
_PREDICTORCONFIG.fields_by_name[
    "limit_predictor"
].message_type = _PREDICTORCONFIG_LIMITPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
    "per_machine_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG
# PredictorConfig.predictor oneof: the eight predictor/decorator configs.
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["avg_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "avg_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["max_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "max_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["avg_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
    "avg_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["max_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
    "max_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["per_vm_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "per_vm_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["n_sigma_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "n_sigma_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["limit_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "limit_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
    _PREDICTORCONFIG.fields_by_name["per_machine_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
    "per_machine_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
# FortuneTellerConfig.teller oneof: oracle | predictor.
_FORTUNETELLERCONFIG_ORACLECONFIG.containing_type = _FORTUNETELLERCONFIG
_FORTUNETELLERCONFIG.fields_by_name[
    "oracle"
].message_type = _FORTUNETELLERCONFIG_ORACLECONFIG
_FORTUNETELLERCONFIG.fields_by_name["predictor"].message_type = _PREDICTORCONFIG
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
    _FORTUNETELLERCONFIG.fields_by_name["oracle"]
)
_FORTUNETELLERCONFIG.fields_by_name[
    "oracle"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
    _FORTUNETELLERCONFIG.fields_by_name["predictor"]
)
_FORTUNETELLERCONFIG.fields_by_name[
    "predictor"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
# SimulationConfig: message types for every pipeline-stage field.
_SIMULATIONCONFIG.fields_by_name["input"].message_type = _DATALOCATION
_SIMULATIONCONFIG.fields_by_name["filter"].message_type = _VMFILTER
_SIMULATIONCONFIG.fields_by_name["filtered_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["time_aligned_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["metric"].message_type = _ABSTRACTMETRICSELECTOR
_SIMULATIONCONFIG.fields_by_name[
    "samples_with_abstract_metrics"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["reset_and_shift"].message_type = _RESETANDSHIFT
_SIMULATIONCONFIG.fields_by_name[
    "samples_with_reset_and_shift"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["scheduler"].message_type = _SCHEDULER
_SIMULATIONCONFIG.fields_by_name["scheduled_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["fortune_teller"].message_type = _FORTUNETELLERCONFIG
_SIMULATIONCONFIG.fields_by_name["simulation_result"].message_type = _DATALOCATION
# Register all top-level message types on the file descriptor.
DESCRIPTOR.message_types_by_name["Int64Range"] = _INT64RANGE
DESCRIPTOR.message_types_by_name["DataLocation"] = _DATALOCATION
DESCRIPTOR.message_types_by_name["VMFilter"] = _VMFILTER
DESCRIPTOR.message_types_by_name["LoadOrWrite"] = _LOADORWRITE
DESCRIPTOR.message_types_by_name["AbstractMetricSelector"] = _ABSTRACTMETRICSELECTOR
DESCRIPTOR.message_types_by_name["ResetAndShift"] = _RESETANDSHIFT
DESCRIPTOR.message_types_by_name["Scheduler"] = _SCHEDULER
DESCRIPTOR.message_types_by_name["PredictorConfig"] = _PREDICTORCONFIG
DESCRIPTOR.message_types_by_name["FortuneTellerConfig"] = _FORTUNETELLERCONFIG
DESCRIPTOR.message_types_by_name["SimulationConfig"] = _SIMULATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Generate the concrete Python message classes from the descriptors via the
# protobuf reflection machinery, and register each with the symbol database
# so it can be looked up by full name. Nested messages (e.g.
# Scheduler.AtRandom) are created as class attributes of their parent.
# NOTE: protoc-generated; do not edit by hand.
# ---------------------------------------------------------------------------
Int64Range = _reflection.GeneratedProtocolMessageType(
    "Int64Range",
    (_message.Message,),
    {
        "DESCRIPTOR": _INT64RANGE,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:Int64Range)
    },
)
_sym_db.RegisterMessage(Int64Range)
DataLocation = _reflection.GeneratedProtocolMessageType(
    "DataLocation",
    (_message.Message,),
    {
        "DESCRIPTOR": _DATALOCATION,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:DataLocation)
    },
)
_sym_db.RegisterMessage(DataLocation)
VMFilter = _reflection.GeneratedProtocolMessageType(
    "VMFilter",
    (_message.Message,),
    {
        "DESCRIPTOR": _VMFILTER,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:VMFilter)
    },
)
_sym_db.RegisterMessage(VMFilter)
LoadOrWrite = _reflection.GeneratedProtocolMessageType(
    "LoadOrWrite",
    (_message.Message,),
    {
        "DESCRIPTOR": _LOADORWRITE,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:LoadOrWrite)
    },
)
_sym_db.RegisterMessage(LoadOrWrite)
AbstractMetricSelector = _reflection.GeneratedProtocolMessageType(
    "AbstractMetricSelector",
    (_message.Message,),
    {
        "DESCRIPTOR": _ABSTRACTMETRICSELECTOR,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:AbstractMetricSelector)
    },
)
_sym_db.RegisterMessage(AbstractMetricSelector)
ResetAndShift = _reflection.GeneratedProtocolMessageType(
    "ResetAndShift",
    (_message.Message,),
    {
        "DESCRIPTOR": _RESETANDSHIFT,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:ResetAndShift)
    },
)
_sym_db.RegisterMessage(ResetAndShift)
Scheduler = _reflection.GeneratedProtocolMessageType(
    "Scheduler",
    (_message.Message,),
    {
        "AtRandom": _reflection.GeneratedProtocolMessageType(
            "AtRandom",
            (_message.Message,),
            {
                "DESCRIPTOR": _SCHEDULER_ATRANDOM,
                "__module__": "simulator.config_pb2"
                # @@protoc_insertion_point(class_scope:Scheduler.AtRandom)
            },
        ),
        "DESCRIPTOR": _SCHEDULER,
        "__module__": "simulator.config_pb2"
        # @@protoc_insertion_point(class_scope:Scheduler)
    },
)
_sym_db.RegisterMessage(Scheduler)
_sym_db.RegisterMessage(Scheduler.AtRandom)
PredictorConfig = _reflection.GeneratedProtocolMessageType(
"PredictorConfig",
(_message.Message,),
{
"AvgPredictorConfig": _reflection.GeneratedProtocolMessageType(
"AvgPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgPredictorConfig)
},
),
"LimitPredictorConfig": _reflection.GeneratedProtocolMessageType(
"LimitPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_LIMITPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.LimitPredictorConfig)
},
),
"MaxPredictorConfig": _reflection.GeneratedProtocolMessageType(
"MaxPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxPredictorConfig)
},
),
"PerVMPercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerVMPercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERVMPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerVMPercentileConfig)
},
),
"PerMachinePercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerMachinePercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerMachinePercentileConfig)
},
),
"NSigmaConfig": _reflection.GeneratedProtocolMessageType(
"NSigmaConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_NSIGMACONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.NSigmaConfig)
},
),
"AvgDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"AvgDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgDecoratorConfig)
},
),
"MaxDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"MaxDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxDecoratorConfig)
},
),
"DESCRIPTOR": _PREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig)
},
)
_sym_db.RegisterMessage(PredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.LimitPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.PerVMPercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.PerMachinePercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.NSigmaConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgDecoratorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxDecoratorConfig)
FortuneTellerConfig = _reflection.GeneratedProtocolMessageType(
"FortuneTellerConfig",
(_message.Message,),
{
"OracleConfig": _reflection.GeneratedProtocolMessageType(
"OracleConfig",
(_message.Message,),
{
"DESCRIPTOR": _FORTUNETELLERCONFIG_ORACLECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig.OracleConfig)
},
),
"DESCRIPTOR": _FORTUNETELLERCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig)
},
)
_sym_db.RegisterMessage(FortuneTellerConfig)
_sym_db.RegisterMessage(FortuneTellerConfig.OracleConfig)
SimulationConfig = _reflection.GeneratedProtocolMessageType(
"SimulationConfig",
(_message.Message,),
{
"DESCRIPTOR": _SIMULATIONCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:SimulationConfig)
},
)
_sym_db.RegisterMessage(SimulationConfig)
# @@protoc_insertion_point(module_scope)
| true | true |
1c464d12c804104184ab9202416708560155519f | 1,270 | py | Python | packages/pyre/weaver/MixedComments.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/weaver/MixedComments.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/weaver/MixedComments.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
class MixedComments:
    """
    The mixed commenting strategy: both a block marker pair and an individual line marker
    """

    # implemented interface
    def commentBlock(self, lines):
        """
        Create a comment block out of the given {lines}: the opening block
        marker, each line prefixed with the line leader, then the closing
        block marker.
        """
        # build the leader
        leader = self.leader + self.commentMarker
        # place the start comment block marker
        yield self.leader + self.startBlock
        # iterate over the {lines}
        for line in lines:
            # and render each one
            yield leader + ' ' + line
        # place the end comment block marker
        # BUG FIX: the original yielded {startBlock} again here, so blocks
        # were never closed and the {endBlock} attribute below was unused
        yield self.leader + ' ' + self.endBlock
        # all done
        return

    def commentLine(self, line):
        """
        Mark {line} as a comment
        """
        # build the leader
        leader = self.leader + self.commentMarker
        # if the line is non-empty
        if line:
            # mark it
            return leader + ' ' + line
        # otherwise, just return the comment characters
        return leader

    # private data; concrete weavers supply the actual marker strings
    # (the line {leader} is provided by the host class)
    endBlock = None
    startBlock = None
    commentMarker = None
# end of file
| 22.678571 | 89 | 0.568504 |
class MixedComments:
    """
    A commenting strategy that combines a block marker pair with a per-line
    marker: blocks open with {startBlock}, each line carries the line leader,
    and blocks close with {endBlock}.
    """

    def commentBlock(self, lines):
        """
        Render {lines} as a comment block, yielding one decorated line at a time
        """
        leader = self.leader + self.commentMarker
        yield self.leader + self.startBlock
        for line in lines:
            yield leader + ' ' + line
        # BUG FIX: the original yielded {startBlock} again here, leaving the
        # block unterminated and {endBlock} dead
        yield self.leader + ' ' + self.endBlock
        return

    def commentLine(self, line):
        """
        Mark a single {line} as a comment; empty lines get just the leader
        """
        leader = self.leader + self.commentMarker
        if line:
            return leader + ' ' + line
        return leader

    # marker data; concrete weavers override these (the line {leader} is
    # supplied by the host class)
    endBlock = None
    startBlock = None
    commentMarker = None
| true | true |
1c464e80baae8523873eba7c28b31180433a9491 | 244 | py | Python | accounts/templatetags/account_tags.py | GadirMirzayev/Django-E-commerce | 0ca289fdf584b29636a8fc9416319defad0be5a5 | [
"MIT"
] | 1 | 2021-08-20T07:44:39.000Z | 2021-08-20T07:44:39.000Z | accounts/templatetags/account_tags.py | GadirMirzayev/Django-E-commerce | 0ca289fdf584b29636a8fc9416319defad0be5a5 | [
"MIT"
] | null | null | null | accounts/templatetags/account_tags.py | GadirMirzayev/Django-E-commerce | 0ca289fdf584b29636a8fc9416319defad0be5a5 | [
"MIT"
] | null | null | null | from django.template import Library
from accounts.forms import LoginForm, RegistrationForm
# Module-level tag registry; Django's {% load %} machinery looks this name up.
register = Library()
@register.simple_tag
def get_login():
    """Return the LoginForm class so templates can render it via {% get_login %}."""
    return LoginForm
@register.simple_tag
def get_register():
return RegistrationForm | 17.428571 | 54 | 0.795082 | from django.template import Library
from accounts.forms import LoginForm, RegistrationForm
# Module-level tag registry; Django's {% load %} machinery looks this name up.
register = Library()
@register.simple_tag
def get_login():
    """Return the LoginForm class so templates can render it via {% get_login %}."""
    return LoginForm
@register.simple_tag
def get_register():
return RegistrationForm | true | true |
1c464ec8780d8f5ce3fb571d62ddf71de207f74c | 2,383 | py | Python | app/words.py | anbasile/mwe | 2a56b889c7c7f28aa479e477f8e52da7501c2691 | [
"Apache-2.0"
] | null | null | null | app/words.py | anbasile/mwe | 2a56b889c7c7f28aa479e477f8e52da7501c2691 | [
"Apache-2.0"
] | 2 | 2016-08-31T16:21:31.000Z | 2016-09-10T21:50:12.000Z | app/words.py | anbasile/mwe | 2a56b889c7c7f28aa479e477f8e52da7501c2691 | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup
from collections import defaultdict
import pandas as pd
import json
import networkx as nx
from networkx.readwrite import json_graph
import numpy as np
from lightning import Lightning
from colorsys import hsv_to_rgb
from sklearn import datasets
lgn = Lightning(host='http://public.lightning-viz.org')
def calculate(words):
    """For each query word, scrape collocation statistics from the CLIC/CQP
    web interface (WEBBIT corpus), sum the mutual-information (MI) score per
    collocate across all query words, build a collocation graph, and push a
    force-layout visualization to the public lightning-viz server.

    Returns a (top_collocate, iframe_html) tuple.
    """
    # instantiate a dictionary to later be filled with word:miscores
    wc = defaultdict(float)
    # one DataFrame of (collocate, word, mi) rows per query word
    frames = []
    print("...it will take a while. Wait a sec...")
    for word in words:
        # CQP query: collocates within a 2-token window either side,
        # nouns only, capped at 100 rows, with MI statistics enabled
        payload = {'searchstring': word.encode('ascii'),
                   'searchpositional':'word',
                   'searchpostag':'all',
                   'contextsize':'60c',
                   'sort2':'right',
                   'terminate':'100',
                   'searchtype':'coll',
                   'mistat':'on',
                   'collocspanleft':'2',
                   'collocspanright':'2',
                   'collocfilter':'noun'}
        r = requests.get("http://clic.cimec.unitn.it/cgi-bin/cqp/cqp.pl?corpuslist=WEBBIT", params=payload)
        soup = BeautifulSoup(r.content, 'lxml')
        # parse the html table and extract words and miscores. Add scores
        temp = []
        for tr in soup.find_all('tr')[1:]:  # [1:] skips the header row
            tds = tr.find_all('td')
            # first cell looks like "word~~collocate"
            # NOTE(review): this rebinds the outer loop variable `word`
            word = tds[0].text.split('~~')[1]
            mi = float(tds[4].text)
            wc[word] += mi
            # NOTE(review): appends a lazy `map` object; on Python 3 pandas
            # would not expand it into columns -- this looks like
            # Python-2-era code. Confirm target interpreter before porting.
            temp.append(map(lambda x:x.text,tds[0:]))
        x = pd.DataFrame(temp)
        df = pd.DataFrame()
        # split "word~~collocate" cells into separate columns
        # NOTE(review): DataFrame.ix is deprecated and removed in modern
        # pandas -- would need .iloc/.loc on upgrade
        df['coll'] = x.ix[0:,0].apply(lambda x: x.split('~~')[1])
        df['word'] = x.ix[0:,0].apply(lambda x: x.split('~~')[0])
        df['mi'] = x.ix[0:,4]
        frames.append(df)
    #sort the results in decreasing order
    results = []
    for w in sorted(wc, key=wc.get, reverse=True):
        results.append((w, wc[w]))
    #spit out the top result. If using ipython you can check the rest of the list by tiping `results`
    #viz part: build the word/collocate graph and ship its adjacency matrix
    #to the lightning-viz force-layout endpoint
    results_df = pd.concat(frames)
    G=nx.from_pandas_dataframe(results_df, 'word','coll',['mi'])
    mat = nx.adjacency_matrix(G).todense()
    viz = lgn.force(mat)
    vid = viz.id
    print(vid)
    url = '<iframe src="http://public.lightning-viz.org/visualizations/'+vid+'/iframe/" width=100% height=400px>'
    return (results[0][0].strip(),url)
| 35.567164 | 113 | 0.578682 | import requests
from bs4 import BeautifulSoup
from collections import defaultdict
import pandas as pd
import json
import networkx as nx
from networkx.readwrite import json_graph
import numpy as np
from lightning import Lightning
from colorsys import hsv_to_rgb
from sklearn import datasets
lgn = Lightning(host='http://public.lightning-viz.org')
def calculate(words):
    """For each query word, scrape collocation statistics from the CLIC/CQP
    web interface (WEBBIT corpus), sum the mutual-information (MI) score per
    collocate across all query words, build a collocation graph, and push a
    force-layout visualization to the public lightning-viz server.

    Returns a (top_collocate, iframe_html) tuple.
    """
    # accumulates collocate -> summed MI score
    wc = defaultdict(float)
    # one DataFrame of (collocate, word, mi) rows per query word
    frames = []
    print("...it will take a while. Wait a sec...")
    for word in words:
        # CQP query: collocates within a 2-token window either side,
        # nouns only, capped at 100 rows, with MI statistics enabled
        payload = {'searchstring': word.encode('ascii'),
                   'searchpositional':'word',
                   'searchpostag':'all',
                   'contextsize':'60c',
                   'sort2':'right',
                   'terminate':'100',
                   'searchtype':'coll',
                   'mistat':'on',
                   'collocspanleft':'2',
                   'collocspanright':'2',
                   'collocfilter':'noun'}
        r = requests.get("http://clic.cimec.unitn.it/cgi-bin/cqp/cqp.pl?corpuslist=WEBBIT", params=payload)
        soup = BeautifulSoup(r.content, 'lxml')
        temp = []
        # walk the result table; [1:] skips the header row
        for tr in soup.find_all('tr')[1:]:
            tds = tr.find_all('td')
            # first cell looks like "word~~collocate"
            # NOTE(review): rebinds the outer loop variable `word`
            word = tds[0].text.split('~~')[1]
            mi = float(tds[4].text)
            wc[word] += mi
            # NOTE(review): appends a lazy `map` object; Python-2-era code --
            # on Python 3 pandas would not expand it into columns
            temp.append(map(lambda x:x.text,tds[0:]))
        x = pd.DataFrame(temp)
        df = pd.DataFrame()
        # split "word~~collocate" cells into separate columns
        # NOTE(review): DataFrame.ix is deprecated/removed in modern pandas
        df['coll'] = x.ix[0:,0].apply(lambda x: x.split('~~')[1])
        df['word'] = x.ix[0:,0].apply(lambda x: x.split('~~')[0])
        df['mi'] = x.ix[0:,4]
        frames.append(df)
    # rank collocates by total MI, best first
    results = []
    for w in sorted(wc, key=wc.get, reverse=True):
        results.append((w, wc[w]))
    # visualization: adjacency matrix of the word/collocate graph, shipped to
    # the lightning-viz force-layout endpoint
    results_df = pd.concat(frames)
    G=nx.from_pandas_dataframe(results_df, 'word','coll',['mi'])
    mat = nx.adjacency_matrix(G).todense()
    viz = lgn.force(mat)
    vid = viz.id
    print(vid)
    url = '<iframe src="http://public.lightning-viz.org/visualizations/'+vid+'/iframe/" width=100% height=400px>'
    return (results[0][0].strip(),url)
| true | true |
1c464fea142c0d5443ee2c8f9823dac623cc81f2 | 10,404 | py | Python | gui/kivy/uix/dialogs/settings.py | lionzeye/reddelectrum | e39497aee08b08bed89efa10072d17fb1e37920c | [
"MIT"
] | null | null | null | gui/kivy/uix/dialogs/settings.py | lionzeye/reddelectrum | e39497aee08b08bed89efa10072d17fb1e37920c | [
"MIT"
] | null | null | null | gui/kivy/uix/dialogs/settings.py | lionzeye/reddelectrum | e39497aee08b08bed89efa10072d17fb1e37920c | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from reddelectrum.util import base_units
from reddelectrum.i18n import languages
from reddelectrum_gui.kivy.i18n import _
from reddelectrum.plugins import run_hook
from reddelectrum import coinchooser
from reddelectrum.util import fee_levels
from choice_dialog import ChoiceDialog
# Kv rules for the settings popup.
# BUG FIX: the i18n #:import below referenced `electrum_ltc_gui.kivy.i18n._`,
# a leftover from the upstream Electrum-LTC fork; this package is
# reddelectrum_gui (see the Python imports above), so the kv rule would fail
# to resolve (or pick up a foreign package) when the widget is instantiated.
Builder.load_string('''
#:import partial functools.partial
#:import _ reddelectrum_gui.kivy.i18n._

<SettingsDialog@Popup>
    id: settings
    title: _('Electrum Settings')
    disable_pin: False
    use_encryption: False
    BoxLayout:
        orientation: 'vertical'
        ScrollView:
            GridLayout:
                id: scrollviewlayout
                cols:1
                size_hint: 1, None
                height: self.minimum_height
                padding: '10dp'
                SettingsItem:
                    lang: settings.get_language_name()
                    title: 'Language' + ': ' + str(self.lang)
                    description: _('Language')
                    action: partial(root.language_dialog, self)
                CardSeparator
                SettingsItem:
                    status: '' if root.disable_pin else ('ON' if root.use_encryption else 'OFF')
                    disabled: root.disable_pin
                    title: _('PIN code') + ': ' + self.status
                    description: _("Change your PIN code.")
                    action: partial(root.change_password, self)
                CardSeparator
                SettingsItem:
                    bu: app.base_unit
                    title: _('Denomination') + ': ' + self.bu
                    description: _("Base unit for Reddcoin amounts.")
                    action: partial(root.unit_dialog, self)
                CardSeparator
                SettingsItem:
                    status: root.fee_status()
                    title: _('Fees') + ': ' + self.status
                    description: _("Fees paid to the Reddcoin miners.")
                    action: partial(root.fee_dialog, self)
                CardSeparator
                SettingsItem:
                    status: root.fx_status()
                    title: _('Fiat Currency') + ': ' + self.status
                    description: _("Display amounts in fiat currency.")
                    action: partial(root.fx_dialog, self)
                CardSeparator
                SettingsItem:
                    status: 'ON' if bool(app.plugins.get('labels')) else 'OFF'
                    title: _('Labels Sync') + ': ' + self.status
                    description: _("Save and synchronize your labels.")
                    action: partial(root.plugin_dialog, 'labels', self)
                CardSeparator
                SettingsItem:
                    status: 'ON' if app.use_rbf else 'OFF'
                    title: _('Replace-by-fee') + ': ' + self.status
                    description: _("Create replaceable transactions.")
                    message:
                        _('If you check this box, your transactions will be marked as non-final,') \
                        + ' ' + _('and you will have the possiblity, while they are unconfirmed, to replace them with transactions that pays higher fees.') \
                        + ' ' + _('Note that some merchants do not accept non-final transactions until they are confirmed.')
                    action: partial(root.boolean_dialog, 'use_rbf', _('Replace by fee'), self.message)
                CardSeparator
                SettingsItem:
                    status: _('Yes') if app.use_unconfirmed else _('No')
                    title: _('Spend unconfirmed') + ': ' + self.status
                    description: _("Use unconfirmed coins in transactions.")
                    message: _('Spend unconfirmed coins')
                    action: partial(root.boolean_dialog, 'use_unconfirmed', _('Use unconfirmed'), self.message)
                CardSeparator
                SettingsItem:
                    status: _('Yes') if app.use_change else _('No')
                    title: _('Use change addresses') + ': ' + self.status
                    description: _("Send your change to separate addresses.")
                    message: _('Send excess coins to change addresses')
                    action: partial(root.boolean_dialog, 'use_change', _('Use change addresses'), self.message)
                CardSeparator
                SettingsItem:
                    status: root.coinselect_status()
                    title: _('Coin selection') + ': ' + self.status
                    description: "Coin selection method"
                    action: partial(root.coinselect_dialog, self)
''')
class SettingsDialog(Factory.Popup):
    """Application settings popup; each sub-dialog is built lazily on first
    use and cached on the instance."""

    def __init__(self, app):
        # `app` is the running Electrum kivy application instance
        self.app = app
        self.plugins = self.app.plugins
        self.config = self.app.electrum_config
        Factory.Popup.__init__(self)
        layout = self.ids.scrollviewlayout
        # let the grid track its children's total height so the enclosing
        # ScrollView can scroll it
        layout.bind(minimum_height=layout.setter('height'))
        # cached dialogs
        self._fx_dialog = None
        self._fee_dialog = None
        self._proxy_dialog = None
        self._language_dialog = None
        self._unit_dialog = None
        self._coinselect_dialog = None

    def update(self):
        """Refresh wallet-dependent state (PIN availability / encryption flag)."""
        self.wallet = self.app.wallet
        self.disable_pin = self.wallet.is_watching_only() if self.wallet else True
        self.use_encryption = self.wallet.has_password() if self.wallet else False

    def get_language_name(self):
        """Human-readable name of the configured UI language ('' if unknown)."""
        return languages.get(self.config.get('language', 'en_UK'), '')

    def change_password(self, item, dt):
        # delegate to the app; `self.update` runs afterwards as the callback
        self.app.change_password(self.update)

    def language_dialog(self, item, dt):
        """Open (and cache) the language picker."""
        if self._language_dialog is None:
            l = self.config.get('language', 'en_UK')
            def cb(key):
                self.config.set_key("language", key, True)
                item.lang = self.get_language_name()
                self.app.language = key
            self._language_dialog = ChoiceDialog(_('Language'), languages, l, cb)
        self._language_dialog.open()

    def unit_dialog(self, item, dt):
        """Open (and cache) the base-unit (denomination) picker."""
        if self._unit_dialog is None:
            def cb(text):
                self.app._set_bu(text)
                item.bu = self.app.base_unit
            self._unit_dialog = ChoiceDialog(_('Denomination'), base_units.keys(), self.app.base_unit, cb)
        self._unit_dialog.open()

    def coinselect_status(self):
        """Name of the active coin-selection policy."""
        return coinchooser.get_name(self.app.electrum_config)

    def coinselect_dialog(self, item, dt):
        """Open (and cache) the coin-selection policy picker."""
        if self._coinselect_dialog is None:
            choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
            chooser_name = coinchooser.get_name(self.config)
            def cb(text):
                self.config.set_key('coin_chooser', text)
                item.status = text
            self._coinselect_dialog = ChoiceDialog(_('Coin selection'), choosers, chooser_name, cb)
        self._coinselect_dialog.open()

    def proxy_status(self):
        """'host:port' of the configured proxy, or localized 'None'."""
        server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
        return proxy.get('host') +':' + proxy.get('port') if proxy else _('None')

    def proxy_dialog(self, item, dt):
        """Open (and cache) the proxy editor; changes apply on dismiss."""
        if self._proxy_dialog is None:
            server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
            def callback(popup):
                # mode 'None' disables the proxy entirely
                if popup.ids.mode.text != 'None':
                    proxy = {
                        'mode':popup.ids.mode.text,
                        'host':popup.ids.host.text,
                        'port':popup.ids.port.text,
                        'user':popup.ids.user.text,
                        'password':popup.ids.password.text
                    }
                else:
                    proxy = None
                self.app.network.set_parameters(server, port, protocol, proxy, auto_connect)
                item.status = self.proxy_status()
            popup = Builder.load_file('gui/kivy/uix/ui_screens/proxy.kv')
            # pre-populate the form from the current proxy settings
            popup.ids.mode.text = proxy.get('mode') if proxy else 'None'
            popup.ids.host.text = proxy.get('host') if proxy else ''
            popup.ids.port.text = proxy.get('port') if proxy else ''
            popup.ids.user.text = proxy.get('user') if proxy else ''
            popup.ids.password.text = proxy.get('password') if proxy else ''
            popup.on_dismiss = lambda: callback(popup)
            self._proxy_dialog = popup
        self._proxy_dialog.open()

    def plugin_dialog(self, name, label, dt):
        """Toggle the plugin `name` via a checkbox dialog and update `label`."""
        from checkbox_dialog import CheckBoxDialog
        def callback(status):
            self.plugins.enable(name) if status else self.plugins.disable(name)
            label.status = 'ON' if status else 'OFF'
        status = bool(self.plugins.get(name))
        dd = self.plugins.descriptions.get(name)
        descr = dd.get('description')
        fullname = dd.get('fullname')
        d = CheckBoxDialog(fullname, descr, status, callback)
        d.open()

    def fee_status(self):
        """Short description of the fee policy: dynamic level name, or the
        static per-kB rate."""
        if self.config.get('dynamic_fees', True):
            return fee_levels[self.config.get('fee_level', 2)]
        else:
            return self.app.format_amount_and_units(self.config.fee_per_kb()) + '/kB'

    def fee_dialog(self, label, dt):
        """Open (and cache) the fee configuration dialog."""
        if self._fee_dialog is None:
            from fee_dialog import FeeDialog
            def cb():
                label.status = self.fee_status()
            self._fee_dialog = FeeDialog(self.app, self.config, cb)
        self._fee_dialog.open()

    def boolean_dialog(self, name, title, message, dt):
        """Toggle the boolean app attribute `name` via a checkbox dialog."""
        from checkbox_dialog import CheckBoxDialog
        CheckBoxDialog(title, message, getattr(self.app, name), lambda x: setattr(self.app, name, x)).open()

    def fx_status(self):
        """'CCY [exchange]' when fiat display is enabled, else localized 'None'."""
        fx = self.app.fx
        if fx.is_enabled():
            source = fx.exchange.name()
            ccy = fx.get_currency()
            return '%s [%s]' %(ccy, source)
        else:
            return _('None')

    def fx_dialog(self, label, dt):
        """Open (and cache) the fiat-currency configuration dialog."""
        if self._fx_dialog is None:
            from fx_dialog import FxDialog
            def cb():
                label.status = self.fx_status()
            self._fx_dialog = FxDialog(self.app, self.plugins, self.config, cb)
        self._fx_dialog.open()
| 43.714286 | 157 | 0.569685 | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from reddelectrum.util import base_units
from reddelectrum.i18n import languages
from reddelectrum_gui.kivy.i18n import _
from reddelectrum.plugins import run_hook
from reddelectrum import coinchooser
from reddelectrum.util import fee_levels
from choice_dialog import ChoiceDialog
# Kv rules for the settings popup.
# BUG FIX: the i18n #:import below referenced `electrum_ltc_gui.kivy.i18n._`,
# a leftover from the upstream Electrum-LTC fork; this package is
# reddelectrum_gui (see the Python imports above), so the kv rule would fail
# to resolve (or pick up a foreign package) when the widget is instantiated.
Builder.load_string('''
#:import partial functools.partial
#:import _ reddelectrum_gui.kivy.i18n._

<SettingsDialog@Popup>
    id: settings
    title: _('Electrum Settings')
    disable_pin: False
    use_encryption: False
    BoxLayout:
        orientation: 'vertical'
        ScrollView:
            GridLayout:
                id: scrollviewlayout
                cols:1
                size_hint: 1, None
                height: self.minimum_height
                padding: '10dp'
                SettingsItem:
                    lang: settings.get_language_name()
                    title: 'Language' + ': ' + str(self.lang)
                    description: _('Language')
                    action: partial(root.language_dialog, self)
                CardSeparator
                SettingsItem:
                    status: '' if root.disable_pin else ('ON' if root.use_encryption else 'OFF')
                    disabled: root.disable_pin
                    title: _('PIN code') + ': ' + self.status
                    description: _("Change your PIN code.")
                    action: partial(root.change_password, self)
                CardSeparator
                SettingsItem:
                    bu: app.base_unit
                    title: _('Denomination') + ': ' + self.bu
                    description: _("Base unit for Reddcoin amounts.")
                    action: partial(root.unit_dialog, self)
                CardSeparator
                SettingsItem:
                    status: root.fee_status()
                    title: _('Fees') + ': ' + self.status
                    description: _("Fees paid to the Reddcoin miners.")
                    action: partial(root.fee_dialog, self)
                CardSeparator
                SettingsItem:
                    status: root.fx_status()
                    title: _('Fiat Currency') + ': ' + self.status
                    description: _("Display amounts in fiat currency.")
                    action: partial(root.fx_dialog, self)
                CardSeparator
                SettingsItem:
                    status: 'ON' if bool(app.plugins.get('labels')) else 'OFF'
                    title: _('Labels Sync') + ': ' + self.status
                    description: _("Save and synchronize your labels.")
                    action: partial(root.plugin_dialog, 'labels', self)
                CardSeparator
                SettingsItem:
                    status: 'ON' if app.use_rbf else 'OFF'
                    title: _('Replace-by-fee') + ': ' + self.status
                    description: _("Create replaceable transactions.")
                    message:
                        _('If you check this box, your transactions will be marked as non-final,') \
                        + ' ' + _('and you will have the possiblity, while they are unconfirmed, to replace them with transactions that pays higher fees.') \
                        + ' ' + _('Note that some merchants do not accept non-final transactions until they are confirmed.')
                    action: partial(root.boolean_dialog, 'use_rbf', _('Replace by fee'), self.message)
                CardSeparator
                SettingsItem:
                    status: _('Yes') if app.use_unconfirmed else _('No')
                    title: _('Spend unconfirmed') + ': ' + self.status
                    description: _("Use unconfirmed coins in transactions.")
                    message: _('Spend unconfirmed coins')
                    action: partial(root.boolean_dialog, 'use_unconfirmed', _('Use unconfirmed'), self.message)
                CardSeparator
                SettingsItem:
                    status: _('Yes') if app.use_change else _('No')
                    title: _('Use change addresses') + ': ' + self.status
                    description: _("Send your change to separate addresses.")
                    message: _('Send excess coins to change addresses')
                    action: partial(root.boolean_dialog, 'use_change', _('Use change addresses'), self.message)
                CardSeparator
                SettingsItem:
                    status: root.coinselect_status()
                    title: _('Coin selection') + ': ' + self.status
                    description: "Coin selection method"
                    action: partial(root.coinselect_dialog, self)
''')
class SettingsDialog(Factory.Popup):
    """Application settings popup; each sub-dialog is built lazily on first
    use and cached on the instance."""

    def __init__(self, app):
        # `app` is the running Electrum kivy application instance
        self.app = app
        self.plugins = self.app.plugins
        self.config = self.app.electrum_config
        Factory.Popup.__init__(self)
        layout = self.ids.scrollviewlayout
        # let the grid track its children's total height so the enclosing
        # ScrollView can scroll it
        layout.bind(minimum_height=layout.setter('height'))
        # cached dialogs
        self._fx_dialog = None
        self._fee_dialog = None
        self._proxy_dialog = None
        self._language_dialog = None
        self._unit_dialog = None
        self._coinselect_dialog = None

    def update(self):
        """Refresh wallet-dependent state (PIN availability / encryption flag)."""
        self.wallet = self.app.wallet
        self.disable_pin = self.wallet.is_watching_only() if self.wallet else True
        self.use_encryption = self.wallet.has_password() if self.wallet else False

    def get_language_name(self):
        """Human-readable name of the configured UI language ('' if unknown)."""
        return languages.get(self.config.get('language', 'en_UK'), '')

    def change_password(self, item, dt):
        # delegate to the app; `self.update` runs afterwards as the callback
        self.app.change_password(self.update)

    def language_dialog(self, item, dt):
        """Open (and cache) the language picker."""
        if self._language_dialog is None:
            l = self.config.get('language', 'en_UK')
            def cb(key):
                self.config.set_key("language", key, True)
                item.lang = self.get_language_name()
                self.app.language = key
            self._language_dialog = ChoiceDialog(_('Language'), languages, l, cb)
        self._language_dialog.open()

    def unit_dialog(self, item, dt):
        """Open (and cache) the base-unit (denomination) picker."""
        if self._unit_dialog is None:
            def cb(text):
                self.app._set_bu(text)
                item.bu = self.app.base_unit
            self._unit_dialog = ChoiceDialog(_('Denomination'), base_units.keys(), self.app.base_unit, cb)
        self._unit_dialog.open()

    def coinselect_status(self):
        """Name of the active coin-selection policy."""
        return coinchooser.get_name(self.app.electrum_config)

    def coinselect_dialog(self, item, dt):
        """Open (and cache) the coin-selection policy picker."""
        if self._coinselect_dialog is None:
            choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
            chooser_name = coinchooser.get_name(self.config)
            def cb(text):
                self.config.set_key('coin_chooser', text)
                item.status = text
            self._coinselect_dialog = ChoiceDialog(_('Coin selection'), choosers, chooser_name, cb)
        self._coinselect_dialog.open()

    def proxy_status(self):
        """'host:port' of the configured proxy, or localized 'None'."""
        server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
        return proxy.get('host') +':' + proxy.get('port') if proxy else _('None')

    def proxy_dialog(self, item, dt):
        """Open (and cache) the proxy editor; changes apply on dismiss."""
        if self._proxy_dialog is None:
            server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
            def callback(popup):
                # mode 'None' disables the proxy entirely
                if popup.ids.mode.text != 'None':
                    proxy = {
                        'mode':popup.ids.mode.text,
                        'host':popup.ids.host.text,
                        'port':popup.ids.port.text,
                        'user':popup.ids.user.text,
                        'password':popup.ids.password.text
                    }
                else:
                    proxy = None
                self.app.network.set_parameters(server, port, protocol, proxy, auto_connect)
                item.status = self.proxy_status()
            popup = Builder.load_file('gui/kivy/uix/ui_screens/proxy.kv')
            # pre-populate the form from the current proxy settings
            popup.ids.mode.text = proxy.get('mode') if proxy else 'None'
            popup.ids.host.text = proxy.get('host') if proxy else ''
            popup.ids.port.text = proxy.get('port') if proxy else ''
            popup.ids.user.text = proxy.get('user') if proxy else ''
            popup.ids.password.text = proxy.get('password') if proxy else ''
            popup.on_dismiss = lambda: callback(popup)
            self._proxy_dialog = popup
        self._proxy_dialog.open()

    def plugin_dialog(self, name, label, dt):
        """Toggle the plugin `name` via a checkbox dialog and update `label`."""
        from checkbox_dialog import CheckBoxDialog
        def callback(status):
            self.plugins.enable(name) if status else self.plugins.disable(name)
            label.status = 'ON' if status else 'OFF'
        status = bool(self.plugins.get(name))
        dd = self.plugins.descriptions.get(name)
        descr = dd.get('description')
        fullname = dd.get('fullname')
        d = CheckBoxDialog(fullname, descr, status, callback)
        d.open()

    def fee_status(self):
        """Short description of the fee policy: dynamic level name, or the
        static per-kB rate."""
        if self.config.get('dynamic_fees', True):
            return fee_levels[self.config.get('fee_level', 2)]
        else:
            return self.app.format_amount_and_units(self.config.fee_per_kb()) + '/kB'

    def fee_dialog(self, label, dt):
        """Open (and cache) the fee configuration dialog."""
        if self._fee_dialog is None:
            from fee_dialog import FeeDialog
            def cb():
                label.status = self.fee_status()
            self._fee_dialog = FeeDialog(self.app, self.config, cb)
        self._fee_dialog.open()

    def boolean_dialog(self, name, title, message, dt):
        """Toggle the boolean app attribute `name` via a checkbox dialog."""
        from checkbox_dialog import CheckBoxDialog
        CheckBoxDialog(title, message, getattr(self.app, name), lambda x: setattr(self.app, name, x)).open()

    def fx_status(self):
        """'CCY [exchange]' when fiat display is enabled, else localized 'None'."""
        fx = self.app.fx
        if fx.is_enabled():
            source = fx.exchange.name()
            ccy = fx.get_currency()
            return '%s [%s]' %(ccy, source)
        else:
            return _('None')

    def fx_dialog(self, label, dt):
        """Open (and cache) the fiat-currency configuration dialog."""
        if self._fx_dialog is None:
            from fx_dialog import FxDialog
            def cb():
                label.status = self.fx_status()
            self._fx_dialog = FxDialog(self.app, self.plugins, self.config, cb)
        self._fx_dialog.open()
| true | true |
1c46504895e0e2d1fa84256a4ac14e48db7125f9 | 19,813 | py | Python | Lib/site-packages/pygments/lexers/html.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2021-12-14T21:23:25.000Z | 2021-12-14T21:23:25.000Z | Lib/site-packages/pygments/lexers/html.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1,242 | 2019-08-31T16:03:19.000Z | 2019-08-31T18:00:46.000Z | Lib/site-packages/pygments/lexers/html.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2019-10-04T01:56:03.000Z | 2019-10-04T01:56:03.000Z | """
pygments.lexers.html
~~~~~~~~~~~~~~~~~~~~
Lexers for HTML, XML and related markup.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'PugLexer']
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """

    name = 'HTML'
    url = 'https://html.spec.whatwg.org/'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']

    # case-insensitive tags/attributes; DOTALL lets comments/CDATA span lines
    flags = re.IGNORECASE | re.DOTALL

    tokens = {
        # top level: character data, entities, comments, PIs and tags
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            (r'<!--.*?-->', Comment.Multiline),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            # <script>/<style> push two states: attributes first ('tag'),
            # then the element content handled by a nested lexer
            (r'(<)(\s*)(script)(\s*)',
             bygroups(Punctuation, Text, Name.Tag, Text),
             ('script-content', 'tag')),
            (r'(<)(\s*)(style)(\s*)',
             bygroups(Punctuation, Text, Name.Tag, Text),
             ('style-content', 'tag')),
            # note: this allows tag names not used in HTML like <x:with-dash>,
            # this is to support yet-unknown template engines and the like
            (r'(<)(\s*)([\w:.-]+)',
             bygroups(Punctuation, Text, Name.Tag), 'tag'),
            (r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation)),
        ],
        # inside an opening tag: attributes until '>' or '/>'
        'tag': [
            (r'\s+', Text),
            (r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
             'attr'),
            (r'[\w:-]+', Name.Attribute),
            (r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
        ],
        # <script> body, delegated to the JavaScript lexer
        'script-content': [
            (r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation), '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
            # fallback cases for when there is no closing script tag
            # first look for newline and then go back into root state
            # if that fails just read the rest of the file
            # this is similar to the error handling logic in lexer.py
            (r'.+?\n', using(JavascriptLexer), '#pop'),
            (r'.+', using(JavascriptLexer), '#pop'),
        ],
        # <style> body, delegated to the CSS lexer
        'style-content': [
            (r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation),'#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
            # fallback cases for when there is no closing style tag
            # first look for newline and then go back into root state
            # if that fails just read the rest of the file
            # this is similar to the error handling logic in lexer.py
            (r'.+?\n', using(CssLexer), '#pop'),
            (r'.+', using(CssLexer), '#pop'),
        ],
        # a single attribute value (quoted or bare)
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # called by Pygments' guess_lexer machinery (no `self` by design);
        # returning None means "no opinion"
        if html_doctype_matches(text):
            return 0.5
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
"""
A lexer for XSLT.
.. versionadded:: 0.10
"""
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = {
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
}
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
.. versionadded:: 1.3
"""
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),
(r'\[' + _dot + r'*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
.. versionadded:: 1.4
"""
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class PugLexer(ExtendedRegexLexer):
"""
For Pug markup.
Pug is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
.. versionadded:: 1.4
"""
name = 'Pug'
aliases = ['pug', 'jade']
filenames = ['*.pug', '*.jade']
mimetypes = ['text/x-pug', 'text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
JadeLexer = PugLexer # compat
| 32.74876 | 83 | 0.4225 |
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'PugLexer']
class HtmlLexer(RegexLexer):
name = 'HTML'
url = 'https://html.spec.whatwg.org/'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'(<)(\s*)(script)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('script-content', 'tag')),
(r'(<)(\s*)(style)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('style-content', 'tag')),
(r'(<)(\s*)([\w:.-]+)',
bygroups(Punctuation, Text, Name.Tag), 'tag'),
(r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation)),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
'attr'),
(r'[\w:-]+', Name.Attribute),
(r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
],
'script-content': [
(r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation), '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
(r'.+?\n', using(JavascriptLexer), '#pop'),
(r'.+', using(JavascriptLexer), '#pop'),
],
'style-content': [
(r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation),'#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
(r'.+?\n', using(CssLexer), '#pop'),
(r'.+', using(CssLexer), '#pop'),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
class DtdLexer(RegexLexer):
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)',
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
flags = re.MULTILINE | re.DOTALL
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = {
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
}
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),
(r'\[' + _dot + r'*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class PugLexer(ExtendedRegexLexer):
name = 'Pug'
aliases = ['pug', 'jade']
filenames = ['*.pug', '*.jade']
mimetypes = ['text/x-pug', 'text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
JadeLexer = PugLexer
| true | true |
1c46543448022df5270116046c61a0e794fe676d | 5,784 | py | Python | shakenfist/daemons/resources.py | fidoandfido/shakenfist | 18612b27649310fb2d6ea1b32dce89640e8c857d | [
"Apache-2.0"
] | null | null | null | shakenfist/daemons/resources.py | fidoandfido/shakenfist | 18612b27649310fb2d6ea1b32dce89640e8c857d | [
"Apache-2.0"
] | null | null | null | shakenfist/daemons/resources.py | fidoandfido/shakenfist | 18612b27649310fb2d6ea1b32dce89640e8c857d | [
"Apache-2.0"
] | null | null | null | import os
import psutil
import time
from prometheus_client import Gauge
from prometheus_client import start_http_server
from shakenfist.daemons import daemon
from shakenfist import config
from shakenfist import db
from shakenfist import logutil
from shakenfist import util
LOG, _ = logutil.setup(__name__)
def _get_stats():
libvirt = util.get_libvirt()
retval = {}
conn = libvirt.open(None)
# CPU info
present_cpus, _, available_cpus = conn.getCPUMap()
retval.update({
'cpu_max': present_cpus,
'cpu_available': available_cpus,
})
retval['cpu_max_per_instance'] = conn.getMaxVcpus(None)
# This is disabled as data we don't currently use
# for i in range(present_cpus):
# per_cpu_stats = conn.getCPUStats(i)
# for key in per_cpu_stats:
# retval['cpu_core%d_%s' % (i, key)] = per_cpu_stats[key]
try:
load_1, load_5, load_15 = psutil.getloadavg()
retval.update({
'cpu_load_1': load_1,
'cpu_load_5': load_5,
'cpu_load_15': load_15,
})
except Exception as e:
util.ignore_exception('load average', e)
# System memory info, converting bytes to mb
stats = psutil.virtual_memory()
retval.update({
'memory_max': stats.total // 1024 // 1024,
'memory_available': stats.available // 1024 // 1024
})
# libvirt memory info, converting kb to mb
memory_status = conn.getMemoryStats(
libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS)
retval.update({
'memory_max_libvirt': memory_status['total'] // 1024,
'memory_available_libvirt': memory_status['free'] // 1024,
})
# Kernel Shared Memory (KSM) information
ksm_details = {}
for ent in os.listdir('/sys/kernel/mm/ksm'):
with open('/sys/kernel/mm/ksm/%s' % ent) as f:
ksm_details['memory_ksm_%s' % ent] = int(f.read().rstrip())
retval.update(ksm_details)
# Disk info
s = os.statvfs(config.parsed.get('STORAGE_PATH'))
disk_counters = psutil.disk_io_counters()
retval.update({
'disk_total': s.f_frsize * s.f_blocks,
'disk_free': s.f_frsize * s.f_bavail,
'disk_used': s.f_frsize * (s.f_blocks - s.f_bfree),
'disk_read_bytes': disk_counters.read_bytes,
'disk_write_bytes': disk_counters.write_bytes,
})
# Network info
net_counters = psutil.net_io_counters()
retval.update({
'network_read_bytes': net_counters.bytes_recv,
'network_write_bytes': net_counters.bytes_sent,
})
# Virtual machine consumption info
total_instances = 0
total_active_instances = 0
total_instance_max_memory = 0
total_instance_actual_memory = 0
total_instance_vcpus = 0
total_instance_cpu_time = 0
for guest in conn.listAllDomains():
try:
active = guest.isActive() == 1
except Exception:
active = False
_, maxmem, mem, cpus, cpu_time = guest.info()
if active:
total_instances += 1
total_active_instances += 1
total_instance_max_memory += maxmem
total_instance_actual_memory += mem
total_instance_vcpus += cpus
total_instance_cpu_time += cpu_time
# Queue health statistics
node_queue_processing, node_queue_waiting = db.get_queue_length(
config.parsed.get('NODE_NAME'))
retval.update({
'cpu_total_instance_vcpus': total_instance_vcpus,
'cpu_total_instance_cpu_time': total_instance_cpu_time,
'memory_total_instance_max': total_instance_max_memory // 1024,
'memory_total_instance_actual': total_instance_actual_memory // 1024,
'instances_total': total_instances,
'instances_active': total_active_instances,
'node_queue_processing': node_queue_processing,
'node_queue_waiting': node_queue_waiting,
})
if util.is_network_node():
network_queue_processing, network_queue_waiting = db.get_queue_length(
'networknode')
retval.update({
'network_queue_processing': network_queue_processing,
'network_queue_waiting': network_queue_waiting,
})
return retval
class Monitor(daemon.Daemon):
def __init__(self, id):
super(Monitor, self).__init__(id)
start_http_server(config.parsed.get('PROMETHEUS_METRICS_PORT'))
def run(self):
LOG.info('Starting')
gauges = {
'updated_at': Gauge('updated_at', 'The last time metrics were updated')
}
last_metrics = 0
def update_metrics():
global last_metrics
stats = _get_stats()
for metric in stats:
if metric not in gauges:
gauges[metric] = Gauge(metric, '')
gauges[metric].set(stats[metric])
db.update_metrics_bulk(stats)
LOG.debug('Updated metrics')
gauges['updated_at'].set_to_current_time()
while True:
try:
jobname, _ = db.dequeue(
'%s-metrics' % config.parsed.get('NODE_NAME'))
if jobname:
if time.time() - last_metrics > 2:
update_metrics()
last_metrics = time.time()
db.resolve('%s-metrics' % config.parsed.get('NODE_NAME'),
jobname)
else:
time.sleep(0.2)
if time.time() - last_metrics > config.parsed.get('SCHEDULER_CACHE_TIMEOUT'):
update_metrics()
last_metrics = time.time()
except Exception as e:
util.ignore_exception('resource statistics', e)
| 31.434783 | 93 | 0.616355 | import os
import psutil
import time
from prometheus_client import Gauge
from prometheus_client import start_http_server
from shakenfist.daemons import daemon
from shakenfist import config
from shakenfist import db
from shakenfist import logutil
from shakenfist import util
LOG, _ = logutil.setup(__name__)
def _get_stats():
libvirt = util.get_libvirt()
retval = {}
conn = libvirt.open(None)
present_cpus, _, available_cpus = conn.getCPUMap()
retval.update({
'cpu_max': present_cpus,
'cpu_available': available_cpus,
})
retval['cpu_max_per_instance'] = conn.getMaxVcpus(None)
# for i in range(present_cpus):
# per_cpu_stats = conn.getCPUStats(i)
# for key in per_cpu_stats:
# retval['cpu_core%d_%s' % (i, key)] = per_cpu_stats[key]
try:
load_1, load_5, load_15 = psutil.getloadavg()
retval.update({
'cpu_load_1': load_1,
'cpu_load_5': load_5,
'cpu_load_15': load_15,
})
except Exception as e:
util.ignore_exception('load average', e)
# System memory info, converting bytes to mb
stats = psutil.virtual_memory()
retval.update({
'memory_max': stats.total // 1024 // 1024,
'memory_available': stats.available // 1024 // 1024
})
# libvirt memory info, converting kb to mb
memory_status = conn.getMemoryStats(
libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS)
retval.update({
'memory_max_libvirt': memory_status['total'] // 1024,
'memory_available_libvirt': memory_status['free'] // 1024,
})
# Kernel Shared Memory (KSM) information
ksm_details = {}
for ent in os.listdir('/sys/kernel/mm/ksm'):
with open('/sys/kernel/mm/ksm/%s' % ent) as f:
ksm_details['memory_ksm_%s' % ent] = int(f.read().rstrip())
retval.update(ksm_details)
# Disk info
s = os.statvfs(config.parsed.get('STORAGE_PATH'))
disk_counters = psutil.disk_io_counters()
retval.update({
'disk_total': s.f_frsize * s.f_blocks,
'disk_free': s.f_frsize * s.f_bavail,
'disk_used': s.f_frsize * (s.f_blocks - s.f_bfree),
'disk_read_bytes': disk_counters.read_bytes,
'disk_write_bytes': disk_counters.write_bytes,
})
# Network info
net_counters = psutil.net_io_counters()
retval.update({
'network_read_bytes': net_counters.bytes_recv,
'network_write_bytes': net_counters.bytes_sent,
})
# Virtual machine consumption info
total_instances = 0
total_active_instances = 0
total_instance_max_memory = 0
total_instance_actual_memory = 0
total_instance_vcpus = 0
total_instance_cpu_time = 0
for guest in conn.listAllDomains():
try:
active = guest.isActive() == 1
except Exception:
active = False
_, maxmem, mem, cpus, cpu_time = guest.info()
if active:
total_instances += 1
total_active_instances += 1
total_instance_max_memory += maxmem
total_instance_actual_memory += mem
total_instance_vcpus += cpus
total_instance_cpu_time += cpu_time
# Queue health statistics
node_queue_processing, node_queue_waiting = db.get_queue_length(
config.parsed.get('NODE_NAME'))
retval.update({
'cpu_total_instance_vcpus': total_instance_vcpus,
'cpu_total_instance_cpu_time': total_instance_cpu_time,
'memory_total_instance_max': total_instance_max_memory // 1024,
'memory_total_instance_actual': total_instance_actual_memory // 1024,
'instances_total': total_instances,
'instances_active': total_active_instances,
'node_queue_processing': node_queue_processing,
'node_queue_waiting': node_queue_waiting,
})
if util.is_network_node():
network_queue_processing, network_queue_waiting = db.get_queue_length(
'networknode')
retval.update({
'network_queue_processing': network_queue_processing,
'network_queue_waiting': network_queue_waiting,
})
return retval
class Monitor(daemon.Daemon):
def __init__(self, id):
super(Monitor, self).__init__(id)
start_http_server(config.parsed.get('PROMETHEUS_METRICS_PORT'))
def run(self):
LOG.info('Starting')
gauges = {
'updated_at': Gauge('updated_at', 'The last time metrics were updated')
}
last_metrics = 0
def update_metrics():
global last_metrics
stats = _get_stats()
for metric in stats:
if metric not in gauges:
gauges[metric] = Gauge(metric, '')
gauges[metric].set(stats[metric])
db.update_metrics_bulk(stats)
LOG.debug('Updated metrics')
gauges['updated_at'].set_to_current_time()
while True:
try:
jobname, _ = db.dequeue(
'%s-metrics' % config.parsed.get('NODE_NAME'))
if jobname:
if time.time() - last_metrics > 2:
update_metrics()
last_metrics = time.time()
db.resolve('%s-metrics' % config.parsed.get('NODE_NAME'),
jobname)
else:
time.sleep(0.2)
if time.time() - last_metrics > config.parsed.get('SCHEDULER_CACHE_TIMEOUT'):
update_metrics()
last_metrics = time.time()
except Exception as e:
util.ignore_exception('resource statistics', e)
| true | true |
1c465512236dd5e487d4620bb11fe1ccf6b857ef | 631 | py | Python | pysoup/logger/__init__.py | illBeRoy/pysoup | 742fd6630e1be27c275cb8dc6ee94412472cb20b | [
"MIT"
] | 4 | 2016-02-21T12:40:44.000Z | 2019-06-13T13:23:19.000Z | pysoup/logger/__init__.py | illBeRoy/pysoup | 742fd6630e1be27c275cb8dc6ee94412472cb20b | [
"MIT"
] | null | null | null | pysoup/logger/__init__.py | illBeRoy/pysoup | 742fd6630e1be27c275cb8dc6ee94412472cb20b | [
"MIT"
] | 1 | 2020-07-16T12:22:12.000Z | 2020-07-16T12:22:12.000Z | import os.path
import pysoup.utils.assets
class Logger(object):
def __init__(self, cwd):
self._log = ''
self._cwd = cwd
def log(self, text):
self._log += '{0}\n'.format(text)
def log_dependency_results(self, failed_dependencies):
for dependency in failed_dependencies:
self.log('could not install {0}'.format(dependency))
def dump_to_file(self, filename='soup.log'):
if self._log != '':
with open(os.path.join(self._cwd, filename), 'wb') as f:
f.write(pysoup.utils.assets.LOGO)
f.write('\n{0}'.format(self._log))
| 27.434783 | 68 | 0.59588 | import os.path
import pysoup.utils.assets
class Logger(object):
def __init__(self, cwd):
self._log = ''
self._cwd = cwd
def log(self, text):
self._log += '{0}\n'.format(text)
def log_dependency_results(self, failed_dependencies):
for dependency in failed_dependencies:
self.log('could not install {0}'.format(dependency))
def dump_to_file(self, filename='soup.log'):
if self._log != '':
with open(os.path.join(self._cwd, filename), 'wb') as f:
f.write(pysoup.utils.assets.LOGO)
f.write('\n{0}'.format(self._log))
| true | true |
1c4655f9e7e6644dbd5ab06a55417c8f38cfdb63 | 18,981 | py | Python | mindmeld/models/text_models.py | ritvikshrivastava/mindmeld | 48eccac059439ea0f32fa3ac9079415bb006233b | [
"Apache-2.0"
] | 580 | 2019-03-24T20:59:09.000Z | 2022-03-23T17:06:43.000Z | mindmeld/models/text_models.py | ritvikshrivastava/mindmeld | 48eccac059439ea0f32fa3ac9079415bb006233b | [
"Apache-2.0"
] | 199 | 2019-04-30T18:15:46.000Z | 2022-03-22T17:11:33.000Z | mindmeld/models/text_models.py | ritvikshrivastava/mindmeld | 48eccac059439ea0f32fa3ac9079415bb006233b | [
"Apache-2.0"
] | 164 | 2019-04-25T08:27:28.000Z | 2022-03-23T12:44:33.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains all code required to perform multinomial classification
of text.
"""
import logging
import operator
import os
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel, SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from .evaluation import EvaluatedExample, StandardModelEvaluation
from .helpers import (
CHAR_NGRAM_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
)
from .model import ModelConfig, Model, PytorchModel
logger = logging.getLogger(__name__)
class TextModel(Model):
# classifier types
LOG_REG_TYPE = "logreg"
DECISION_TREE_TYPE = "dtree"
RANDOM_FOREST_TYPE = "rforest"
SVM_TYPE = "svm"
ALLOWED_CLASSIFIER_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]
# default model scoring type
ACCURACY_SCORING = "accuracy"
_NEG_INF = -1e10
def __init__(self, config):
super().__init__(config)
self._class_encoder = SKLabelEncoder()
self._feat_vectorizer = DictVectorizer()
self._feat_selector = self._get_feature_selector()
self._feat_scaler = self._get_feature_scaler()
self._meta_type = None
self._meta_feat_vectorizer = DictVectorizer(sparse=False)
self._base_clfs = {}
self.cv_loss_ = None
self.train_acc_ = None
def __getstate__(self):
"""Returns the information needed pickle an instance of this class.
By default, pickling removes attributes with names starting with
underscores. This overrides that behavior.
"""
attributes = self.__dict__.copy()
attributes["_resources"] = {
rname: self._resources.get(rname, {})
for rname in [
WORD_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
CHAR_NGRAM_FREQ_RSC,
]
}
return attributes
def _get_model_constructor(self):
"""Returns the class of the actual underlying model"""
classifier_type = self.config.model_settings["classifier_type"]
try:
return {
TextModel.LOG_REG_TYPE: LogisticRegression,
TextModel.DECISION_TREE_TYPE: DecisionTreeClassifier,
TextModel.RANDOM_FOREST_TYPE: RandomForestClassifier,
TextModel.SVM_TYPE: SVC,
}[classifier_type]
except KeyError as e:
msg = "{}: Classifier type {!r} not recognized"
raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e
def _get_cv_scorer(self, selection_settings):
"""
Returns the scorer to use based on the selection settings and classifier type,
defaulting to accuracy.
"""
return selection_settings.get("scoring", TextModel.ACCURACY_SCORING)
def select_params(self, examples, labels, selection_settings=None):
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
clf, params = self._fit_cv(X, y, groups, selection_settings)
self._clf = clf
return params
def _fit(self, examples, labels, params=None):
"""Trains a classifier without cross-validation.
Args:
examples (numpy.matrix): The feature matrix for a dataset.
labels (numpy.array): The target output values.
params (dict): Parameters of the classifier
"""
params = self._convert_params(params, labels, is_grid=False)
model_class = self._get_model_constructor()
params = self._clean_params(model_class, params)
return model_class(**params).fit(examples, labels)
def predict_log_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
predictions = self._predict_proba(X, self._clf.predict_log_proba)
# JSON can't reliably encode infinity, so replace it with large number
for row in predictions:
_, probas = row
for label, proba in probas.items():
if proba == -np.Infinity:
probas[label] = TextModel._NEG_INF
return predictions
def _get_feature_weight(self, feat_name, label_class):
"""Retrieves the feature weight from the coefficient matrix. If there are only two
classes, the feature vector is actually collapsed into one so we need some logic to
handle that case.
Args:
feat_name (str) : The feature name
label_class (int): The index of the label
Returns:
(ndarray float): The ndarray with a single float element
"""
if len(self._class_encoder.classes_) == 2 and label_class >= 1:
return np.array([0.0])
else:
return self._clf.coef_[
label_class, self._feat_vectorizer.vocabulary_[feat_name]
]
def inspect(self, example, gold_label=None, dynamic_resource=None):
"""This class takes an example and returns a 2D list for every feature with feature
name, feature value, feature weight and their product for the predicted label. If gold
label is passed in, we will also include the feature value and weight for the gold
label and returns the log probability of the difference.
Args:
example (Query): The query to be predicted
gold_label (str): The gold label for this string
dynamic_resource (dict, optional): A dynamic resource to aid NLP inference
Returns:
(list of lists): A 2D array that includes every feature, their value, weight and \
probability
"""
if not isinstance(self._clf, LogisticRegression):
logging.warning(
"Currently inspection is only available for Logistic Regression Model"
)
return []
try:
gold_class = self._class_encoder.transform([gold_label])
except ValueError:
logger.warning("Unable to decode label `%s`", gold_label)
gold_class = None
pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]
pred_class = self._class_encoder.transform([pred_label])
features = self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
logging.info("Predicted: %s.", pred_label)
if gold_class is None:
columns = ["Feature", "Value", "Pred_W({0})".format(pred_label), "Pred_P"]
else:
columns = [
"Feature",
"Value",
"Pred_W({0})".format(pred_label),
"Pred_P",
"Gold_W({0})".format(gold_label),
"Gold_P",
"Diff",
]
logging.info("Gold: %s.", gold_label)
inspect_table = [columns]
# Get all active features sorted alphabetically by name
features = sorted(features.items(), key=operator.itemgetter(0))
for feature in features:
feat_name = feature[0]
feat_value = feature[1]
# Features we haven't seen before won't be in our vectorizer
# e.g., an exact match feature for a query we've never seen before
if feat_name not in self._feat_vectorizer.vocabulary_:
continue
weight = self._get_feature_weight(feat_name, pred_class)
product = feat_value * weight
if gold_class is None:
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
"-",
"-",
"-",
]
else:
gold_w = self._get_feature_weight(feat_name, gold_class)
gold_p = feat_value * gold_w
diff = gold_p - product
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
gold_w.round(4),
gold_p.round(4),
diff.round(4),
]
inspect_table.append(row)
return inspect_table
def _predict_proba(self, X, predictor):
predictions = []
for row in predictor(X):
probabilities = {}
top_class = None
for class_index, proba in enumerate(row):
raw_class = self._class_encoder.inverse_transform([class_index])[0]
decoded_class = self._label_encoder.decode([raw_class])[0]
probabilities[decoded_class] = proba
if proba > probabilities.get(top_class, -1.0):
top_class = decoded_class
predictions.append((top_class, probabilities))
return predictions
def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):
"""Transforms a list of examples into a feature matrix.
Args:
examples (list): The examples.
Returns:
(tuple): tuple containing:
* (numpy.matrix): The feature matrix.
* (numpy.array): The group labels for examples.
"""
groups = []
feats = []
for idx, example in enumerate(examples):
feats.append(
self._extract_features(example, dynamic_resource, self.text_preparation_pipeline)
)
groups.append(idx)
X, y = self._preprocess_data(feats, y, fit=fit)
return X, y, groups
def _preprocess_data(self, X, y=None, fit=False):
if fit:
y = self._class_encoder.fit_transform(y)
X = self._feat_vectorizer.fit_transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.fit_transform(X)
if self._feat_selector is not None:
X = self._feat_selector.fit_transform(X, y)
else:
X = self._feat_vectorizer.transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.transform(X)
if self._feat_selector is not None:
X = self._feat_selector.transform(X)
return X, y
def _convert_params(self, param_grid, y, is_grid=True):
"""
Convert the params from the style given by the config to the style
passed in to the actual classifier.
Args:
param_grid (dict): lists of classifier parameter values, keyed by parameter name
Returns:
(dict): revised param_grid
"""
if "class_weight" in param_grid:
raw_weights = (
param_grid["class_weight"] if is_grid else [param_grid["class_weight"]]
)
weights = [
{
k
if isinstance(k, int)
else self._class_encoder.transform((k,))[0]: v
for k, v in cw_dict.items()
}
for cw_dict in raw_weights
]
param_grid["class_weight"] = weights if is_grid else weights[0]
elif "class_bias" in param_grid:
# interpolate between class_bias=0 => class_weight=None
# and class_bias=1 => class_weight='balanced'
class_count = np.bincount(y)
classes = self._class_encoder.classes_
weights = []
raw_bias = (
param_grid["class_bias"] if is_grid else [param_grid["class_bias"]]
)
for class_bias in raw_bias:
# these weights are same as sklearn's class_weight='balanced'
balanced_w = [(len(y) / len(classes) / c) for c in class_count]
balanced_tuples = list(zip(list(range(len(classes))), balanced_w))
weights.append(
{c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}
)
param_grid["class_weight"] = weights if is_grid else weights[0]
del param_grid["class_bias"]
return param_grid
def _get_feature_selector(self):
"""Get a feature selector instance based on the feature_selector model
parameter
Returns:
(Object): a feature selector which returns a reduced feature matrix, \
given the full feature matrix, X and the class labels, y
"""
if self.config.model_settings is None:
selector_type = None
else:
selector_type = self.config.model_settings.get("feature_selector")
selector = {
"l1": SelectFromModel(LogisticRegression(penalty="l1", C=1)),
"f": SelectPercentile(),
}.get(selector_type)
return selector
def _get_feature_scaler(self):
"""Get a feature value scaler based on the model settings"""
if self.config.model_settings is None:
scale_type = None
else:
scale_type = self.config.model_settings.get("feature_scaler")
scaler = {
"std-dev": StandardScaler(with_mean=False),
"max-abs": MaxAbsScaler(),
}.get(scale_type)
return scaler
def evaluate(self, examples, labels):
"""Evaluates a model against the given examples and labels
Args:
examples: A list of examples to predict
labels: A list of expected labels
Returns:
ModelEvaluation: an object containing information about the \
evaluation
"""
# TODO: also expose feature weights?
predictions = self.predict_proba(examples)
# Create a model config object for the current effective config (after param selection)
config = self._get_effective_config()
evaluations = [
EvaluatedExample(
e, labels[i], predictions[i][0], predictions[i][1], config.label_type
)
for i, e in enumerate(examples)
]
model_eval = StandardModelEvaluation(config, evaluations)
return model_eval
def fit(self, examples, labels, params=None):
"""Trains this model.
This method inspects instance attributes to determine the classifier
object and cross-validation strategy, and then fits the model to the
training examples passed in.
Args:
examples (ProcessedQueryList.*Iterator): A list of examples.
labels (ProcessedQueryList.*Iterator): A parallel list to examples. The gold labels
for each example.
params (dict, optional): Parameters to use when training. Parameter
selection will be bypassed if this is provided
Returns:
(TextModel): Returns self to match classifier scikit-learn \
interfaces.
"""
params = params or self.config.params
skip_param_selection = params is not None or self.config.param_selection is None
# Shuffle to prevent order effects
indices = list(range(len(labels)))
random.shuffle(indices)
examples.reorder(indices)
labels.reorder(indices)
distinct_labels = set(labels)
if len(set(distinct_labels)) <= 1:
return self
# Extract features and classes
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
if skip_param_selection:
self._clf = self._fit(X, y, params)
self._current_params = params
else:
# run cross validation to select params
best_clf, best_params = self._fit_cv(X, y, groups)
self._clf = best_clf
self._current_params = best_params
return self
def predict(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
y = self._clf.predict(X)
predictions = self._class_encoder.inverse_transform(y)
return self._label_encoder.decode(predictions)
def predict_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
return self._predict_proba(X, self._clf.predict_proba)
def view_extracted_features(self, example, dynamic_resource=None):
return self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
@classmethod
def load(cls, path):
metadata = joblib.load(path)
# backwards compatability check for RoleClassifiers
if isinstance(metadata, dict):
return metadata["model"]
# in this case, metadata = model which was serialized and dumped
return metadata
def _dump(self, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
joblib.dump(self, path)
class PytorchTextModel(PytorchModel):
ALLOWED_CLASSIFIER_TYPES = ["embedder", "cnn", "lstm"]
pass
class AutoTextModel:
@staticmethod
def get_model_class(config: ModelConfig):
CLASSES = [TextModel, PytorchTextModel]
classifier_type = config.model_settings["classifier_type"]
for _class in CLASSES:
if classifier_type in _class.ALLOWED_CLASSIFIER_TYPES:
return _class
msg = f"Invalid 'classifier_type': {classifier_type}. " \
f"Allowed types are: {[_class.ALLOWED_CLASSIFIER_TYPES for _class in CLASSES]}"
raise ValueError(msg)
| 37 | 97 | 0.61393 |
import logging
import operator
import os
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel, SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from .evaluation import EvaluatedExample, StandardModelEvaluation
from .helpers import (
CHAR_NGRAM_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
)
from .model import ModelConfig, Model, PytorchModel
logger = logging.getLogger(__name__)
class TextModel(Model):
LOG_REG_TYPE = "logreg"
DECISION_TREE_TYPE = "dtree"
RANDOM_FOREST_TYPE = "rforest"
SVM_TYPE = "svm"
ALLOWED_CLASSIFIER_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]
ACCURACY_SCORING = "accuracy"
_NEG_INF = -1e10
def __init__(self, config):
super().__init__(config)
self._class_encoder = SKLabelEncoder()
self._feat_vectorizer = DictVectorizer()
self._feat_selector = self._get_feature_selector()
self._feat_scaler = self._get_feature_scaler()
self._meta_type = None
self._meta_feat_vectorizer = DictVectorizer(sparse=False)
self._base_clfs = {}
self.cv_loss_ = None
self.train_acc_ = None
def __getstate__(self):
attributes = self.__dict__.copy()
attributes["_resources"] = {
rname: self._resources.get(rname, {})
for rname in [
WORD_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
CHAR_NGRAM_FREQ_RSC,
]
}
return attributes
def _get_model_constructor(self):
classifier_type = self.config.model_settings["classifier_type"]
try:
return {
TextModel.LOG_REG_TYPE: LogisticRegression,
TextModel.DECISION_TREE_TYPE: DecisionTreeClassifier,
TextModel.RANDOM_FOREST_TYPE: RandomForestClassifier,
TextModel.SVM_TYPE: SVC,
}[classifier_type]
except KeyError as e:
msg = "{}: Classifier type {!r} not recognized"
raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e
def _get_cv_scorer(self, selection_settings):
return selection_settings.get("scoring", TextModel.ACCURACY_SCORING)
def select_params(self, examples, labels, selection_settings=None):
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
clf, params = self._fit_cv(X, y, groups, selection_settings)
self._clf = clf
return params
def _fit(self, examples, labels, params=None):
params = self._convert_params(params, labels, is_grid=False)
model_class = self._get_model_constructor()
params = self._clean_params(model_class, params)
return model_class(**params).fit(examples, labels)
def predict_log_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
predictions = self._predict_proba(X, self._clf.predict_log_proba)
for row in predictions:
_, probas = row
for label, proba in probas.items():
if proba == -np.Infinity:
probas[label] = TextModel._NEG_INF
return predictions
def _get_feature_weight(self, feat_name, label_class):
if len(self._class_encoder.classes_) == 2 and label_class >= 1:
return np.array([0.0])
else:
return self._clf.coef_[
label_class, self._feat_vectorizer.vocabulary_[feat_name]
]
def inspect(self, example, gold_label=None, dynamic_resource=None):
if not isinstance(self._clf, LogisticRegression):
logging.warning(
"Currently inspection is only available for Logistic Regression Model"
)
return []
try:
gold_class = self._class_encoder.transform([gold_label])
except ValueError:
logger.warning("Unable to decode label `%s`", gold_label)
gold_class = None
pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]
pred_class = self._class_encoder.transform([pred_label])
features = self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
logging.info("Predicted: %s.", pred_label)
if gold_class is None:
columns = ["Feature", "Value", "Pred_W({0})".format(pred_label), "Pred_P"]
else:
columns = [
"Feature",
"Value",
"Pred_W({0})".format(pred_label),
"Pred_P",
"Gold_W({0})".format(gold_label),
"Gold_P",
"Diff",
]
logging.info("Gold: %s.", gold_label)
inspect_table = [columns]
# Get all active features sorted alphabetically by name
features = sorted(features.items(), key=operator.itemgetter(0))
for feature in features:
feat_name = feature[0]
feat_value = feature[1]
# Features we haven't seen before won't be in our vectorizer
# e.g., an exact match feature for a query we've never seen before
if feat_name not in self._feat_vectorizer.vocabulary_:
continue
weight = self._get_feature_weight(feat_name, pred_class)
product = feat_value * weight
if gold_class is None:
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
"-",
"-",
"-",
]
else:
gold_w = self._get_feature_weight(feat_name, gold_class)
gold_p = feat_value * gold_w
diff = gold_p - product
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
gold_w.round(4),
gold_p.round(4),
diff.round(4),
]
inspect_table.append(row)
return inspect_table
def _predict_proba(self, X, predictor):
predictions = []
for row in predictor(X):
probabilities = {}
top_class = None
for class_index, proba in enumerate(row):
raw_class = self._class_encoder.inverse_transform([class_index])[0]
decoded_class = self._label_encoder.decode([raw_class])[0]
probabilities[decoded_class] = proba
if proba > probabilities.get(top_class, -1.0):
top_class = decoded_class
predictions.append((top_class, probabilities))
return predictions
def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):
groups = []
feats = []
for idx, example in enumerate(examples):
feats.append(
self._extract_features(example, dynamic_resource, self.text_preparation_pipeline)
)
groups.append(idx)
X, y = self._preprocess_data(feats, y, fit=fit)
return X, y, groups
def _preprocess_data(self, X, y=None, fit=False):
if fit:
y = self._class_encoder.fit_transform(y)
X = self._feat_vectorizer.fit_transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.fit_transform(X)
if self._feat_selector is not None:
X = self._feat_selector.fit_transform(X, y)
else:
X = self._feat_vectorizer.transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.transform(X)
if self._feat_selector is not None:
X = self._feat_selector.transform(X)
return X, y
def _convert_params(self, param_grid, y, is_grid=True):
if "class_weight" in param_grid:
raw_weights = (
param_grid["class_weight"] if is_grid else [param_grid["class_weight"]]
)
weights = [
{
k
if isinstance(k, int)
else self._class_encoder.transform((k,))[0]: v
for k, v in cw_dict.items()
}
for cw_dict in raw_weights
]
param_grid["class_weight"] = weights if is_grid else weights[0]
elif "class_bias" in param_grid:
class_count = np.bincount(y)
classes = self._class_encoder.classes_
weights = []
raw_bias = (
param_grid["class_bias"] if is_grid else [param_grid["class_bias"]]
)
for class_bias in raw_bias:
balanced_w = [(len(y) / len(classes) / c) for c in class_count]
balanced_tuples = list(zip(list(range(len(classes))), balanced_w))
weights.append(
{c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}
)
param_grid["class_weight"] = weights if is_grid else weights[0]
del param_grid["class_bias"]
return param_grid
def _get_feature_selector(self):
if self.config.model_settings is None:
selector_type = None
else:
selector_type = self.config.model_settings.get("feature_selector")
selector = {
"l1": SelectFromModel(LogisticRegression(penalty="l1", C=1)),
"f": SelectPercentile(),
}.get(selector_type)
return selector
def _get_feature_scaler(self):
if self.config.model_settings is None:
scale_type = None
else:
scale_type = self.config.model_settings.get("feature_scaler")
scaler = {
"std-dev": StandardScaler(with_mean=False),
"max-abs": MaxAbsScaler(),
}.get(scale_type)
return scaler
def evaluate(self, examples, labels):
# TODO: also expose feature weights?
predictions = self.predict_proba(examples)
# Create a model config object for the current effective config (after param selection)
config = self._get_effective_config()
evaluations = [
EvaluatedExample(
e, labels[i], predictions[i][0], predictions[i][1], config.label_type
)
for i, e in enumerate(examples)
]
model_eval = StandardModelEvaluation(config, evaluations)
return model_eval
def fit(self, examples, labels, params=None):
params = params or self.config.params
skip_param_selection = params is not None or self.config.param_selection is None
# Shuffle to prevent order effects
indices = list(range(len(labels)))
random.shuffle(indices)
examples.reorder(indices)
labels.reorder(indices)
distinct_labels = set(labels)
if len(set(distinct_labels)) <= 1:
return self
# Extract features and classes
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
if skip_param_selection:
self._clf = self._fit(X, y, params)
self._current_params = params
else:
# run cross validation to select params
best_clf, best_params = self._fit_cv(X, y, groups)
self._clf = best_clf
self._current_params = best_params
return self
def predict(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
y = self._clf.predict(X)
predictions = self._class_encoder.inverse_transform(y)
return self._label_encoder.decode(predictions)
def predict_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
return self._predict_proba(X, self._clf.predict_proba)
def view_extracted_features(self, example, dynamic_resource=None):
return self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
@classmethod
def load(cls, path):
metadata = joblib.load(path)
# backwards compatability check for RoleClassifiers
if isinstance(metadata, dict):
return metadata["model"]
# in this case, metadata = model which was serialized and dumped
return metadata
def _dump(self, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
joblib.dump(self, path)
class PytorchTextModel(PytorchModel):
ALLOWED_CLASSIFIER_TYPES = ["embedder", "cnn", "lstm"]
pass
class AutoTextModel:
@staticmethod
def get_model_class(config: ModelConfig):
CLASSES = [TextModel, PytorchTextModel]
classifier_type = config.model_settings["classifier_type"]
for _class in CLASSES:
if classifier_type in _class.ALLOWED_CLASSIFIER_TYPES:
return _class
msg = f"Invalid 'classifier_type': {classifier_type}. " \
f"Allowed types are: {[_class.ALLOWED_CLASSIFIER_TYPES for _class in CLASSES]}"
raise ValueError(msg)
| true | true |
1c465740ae5fe9f566269cf6b2d71d8bc9882dcb | 28,276 | py | Python | Core/Python/invoke_refresh_inventory.py | prasadrao-dell/OpenManage-Enterprise | f9bd0e821701902d6571a54663a7c9ef4f2308b3 | [
"Apache-2.0"
] | 1 | 2020-07-18T13:05:48.000Z | 2020-07-18T13:05:48.000Z | Core/Python/invoke_refresh_inventory.py | prasadrao-dell/OpenManage-Enterprise | f9bd0e821701902d6571a54663a7c9ef4f2308b3 | [
"Apache-2.0"
] | 11 | 2020-07-22T07:33:14.000Z | 2020-08-20T12:01:55.000Z | Core/Python/invoke_refresh_inventory.py | prasadrao-dell/OpenManage-Enterprise | f9bd0e821701902d6571a54663a7c9ef4f2308b3 | [
"Apache-2.0"
] | 4 | 2020-06-03T11:38:34.000Z | 2020-08-11T10:38:57.000Z | #
# _author_ = Grant Curell <grant_curell@dell.com>
#
# Copyright (c) 2020 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
#### Synopsis
Refreshes the inventory on a set of target devices. This includes the configuration inventory tab.
#### Description
This script uses the OME REST API to refresh the inventory of a targeted server. It performs X-Auth
with basic authentication. Note: Credentials are not stored on disk.
#### Python Example
`python invoke_refresh_inventory.py -i 192.168.1.93 -u admin -p somepass --idrac-ips 192.168.1.63,192.168.1.45`
"""
import argparse
import json
import sys
import time
from argparse import RawTextHelpFormatter
from pprint import pprint
from urllib.parse import urlparse
from getpass import getpass
try:
    import urllib3
    import requests
except ModuleNotFoundError:
    # Third-party dependencies are required; without them the script cannot run.
    # Note the space between the package names so the printed pip command is valid.
    print("This program requires urllib3 and requests. To install them on most systems run "
          "`pip install requests urllib3`")
    # Exit with a non-zero status so callers/shells see this as a failure.
    sys.exit(1)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
    """
    Authenticates with OME and creates a session

    Args:
        ome_ip_address: IP address of the OME server
        ome_username: Username for OME
        ome_password: OME password

    Returns: A dictionary of HTTP headers including the X-Auth-Token for the new session

    Raises:
        Exception: A generic exception in the event of a failure to authenticate.
    """
    authenticated_headers = {'content-type': 'application/json'}
    session_url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
    user_details = {'UserName': ome_username,
                    'Password': ome_password,
                    'SessionType': 'API'}
    try:
        session_info = requests.post(session_url, verify=False,
                                     data=json.dumps(user_details),
                                     headers=authenticated_headers)
    except requests.exceptions.ConnectionError:
        print("Failed to connect to OME. This typically indicates a network connectivity problem. Can you ping OME?")
        # Non-zero exit status: this is a fatal failure, not a successful run.
        sys.exit(1)

    # 201 Created indicates the session was established successfully.
    if session_info.status_code == 201:
        authenticated_headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
        return authenticated_headers

    # Raise (rather than print-and-raise the same text twice) so the caller
    # sees a single, consistent error message.
    raise Exception("There was a problem authenticating with OME. Are you sure you have the right username, "
                    "password, and IP?")
def get_group_id_by_name(ome_ip_address: str, group_name: str, authenticated_headers: dict) -> int:
    """
    Retrieves the ID of a group given its name.

    Args:
        ome_ip_address: The IP address of the OME server
        group_name: The name of the group whose ID you want to resolve.
        authenticated_headers: Headers used for authentication to the OME server

    Returns: Returns the ID of the group as an integer or -1 if it couldn't be found.
    """
    print("Searching for the requested group.")
    groups_url = "https://%s/api/GroupService/Groups?$filter=Name eq '%s'" % (ome_ip_address, group_name)
    group_response = requests.get(groups_url, headers=authenticated_headers, verify=False)
    if group_response.status_code != 200:
        print("Unable to retrieve groups. Exiting.")
        return -1
    json_data = json.loads(group_response.content)
    # No match at all for the requested name.
    if json_data['@odata.count'] < 1:
        print("Error: We could not find the group " + group_name + ". Exiting.")
        return -1
    if json_data['@odata.count'] > 1:
        print("WARNING: We found more than one name that matched the group name: " + group_name +
              ". We are picking the first entry.")
    group_id = json_data['value'][0]['Id']
    # Guard against the server returning an unexpected payload shape.
    if not isinstance(group_id, int):
        print("The server did not return an integer ID. Something went wrong.")
        return -1
    return group_id
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> 'list | dict':
    """
    This function retrieves data from a specified URL. Get requests from OME return paginated data. The code below
    handles pagination. This is the equivalent in the UI of a list of results that require you to go to different
    pages to get a complete listing.

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        url: The API url against which you would like to make a request
        odata_filter: An optional parameter for providing an odata filter to run against the API endpoint.
        max_pages: The maximum number of pages you would like to return

    Returns: A list of the records returned by OME (or the raw response body when it carries no
        'value' field). Returns an empty dict ({}) when no results are found or the filter is rejected.
    """
    next_link_url = None
    if odata_filter:
        count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
        # OME responds with 400 when the odata filter itself is malformed or rejected.
        if count_data.status_code == 400:
            print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
            pprint(count_data.json()['error'])
            return {}
        count_data = count_data.json()
        if count_data['@odata.count'] <= 0:
            print("No results found!")
            return {}
    else:
        count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
    # Most OME endpoints wrap results in 'value'; fall back to the raw body otherwise.
    if 'value' in count_data:
        data = count_data['value']
    else:
        data = count_data
    if '@odata.nextLink' in count_data:
        # Grab the base URI
        next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
    i = 1
    while next_link_url is not None:
        # Break if we have reached the maximum number of pages to be returned
        if max_pages:
            if i >= max_pages:
                break
            else:
                i = i + 1
        response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
        next_link_url = None
        if response.status_code == 200:
            requested_data = response.json()
            if requested_data['@odata.count'] <= 0:
                print("No results found!")
                return {}
            # The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it
            # is present we get a link to the page with the next set of results.
            if '@odata.nextLink' in requested_data:
                next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
                                requested_data['@odata.nextLink']
            if 'value' in requested_data:
                data += requested_data['value']
            else:
                data += requested_data
        else:
            print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
                  " with error: " + response.text)
            raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
                            + " with error: " + response.text)
    return data
def track_job_to_completion(ome_ip_address: str,
                            authenticated_headers: dict,
                            tracked_job_id,
                            max_retries: int = 20,
                            sleep_interval: int = 30) -> bool:
    """
    Tracks a job to either completion or a failure within the job.

    Args:
        ome_ip_address: The IP address of the OME server
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        tracked_job_id: The ID of the job which you would like to track
        max_retries: The maximum number of times the function should contact the server to see if the job has completed
        sleep_interval: The frequency with which the function should check the server for job completion

    Returns: True if the job completed successfully or completed with errors. Returns false if the job failed.
    """
    # Maps OME job status IDs to human-readable strings.
    job_status_map = {
        "2020": "Scheduled",
        "2030": "Queued",
        "2040": "Starting",
        "2050": "Running",
        "2060": "Completed",
        "2070": "Failed",
        "2090": "Warning",
        "2080": "New",
        "2100": "Aborted",
        "2101": "Paused",
        "2102": "Stopped",
        "2103": "Canceled"
    }
    # Terminal states that stop polling. Warning (2090) is terminal but still
    # counts as completed-with-errors per this function's return contract.
    failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
    job_url = 'https://%s/api/JobService/Jobs(%s)' % (ome_ip_address, tracked_job_id)
    loop_ctr = 0
    job_incomplete = True
    print("Polling %s to completion ..." % tracked_job_id)
    while loop_ctr < max_retries:
        loop_ctr += 1
        time.sleep(sleep_interval)
        job_resp = requests.get(job_url, headers=authenticated_headers, verify=False)
        if job_resp.status_code == 200:
            job_status = str((job_resp.json())['LastRunStatus']['Id'])
            job_status_str = job_status_map[job_status]
            print("Iteration %s: Status of %s is %s" % (loop_ctr, tracked_job_id, job_status_str))
            if int(job_status) == 2060:
                job_incomplete = False
                print("Job completed successfully!")
                break
            elif int(job_status) in failed_job_status:
                if job_status_str == "Warning":
                    # Bug fix: a Warning means the job finished (with errors), so
                    # the function must report success per the documented contract
                    # instead of returning False.
                    job_incomplete = False
                    print("Completed with errors")
                else:
                    job_incomplete = True
                    print("Error: Job failed.")
                # Surface the job's execution details to help diagnose what went wrong.
                job_hist_url = str(job_url) + "/ExecutionHistories"
                job_hist_resp = requests.get(job_hist_url, headers=authenticated_headers, verify=False)
                if job_hist_resp.status_code == 200:
                    # Get the job's execution details
                    job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
                    execution_hist_detail = "(" + job_history_id + ")/ExecutionHistoryDetails"
                    job_hist_det_url = str(job_hist_url) + execution_hist_detail
                    job_hist_det_resp = requests.get(job_hist_det_url,
                                                     headers=authenticated_headers,
                                                     verify=False)
                    if job_hist_det_resp.status_code == 200:
                        pprint(job_hist_det_resp.json()['value'])
                    else:
                        print("Unable to parse job execution history... exiting")
                break
        else:
            print("Unable to poll status of %s - Iteration %s " % (tracked_job_id, loop_ctr))
    if job_incomplete:
        print("Job %s incomplete after polling %s times...Check status" % (tracked_job_id, max_retries))
        return False
    return True
def get_device_id(authenticated_headers: dict,
                  ome_ip_address: str,
                  service_tag: str = None,
                  device_idrac_ip: str = None,
                  device_name: str = None) -> int:
    """
    Resolves a service tag, idrac IP or device name to a device ID

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        ome_ip_address: IP address of the OME server
        service_tag: (optional) The service tag of a host
        device_idrac_ip: (optional) The idrac IP of a host
        device_name: (optional): The name of a host

    Returns: Returns the device ID or -1 if it couldn't be found
    """
    if not service_tag and not device_idrac_ip and not device_name:
        print("No argument provided to get_device_id. Must provide service tag, device idrac IP or device name.")
        return -1
    # If the user passed a device name, resolve that name to a device ID
    if device_name:
        device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                             "DeviceName eq \'%s\'" % device_name)
        if len(device_id) == 0:
            print("Error: We were unable to find device name " + device_name + " on this OME server. Exiting.")
            return -1
        device_id = device_id[0]['Id']
    elif service_tag:
        device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                             "DeviceServiceTag eq \'%s\'" % service_tag)
        if len(device_id) == 0:
            print("Error: We were unable to find service tag " + service_tag + " on this OME server. Exiting.")
            return -1
        device_id = device_id[0]['Id']
    elif device_idrac_ip:
        device_id = -1
        device_ids = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                              "DeviceManagement/any(d:d/NetworkAddress eq '%s')" % device_idrac_ip)
        if len(device_ids) == 0:
            print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
            return -1
        # The OData filter above can match multiple records
        # (https://github.com/dell/OpenManage-Enterprise/issues/87), so scan for
        # the exact address. Bug fix: iterate with a separate variable so that a
        # non-matching final record is never returned in place of an integer ID.
        for device in device_ids:
            if device['DeviceManagement'][0]['NetworkAddress'] == device_idrac_ip:
                device_id = device['Id']
                break
        if device_id == -1:
            print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
            return -1
    else:
        device_id = -1
    return device_id
def refresh_device_inventory(authenticated_headers: dict,
                             ome_ip_address: str,
                             group_name: str,
                             skip_config_inventory: bool,
                             device_ids: list = None,
                             service_tags: str = None,
                             device_idrac_ips: str = None,
                             device_names: str = None,
                             ignore_group: bool = False):
    """
    Refresh the inventory of targeted hosts

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        ome_ip_address: IP address of the OME server
        group_name: The name of the group which contains the servers whose inventories you want to refresh
        skip_config_inventory: A boolean defining whether you would like to skip gathering the config inventory
        device_ids: (optional) The device IDs of hosts whose inventory you want to refresh
        service_tags: (optional) Service tags to refresh - a comma separated string or an already-split list
        device_idrac_ips: (optional) idrac IPs to refresh - a comma separated string or an already-split list
        device_names: (optional) Device names to refresh - a comma separated string or an already-split list
        ignore_group: (optional): Controls whether you want to ignore using groups or not
    """

    def _split_arg(argument):
        # Accept either a comma-separated string or a list the caller already split.
        if isinstance(argument, str):
            return argument.split(',')
        return argument if argument else []

    jobs_url = "https://%s/api/JobService/Jobs" % ome_ip_address
    target_ids = []

    # Bug fix: resolve targets from the function's own arguments rather than the
    # module-level `args`/`headers` globals, and tolerate both strings and lists
    # (the __main__ driver passes pre-split lists, which previously crashed on
    # a second .split(',')).
    for service_tag in _split_arg(service_tags):
        target = get_device_id(authenticated_headers, ome_ip_address, service_tag=service_tag)
        if target != -1:
            target_ids.append(target)
        else:
            print("Could not resolve ID for: " + service_tag)
    for device_idrac_ip in _split_arg(device_idrac_ips):
        target = get_device_id(authenticated_headers, ome_ip_address, device_idrac_ip=device_idrac_ip)
        if target != -1:
            target_ids.append(target)
        else:
            print("Could not resolve ID for: " + device_idrac_ip)
    for device_name in _split_arg(device_names):
        target = get_device_id(authenticated_headers, ome_ip_address, device_name=device_name)
        if target != -1:
            target_ids.append(target)
        else:
            print("Could not resolve ID for: " + device_name)
    if device_ids:
        for device_id in device_ids:
            target_ids.append(device_id)

    # Bug fix: resolve the group whenever it is needed - either for the config
    # inventory or for expanding the group's members. Previously group_id was
    # undefined (NameError) when skip_config_inventory was set without
    # ignore_group.
    group_id = None
    if not skip_config_inventory or not ignore_group:
        group_id = get_group_id_by_name(ome_ip_address, group_name, authenticated_headers)
        if group_id == -1:
            print("We were unable to find the ID for group name " + group_name + " ... exiting.")
            sys.exit(0)

    if not ignore_group:
        group_devices = get_data(authenticated_headers,
                                 "https://%s/api/GroupService/Groups(%s)/Devices" % (ome_ip_address, group_id))
        if len(group_devices) < 1:
            print("Error: There was a problem retrieving the devices for group " + group_name + ". Exiting")
            sys.exit(0)
        for device in group_devices:
            target_ids.append(device['Id'])

    # Build the target list for the standard inventory job.
    targets_payload = []
    for id_to_refresh in target_ids:
        targets_payload.append({
            "Id": id_to_refresh,
            "Data": "",
            "TargetType": {
                "Id": 1000,
                "Name": "DEVICE"
            }
        })

    payload = {
        "Id": 0,
        "JobName": "Inventory refresh via the API.",
        "JobDescription": "Refreshes the inventories for targeted hardware.",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {
            "Name": "Inventory_Task"
        },
        "Targets": targets_payload
    }

    print("Beginning standard inventory refresh...")
    create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
    if create_resp.status_code == 201:
        job_id_generic_refresh = json.loads(create_resp.content)["Id"]
    else:
        print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
        sys.exit(1)
    if job_id_generic_refresh is None:
        print("Received invalid job ID from OME for standard inventory. Exiting.")
        sys.exit(1)

    if not skip_config_inventory:
        # Part 1: collect the configuration inventory for the group.
        payload = {
            "JobDescription": "Run config inventory collection task on selected devices",
            "JobName": "Part 1 - API refresh config inventory",
            "JobType": {"Id": 50, "Name": "Device_Config_Task"},
            "Params": [{"Key": "action", "Value": "CONFIG_INVENTORY"}],
            "Schedule": "startnow",
            "StartTime": "",
            "State": "Enabled",
            "Targets": [{
                "Data": "",
                "Id": group_id,
                "JobId": -1,
                "TargetType": {"Id": 6000, "Name": "GROUP"}
            }]
        }
        print("Beginning part 1 of 2 of the configuration inventory refresh.")
        create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
        if create_resp.status_code == 201:
            config_inventory_refresh_job_1 = json.loads(create_resp.content)["Id"]
        else:
            print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
            sys.exit(1)
        if config_inventory_refresh_job_1 is None:
            print("Received invalid job ID from OME for part 1 of configuration inventory refresh... exiting.")
            sys.exit(1)
        print("Waiting for part 1 of configuration inventory refresh to finish. This could take a couple of minutes.")
        if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_1):
            print("Part 1 of configuration inventory refresh completed successfully.")
        else:
            print("Something went wrong. See text output above for more details.")

        # Part 2: fold the collected configuration data into the inventory.
        payload = {
            "JobDescription": "Create Inventory",
            "JobName": "Part 2 - API refresh config inventory",
            "JobType": {"Id": 8, "Name": "Inventory_Task"},
            "Params": [
                {"Key": "action", "Value": "CONFIG_INVENTORY"},
                {"Key": "isCollectDriverInventory", "Value": "true"}],
            "Schedule": "startnow",
            "StartTime": "",
            "State": "Enabled",
            "Targets": [{
                "Data": "",
                "Id": group_id,
                "JobId": -1,
                "TargetType": {"Id": 6000, "Name": "GROUP"}
            }]
        }
        print("Beginning part 2 of 2 of the configuration inventory refresh")
        create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
        if create_resp.status_code == 201:
            config_inventory_refresh_job_2 = json.loads(create_resp.content)["Id"]
        else:
            print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
            sys.exit(1)
        if config_inventory_refresh_job_2 is None:
            print("Received invalid job ID from OME for part 2 of the configuration inventory refresh... exiting.")
            sys.exit(1)
        print("Waiting for part 2 of the configuration inventory refresh to finish. "
              "This could take a couple of minutes.")
        if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_2):
            print("Inventory refresh completed successfully.")
        else:
            print("Something went wrong. See text output above for more details.")

    print("Tracking standard inventory to completion.")
    if track_job_to_completion(ome_ip_address, authenticated_headers, job_id_generic_refresh):
        print("Inventory refresh completed successfully.")
    else:
        print("Something went wrong. See text output above for more details.")
    print("Inventory refresh complete!")
if __name__ == '__main__':
    # Entry point: parse CLI arguments, authenticate against OME, and run the
    # inventory refresh. OME appliances commonly use self-signed certificates,
    # so every request in this script passes verify=False; silence urllib3's
    # warning about that.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
    parser.add_argument("--user", "-u", required=False,
                        help="Username for the OME Appliance", default="admin")
    parser.add_argument("--password", "-p", required=False,
                        help="Password for the OME Appliance")
    parser.add_argument("--groupname", "-g", required=False, default="All Devices",
                        help="The name of the group containing the devices whose inventory you want to refresh. "
                             "Defaults to all devices. Due to the way the API functions, if you want to refresh the "
                             "configuration inventory, you must have all applicable devices in a group. The "
                             "configuration inventory is specific to the tab called \"Configuration Inventory\" under "
                             "a device's view. You can use the create_static_group and add_device_to_static group "
                             "modules to do this programmatically.")
    parser.add_argument("--device-ids", "-d", help="A comma separated list of device-ids to refresh. Applies to "
                                                   "regular inventory only. This does not impact the configuration "
                                                   "inventory tab. That is controlled by the group name.")
    parser.add_argument("--service-tags", "-s", help="A comma separated list of service tags to refresh. Applies to "
                                                     "regular inventory only. This does not impact the configuration "
                                                     "inventory tab. That is controlled by the group name.")
    parser.add_argument("--idrac-ips", "-r", help="A comma separated list of idrac IPs to refresh. Applies to regular "
                                                  "inventory only. This does not impact the configuration inventory "
                                                  "tab. That is controlled by the group name.")
    parser.add_argument("--device-names", "-n", help="A comma separated list of device names to refresh. Applies to "
                                                     "regular inventory only. This does not impact the configuration "
                                                     "inventory tab. That is controlled by the group name.")
    parser.add_argument("--skip-config-inventory", "-skip", default=False, action='store_true',
                        help="The configuration inventory is the inventory you see specifically under the tab for a"
                             " specific device. In order to obtain a config inventory that server must be part of a"
                             " group or you have to run an inventory update against all devices which can be time "
                             "consuming. A regular inventory run will update things like firmware assuming that the"
                             " version change is reflected in idrac. A config inventory is launched in the GUI by "
                             "clicking \"Run inventory\" on quick links on the devices page. A regular inventory is "
                             "the same as clicking \"Run inventory\" on a specific device\'s page.")
    parser.add_argument("--ignore-group", default=False, action='store_true', help="Used when you only want to run a"
                                                                                   " regular inventory and you do not want to provide a group.")
    args = parser.parse_args()
    # Prompt interactively when no password was supplied so it never has to
    # appear on the command line or in shell history.
    if not args.password:
        args.password = getpass()
    try:
        headers = authenticate(args.ip, args.user, args.password)
        if not headers:
            sys.exit(0)
        # Pre-split each comma-separated target argument; None when unused.
        # NOTE(review): refresh_device_inventory re-splits some of these values
        # from the module-level `args` - confirm the pre-split lists are what
        # that function actually expects.
        if args.device_ids:
            device_ids_arg = args.device_ids.split(',')
        else:
            device_ids_arg = None
        if args.service_tags:
            service_tags_arg = args.service_tags.split(',')
        else:
            service_tags_arg = None
        if args.idrac_ips:
            idrac_ips_arg = args.idrac_ips.split(',')
        else:
            idrac_ips_arg = None
        if args.device_names:
            device_names_arg = args.device_names.split(',')
        else:
            device_names_arg = None
        print("WARNING: To reflect firmware changes you may have to power cycle the server first before running this. "
              "It is situation dependent.")
        if args.groupname == 'All Devices':
            print("WARNING: No argument was provided for groupname. Defaulting to \'All Devices\' for the "
                  "inventory refresh. See help for details. This will also display if the argument was manually set "
                  "to \'All Devices\' and can be safely ignored. If you do not want to use a group AND you do not want"
                  " to update the configuration inventory tab, use the --skip-config-inventory and --ignore-group"
                  " switches together. If you want to use a group to update regular inventories only and not the"
                  " configuration inventory tab use the --skip-config-inventory switch by itself.")
        refresh_device_inventory(headers, args.ip, args.groupname, args.skip_config_inventory, device_ids_arg,
                                 service_tags_arg, idrac_ips_arg, device_names_arg, args.ignore_group)
    # Top-level catch-all so unexpected failures are reported as a short
    # message rather than a raw traceback.
    except Exception as error:
        print("Unexpected error:", str(error))
| 44.599369 | 120 | 0.609245 |
import argparse
import json
import sys
import time
from argparse import RawTextHelpFormatter
from pprint import pprint
from urllib.parse import urlparse
from getpass import getpass
try:
import urllib3
import requests
except ModuleNotFoundError:
print("This program requires urllib3 and requests. To install them on most systems run `pip install requests"
"urllib3`")
sys.exit(0)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
    """
    Authenticates with OME and creates a session.

    Args:
        ome_ip_address: IP address of the OME server
        ome_username: Username for OME
        ome_password: OME password

    Returns: A dictionary of HTTP headers that includes the session's X-Auth-Token

    Raises:
        Exception: If OME returns anything other than HTTP 201 for the session request.
    """
    authenticated_headers = {'content-type': 'application/json'}
    session_url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
    user_details = {'UserName': ome_username,
                    'Password': ome_password,
                    'SessionType': 'API'}
    try:
        session_info = requests.post(session_url, verify=False,
                                     data=json.dumps(user_details),
                                     headers=authenticated_headers)
    except requests.exceptions.ConnectionError:
        print("Failed to connect to OME. This typically indicates a network connectivity problem. Can you ping OME?")
        sys.exit(0)
    if session_info.status_code == 201:
        # 201 means the session was created; reuse the returned token on every call.
        authenticated_headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
        return authenticated_headers
    print("There was a problem authenticating with OME. Are you sure you have the right username, password, "
          "and IP?")
    raise Exception("There was a problem authenticating with OME. Are you sure you have the right username, "
                    "password, and IP?")
def get_group_id_by_name(ome_ip_address: str, group_name: str, authenticated_headers: dict) -> int:
    """
    Retrieves the ID of a group given its name.

    Args:
        ome_ip_address: The IP address of the OME server
        group_name: The name of the group whose ID you want to resolve
        authenticated_headers: Headers used for authentication to the OME server

    Returns: The ID of the group as an integer, or -1 if it couldn't be found
        (no match, non-integer ID in the response, or a non-200 response).
    """
    print("Searching for the requested group.")
    groups_url = "https://%s/api/GroupService/Groups?$filter=Name eq '%s'" % (ome_ip_address, group_name)
    group_response = requests.get(groups_url, headers=authenticated_headers, verify=False)
    if group_response.status_code == 200:
        json_data = json.loads(group_response.content)
        if json_data['@odata.count'] > 1:
            # More than one group matched the name filter; the first entry is used.
            print("WARNING: We found more than one name that matched the group name: " + group_name +
                  ". We are picking the first entry.")
        if json_data['@odata.count'] == 1 or json_data['@odata.count'] > 1:
            group_id = json_data['value'][0]['Id']
            if not isinstance(group_id, int):
                print("The server did not return an integer ID. Something went wrong.")
                return -1
            return group_id
        print("Error: We could not find the group " + group_name + ". Exiting.")
        return -1
    print("Unable to retrieve groups. Exiting.")
    return -1
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> 'list | dict':
    """
    Retrieves data from a URL, transparently following OME's paginated responses.

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        url: The API url against which you would like to make a request
        odata_filter: An optional odata filter to run against the API endpoint
        max_pages: The maximum number of pages you would like to return

    Returns: A list of the records returned by OME (or the raw response body when it
        carries no 'value' field). Returns an empty dict ({}) when no results are
        found or the filter is rejected.
    """
    next_link_url = None
    if odata_filter:
        count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
        # A 400 here means OME rejected the odata filter itself.
        if count_data.status_code == 400:
            print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
            pprint(count_data.json()['error'])
            return {}
        count_data = count_data.json()
        if count_data['@odata.count'] <= 0:
            print("No results found!")
            return {}
    else:
        count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
    # Most OME endpoints wrap results in 'value'; fall back to the raw body otherwise.
    if 'value' in count_data:
        data = count_data['value']
    else:
        data = count_data
    if '@odata.nextLink' in count_data:
        # @odata.nextLink is relative; prepend the scheme and host of the original URL.
        next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
    i = 1
    while next_link_url is not None:
        # Stop once the caller-requested page limit has been reached.
        if max_pages:
            if i >= max_pages:
                break
            else:
                i = i + 1
        response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
        next_link_url = None
        if response.status_code == 200:
            requested_data = response.json()
            if requested_data['@odata.count'] <= 0:
                print("No results found!")
                return {}
            # @odata.nextLink is only present when there are additional pages.
            if '@odata.nextLink' in requested_data:
                next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
                                requested_data['@odata.nextLink']
            if 'value' in requested_data:
                data += requested_data['value']
            else:
                data += requested_data
        else:
            print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
                  " with error: " + response.text)
            raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
                            + " with error: " + response.text)
    return data
def track_job_to_completion(ome_ip_address: str,
                            authenticated_headers: dict,
                            tracked_job_id,
                            max_retries: int = 20,
                            sleep_interval: int = 30) -> bool:
    """
    Polls an OME job until it reaches a terminal state or max_retries is exhausted.

    Args:
        ome_ip_address: The IP address of the OME server
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        tracked_job_id: The ID of the job which you would like to track
        max_retries: The maximum number of polling attempts
        sleep_interval: Seconds to wait between polling attempts

    Returns: True only when the job reaches Completed (2060); False otherwise.
    """
    # Maps OME job status IDs to human-readable names.
    job_status_map = {
        "2020": "Scheduled",
        "2030": "Queued",
        "2040": "Starting",
        "2050": "Running",
        "2060": "Completed",
        "2070": "Failed",
        "2090": "Warning",
        "2080": "New",
        "2100": "Aborted",
        "2101": "Paused",
        "2102": "Stopped",
        "2103": "Canceled"
    }
    # Terminal, non-success states that stop polling.
    # NOTE(review): Warning (2090) is treated as a failure here (returns False)
    # even though the message printed is "Completed with errors" - confirm
    # whether callers expect warnings to count as success.
    failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
    job_url = 'https://%s/api/JobService/Jobs(%s)' % (ome_ip_address, tracked_job_id)
    loop_ctr = 0
    job_incomplete = True
    print("Polling %s to completion ..." % tracked_job_id)
    while loop_ctr < max_retries:
        loop_ctr += 1
        time.sleep(sleep_interval)
        job_resp = requests.get(job_url, headers=authenticated_headers, verify=False)
        if job_resp.status_code == 200:
            job_status = str((job_resp.json())['LastRunStatus']['Id'])
            job_status_str = job_status_map[job_status]
            print("Iteration %s: Status of %s is %s" % (loop_ctr, tracked_job_id, job_status_str))
            if int(job_status) == 2060:
                job_incomplete = False
                print("Job completed successfully!")
                break
            elif int(job_status) in failed_job_status:
                job_incomplete = True
                if job_status_str == "Warning":
                    print("Completed with errors")
                else:
                    print("Error: Job failed.")
                # On any terminal non-success state, dump the job's execution
                # history details to help diagnose what went wrong.
                job_hist_url = str(job_url) + "/ExecutionHistories"
                job_hist_resp = requests.get(job_hist_url, headers=authenticated_headers, verify=False)
                if job_hist_resp.status_code == 200:
                    job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
                    execution_hist_detail = "(" + job_history_id + ")/ExecutionHistoryDetails"
                    job_hist_det_url = str(job_hist_url) + execution_hist_detail
                    job_hist_det_resp = requests.get(job_hist_det_url,
                                                     headers=authenticated_headers,
                                                     verify=False)
                    if job_hist_det_resp.status_code == 200:
                        pprint(job_hist_det_resp.json()['value'])
                    else:
                        print("Unable to parse job execution history... exiting")
                break
        else:
            print("Unable to poll status of %s - Iteration %s " % (tracked_job_id, loop_ctr))
    if job_incomplete:
        print("Job %s incomplete after polling %s times...Check status" % (tracked_job_id, max_retries))
        return False
    return True
def get_device_id(authenticated_headers: dict,
                  ome_ip_address: str,
                  service_tag: str = None,
                  device_idrac_ip: str = None,
                  device_name: str = None) -> int:
    """
    Resolves a service tag, idrac IP or device name to a device ID.

    Args:
        authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
        ome_ip_address: IP address of the OME server
        service_tag: (optional) The service tag of a host
        device_idrac_ip: (optional) The idrac IP of a host
        device_name: (optional) The name of a host

    Returns: The device ID, or -1 if it couldn't be found.
    """
    if not service_tag and not device_idrac_ip and not device_name:
        print("No argument provided to get_device_id. Must provide service tag, device idrac IP or device name.")
        return -1
    # If the user passed a device name, resolve that name to a device ID
    if device_name:
        device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                             "DeviceName eq \'%s\'" % device_name)
        if len(device_id) == 0:
            print("Error: We were unable to find device name " + device_name + " on this OME server. Exiting.")
            return -1
        device_id = device_id[0]['Id']
    elif service_tag:
        device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                             "DeviceServiceTag eq \'%s\'" % service_tag)
        if len(device_id) == 0:
            print("Error: We were unable to find service tag " + service_tag + " on this OME server. Exiting.")
            return -1
        device_id = device_id[0]['Id']
    elif device_idrac_ip:
        device_id = -1
        device_ids = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
                              "DeviceManagement/any(d:d/NetworkAddress eq '%s')" % device_idrac_ip)
        if len(device_ids) == 0:
            print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
            return -1
        # The OData filter above can match multiple records
        # (https://github.com/dell/OpenManage-Enterprise/issues/87), so scan for
        # the exact address. Bug fix: iterate with a separate variable so that a
        # non-matching final record is never returned in place of an integer ID.
        for device in device_ids:
            if device['DeviceManagement'][0]['NetworkAddress'] == device_idrac_ip:
                device_id = device['Id']
                break
        if device_id == -1:
            print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
            return -1
    else:
        device_id = -1
    return device_id
def refresh_device_inventory(authenticated_headers: dict,
                             ome_ip_address: str,
                             group_name: str,
                             skip_config_inventory: bool,
                             device_ids: list = None,
                             service_tags: str = None,
                             device_idrac_ips: str = None,
                             device_names: str = None,
                             ignore_group: bool = False):
    """Refresh the standard inventory of the targeted devices and, unless
    skip_config_inventory is set, the configuration inventory of group_name.
    Args:
        authenticated_headers: Headers with an authenticated session token.
        ome_ip_address: IP address of the OME server.
        group_name: Group whose devices receive the configuration inventory
            refresh and, unless ignore_group is set, a standard refresh.
        skip_config_inventory: Skip the two configuration inventory jobs.
        device_ids: (optional) List of device IDs to refresh.
        service_tags: (optional) Service tags - comma separated string or list.
        device_idrac_ips: (optional) idrac IPs - comma separated string or list.
        device_names: (optional) Device names - comma separated string or list.
        ignore_group: Do not expand group_name into standard inventory targets.
    """
    jobs_url = "https://%s/api/JobService/Jobs" % ome_ip_address
    target_ids = []
    if service_tags:
        # Bug fix: the __main__ block passes an already split list; only call
        # split() when we actually received a string.
        if isinstance(service_tags, str):
            service_tags = service_tags.split(',')
        for service_tag in service_tags:
            # Bug fix: use the authenticated_headers argument, not the global
            # `headers` defined in the __main__ block.
            target = get_device_id(authenticated_headers, ome_ip_address, service_tag=service_tag)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + service_tag)
    if device_idrac_ips:
        # Bug fix: this used to re-read the global `args.idrac_ips` instead of
        # the device_idrac_ips argument.
        if isinstance(device_idrac_ips, str):
            device_idrac_ips = device_idrac_ips.split(',')
        for device_idrac_ip in device_idrac_ips:
            target = get_device_id(authenticated_headers, ome_ip_address, device_idrac_ip=device_idrac_ip)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_idrac_ip)
    if device_names:
        if isinstance(device_names, str):
            device_names = device_names.split(',')
        for device_name in device_names:
            target = get_device_id(authenticated_headers, ome_ip_address, device_name=device_name)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_name)
    if device_ids:
        for device_id in device_ids:
            target_ids.append(device_id)
    group_id = -1
    if not skip_config_inventory or not ignore_group:
        # Bug fix: group_id is needed both for the configuration inventory
        # jobs and for expanding the group into standard targets. Previously
        # it was only resolved when a config inventory ran, so using a group
        # together with --skip-config-inventory (a combination the CLI help
        # explicitly supports) raised a NameError below.
        group_id = get_group_id_by_name(ome_ip_address, group_name, authenticated_headers)
        if group_id == -1:
            print("We were unable to find the ID for group name " + group_name + " ... exiting.")
            sys.exit(0)
    if not ignore_group:
        # Bug fix: use authenticated_headers instead of the global `headers`.
        group_devices = get_data(authenticated_headers, "https://%s/api/GroupService/Groups(%s)/Devices" % (ome_ip_address, group_id))
        if len(group_devices) < 1:
            # Bug fix: report group_name rather than the global args.groupname.
            print("Error: There was a problem retrieving the devices for group " + group_name + ". Exiting")
            sys.exit(0)
        for device in group_devices:
            target_ids.append(device['Id'])
    # Build the target list for the standard inventory job.
    targets_payload = []
    for id_to_refresh in target_ids:
        targets_payload.append({
            "Id": id_to_refresh,
            "Data": "",
            "TargetType": {
                "Id": 1000,
                "Name": "DEVICE"
            }
        })
    payload = {
        "Id": 0,
        "JobName": "Inventory refresh via the API.",
        "JobDescription": "Refreshes the inventories for targeted hardware.",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {
            "Name": "Inventory_Task"
        },
        "Targets": targets_payload
    }
    print("Beginning standard inventory refresh...")
    create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
    if create_resp.status_code == 201:
        job_id_generic_refresh = json.loads(create_resp.content)["Id"]
    else:
        print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
        sys.exit(1)
    if job_id_generic_refresh is None:
        print("Received invalid job ID from OME for standard inventory. Exiting.")
        sys.exit(1)
    if not skip_config_inventory:
        # Part 1: collect the configuration inventory for the group.
        payload = {
            "JobDescription": "Run config inventory collection task on selected devices",
            "JobName": "Part 1 - API refresh config inventory",
            "JobType": {"Id": 50, "Name": "Device_Config_Task"},
            "Params": [{"Key": "action", "Value": "CONFIG_INVENTORY"}],
            "Schedule": "startnow",
            "StartTime": "",
            "State": "Enabled",
            "Targets": [{
                "Data": "",
                "Id": group_id,
                "JobId": -1,
                "TargetType": {"Id": 6000, "Name": "GROUP"}
            }]
        }
        print("Beginning part 1 of 2 of the configuration inventory refresh.")
        create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
        if create_resp.status_code == 201:
            config_inventory_refresh_job_1 = json.loads(create_resp.content)["Id"]
        else:
            print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
            sys.exit(1)
        if config_inventory_refresh_job_1 is None:
            print("Received invalid job ID from OME for part 1 of configuration inventory refresh... exiting.")
            sys.exit(1)
        print("Waiting for part 1 of configuration inventory refresh to finish. This could take a couple of minutes.")
        if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_1):
            print("Part 1 of configuration inventory refresh completed successfully.")
        else:
            print("Something went wrong. See text output above for more details.")
        # Part 2: rebuild the inventory from the collected configuration data.
        payload = {
            "JobDescription": "Create Inventory",
            "JobName": "Part 2 - API refresh config inventory",
            "JobType": {"Id": 8, "Name": "Inventory_Task"},
            "Params": [
                {"Key": "action", "Value": "CONFIG_INVENTORY"},
                {"Key": "isCollectDriverInventory", "Value": "true"}],
            "Schedule": "startnow",
            "StartTime": "",
            "State": "Enabled",
            "Targets": [{
                "Data": "",
                "Id": group_id,
                "JobId": -1,
                "TargetType": {"Id": 6000, "Name": "GROUP"}
            }]
        }
        print("Beginning part 2 of 2 of the configuration inventory refresh")
        create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
        if create_resp.status_code == 201:
            config_inventory_refresh_job_2 = json.loads(create_resp.content)["Id"]
        else:
            print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
            sys.exit(1)
        if config_inventory_refresh_job_2 is None:
            print("Received invalid job ID from OME for part 2 of the configuration inventory refresh... exiting.")
            sys.exit(1)
        print("Waiting for part 2 of the configuration inventory refresh to finish. "
              "This could take a couple of minutes.")
        if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_2):
            print("Inventory refresh completed successfully.")
        else:
            print("Something went wrong. See text output above for more details.")
    print("Tracking standard inventory to completion.")
    if track_job_to_completion(ome_ip_address, authenticated_headers, job_id_generic_refresh):
        print("Inventory refresh completed successfully.")
    else:
        print("Something went wrong. See text output above for more details.")
    print("Inventory refresh complete!")
if __name__ == '__main__':
    # requests is called with verify=False throughout this script, so silence
    # urllib3's certificate warnings (OME appliances commonly use self-signed
    # certificates).
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
    parser.add_argument("--user", "-u", required=False,
                        help="Username for the OME Appliance", default="admin")
    parser.add_argument("--password", "-p", required=False,
                        help="Password for the OME Appliance")
    parser.add_argument("--groupname", "-g", required=False, default="All Devices",
                        help="The name of the group containing the devices whose inventory you want to refresh. "
                             "Defaults to all devices. Due to the way the API functions, if you want to refresh the "
                             "configuration inventory, you must have all applicable devices in a group. The "
                             "configuration inventory is specific to the tab called \"Configuration Inventory\" under "
                             "a device's view. You can use the create_static_group and add_device_to_static group "
                             "modules to do this programmatically.")
    parser.add_argument("--device-ids", "-d", help="A comma separated list of device-ids to refresh. Applies to "
                                                   "regular inventory only. This does not impact the configuration "
                                                   "inventory tab. That is controlled by the group name.")
    parser.add_argument("--service-tags", "-s", help="A comma separated list of service tags to refresh. Applies to "
                                                     "regular inventory only. This does not impact the configuration "
                                                     "inventory tab. That is controlled by the group name.")
    parser.add_argument("--idrac-ips", "-r", help="A comma separated list of idrac IPs to refresh. Applies to regular "
                                                  "inventory only. This does not impact the configuration inventory "
                                                  "tab. That is controlled by the group name.")
    parser.add_argument("--device-names", "-n", help="A comma separated list of device names to refresh. Applies to "
                                                     "regular inventory only. This does not impact the configuration "
                                                     "inventory tab. That is controlled by the group name.")
    parser.add_argument("--skip-config-inventory", "-skip", default=False, action='store_true',
                        help="The configuration inventory is the inventory you see specifically under the tab for a"
                             " specific device. In order to obtain a config inventory that server must be part of a"
                             " group or you have to run an inventory update against all devices which can be time "
                             "consuming. A regular inventory run will update things like firmware assuming that the"
                             " version change is reflected in idrac. A config inventory is launched in the GUI by "
                             "clicking \"Run inventory\" on quick links on the devices page. A regular inventory is "
                             "the same as clicking \"Run inventory\" on a specific device\'s page.")
    parser.add_argument("--ignore-group", default=False, action='store_true', help="Used when you only want to run a"
                                                                                   " regular inventory and you do not want to provide a group.")
    args = parser.parse_args()
    # Prompt interactively when no password was supplied so it does not have
    # to appear in shell history or process listings.
    if not args.password:
        args.password = getpass()
    try:
        headers = authenticate(args.ip, args.user, args.password)
        if not headers:
            sys.exit(0)
        # Turn each comma separated CLI string into a list (or None when the
        # argument was not supplied).
        if args.device_ids:
            device_ids_arg = args.device_ids.split(',')
        else:
            device_ids_arg = None
        if args.service_tags:
            service_tags_arg = args.service_tags.split(',')
        else:
            service_tags_arg = None
        if args.idrac_ips:
            idrac_ips_arg = args.idrac_ips.split(',')
        else:
            idrac_ips_arg = None
        if args.device_names:
            device_names_arg = args.device_names.split(',')
        else:
            device_names_arg = None
        print("WARNING: To reflect firmware changes you may have to power cycle the server first before running this. "
              "It is situation dependent.")
        if args.groupname == 'All Devices':
            print("WARNING: No argument was provided for groupname. Defaulting to \'All Devices\' for the "
                  "inventory refresh. See help for details. This will also display if the argument was manually set "
                  "to \'All Devices\' and can be safely ignored. If you do not want to use a group AND you do not want"
                  " to update the configuration inventory tab, use the --skip-config-inventory and --ignore-group"
                  " switches together. If you want to use a group to update regular inventories only and not the"
                  " configuration inventory tab use the --skip-config-inventory switch by itself.")
        refresh_device_inventory(headers, args.ip, args.groupname, args.skip_config_inventory, device_ids_arg,
                                 service_tags_arg, idrac_ips_arg, device_names_arg, args.ignore_group)
    # Top-level boundary: report unexpected failures as a message instead of
    # a traceback.
    except Exception as error:
        print("Unexpected error:", str(error))
| true | true |
1c465889a1c778474e5db6bd5a5c7d2042d61766 | 2,091 | py | Python | source-py/pyBKT/test/hand_specified_model.py | bukeplato/pyBKT | 733a4ccf0de78bef7d47b5a6af7131c7778560db | [
"MIT"
] | 132 | 2018-03-22T06:04:14.000Z | 2022-03-24T21:54:27.000Z | source-py/pyBKT/test/hand_specified_model.py | bukeplato/pyBKT | 733a4ccf0de78bef7d47b5a6af7131c7778560db | [
"MIT"
] | 25 | 2018-01-10T14:00:48.000Z | 2022-03-22T04:00:47.000Z | source-py/pyBKT/test/hand_specified_model.py | bukeplato/pyBKT | 733a4ccf0de78bef7d47b5a6af7131c7778560db | [
"MIT"
] | 46 | 2017-09-12T04:30:58.000Z | 2022-03-10T08:54:52.000Z | import numpy as np
from pyBKT.generate import synthetic_data
from pyBKT.generate import random_model, random_model_uni
from pyBKT.fit import EM_fit
from copy import deepcopy
from pyBKT.util import print_dot
# Experiment parameters.
num_subparts = 4
num_resources = 2
num_fit_initializations = 25
# 50 synthetic observation sequences of 100 steps each.
# Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# documented replacement for this usage is the builtin int.
observation_sequence_lengths = np.full(50, 100, dtype=int)
# Generate a synthetic model and data. The model is really easy.
truemodel = {}
# Bug fix: np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
truemodel["As"] = np.zeros((num_resources, 2, 2), dtype=np.float64)
truemodel["As"][0, :, :] = np.transpose([[0.75, 0.25], [0.1, 0.9]])
truemodel["As"][1, :, :] = np.transpose([[0.9, 0.1], [0.1, 0.9]])
truemodel["learns"] = truemodel["As"][:, 1, 0]
truemodel["forgets"] = truemodel["As"][:, 0, 1]
truemodel["pi_0"] = np.array([[0.9], [0.1]]) #TODO: one prior per resource? does this array needs to be col?
truemodel["prior"] = 0.1
truemodel["guesses"] = np.full(num_subparts, 0.05, dtype=np.float64)
truemodel["slips"] = np.full(num_subparts, 0.25, dtype=np.float64)
# One resource id (1..num_resources) per observation step.
truemodel["resources"] = np.random.randint(1, high = num_resources+1, size = sum(observation_sequence_lengths))
# Data!
print("generating data...")
data = synthetic_data.synthetic_data(truemodel, observation_sequence_lengths)
# Fit models, starting with random initializations, keeping the best fit by
# final log likelihood.
print('fitting! each dot is a new EM initialization')
best_likelihood = float("-inf")
for i in range(num_fit_initializations):
    print_dot.print_dot(i, num_fit_initializations)
    fitmodel = random_model.random_model(num_resources, num_subparts)
    (fitmodel, log_likelihoods) = EM_fit.EM_fit(fitmodel, data)
    if (log_likelihoods[-1] > best_likelihood):
        best_likelihood = log_likelihoods[-1]
        best_model = fitmodel
# Compare the fit model to the true model.
print('')
print('these two should look similar')
print(truemodel['As'])
print('')
print(best_model['As'])
print('')
print('these should look similar too')
print(1-truemodel['guesses'])
print('')
print(1-best_model['guesses'])
print('')
print('these should look similar too')
print(1-truemodel['slips'])
print('')
print(1-best_model['slips']) | 31.681818 | 111 | 0.724055 | import numpy as np
from pyBKT.generate import synthetic_data
from pyBKT.generate import random_model, random_model_uni
from pyBKT.fit import EM_fit
from copy import deepcopy
from pyBKT.util import print_dot
# Experiment parameters.
num_subparts = 4
num_resources = 2
num_fit_initializations = 25
# 50 synthetic observation sequences of 100 steps each.
# Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# documented replacement for this usage is the builtin int.
observation_sequence_lengths = np.full(50, 100, dtype=int)
# Generate a synthetic model and data. The model is really easy.
truemodel = {}
# Bug fix: np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
truemodel["As"] = np.zeros((num_resources, 2, 2), dtype=np.float64)
truemodel["As"][0, :, :] = np.transpose([[0.75, 0.25], [0.1, 0.9]])
truemodel["As"][1, :, :] = np.transpose([[0.9, 0.1], [0.1, 0.9]])
truemodel["learns"] = truemodel["As"][:, 1, 0]
truemodel["forgets"] = truemodel["As"][:, 0, 1]
truemodel["pi_0"] = np.array([[0.9], [0.1]])
truemodel["prior"] = 0.1
truemodel["guesses"] = np.full(num_subparts, 0.05, dtype=np.float64)
truemodel["slips"] = np.full(num_subparts, 0.25, dtype=np.float64)
# One resource id (1..num_resources) per observation step.
truemodel["resources"] = np.random.randint(1, high = num_resources+1, size = sum(observation_sequence_lengths))
print("generating data...")
data = synthetic_data.synthetic_data(truemodel, observation_sequence_lengths)
# Fit models, starting with random initializations, keeping the best fit by
# final log likelihood.
print('fitting! each dot is a new EM initialization')
best_likelihood = float("-inf")
for i in range(num_fit_initializations):
    print_dot.print_dot(i, num_fit_initializations)
    fitmodel = random_model.random_model(num_resources, num_subparts)
    (fitmodel, log_likelihoods) = EM_fit.EM_fit(fitmodel, data)
    if (log_likelihoods[-1] > best_likelihood):
        best_likelihood = log_likelihoods[-1]
        best_model = fitmodel
# Compare the fit model to the true model.
print('')
print('these two should look similar')
print(truemodel['As'])
print('')
print(best_model['As'])
print('')
print('these should look similar too')
print(1-truemodel['guesses'])
print('')
print(1-best_model['guesses'])
print('')
print('these should look similar too')
print(1-truemodel['slips'])
print('')
print(1-best_model['slips']) | true | true |
1c4658b4bb64b7f6ea6eb1dbc078b2ce403e3327 | 369 | py | Python | Problem124.py | Cleancode404/ProjectEuler | 2f93b256b107bfb6a395b8aa197cfeacc599b00b | [
"MIT"
] | null | null | null | Problem124.py | Cleancode404/ProjectEuler | 2f93b256b107bfb6a395b8aa197cfeacc599b00b | [
"MIT"
] | null | null | null | Problem124.py | Cleancode404/ProjectEuler | 2f93b256b107bfb6a395b8aa197cfeacc599b00b | [
"MIT"
] | null | null | null | """
Ordered radicals
"""
def compute(x):
    """Project Euler 124 (ordered radicals): return E(x) as a string.
    rad(n) is the product of the distinct prime factors of n. The numbers
    1..100000 are sorted by (rad(n), n) and E(x) is the x-th entry of that
    sorted sequence. Bug fixes: the sort key was the whole `rads` list
    instead of each element's radical, and the argument x was ignored in
    favour of a hard-coded index 1000.
    """
    limit = 100000
    rads = [0] + [1] * limit
    # Sieve: whenever rads[i] is still 1, i is prime; multiply it into the
    # radical of every multiple of i.
    for i in range(2, len(rads)):
        if rads[i] == 1:
            for j in range(i, len(rads), i):
                rads[j] *= i
    # Sort by (radical, n). Index 0 holds the padding entry (0, 0), so the
    # x-th sorted entry is exactly E(x).
    data = sorted((rad, i) for (i, rad) in enumerate(rads))
    return str(data[x][1])
if __name__ =="__main__":
print(compute(10000)) | 18.45 | 60 | 0.517615 |
def compute(x):
limit = 100000
rads = [0] + [1]* limit
for i in range(2, len(rads)):
if rads[i] == 1:
for j in range(i, len(rads), i):
rads[j] *= i
data = sorted((rads, i) for (i, rad) in enumerate(rads))
return str(data[1000][1])
if __name__ =="__main__":
print(compute(10000)) | true | true |
1c4659f51ad3a120a0b93c0284ea7b59b39d919d | 537 | py | Python | setup.py | sw5cc/tencent-finance | 08da6a75904055a6113a01c86377b613cbe07033 | [
"MIT"
] | null | null | null | setup.py | sw5cc/tencent-finance | 08da6a75904055a6113a01c86377b613cbe07033 | [
"MIT"
] | null | null | null | setup.py | sw5cc/tencent-finance | 08da6a75904055a6113a01c86377b613cbe07033 | [
"MIT"
] | null | null | null | from setuptools import setup
# Release version and canonical repository of the package.
VERSION = '1.0.0'
REPO = 'https://github.com/sw5cc/tencent-finance'
# Packaging metadata for the tencent-finance distribution.
setup(
    name='tencent-finance',
    py_modules=['tencent_finance'],
    version=VERSION,
    description='Python library that provides APIs to query finance from http://stock.qq.com',
    author='sw5cc',
    author_email='sw5cc.125pflops@gmail.com',
    license='MIT',
    url=REPO,
    # Point the download at the GitHub tag archive matching this version.
    download_url='{0}/archive/{1}.tar.gz'.format(REPO, VERSION),
    keywords=['tencent', 'finance'],
    install_requires=['requests', 'simplejson']
)
| 28.263158 | 94 | 0.683426 | from setuptools import setup
# Release version and canonical repository of the package.
VERSION = '1.0.0'
REPO = 'https://github.com/sw5cc/tencent-finance'
# Packaging metadata for the tencent-finance distribution.
setup(
    name='tencent-finance',
    py_modules=['tencent_finance'],
    version=VERSION,
    description='Python library that provides APIs to query finance from http://stock.qq.com',
    author='sw5cc',
    author_email='sw5cc.125pflops@gmail.com',
    license='MIT',
    url=REPO,
    # Point the download at the GitHub tag archive matching this version.
    download_url='{0}/archive/{1}.tar.gz'.format(REPO, VERSION),
    keywords=['tencent', 'finance'],
    install_requires=['requests', 'simplejson']
)
| true | true |
1c465c0941cce89c8fc109d641fe9e2f109a55e6 | 1,071 | py | Python | python/time_test.py | ysoftman/test_code | 4c71cc7c6a17d73cc84298e3a44051d3ab9d40f8 | [
"MIT"
] | 3 | 2017-12-07T04:29:36.000Z | 2022-01-11T10:58:14.000Z | python/time_test.py | ysoftman/test_code | 4c71cc7c6a17d73cc84298e3a44051d3ab9d40f8 | [
"MIT"
] | 14 | 2018-07-17T05:16:42.000Z | 2022-03-22T00:43:47.000Z | python/time_test.py | ysoftman/test_code | 4c71cc7c6a17d73cc84298e3a44051d3ab9d40f8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : time test
import time
import datetime
if __name__ == '__main__':
    # Current time as seconds since the Unix epoch.
    print(time.time())
    # Suspend execution for 1 second.
    time.sleep(1)
    # Start a high-resolution timer. Bug fix: time.clock() was removed in
    # Python 3.8, and the matching `end` below already uses perf_counter();
    # subtracting two different clocks produced a meaningless elapsed time.
    start = time.perf_counter()
    # Current local time as a struct_time, plus its individual fields.
    print(time.localtime())
    print(time.localtime().tm_year)
    print(time.localtime().tm_mon)
    print(time.localtime().tm_mday)
    print(time.localtime().tm_hour)
    print(time.localtime().tm_min)
    print(time.localtime().tm_sec)
    day_of_week = {
        0: "monday",
        1: "tuesday",
        2: "wednesday",
        3: "thursday",
        4: "friday",
        5: "saturday",
        6: "sunday",
    }
    # Day of the week (Monday: 0 .. Sunday: 6).
    wday = time.localtime().tm_wday
    print(wday, '->', day_of_week.get(wday))
    end = time.perf_counter()
    print('elapsed time : ', end - start, 'sec')
    # Today's date derived from the current timestamp.
    print(datetime.date.fromtimestamp(time.time()))
    # The date 10 days from today.
    td = datetime.timedelta(days=10)
    print(datetime.date.today() + td)
| 21.42 | 51 | 0.582633 |
import time
import datetime
if __name__ == '__main__':
    # Current time as seconds since the Unix epoch.
    print(time.time())
    # Suspend execution for 1 second.
    time.sleep(1)
    # Start a high-resolution timer. Bug fix: time.clock() was removed in
    # Python 3.8, and the matching `end` below already uses perf_counter();
    # subtracting two different clocks produced a meaningless elapsed time.
    start = time.perf_counter()
    # Current local time as a struct_time, plus its individual fields.
    print(time.localtime())
    print(time.localtime().tm_year)
    print(time.localtime().tm_mon)
    print(time.localtime().tm_mday)
    print(time.localtime().tm_hour)
    print(time.localtime().tm_min)
    print(time.localtime().tm_sec)
    day_of_week = {
        0: "monday",
        1: "tuesday",
        2: "wednesday",
        3: "thursday",
        4: "friday",
        5: "saturday",
        6: "sunday",
    }
    # Day of the week (Monday: 0 .. Sunday: 6).
    wday = time.localtime().tm_wday
    print(wday, '->', day_of_week.get(wday))
    end = time.perf_counter()
    print('elapsed time : ', end - start, 'sec')
    # Today's date derived from the current timestamp.
    print(datetime.date.fromtimestamp(time.time()))
    # The date 10 days from today.
    td = datetime.timedelta(days=10)
    print(datetime.date.today() + td)
| true | true |
1c465c6a86486509dd27a24054b97bb891f2c729 | 1,867 | py | Python | tests/components/folder/test_sensor.py | twrecked/core | d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | tests/components/folder/test_sensor.py | twrecked/core | d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab | [
"Apache-2.0"
] | 6 | 2021-02-08T20:54:31.000Z | 2022-03-12T00:50:43.000Z | tests/components/folder/test_sensor.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 2 | 2020-04-19T13:35:24.000Z | 2020-04-19T13:35:51.000Z | """The tests for the folder sensor."""
import os
import unittest
from homeassistant.components.folder.sensor import CONF_FOLDER_PATHS
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
CWD = os.path.join(os.path.dirname(__file__))
TEST_FOLDER = "test_folder"
TEST_DIR = os.path.join(CWD, TEST_FOLDER)
TEST_TXT = "mock_test_folder.txt"
TEST_FILE = os.path.join(TEST_DIR, TEST_TXT)
def create_file(path):
    """Write the small marker file the folder-sensor tests read back."""
    with open(path, "w") as handle:
        handle.write("test")
class TestFolderSensor(unittest.TestCase):
    """Test the folder sensor."""
    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        if not os.path.isdir(TEST_DIR):
            os.mkdir(TEST_DIR)
        # The folder sensor only accepts whitelisted paths, so register the
        # test directory explicitly.
        self.hass.config.whitelist_external_dirs = {TEST_DIR}
    def teardown_method(self, method):
        """Stop everything that was started."""
        # Remove the test file (if a test created one) before removing the
        # directory - os.rmdir only works on an empty directory.
        if os.path.isfile(TEST_FILE):
            os.remove(TEST_FILE)
        os.rmdir(TEST_DIR)
        self.hass.stop()
    def test_invalid_path(self):
        """Test that an invalid path is caught."""
        # A non-whitelisted path must not create any entity.
        config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: "invalid_path"}}
        assert setup_component(self.hass, "sensor", config)
        assert len(self.hass.states.entity_ids()) == 0
    def test_valid_path(self):
        """Test for a valid path."""
        create_file(TEST_FILE)
        config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: TEST_DIR}}
        assert setup_component(self.hass, "sensor", config)
        assert len(self.hass.states.entity_ids()) == 1
        state = self.hass.states.get("sensor.test_folder")
        # The tiny 4-byte test file reports as "0.0" - presumably the folder
        # size rounded in MB; confirm against the sensor implementation.
        assert state.state == "0.0"
        assert state.attributes.get("number_of_files") == 1
| 33.945455 | 86 | 0.666845 | import os
import unittest
from homeassistant.components.folder.sensor import CONF_FOLDER_PATHS
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
CWD = os.path.join(os.path.dirname(__file__))
TEST_FOLDER = "test_folder"
TEST_DIR = os.path.join(CWD, TEST_FOLDER)
TEST_TXT = "mock_test_folder.txt"
TEST_FILE = os.path.join(TEST_DIR, TEST_TXT)
def create_file(path):
    """Write the small marker file the folder-sensor tests read back."""
    with open(path, "w") as handle:
        handle.write("test")
class TestFolderSensor(unittest.TestCase):
    """Test the folder sensor."""
    def setup_method(self, method):
        """Create the test directory and a whitelisted hass instance."""
        self.hass = get_test_home_assistant()
        if not os.path.isdir(TEST_DIR):
            os.mkdir(TEST_DIR)
        # The folder sensor only accepts whitelisted paths, so register the
        # test directory explicitly.
        self.hass.config.whitelist_external_dirs = {TEST_DIR}
    def teardown_method(self, method):
        """Remove the test artifacts and stop hass."""
        # Remove the test file (if a test created one) before removing the
        # directory - os.rmdir only works on an empty directory.
        if os.path.isfile(TEST_FILE):
            os.remove(TEST_FILE)
        os.rmdir(TEST_DIR)
        self.hass.stop()
    def test_invalid_path(self):
        """A non-whitelisted path must not create any entity."""
        config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: "invalid_path"}}
        assert setup_component(self.hass, "sensor", config)
        assert len(self.hass.states.entity_ids()) == 0
    def test_valid_path(self):
        """A whitelisted folder with one file creates a populated entity."""
        create_file(TEST_FILE)
        config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: TEST_DIR}}
        assert setup_component(self.hass, "sensor", config)
        assert len(self.hass.states.entity_ids()) == 1
        state = self.hass.states.get("sensor.test_folder")
        # The tiny 4-byte test file reports as "0.0" - presumably the folder
        # size rounded in MB; confirm against the sensor implementation.
        assert state.state == "0.0"
        assert state.attributes.get("number_of_files") == 1
| true | true |
1c465d2bf7cc3b2557d4537d22985e65be65189e | 6,600 | py | Python | utils/models/mobilenet_v2.py | voldemortX/DeeplabV3_PyTorch1.3_Codebase | d22d23e74800fafb58eeb61d6649008745c1a287 | [
"BSD-3-Clause"
] | 1 | 2020-09-17T06:21:39.000Z | 2020-09-17T06:21:39.000Z | utils/models/mobilenet_v2.py | voldemortX/pytorch-segmentation | 9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda | [
"BSD-3-Clause"
] | null | null | null | utils/models/mobilenet_v2.py | voldemortX/pytorch-segmentation | 9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda | [
"BSD-3-Clause"
] | null | null | null | # Modified from mmsegmentation code, referenced from torchvision
import torch.nn as nn
from .builder import MODELS
from ._utils import make_divisible
from .common_models import InvertedResidual
from .utils import load_state_dict_from_url
@MODELS.register()
class MobileNetV2Encoder(nn.Module):
    """MobileNetV2 backbone (up to second-to-last feature map).
    This backbone is the implementation of
    `MobileNetV2: Inverted Residuals and Linear Bottlenecks
    <https://arxiv.org/abs/1801.04381>`_.
    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        strides (Sequence[int], optional): Strides of the first block of each
            layer. If not specified, default config in ``arch_setting`` will
            be used.
        dilations (Sequence[int]): Dilation of each layer.
        out_indices (None or Sequence[int]): Output from which stages.
            Default: (1, 2, 4, 6).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        pretrained (str, optional): model pretrained path. Default: None
        out_stride (int): the output stride of the output feature map
    """
    # Parameters to build layers. 3 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks.
    arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4],
                     [6, 96, 3], [6, 160, 3], [6, 320, 1]]
    def __init__(self, widen_factor=1., strides=(1, 2, 2, 2, 1, 2, 1), dilations=(1, 1, 1, 1, 1, 1, 1),
                 out_indices=(1, 2, 4, 6), frozen_stages=-1, norm_eval=False, pretrained=None,
                 progress=True, out_stride=0):
        super(MobileNetV2Encoder, self).__init__()
        self.pretrained = pretrained
        self.widen_factor = widen_factor
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == len(self.arch_settings)
        # Bug fix: both error messages below were plain strings, so the
        # literal text '{index}' / '{frozen_stages}' was printed instead of
        # the offending value; they are now f-strings.
        for index in out_indices:
            if index not in range(0, 7):
                raise ValueError(f'the item in out_indices must in range(0, 7). But received {index}')
        if frozen_stages not in range(-1, 7):
            raise ValueError(f'frozen_stages must be in range(-1, 7). But received {frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval
        self.out_stride = out_stride
        # Stem: 3x3 stride-2 conv -> BN -> ReLU6.
        self.in_channels = make_divisible(32 * widen_factor, 8)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(self.in_channels),
            nn.ReLU6()
        )
        # Build the seven inverted-residual stages; each stage is registered
        # as a submodule named layer1..layer7 and the names are recorded so
        # forward() can walk the stages in order.
        self.layers = []
        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks = layer_cfg
            stride = self.strides[i]
            dilation = self.dilations[i]
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)
        if self.pretrained is None:
            self.weight_initialization()
        else:
            self.load_pretrained(progress=progress)
    def load_pretrained(self, progress):
        """Load pretrained weights positionally from self.pretrained.
        The i-th tensor of the downloaded checkpoint is copied onto the i-th
        entry of this module's state dict, ignoring names. This only works
        while both state dicts enumerate their tensors in the same order.
        """
        state_dict = load_state_dict_from_url(self.pretrained, progress=progress)
        self_state_dict = self.state_dict()
        self_keys = list(self_state_dict.keys())
        for i, (_, v) in enumerate(state_dict.items()):
            if i > len(self_keys) - 1:
                break
            self_state_dict[self_keys[i]] = v
        self.load_state_dict(self_state_dict)
    def weight_initialization(self):
        """Initialize weights (Kaiming conv, unit BN/GN, normal linear),
        used when no pretrained checkpoint is given."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def make_layer(self, out_channels, num_blocks, stride, dilation,
                   expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.
        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): Number of blocks.
            stride (int): Stride of the first block.
            dilation (int): Dilation of the first block.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.
        """
        layers = []
        for i in range(num_blocks):
            # Only the first block of a stage applies the stride/dilation.
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride if i == 0 else 1,
                    expand_ratio=expand_ratio,
                    dilation=dilation if i == 0 else 1)
            )
            self.in_channels = out_channels
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run the stem and all stages, collecting the feature maps at
        self.out_indices. Returns a single tensor when one stage is
        requested, otherwise a tuple of tensors."""
        x = self.conv1(x)
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)
    def _freeze_stages(self):
        """Disable gradients for the stem and the first frozen_stages stages.
        NOTE(review): nothing in this class calls this helper - presumably
        the surrounding training framework does; confirm.
        """
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False
| 42.038217 | 116 | 0.594697 |
import torch.nn as nn
from .builder import MODELS
from ._utils import make_divisible
from .common_models import InvertedResidual
from .utils import load_state_dict_from_url
@MODELS.register()
class MobileNetV2Encoder(nn.Module):
    """MobileNetV2 backbone (up to second-to-last feature map).
    Implementation of `MobileNetV2: Inverted Residuals and Linear
    Bottlenecks <https://arxiv.org/abs/1801.04381>`_.
    Args:
        widen_factor (float): Channel width multiplier. Default: 1.0.
        strides (Sequence[int]): Stride of the first block of each stage.
        dilations (Sequence[int]): Dilation of the first block of each stage.
        out_indices (Sequence[int]): Stages (0-6) whose feature maps are
            returned. Default: (1, 2, 4, 6).
        frozen_stages (int): Number of leading stages to freeze; -1 freezes
            nothing. Default: -1.
        norm_eval (bool): Whether norm layers should be kept in eval mode.
            Default: False.
        pretrained (str, optional): URL/path of a pretrained checkpoint.
        out_stride (int): Output stride of the final feature map.
    """
    # Per stage, left to right: expand_ratio, channel, num_blocks.
    arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4],
                     [6, 96, 3], [6, 160, 3], [6, 320, 1]]
    def __init__(self, widen_factor=1., strides=(1, 2, 2, 2, 1, 2, 1), dilations=(1, 1, 1, 1, 1, 1, 1),
                 out_indices=(1, 2, 4, 6), frozen_stages=-1, norm_eval=False, pretrained=None,
                 progress=True, out_stride=0):
        super(MobileNetV2Encoder, self).__init__()
        self.pretrained = pretrained
        self.widen_factor = widen_factor
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == len(self.arch_settings)
        # Bug fix: both error messages below were plain strings, so the
        # literal text '{index}' / '{frozen_stages}' was printed instead of
        # the offending value; they are now f-strings.
        for index in out_indices:
            if index not in range(0, 7):
                raise ValueError(f'the item in out_indices must in range(0, 7). But received {index}')
        if frozen_stages not in range(-1, 7):
            raise ValueError(f'frozen_stages must be in range(-1, 7). But received {frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval
        self.out_stride = out_stride
        # Stem: 3x3 stride-2 conv -> BN -> ReLU6.
        self.in_channels = make_divisible(32 * widen_factor, 8)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(self.in_channels),
            nn.ReLU6()
        )
        # Build the seven inverted-residual stages; each stage is registered
        # as a submodule named layer1..layer7 and the names are recorded so
        # forward() can walk the stages in order.
        self.layers = []
        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks = layer_cfg
            stride = self.strides[i]
            dilation = self.dilations[i]
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)
        if self.pretrained is None:
            self.weight_initialization()
        else:
            self.load_pretrained(progress=progress)
    def load_pretrained(self, progress):
        """Load pretrained weights positionally from self.pretrained.
        The i-th tensor of the downloaded checkpoint is copied onto the i-th
        entry of this module's state dict, ignoring names. This only works
        while both state dicts enumerate their tensors in the same order.
        """
        state_dict = load_state_dict_from_url(self.pretrained, progress=progress)
        self_state_dict = self.state_dict()
        self_keys = list(self_state_dict.keys())
        for i, (_, v) in enumerate(state_dict.items()):
            if i > len(self_keys) - 1:
                break
            self_state_dict[self_keys[i]] = v
        self.load_state_dict(self_state_dict)
    def weight_initialization(self):
        """Initialize weights (Kaiming conv, unit BN/GN, normal linear),
        used when no pretrained checkpoint is given."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def make_layer(self, out_channels, num_blocks, stride, dilation,
                   expand_ratio):
        """Stack num_blocks InvertedResidual blocks into one stage; only the
        first block applies the stride/dilation."""
        layers = []
        for i in range(num_blocks):
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride if i == 0 else 1,
                    expand_ratio=expand_ratio,
                    dilation=dilation if i == 0 else 1)
            )
            self.in_channels = out_channels
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run the stem and all stages, collecting the feature maps at
        self.out_indices. Returns a single tensor when one stage is
        requested, otherwise a tuple of tensors."""
        x = self.conv1(x)
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)
    def _freeze_stages(self):
        """Disable gradients for the stem and the first frozen_stages stages.
        NOTE(review): nothing in this class calls this helper - presumably
        the surrounding training framework does; confirm.
        """
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False
| true | true |
1c465d43539d78553af3d947b0be4daa8319c479 | 20,345 | py | Python | tests/python/unittest/test_higher_order_grad.py | HaoLiuHust/incubator-mxnet | 0deb50b33f29a19bbe4bdc6ff14658afc5000d50 | [
"Apache-2.0"
] | 1 | 2019-02-22T13:53:48.000Z | 2019-02-22T13:53:48.000Z | tests/python/unittest/test_higher_order_grad.py | HaoLiuHust/incubator-mxnet | 0deb50b33f29a19bbe4bdc6ff14658afc5000d50 | [
"Apache-2.0"
] | 1 | 2020-08-27T06:39:07.000Z | 2020-08-31T03:29:27.000Z | tests/python/unittest/test_higher_order_grad.py | HaoLiuHust/incubator-mxnet | 0deb50b33f29a19bbe4bdc6ff14658afc5000d50 | [
"Apache-2.0"
] | 1 | 2020-08-14T22:56:19.000Z | 2020-08-14T22:56:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import random
from functools import reduce
from operator import mul
import random
from common import with_seed, xfail_when_nonstandard_decimal_separator
import mxnet
from mxnet import nd, autograd, gluon
from mxnet.test_utils import (
assert_almost_equal, random_arrays, random_uniform_arrays, rand_shape_nd, same)
@with_seed()
def test_sin():
    """Check 2nd and 3rd order autograd gradients of sin.

    d^2/dx^2 sin(x) = -sin(x) and d^3/dx^3 sin(x) = -cos(x).
    """
    def op(x):
        return nd.sin(x)

    def second_deriv(x):
        return -nd.sin(x)

    def third_deriv(x):
        return -nd.cos(x)

    for ndim in range(1, 5):
        data = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(data, op, second_deriv)
        # TODO(kshitij12345): Remove
        check_nth_order_unary(data, op, [second_deriv, third_deriv], [2, 3])
@with_seed()
def test_cos():
    """Check 2nd and 3rd order autograd gradients of cos.

    d^2/dx^2 cos(x) = -cos(x) and d^3/dx^3 cos(x) = sin(x).
    """
    def op(x):
        return nd.cos(x)

    def second_deriv(x):
        return -nd.cos(x)

    def third_deriv(x):
        return nd.sin(x)

    for ndim in range(1, 5):
        data = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(data, op, second_deriv)
        # TODO(kshitij12345): Remove
        check_nth_order_unary(data, op, [second_deriv, third_deriv], [2, 3])
@with_seed()
def test_tan():
    """Check the 2nd order autograd gradient of tan.

    tan'(x) = 1/cos^2(x) and tan''(x) = 2 * tan(x) * tan'(x).
    """
    def op(x):
        return nd.tan(x)

    def first_deriv(x):
        return 1 / nd.cos(x) ** 2

    def second_deriv(x):
        return 2 * op(x) * first_deriv(x)

    for ndim in range(1, 5):
        data = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(data, op, second_deriv)
@with_seed()
def test_sinh():
    """Check the 2nd order autograd gradient of sinh (sinh'' = sinh)."""
    def op(x):
        return nd.sinh(x)

    def second_deriv(x):
        # The second derivative reproduces the function itself.
        return op(x)

    for ndim in range(1, 5):
        data = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(data, op, second_deriv)
@with_seed()
def test_cosh():
    """Check the 2nd order autograd gradient of cosh (cosh'' = cosh)."""
    def op(x):
        return nd.cosh(x)

    def second_deriv(x):
        # The second derivative reproduces the function itself.
        return op(x)

    for ndim in range(1, 5):
        data = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(data, op, second_deriv)
@with_seed()
def test_tanh():
    """Check 1st and 2nd order autograd gradients of tanh.

    tanh'(x) = 1 - tanh^2(x) and tanh''(x) = -2 * tanh(x) * tanh'(x).
    """
    def op(x):
        return nd.tanh(x)

    def first_deriv(x):
        return 1 - op(x) ** 2

    def second_deriv(x):
        return -2 * op(x) * first_deriv(x)

    for ndim in range(1, 5):
        data = random_arrays(rand_shape_nd(ndim))
        check_nth_order_unary(data, op, first_deriv, 1, rtol=1e-6, atol=1e-6)
        check_second_order_unary(
            data, op, second_deriv, rtol=1e-6, atol=1e-5)
@with_seed()
def test_arcsin():
def arcsin(x):
return nd.arcsin(x)
def grad_grad_op(x):
return x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
# Domain of arcsin is [-1, 1]
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arcsin, grad_grad_op)
@with_seed()
def test_arccos():
def arccos(x):
return nd.arccos(x)
def grad_grad_op(x):
return -x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
# Domain of arccos is [-1, 1]
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arccos, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctan():
def arctan(x):
return nd.arctan(x)
def grad_grad_op(x):
return (-2 * x)/((1 + x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
# Domain of arctan is all real numbers.
# Scale std_dev
array *= random.randint(500, 10000)
check_second_order_unary(array, arctan, grad_grad_op)
@with_seed()
def test_arcsinh():
def arcsinh(x):
return nd.arcsinh(x)
def grad_grad_op(x):
return x/nd.sqrt((nd.square(x)+1)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, arcsinh, grad_grad_op)
@with_seed()
def test_arccosh():
def arccosh(x):
return nd.arccosh(x)
def grad_grad_op(x):
return x/(nd.sqrt(x-1) * nd.sqrt(x+1) * (x+1) * (x-1))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = array * sigma + mu
# Domain of arccosh 1 to infinity.
assert((array > 1).all())
check_second_order_unary(array, arccosh, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctanh():
def arctanh(x):
return nd.arctanh(x)
def grad_grad_op(x):
return (2 * x)/((1 - x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
# Domain of arctanh is (-1, 1)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arctanh, grad_grad_op)
@with_seed()
def test_radians():
def radians(x):
return nd.radians(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, radians, grad_grad_op)
@with_seed()
def test_relu():
def relu(x):
return nd.relu(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, relu, grad_grad_op)
@with_seed()
def test_log():
def log(x):
return nd.log(x)
def grad_op(x):
return 1/x
def grad_grad_op(x):
return -1/(x**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log, grad_grad_op)
# TODO(kshitij12345): Remove
check_nth_order_unary(array, log, [grad_op, grad_grad_op], [1, 2])
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log2():
def log2(x):
return nd.log2(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(2))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log2, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log10():
def log10(x):
return nd.log10(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(10))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log10, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_square():
def grad_grad_op(x):
return nd.ones_like(x) * 2
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.square, grad_grad_op)
@with_seed()
def test_expm1():
def grad_grad_op(x):
return nd.exp(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.expm1, grad_grad_op)
@with_seed()
def test_log1p():
def grad_grad_op(x):
return -1/((1+x)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.log1p, grad_grad_op)
@with_seed()
def test_reciprocal():
def reciprocal(x):
return nd.reciprocal(x)
def grad_grad_op(x):
return 2 / x**3
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, reciprocal, grad_grad_op)
@with_seed()
def test_abs():
def abs(x):
return nd.abs(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, abs, grad_grad_op)
@with_seed()
def test_clip():
def clip(x):
a_min, a_max = sorted([random.random(), random.random()])
return nd.clip(x, a_min, a_max)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, clip, grad_grad_op)
@with_seed()
def test_dropout():
def dropout(x):
return nd.Dropout(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, dropout, grad_grad_op)
@with_seed()
def test_sigmoid():
def sigmoid(x):
return nd.sigmoid(x)
def grad_op(x):
return sigmoid(x) * (1 - sigmoid(x))
def grad_grad_op(x):
return grad_op(x) * (1 - 2 * sigmoid(x))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sigmoid, grad_grad_op)
# TODO(kshitij12345): Remove
check_nth_order_unary(array, sigmoid, [grad_op, grad_grad_op], [1, 2])
check_nth_order_unary(array, sigmoid, grad_grad_op, 2)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sqrt():
def sqrt(x):
return nd.sqrt(x)
def grad_grad_op(x):
return -1/(4 * sqrt(x**3))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, sqrt, grad_grad_op)
@with_seed()
def test_cbrt():
def cbrt(x):
return nd.cbrt(x)
def grad_grad_op(x):
return -2/(9 * cbrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, cbrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rsqrt():
def rsqrt(x):
return nd.rsqrt(x)
def grad_grad_op(x):
return 3/(4 * nd.sqrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, rsqrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rcbrt():
def rcbrt(x):
return nd.rcbrt(x)
def grad_grad_op(x):
return 4/(9 * nd.cbrt(x**7))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, rcbrt, grad_grad_op)
def check_second_order_unary(x, op, grad_grad_op, rtol=None, atol=None):
    """Thin wrapper over :func:`check_nth_order_unary` for order 2 only."""
    check_nth_order_unary(x, op, grad_grad_op, orders=2, rtol=rtol, atol=atol)
def check_nth_order_unary(x, op, grad_ops, orders, rtol=None, atol=None):
    """Assert n-th order autograd gradients against analytic gradients.

    Several orders can be checked in one call by passing a list of
    gradient functions together with the matching list of orders, e.g.
    ``grad_ops=[grad_op, grad_grad_grad_op]`` with ``orders=[1, 3]``.

    Note
    ----
    1. ``orders`` must be strictly (monotonically) increasing.
    2. Elements of ``grad_ops`` correspond positionally to ``orders``.

    Parameters
    ----------
    x : mxnet.NDArray
        Input Array.
    op : Callable
        Operation to perform on Input Array.
    grad_ops : Callable or List of Callable
        Function(s) computing the analytic gradient of the given order.
    orders : int or List of int
        Order/s at which to compare expected and computed gradients.
    rtol, atol : float, optional
        Tolerances forwarded to ``assert_almost_equal``.

    Returns
    -------
    None
    """
    # Normalize the single-order convenience form to lists.
    if isinstance(orders, int):
        orders = [orders]
        grad_ops = [grad_ops]
    assert all(i < j for i, j in zip(orders[0:-1], orders[1:])), \
        "orders should be monotonically increasing"
    assert len(set(orders)) == len(orders), \
        "orders should have unique elements"
    highest_order = max(orders)
    x = nd.array(x)
    x.attach_grad()
    # Analytic gradients, evaluated once on the raw input.
    expected_grads = [grad_op(x) for grad_op in grad_ops]
    computed_grads = []
    head_grads = []
    # Differentiate repeatedly: each pass produces the next-order gradient,
    # seeded with a fresh random head gradient.
    with autograd.record():
        y = op(x)
        for current_order in range(1, highest_order+1):
            head_grad = nd.random.normal(shape=x.shape)
            y = autograd.grad(heads=y, variables=x, head_grads=head_grad,
                              create_graph=True, retain_graph=True)[0]
            if current_order in orders:
                computed_grads.append(y)
            head_grads.append(head_grad)
    # Validate all the gradients.
    for order, grad, computed_grad in \
            zip(orders, expected_grads, computed_grads):
        # For elementwise unary ops each backward pass scales the gradient
        # elementwise by its head gradient, so the expected n-th order
        # gradient is the analytic gradient times the product of the first
        # n head gradients.
        expected_grad = grad.asnumpy()
        for head_grad in head_grads[:order]:
            expected_grad *= head_grad.asnumpy()
        assert_almost_equal(
            expected_grad, computed_grad.asnumpy(), rtol=rtol, atol=atol)
def arange_shape_like(y):
    """Return an NDArray shaped like ``y`` filled with 0..size-1."""
    total = reduce(mul, y.shape)
    return nd.arange(total).reshape(y.shape)
class NDArrayGenerator(object):
    """Iterator yielding one arange-filled NDArray per dimensionality.

    Yields arrays of ``startdim`` up to and including ``dim`` dimensions,
    each with a random shape whose axes are capped at 4.
    """

    def __init__(self, dim, startdim=1):
        self.dim = dim          # highest dimensionality to yield (inclusive)
        self.curdim = startdim  # dimensionality of the next array

    def __iter__(self):
        return self

    @staticmethod
    def gen(dimensions):
        """Build an arange-filled array with ``dimensions`` random axes."""
        shape = rand_shape_nd(dimensions, 4)
        count = reduce(mul, shape)
        return nd.arange(count).reshape(shape)

    def __next__(self):
        if self.curdim > self.dim:
            raise StopIteration
        result = self.gen(self.curdim)
        self.curdim += 1
        return result

    # Python 2 style alias kept for backward compatibility.
    def next(self):
        return self.__next__()
def flatten2d_right(x):
    """Collapse every axis after the first: (a, b, c, ...) -> (a, b*c*...)."""
    trailing = reduce(mul, x.shape[1:])
    return x.reshape((x.shape[0], trailing))
def flatten2d_left(x):
    """Collapse every axis before the last: (..., y, z) -> (...*y, z)."""
    leading = reduce(mul, x.shape[:-1])
    return x.reshape((leading, x.shape[-1]))
@with_seed()
def test_dense_backward_flatten():
    """Second-order gradients of gluon Dense (flatten=True).

    For y = x * w^T + b, checks first- and second-order gradients w.r.t.
    both x and w against closed-form matrix products, using deterministic
    arange head gradients so the expected values are reproducible.
    """
    print("2nd order gradient for Fully Connected, flatten=True")
    for x in NDArrayGenerator(4,2):
        hidden = random.randrange(1, 4)
        net = gluon.nn.Sequential()
        with net.name_scope():
            net.add(gluon.nn.Dense(hidden, flatten=True))
        net.initialize(mxnet.initializer.Constant(.5))
        x.attach_grad()
        with autograd.record():
            y = net.forward(x)
        o_y = arange_shape_like(y)  # head gradient of y
        params = [p.data() for p in net.collect_params().values()]
        w = params[0]
        b = params[1]
        print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
        # dy/dx seeded with o_y; keep the graph so we can differentiate again.
        x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
                               create_graph=True, retain_graph=True)[0]
        o_x_grad = arange_shape_like(x_grad)
        # d(x_grad)/dw: the mixed second-order gradient.
        w_grad_grad = autograd.grad(heads=x_grad, variables=w,
                                    head_grads=o_x_grad, create_graph=False)[0]
        w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
                               create_graph=True, retain_graph=True)[0]
        o_w_grad = arange_shape_like(w_grad)
        # d(w_grad)/dx: the other mixed second-order gradient.
        x_grad_grad = autograd.grad(heads=w_grad, variables=x,
                                    head_grads=o_w_grad, create_graph=False)[0]
        # Expected results (closed-form for the affine layer).
        w_grad_e = nd.dot(o_y, x, transpose_a=True)
        w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
        x_grad_e = nd.dot(o_y, w)
        x_grad_grad_e = nd.dot(o_y, o_w_grad)
        assert w_grad.shape == w.shape
        assert w_grad_grad.shape == w.shape
        assert x_grad.shape == x.shape
        assert x_grad_grad.shape == x.shape
        # Compare as 2-D (batch, features) views, matching flatten=True.
        w_grad_check = same(flatten2d_right(w_grad), flatten2d_right(w_grad_e))
        w_grad_grad_check = same(flatten2d_right(w_grad_grad), flatten2d_right(w_grad_grad_e))
        x_grad_check = same(flatten2d_right(x_grad), flatten2d_right(x_grad_e))
        x_grad_grad_check = same(flatten2d_right(x_grad_grad), flatten2d_right(x_grad_grad_e))
        assert x_grad_check
        assert w_grad_check
        assert x_grad_grad_check
        assert w_grad_grad_check
@with_seed()
def test_dense_backward_no_flatten():
    """Second-order gradients of gluon Dense (flatten=False).

    Same checks as the flatten=True variant, but the layer acts on the
    last axis only, so expected values are computed after collapsing all
    leading axes with ``flatten2d_left``.
    """
    print("2nd order gradient for Fully Connected, flatten=False")
    for x in NDArrayGenerator(5,3):
        hidden = random.randrange(1, 4)
        net = gluon.nn.Sequential()
        with net.name_scope():
            net.add(gluon.nn.Dense(hidden, flatten=False))
        net.initialize(mxnet.initializer.Constant(.5))
        x.attach_grad()
        with autograd.record():
            y = net.forward(x)
        o_y = arange_shape_like(y)  # head gradient of y
        params = [p.data() for p in net.collect_params().values()]
        w = params[0]
        b = params[1]
        print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
        # dy/dx seeded with o_y; keep the graph so we can differentiate again.
        x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
                               create_graph=True, retain_graph=True)[0]
        o_x_grad = arange_shape_like(x_grad)
        # d(x_grad)/dw: the mixed second-order gradient.
        w_grad_grad = autograd.grad(heads=x_grad, variables=w,
                                    head_grads=o_x_grad, create_graph=False)[0]
        w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
                               create_graph=True, retain_graph=True)[0]
        o_w_grad = arange_shape_like(w_grad)
        # d(w_grad)/dx: the other mixed second-order gradient.
        x_grad_grad = autograd.grad(heads=w_grad, variables=x,
                                    head_grads=o_w_grad, create_graph=False)[0]
        # Expected results: collapse leading axes first (flatten=False keeps
        # them in the layer output, so the matrix identities hold in 2-D).
        o_y = flatten2d_left(o_y)
        x = flatten2d_left(x)
        o_x_grad = flatten2d_left(o_x_grad)
        o_w_grad = flatten2d_left(o_w_grad)
        w_grad_e = nd.dot(o_y, x, transpose_a=True)
        w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
        x_grad_e = nd.dot(o_y, w)
        x_grad_grad_e = nd.dot(o_y, o_w_grad)
        w_grad_check = same(flatten2d_left(w_grad), flatten2d_left(w_grad_e))
        w_grad_grad_check = same(flatten2d_left(w_grad_grad), flatten2d_left(w_grad_grad_e))
        x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e))
        x_grad_grad_check = same(flatten2d_left(x_grad_grad), flatten2d_left(x_grad_grad_e))
        assert x_grad_check
        assert w_grad_check
        assert x_grad_grad_check
        assert w_grad_grad_check
| 28.454545 | 105 | 0.621627 |
import math
import random
from functools import reduce
from operator import mul
import random
from common import with_seed, xfail_when_nonstandard_decimal_separator
import mxnet
from mxnet import nd, autograd, gluon
from mxnet.test_utils import (
assert_almost_equal, random_arrays, random_uniform_arrays, rand_shape_nd, same)
@with_seed()
def test_sin():
def sin(x):
return nd.sin(x)
def grad_grad_op(x):
return -nd.sin(x)
def grad_grad_grad_op(x):
return -nd.cos(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sin, grad_grad_op)
check_nth_order_unary(array, sin,
[grad_grad_op, grad_grad_grad_op], [2, 3])
@with_seed()
def test_cos():
def cos(x):
return nd.cos(x)
def grad_grad_op(x):
return -nd.cos(x)
def grad_grad_grad_op(x):
return nd.sin(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, cos, grad_grad_op)
check_nth_order_unary(array, cos,
[grad_grad_op, grad_grad_grad_op], [2, 3])
@with_seed()
def test_tan():
def tan(x):
return nd.tan(x)
def grad_op(x):
return 1 / nd.cos(x)**2
def grad_grad_op(x):
return 2 * tan(x) * grad_op(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, tan, grad_grad_op)
@with_seed()
def test_sinh():
def sinh(x):
return nd.sinh(x)
def grad_grad_op(x):
return sinh(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sinh, grad_grad_op)
@with_seed()
def test_cosh():
def cosh(x):
return nd.cosh(x)
def grad_grad_op(x):
return cosh(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, cosh, grad_grad_op)
@with_seed()
def test_tanh():
def tanh(x):
return nd.tanh(x)
def grad_op(x):
return 1 - tanh(x)**2
def grad_grad_op(x):
return -2 * tanh(x) * grad_op(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_nth_order_unary(array, tanh, grad_op, 1, rtol=1e-6, atol=1e-6)
check_second_order_unary(
array, tanh, grad_grad_op, rtol=1e-6, atol=1e-5)
@with_seed()
def test_arcsin():
def arcsin(x):
return nd.arcsin(x)
def grad_grad_op(x):
return x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arcsin, grad_grad_op)
@with_seed()
def test_arccos():
def arccos(x):
return nd.arccos(x)
def grad_grad_op(x):
return -x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arccos, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctan():
def arctan(x):
return nd.arctan(x)
def grad_grad_op(x):
return (-2 * x)/((1 + x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array *= random.randint(500, 10000)
check_second_order_unary(array, arctan, grad_grad_op)
@with_seed()
def test_arcsinh():
def arcsinh(x):
return nd.arcsinh(x)
def grad_grad_op(x):
return x/nd.sqrt((nd.square(x)+1)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, arcsinh, grad_grad_op)
@with_seed()
def test_arccosh():
def arccosh(x):
return nd.arccosh(x)
def grad_grad_op(x):
return x/(nd.sqrt(x-1) * nd.sqrt(x+1) * (x+1) * (x-1))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = array * sigma + mu
assert((array > 1).all())
check_second_order_unary(array, arccosh, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctanh():
def arctanh(x):
return nd.arctanh(x)
def grad_grad_op(x):
return (2 * x)/((1 - x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arctanh, grad_grad_op)
@with_seed()
def test_radians():
def radians(x):
return nd.radians(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, radians, grad_grad_op)
@with_seed()
def test_relu():
def relu(x):
return nd.relu(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, relu, grad_grad_op)
@with_seed()
def test_log():
def log(x):
return nd.log(x)
def grad_op(x):
return 1/x
def grad_grad_op(x):
return -1/(x**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log, grad_grad_op)
check_nth_order_unary(array, log, [grad_op, grad_grad_op], [1, 2])
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log2():
def log2(x):
return nd.log2(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(2))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log2, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log10():
def log10(x):
return nd.log10(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(10))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log10, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_square():
def grad_grad_op(x):
return nd.ones_like(x) * 2
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.square, grad_grad_op)
@with_seed()
def test_expm1():
def grad_grad_op(x):
return nd.exp(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.expm1, grad_grad_op)
@with_seed()
def test_log1p():
def grad_grad_op(x):
return -1/((1+x)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.log1p, grad_grad_op)
@with_seed()
def test_reciprocal():
def reciprocal(x):
return nd.reciprocal(x)
def grad_grad_op(x):
return 2 / x**3
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, reciprocal, grad_grad_op)
@with_seed()
def test_abs():
def abs(x):
return nd.abs(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, abs, grad_grad_op)
@with_seed()
def test_clip():
def clip(x):
a_min, a_max = sorted([random.random(), random.random()])
return nd.clip(x, a_min, a_max)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, clip, grad_grad_op)
@with_seed()
def test_dropout():
def dropout(x):
return nd.Dropout(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, dropout, grad_grad_op)
@with_seed()
def test_sigmoid():
def sigmoid(x):
return nd.sigmoid(x)
def grad_op(x):
return sigmoid(x) * (1 - sigmoid(x))
def grad_grad_op(x):
return grad_op(x) * (1 - 2 * sigmoid(x))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sigmoid, grad_grad_op)
check_nth_order_unary(array, sigmoid, [grad_op, grad_grad_op], [1, 2])
check_nth_order_unary(array, sigmoid, grad_grad_op, 2)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sqrt():
def sqrt(x):
return nd.sqrt(x)
def grad_grad_op(x):
return -1/(4 * sqrt(x**3))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, sqrt, grad_grad_op)
@with_seed()
def test_cbrt():
def cbrt(x):
return nd.cbrt(x)
def grad_grad_op(x):
return -2/(9 * cbrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, cbrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rsqrt():
def rsqrt(x):
return nd.rsqrt(x)
def grad_grad_op(x):
return 3/(4 * nd.sqrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, rsqrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rcbrt():
def rcbrt(x):
return nd.rcbrt(x)
def grad_grad_op(x):
return 4/(9 * nd.cbrt(x**7))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, rcbrt, grad_grad_op)
def check_second_order_unary(x, op, grad_grad_op, rtol=None, atol=None):
check_nth_order_unary(x, op, grad_grad_op, 2, rtol, atol)
def check_nth_order_unary(x, op, grad_ops, orders, rtol=None, atol=None):
if isinstance(orders, int):
orders = [orders]
grad_ops = [grad_ops]
assert all(i < j for i, j in zip(orders[0:-1], orders[1:])), \
"orders should be monotonically increasing"
assert len(set(orders)) == len(orders), \
"orders should have unique elements"
highest_order = max(orders)
x = nd.array(x)
x.attach_grad()
expected_grads = [grad_op(x) for grad_op in grad_ops]
computed_grads = []
head_grads = []
with autograd.record():
y = op(x)
for current_order in range(1, highest_order+1):
head_grad = nd.random.normal(shape=x.shape)
y = autograd.grad(heads=y, variables=x, head_grads=head_grad,
create_graph=True, retain_graph=True)[0]
if current_order in orders:
computed_grads.append(y)
head_grads.append(head_grad)
for order, grad, computed_grad in \
zip(orders, expected_grads, computed_grads):
expected_grad = grad.asnumpy()
for head_grad in head_grads[:order]:
expected_grad *= head_grad.asnumpy()
assert_almost_equal(
expected_grad, computed_grad.asnumpy(), rtol=rtol, atol=atol)
def arange_shape_like(y):
shape = y.shape
nelems = reduce(mul, shape)
x = nd.arange(nelems).reshape(shape)
return x
class NDArrayGenerator(object):
def __init__(self, dim, startdim=1):
self.dim = dim
self.curdim = startdim
def __iter__(self):
return self
@staticmethod
def gen(dimensions):
shape = rand_shape_nd(dimensions, 4)
nelems = reduce(mul, shape)
x = nd.arange(nelems).reshape(shape)
return x
def next(self):
return self.__next__()
def __next__(self):
if self.curdim > self.dim:
raise StopIteration
x = NDArrayGenerator.gen(self.curdim)
self.curdim += 1
return x
def flatten2d_right(x):
s_0 = x.shape[0]
s_1 = reduce(mul, x.shape[1:])
return x.reshape((s_0, s_1))
def flatten2d_left(x):
s_0 = reduce(mul, x.shape[:-1])
s_1 = x.shape[-1]
return x.reshape((s_0, s_1))
@with_seed()
def test_dense_backward_flatten():
print("2nd order gradient for Fully Connected, flatten=True")
for x in NDArrayGenerator(4,2):
hidden = random.randrange(1, 4)
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(hidden, flatten=True))
net.initialize(mxnet.initializer.Constant(.5))
x.attach_grad()
with autograd.record():
y = net.forward(x)
o_y = arange_shape_like(y)
params = [p.data() for p in net.collect_params().values()]
w = params[0]
b = params[1]
print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_x_grad = arange_shape_like(x_grad)
w_grad_grad = autograd.grad(heads=x_grad, variables=w,
head_grads=o_x_grad, create_graph=False)[0]
w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_w_grad = arange_shape_like(w_grad)
x_grad_grad = autograd.grad(heads=w_grad, variables=x,
head_grads=o_w_grad, create_graph=False)[0]
w_grad_e = nd.dot(o_y, x, transpose_a=True)
w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
x_grad_e = nd.dot(o_y, w)
x_grad_grad_e = nd.dot(o_y, o_w_grad)
assert w_grad.shape == w.shape
assert w_grad_grad.shape == w.shape
assert x_grad.shape == x.shape
assert x_grad_grad.shape == x.shape
w_grad_check = same(flatten2d_right(w_grad), flatten2d_right(w_grad_e))
w_grad_grad_check = same(flatten2d_right(w_grad_grad), flatten2d_right(w_grad_grad_e))
x_grad_check = same(flatten2d_right(x_grad), flatten2d_right(x_grad_e))
x_grad_grad_check = same(flatten2d_right(x_grad_grad), flatten2d_right(x_grad_grad_e))
assert x_grad_check
assert w_grad_check
assert x_grad_grad_check
assert w_grad_grad_check
@with_seed()
def test_dense_backward_no_flatten():
    """Check 2nd-order gradients of gluon.nn.Dense with flatten=False.

    Same scheme as the flatten=True test, but the comparison arrays are
    collapsed over their leading axes (flatten2d_left) because with
    flatten=False the dense layer acts on the last axis only.
    """
    print("2nd order gradient for Fully Connected, flatten=False")
    for x in NDArrayGenerator(5,3):
        hidden = random.randrange(1, 4)
        net = gluon.nn.Sequential()
        with net.name_scope():
            net.add(gluon.nn.Dense(hidden, flatten=False))
        net.initialize(mxnet.initializer.Constant(.5))
        x.attach_grad()
        with autograd.record():
            y = net.forward(x)
        # Head gradient for y (deterministic arange values, not ones).
        o_y = arange_shape_like(y)
        params = [p.data() for p in net.collect_params().values()]
        w = params[0]
        b = params[1]
        print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
        # First-order dy/dx, kept in the graph so it can be differentiated again.
        x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
                               create_graph=True, retain_graph=True)[0]
        o_x_grad = arange_shape_like(x_grad)
        # Second-order: d(dy/dx)/dw with head gradient o_x_grad.
        w_grad_grad = autograd.grad(heads=x_grad, variables=w,
                                    head_grads=o_x_grad, create_graph=False)[0]
        w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
                               create_graph=True, retain_graph=True)[0]
        o_w_grad = arange_shape_like(w_grad)
        # Second-order: d(dy/dw)/dx with head gradient o_w_grad.
        x_grad_grad = autograd.grad(heads=w_grad, variables=x,
                                    head_grads=o_w_grad, create_graph=False)[0]
        # Collapse leading axes so the 2D dot products below are well defined.
        o_y = flatten2d_left(o_y)
        x = flatten2d_left(x)
        o_x_grad = flatten2d_left(o_x_grad)
        o_w_grad = flatten2d_left(o_w_grad)
        # Analytic expectations for a dense layer applied to the last axis.
        w_grad_e = nd.dot(o_y, x, transpose_a=True)
        w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
        x_grad_e = nd.dot(o_y, w)
        x_grad_grad_e = nd.dot(o_y, o_w_grad)
        w_grad_check = same(flatten2d_left(w_grad), flatten2d_left(w_grad_e))
        w_grad_grad_check = same(flatten2d_left(w_grad_grad), flatten2d_left(w_grad_grad_e))
        x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e))
        x_grad_grad_check = same(flatten2d_left(x_grad_grad), flatten2d_left(x_grad_grad_e))
        assert x_grad_check
        assert w_grad_check
        assert x_grad_grad_check
        assert w_grad_grad_check
| true | true |
1c465dd88414760419bb1ffb6b9b757ef5581d36 | 627 | py | Python | runs/seq-nobro-iter03000.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null | runs/seq-nobro-iter03000.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null | runs/seq-nobro-iter03000.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null |
# Benchmark run configuration: sequential connections, Bro disabled on both
# ends, 3000 iterations per repetition.
# Write results to this file
OUTFILE = 'runs/seq-nobro-iter03000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 3000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE),
# i.e. SIZE = 5 means 10^5 bytes.
SIZE = 5
# Benchmark run configuration: sequential connections, Bro disabled on both
# ends, 3000 iterations per repetition.
# File the evaluation results are written to
OUTFILE = 'runs/seq-nobro-iter03000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1']
# Whether Bro is enabled on the source machines
SOURCE_BRO = [False]
# Target machines for the requests (the servers)
TARGET = ['10.0.0.2']
# Whether Bro is enabled on the target machines
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations per evaluation repetition
ITER = 3000
# Size of the file downloaded from the target, as 10^SIZE bytes
SIZE = 5
1c465eea594f4a857f85aba181b0c6af1aa42352 | 5,672 | py | Python | EvaluateAccuracy.py | sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent | 2c78f069d4f4d9be7197b5bff6df39fc239270e4 | [
"MIT"
] | 5 | 2021-01-21T05:04:33.000Z | 2021-12-19T09:49:35.000Z | EvaluateAccuracy.py | sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent | 2c78f069d4f4d9be7197b5bff6df39fc239270e4 | [
"MIT"
] | 2 | 2019-11-13T17:35:41.000Z | 2021-06-04T21:40:57.000Z | EvaluateAccuracy.py | sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent | 2c78f069d4f4d9be7197b5bff6df39fc239270e4 | [
"MIT"
] | 1 | 2021-12-19T09:49:29.000Z | 2021-12-19T09:49:29.000Z | # Evaluate precision of image classification in a given image region
# Instructions:
# a) Set folder of images in Image_Dir
# c) Set folder for ground truth Annotation in AnnotationDir
# The Label Maps should be saved as png image with same name as the corresponding image and png ending. The value of each pixel correspond to it class
# d) Set number of classes number in NUM_CLASSES
# e) Set path to trained model weights in Trained_model_path
# e) Run script
##########################################################################################################################################################################
import Reader as Reader
import torch
import numpy as np
import AttentionNet as Net
# --------------------------- Input parameters --------------------------------
UseCuda=True
ImageDir="ExampleData/TrainVal_Set/Images/"
AnnotationDir="ExampleData/TrainVal_Set/Annotations/"
Trained_model_path="logs/WeightRegionMaterialClassificationOpenSurface.torch" # Path of the trained model weights to evaluate
EvaluationFile=Trained_model_path.replace(".torch","Eval.xls")
NumClasses=44 # Number of classes; if -1, read the number of classes from the reader
BackgroundClass=0 # Marking for background/unknown class that will be ignored
# --------------------------- Create reader for the data set ------------------
Reader = Reader.Reader(ImageDir=ImageDir, AnnotationDir=AnnotationDir,NumClasses=NumClasses,BackgroundClass=BackgroundClass)
if NumClasses==-1: NumClasses = Reader.NumClass+1
# --------------------------- Load and initiate neural net --------------------
Net=Net.Net(NumClasses=NumClasses,UseGPU=UseCuda)
Net.AddAttententionLayer()
Net.load_state_dict(torch.load(Trained_model_path))
if UseCuda: Net.cuda()
Net.eval()
# --------------------------- Region size ranges (pixels) ---------------------
Sizes=[1000,2000,4000,8000,16000,32000,64000,128000,256000,500000,1000000] # upper bounds of the ROI-size bins, in pixels
NumSizes=len(Sizes)
# --------------------------- Evaluate net accuracy ---------------------------
TP=np.zeros([Reader.NumClass+1],dtype=np.float64) # True positive per class
FP=np.zeros([Reader.NumClass+1],dtype=np.float64) # False positive per class
FN=np.zeros([Reader.NumClass+1],dtype=np.float64) # False Negative per class
SumPred=np.zeros([Reader.NumClass+1],dtype=np.float64) # Ground-truth instance count per class
SzTP=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # True positive per class per size
SzFP=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # False positive per class per size
SzFN=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # False Negative per class per size
SzSumPred=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # Instance count per class per size bin
# Counter of processed images (printed as a progress indicator)
uu=0
while (Reader.ImageN<len(Reader.FileList)):
    Images, SegmentMask, Labels, LabelsOneHot = Reader.ReadNextImageClean()
    uu+=1
    print(uu)
    BatchSize = Images.shape[0]
    for i in range(BatchSize):
        # ............... Use net to make prediction for this ROI .............
        Prob, Lb = Net.forward(Images[i:i+1], ROI=SegmentMask[i:i+1],EvalMode=True) # Run net inference and get prediction
        PredLb = Lb.data.cpu().numpy()
        # ............... Evaluate accuracy per size range .....................
        LbSize=SegmentMask[i].sum()
        SzInd=-1
        for f,sz in enumerate(Sizes): # Find size range of the ROI region
            if LbSize<sz:
                SzInd=f
                break
        # NOTE(review): if LbSize exceeds the largest bin, SzInd stays -1 and
        # indexes the LAST column of the Sz* arrays — confirm this is intended.
        if PredLb[0] == Labels[i]:
            TP[Labels[i]] += 1
            SzTP[Labels[i],SzInd] += 1
        else:
            FN[Labels[i]] += 1
            FP[PredLb[0]] += 1
            SzFN[Labels[i],SzInd] += 1
            SzFP[PredLb[0],SzInd] += 1
        SumPred[Labels[i]] += 1
        SzSumPred[Labels[i],SzInd] += 1
# --------------------------- Write results to file ---------------------------
f = open(EvaluationFile, "w")
NrmF=len(SumPred)/(np.sum(SumPred>0)) # Normalization factor for classes with zero occurrences
txt="Mean Accuracy All Class Average =\t"+ str((TP/(SumPred+0.00000001)).mean()*NrmF*100)+"%"+"\r\n"
print(txt)
f.write(txt)
txt="Mean Accuracy Images =\t"+ str((TP.mean()/SumPred.mean())*100)+"%"+"\r\n"
print(txt)
f.write(txt)
print("\r\n=============================================================================\r\n")
# NOTE(review): the previous txt is printed/written a second time below —
# looks like accidental duplication; confirm before removing.
print(txt)
f.write(txt)
# Per-size-bin table header (tab-separated columns)
txt="SizeMax\tMeanClasses\tMeanGlobal\tNum Instances\tNumValidClasses\r\n"
print(txt)
f.write(txt)
# One row per ROI-size bin; bins with no instances are skipped.
for i,sz in enumerate(Sizes):
    if SzSumPred[:,i].sum()==0: continue
    NumValidClass=np.sum(SzSumPred[:, i] > 0)
    NrmF = len(SzSumPred[:,i]) / NumValidClass # Normalization factor for classes with zero occurrences
    txt=str(sz)+"\t"+str((SzTP[:,i]/(SzSumPred[:,i]+0.00001)).mean()*NrmF*100)+"%\t"+str(100*(SzTP[:,i]).mean()/(SzSumPred[:,i].mean()))+"%\t"+str(SzSumPred[:,i].sum())+"\t"+str(NumValidClass)+"\r\n"
    print(txt)
    f.write(txt)
f.close()
| 50.19469 | 199 | 0.542666 | true | true | |
1c465fea1d1ceec23b4315681cacca75310c7202 | 27,098 | py | Python | numpy/core/tests/test_casting_unittests.py | HanumanJat8698/numpy | cbec2c8054ea6150490b9e72eb051848b79344d1 | [
"BSD-3-Clause"
] | 1 | 2022-02-26T03:35:36.000Z | 2022-02-26T03:35:36.000Z | numpy/core/tests/test_casting_unittests.py | HanumanJat8698/numpy | cbec2c8054ea6150490b9e72eb051848b79344d1 | [
"BSD-3-Clause"
] | null | null | null | numpy/core/tests/test_casting_unittests.py | HanumanJat8698/numpy | cbec2c8054ea6150490b9e72eb051848b79344d1 | [
"BSD-3-Clause"
] | null | null | null | """
The tests exercise the casting machinery in a more low-level manner.
The reason is mostly to test a new implementation of the casting machinery.
Unlike most tests in NumPy, these are closer to unit-tests rather
than integration tests.
"""
import pytest
import textwrap
import enum
import itertools
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
# Simple skips object, parametric and long double (unsupported by struct)
_simple_chars = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
    # The cast table was generated with 64bit linux in mind, where "l" and
    # "q" coincide; on platforms where they differ, drop l and L.
    _simple_chars = "".join(c for c in _simple_chars if c not in "lL")
simple_dtypes = [type(np.dtype(c)) for c in _simple_chars]
def simple_dtype_instances():
    """Yield pytest params for each simple dtype, native and byteswapped."""
    for DType in simple_dtypes:
        native = DType()
        yield pytest.param(native, id=str(native))
        # Dtypes without a byte order ("|") have no distinct swapped variant.
        if native.byteorder != "|":
            swapped = native.newbyteorder()
            yield pytest.param(swapped, id=str(swapped))
def get_expected_stringlength(dtype):
    """Returns the string length when casting the basic dtypes to strings.
    """
    if dtype == np.bool_:
        return 5
    if dtype.kind in "iu":
        # Digits needed to print the extreme value of each integer width.
        digits = {1: 3, 2: 5, 4: 10, 8: 20}
        if dtype.itemsize not in digits:
            raise AssertionError(f"did not find expected length for {dtype}")
        # Signed integers need one extra character for the sign.
        return digits[dtype.itemsize] + (1 if dtype.kind == "i" else 0)
    # Note: Can't do dtype comparison for longdouble on windows
    if dtype.char == "g":
        return 48
    if dtype.char == "G":
        return 2 * 48
    if dtype.kind == "f":
        return 32  # also for half apparently.
    if dtype.kind == "c":
        return 2 * 32
    raise AssertionError(f"did not find expected length for {dtype}")
class Casting(enum.IntEnum):
    """Casting levels as returned by ``CastingImpl._resolve_descriptors``.

    ``cast_is_view`` is a flag bit that may be OR'ed onto a level to indicate
    the cast can be performed as a view (see its use in the tests below).
    """
    no = 0
    equiv = 1
    safe = 2
    same_kind = 3
    unsafe = 4
    cast_is_view = 1 << 16  # flag bit, not an ordered level
def _get_cancast_table():
    """Build ``{from_DType: {to_DType: casting-level}}`` from the table below.

    Row = source dtype char, column = destination dtype char.
    Symbols: "#" equiv, "=" safe, "~" same_kind, "." unsafe, " " -> -1.
    """
    table = textwrap.dedent("""
    X ? b h i l q B H I L Q e f d g F D G S U V O M m
    ? # = = = = = = = = = = = = = = = = = = = = = . =
    b . # = = = = . . . . . = = = = = = = = = = = . =
    h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
    i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
    l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
    q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
    B . ~ = = = = # = = = = = = = = = = = = = = = . =
    H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
    I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
    L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
    Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
    e . . . . . . . . . . . # = = = = = = = = = = . .
    f . . . . . . . . . . . ~ # = = = = = = = = = . .
    d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
    g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
    F . . . . . . . . . . . . . . . # = = = = = = . .
    D . . . . . . . . . . . . . . . ~ # = = = = = . .
    G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
    S . . . . . . . . . . . . . . . . . . # = = = . .
    U . . . . . . . . . . . . . . . . . . . # = = . .
    V . . . . . . . . . . . . . . . . . . . . # = . .
    O . . . . . . . . . . . . . . . . . . . . = # . .
    M . . . . . . . . . . . . . . . . . . . . = = # .
    m . . . . . . . . . . . . . . . . . . . . = = . #
    """).strip().split("\n")
    # Header row: every second character (after "X ") is a dtype char.
    dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
                    "=": Casting.safe, "#": Casting.equiv,
                    " ": -1}
    cancast = {}
    for from_dt, row in zip(dtypes, table[1:]):
        cancast[from_dt] = {}
        for to_dt, c in zip(dtypes, row[2::2]):
            cancast[from_dt][to_dt] = convert_cast[c]
    return cancast
CAST_TABLE = _get_cancast_table()
class TestChanges:
    """
    These test cases exercise some behaviour changes
    """
    @pytest.mark.parametrize("string", ["S", "U"])
    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
    def test_float_to_string(self, floating, string):
        assert np.can_cast(floating, string)
        # 100 is long enough to hold any formatted floating
        assert np.can_cast(floating, f"{string}100")
    def test_to_void(self):
        # But in general, we do consider these safe:
        assert np.can_cast("d", "V")
        assert np.can_cast("S20", "V")
        # Do not consider it a safe cast if the void is too small:
        assert not np.can_cast("d", "V1")
        assert not np.can_cast("S20", "V1")
        assert not np.can_cast("U1", "V1")
        # Structured to unstructured is just like any other:
        assert np.can_cast("d,i", "V", casting="same_kind")
        # Unstructured void to unstructured is actually no cast at all:
        assert np.can_cast("V3", "V", casting="no")
        assert np.can_cast("V0", "V", casting="no")
class TestCasting:
    """Exercise the low-level casting machinery through the private
    ``CastingImpl._resolve_descriptors`` and ``_simple_strided_call`` hooks.
    """
    size = 1500  # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
    def get_data(self, dtype1, dtype2):
        """Return (arr1, arr2, values) filled with small random integers.

        If ``dtype2`` is None only (arr1, values) is returned.  Values are
        written via item assignment so no casting loop is exercised here.
        """
        if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
            length = self.size // dtype1.itemsize
        else:
            length = self.size // dtype2.itemsize
        # Assume that the base array is well enough aligned for all inputs.
        arr1 = np.empty(length, dtype=dtype1)
        assert arr1.flags.c_contiguous
        assert arr1.flags.aligned
        values = [random.randrange(-128, 128) for _ in range(length)]
        for i, value in enumerate(values):
            # Use item assignment to ensure this is not using casting:
            arr1[i] = value
        if dtype2 is None:
            if dtype1.char == "?":
                values = [bool(v) for v in values]
            return arr1, values
        if dtype2.char == "?":
            values = [bool(v) for v in values]
        arr2 = np.empty(length, dtype=dtype2)
        assert arr2.flags.c_contiguous
        assert arr2.flags.aligned
        for i, value in enumerate(values):
            # Use item assignment to ensure this is not using casting:
            arr2[i] = value
        return arr1, arr2, values
    def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
        """
        Returns a copy of arr1 that may be non-contiguous or unaligned, and a
        matching array for arr2 (although not a copy).
        """
        if contig:
            stride1 = arr1.dtype.itemsize
            stride2 = arr2.dtype.itemsize
        elif aligned:
            # Aligned but non-contiguous: double stride.
            stride1 = 2 * arr1.dtype.itemsize
            stride2 = 2 * arr2.dtype.itemsize
        else:
            # Unaligned (and non-contiguous): itemsize + 1 stride.
            stride1 = arr1.dtype.itemsize + 1
            stride2 = arr2.dtype.itemsize + 1
        max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
        max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
        from_bytes = np.zeros(max_size1, dtype=np.uint8)
        to_bytes = np.zeros(max_size2, dtype=np.uint8)
        # Sanity check that the above is large enough:
        assert stride1 * len(arr1) <= from_bytes.nbytes
        assert stride2 * len(arr2) <= to_bytes.nbytes
        if aligned:
            new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
                              arr1.shape, (stride1,))
            new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
                              arr2.shape, (stride2,))
        else:
            # Offset by one byte to force misalignment.
            new1 = as_strided(from_bytes[1:].view(arr1.dtype),
                              arr1.shape, (stride1,))
            new2 = as_strided(to_bytes[1:].view(arr2.dtype),
                              arr2.shape, (stride2,))
        new1[...] = arr1
        if not contig:
            # Ensure we did not overwrite bytes that should not be written:
            offset = arr1.dtype.itemsize if aligned else 0
            buf = from_bytes[offset::stride1].tobytes()
            assert buf.count(b"\0") == len(buf)
        if contig:
            assert new1.flags.c_contiguous
            assert new2.flags.c_contiguous
        else:
            assert not new1.flags.c_contiguous
            assert not new2.flags.c_contiguous
        if aligned:
            assert new1.flags.aligned
            assert new2.flags.aligned
        else:
            assert not new1.flags.aligned or new1.dtype.alignment == 1
            assert not new2.flags.aligned or new2.dtype.alignment == 1
        return new1, new2
    @pytest.mark.parametrize("from_Dt", simple_dtypes)
    def test_simple_cancast(self, from_Dt):
        """Resolved descriptors/casting levels must agree with CAST_TABLE."""
        for to_Dt in simple_dtypes:
            cast = get_castingimpl(from_Dt, to_Dt)
            for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
                default = cast._resolve_descriptors((from_dt, None))[1][1]
                assert default == to_Dt()
                del default
                for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
                    casting, (from_res, to_res) = cast._resolve_descriptors(
                        (from_dt, to_dt))
                    assert(type(from_res) == from_Dt)
                    assert(type(to_res) == to_Dt)
                    if casting & Casting.cast_is_view:
                        # If a view is acceptable, this is "no" casting
                        # and byte order must be matching.
                        assert casting == Casting.no | Casting.cast_is_view
                        # The above table lists this as "equivalent"
                        assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
                        # Note that to_res may not be the same as from_dt
                        assert from_res.isnative == to_res.isnative
                    else:
                        if from_Dt == to_Dt:
                            # Note that to_res may not be the same as from_dt
                            assert from_res.isnative != to_res.isnative
                        assert casting == CAST_TABLE[from_Dt][to_Dt]
                    if from_Dt is to_Dt:
                        assert(from_dt is from_res)
                        assert(to_dt is to_res)
    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
    @pytest.mark.parametrize("from_dt", simple_dtype_instances())
    def test_simple_direct_casts(self, from_dt):
        """
        This test checks numeric direct casts for dtypes supported also by the
        struct module (plus complex). It tries to test a wide range of
        inputs, but skips over possibly undefined behaviour (e.g. int rollover).
        Longdouble and CLongdouble are tested, but only using double precision.
        If this test creates issues, it should possibly just be simplified
        or even removed (checking whether unaligned/non-contiguous casts give
        the same results is useful, though).
        """
        for to_dt in simple_dtype_instances():
            to_dt = to_dt.values[0]
            cast = get_castingimpl(type(from_dt), type(to_dt))
            casting, (from_res, to_res) = cast._resolve_descriptors(
                (from_dt, to_dt))
            if from_res is not from_dt or to_res is not to_dt:
                # Do not test this case, it is handled in multiple steps,
                # each of which is tested individually.
                return
            safe = (casting & ~Casting.cast_is_view) <= Casting.safe
            del from_res, to_res, casting
            arr1, arr2, values = self.get_data(from_dt, to_dt)
            cast._simple_strided_call((arr1, arr2))
            # Check via python list
            assert arr2.tolist() == values
            # Check that the same results are achieved for strided loops
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
            # Check if alignment makes a difference, but only if supported
            # and only if the alignment can be wrong
            if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
                    not cast._supports_unaligned):
                return
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
            del arr1_o, arr2_o, cast
    @pytest.mark.parametrize("from_Dt", simple_dtypes)
    def test_numeric_to_times(self, from_Dt):
        """Casts from numeric dtypes to datetime/timedelta dtypes."""
        # We currently only implement contiguous loops, so only need to
        # test those.
        from_dt = from_Dt()
        time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
                       np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
        for time_dt in time_dtypes:
            cast = get_castingimpl(type(from_dt), type(time_dt))
            casting, (from_res, to_res) = cast._resolve_descriptors(
                (from_dt, time_dt))
            assert from_res is from_dt
            assert to_res is time_dt
            del from_res, to_res
            assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
            int64_dt = np.dtype(np.int64)
            arr1, arr2, values = self.get_data(from_dt, int64_dt)
            arr2 = arr2.view(time_dt)
            arr2[...] = np.datetime64("NaT")
            if time_dt == np.dtype("M8"):
                # This is a bit of a strange path, and could probably be removed
                arr1[-1] = 0  # ensure at least one value is not NaT
                # The cast currently succeeds, but the values are invalid:
                cast._simple_strided_call((arr1, arr2))
                with pytest.raises(ValueError):
                    str(arr2[-1])  # e.g. conversion to string fails
                return
            cast._simple_strided_call((arr1, arr2))
            assert [int(v) for v in arr2.tolist()] == values
            # Check that the same results are achieved for strided loops
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
    @pytest.mark.parametrize(
            ["from_dt", "to_dt", "expected_casting", "nom", "denom"],
            [("M8[ns]", None,
              Casting.no | Casting.cast_is_view, 1, 1),
             (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
             ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
             ("M8[ms]", "M8", Casting.unsafe, 1, 1),  # should be invalid cast
             ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
             ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
             ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
             ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
             ("M8[4D]", "M8[1M]", Casting.same_kind, None,
              # give full values based on NumPy 1.19.x
              [-2**63, 0, -1, 1314, -1315, 564442610]),
             ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
             (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
             ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
             ("m8[ms]", "m8", Casting.unsafe, 1, 1),  # should be invalid cast
             ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
             ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
             ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
             ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
             ("m8[4D]", "m8[1M]", Casting.unsafe, None,
              # give full values based on NumPy 1.19.x
              [-2**63, 0, 0, 1314, -1315, 564442610])])
    def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
        """Datetime/timedelta unit conversions: casting level and values.

        ``nom``/``denom`` give the expected integer scale factor; when ``nom``
        is None, ``denom`` instead holds the full expected output values.
        """
        from_dt = np.dtype(from_dt)
        if to_dt is not None:
            to_dt = np.dtype(to_dt)
        # Test a few values for casting (results generated with NumPy 1.19)
        values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
        assert values.dtype.byteorder == from_dt.byteorder
        assert np.isnat(values.view(from_dt)[0])
        DType = type(from_dt)
        cast = get_castingimpl(DType, DType)
        casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
        assert from_res is from_dt
        assert to_res is to_dt or to_dt is None
        assert casting == expected_casting
        if nom is not None:
            expected_out = (values * nom // denom).view(to_res)
            expected_out[0] = "NaT"
        else:
            expected_out = np.empty_like(values)
            expected_out[...] = denom
            expected_out = expected_out.view(to_dt)
        orig_arr = values.view(from_dt)
        orig_out = np.empty_like(expected_out)
        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
            # Casting from non-generic to generic units is an error and should
            # probably be reported as an invalid cast earlier.
            with pytest.raises(ValueError):
                cast._simple_strided_call((orig_arr, orig_out))
            return
        # NOTE(review): ``[True, True]`` repeats the same variation twice and
        # looks like a typo for ``[True, False]`` (the unaligned/strided
        # variants are never exercised) — verify before changing.
        for aligned in [True, True]:
            for contig in [True, True]:
                arr, out = self.get_data_variation(
                    orig_arr, orig_out, aligned, contig)
                out[...] = 0
                cast._simple_strided_call((arr, out))
                assert_array_equal(out.view("int64"), expected_out.view("int64"))
    def string_with_modified_length(self, dtype, change_length):
        """Return a string dtype like ``dtype`` with its length changed by
        ``change_length`` characters (not bytes)."""
        fact = 1 if dtype.char == "S" else 4
        length = dtype.itemsize // fact + change_length
        return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
    @pytest.mark.parametrize("other_DT", simple_dtypes)
    @pytest.mark.parametrize("string_char", ["S", "U"])
    def test_string_cancast(self, other_DT, string_char):
        """Cast-safety of numeric<->string casts and resolved string lengths."""
        fact = 1 if string_char == "S" else 4
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(other_DT, string_DT)
        other_dt = other_DT()
        expected_length = get_expected_stringlength(other_dt)
        string_dt = np.dtype(f"{string_char}{expected_length}")
        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
        assert res_dt.itemsize == expected_length * fact
        assert safety == Casting.safe  # we consider to string casts "safe"
        assert isinstance(res_dt, string_DT)
        # These casts currently implement changing the string length, so
        # check the cast-safety for too long/fixed string lengths:
        for change_length in [-1, 0, 1]:
            if change_length >= 0:
                expected_safety = Casting.safe
            else:
                expected_safety = Casting.same_kind
            to_dt = self.string_with_modified_length(string_dt, change_length)
            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
            assert res_dt is to_dt
            assert safety == expected_safety
        # The opposite direction is always considered unsafe:
        cast = get_castingimpl(string_DT, other_DT)
        safety, _ = cast._resolve_descriptors((string_dt, other_dt))
        assert safety == Casting.unsafe
        cast = get_castingimpl(string_DT, other_DT)
        safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
        assert safety == Casting.unsafe
        assert other_dt is res_dt  # returns the singleton for simple dtypes
    @pytest.mark.parametrize("string_char", ["S", "U"])
    @pytest.mark.parametrize("other_dt", simple_dtype_instances())
    def test_simple_string_casts_roundtrip(self, other_dt, string_char):
        """
        Tests casts from and to string by checking the roundtripping property.
        The test also covers some string to string casts (but not all).
        If this test creates issues, it should possibly just be simplified
        or even removed (checking whether unaligned/non-contiguous casts give
        the same results is useful, though).
        """
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(type(other_dt), string_DT)
        cast_back = get_castingimpl(string_DT, type(other_dt))
        _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
        if res_other_dt is not other_dt:
            # do not support non-native byteorder, skip test in that case
            assert other_dt.byteorder != res_other_dt.byteorder
            return
        orig_arr, values = self.get_data(other_dt, None)
        str_arr = np.zeros(len(orig_arr), dtype=string_dt)
        string_dt_short = self.string_with_modified_length(string_dt, -1)
        str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
        string_dt_long = self.string_with_modified_length(string_dt, 1)
        str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
        assert not cast._supports_unaligned  # if support is added, should test
        assert not cast_back._supports_unaligned
        for contig in [True, False]:
            other_arr, str_arr = self.get_data_variation(
                orig_arr, str_arr, True, contig)
            _, str_arr_short = self.get_data_variation(
                orig_arr, str_arr_short.copy(), True, contig)
            _, str_arr_long = self.get_data_variation(
                orig_arr, str_arr_long, True, contig)
            cast._simple_strided_call((other_arr, str_arr))
            cast._simple_strided_call((other_arr, str_arr_short))
            assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
            cast._simple_strided_call((other_arr, str_arr_long))
            assert_array_equal(str_arr, str_arr_long)
            if other_dt.kind == "b":
                # Booleans do not roundtrip
                continue
            other_arr[...] = 0
            cast_back._simple_strided_call((str_arr, other_arr))
            assert_array_equal(orig_arr, other_arr)
            other_arr[...] = 0
            cast_back._simple_strided_call((str_arr_long, other_arr))
            assert_array_equal(orig_arr, other_arr)
    @pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
    @pytest.mark.parametrize("string_char", ["S", "U"])
    def test_string_to_string_cancast(self, other_dt, string_char):
        """Cast-safety of string<->string casts for all length variations."""
        other_dt = np.dtype(other_dt)
        fact = 1 if string_char == "S" else 4
        div = 1 if other_dt.char == "S" else 4
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(type(other_dt), string_DT)
        expected_length = other_dt.itemsize // div
        string_dt = np.dtype(f"{string_char}{expected_length}")
        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
        assert res_dt.itemsize == expected_length * fact
        assert isinstance(res_dt, string_DT)
        if other_dt.char == string_char:
            if other_dt.isnative:
                expected_safety = Casting.no | Casting.cast_is_view
            else:
                expected_safety = Casting.equiv
        elif string_char == "U":
            # Bytes -> unicode is safe; unicode -> bytes is unsafe.
            expected_safety = Casting.safe
        else:
            expected_safety = Casting.unsafe
        assert expected_safety == safety
        for change_length in [-1, 0, 1]:
            to_dt = self.string_with_modified_length(string_dt, change_length)
            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
            assert res_dt is to_dt
            if expected_safety == Casting.unsafe:
                assert safety == expected_safety
            elif change_length < 0:
                assert safety == Casting.same_kind
            elif change_length == 0:
                assert safety == expected_safety
            elif change_length > 0:
                assert safety == Casting.safe
    @pytest.mark.parametrize("order1", [">", "<"])
    @pytest.mark.parametrize("order2", [">", "<"])
    def test_unicode_byteswapped_cast(self, order1, order2):
        # Very specific tests (not using the castingimpl directly) that
        # exercise unicode byteswaps, including for unaligned array data.
        dtype1 = np.dtype(f"{order1}U30")
        dtype2 = np.dtype(f"{order2}U30")
        # One-byte offset into a uint8 buffer forces misaligned views.
        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
        if dtype1.alignment != 1:
            # alignment should always be >1, but skip the check if not
            assert not data1.flags.aligned
            assert not data2.flags.aligned
        element = "this is a ünicode string‽"
        data1[()] = element
        # Test both `data1` and `data1.copy()` (which should be aligned)
        for data in [data1, data1.copy()]:
            data2[...] = data1
            assert data2[()] == element
            assert data2.copy()[()] == element
    def test_void_to_string_special_case(self):
        # Cover a small special case in void to string casting that could
        # probably just as well be turned into an error (compare
        # `test_object_to_parametric_internal_error` below).
        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
    def test_object_to_parametric_internal_error(self):
        # We reject casting from object to a parametric type, without
        # figuring out the correct instance first.
        object_dtype = type(np.dtype(object))
        other_dtype = type(np.dtype(str))
        cast = get_castingimpl(object_dtype, other_dtype)
        with pytest.raises(TypeError,
                    match="casting from object to the parametric DType"):
            cast._resolve_descriptors((np.dtype("O"), None))
    @pytest.mark.parametrize("casting", ["no", "unsafe"])
    def test_void_and_structured_with_subarray(self, casting):
        # test case corresponding to gh-19325
        dtype = np.dtype([("foo", "<f4", (3, 2))])
        expected = casting == "unsafe"
        assert np.can_cast("V4", dtype, casting=casting) == expected
        assert np.can_cast(dtype, "V4", casting=casting) == expected
| 41.057576 | 84 | 0.565983 |
import pytest
import textwrap
import enum
import itertools
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
    """Yield pytest params for every simple dtype instance.

    For each DType class, the native-order instance is yielded first; when
    the dtype has a byte order at all, the byteswapped instance follows.
    """
    for dtype_class in simple_dtypes:
        native = dtype_class()
        yield pytest.param(native, id=str(native))
        if native.byteorder != "|":
            swapped = native.newbyteorder()
            yield pytest.param(swapped, id=str(swapped))
def get_expected_stringlength(dtype):
    """Return the string length NumPy reserves when casting *dtype* to str.

    Booleans need 5 characters ("False"); integers need enough digits for
    their extreme values (plus a sign for signed kinds); floating/complex
    types use fixed repr budgets.
    """
    if dtype == np.bool_:
        return 5
    if dtype.kind in "iu":
        digits = {1: 3, 2: 5, 4: 10, 8: 20}.get(dtype.itemsize)
        if digits is None:
            raise AssertionError(f"did not find expected length for {dtype}")
        # Signed integers need one extra character for the minus sign.
        return digits + 1 if dtype.kind == "i" else digits
    # Long double (and its complex counterpart) get a larger budget;
    # check the char before the kind so they do not fall into "f"/"c".
    if dtype.char == "g":
        return 48
    if dtype.char == "G":
        return 48 * 2
    if dtype.kind == "f":
        return 32  # also for half apparently.
    if dtype.kind == "c":
        return 32 * 2
    raise AssertionError(f"did not find expected length for {dtype}")
class Casting(enum.IntEnum):
    """Python mirror of NumPy's NPY_CASTING cast-safety levels."""
    no = 0
    equiv = 1
    safe = 2
    same_kind = 3
    unsafe = 4
    # Flag OR'ed into a level when the cast can be performed as a view:
    cast_is_view = 1 << 16
def _get_cancast_table():
    """Parse the ASCII cast-safety table into ``{from_DType: {to_DType: level}}``.

    Row/column headers are dtype characters; cell symbols map to Casting
    levels via ``convert_cast`` ("." unsafe, "~" same_kind, "=" safe,
    "#" equiv).
    """
    table = textwrap.dedent("""
        X ? b h i l q B H I L Q e f d g F D G S U V O M m
        ? # = = = = = = = = = = = = = = = = = = = = = . =
        b . # = = = = . . . . . = = = = = = = = = = = . =
        h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
        i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
        l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
        q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
        B . ~ = = = = # = = = = = = = = = = = = = = = . =
        H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
        I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
        L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
        Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
        e . . . . . . . . . . . # = = = = = = = = = = . .
        f . . . . . . . . . . . ~ # = = = = = = = = = . .
        d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
        g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
        F . . . . . . . . . . . . . . . # = = = = = = . .
        D . . . . . . . . . . . . . . . ~ # = = = = = . .
        G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
        S . . . . . . . . . . . . . . . . . . # = = = . .
        U . . . . . . . . . . . . . . . . . . . # = = . .
        V . . . . . . . . . . . . . . . . . . . . # = . .
        O . . . . . . . . . . . . . . . . . . . . = # . .
        M . . . . . . . . . . . . . . . . . . . . = = # .
        m . . . . . . . . . . . . . . . . . . . . = = . #
        """).strip().split("\n")
    # [2::2] skips the "X" corner label and the single-space separators.
    dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
                    "=": Casting.safe, "#": Casting.equiv,
                    " ": -1}
    cancast = {}
    for from_dt, row in zip(dtypes, table[1:]):
        cancast[from_dt] = {}
        for to_dt, c in zip(dtypes, row[2::2]):
            cancast[from_dt][to_dt] = convert_cast[c]
    return cancast
CAST_TABLE = _get_cancast_table()
class TestChanges:
    """Spot-checks of `np.can_cast` results for float->string and ->void casts."""
    @pytest.mark.parametrize("string", ["S", "U"])
    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
    def test_float_to_string(self, floating, string):
        assert np.can_cast(floating, string)
        # 100 is long enough to hold any formatted floating
        assert np.can_cast(floating, f"{string}100")
    def test_to_void(self):
        # But in general, we do consider these safe:
        assert np.can_cast("d", "V")
        assert np.can_cast("S20", "V")
        # Do not consider it a safe cast if the void is too small:
        assert not np.can_cast("d", "V1")
        assert not np.can_cast("S20", "V1")
        assert not np.can_cast("U1", "V1")
        # Structured to unstructured is just like any other:
        assert np.can_cast("d,i", "V", casting="same_kind")
        # Unstructured void to unstructured is actually no cast at all:
        assert np.can_cast("V3", "V", casting="no")
        assert np.can_cast("V0", "V", casting="no")
class TestCasting:
    """Exercise the low-level casting implementations (``_get_castingimpl``).

    Tests resolve descriptors between dtype pairs, run the strided casting
    loops under varying alignment/contiguity, and compare the reported
    cast-safety against ``CAST_TABLE``.
    """
    size = 1500  # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
    def get_data(self, dtype1, dtype2):
        """Create filled arrays of both dtypes plus the raw Python values."""
        if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
            length = self.size // dtype1.itemsize
        else:
            length = self.size // dtype2.itemsize
        # Assume that the base array is well enough aligned for all inputs.
        arr1 = np.empty(length, dtype=dtype1)
        assert arr1.flags.c_contiguous
        assert arr1.flags.aligned
        values = [random.randrange(-128, 128) for _ in range(length)]
        for i, value in enumerate(values):
            # Use item assignment to ensure this is not using casting:
            arr1[i] = value
        if dtype2 is None:
            if dtype1.char == "?":
                values = [bool(v) for v in values]
            return arr1, values
        if dtype2.char == "?":
            values = [bool(v) for v in values]
        arr2 = np.empty(length, dtype=dtype2)
        assert arr2.flags.c_contiguous
        assert arr2.flags.aligned
        for i, value in enumerate(values):
            # Use item assignment to ensure this is not using casting:
            arr2[i] = value
        return arr1, arr2, values
    def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
        """Return views of the same data with the requested alignment/contiguity."""
        if contig:
            stride1 = arr1.dtype.itemsize
            stride2 = arr2.dtype.itemsize
        elif aligned:
            stride1 = 2 * arr1.dtype.itemsize
            stride2 = 2 * arr2.dtype.itemsize
        else:
            # Odd strides over a byte buffer force misalignment.
            stride1 = arr1.dtype.itemsize + 1
            stride2 = arr2.dtype.itemsize + 1
        max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
        max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
        from_bytes = np.zeros(max_size1, dtype=np.uint8)
        to_bytes = np.zeros(max_size2, dtype=np.uint8)
        # Sanity check that the above is large enough:
        assert stride1 * len(arr1) <= from_bytes.nbytes
        assert stride2 * len(arr2) <= to_bytes.nbytes
        if aligned:
            new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
                              arr1.shape, (stride1,))
            new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
                              arr2.shape, (stride2,))
        else:
            new1 = as_strided(from_bytes[1:].view(arr1.dtype),
                              arr1.shape, (stride1,))
            new2 = as_strided(to_bytes[1:].view(arr2.dtype),
                              arr2.shape, (stride2,))
        new1[...] = arr1
        if not contig:
            # Ensure we did not overwrite bytes that should not be written:
            offset = arr1.dtype.itemsize if aligned else 0
            buf = from_bytes[offset::stride1].tobytes()
            assert buf.count(b"\0") == len(buf)
        if contig:
            assert new1.flags.c_contiguous
            assert new2.flags.c_contiguous
        else:
            assert not new1.flags.c_contiguous
            assert not new2.flags.c_contiguous
        if aligned:
            assert new1.flags.aligned
            assert new2.flags.aligned
        else:
            assert not new1.flags.aligned or new1.dtype.alignment == 1
            assert not new2.flags.aligned or new2.dtype.alignment == 1
        return new1, new2
    @pytest.mark.parametrize("from_Dt", simple_dtypes)
    def test_simple_cancast(self, from_Dt):
        """Resolve descriptors between all simple dtype pairs and check safety."""
        for to_Dt in simple_dtypes:
            cast = get_castingimpl(from_Dt, to_Dt)
            for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
                default = cast._resolve_descriptors((from_dt, None))[1][1]
                assert default == to_Dt()
                del default
                for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
                    casting, (from_res, to_res) = cast._resolve_descriptors(
                        (from_dt, to_dt))
                    assert(type(from_res) == from_Dt)
                    assert(type(to_res) == to_Dt)
                    if casting & Casting.cast_is_view:
                        # If a view is acceptable, this is "no" casting
                        # and byte order must be matching.
                        assert casting == Casting.no | Casting.cast_is_view
                        # The above table lists this as "equivalent"
                        assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
                        # Note that to_res may not be the same as from_dt
                        assert from_res.isnative == to_res.isnative
                    else:
                        if from_Dt == to_Dt:
                            # Note that to_res may not be the same as from_dt
                            assert from_res.isnative != to_res.isnative
                        assert casting == CAST_TABLE[from_Dt][to_Dt]
                    if from_Dt is to_Dt:
                        assert(from_dt is from_res)
                        assert(to_dt is to_res)
    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
    @pytest.mark.parametrize("from_dt", simple_dtype_instances())
    def test_simple_direct_casts(self, from_dt):
        """Run strided cast loops for direct (single-step) simple-dtype casts."""
        for to_dt in simple_dtype_instances():
            to_dt = to_dt.values[0]
            cast = get_castingimpl(type(from_dt), type(to_dt))
            casting, (from_res, to_res) = cast._resolve_descriptors(
                (from_dt, to_dt))
            if from_res is not from_dt or to_res is not to_dt:
                # Do not test this case, it is handled in multiple steps,
                # each of which is tested individually.
                return
            safe = (casting & ~Casting.cast_is_view) <= Casting.safe
            del from_res, to_res, casting
            arr1, arr2, values = self.get_data(from_dt, to_dt)
            cast._simple_strided_call((arr1, arr2))
            # Check via python list
            assert arr2.tolist() == values
            # Check that the same results are achieved for strided loops
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
            # Check if alignment makes a difference, but only if supported
            # and only if the alignment can be wrong
            if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
                    not cast._supports_unaligned):
                return
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
            del arr1_o, arr2_o, cast
    @pytest.mark.parametrize("from_Dt", simple_dtypes)
    def test_numeric_to_times(self, from_Dt):
        """Cast numeric dtypes to datetime64/timedelta64 and check the values."""
        # We currently only implement contiguous loops, so only need to
        # test those.
        from_dt = from_Dt()
        time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
                       np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
        for time_dt in time_dtypes:
            cast = get_castingimpl(type(from_dt), type(time_dt))
            casting, (from_res, to_res) = cast._resolve_descriptors(
                (from_dt, time_dt))
            assert from_res is from_dt
            assert to_res is time_dt
            del from_res, to_res
            assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
            int64_dt = np.dtype(np.int64)
            arr1, arr2, values = self.get_data(from_dt, int64_dt)
            arr2 = arr2.view(time_dt)
            arr2[...] = np.datetime64("NaT")
            if time_dt == np.dtype("M8"):
                # This is a bit of a strange path, and could probably be removed
                arr1[-1] = 0  # ensure at least one value is not NaT
                # The cast currently succeeds, but the values are invalid:
                cast._simple_strided_call((arr1, arr2))
                with pytest.raises(ValueError):
                    str(arr2[-1])  # e.g. conversion to string fails
                return
            cast._simple_strided_call((arr1, arr2))
            assert [int(v) for v in arr2.tolist()] == values
            # Check that the same results are achieved for strided loops
            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
            cast._simple_strided_call((arr1_o, arr2_o))
            assert_array_equal(arr2_o, arr2)
            assert arr2_o.tobytes() == arr2.tobytes()
    @pytest.mark.parametrize(
        ["from_dt", "to_dt", "expected_casting", "nom", "denom"],
        [("M8[ns]", None,
          Casting.no | Casting.cast_is_view, 1, 1),
         (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
         ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
         ("M8[ms]", "M8", Casting.unsafe, 1, 1),  # should be invalid cast
         ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
         ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
         ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
         ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
         ("M8[4D]", "M8[1M]", Casting.same_kind, None,
          # give full values based on NumPy 1.19.x
          [-2**63, 0, -1, 1314, -1315, 564442610]),
         ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
         (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
         ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
         ("m8[ms]", "m8", Casting.unsafe, 1, 1),  # should be invalid cast
         ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
         ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
         ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
         ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
         ("m8[4D]", "m8[1M]", Casting.unsafe, None,
          # give full values based on NumPy 1.19.x
          [-2**63, 0, 0, 1314, -1315, 564442610])])
    def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
        """Check datetime/timedelta unit conversions against 1.19 reference values."""
        from_dt = np.dtype(from_dt)
        if to_dt is not None:
            to_dt = np.dtype(to_dt)
        # Test a few values for casting (results generated with NumPy 1.19)
        values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
        assert values.dtype.byteorder == from_dt.byteorder
        assert np.isnat(values.view(from_dt)[0])
        DType = type(from_dt)
        cast = get_castingimpl(DType, DType)
        casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
        assert from_res is from_dt
        assert to_res is to_dt or to_dt is None
        assert casting == expected_casting
        if nom is not None:
            expected_out = (values * nom // denom).view(to_res)
            expected_out[0] = "NaT"
        else:
            expected_out = np.empty_like(values)
            expected_out[...] = denom
            expected_out = expected_out.view(to_dt)
        orig_arr = values.view(from_dt)
        orig_out = np.empty_like(expected_out)
        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
            # Casting from non-generic to generic units is an error and should
            # probably be reported as an invalid cast earlier.
            with pytest.raises(ValueError):
                cast._simple_strided_call((orig_arr, orig_out))
            return
        # NOTE(review): both loops repeat True -- [True, False] was likely
        # intended; as written the unaligned/strided variants never run.
        # Confirm against upstream before changing.
        for aligned in [True, True]:
            for contig in [True, True]:
                arr, out = self.get_data_variation(
                    orig_arr, orig_out, aligned, contig)
                out[...] = 0
                cast._simple_strided_call((arr, out))
                assert_array_equal(out.view("int64"), expected_out.view("int64"))
    def string_with_modified_length(self, dtype, change_length):
        """Return *dtype* with its character length changed by *change_length*."""
        fact = 1 if dtype.char == "S" else 4
        length = dtype.itemsize // fact + change_length
        return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
    @pytest.mark.parametrize("other_DT", simple_dtypes)
    @pytest.mark.parametrize("string_char", ["S", "U"])
    def test_string_cancast(self, other_DT, string_char):
        """Check cast-safety when resolving simple dtype <-> string casts."""
        fact = 1 if string_char == "S" else 4
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(other_DT, string_DT)
        other_dt = other_DT()
        expected_length = get_expected_stringlength(other_dt)
        string_dt = np.dtype(f"{string_char}{expected_length}")
        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
        assert res_dt.itemsize == expected_length * fact
        assert safety == Casting.safe  # we consider to string casts "safe"
        assert isinstance(res_dt, string_DT)
        # These casts currently implement changing the string length, so
        # check the cast-safety for too long/fixed string lengths:
        for change_length in [-1, 0, 1]:
            if change_length >= 0:
                expected_safety = Casting.safe
            else:
                expected_safety = Casting.same_kind
            to_dt = self.string_with_modified_length(string_dt, change_length)
            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
            assert res_dt is to_dt
            assert safety == expected_safety
        # The opposite direction is always considered unsafe:
        cast = get_castingimpl(string_DT, other_DT)
        safety, _ = cast._resolve_descriptors((string_dt, other_dt))
        assert safety == Casting.unsafe
        cast = get_castingimpl(string_DT, other_DT)
        safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
        assert safety == Casting.unsafe
        assert other_dt is res_dt  # returns the singleton for simple dtypes
    @pytest.mark.parametrize("string_char", ["S", "U"])
    @pytest.mark.parametrize("other_dt", simple_dtype_instances())
    def test_simple_string_casts_roundtrip(self, other_dt, string_char):
        """Cast simple dtypes to strings and back, checking values round-trip."""
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(type(other_dt), string_DT)
        cast_back = get_castingimpl(string_DT, type(other_dt))
        _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
        if res_other_dt is not other_dt:
            # do not support non-native byteorder, skip test in that case
            assert other_dt.byteorder != res_other_dt.byteorder
            return
        orig_arr, values = self.get_data(other_dt, None)
        str_arr = np.zeros(len(orig_arr), dtype=string_dt)
        string_dt_short = self.string_with_modified_length(string_dt, -1)
        str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
        string_dt_long = self.string_with_modified_length(string_dt, 1)
        str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
        assert not cast._supports_unaligned  # if support is added, should test
        assert not cast_back._supports_unaligned
        for contig in [True, False]:
            other_arr, str_arr = self.get_data_variation(
                orig_arr, str_arr, True, contig)
            _, str_arr_short = self.get_data_variation(
                orig_arr, str_arr_short.copy(), True, contig)
            _, str_arr_long = self.get_data_variation(
                orig_arr, str_arr_long, True, contig)
            cast._simple_strided_call((other_arr, str_arr))
            cast._simple_strided_call((other_arr, str_arr_short))
            assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
            cast._simple_strided_call((other_arr, str_arr_long))
            assert_array_equal(str_arr, str_arr_long)
            if other_dt.kind == "b":
                # Booleans do not roundtrip
                continue
            other_arr[...] = 0
            cast_back._simple_strided_call((str_arr, other_arr))
            assert_array_equal(orig_arr, other_arr)
            other_arr[...] = 0
            cast_back._simple_strided_call((str_arr_long, other_arr))
            assert_array_equal(orig_arr, other_arr)
    @pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
    @pytest.mark.parametrize("string_char", ["S", "U"])
    def test_string_to_string_cancast(self, other_dt, string_char):
        """Check cast-safety for string-to-string descriptor resolution."""
        other_dt = np.dtype(other_dt)
        fact = 1 if string_char == "S" else 4
        div = 1 if other_dt.char == "S" else 4
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(type(other_dt), string_DT)
        expected_length = other_dt.itemsize // div
        string_dt = np.dtype(f"{string_char}{expected_length}")
        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
        assert res_dt.itemsize == expected_length * fact
        assert isinstance(res_dt, string_DT)
        if other_dt.char == string_char:
            if other_dt.isnative:
                expected_safety = Casting.no | Casting.cast_is_view
            else:
                expected_safety = Casting.equiv
        elif string_char == "U":
            expected_safety = Casting.safe
        else:
            expected_safety = Casting.unsafe
        assert expected_safety == safety
        for change_length in [-1, 0, 1]:
            to_dt = self.string_with_modified_length(string_dt, change_length)
            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
            assert res_dt is to_dt
            if expected_safety == Casting.unsafe:
                assert safety == expected_safety
            elif change_length < 0:
                assert safety == Casting.same_kind
            elif change_length == 0:
                assert safety == expected_safety
            elif change_length > 0:
                assert safety == Casting.safe
    @pytest.mark.parametrize("order1", [">", "<"])
    @pytest.mark.parametrize("order2", [">", "<"])
    def test_unicode_byteswapped_cast(self, order1, order2):
        """Copy a unicode string between byte orders on unaligned array data."""
        # Very specific tests (not using the castingimpl directly)
        # that tests unicode byteswaps including for unaligned array data.
        dtype1 = np.dtype(f"{order1}U30")
        dtype2 = np.dtype(f"{order2}U30")
        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
        if dtype1.alignment != 1:
            # alignment should always be >1, but skip the check if not
            assert not data1.flags.aligned
            assert not data2.flags.aligned
        element = "this is a ünicode string‽"
        data1[()] = element
        # Test both `data1` and `data1.copy()` (which should be aligned)
        for data in [data1, data1.copy()]:
            data2[...] = data1
            assert data2[()] == element
            assert data2.copy()[()] == element
    def test_void_to_string_special_case(self):
        """Void-to-string casts pick one byte (S) or one character (U) per byte."""
        # Cover a small special case in void to string casting that could
        # probably just as well be turned into an error (compare
        # `test_object_to_parametric_internal_error` below).
        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
    def test_object_to_parametric_internal_error(self):
        """Object -> parametric DType without an instance must raise TypeError."""
        # We reject casting from object to a parametric type, without
        # figuring out the correct instance first.
        object_dtype = type(np.dtype(object))
        other_dtype = type(np.dtype(str))
        cast = get_castingimpl(object_dtype, other_dtype)
        with pytest.raises(TypeError,
                match="casting from object to the parametric DType"):
            cast._resolve_descriptors((np.dtype("O"), None))
    @pytest.mark.parametrize("casting", ["no", "unsafe"])
    def test_void_and_structured_with_subarray(self, casting):
        """Void <-> structured-with-subarray casts need the "unsafe" level."""
        # test case corresponding to gh-19325
        dtype = np.dtype([("foo", "<f4", (3, 2))])
        expected = casting == "unsafe"
        assert np.can_cast("V4", dtype, casting=casting) == expected
        assert np.can_cast(dtype, "V4", casting=casting) == expected
| true | true |
1c46600ef51420118bf2adf803f33064109e861f | 2,286 | py | Python | venv/Lib/site-packages/tests/test_310_ClientInfo.py | shehzadulislam/Assignment4 | a9cced70be6ae5d2685027d68032d5849f638301 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/tests/test_310_ClientInfo.py | shehzadulislam/Assignment4 | a9cced70be6ae5d2685027d68032d5849f638301 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/tests/test_310_ClientInfo.py | shehzadulislam/Assignment4 | a9cced70be6ae5d2685027d68032d5849f638301 | [
"Apache-2.0"
] | null | null | null | #
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
    """Prints the fields of ibm_db.client_info() for the expect-file harness."""

    def test_310_ClientInfo(self):
        helper = IbmDbTestFunctions()
        helper.assert_expectf(self.run_test_310)

    def run_test_310(self):
        conn = ibm_db.connect(config.database, config.user, config.password)
        client = ibm_db.client_info(conn)
        if not client:
            print("Error.")
            return
        # String-valued attributes are printed with their length and value.
        string_fields = (
            "DRIVER_NAME",
            "DRIVER_VER",
            "DATA_SOURCE_NAME",
            "DRIVER_ODBC_VER",
            "ODBC_VER",
            "ODBC_SQL_CONFORMANCE",
        )
        for field in string_fields:
            value = getattr(client, field)
            print("%s: string(%d) \"%s\"" % (field, len(value), value))
        # Codepages are integers.
        print("APPL_CODEPAGE: int(%s)" % client.APPL_CODEPAGE)
        print("CONN_CODEPAGE: int(%s)" % client.CONN_CODEPAGE)
        ibm_db.close(conn)
#__END__
#__LUW_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
#__ZOS_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
#__SYSTEMI_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
#__IDS_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
| 30.891892 | 120 | 0.700787 |
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_310_ClientInfo(self):
obj = IbmDbTestFunctions()
obj.assert_expectf(self.run_test_310)
def run_test_310(self):
conn = ibm_db.connect(config.database, config.user, config.password)
client = ibm_db.client_info(conn)
if client:
print("DRIVER_NAME: string(%d) \"%s\"" % (len(client.DRIVER_NAME), client.DRIVER_NAME))
print("DRIVER_VER: string(%d) \"%s\"" % (len(client.DRIVER_VER), client.DRIVER_VER))
print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(client.DATA_SOURCE_NAME), client.DATA_SOURCE_NAME))
print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(client.DRIVER_ODBC_VER), client.DRIVER_ODBC_VER))
print("ODBC_VER: string(%d) \"%s\"" % (len(client.ODBC_VER), client.ODBC_VER))
print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(client.ODBC_SQL_CONFORMANCE), client.ODBC_SQL_CONFORMANCE))
print("APPL_CODEPAGE: int(%s)" % client.APPL_CODEPAGE)
print("CONN_CODEPAGE: int(%s)" % client.CONN_CODEPAGE)
ibm_db.close(conn)
else:
print("Error.")
| true | true |
1c4660eee4c36b65b45ca71a3dfd9c51e6edccdc | 1,545 | py | Python | postprocessing.py | BaerkeDestroyer/tiktok-rss-flat | ec96d901b5d40c0563658c469a6308546e78d0e2 | [
"Apache-2.0"
] | null | null | null | postprocessing.py | BaerkeDestroyer/tiktok-rss-flat | ec96d901b5d40c0563658c469a6308546e78d0e2 | [
"Apache-2.0"
] | null | null | null | postprocessing.py | BaerkeDestroyer/tiktok-rss-flat | ec96d901b5d40c0563658c469a6308546e78d0e2 | [
"Apache-2.0"
] | null | null | null | from TikTokApi import TikTokApi
import csv
from feedgen.feed import FeedGenerator
from datetime import datetime, timezone
# Normal GitHub Pages URL
# ghPagesURL = "https://conoro.github.io/tiktok-rss-flat/"
# Custom Domain
ghPagesURL = "https://baerkedestroyer.github.io/tiktok-rss-flat/"
api = TikTokApi.get_instance()
count = 10
with open('subscriptions.csv') as f:
cf = csv.DictReader(f, fieldnames=['username'])
for row in cf:
user = row['username']
print (user)
tiktoks = api.by_username(user, count=count)
fg = FeedGenerator()
fg.id('https://www.tiktok.com/@' + user)
fg.title(user + ' TikTok')
fg.author( {'name':'Conor ONeill','email':'conor@conoroneill.com'} )
fg.link( href='http://tiktok.com', rel='alternate' )
fg.logo(ghPagesURL + '/tiktok-rss.png')
fg.subtitle('OK Boomer, all the latest TikToks from ' + user)
fg.link( href=ghPagesURL + 'rss/' + user + '.xml', rel='self' )
fg.language('en')
for tiktok in tiktoks:
fe = fg.add_entry()
link = "https://www.tiktok.com/@" + user + "/video/" + tiktok['id']
fe.id(link)
fe.published(datetime.fromtimestamp(tiktok['createTime'], timezone.utc))
fe.title(tiktok['desc'])
fe.link(href=link)
fe.description("<img src='" + tiktok['video']['cover'] + "' />")
fg.rss_file('rss/' + user + '.xml') # Write the RSS feed to a file
| 34.333333 | 85 | 0.579935 | from TikTokApi import TikTokApi
import csv
from feedgen.feed import FeedGenerator
from datetime import datetime, timezone
ghPagesURL = "https://baerkedestroyer.github.io/tiktok-rss-flat/"
api = TikTokApi.get_instance()
count = 10
with open('subscriptions.csv') as f:
cf = csv.DictReader(f, fieldnames=['username'])
for row in cf:
user = row['username']
print (user)
tiktoks = api.by_username(user, count=count)
fg = FeedGenerator()
fg.id('https://www.tiktok.com/@' + user)
fg.title(user + ' TikTok')
fg.author( {'name':'Conor ONeill','email':'conor@conoroneill.com'} )
fg.link( href='http://tiktok.com', rel='alternate' )
fg.logo(ghPagesURL + '/tiktok-rss.png')
fg.subtitle('OK Boomer, all the latest TikToks from ' + user)
fg.link( href=ghPagesURL + 'rss/' + user + '.xml', rel='self' )
fg.language('en')
for tiktok in tiktoks:
fe = fg.add_entry()
link = "https://www.tiktok.com/@" + user + "/video/" + tiktok['id']
fe.id(link)
fe.published(datetime.fromtimestamp(tiktok['createTime'], timezone.utc))
fe.title(tiktok['desc'])
fe.link(href=link)
fe.description("<img src='" + tiktok['video']['cover'] + "' />")
fg.rss_file('rss/' + user + '.xml')
| true | true |
1c46616705638a9d0e9b20f08577b7cad14f9b79 | 459 | py | Python | config.example.py | entuland/fogibot | e3afe14d53fe9d47178161d9311301c47c960507 | [
"MIT"
] | null | null | null | config.example.py | entuland/fogibot | e3afe14d53fe9d47178161d9311301c47c960507 | [
"MIT"
] | null | null | null | config.example.py | entuland/fogibot | e3afe14d53fe9d47178161d9311301c47c960507 | [
"MIT"
] | null | null | null | host = "chat.example.com"
port = 6697
username = "username"
password = "password"
botname = "botname"
realname = "realname"
owner = "owner"
trigger = botname
channels = [
"##" + botname,
]
sharing_bins = [
"cpy.pt (generic pastes), gist.github.com (multiple files pastes)",
"jsfiddle.net, codepen.io (HTML+CSS+JS IDEs)",
"ideone.com (runnable code - C, C++, Python etc.)",
"postimage.io (family safe images), pasteconf.net (conf files)"
]
| 25.5 | 71 | 0.657952 | host = "chat.example.com"
port = 6697
username = "username"
password = "password"
botname = "botname"
realname = "realname"
owner = "owner"
trigger = botname
channels = [
"##" + botname,
]
sharing_bins = [
"cpy.pt (generic pastes), gist.github.com (multiple files pastes)",
"jsfiddle.net, codepen.io (HTML+CSS+JS IDEs)",
"ideone.com (runnable code - C, C++, Python etc.)",
"postimage.io (family safe images), pasteconf.net (conf files)"
]
| true | true |
1c466226c6dae77cdef9d5c22b9f63c343a0eb11 | 933 | py | Python | bindings/python/src/test/test_package_dependencies.py | cloudsmith-io/cloudsmith-api | bc747fa6ee1d86485e334b08f65687630b3fd87c | [
"Apache-2.0"
] | 9 | 2018-07-02T15:21:40.000Z | 2021-11-24T03:44:39.000Z | bindings/python/src/test/test_package_dependencies.py | cloudsmith-io/cloudsmith-api | bc747fa6ee1d86485e334b08f65687630b3fd87c | [
"Apache-2.0"
] | 8 | 2019-01-08T22:06:12.000Z | 2022-03-16T15:02:37.000Z | bindings/python/src/test/test_package_dependencies.py | cloudsmith-io/cloudsmith-api | bc747fa6ee1d86485e334b08f65687630b3fd87c | [
"Apache-2.0"
] | 1 | 2021-12-06T19:08:05.000Z | 2021-12-06T19:08:05.000Z | # coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: support@cloudsmith.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.models.package_dependencies import PackageDependencies
class TestPackageDependencies(unittest.TestCase):
    """ PackageDependencies unit test stubs """
    def setUp(self):
        # No fixtures needed for the stub test.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testPackageDependencies(self):
        """
        Test PackageDependencies
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = cloudsmith_api.models.package_dependencies.PackageDependencies()
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 20.733333 | 81 | 0.713826 |
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.models.package_dependencies import PackageDependencies
class TestPackageDependencies(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testPackageDependencies(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
1c466290ee5308ecc91a711df4d496fe19a9680e | 674 | py | Python | manage.py | loyer-yuan/REMVocabulary | d86965600f1951c67558b8946bcfd6317d345153 | [
"MIT"
] | 1 | 2021-12-09T09:26:23.000Z | 2021-12-09T09:26:23.000Z | manage.py | loyer-yuan/REMVocabulary | d86965600f1951c67558b8946bcfd6317d345153 | [
"MIT"
] | 1 | 2021-12-07T13:01:23.000Z | 2021-12-12T13:53:47.000Z | manage.py | loyer-yuan/REMVocabulary | d86965600f1951c67558b8946bcfd6317d345153 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks via Django's command-line interface."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'REMVocabulary_DBMS.settings')
    hint = (
        "Couldn't import Django. Are you sure it's installed and "
        "available on your PYTHONPATH environment variable? Did you "
        "forget to activate a virtual environment?"
    )
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original cause.
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: only dispatch when run as a script.
if __name__ == '__main__':
    main()
| 29.304348 | 82 | 0.683976 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'REMVocabulary_DBMS.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c46640f92964d3b0eb444b0e03fd6c6ff9d1033 | 9,349 | py | Python | platformio/commands/check.py | xeno010/platformio-core | 94f8afec38fc8d35db1055368f5fbe4e67c89e7e | [
"Apache-2.0"
] | null | null | null | platformio/commands/check.py | xeno010/platformio-core | 94f8afec38fc8d35db1055368f5fbe4e67c89e7e | [
"Apache-2.0"
] | null | null | null | platformio/commands/check.py | xeno010/platformio-core | 94f8afec38fc8d35db1055368f5fbe4e67c89e7e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
# pylint: disable=redefined-builtin,too-many-statements
import os
from collections import Counter
from os.path import basename, dirname, isfile, join
from time import time
import click
from tabulate import tabulate
from platformio import exception, fs, util
from platformio.check.defect import DefectItem
from platformio.check.tools import CheckToolFactory
from platformio.compat import dump_json_to_unicode
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (
find_project_dir_above,
get_project_dir,
get_project_include_dir,
get_project_src_dir,
)
@click.command("check", short_help="Run a static analysis tool on code")
@click.option("-e", "--environment", multiple=True)
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option(
    "-c",
    "--project-conf",
    type=click.Path(
        exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
    ),
)
@click.option("--filter", multiple=True, help="Pattern: +<include> -<exclude>")
@click.option("--flags", multiple=True)
@click.option(
    "--severity", multiple=True, type=click.Choice(DefectItem.SEVERITY_LABELS.values())
)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.option("--json-output", is_flag=True)
def cli(
    environment,
    project_dir,
    project_conf,
    filter,
    flags,
    severity,
    silent,
    verbose,
    json_output,
):
    """Run every configured check tool over the selected project environments.

    Iterates the environments declared in platformio.ini, runs each
    configured check tool (``cppcheck`` by default), collects defects, and
    reports them as console tables or JSON.  Raises ReturnErrorCode(1)
    when any executed environment fails.
    """
    # find project directory on upper level
    if isfile(project_dir):
        project_dir = find_project_dir_above(project_dir)
    results = []
    with fs.cd(project_dir):
        config = ProjectConfig.get_instance(
            project_conf or join(project_dir, "platformio.ini")
        )
        config.validate(environment)
        default_envs = config.default_envs()
        for envname in config.envs():
            # An env is skipped (but still reported as IGNORED) when it is
            # not in the explicit -e list or, with no -e given, not among
            # the project's default_envs.
            skipenv = any(
                [
                    environment and envname not in environment,
                    not environment and default_envs and envname not in default_envs,
                ]
            )
            env_options = config.items(env=envname, as_dict=True)
            # Short "platform/framework/board" description for the banner.
            env_dump = []
            for k, v in env_options.items():
                if k not in ("platform", "framework", "board"):
                    continue
                env_dump.append(
                    "%s: %s" % (k, ", ".join(v) if isinstance(v, list) else v)
                )
            # By default only the project's src/ and include/ dirs are checked.
            default_filter = [
                "+<%s/>" % basename(d)
                for d in (get_project_src_dir(), get_project_include_dir())
            ]
            tool_options = dict(
                verbose=verbose,
                silent=silent,
                filter=filter or env_options.get("check_filter", default_filter),
                flags=flags or env_options.get("check_flags"),
                # --silent restricts reporting to high-severity defects only.
                severity=[DefectItem.SEVERITY_LABELS[DefectItem.SEVERITY_HIGH]]
                if silent
                else (severity or env_options.get("check_severity")),
            )
            for tool in env_options.get("check_tool", ["cppcheck"]):
                if skipenv:
                    # Record the env/tool pair without a "succeeded" key so
                    # the summary shows it as IGNORED.
                    results.append({"env": envname, "tool": tool})
                    continue
                if not silent and not json_output:
                    print_processing_header(tool, envname, env_dump)
                ct = CheckToolFactory.new(
                    tool, project_dir, config, envname, tool_options
                )
                result = {"env": envname, "tool": tool, "duration": time()}
                rc = ct.check(
                    # Stream defects to the console unless the full list is
                    # printed later (verbose) or serialized (json-output).
                    on_defect_callback=None
                    if (json_output or verbose)
                    else lambda defect: click.echo(repr(defect))
                )
                result["defects"] = ct.get_defects()
                result["duration"] = time() - result["duration"]
                # Any high-severity defect fails the run even when the tool
                # itself exits with status 0.
                result["succeeded"] = rc == 0 and not any(
                    d.severity == DefectItem.SEVERITY_HIGH for d in result["defects"]
                )
                results.append(result)
                if verbose:
                    click.echo("\n".join(repr(d) for d in result["defects"]))
                if not json_output and not silent:
                    if not result["defects"]:
                        click.echo("No defects found")
                    print_processing_footer(result)
    if json_output:
        click.echo(dump_json_to_unicode(results_to_json(results)))
    elif not silent:
        print_check_summary(results)
    command_failed = any(r.get("succeeded") is False for r in results)
    if command_failed:
        raise exception.ReturnErrorCode(1)
def results_to_json(raw):
    """Normalize raw check results in place for JSON serialization.

    Adds/overwrites three keys on every entry:
      * ``ignored``   - True when the env was skipped (``succeeded`` was None)
      * ``succeeded`` - coerced to a plain bool
      * ``defects``   - each defect converted via its ``to_json()`` method
    Returns the list of (mutated) entries.
    """
    normalized = []
    for entry in raw:
        was_skipped = entry.get("succeeded") is None
        entry["ignored"] = was_skipped
        entry["succeeded"] = bool(entry.get("succeeded"))
        entry["defects"] = [defect.to_json() for defect in entry.get("defects", [])]
        normalized.append(entry)
    return normalized
def print_processing_header(tool, envname, envdump):
    """Print a banner announcing the check of one environment/tool pair."""
    env_label = click.style(envname, fg="cyan", bold=True)
    click.echo(
        "Checking %s > %s (%s)" % (env_label, tool, "; ".join(envdump))
    )
    # Horizontal rule sized to the current terminal width.
    width, _ = click.get_terminal_size()
    click.secho("-" * width, bold=True)
def print_processing_footer(result):
    """Print a PASSED/FAILED bar with the elapsed time for one check run."""
    failed = not result.get("succeeded")
    if failed:
        label = click.style("FAILED", fg="red", bold=True)
    else:
        label = click.style("PASSED", fg="green", bold=True)
    util.print_labeled_bar(
        "[%s] Took %.2f seconds" % (label, result["duration"]),
        is_error=failed,
    )
def print_defects_stats(results):
    """Print a per-component table of defect counts grouped by severity."""
    # component path -> Counter of severity-label occurrences
    components = dict()
    def _append_defect(component, defect):
        if not components.get(component):
            components[component] = Counter()
        components[component].update({DefectItem.SEVERITY_LABELS[defect.severity]: 1})
    for result in results:
        for defect in result.get("defects", []):
            component = dirname(defect.file) or defect.file
            _append_defect(component, defect)
            # For in-project files also accumulate counts into every parent
            # directory so the table shows per-folder roll-ups.
            if component.startswith(get_project_dir()):
                while os.sep in component:
                    component = dirname(component)
                    _append_defect(component, defect)
    if not components:
        return
    # Columns run from most to least severe.
    severity_labels = list(DefectItem.SEVERITY_LABELS.values())
    severity_labels.reverse()
    tabular_data = list()
    for k, v in components.items():
        tool_defect = [v.get(s, 0) for s in severity_labels]
        tabular_data.append([k] + tool_defect)
    # Column-wise totals across all components (computed before sorting).
    total = ["Total"] + [sum(d) for d in list(zip(*tabular_data))[1:]]
    tabular_data.sort()
    tabular_data.append([])  # Empty line as delimiter
    tabular_data.append(total)
    headers = ["Component"]
    headers.extend([l.upper() for l in severity_labels])
    headers = [click.style(h, bold=True) for h in headers]
    click.echo(tabulate(tabular_data, headers=headers, numalign="center"))
    click.echo()
def print_check_summary(results):
    """Print the final per-environment summary table and overall status bar."""
    click.echo()
    tabular_data = []
    succeeded_nums = 0
    failed_nums = 0
    duration = 0
    print_defects_stats(results)
    for result in results:
        duration += result.get("duration", 0)
        if result.get("succeeded") is False:
            failed_nums += 1
            status_str = click.style("FAILED", fg="red")
        elif result.get("succeeded") is None:
            # Environments filtered out by -e / default_envs selection.
            status_str = "IGNORED"
        else:
            succeeded_nums += 1
            status_str = click.style("PASSED", fg="green")
        tabular_data.append(
            (
                click.style(result["env"], fg="cyan"),
                result["tool"],
                status_str,
                util.humanize_duration_time(result.get("duration")),
            )
        )
    click.echo(
        tabulate(
            tabular_data,
            headers=[
                click.style(s, bold=True)
                for s in ("Environment", "Tool", "Status", "Duration")
            ],
        ),
        # Route the table to stderr when anything failed.
        err=failed_nums,
    )
    util.print_labeled_bar(
        "%s%d succeeded in %s"
        % (
            "%d failed, " % failed_nums if failed_nums else "",
            succeeded_nums,
            util.humanize_duration_time(duration),
        ),
        is_error=failed_nums,
        fg="red" if failed_nums else "green",
    )
| 31.90785 | 87 | 0.587656 |
import os
from collections import Counter
from os.path import basename, dirname, isfile, join
from time import time
import click
from tabulate import tabulate
from platformio import exception, fs, util
from platformio.check.defect import DefectItem
from platformio.check.tools import CheckToolFactory
from platformio.compat import dump_json_to_unicode
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (
find_project_dir_above,
get_project_dir,
get_project_include_dir,
get_project_src_dir,
)
@click.command("check", short_help="Run a static analysis tool on code")
@click.option("-e", "--environment", multiple=True)
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option(
"-c",
"--project-conf",
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
@click.option("--filter", multiple=True, help="Pattern: +<include> -<exclude>")
@click.option("--flags", multiple=True)
@click.option(
"--severity", multiple=True, type=click.Choice(DefectItem.SEVERITY_LABELS.values())
)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.option("--json-output", is_flag=True)
def cli(
environment,
project_dir,
project_conf,
filter,
flags,
severity,
silent,
verbose,
json_output,
):
if isfile(project_dir):
project_dir = find_project_dir_above(project_dir)
results = []
with fs.cd(project_dir):
config = ProjectConfig.get_instance(
project_conf or join(project_dir, "platformio.ini")
)
config.validate(environment)
default_envs = config.default_envs()
for envname in config.envs():
skipenv = any(
[
environment and envname not in environment,
not environment and default_envs and envname not in default_envs,
]
)
env_options = config.items(env=envname, as_dict=True)
env_dump = []
for k, v in env_options.items():
if k not in ("platform", "framework", "board"):
continue
env_dump.append(
"%s: %s" % (k, ", ".join(v) if isinstance(v, list) else v)
)
default_filter = [
"+<%s/>" % basename(d)
for d in (get_project_src_dir(), get_project_include_dir())
]
tool_options = dict(
verbose=verbose,
silent=silent,
filter=filter or env_options.get("check_filter", default_filter),
flags=flags or env_options.get("check_flags"),
severity=[DefectItem.SEVERITY_LABELS[DefectItem.SEVERITY_HIGH]]
if silent
else (severity or env_options.get("check_severity")),
)
for tool in env_options.get("check_tool", ["cppcheck"]):
if skipenv:
results.append({"env": envname, "tool": tool})
continue
if not silent and not json_output:
print_processing_header(tool, envname, env_dump)
ct = CheckToolFactory.new(
tool, project_dir, config, envname, tool_options
)
result = {"env": envname, "tool": tool, "duration": time()}
rc = ct.check(
on_defect_callback=None
if (json_output or verbose)
else lambda defect: click.echo(repr(defect))
)
result["defects"] = ct.get_defects()
result["duration"] = time() - result["duration"]
result["succeeded"] = rc == 0 and not any(
d.severity == DefectItem.SEVERITY_HIGH for d in result["defects"]
)
results.append(result)
if verbose:
click.echo("\n".join(repr(d) for d in result["defects"]))
if not json_output and not silent:
if not result["defects"]:
click.echo("No defects found")
print_processing_footer(result)
if json_output:
click.echo(dump_json_to_unicode(results_to_json(results)))
elif not silent:
print_check_summary(results)
command_failed = any(r.get("succeeded") is False for r in results)
if command_failed:
raise exception.ReturnErrorCode(1)
def results_to_json(raw):
results = []
for item in raw:
item.update(
{
"ignored": item.get("succeeded") is None,
"succeeded": bool(item.get("succeeded")),
"defects": [d.to_json() for d in item.get("defects", [])],
}
)
results.append(item)
return results
def print_processing_header(tool, envname, envdump):
click.echo(
"Checking %s > %s (%s)"
% (click.style(envname, fg="cyan", bold=True), tool, "; ".join(envdump))
)
terminal_width, _ = click.get_terminal_size()
click.secho("-" * terminal_width, bold=True)
def print_processing_footer(result):
is_failed = not result.get("succeeded")
util.print_labeled_bar(
"[%s] Took %.2f seconds"
% (
(
click.style("FAILED", fg="red", bold=True)
if is_failed
else click.style("PASSED", fg="green", bold=True)
),
result["duration"],
),
is_error=is_failed,
)
def print_defects_stats(results):
components = dict()
def _append_defect(component, defect):
if not components.get(component):
components[component] = Counter()
components[component].update({DefectItem.SEVERITY_LABELS[defect.severity]: 1})
for result in results:
for defect in result.get("defects", []):
component = dirname(defect.file) or defect.file
_append_defect(component, defect)
if component.startswith(get_project_dir()):
while os.sep in component:
component = dirname(component)
_append_defect(component, defect)
if not components:
return
severity_labels = list(DefectItem.SEVERITY_LABELS.values())
severity_labels.reverse()
tabular_data = list()
for k, v in components.items():
tool_defect = [v.get(s, 0) for s in severity_labels]
tabular_data.append([k] + tool_defect)
total = ["Total"] + [sum(d) for d in list(zip(*tabular_data))[1:]]
tabular_data.sort()
tabular_data.append([])
tabular_data.append(total)
headers = ["Component"]
headers.extend([l.upper() for l in severity_labels])
headers = [click.style(h, bold=True) for h in headers]
click.echo(tabulate(tabular_data, headers=headers, numalign="center"))
click.echo()
def print_check_summary(results):
click.echo()
tabular_data = []
succeeded_nums = 0
failed_nums = 0
duration = 0
print_defects_stats(results)
for result in results:
duration += result.get("duration", 0)
if result.get("succeeded") is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif result.get("succeeded") is None:
status_str = "IGNORED"
else:
succeeded_nums += 1
status_str = click.style("PASSED", fg="green")
tabular_data.append(
(
click.style(result["env"], fg="cyan"),
result["tool"],
status_str,
util.humanize_duration_time(result.get("duration")),
)
)
click.echo(
tabulate(
tabular_data,
headers=[
click.style(s, bold=True)
for s in ("Environment", "Tool", "Status", "Duration")
],
),
err=failed_nums,
)
util.print_labeled_bar(
"%s%d succeeded in %s"
% (
"%d failed, " % failed_nums if failed_nums else "",
succeeded_nums,
util.humanize_duration_time(duration),
),
is_error=failed_nums,
fg="red" if failed_nums else "green",
)
| true | true |
1c46642673cb78f0a633c5d3ee8ef83f7a8d8c9d | 3,761 | py | Python | python/spidriver.py | boxofrox/spidriver | 4e4cf254f1ad337fb299e3c08eb105ae13fa081f | [
"BSD-3-Clause"
] | 4 | 2021-04-21T21:37:57.000Z | 2022-02-10T06:56:01.000Z | python/spidriver.py | boxofrox/spidriver | 4e4cf254f1ad337fb299e3c08eb105ae13fa081f | [
"BSD-3-Clause"
] | null | null | null | python/spidriver.py | boxofrox/spidriver | 4e4cf254f1ad337fb299e3c08eb105ae13fa081f | [
"BSD-3-Clause"
] | 5 | 2019-09-25T15:19:48.000Z | 2021-09-08T10:33:31.000Z | # coding=utf-8
import sys
import serial
__version__ = '0.0.2'
PYTHON2 = (sys.version_info < (3, 0))
class SPIDriver:
    """
    SPIDriver interface over a USB serial port.

    The following status variables are available (refreshed by getstatus()):

        product     product code e.g. 'spidriver1'
        serial      serial string of SPIDriver
        uptime      time since SPIDriver boot, in seconds
        voltage     USB voltage, in V
        current     current used by attached device, in mA
        temp        temperature, in degrees C
        cs          state of CS pin
        a           state of A pin
        b           state of B pin
        ccitt_crc   CCITT-16 CRC of all transmitted and received bytes
    """
    def __init__(self, port="/dev/ttyUSB0"):
        # 460800 baud with a 1-second read timeout.
        self.ser = serial.Serial(port, 460800, timeout=1)
        # Flush the device's command parser with '@' padding, then drain
        # anything it already sent back.
        self.ser.write(b'@' * 64)
        while self.ser.inWaiting():
            self.ser.read(1)
        # Sanity-check the link by echoing a few distinctive byte values.
        for c in [0x55, 0x00, 0xff, 0xaa]:
            r = self.__echo(c)
            if r != c:
                print('Echo test failed - not attached?')
                print('Expected %r but received %r' % (c, r))
                raise IOError
        self.getstatus()
    if PYTHON2:
        # Python 2: serial.write() expects a str, so convert int lists here.
        def __ser_w(self, s):
            if isinstance(s, list):
                s = "".join([chr(c) for c in s])
            self.ser.write(s)
    else:
        # Python 3: lists of ints convert directly to bytes.
        def __ser_w(self, s):
            if isinstance(s, list):
                s = bytes(s)
            self.ser.write(s)
    def __echo(self, c):
        # 'e' command: the device echoes back the byte that follows.
        self.__ser_w([ord('e'), c])
        r = self.ser.read(1)
        if PYTHON2:
            return ord(r[0])
        else:
            return r[0]
    def detach(self):
        """ Detach all signals """
        self.ser.write(b'x')
    def sel(self):
        """ Select the SPI device by asserting CS """
        self.ser.write(b's')
    def unsel(self):
        """ Unselect the SPI device by deasserting CS """
        self.ser.write(b'u')
    def read(self, l):
        """ Read l bytes from the SPI device """
        r = []
        # Transfers are chunked to the device's 64-byte command limit.
        for i in range(0, l, 64):
            rem = min(l - i, 64)
            # 0x80 + n-1 header starts an n-byte transfer; 0xff bytes act
            # as write filler while the reply is clocked in.
            self.__ser_w([0x80 + rem - 1] + [0xff] * rem)
            r.append(self.ser.read(rem))
        return b''.join(r)
    def write(self, bb):
        """ Write bb to the SPI device """
        for i in range(0, len(bb), 64):
            sub = bb[i:i + 64]
            # 0xc0 + n-1 header starts an n-byte write-only transfer
            # (no readback expected).
            self.__ser_w([0xc0 + len(sub) - 1])
            self.__ser_w(sub)
    def writeread(self, bb):
        """ Write bb to the SPI device, return the read bytes """
        r = []
        ST = 64
        for i in range(0, len(bb), ST):
            sub = bb[i:i + 64]
            self.__ser_w([0x80 + len(sub) - 1])
            self.__ser_w(sub)
            r.append(self.ser.read(len(sub)))
        return b''.join(r)
    def seta(self, v):
        """ Set the A signal to 0 or 1 """
        self.__ser_w([ord('a'), v])
    def setb(self, v):
        """ Set the B signal to 0 or 1 """
        self.__ser_w([ord('b'), v])
    def getstatus(self):
        """ Update all status variables """
        self.ser.write(b'?')
        # Status reply is an 80-byte bracketed, space-separated record.
        r = self.ser.read(80)
        body = r[1:-1].decode()  # remove [ and ]
        (self.product,
         self.serial,
         uptime,
         voltage,
         current,
         temp,
         a,
         b,
         cs,
         ccitt_crc) = body.split()
        self.uptime = int(uptime)
        self.voltage = float(voltage)
        self.current = float(current)
        self.temp = float(temp)
        self.a = int(a)
        self.b = int(b)
        self.cs = int(cs)
        # CRC is reported by the device in hexadecimal.
        self.ccitt_crc = int(ccitt_crc, 16)
    def __repr__(self):
        return "<%s serial=%s uptime=%d>" % (
            self.product,
            self.serial,
            self.uptime)
| 26.864286 | 70 | 0.488168 |
import sys
import serial
__version__ = '0.0.2'
PYTHON2 = (sys.version_info < (3, 0))
class SPIDriver:
def __init__(self, port="/dev/ttyUSB0"):
self.ser = serial.Serial(port, 460800, timeout=1)
self.ser.write(b'@' * 64)
while self.ser.inWaiting():
self.ser.read(1)
for c in [0x55, 0x00, 0xff, 0xaa]:
r = self.__echo(c)
if r != c:
print('Echo test failed - not attached?')
print('Expected %r but received %r' % (c, r))
raise IOError
self.getstatus()
if PYTHON2:
def __ser_w(self, s):
if isinstance(s, list):
s = "".join([chr(c) for c in s])
self.ser.write(s)
else:
def __ser_w(self, s):
if isinstance(s, list):
s = bytes(s)
self.ser.write(s)
def __echo(self, c):
self.__ser_w([ord('e'), c])
r = self.ser.read(1)
if PYTHON2:
return ord(r[0])
else:
return r[0]
def detach(self):
self.ser.write(b'x')
def sel(self):
self.ser.write(b's')
def unsel(self):
self.ser.write(b'u')
def read(self, l):
r = []
for i in range(0, l, 64):
rem = min(l - i, 64)
self.__ser_w([0x80 + rem - 1] + [0xff] * rem)
r.append(self.ser.read(rem))
return b''.join(r)
def write(self, bb):
for i in range(0, len(bb), 64):
sub = bb[i:i + 64]
self.__ser_w([0xc0 + len(sub) - 1])
self.__ser_w(sub)
def writeread(self, bb):
r = []
ST = 64
for i in range(0, len(bb), ST):
sub = bb[i:i + 64]
self.__ser_w([0x80 + len(sub) - 1])
self.__ser_w(sub)
r.append(self.ser.read(len(sub)))
return b''.join(r)
def seta(self, v):
self.__ser_w([ord('a'), v])
def setb(self, v):
self.__ser_w([ord('b'), v])
def getstatus(self):
self.ser.write(b'?')
r = self.ser.read(80)
body = r[1:-1].decode()
(self.product,
self.serial,
uptime,
voltage,
current,
temp,
a,
b,
cs,
ccitt_crc) = body.split()
self.uptime = int(uptime)
self.voltage = float(voltage)
self.current = float(current)
self.temp = float(temp)
self.a = int(a)
self.b = int(b)
self.cs = int(cs)
self.ccitt_crc = int(ccitt_crc, 16)
def __repr__(self):
return "<%s serial=%s uptime=%d>" % (
self.product,
self.serial,
self.uptime)
| true | true |
1c46646062a09d65a5e7407db23335596075f971 | 5,160 | py | Python | docs/source/conf.py | Zhiwei-Lu/pyvaspflow | b80eab3e8bfc52aed6a2459dd32655f1075d9058 | [
"MIT"
] | 13 | 2019-06-03T11:41:35.000Z | 2022-03-04T07:45:42.000Z | docs/source/conf.py | Zhiwei-Lu/pyvaspflow | b80eab3e8bfc52aed6a2459dd32655f1075d9058 | [
"MIT"
] | 2 | 2019-03-12T10:51:15.000Z | 2019-03-14T02:18:18.000Z | docs/source/conf.py | Zhiwei-Lu/pyvaspflow | b80eab3e8bfc52aed6a2459dd32655f1075d9058 | [
"MIT"
] | 8 | 2019-06-03T03:20:20.000Z | 2021-01-06T11:48:37.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package root importable so Sphinx autodoc can find pyvaspflow.
sys.path.insert(0, os.path.abspath('../..'))
# Heavy/optional dependencies are mocked out during the documentation build.
autodoc_mock_imports = ['spglib', 'ase']
# -- Project information -----------------------------------------------------
project = 'pyvaspflowdoc'
copyright = '2019, ChangChun He'
author = 'ChangChun He'
# The short X.Y version
version = '0.0.1'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyvaspflowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyvaspflow.tex', 'pyvaspflow Documentation',
'ChangChun He', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
author, 'pyvaspflow', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.352941 | 79 | 0.645543 |
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
autodoc_mock_imports = ['spglib', 'ase']
project = 'pyvaspflowdoc'
copyright = '2019, ChangChun He'
author = 'ChangChun He'
version = '0.0.1'
release = '0.1.0'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'bizstyle'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyvaspflowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyvaspflow.tex', 'pyvaspflow Documentation',
'ChangChun He', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
author, 'pyvaspflow', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true | true |
1c46648434564e159bcf071df56bbe0cd6cbec1e | 3,708 | py | Python | scripts/trainLSTM_language.py | Asteur/visual-qa | 99be95d61bf9302495e741fa53cf63b7e9a91a35 | [
"MIT"
] | 528 | 2015-11-02T22:06:38.000Z | 2022-02-17T16:07:47.000Z | scripts/trainLSTM_language.py | Asteur/visual-qa | 99be95d61bf9302495e741fa53cf63b7e9a91a35 | [
"MIT"
] | 33 | 2015-11-02T16:10:36.000Z | 2021-01-09T06:05:16.000Z | scripts/trainLSTM_language.py | Asteur/visual-qa | 99be95d61bf9302495e741fa53cf63b7e9a91a35 | [
"MIT"
] | 221 | 2015-11-02T19:13:21.000Z | 2020-09-25T03:41:24.000Z | import sys
from random import shuffle
import argparse
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.utils import np_utils, generic_utils
from sklearn import preprocessing
from sklearn.externals import joblib
from spacy.en import English
from features import get_questions_tensor_timeseries, get_answers_matrix
from utils import grouper, selectFrequentAnswers
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-num_hidden_units', type=int, default=512)
parser.add_argument('-num_lstm_layers', type=int, default=2)
parser.add_argument('-dropout', type=float, default=0.2)
parser.add_argument('-activation', type=str, default='tanh')
args = parser.parse_args()
questions_train = open('../data/preprocessed/questions_train2014.txt', 'r').read().decode('utf8').splitlines()
questions_lengths_train = open('../data/preprocessed/questions_lengths_train2014.txt', 'r').read().decode('utf8').splitlines()
answers_train = open('../data/preprocessed/answers_train2014.txt', 'r').read().decode('utf8').splitlines()
images_train = open('../data/preprocessed/images_train2014.txt', 'r').read().decode('utf8').splitlines()
max_answers = 1000
questions_train, answers_train, images_train = selectFrequentAnswers(questions_train,answers_train,images_train, max_answers)
print 'Loaded questions, sorting by length...'
questions_lengths_train, questions_train, answers_train = (list(t) for t in zip(*sorted(zip(questions_lengths_train, questions_train, answers_train))))
#encode the remaining answers
labelencoder = preprocessing.LabelEncoder()
labelencoder.fit(answers_train)
nb_classes = len(list(labelencoder.classes_))
joblib.dump(labelencoder,'../models/labelencoder.pkl')
max_len = 30 #25 is max for training, 27 is max for validation
word_vec_dim = 300
model = Sequential()
model.add(LSTM(output_dim = args.num_hidden_units, activation='tanh',
return_sequences=True, input_shape=(max_len, word_vec_dim)))
model.add(Dropout(args.dropout))
model.add(LSTM(args.num_hidden_units, return_sequences=False))
model.add(Dense(nb_classes, init='uniform'))
model.add(Activation('softmax'))
json_string = model.to_json()
model_file_name = '../models/lstm_language_only_num_hidden_units_' + str(args.num_hidden_units) + '_num_lstm_layers_' + str(args.num_lstm_layers) + '_dropout_' + str(args.dropout)
open(model_file_name + '.json', 'w').write(json_string)
print 'Compiling model...'
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
print 'Compilation done...'
#set up word vectors
nlp = English()
print 'loaded word2vec features...'
## training
print 'Training started...'
numEpochs = 100
model_save_interval = 5
batchSize = 128
for k in xrange(numEpochs):
progbar = generic_utils.Progbar(len(questions_train))
for qu_batch,an_batch,im_batch in zip(grouper(questions_train, batchSize, fillvalue=questions_train[0]),
grouper(answers_train, batchSize, fillvalue=answers_train[0]),
grouper(images_train, batchSize, fillvalue=images_train[0])):
timesteps = len(nlp(qu_batch[-1])) #questions sorted in descending order of length
X_q_batch = get_questions_tensor_timeseries(qu_batch, nlp, timesteps)
Y_batch = get_answers_matrix(an_batch, labelencoder)
loss = model.train_on_batch(X_q_batch, Y_batch)
progbar.add(batchSize, values=[("train loss", loss)])
if k%model_save_interval == 0:
model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k))
model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k+1))
if __name__ == "__main__":
main()
| 40.304348 | 180 | 0.76699 | import sys
from random import shuffle
import argparse
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.utils import np_utils, generic_utils
from sklearn import preprocessing
from sklearn.externals import joblib
from spacy.en import English
from features import get_questions_tensor_timeseries, get_answers_matrix
from utils import grouper, selectFrequentAnswers
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-num_hidden_units', type=int, default=512)
parser.add_argument('-num_lstm_layers', type=int, default=2)
parser.add_argument('-dropout', type=float, default=0.2)
parser.add_argument('-activation', type=str, default='tanh')
args = parser.parse_args()
questions_train = open('../data/preprocessed/questions_train2014.txt', 'r').read().decode('utf8').splitlines()
questions_lengths_train = open('../data/preprocessed/questions_lengths_train2014.txt', 'r').read().decode('utf8').splitlines()
answers_train = open('../data/preprocessed/answers_train2014.txt', 'r').read().decode('utf8').splitlines()
images_train = open('../data/preprocessed/images_train2014.txt', 'r').read().decode('utf8').splitlines()
max_answers = 1000
questions_train, answers_train, images_train = selectFrequentAnswers(questions_train,answers_train,images_train, max_answers)
print 'Loaded questions, sorting by length...'
questions_lengths_train, questions_train, answers_train = (list(t) for t in zip(*sorted(zip(questions_lengths_train, questions_train, answers_train))))
labelencoder = preprocessing.LabelEncoder()
labelencoder.fit(answers_train)
nb_classes = len(list(labelencoder.classes_))
joblib.dump(labelencoder,'../models/labelencoder.pkl')
max_len = 30
word_vec_dim = 300
model = Sequential()
model.add(LSTM(output_dim = args.num_hidden_units, activation='tanh',
return_sequences=True, input_shape=(max_len, word_vec_dim)))
model.add(Dropout(args.dropout))
model.add(LSTM(args.num_hidden_units, return_sequences=False))
model.add(Dense(nb_classes, init='uniform'))
model.add(Activation('softmax'))
json_string = model.to_json()
model_file_name = '../models/lstm_language_only_num_hidden_units_' + str(args.num_hidden_units) + '_num_lstm_layers_' + str(args.num_lstm_layers) + '_dropout_' + str(args.dropout)
open(model_file_name + '.json', 'w').write(json_string)
print 'Compiling model...'
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
print 'Compilation done...'
nlp = English()
print 'loaded word2vec features...'
raining started...'
numEpochs = 100
model_save_interval = 5
batchSize = 128
for k in xrange(numEpochs):
progbar = generic_utils.Progbar(len(questions_train))
for qu_batch,an_batch,im_batch in zip(grouper(questions_train, batchSize, fillvalue=questions_train[0]),
grouper(answers_train, batchSize, fillvalue=answers_train[0]),
grouper(images_train, batchSize, fillvalue=images_train[0])):
timesteps = len(nlp(qu_batch[-1]))
X_q_batch = get_questions_tensor_timeseries(qu_batch, nlp, timesteps)
Y_batch = get_answers_matrix(an_batch, labelencoder)
loss = model.train_on_batch(X_q_batch, Y_batch)
progbar.add(batchSize, values=[("train loss", loss)])
if k%model_save_interval == 0:
model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k))
model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k+1))
if __name__ == "__main__":
main()
| false | true |
1c4665040f1d2e9fa827001815877d94a40df77a | 4,964 | py | Python | pypureclient/flasharray/FA_2_7/models/policy_rule_snapshot_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_7/models/policy_rule_snapshot_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_7/models/policy_rule_snapshot_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class PolicyRuleSnapshotGetResponse(object):
    """Paged response wrapper for policy-rule snapshot queries.

    Attributes:
        swagger_types (dict): Maps each attribute name to its declared type.
        attribute_map (dict): Maps each attribute name to its JSON key.
    """
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[PolicyRuleSnapshot]'
    }
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.PolicyRuleSnapshot]
    ):
        """
        Keyword args:
            more_items_remaining (bool): `true` if subsequent items can be retrieved.
            total_item_count (int): Total record count after filtering; `null`
                unless the `total_item_count` query parameter was `true`.
            continuation_token (str): Token for fetching the next page of data.
            items (list[PolicyRuleSnapshot]): All items remaining after filtering.
        """
        # Only explicitly supplied values are stored, so hasattr() later
        # reflects exactly which fields the response carried.
        supplied = {
            'more_items_remaining': more_items_remaining,
            'total_item_count': total_item_count,
            'continuation_token': continuation_token,
            'items': items,
        }
        for name, value in supplied.items():
            if value is not None:
                setattr(self, name, value)
    def __setattr__(self, key, value):
        """Reject any attribute that is not part of the declared model."""
        if key in self.attribute_map:
            self.__dict__[key] = value
        else:
            raise KeyError("Invalid key `{}` for `PolicyRuleSnapshotGetResponse`".format(key))
    def __getattribute__(self, item):
        """Expose unset Property placeholders as missing attributes."""
        value = object.__getattribute__(self, item)
        if not isinstance(value, Property):
            return value
        raise AttributeError
    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialize nested models inside lists and dicts.
            if isinstance(value, list):
                return [elem.to_dict() if hasattr(elem, "to_dict") else elem
                        for elem in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value
        result = {}
        for attr in self.swagger_types:
            if hasattr(self, attr):
                result[attr] = _convert(getattr(self, attr))
        if issubclass(PolicyRuleSnapshotGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, PolicyRuleSnapshotGetResponse)
                and self.__dict__ == other.__dict__)
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 38.184615 | 524 | 0.615834 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class PolicyRuleSnapshotGetResponse(object):
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[PolicyRuleSnapshot]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None,
total_item_count=None,
continuation_token=None,
items=None,
):
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyRuleSnapshotGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyRuleSnapshotGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PolicyRuleSnapshotGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c466520334bf6074bf6400dd3e06d73f8b1465a | 6,434 | py | Python | electroncash_plugins/fusion/connection.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 208 | 2017-07-25T19:52:15.000Z | 2018-09-21T13:44:58.000Z | electroncash_plugins/fusion/connection.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 1,478 | 2018-09-24T09:30:13.000Z | 2022-03-29T15:48:17.000Z | electroncash_plugins/fusion/connection.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 159 | 2018-09-24T12:56:47.000Z | 2022-03-28T23:52:17.000Z | #!/usr/bin/env python3
#
# Electron Cash - a lightweight Bitcoin Cash client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Message-based communications system for CashFusion.
This only implements a framing protocol:
<8 byte magic><4 byte length (big endian) of message><message>
<8 byte magic><4 byte length (big endian) of message><message>
...
<8 byte magic><4 byte length (big endian) of message><message>
"""
import certifi
import socket
import socks
import ssl
import time
from contextlib import suppress
sslcontext = ssl.create_default_context(cafile=certifi.where())
class BadFrameError(Exception):
    """Raised when an incoming frame fails magic or length validation."""
def open_connection(host, port, conn_timeout=5.0, default_timeout=5.0, ssl=False, socks_opts=None):
    """Connect to ``host:port`` and wrap the socket in a `Connection`.

    A direct connection (`socket.create_connection`) is made when
    `socks_opts` is None; otherwise the connection goes through a SOCKS
    proxy via `socks.create_connection`, with `socks_opts` passed through
    as keyword arguments. When `ssl` is true, an SSL tunnel is layered on
    top of whichever transport was established.

    Note: the ``ssl`` parameter shadows the ``ssl`` module inside this
    function; the module itself is not referenced here.

    On any failure the underlying socket is closed before the exception
    propagates.
    """
    if socks_opts is not None:
        raw_sock = socks.create_connection((host, port), timeout=conn_timeout, **socks_opts)
    else:
        raw_sock = socket.create_connection((host, port), timeout=conn_timeout)
    sock = raw_sock
    if ssl:
        try:
            sock = sslcontext.wrap_socket(raw_sock, server_hostname=host)
        except BaseException:
            raw_sock.close()
            raise
    try:
        return Connection(sock, default_timeout)
    except BaseException:
        sock.close()
        raise
class Connection:
    """Framed message transport over an already-connected socket.

    Wire format of every message:
        <8-byte magic><4-byte big-endian payload length><payload>
    """
    # Message length limit. Anything longer is considered to be a malicious server.
    # The all-initial-commitments and all-components messages can be big (~100 kB in large fusions).
    MAX_MSG_LENGTH = 200*1024
    # Frame preamble; frames whose first 8 bytes differ are rejected.
    magic = bytes.fromhex("765be8b4e4396dcf")
    def __init__(self, socket, timeout):
        # NOTE: the parameter name `socket` shadows the `socket` module here;
        # inside this method, `socket` is the passed-in socket object.
        self.socket = socket
        self.timeout = timeout  # default timeout used by send/recv when none given
        socket.settimeout(timeout)
        self.recvbuf = bytearray()  # unconsumed bytes carried across recv_message calls
    def __enter__(self):
        # Context management is delegated straight to the underlying socket.
        self.socket.__enter__()
    def __exit__(self, etype, evalue, traceback):
        self.socket.__exit__(etype, evalue, traceback)
    def send_message(self, msg, timeout = None):
        """ Sends message; if this times out, the connection should be
        abandoned since it's not possible to know how much data was sent.
        """
        lengthbytes = len(msg).to_bytes(4, byteorder='big')
        frame = self.magic + lengthbytes + msg
        if timeout is None:
            timeout = self.timeout
        self.socket.settimeout(timeout)
        try:
            self.socket.sendall(frame)
        except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
            # Non-blocking TLS errors are surfaced uniformly as timeouts.
            raise socket.timeout from e
    def recv_message(self, timeout = None):
        """ Read message, default timeout is self.timeout.
        If it times out, behaviour is well defined in that no data is lost,
        and the next call will functions properly.
        """
        if timeout is None:
            timeout = self.timeout
        if timeout is None:
            # No deadline at all: settimeout(None) puts the socket in blocking mode.
            max_time = None
            self.socket.settimeout(timeout)
        else:
            # Overall deadline for the whole message, spanning multiple recv() calls.
            max_time = time.monotonic() + timeout
        recvbuf = self.recvbuf
        def fillbuf(n):
            # read until recvbuf contains at least n bytes
            while True:
                if len(recvbuf) >= n:
                    return
                if max_time is not None:
                    remtime = max_time - time.monotonic()
                    if remtime < 0:
                        raise socket.timeout
                    # Shrink the per-recv timeout to whatever deadline remains.
                    self.socket.settimeout(remtime)
                try:
                    data = self.socket.recv(65536)
                except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
                    # these SSL errors should be reported as a timeout
                    raise socket.timeout from e
                if not data:
                    # Peer closed the connection; distinguish mid-frame EOF.
                    if self.recvbuf:
                        raise ConnectionError("Connection ended mid-message.")
                    else:
                        raise ConnectionError("Connection ended while awaiting message.")
                recvbuf.extend(data)
        try:
            fillbuf(12)  # header = magic (8) + length (4)
            magic = recvbuf[:8]
            if magic != self.magic:
                raise BadFrameError("Bad magic in frame: {}".format(magic.hex()))
            message_length = int.from_bytes(recvbuf[8:12], byteorder='big')
            if message_length > self.MAX_MSG_LENGTH:
                raise BadFrameError("Got a frame with msg_length={} > {} (max)".format(message_length, self.MAX_MSG_LENGTH))
            fillbuf(12 + message_length)
            # we have a complete message
            message = bytes(recvbuf[12:12 + message_length])
            # Only the consumed frame is dropped; partially received bytes of
            # the next frame stay buffered, so a timeout never loses data.
            del recvbuf[:12 + message_length]
            return message
        finally:
            # Restore the default timeout; ignore errors on a dead socket.
            with suppress(OSError):
                self.socket.settimeout(self.timeout)
    def close(self):
        with suppress(OSError):
            # Timeout is restored before shutdown — presumably so shutdown()
            # cannot hang on a socket left in a shortened/odd timeout state;
            # TODO(review): confirm the intent.
            self.socket.settimeout(self.timeout)
            self.socket.shutdown(socket.SHUT_RDWR)
        with suppress(OSError):
            self.socket.close()
| 35.744444 | 124 | 0.639882 |
import certifi
import socket
import socks
import ssl
import time
from contextlib import suppress
sslcontext = ssl.create_default_context(cafile=certifi.where())
class BadFrameError(Exception):
pass
def open_connection(host, port, conn_timeout = 5.0, default_timeout = 5.0, ssl = False, socks_opts=None):
if socks_opts is None:
bare_socket = socket.create_connection((host, port), timeout=conn_timeout)
else:
bare_socket = socks.create_connection((host, port), timeout=conn_timeout, **socks_opts)
if ssl:
try:
conn_socket = sslcontext.wrap_socket(bare_socket, server_hostname=host)
except:
bare_socket.close()
raise
else:
conn_socket = bare_socket
try:
return Connection(conn_socket, default_timeout)
except:
conn_socket.close()
raise
class Connection:
MAX_MSG_LENGTH = 200*1024
magic = bytes.fromhex("765be8b4e4396dcf")
def __init__(self, socket, timeout):
self.socket = socket
self.timeout = timeout
socket.settimeout(timeout)
self.recvbuf = bytearray()
def __enter__(self):
self.socket.__enter__()
def __exit__(self, etype, evalue, traceback):
self.socket.__exit__(etype, evalue, traceback)
def send_message(self, msg, timeout = None):
lengthbytes = len(msg).to_bytes(4, byteorder='big')
frame = self.magic + lengthbytes + msg
if timeout is None:
timeout = self.timeout
self.socket.settimeout(timeout)
try:
self.socket.sendall(frame)
except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
raise socket.timeout from e
def recv_message(self, timeout = None):
if timeout is None:
timeout = self.timeout
if timeout is None:
max_time = None
self.socket.settimeout(timeout)
else:
max_time = time.monotonic() + timeout
recvbuf = self.recvbuf
def fillbuf(n):
while True:
if len(recvbuf) >= n:
return
if max_time is not None:
remtime = max_time - time.monotonic()
if remtime < 0:
raise socket.timeout
self.socket.settimeout(remtime)
try:
data = self.socket.recv(65536)
except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
raise socket.timeout from e
if not data:
if self.recvbuf:
raise ConnectionError("Connection ended mid-message.")
else:
raise ConnectionError("Connection ended while awaiting message.")
recvbuf.extend(data)
try:
fillbuf(12)
magic = recvbuf[:8]
if magic != self.magic:
raise BadFrameError("Bad magic in frame: {}".format(magic.hex()))
message_length = int.from_bytes(recvbuf[8:12], byteorder='big')
if message_length > self.MAX_MSG_LENGTH:
raise BadFrameError("Got a frame with msg_length={} > {} (max)".format(message_length, self.MAX_MSG_LENGTH))
fillbuf(12 + message_length)
message = bytes(recvbuf[12:12 + message_length])
del recvbuf[:12 + message_length]
return message
finally:
with suppress(OSError):
self.socket.settimeout(self.timeout)
def close(self):
with suppress(OSError):
self.socket.settimeout(self.timeout)
self.socket.shutdown(socket.SHUT_RDWR)
with suppress(OSError):
self.socket.close()
| true | true |
1c46653ea6548a2a103e0f864db3564637b4a532 | 6,220 | py | Python | tests/test_validators.py | wigeria/selenium-yaml-core | 9f953a24ad6f47d0a8423ec78f2e8d29babff89a | [
"Apache-2.0"
] | 2 | 2020-06-28T11:08:20.000Z | 2021-12-01T13:12:11.000Z | tests/test_validators.py | wigeria/selenium-yaml-core | 9f953a24ad6f47d0a8423ec78f2e8d29babff89a | [
"Apache-2.0"
] | 5 | 2020-10-12T13:02:20.000Z | 2021-05-20T14:04:14.000Z | tests/test_validators.py | wigeria/selenium-yaml-core | 9f953a24ad6f47d0a8423ec78f2e8d29babff89a | [
"Apache-2.0"
] | null | null | null | """
Contains tests for the base Validators included in selenium_yaml.validators
"""
import os
import tempfile

from selenium_yaml import validators
class ValidationTestMixin:
    """Shared assertion helpers for exercising validator instances."""

    def is_successful_validation(self, validator, value):
        """Assert that ``validator`` accepts ``value`` and reports no error.

        Parameters
        ----------
        validator : An instance derived from selenium_yaml.validators.Validator
        value : The value handed to the validator
        """
        outcome = validator.is_valid(value)
        assert outcome is True
        error = validator.error
        assert isinstance(error, str)
        assert not error

    def is_unsuccessful_validation(self, validator, value):
        """Assert that ``validator`` rejects ``value`` and reports errors.

        Parameters
        ----------
        validator : An instance derived from selenium_yaml.validators.Validator
        value : The value handed to the validator
        """
        outcome = validator.is_valid(value)
        assert outcome is False
        errors = validator.error
        assert isinstance(errors, list)
        assert len(errors) > 0
class TestRequiredValidation(ValidationTestMixin):
    """Exercises RequiredValidator against null and non-null values."""

    def test_required_on_null(self):
        """A ``None`` value must fail validation and populate ``error``."""
        self.is_unsuccessful_validation(validators.RequiredValidator(), None)

    def test_required_on_non_null_values(self):
        """Any non-null value must pass validation with a blank error."""
        validator = validators.RequiredValidator()
        for candidate in ("Valid Value", 100, True, [], ["Crazy"]):
            self.is_successful_validation(validator, candidate)
class TestMaxLengthValidation(ValidationTestMixin):
    """Exercises MaxLengthValidator on len-less, too-long and fitting values."""

    def test_max_length_on_no_len(self):
        """Values without ``len()`` support (e.g. ints) must fail."""
        self.is_unsuccessful_validation(validators.MaxLengthValidator(length=3), 0)

    def test_max_length_on_greater_len(self):
        """Values longer than the configured maximum must fail."""
        validator = validators.MaxLengthValidator(length=3)
        for candidate in ("Test", [1, 2, 3, 4]):
            self.is_unsuccessful_validation(validator, candidate)

    def test_max_length_on_valid_len(self):
        """Values at or under the configured maximum must pass."""
        validator = validators.MaxLengthValidator(length=3)
        for candidate in ("XYZ", [1, 2]):
            self.is_successful_validation(validator, candidate)
class TestTypeValidation(ValidationTestMixin):
    """Exercises TypeValidator with matching and non-matching types."""

    def test_validator_on_non_matching_type(self):
        """Values whose type differs from ``field_type`` must fail."""
        validator = validators.TypeValidator(field_type=str)
        for candidate in (1, False):
            self.is_unsuccessful_validation(validator, candidate)

    def test_validator_on_matching_type(self):
        """Values of the configured ``field_type`` must pass."""
        validator = validators.TypeValidator(field_type=str)
        for candidate in ("This", "Is", "Valid"):
            self.is_successful_validation(validator, candidate)
class TestOptionsValidation(ValidationTestMixin):
    """Exercises OptionsValidator membership checks."""

    def test_validator_on_non_member(self):
        """A value outside the allowed options must fail."""
        self.is_unsuccessful_validation(validators.OptionsValidator(options=[1, 2]), 3)

    def test_validator_on_member(self):
        """A value inside the allowed options must pass."""
        self.is_successful_validation(validators.OptionsValidator(options=[1, 2]), 1)
class TestFilePathValidation(ValidationTestMixin):
    """ Tests that the FilePath validator is only valid when the given
        value is a valid file path
    """
    def test_validator_on_invalid_filepath(self):
        """ Tests that the validation fails on non-existent fpaths """
        # A fresh temporary directory is guaranteed empty, so any path inside
        # it is guaranteed not to exist — unlike the old CWD-relative
        # "thispathshouldnotexist.txt", which could theoretically exist.
        with tempfile.TemporaryDirectory() as tmp_dir:
            value = os.path.join(tmp_dir, "thispathshouldnotexist.txt")
            validator = validators.FilePathValidator()
            self.is_unsuccessful_validation(validator, value)
    def test_validator_on_valid_filepath(self):
        """ Tests that the validation succeeds on valid fpaths """
        # Create a real file instead of relying on ``.gitignore`` existing in
        # the current working directory (which breaks outside the repo root).
        with tempfile.TemporaryDirectory() as tmp_dir:
            value = os.path.join(tmp_dir, "exists.txt")
            with open(value, "w") as file_handle:
                file_handle.write("content")
            validator = validators.FilePathValidator()
            self.is_successful_validation(validator, value)
class TestResolvedVariableValidation(ValidationTestMixin):
    """Exercises ResolvedVariableValidator on a resolved-variable literal."""

    def test_validator_on_resolved_var(self):
        """A ``${var}``-style string must be treated as valid."""
        validator = validators.ResolvedVariableValidator()
        self.is_successful_validation(validator, "${resolved_var}")
| 39.119497 | 79 | 0.671383 |
from selenium_yaml import validators
import os
class ValidationTestMixin:
def is_successful_validation(self, validator, value):
assert validator.is_valid(value) is True
assert isinstance(validator.error, str)
assert len(validator.error) == 0
def is_unsuccessful_validation(self, validator, value):
assert validator.is_valid(value) is False
assert isinstance(validator.error, list)
assert len(validator.error) > 0
class TestRequiredValidation(ValidationTestMixin):
def test_required_on_null(self):
validator = validators.RequiredValidator()
self.is_unsuccessful_validation(validator, None)
def test_required_on_non_null_values(self):
validator = validators.RequiredValidator()
valid_values = ["Valid Value", 100, True, [], ["Crazy"]]
for value in valid_values:
self.is_successful_validation(validator, value)
class TestMaxLengthValidation(ValidationTestMixin):
def test_max_length_on_no_len(self):
validator = validators.MaxLengthValidator(length=3)
self.is_unsuccessful_validation(validator, 0)
def test_max_length_on_greater_len(self):
invalid_values = ["Test", [1, 2, 3, 4]]
validator = validators.MaxLengthValidator(length=3)
for value in invalid_values:
self.is_unsuccessful_validation(validator, value)
def test_max_length_on_valid_len(self):
valid_values = ["XYZ", [1, 2]]
validator = validators.MaxLengthValidator(length=3)
for value in valid_values:
self.is_successful_validation(validator, value)
class TestTypeValidation(ValidationTestMixin):
def test_validator_on_non_matching_type(self):
invalid_values = [1, False]
validator = validators.TypeValidator(field_type=str)
for value in invalid_values:
self.is_unsuccessful_validation(validator, value)
def test_validator_on_matching_type(self):
valid_values = ["This", "Is", "Valid"]
validator = validators.TypeValidator(field_type=str)
for value in valid_values:
self.is_successful_validation(validator, value)
class TestOptionsValidation(ValidationTestMixin):
def test_validator_on_non_member(self):
options = [1, 2]
validator = validators.OptionsValidator(options=options)
self.is_unsuccessful_validation(validator, 3)
def test_validator_on_member(self):
options = [1, 2]
validator = validators.OptionsValidator(options=options)
self.is_successful_validation(validator, 1)
class TestFilePathValidation(ValidationTestMixin):
def test_validator_on_invalid_filepath(self):
value = os.path.join(os.getcwd(), "thispathshouldnotexist.txt")
validator = validators.FilePathValidator()
self.is_unsuccessful_validation(validator, value)
def test_validator_on_valid_filepath(self):
value = os.path.join(os.getcwd(), ".gitignore")
validator = validators.FilePathValidator()
self.is_successful_validation(validator, value)
class TestResolvedVariableValidation(ValidationTestMixin):
def test_validator_on_resolved_var(self):
value = "${resolved_var}"
validator = validators.ResolvedVariableValidator()
self.is_successful_validation(validator, value)
| true | true |
1c46658b4f19b64b4fc2645b72a26baec8f56676 | 1,490 | py | Python | examples/python-guide/sklearn_example.py | harunpehlivan/LightGBM | 8ba65be9c93b79c095ea06e74de2cc5bf35ab169 | [
"MIT"
] | 59 | 2017-03-09T15:33:52.000Z | 2021-09-16T05:47:10.000Z | examples/python-guide/sklearn_example.py | harunpehlivan/LightGBM | 8ba65be9c93b79c095ea06e74de2cc5bf35ab169 | [
"MIT"
] | 1 | 2017-03-09T07:43:02.000Z | 2017-04-09T19:34:06.000Z | examples/python-guide/sklearn_example.py | harunpehlivan/LightGBM | 8ba65be9c93b79c095ea06e74de2cc5bf35ab169 | [
"MIT"
] | 17 | 2017-03-27T06:37:47.000Z | 2020-05-28T09:17:38.000Z | # coding: utf-8
# pylint: disable = invalid-name, C0111
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
# load or create your dataset
print('Load data...')
# The example regression files are tab-separated with no header row;
# column 0 holds the target and the remaining columns are features.
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
print('Start training...')
# train with early stopping against the held-out test set (l1 metric)
gbm = lgb.LGBMRegressor(objective='regression',
                        num_leaves=31,
                        learning_rate=0.05,
                        n_estimators=20)
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        early_stopping_rounds=5)
print('Start predicting...')
# predict using the iteration chosen by early stopping
# NOTE(review): newer LightGBM sklearn wrappers expose this as
# `best_iteration_` — confirm against the pinned LightGBM version.
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval — RMSE is the square root of sklearn's mean_squared_error
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Calculate feature importances...')
# feature importances
print('Feature importances:', list(gbm.feature_importances_))
# other scikit-learn modules
# LightGBM's sklearn-style estimator plugs straight into GridSearchCV
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
| 28.653846 | 79 | 0.699329 |
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
print('Start training...')
gbm = lgb.LGBMRegressor(objective='regression',
num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Start predicting...')
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Calculate feature importances...')
print('Feature importances:', list(gbm.feature_importances_))
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
| true | true |
1c4665f87ee43e6b0bf3543beac6d08684c8b95e | 1,638 | py | Python | examples/01-filter/decimate.py | sthagen/pyvista-pyvista | 450db9a8d8ad2feea78e10368c47d9aa0e575e65 | [
"MIT"
] | 25 | 2018-12-03T18:22:58.000Z | 2019-02-26T01:30:35.000Z | examples/01-filter/decimate.py | sthagen/pyvista | ffba268b285925eb6103c8ff5072fcf1c0212c53 | [
"MIT"
] | 108 | 2019-02-27T19:52:12.000Z | 2019-05-08T02:15:21.000Z | examples/01-filter/decimate.py | pyvista/vista | c49a6abae7cc62d242f12ec45a6b22b524db1ec8 | [
"MIT"
] | 8 | 2019-03-02T13:41:48.000Z | 2019-04-22T16:57:44.000Z | """
.. _decimate_example:
Decimation
~~~~~~~~~~
Decimate a mesh
"""
# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples
mesh = examples.download_face()
# Define a camera position that shows this mesh properly
cpos = [(0.4, -0.07, -0.31), (0.05, -0.13, -0.06), (-0.1, 1, 0.08)]
dargs = dict(show_edges=True, color=True)
# Preview the mesh
mesh.plot(cpos=cpos, **dargs)
###############################################################################
# Now let's define a target reduction and compare the
# :func:`pyvista.PolyData.decimate` and :func:`pyvista.PolyData.decimate_pro`
# filters.
target_reduction = 0.7
print(f"Reducing {target_reduction * 100.0} percent out of the original mesh")
###############################################################################
decimated = mesh.decimate(target_reduction)
decimated.plot(cpos=cpos, **dargs)
###############################################################################
pro_decimated = mesh.decimate_pro(target_reduction, preserve_topology=True)
pro_decimated.plot(cpos=cpos, **dargs)
###############################################################################
# Side by side comparison:
p = pv.Plotter(shape=(1, 3))
p.add_mesh(mesh, **dargs)
p.add_text("Input mesh", font_size=24)
p.camera_position = cpos
p.reset_camera()
p.subplot(0, 1)
p.add_mesh(decimated, **dargs)
p.add_text("Decimated mesh", font_size=24)
p.camera_position = cpos
p.reset_camera()
p.subplot(0, 2)
p.add_mesh(pro_decimated, **dargs)
p.add_text("Pro Decimated mesh", font_size=24)
p.camera_position = cpos
p.reset_camera()
p.link_views()
p.show()
| 26.419355 | 79 | 0.589133 |
import pyvista as pv
from pyvista import examples
mesh = examples.download_face()
cpos = [(0.4, -0.07, -0.31), (0.05, -0.13, -0.06), (-0.1, 1, 0.08)]
dargs = dict(show_edges=True, color=True)
mesh.plot(cpos=cpos, **dargs)
| true | true |
1c46662dc412d1a358e2418e8c957b2eb8513589 | 1,984 | py | Python | whitetube/migrations/0001_initial.py | AmanGiri007/youtube | b58009581378bf74cabfd791691dee65c9516685 | [
"MIT"
] | null | null | null | whitetube/migrations/0001_initial.py | AmanGiri007/youtube | b58009581378bf74cabfd791691dee65c9516685 | [
"MIT"
] | null | null | null | whitetube/migrations/0001_initial.py | AmanGiri007/youtube | b58009581378bf74cabfd791691dee65c9516685 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-31 15:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Video, Comment and Channel, each owned by AUTH_USER_MODEL.

    Auto-generated by Django (see header comment); edit with care — the field
    order and definitions here are part of the recorded migration state.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Uploaded videos; ``path`` stores the file location as plain text.
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('description', models.TextField(max_length=300)),
                ('path', models.CharField(max_length=60)),
                ('datetime', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Comments belong to both an author (user) and a video; cascade on delete.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(max_length=300)),
                ('datetime', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whitetube.Video')),
            ],
        ),
        # One channel row per owning user; subscriber count starts at zero.
        migrations.CreateModel(
            name='Channel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('channel_name', models.CharField(max_length=50)),
                ('subscribers', models.IntegerField(default=0)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 41.333333 | 118 | 0.602319 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('description', models.TextField(max_length=300)),
('path', models.CharField(max_length=60)),
('datetime', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=300)),
('datetime', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whitetube.Video')),
],
),
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('channel_name', models.CharField(max_length=50)),
('subscribers', models.IntegerField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
1c4666b57524192be8803ca0b7c97e5673ce1bbb | 7,276 | py | Python | telethon/password.py | bb010g/Telethon | 278f0e9e983d938589b6d541e71135ad5b6857c5 | [
"MIT"
] | 2 | 2021-04-29T14:19:25.000Z | 2021-09-17T07:13:49.000Z | telethon/password.py | exceloo/Telethon | 30a0e390603072d3ec57a2f0eef0a297a9b0321b | [
"MIT"
] | 5 | 2021-04-30T21:14:18.000Z | 2022-03-12T00:21:58.000Z | telethon/password.py | exceloo/Telethon | 30a0e390603072d3ec57a2f0eef0a297a9b0321b | [
"MIT"
] | 1 | 2020-04-16T22:02:26.000Z | 2020-04-16T22:02:26.000Z | import hashlib
import os
from .crypto import factorization
from .tl import types
def check_prime_and_good_check(prime: int, g: int):
    """Fully validate a 2048-bit safe prime against generator ``g``.

    Raises ``ValueError`` if the size, primality, generator residue, or
    safe-prime ((p - 1) / 2 prime) condition fails; returns ``None`` otherwise.
    """
    good_prime_bits_count = 2048
    if prime < 0 or prime.bit_length() != good_prime_bits_count:
        raise ValueError('bad prime count {}, expected {}'
                         .format(prime.bit_length(), good_prime_bits_count))
    # TODO This is awfully slow
    if factorization.Factorization.factorize(prime)[0] != 1:
        raise ValueError('given "prime" is not prime')
    # Per-generator congruence requirements: g -> (modulus, allowed residues).
    # g == 4 imposes no extra constraint, hence the ``None`` entry.
    residue_rules = {
        2: (8, (7,)),
        3: (3, (2,)),
        4: None,
        5: (5, (1, 4)),
        6: (24, (19, 23)),
        7: (7, (3, 5, 6)),
    }
    if g not in residue_rules:
        raise ValueError('bad g {}'.format(g))
    rule = residue_rules[g]
    if rule is not None:
        modulus, allowed = rule
        if prime % modulus not in allowed:
            raise ValueError(
                'bad g {}, mod{} {}'.format(g, modulus, prime % modulus))
    prime_sub1_div2 = (prime - 1) // 2
    if factorization.Factorization.factorize(prime_sub1_div2)[0] != 1:
        raise ValueError('(prime - 1) // 2 is not prime')
def check_prime_and_good(prime_bytes: bytes, g: int):
    """Validate a server-provided prime/generator pair.

    Fast-paths the well-known good prime constant below (skipping the very
    slow factorization in ``check_prime_and_good_check``) when ``g`` is one
    of its known-good generators; otherwise runs the full validation.
    """
    # Known-good 2048-bit prime shipped with the library; matching it byte-for-
    # byte lets us skip the expensive primality checks.
    good_prime = bytes((
        0xC7, 0x1C, 0xAE, 0xB9, 0xC6, 0xB1, 0xC9, 0x04, 0x8E, 0x6C, 0x52, 0x2F, 0x70, 0xF1, 0x3F, 0x73,
        0x98, 0x0D, 0x40, 0x23, 0x8E, 0x3E, 0x21, 0xC1, 0x49, 0x34, 0xD0, 0x37, 0x56, 0x3D, 0x93, 0x0F,
        0x48, 0x19, 0x8A, 0x0A, 0xA7, 0xC1, 0x40, 0x58, 0x22, 0x94, 0x93, 0xD2, 0x25, 0x30, 0xF4, 0xDB,
        0xFA, 0x33, 0x6F, 0x6E, 0x0A, 0xC9, 0x25, 0x13, 0x95, 0x43, 0xAE, 0xD4, 0x4C, 0xCE, 0x7C, 0x37,
        0x20, 0xFD, 0x51, 0xF6, 0x94, 0x58, 0x70, 0x5A, 0xC6, 0x8C, 0xD4, 0xFE, 0x6B, 0x6B, 0x13, 0xAB,
        0xDC, 0x97, 0x46, 0x51, 0x29, 0x69, 0x32, 0x84, 0x54, 0xF1, 0x8F, 0xAF, 0x8C, 0x59, 0x5F, 0x64,
        0x24, 0x77, 0xFE, 0x96, 0xBB, 0x2A, 0x94, 0x1D, 0x5B, 0xCD, 0x1D, 0x4A, 0xC8, 0xCC, 0x49, 0x88,
        0x07, 0x08, 0xFA, 0x9B, 0x37, 0x8E, 0x3C, 0x4F, 0x3A, 0x90, 0x60, 0xBE, 0xE6, 0x7C, 0xF9, 0xA4,
        0xA4, 0xA6, 0x95, 0x81, 0x10, 0x51, 0x90, 0x7E, 0x16, 0x27, 0x53, 0xB5, 0x6B, 0x0F, 0x6B, 0x41,
        0x0D, 0xBA, 0x74, 0xD8, 0xA8, 0x4B, 0x2A, 0x14, 0xB3, 0x14, 0x4E, 0x0E, 0xF1, 0x28, 0x47, 0x54,
        0xFD, 0x17, 0xED, 0x95, 0x0D, 0x59, 0x65, 0xB4, 0xB9, 0xDD, 0x46, 0x58, 0x2D, 0xB1, 0x17, 0x8D,
        0x16, 0x9C, 0x6B, 0xC4, 0x65, 0xB0, 0xD6, 0xFF, 0x9C, 0xA3, 0x92, 0x8F, 0xEF, 0x5B, 0x9A, 0xE4,
        0xE4, 0x18, 0xFC, 0x15, 0xE8, 0x3E, 0xBE, 0xA0, 0xF8, 0x7F, 0xA9, 0xFF, 0x5E, 0xED, 0x70, 0x05,
        0x0D, 0xED, 0x28, 0x49, 0xF4, 0x7B, 0xF9, 0x59, 0xD9, 0x56, 0x85, 0x0C, 0xE9, 0x29, 0x85, 0x1F,
        0x0D, 0x81, 0x15, 0xF6, 0x35, 0xB1, 0x05, 0xEE, 0x2E, 0x4E, 0x15, 0xD0, 0x4B, 0x24, 0x54, 0xBF,
        0x6F, 0x4F, 0xAD, 0xF0, 0x34, 0xB1, 0x04, 0x03, 0x11, 0x9C, 0xD8, 0xE3, 0xB9, 0x2F, 0xCC, 0x5B))
    if good_prime == prime_bytes:
        if g in (3, 4, 5, 7):
            return  # It's good
    # Unknown prime (or unusual generator): run the slow, exhaustive check.
    check_prime_and_good_check(int.from_bytes(prime_bytes, 'big'), g)
def is_good_large(number: int, p: int) -> bool:
    """Return ``True`` iff ``number`` lies strictly between 0 and ``p``."""
    return 0 < number < p
# Fixed serialization width (in bytes) for every value that gets hashed.
SIZE_FOR_HASH = 256


def num_bytes_for_hash(number: bytes) -> bytes:
    """Left-pad ``number`` with NUL bytes up to ``SIZE_FOR_HASH`` bytes."""
    pad_length = SIZE_FOR_HASH - len(number)
    return bytes(pad_length) + number


def big_num_for_hash(g: int) -> bytes:
    """Serialize ``g`` as a fixed-width 256-byte big-endian integer."""
    return g.to_bytes(SIZE_FOR_HASH, byteorder='big')
def sha256(*p: bytes) -> bytes:
    """Return the SHA-256 digest of all byte chunks concatenated in order.

    Fix: the accumulator local was named ``hash``, shadowing the builtin;
    renamed to ``digest`` (behavior unchanged).
    """
    digest = hashlib.sha256()
    for chunk in p:
        digest.update(chunk)
    return digest.digest()
def is_good_mod_exp_first(modexp, prime) -> bool:
    """Sanity-check a modular exponentiation result against its modulus.

    Both ``modexp`` and ``prime - modexp`` must carry at least 1984 bits,
    and ``modexp`` must fit in 256 bytes.
    """
    min_diff_bits_count = 2048 - 64
    max_mod_exp_size = 256
    difference = prime - modexp
    if difference < 0:
        return False
    if difference.bit_length() < min_diff_bits_count:
        return False
    if modexp.bit_length() < min_diff_bits_count:
        return False
    if (modexp.bit_length() + 7) // 8 > max_mod_exp_size:
        return False
    return True
def xor(a: bytes, b: bytes) -> bytes:
    """Byte-wise XOR of two byte strings, truncated to the shorter one."""
    result = bytearray()
    for left, right in zip(a, b):
        result.append(left ^ right)
    return bytes(result)
def pbkdf2sha512(password: bytes, salt: bytes, iterations: int):
    """Derive a key from ``password`` via PBKDF2-HMAC over SHA-512."""
    digest_name = 'sha512'
    return hashlib.pbkdf2_hmac(digest_name, password, salt, iterations)
def compute_hash(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
                 password: str):
    """Derive the SRP secret ``x`` bytes from the password and both salts.

    Pipeline: salt1-wrapped SHA-256, salt2-wrapped SHA-256, 100k-iteration
    PBKDF2-HMAC-SHA512, then a final salt2-wrapped SHA-256.
    """
    inner = sha256(algo.salt1, password.encode('utf-8'), algo.salt1)
    outer = sha256(algo.salt2, inner, algo.salt2)
    stretched = pbkdf2sha512(outer, algo.salt1, 100000)
    return sha256(algo.salt2, stretched, algo.salt2)
def compute_digest(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
                   password: str):
    """Return ``g ** H(password) mod p`` as a fixed-width 256-byte value.

    Raises ``ValueError`` when the server-supplied prime/generator fail
    validation.
    """
    try:
        check_prime_and_good(algo.p, algo.g)
    except ValueError:
        raise ValueError('bad p/g in password')
    exponent = int.from_bytes(compute_hash(algo, password), 'big')
    modulus = int.from_bytes(algo.p, 'big')
    value = pow(algo.g, exponent, modulus)
    return big_num_for_hash(value)
# https://github.com/telegramdesktop/tdesktop/blob/18b74b90451a7db2379a9d753c9cbaf8734b4d5d/Telegram/SourceFiles/core/core_cloud_password.cpp
def compute_check(request: types.account.Password, password: str):
    """Build the SRP client proof (``InputCheckPasswordSRP``) for a 2FA check.

    Mirrors tdesktop's ``core_cloud_password.cpp`` (URL in the comment above).
    Raises ``ValueError`` when the server-supplied algorithm or parameters
    look unsafe.
    """
    algo = request.current_algo
    if not isinstance(algo, types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow):
        raise ValueError('unsupported password algorithm {}'
                         .format(algo.__class__.__name__))
    # x = KDF(password); p, g and the server public value B come from the server.
    pw_hash = compute_hash(algo, password)
    p = int.from_bytes(algo.p, 'big')
    g = algo.g
    B = int.from_bytes(request.srp_B, 'big')
    try:
        check_prime_and_good(algo.p, g)
    except ValueError:
        raise ValueError('bad p/g in password')
    if not is_good_large(B, p):
        raise ValueError('bad b in check')
    x = int.from_bytes(pw_hash, 'big')
    # Fixed-width big-endian encodings used in every hash below.
    p_for_hash = num_bytes_for_hash(algo.p)
    g_for_hash = big_num_for_hash(g)
    b_for_hash = num_bytes_for_hash(request.srp_B)
    g_x = pow(g, x, p)
    # k = H(p | g), the SRP multiplier parameter.
    k = int.from_bytes(sha256(p_for_hash, g_for_hash), 'big')
    kg_x = (k * g_x) % p
    def generate_and_check_random():
        # Draw a fresh 2048-bit client secret ``a`` until A = g^a mod p passes
        # the size sanity check and u = H(A | B) is non-zero.
        random_size = 256
        import time
        while True:
            random = os.urandom(random_size)
            a = int.from_bytes(random, 'big')
            A = pow(g, a, p)
            if is_good_mod_exp_first(A, p):
                a_for_hash = big_num_for_hash(A)
                u = int.from_bytes(sha256(a_for_hash, b_for_hash), 'big')
                if u > 0:
                    return (a, a_for_hash, u)
            # NOTE(review): debug-looking print plus a 1-second sleep on the
            # (rare) retry path — presumably leftovers; confirm intent.
            print(A, 'bad for', p)
            time.sleep(1)
    a, a_for_hash, u = generate_and_check_random()
    # Strip the k*g^x term from the server value before exponentiating.
    g_b = (B - kg_x) % p
    if not is_good_mod_exp_first(g_b, p):
        raise ValueError('bad g_b')
    ux = u * x
    a_ux = a + ux
    # Shared key S = (B - k*g^x) ^ (a + u*x) mod p; session key K = H(S).
    S = pow(g_b, a_ux, p)
    K = sha256(big_num_for_hash(S))
    # Client proof M1 (SRP proof with the two salts folded in).
    M1 = sha256(
        xor(sha256(p_for_hash), sha256(g_for_hash)),
        sha256(algo.salt1),
        sha256(algo.salt2),
        a_for_hash,
        b_for_hash,
        K
    )
    return types.InputCheckPasswordSRP(
        request.srp_id, bytes(a_for_hash), bytes(M1))
| 36.562814 | 141 | 0.617097 | import hashlib
import os
from .crypto import factorization
from .tl import types
def check_prime_and_good_check(prime: int, g: int):
good_prime_bits_count = 2048
if prime < 0 or prime.bit_length() != good_prime_bits_count:
raise ValueError('bad prime count {}, expected {}'
.format(prime.bit_length(), good_prime_bits_count))
if factorization.Factorization.factorize(prime)[0] != 1:
raise ValueError('given "prime" is not prime')
if g == 2:
if prime % 8 != 7:
raise ValueError('bad g {}, mod8 {}'.format(g, prime % 8))
elif g == 3:
if prime % 3 != 2:
raise ValueError('bad g {}, mod3 {}'.format(g, prime % 3))
elif g == 4:
pass
elif g == 5:
if prime % 5 not in (1, 4):
raise ValueError('bad g {}, mod5 {}'.format(g, prime % 5))
elif g == 6:
if prime % 24 not in (19, 23):
raise ValueError('bad g {}, mod24 {}'.format(g, prime % 24))
elif g == 7:
if prime % 7 not in (3, 5, 6):
raise ValueError('bad g {}, mod7 {}'.format(g, prime % 7))
else:
raise ValueError('bad g {}'.format(g))
prime_sub1_div2 = (prime - 1) // 2
if factorization.Factorization.factorize(prime_sub1_div2)[0] != 1:
raise ValueError('(prime - 1) // 2 is not prime')
def check_prime_and_good(prime_bytes: bytes, g: int):
good_prime = bytes((
0xC7, 0x1C, 0xAE, 0xB9, 0xC6, 0xB1, 0xC9, 0x04, 0x8E, 0x6C, 0x52, 0x2F, 0x70, 0xF1, 0x3F, 0x73,
0x98, 0x0D, 0x40, 0x23, 0x8E, 0x3E, 0x21, 0xC1, 0x49, 0x34, 0xD0, 0x37, 0x56, 0x3D, 0x93, 0x0F,
0x48, 0x19, 0x8A, 0x0A, 0xA7, 0xC1, 0x40, 0x58, 0x22, 0x94, 0x93, 0xD2, 0x25, 0x30, 0xF4, 0xDB,
0xFA, 0x33, 0x6F, 0x6E, 0x0A, 0xC9, 0x25, 0x13, 0x95, 0x43, 0xAE, 0xD4, 0x4C, 0xCE, 0x7C, 0x37,
0x20, 0xFD, 0x51, 0xF6, 0x94, 0x58, 0x70, 0x5A, 0xC6, 0x8C, 0xD4, 0xFE, 0x6B, 0x6B, 0x13, 0xAB,
0xDC, 0x97, 0x46, 0x51, 0x29, 0x69, 0x32, 0x84, 0x54, 0xF1, 0x8F, 0xAF, 0x8C, 0x59, 0x5F, 0x64,
0x24, 0x77, 0xFE, 0x96, 0xBB, 0x2A, 0x94, 0x1D, 0x5B, 0xCD, 0x1D, 0x4A, 0xC8, 0xCC, 0x49, 0x88,
0x07, 0x08, 0xFA, 0x9B, 0x37, 0x8E, 0x3C, 0x4F, 0x3A, 0x90, 0x60, 0xBE, 0xE6, 0x7C, 0xF9, 0xA4,
0xA4, 0xA6, 0x95, 0x81, 0x10, 0x51, 0x90, 0x7E, 0x16, 0x27, 0x53, 0xB5, 0x6B, 0x0F, 0x6B, 0x41,
0x0D, 0xBA, 0x74, 0xD8, 0xA8, 0x4B, 0x2A, 0x14, 0xB3, 0x14, 0x4E, 0x0E, 0xF1, 0x28, 0x47, 0x54,
0xFD, 0x17, 0xED, 0x95, 0x0D, 0x59, 0x65, 0xB4, 0xB9, 0xDD, 0x46, 0x58, 0x2D, 0xB1, 0x17, 0x8D,
0x16, 0x9C, 0x6B, 0xC4, 0x65, 0xB0, 0xD6, 0xFF, 0x9C, 0xA3, 0x92, 0x8F, 0xEF, 0x5B, 0x9A, 0xE4,
0xE4, 0x18, 0xFC, 0x15, 0xE8, 0x3E, 0xBE, 0xA0, 0xF8, 0x7F, 0xA9, 0xFF, 0x5E, 0xED, 0x70, 0x05,
0x0D, 0xED, 0x28, 0x49, 0xF4, 0x7B, 0xF9, 0x59, 0xD9, 0x56, 0x85, 0x0C, 0xE9, 0x29, 0x85, 0x1F,
0x0D, 0x81, 0x15, 0xF6, 0x35, 0xB1, 0x05, 0xEE, 0x2E, 0x4E, 0x15, 0xD0, 0x4B, 0x24, 0x54, 0xBF,
0x6F, 0x4F, 0xAD, 0xF0, 0x34, 0xB1, 0x04, 0x03, 0x11, 0x9C, 0xD8, 0xE3, 0xB9, 0x2F, 0xCC, 0x5B))
if good_prime == prime_bytes:
if g in (3, 4, 5, 7):
return # It's good
check_prime_and_good_check(int.from_bytes(prime_bytes, 'big'), g)
def is_good_large(number: int, p: int) -> bool:
return number > 0 and p - number > 0
SIZE_FOR_HASH = 256
def num_bytes_for_hash(number: bytes) -> bytes:
return bytes(SIZE_FOR_HASH - len(number)) + number
def big_num_for_hash(g: int) -> bytes:
return g.to_bytes(SIZE_FOR_HASH, 'big')
def sha256(*p: bytes) -> bytes:
hash = hashlib.sha256()
for q in p:
hash.update(q)
return hash.digest()
def is_good_mod_exp_first(modexp, prime) -> bool:
diff = prime - modexp
min_diff_bits_count = 2048 - 64
max_mod_exp_size = 256
if diff < 0 or \
diff.bit_length() < min_diff_bits_count or \
modexp.bit_length() < min_diff_bits_count or \
(modexp.bit_length() + 7) // 8 > max_mod_exp_size:
return False
return True
def xor(a: bytes, b: bytes) -> bytes:
return bytes(x ^ y for x, y in zip(a, b))
def pbkdf2sha512(password: bytes, salt: bytes, iterations: int):
return hashlib.pbkdf2_hmac('sha512', password, salt, iterations)
def compute_hash(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str):
hash1 = sha256(algo.salt1, password.encode('utf-8'), algo.salt1)
hash2 = sha256(algo.salt2, hash1, algo.salt2)
hash3 = pbkdf2sha512(hash2, algo.salt1, 100000)
return sha256(algo.salt2, hash3, algo.salt2)
def compute_digest(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str):
try:
check_prime_and_good(algo.p, algo.g)
except ValueError:
raise ValueError('bad p/g in password')
value = pow(algo.g,
int.from_bytes(compute_hash(algo, password), 'big'),
int.from_bytes(algo.p, 'big'))
return big_num_for_hash(value)
def compute_check(request: types.account.Password, password: str):
algo = request.current_algo
if not isinstance(algo, types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow):
raise ValueError('unsupported password algorithm {}'
.format(algo.__class__.__name__))
pw_hash = compute_hash(algo, password)
p = int.from_bytes(algo.p, 'big')
g = algo.g
B = int.from_bytes(request.srp_B, 'big')
try:
check_prime_and_good(algo.p, g)
except ValueError:
raise ValueError('bad p/g in password')
if not is_good_large(B, p):
raise ValueError('bad b in check')
x = int.from_bytes(pw_hash, 'big')
p_for_hash = num_bytes_for_hash(algo.p)
g_for_hash = big_num_for_hash(g)
b_for_hash = num_bytes_for_hash(request.srp_B)
g_x = pow(g, x, p)
k = int.from_bytes(sha256(p_for_hash, g_for_hash), 'big')
kg_x = (k * g_x) % p
def generate_and_check_random():
random_size = 256
import time
while True:
random = os.urandom(random_size)
a = int.from_bytes(random, 'big')
A = pow(g, a, p)
if is_good_mod_exp_first(A, p):
a_for_hash = big_num_for_hash(A)
u = int.from_bytes(sha256(a_for_hash, b_for_hash), 'big')
if u > 0:
return (a, a_for_hash, u)
print(A, 'bad for', p)
time.sleep(1)
a, a_for_hash, u = generate_and_check_random()
g_b = (B - kg_x) % p
if not is_good_mod_exp_first(g_b, p):
raise ValueError('bad g_b')
ux = u * x
a_ux = a + ux
S = pow(g_b, a_ux, p)
K = sha256(big_num_for_hash(S))
M1 = sha256(
xor(sha256(p_for_hash), sha256(g_for_hash)),
sha256(algo.salt1),
sha256(algo.salt2),
a_for_hash,
b_for_hash,
K
)
return types.InputCheckPasswordSRP(
request.srp_id, bytes(a_for_hash), bytes(M1))
| true | true |
1c46679f018a8751f25fbf73754145fb78d2528e | 982 | py | Python | controller/liff_controller.py | louis70109/LIFF-to-LIFF-Example | 1e3d90f7989b5b69090a7b2e3a41c74b3ae3c90b | [
"MIT"
] | null | null | null | controller/liff_controller.py | louis70109/LIFF-to-LIFF-Example | 1e3d90f7989b5b69090a7b2e3a41c74b3ae3c90b | [
"MIT"
] | null | null | null | controller/liff_controller.py | louis70109/LIFF-to-LIFF-Example | 1e3d90f7989b5b69090a7b2e3a41c74b3ae3c90b | [
"MIT"
] | null | null | null | import os
from flask import request, render_template, Response
from flask_restful import Resource
# LIFF app IDs are read from the environment; the SHARE_* URLs are the public
# deep links built from them.
LIFF_A = os.getenv('LIFF_SHARE_A')
LIFF_B = os.getenv('LIFF_SHARE_B')
SHARE_A = f"https://liff.line.me/{LIFF_A}"
SHARE_B = f"https://liff.line.me/{LIFF_B}"
class LiffAController(Resource):
    """Serves LIFF page A, which links onward to page B.

    Fix: removed the boilerplate ``__init__`` that only delegated to
    ``super().__init__`` — the inherited constructor does the same thing.
    """

    def get(self):
        # ``liff.state`` is present on the first hit — presumably set by the
        # LINE client during LIFF initialization (confirm) — so serve the
        # redirect shim page instead of the content.
        if request.args.get('liff.state'):
            return Response(render_template('liff_redirect.html', liff_id=LIFF_A))
        return Response(render_template('a.html', liff_id=LIFF_A, text='AAAAAAAAA', next=SHARE_B))
class LiffBController(Resource):
    """Serves LIFF page B, which links back to page A.

    Fix: removed the boilerplate ``__init__`` that only delegated to
    ``super().__init__`` — the inherited constructor does the same thing.
    """

    def get(self):
        # Same liff.state redirect round-trip as LiffAController.
        if request.args.get('liff.state'):
            return Response(render_template('liff_redirect.html', liff_id=LIFF_B))
        return Response(render_template('b.html', liff_id=LIFF_B, text='BBBBBBBBBB', next=SHARE_A))
| 33.862069 | 99 | 0.689409 | import os
from flask import request, render_template, Response
from flask_restful import Resource
LIFF_A = os.getenv('LIFF_SHARE_A')
LIFF_B = os.getenv('LIFF_SHARE_B')
SHARE_A = f"https://liff.line.me/{LIFF_A}"
SHARE_B = f"https://liff.line.me/{LIFF_B}"
class LiffAController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self):
if request.args.get('liff.state'):
return Response(render_template('liff_redirect.html', liff_id=LIFF_A))
return Response(render_template('a.html', liff_id=LIFF_A, text='AAAAAAAAA', next=SHARE_B))
class LiffBController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self):
if request.args.get('liff.state'):
return Response(render_template('liff_redirect.html', liff_id=LIFF_B))
return Response(render_template('b.html', liff_id=LIFF_B, text='BBBBBBBBBB', next=SHARE_A))
| true | true |
1c4667e358c0ed6a68e36d3ec4c4fde1c83f9ab6 | 2,831 | py | Python | members_only/views.py | TamasPalfi/FixedDB | be3e4e830b05099d33031759f4a7fc8a42f1e733 | [
"BSD-2-Clause"
] | null | null | null | members_only/views.py | TamasPalfi/FixedDB | be3e4e830b05099d33031759f4a7fc8a42f1e733 | [
"BSD-2-Clause"
] | null | null | null | members_only/views.py | TamasPalfi/FixedDB | be3e4e830b05099d33031759f4a7fc8a42f1e733 | [
"BSD-2-Clause"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from members_only.models import User, Post, Comment, Photo, ShortLink
from members_only.serializers import UserSerializer, UserSetupSerializer, PostSerializer, CommentSerializer, PhotoSerializer, ShortLinkSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
# Create your views here.
# Front End Views
def index(request):
    """Render the public landing page."""
    return render(request, "index.html")
def feed(request):
    """Render the activity feed page."""
    return render(request, "feed.html")
# Back End Views
class UserViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for users (newest first) plus a password-setup action."""
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [TokenAuthentication, SessionAuthentication]

    @action(detail=False, methods=['post'], serializer_class=UserSetupSerializer, permission_classes=[])
    def setup(self, request):
        """Set a user's password after verifying their one-time reset code.

        Fixes: the success path previously fell through without returning a
        Response (the view returned ``None``); also replaced the
        ``exists()`` + ``get()`` double query with a single EAFP lookup.
        """
        serializer = UserSetupSerializer(data=request.data)
        if not serializer.is_valid():
            return Response({"message": "Invalid data"})
        try:
            user = User.objects.get(username=serializer.data['email'])
        except User.DoesNotExist:
            return Response({"message": "User does not exist"})
        # An empty stored code is rejected outright so a blank POST can't match.
        if user.reset_code != serializer.data['reset_code'] or user.reset_code == "":
            return Response({"message": "Incorrect reset code"})
        user.reset_code = ""
        user.set_password(serializer.data['password'])
        user.save()
        return Response({"message": "Password set"})
class PostViewSet(viewsets.ModelViewSet):
    """CRUD for posts, newest first; token or session auth required."""
    queryset = Post.objects.all().order_by('-timestamp')
    serializer_class = PostSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [TokenAuthentication, SessionAuthentication]
class CommentViewSet(viewsets.ModelViewSet):
    """CRUD for comments, newest first; token or session auth required."""
    queryset = Comment.objects.all().order_by('-timestamp')
    serializer_class = CommentSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [TokenAuthentication, SessionAuthentication]
class PhotoViewSet(viewsets.ModelViewSet):
    """CRUD for photos; token or session auth required."""
    queryset = Photo.objects.all()
    serializer_class = PhotoSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [TokenAuthentication, SessionAuthentication]
class ShortLinkViewSet(viewsets.ModelViewSet):
    """CRUD for short links; token or session auth required."""
    queryset = ShortLink.objects.all()
    serializer_class = ShortLinkSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [TokenAuthentication, SessionAuthentication]
| 37.746667 | 145 | 0.741434 | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from members_only.models import User, Post, Comment, Photo, ShortLink
from members_only.serializers import UserSerializer, UserSetupSerializer, PostSerializer, CommentSerializer, PhotoSerializer, ShortLinkSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
def index(request):
return render(request, "index.html")
def feed(request):
return render(request, "feed.html")
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
@action(detail=False, methods=['post'], serializer_class=UserSetupSerializer, permission_classes=[])
def setup(self, request):
serializer = UserSetupSerializer(data=request.data)
if serializer.is_valid():
if User.objects.filter(username=serializer.data['email']).exists():
new_user = User.objects.get(username=serializer.data['email'])
if new_user.reset_code != serializer.data['reset_code'] or new_user.reset_code == "":
return Response({"message": "Incorrect reset code"})
new_user.reset_code = ""
new_user.set_password(serializer.data['password'])
new_user.save()
else:
return Response({"message": "User does not exist"})
else:
return Response({"message": "Invalid data"})
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.all().order_by('-timestamp')
serializer_class = PostSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all().order_by('-timestamp')
serializer_class = CommentSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class PhotoViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all()
serializer_class = PhotoSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class ShortLinkViewSet(viewsets.ModelViewSet):
queryset = ShortLink.objects.all()
serializer_class = ShortLinkSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
| true | true |
1c46683814368973d3a6672199085b7f7bf2c538 | 335 | py | Python | challenge/D09/Point.py | pengfei99/AdventOfCode2021 | 7aeaf417521fc3acf6b34259b78b9534e1c9f912 | [
"Apache-2.0"
] | 1 | 2022-03-27T09:48:34.000Z | 2022-03-27T09:48:34.000Z | challenge/D09/Point.py | pengfei99/AdventOfCode2021 | 7aeaf417521fc3acf6b34259b78b9534e1c9f912 | [
"Apache-2.0"
] | null | null | null | challenge/D09/Point.py | pengfei99/AdventOfCode2021 | 7aeaf417521fc3acf6b34259b78b9534e1c9f912 | [
"Apache-2.0"
] | null | null | null | class Point:
def __init__(self, value):
self.value = value
self.mark = False
def __str__(self):
return f"value: {self.value}, marked: {self.mark}"
def get_value(self):
return self.value
def get_mark(self):
return self.mark
def mark_point(self):
self.mark = True
| 18.611111 | 58 | 0.58209 | class Point:
def __init__(self, value):
self.value = value
self.mark = False
def __str__(self):
return f"value: {self.value}, marked: {self.mark}"
def get_value(self):
return self.value
def get_mark(self):
return self.mark
def mark_point(self):
self.mark = True
| true | true |
1c46686d9bf0cf7f5d046349ac7a9f5fc444fa5e | 2,984 | py | Python | openslides_backend/action/actions/committee/update.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | openslides_backend/action/actions/committee/update.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | openslides_backend/action/actions/committee/update.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | from typing import Any, Dict
from ....models.models import Committee
from ....permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from ....permissions.permission_helper import (
has_committee_management_level,
has_organization_management_level,
)
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.patterns import Collection, FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .committee_common_mixin import CommitteeCommonCreateUpdateMixin
@register_action("committee.update")
class CommitteeUpdateAction(CommitteeCommonCreateUpdateMixin, UpdateAction):
"""
Action to update a committee.
"""
model = Committee()
schema = DefaultSchema(Committee()).get_update_schema(
optional_properties=[
"name",
"description",
"default_meeting_id",
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"organization_tag_ids",
"user_$_management_level",
],
)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
if instance.get("default_meeting_id"):
self.check_meeting_in_committee(
instance["default_meeting_id"], instance["id"]
)
return instance
def check_meeting_in_committee(self, meeting_id: int, committee_id: int) -> None:
meeting = self.datastore.get(
FullQualifiedId(Collection("meeting"), meeting_id), ["committee_id"]
)
if meeting.get("committee_id") != committee_id:
raise ActionException(
f"Meeting {meeting_id} does not belong to committee {committee_id}"
)
def check_permissions(self, instance: Dict[str, Any]) -> None:
if has_organization_management_level(
self.datastore,
self.user_id,
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION,
):
return
if any(
[
field in instance
for field in [
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"user_$_management_level",
]
]
):
raise MissingPermission(OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION)
if has_committee_management_level(
self.datastore,
self.user_id,
CommitteeManagementLevel.CAN_MANAGE,
instance["id"],
):
return
raise MissingPermission(
{
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION: 1,
CommitteeManagementLevel.CAN_MANAGE: instance["id"],
}
)
| 33.155556 | 88 | 0.632373 | from typing import Any, Dict
from ....models.models import Committee
from ....permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from ....permissions.permission_helper import (
has_committee_management_level,
has_organization_management_level,
)
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.patterns import Collection, FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .committee_common_mixin import CommitteeCommonCreateUpdateMixin
@register_action("committee.update")
class CommitteeUpdateAction(CommitteeCommonCreateUpdateMixin, UpdateAction):
model = Committee()
schema = DefaultSchema(Committee()).get_update_schema(
optional_properties=[
"name",
"description",
"default_meeting_id",
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"organization_tag_ids",
"user_$_management_level",
],
)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
if instance.get("default_meeting_id"):
self.check_meeting_in_committee(
instance["default_meeting_id"], instance["id"]
)
return instance
def check_meeting_in_committee(self, meeting_id: int, committee_id: int) -> None:
meeting = self.datastore.get(
FullQualifiedId(Collection("meeting"), meeting_id), ["committee_id"]
)
if meeting.get("committee_id") != committee_id:
raise ActionException(
f"Meeting {meeting_id} does not belong to committee {committee_id}"
)
def check_permissions(self, instance: Dict[str, Any]) -> None:
if has_organization_management_level(
self.datastore,
self.user_id,
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION,
):
return
if any(
[
field in instance
for field in [
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"user_$_management_level",
]
]
):
raise MissingPermission(OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION)
if has_committee_management_level(
self.datastore,
self.user_id,
CommitteeManagementLevel.CAN_MANAGE,
instance["id"],
):
return
raise MissingPermission(
{
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION: 1,
CommitteeManagementLevel.CAN_MANAGE: instance["id"],
}
)
| true | true |
1c4668cd01cebff276c684dd00b647ddfd8b4381 | 40,948 | py | Python | venv/lib/python2.7/site-packages/cffi/vengine_cpy.py | deandunbar/html2bwml | 32c06a93c8daf6a26c89c0de58fd39859d1ddb1e | [
"MIT"
] | 4 | 2017-09-17T03:27:47.000Z | 2020-04-29T00:10:20.000Z | venv/lib/python2.7/site-packages/cffi/vengine_cpy.py | deandunbar/html2bwml | 32c06a93c8daf6a26c89c0de58fd39859d1ddb1e | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/cffi/vengine_cpy.py | deandunbar/html2bwml | 32c06a93c8daf6a26c89c0de58fd39859d1ddb1e | [
"MIT"
] | 5 | 2017-09-20T08:08:43.000Z | 2022-02-02T08:19:30.000Z | import sys, imp
from . import model, ffiplatform
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
# The following two 'chained_list_constants' items contains
# the head of these two chained lists, as a string that gives the
# call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise ffiplatform.VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, ffiplatform.VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
return sorted(self.ffi._parser._declarations.items())
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise ffiplatform.VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' if (datasize < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
self._prnt(' if (_cffi_convert_array_from_object('
'(char *)%s, _cffi_type(%d), %s) < 0)' % (
tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructType):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
for type in tp.args:
self._extra_local_variables(type, localvars)
for decl in localvars:
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' return %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
else:
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname), fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise ffiplatform.VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = (tp.length == '...'))
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length == '...':
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise ffiplatform.VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
(sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| 40.988989 | 79 | 0.546962 | import sys, imp
from . import model, ffiplatform
class VCPythonEngine(object):
    """Verifier engine that generates, compiles and imports a CPython C
    extension module implementing the declarations given to ffi.verify().

    The engine walks the parsed declarations in two phases:

    * code generation: ``collect_types()`` assigns an index to every C type
      that needs a runtime descriptor, then ``write_source_to_f()`` emits the
      C source via the ``_generate_cpy_*_decl`` / ``_method`` steps;
    * loading: after the extension is imported, the ``_loading_cpy_*`` /
      ``_loaded_cpy_*`` steps wire the compiled symbols into an
      ``FFILibrary`` facade object returned to the user.
    """
    # NOTE(review): presumably used by the verifier to key cached build
    # artifacts per engine kind — confirm against the Verifier class.
    _class_key = 'x'
    _gen_python_module = True
    def __init__(self, verifier):
        self.verifier = verifier
        self.ffi = verifier.ffi
        # struct/union types whose layout must still be checked against the
        # compiler-reported layout once the module is loaded.
        self._struct_pending_verification = {}
        self._types_of_builtin_functions = {}
    def patch_extension_kwds(self, kwds):
        # No extra Extension() keyword arguments needed for this engine.
        pass
    def find_module(self, module_name, path, so_suffixes):
        """Return the filename of an already-compiled extension module with
        one of *so_suffixes*, or None if it cannot be found."""
        try:
            f, filename, descr = imp.find_module(module_name, path)
        except ImportError:
            return None
        if f is not None:
            f.close()
        if descr[0] not in so_suffixes:
            return None
        return filename
    def collect_types(self):
        """First pass over the declarations: fill self._typesdict with an
        index for every C type that needs a runtime type descriptor."""
        self._typesdict = {}
        self._generate("collecttype")
    def _prnt(self, what=''):
        # Write one line of generated C source; self._f is expected to be
        # set by the caller before write_source_to_f() runs.
        self._f.write(what + '\n')
    def _gettypenum(self, type):
        """Return the index assigned to *type* by collect_types()."""
        return self._typesdict[type]
    def _do_collect_type(self, tp):
        # Primitive types other than 'long double' are converted inline by
        # the generated macros and need no descriptor entry.
        if ((not isinstance(tp, model.PrimitiveType)
                or tp.name == 'long double')
                and tp not in self._typesdict):
            num = len(self._typesdict)
            self._typesdict[tp] = num
    def write_source_to_f(self):
        """Emit the complete C source of the extension module to self._f,
        including the header, the user preamble, all wrappers, the method
        table and the module init functions for both Python 2 and 3."""
        self.collect_types()
        # Two chains of constant-initializer calls, indexed by bool:
        # [False] = run eagerly at init, [True] = delayed via _cffi_setup_custom.
        self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
        prnt = self._prnt
        # first paste some standard set of lines that are mostly '#define'
        prnt(cffimod_header)
        prnt()
        # then paste the C source given by the user, verbatim.
        prnt(self.verifier.preamble)
        prnt()
        # then all the wrapper functions.
        self._generate("decl")
        self._generate_setup_custom()
        prnt()
        prnt('static PyMethodDef _cffi_methods[] = {')
        self._generate("method")
        prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
        prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
        prnt('};')
        prnt()
        modname = self.verifier.get_module_name()
        constants = self._chained_list_constants[False]
        prnt('#if PY_MAJOR_VERSION >= 3')
        prnt()
        prnt('static struct PyModuleDef _cffi_module_def = {')
        prnt(' PyModuleDef_HEAD_INIT,')
        prnt(' "%s",' % modname)
        prnt(' NULL,')
        prnt(' -1,')
        prnt(' _cffi_methods,')
        prnt(' NULL, NULL, NULL, NULL')
        prnt('};')
        prnt()
        prnt('PyMODINIT_FUNC')
        prnt('PyInit_%s(void)' % modname)
        prnt('{')
        prnt(' PyObject *lib;')
        prnt(' lib = PyModule_Create(&_cffi_module_def);')
        prnt(' if (lib == NULL)')
        prnt(' return NULL;')
        prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
        prnt(' Py_DECREF(lib);')
        prnt(' return NULL;')
        prnt(' }')
        prnt(' return lib;')
        prnt('}')
        prnt()
        prnt('#else')
        prnt()
        prnt('PyMODINIT_FUNC')
        prnt('init%s(void)' % modname)
        prnt('{')
        prnt(' PyObject *lib;')
        prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
        prnt(' if (lib == NULL)')
        prnt(' return;')
        prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
        prnt(' return;')
        prnt(' return;')
        prnt('}')
        prnt()
        prnt('#endif')
    def load_library(self, flags=None):
        """Import the compiled extension module and return an FFILibrary
        facade exposing its functions, constants and variables.

        *flags*, if given, is passed to sys.setdlopenflags() around the
        import (where the platform supports it)."""
        # XXX review all usages of 'self' here!
        # import it as a new extension module
        if hasattr(sys, "getdlopenflags"):
            previous_flags = sys.getdlopenflags()
        try:
            if hasattr(sys, "setdlopenflags") and flags is not None:
                sys.setdlopenflags(flags)
            module = imp.load_dynamic(self.verifier.get_module_name(),
                                      self.verifier.modulefilename)
        except ImportError as e:
            error = "importing %r: %s" % (self.verifier.modulefilename, e)
            raise ffiplatform.VerificationError(error)
        finally:
            if hasattr(sys, "setdlopenflags"):
                sys.setdlopenflags(previous_flags)
        self._load(module, 'loading')
        # build the FFILibrary class and instance and call _cffi_setup().
        # this will set up some fields like '_cffi_types', and only then
        # it will invoke the chained list of functions that will really
        # build (notably) the constant objects, as <cdata> if they are
        # pointers, and store them as attributes on the 'library' object.
        revmapping = dict([(value, key)
                           for (key, value) in self._typesdict.items()])
        lst = [revmapping[i] for i in range(len(revmapping))]
        lst = list(map(self.ffi._get_cached_btype, lst))
        class FFILibrary(object):
            _cffi_python_module = module
            _cffi_ffi = self.ffi
            _cffi_dir = []
            def __dir__(self):
                return FFILibrary._cffi_dir + list(self.__dict__)
        library = FFILibrary()
        if module._cffi_setup(lst, ffiplatform.VerificationError, library):
            import warnings
            warnings.warn("reimporting %r might overwrite older definitions"
                          % (self.verifier.get_module_name()))
        # finally, call the loaded_cpy_xxx() functions.  This will perform
        # the final adjustments, like copying the Python->C wrapper
        # functions from the module to the 'library' object, and setting
        # up the FFILibrary class with properties for the global C variables.
        self._load(module, 'loaded', library=library)
        module._cffi_original_ffi = self.ffi
        module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
        return library
    def _get_declarations(self):
        # Deterministic iteration order over the parsed declarations.
        return sorted(self.ffi._parser._declarations.items())
    def _generate(self, step_name):
        """Dispatch every declaration to its _generate_cpy_<kind>_<step>
        method; raise VerificationError for unsupported declaration kinds."""
        for name, tp in self._get_declarations():
            kind, realname = name.split(' ', 1)
            try:
                method = getattr(self, '_generate_cpy_%s_%s' % (kind,
                                                                step_name))
            except AttributeError:
                raise ffiplatform.VerificationError(
                    "not implemented in verify(): %r" % name)
            try:
                method(tp, realname)
            except Exception as e:
                model.attach_exception_info(e, name)
                raise
    def _load(self, module, step_name, **kwds):
        """Dispatch every declaration to its _<step>_cpy_<kind> method
        ('loading' or 'loaded' phase)."""
        for name, tp in self._get_declarations():
            kind, realname = name.split(' ', 1)
            method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
            try:
                method(tp, realname, module, **kwds)
            except Exception as e:
                model.attach_exception_info(e, name)
                raise
    def _generate_nothing(self, tp, name):
        pass
    def _loaded_noop(self, tp, name, module, **kwds):
        pass
    # ----------
    def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
        """Emit C statements converting the PyObject* C variable *fromvar*
        into the C value *tovar* of type *tp*; on failure execute *errcode*
        (e.g. 'return NULL')."""
        extraarg = ''
        if isinstance(tp, model.PrimitiveType):
            if tp.is_integer_type() and tp.name != '_Bool':
                converter = '_cffi_to_c_int'
                extraarg = ', %s' % tp.name
            else:
                converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
                                                   tp.name.replace(' ', '_'))
            errvalue = '-1'
        elif isinstance(tp, model.PointerType):
            self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
                                                    tovar, errcode)
            return
        elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
            # a struct (not a struct pointer) as a function argument
            self._prnt('  if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                      % (tovar, self._gettypenum(tp), fromvar))
            self._prnt('    %s;' % errcode)
            return
        elif isinstance(tp, model.FunctionPtrType):
            converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
            extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
            errvalue = 'NULL'
        else:
            raise NotImplementedError(tp)
        self._prnt('  %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
        self._prnt('  if (%s == (%s)%s && PyErr_Occurred())' % (
            tovar, tp.get_c_name(''), errvalue))
        self._prnt('    %s;' % errcode)
    def _extra_local_variables(self, tp, localvars):
        # Pointer arguments need a scratch 'datasize' local in the wrapper.
        if isinstance(tp, model.PointerType):
            localvars.add('Py_ssize_t datasize')
    def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
        self._prnt('  datasize = _cffi_prepare_pointer_call_argument(')
        self._prnt('      _cffi_type(%d), %s, (char **)&%s);' % (
            self._gettypenum(tp), fromvar, tovar))
        self._prnt('  if (datasize != 0) {')
        self._prnt('    if (datasize < 0)')
        self._prnt('      %s;' % errcode)
        self._prnt('    %s = alloca((size_t)datasize);' % (tovar,))
        self._prnt('    memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
        self._prnt('    if (_cffi_convert_array_from_object('
                   '(char *)%s, _cffi_type(%d), %s) < 0)' % (
            tovar, self._gettypenum(tp), fromvar))
        self._prnt('      %s;' % errcode)
        self._prnt('  }')
    def _convert_expr_from_c(self, tp, var, context):
        """Return a C expression converting the C value *var* of type *tp*
        back into a new PyObject* (used for results and constants)."""
        if isinstance(tp, model.PrimitiveType):
            if tp.is_integer_type():
                return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
            elif tp.name != 'long double':
                return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
            else:
                return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                    var, self._gettypenum(tp))
        elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.ArrayType):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(model.PointerType(tp.item)))
        elif isinstance(tp, model.StructType):
            if tp.fldnames is None:
                raise TypeError("'%s' is used as %s, but is opaque" % (
                    tp._get_c_name(), context))
            return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.EnumType):
            return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        else:
            raise NotImplementedError(tp)
    # ----------
    # typedefs: generates no code so far
    _generate_cpy_typedef_collecttype = _generate_nothing
    _generate_cpy_typedef_decl   = _generate_nothing
    _generate_cpy_typedef_method = _generate_nothing
    _loading_cpy_typedef         = _loaded_noop
    _loaded_cpy_typedef          = _loaded_noop
    # ----------
    # function declarations
    def _generate_cpy_function_collecttype(self, tp, name):
        assert isinstance(tp, model.FunctionPtrType)
        if tp.ellipsis:
            self._do_collect_type(tp)
        else:
            # don't call _do_collect_type(tp) in this common case,
            # otherwise test_autofilled_struct_as_argument fails
            for type in tp.args:
                self._do_collect_type(type)
            self._do_collect_type(tp.result)
    def _generate_cpy_function_decl(self, tp, name):
        """Emit the CPython wrapper function _cffi_f_<name> for the C
        function *name* (METH_NOARGS / METH_O / METH_VARARGS depending on
        the argument count)."""
        assert isinstance(tp, model.FunctionPtrType)
        if tp.ellipsis:
            # cannot support vararg functions better than this: check for its
            # exact type (including the fixed arguments), and build it as a
            # constant function pointer (no CPython wrapper)
            self._generate_cpy_const(False, name, tp)
            return
        prnt = self._prnt
        numargs = len(tp.args)
        if numargs == 0:
            argname = 'noarg'
        elif numargs == 1:
            argname = 'arg0'
        else:
            argname = 'args'
        prnt('static PyObject *')
        prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
        prnt('{')
        #
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            prnt(' %s;' % type.get_c_name(' x%d' % i, context))
        #
        localvars = set()
        for type in tp.args:
            self._extra_local_variables(type, localvars)
        for decl in localvars:
            prnt(' %s;' % (decl,))
        #
        if not isinstance(tp.result, model.VoidType):
            result_code = 'result = '
            context = 'result of %s' % name
            prnt(' %s;' % tp.result.get_c_name(' result', context))
        else:
            result_code = ''
        #
        if len(tp.args) > 1:
            rng = range(len(tp.args))
            for i in rng:
                prnt(' PyObject *arg%d;' % i)
            prnt()
            prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
                'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
            prnt(' return NULL;')
        prnt()
        #
        for i, type in enumerate(tp.args):
            self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
                                       'return NULL')
            prnt()
        #
        prnt(' Py_BEGIN_ALLOW_THREADS')
        prnt(' _cffi_restore_errno();')
        prnt(' { %s%s(%s); }' % (
            result_code, name,
            ', '.join(['x%d' % i for i in range(len(tp.args))])))
        prnt(' _cffi_save_errno();')
        prnt(' Py_END_ALLOW_THREADS')
        prnt()
        #
        prnt(' (void)self; /* unused */')
        if numargs == 0:
            prnt(' (void)noarg; /* unused */')
        if result_code:
            prnt(' return %s;' %
                 self._convert_expr_from_c(tp.result, 'result', 'result type'))
        else:
            prnt(' Py_INCREF(Py_None);')
            prnt(' return Py_None;')
        prnt('}')
        prnt()
    def _generate_cpy_function_method(self, tp, name):
        if tp.ellipsis:
            return
        numargs = len(tp.args)
        if numargs == 0:
            meth = 'METH_NOARGS'
        elif numargs == 1:
            meth = 'METH_O'
        else:
            meth = 'METH_VARARGS'
        self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
    _loading_cpy_function = _loaded_noop
    def _loaded_cpy_function(self, tp, name, module, library):
        if tp.ellipsis:
            return
        func = getattr(module, name)
        setattr(library, name, func)
        self._types_of_builtin_functions[func] = tp
    # ----------
    # named structs
    _generate_cpy_struct_collecttype = _generate_nothing
    def _generate_cpy_struct_decl(self, tp, name):
        assert name == tp.name
        self._generate_struct_or_union_decl(tp, 'struct', name)
    def _generate_cpy_struct_method(self, tp, name):
        self._generate_struct_or_union_method(tp, 'struct', name)
    def _loading_cpy_struct(self, tp, name, module):
        self._loading_struct_or_union(tp, 'struct', name, module)
    def _loaded_cpy_struct(self, tp, name, module, **kwds):
        self._loaded_struct_or_union(tp)
    _generate_cpy_union_collecttype = _generate_nothing
    def _generate_cpy_union_decl(self, tp, name):
        assert name == tp.name
        self._generate_struct_or_union_decl(tp, 'union', name)
    def _generate_cpy_union_method(self, tp, name):
        self._generate_struct_or_union_method(tp, 'union', name)
    def _loading_cpy_union(self, tp, name, module):
        self._loading_struct_or_union(tp, 'union', name, module)
    def _loaded_cpy_union(self, tp, name, module, **kwds):
        self._loaded_struct_or_union(tp)
    def _generate_struct_or_union_decl(self, tp, prefix, name):
        """Emit a compile-time field-type check function plus a layout
        function that reports sizeof/alignment/offsets at runtime."""
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
        layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
        cname = ('%s %s' % (prefix, name)).strip()
        #
        prnt = self._prnt
        prnt('static void %s(%s *p)' % (checkfuncname, cname))
        prnt('{')
        prnt(' /* only to generate compile-time warnings or errors */')
        prnt(' (void)p;')
        for fname, ftype, fbitsize in tp.enumfields():
            if (isinstance(ftype, model.PrimitiveType)
                and ftype.is_integer_type()) or fbitsize >= 0:
                # accept all integers, but complain on float or double
                prnt(' (void)((p->%s) << 1);' % fname)
            else:
                # only accept exactly the type declared.
                try:
                    prnt(' { %s = &p->%s; (void)tmp; }' % (
                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
                except ffiplatform.VerificationError as e:
                    prnt(' /* %s */' % str(e)) # cannot verify it, ignore
        prnt('}')
        prnt('static PyObject *')
        prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
        prnt('{')
        prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
        prnt(' static Py_ssize_t nums[] = {')
        prnt(' sizeof(%s),' % cname)
        prnt(' offsetof(struct _cffi_aligncheck, y),')
        for fname, ftype, fbitsize in tp.enumfields():
            if fbitsize >= 0:
                continue # xxx ignore fbitsize for now
            prnt(' offsetof(%s, %s),' % (cname, fname))
            if isinstance(ftype, model.ArrayType) and ftype.length is None:
                prnt(' 0, /* %s */' % ftype._get_c_name())
            else:
                prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
        prnt(' -1')
        prnt(' };')
        prnt(' (void)self; /* unused */')
        prnt(' (void)noarg; /* unused */')
        prnt(' return _cffi_get_struct_layout(nums);')
        prnt(' /* the next line is not executed, but compiled */')
        prnt(' %s(0);' % (checkfuncname,))
        prnt('}')
        prnt()
    def _generate_struct_or_union_method(self, tp, prefix, name):
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
        self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
                                                        layoutfuncname))
    def _loading_struct_or_union(self, tp, prefix, name, module):
        """Call the generated layout function and either fix a partial
        struct's layout from it, or queue the layout for verification."""
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
        #
        function = getattr(module, layoutfuncname)
        layout = function()
        if isinstance(tp, model.StructOrUnion) and tp.partial:
            # use the function()'s sizes and offsets to guide the layout of
            # our partially-declared struct type
            totalsize = layout[0]
            totalalignment = layout[1]
            fieldofs = layout[2::2]
            fieldsize = layout[3::2]
            tp.force_flatten()
            assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
            tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
        else:
            cname = ('%s %s' % (prefix, name)).strip()
            self._struct_pending_verification[tp] = layout, cname
    def _loaded_struct_or_union(self, tp):
        """Verify that ffi's computed layout for *tp* matches the layout
        reported by the C compiler; raise VerificationError on mismatch."""
        if tp.fldnames is None:
            return
        self.ffi._get_cached_btype(tp)   # force 'fixedlayout' to be considered
        if tp in self._struct_pending_verification:
            # check that the layout sizes and offsets match the real ones
            def check(realvalue, expectedvalue, msg):
                if realvalue != expectedvalue:
                    raise ffiplatform.VerificationError(
                        "%s (we have %d, but C compiler says %d)"
                        % (msg, expectedvalue, realvalue))
            ffi = self.ffi
            BStruct = ffi._get_cached_btype(tp)
            layout, cname = self._struct_pending_verification.pop(tp)
            check(layout[0], ffi.sizeof(BStruct), "wrong total size")
            check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
            i = 2
            for fname, ftype, fbitsize in tp.enumfields():
                if fbitsize >= 0:
                    continue        # xxx ignore fbitsize for now
                check(layout[i], ffi.offsetof(BStruct, fname),
                      "wrong offset for field %r" % (fname,))
                if layout[i+1] != 0:
                    BField = ffi._get_cached_btype(ftype)
                    check(layout[i+1], ffi.sizeof(BField),
                          "wrong size for field %r" % (fname,))
                i += 2
            assert i == len(layout)
    # ----------
    # 'anonymous' declarations.  These are produced for anonymous structs
    # or unions; the 'name' is obtained by a typedef.
    _generate_cpy_anonymous_collecttype = _generate_nothing
    def _generate_cpy_anonymous_decl(self, tp, name):
        if isinstance(tp, model.EnumType):
            self._generate_cpy_enum_decl(tp, name, '')
        else:
            self._generate_struct_or_union_decl(tp, '', name)
    def _generate_cpy_anonymous_method(self, tp, name):
        if not isinstance(tp, model.EnumType):
            self._generate_struct_or_union_method(tp, '', name)
    def _loading_cpy_anonymous(self, tp, name, module):
        if isinstance(tp, model.EnumType):
            self._loading_cpy_enum(tp, name, module)
        else:
            self._loading_struct_or_union(tp, '', name, module)
    def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
        if isinstance(tp, model.EnumType):
            self._loaded_cpy_enum(tp, name, module, **kwds)
        else:
            self._loaded_struct_or_union(tp)
    # ----------
    # constants, likely declared with '#define'
    def _generate_cpy_const(self, is_int, name, tp=None, category='const',
                            vartp=None, delayed=True, size_too=False,
                            check_value=None):
        """Emit a _cffi_<category>_<name>(lib) function that computes the
        constant/variable *name*, converts it to a Python object, stores it
        as an attribute on *lib*, and chains to the previous such function."""
        prnt = self._prnt
        funcname = '_cffi_%s_%s' % (category, name)
        prnt('static int %s(PyObject *lib)' % funcname)
        prnt('{')
        prnt(' PyObject *o;')
        prnt(' int res;')
        if not is_int:
            prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
        else:
            assert category == 'const'
        if check_value is not None:
            self._check_int_constant_value(name, check_value)
        if not is_int:
            if category == 'var':
                realexpr = '&' + name
            else:
                realexpr = name
            prnt(' i = (%s);' % (realexpr,))
            prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
                                                         'variable type'),))
            assert delayed
        else:
            prnt(' o = _cffi_from_c_int_const(%s);' % name)
        prnt(' if (o == NULL)')
        prnt(' return -1;')
        if size_too:
            prnt(' {')
            prnt(' PyObject *o1 = o;')
            prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
                 % (name,))
            prnt(' Py_DECREF(o1);')
            prnt(' if (o == NULL)')
            prnt(' return -1;')
            prnt(' }')
        prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
        prnt(' Py_DECREF(o);')
        prnt(' if (res < 0)')
        prnt(' return -1;')
        prnt(' return %s;' % self._chained_list_constants[delayed])
        self._chained_list_constants[delayed] = funcname + '(lib)'
        prnt('}')
        prnt()
    def _generate_cpy_constant_collecttype(self, tp, name):
        is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
        if not is_int:
            self._do_collect_type(tp)
    def _generate_cpy_constant_decl(self, tp, name):
        is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
        self._generate_cpy_const(is_int, name, tp)
    _generate_cpy_constant_method = _generate_nothing
    _loading_cpy_constant = _loaded_noop
    _loaded_cpy_constant  = _loaded_noop
    # ----------
    # enums
    def _check_int_constant_value(self, name, value, err_prefix=''):
        """Emit C code that verifies at runtime that the compiled value of
        the integer constant *name* equals *value*, setting
        _cffi_VerificationError otherwise."""
        prnt = self._prnt
        if value <= 0:
            prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
                name, name, value))
        else:
            prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
                name, name, value))
        prnt(' char buf[64];')
        prnt(' if ((%s) <= 0)' % name)
        prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
        prnt(' else')
        prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
             name)
        prnt(' PyErr_Format(_cffi_VerificationError,')
        prnt(' "%s%s has the real value %s, not %s",')
        prnt(' "%s", "%s", buf, "%d");' % (
            err_prefix, name, value))
        prnt(' return -1;')
        prnt(' }')
    def _enum_funcname(self, prefix, name):
        # "$enum_$1" => "___D_enum____D_1"
        name = name.replace('$', '___D_')
        return '_cffi_e_%s_%s' % (prefix, name)
    def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
        if tp.partial:
            for enumerator in tp.enumerators:
                self._generate_cpy_const(True, enumerator, delayed=False)
            return
        funcname = self._enum_funcname(prefix, name)
        prnt = self._prnt
        prnt('static int %s(PyObject *lib)' % funcname)
        prnt('{')
        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
            self._check_int_constant_value(enumerator, enumvalue,
                                           "enum %s: " % name)
        prnt(' return %s;' % self._chained_list_constants[True])
        self._chained_list_constants[True] = funcname + '(lib)'
        prnt('}')
        prnt()
    _generate_cpy_enum_collecttype = _generate_nothing
    _generate_cpy_enum_method = _generate_nothing
    def _loading_cpy_enum(self, tp, name, module):
        if tp.partial:
            enumvalues = [getattr(module, enumerator)
                          for enumerator in tp.enumerators]
            tp.enumvalues = tuple(enumvalues)
            tp.partial_resolved = True
    def _loaded_cpy_enum(self, tp, name, module, library):
        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
            setattr(library, enumerator, enumvalue)
    # ----------
    # macros: for now only for integers
    def _generate_cpy_macro_decl(self, tp, name):
        if tp == '...':
            check_value = None
        else:
            check_value = tp     # an integer
        self._generate_cpy_const(True, name, check_value=check_value)
    _generate_cpy_macro_collecttype = _generate_nothing
    _generate_cpy_macro_method = _generate_nothing
    _loading_cpy_macro = _loaded_noop
    _loaded_cpy_macro  = _loaded_noop
    # ----------
    # global variables
    def _generate_cpy_variable_collecttype(self, tp, name):
        if isinstance(tp, model.ArrayType):
            tp_ptr = model.PointerType(tp.item)
        else:
            tp_ptr = model.PointerType(tp)
        self._do_collect_type(tp_ptr)
    def _generate_cpy_variable_decl(self, tp, name):
        if isinstance(tp, model.ArrayType):
            tp_ptr = model.PointerType(tp.item)
            self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
                                     size_too = (tp.length == '...'))
        else:
            tp_ptr = model.PointerType(tp)
            self._generate_cpy_const(False, name, tp_ptr, category='var')
    _generate_cpy_variable_method = _generate_nothing
    _loading_cpy_variable = _loaded_noop
    def _loaded_cpy_variable(self, tp, name, module, library):
        """Expose the global C variable *name* on *library*: arrays become
        plain attributes; scalars become a read/write property backed by a
        pointer into the C data."""
        value = getattr(library, name)
        if isinstance(tp, model.ArrayType):   # int a[5] is "constant" in the
                                              # sense that "a=..." is forbidden
            if tp.length == '...':
                assert isinstance(value, tuple)
                (value, size) = value
                BItemType = self.ffi._get_cached_btype(tp.item)
                length, rest = divmod(size, self.ffi.sizeof(BItemType))
                if rest != 0:
                    raise ffiplatform.VerificationError(
                        "bad size: %r does not seem to be an array of %s" %
                        (name, tp.item))
                tp = tp.resolve_length(length)
            if tp.length is not None:
                BArray = self.ffi._get_cached_btype(tp)
                value = self.ffi.cast(BArray, value)
                setattr(library, name, value)
            return
        # remove ptr=<cdata 'int *'> from the library instance, and replace
        # it by a property on the class, which reads/writes into ptr[0].
        ptr = value
        delattr(library, name)
        def getter(library):
            return ptr[0]
        def setter(library, value):
            ptr[0] = value
        setattr(type(library), name, property(getter, setter))
        type(library)._cffi_dir.append(name)
    # ----------
    def _generate_setup_custom(self):
        """Emit _cffi_setup_custom(), which runs the delayed chain of
        constant/variable initializers built in self._chained_list_constants."""
        prnt = self._prnt
        prnt('static int _cffi_setup_custom(PyObject *lib)')
        prnt('{')
        prnt(' return %s;' % self._chained_list_constants[True])
        prnt('}')
# C source prelude pasted verbatim at the top of every generated extension
# module: stdint typedefs for old MSVC, the _cffi_* conversion macros, the
# _cffi_exports function table, and the _cffi_setup/_cffi_init plumbing.
# NOTE(review): per the comment inside, the #if block must be kept exactly
# identical between c/_cffi_backend.c, cffi/vengine_cpy.py and
# cffi/vengine_gen.py — edit only in lockstep with those files.
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
(sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| true | true |
1c46691999965d62eea11bab0f726d607f2615d6 | 5,738 | py | Python | iot/main.py | jonDufty/snsrpi-device | 0e2d8023093385e1ec457560e8880c43036c73dc | [
"MIT"
] | null | null | null | iot/main.py | jonDufty/snsrpi-device | 0e2d8023093385e1ec457560e8880c43036c73dc | [
"MIT"
] | null | null | null | iot/main.py | jonDufty/snsrpi-device | 0e2d8023093385e1ec457560e8880c43036c73dc | [
"MIT"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from ShadowHandler import SensorShadowHandler, GlobalShadowHandler
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder, iotshadow
import sys
import os
import signal
import threading
import json
import time
from datetime import datetime, timedelta
from uuid import uuid4
from Device import Device
# AWS IoT Core endpoint host name; read eagerly so a missing variable fails
# fast at import time (also re-read inside the __main__ block below).
AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
# io.init_logging()  # uncomment for verbose awscrt logging
received_count = 0  # message counter (not referenced elsewhere in this module)
# Set by signal_handler on SIGINT/SIGTERM; the main thread blocks on it.
stop_recording_event = threading.Event()
# Callback when connection is accidentally lost.
def on_connection_interrupted(connection, error, **kwargs):
    """Log an unexpected loss of the MQTT connection.

    Args:
        connection: The mqtt connection that was interrupted.
        error: The error that caused the interruption.
    """
    message = "Connection interrupted. error: {}".format(error)
    print(message)
# Callback when an interrupted connection is re-established.
def on_connection_resumed(connection, return_code, session_present, **kwargs):
    """Handle re-establishment of an interrupted MQTT connection.

    If the broker accepted the connection but did not keep our session
    state, every previous subscription is re-registered.

    Args:
        connection (mqtt_connection): The connection that resumed.
        return_code (int): Broker return code for the resume attempt.
        session_present (bool): Whether the broker kept the old session.
    """
    print("Connection resumed. return_code: {} session_present: {}".format(
        return_code, session_present))
    accepted = return_code == mqtt.ConnectReturnCode.ACCEPTED
    if not accepted or session_present:
        return
    # Session state was dropped by the broker: resubscribe everything.
    print("Session did not persist. Resubscribing to existing topics...")
    future, _ = connection.resubscribe_existing_topics()
    # Evaluate the result asynchronously; we are on the connection's
    # event-loop thread, so a synchronous wait would dead-lock.
    future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
    """Check the outcome of a resubscribe attempt.

    Exits the process if the server rejected any topic (reported as a
    ``None`` QoS in the result).

    Args:
        resubscribe_future: Future resolving to the resubscribe results.
    """
    results = resubscribe_future.result()
    print("Resubscribe results: {}".format(results))
    rejected = [topic for topic, qos in results['topics'] if qos is None]
    if rejected:
        sys.exit("Server rejected resubscribe to topic: {}".format(rejected[0]))
# Callback when the subscribed topic receives a message
def signal_handler(signal, frame):
    """Handle SIGINT/SIGTERM by signalling the main thread to shut down
    gracefully via the module-level stop_recording_event.

    Args:
        signal: The received signal number (unused beyond logging).
        frame: The current stack frame (unused).
    """
    # Fixed typo in the user-facing log message: "Recieved" -> "Received".
    print("Terminate Signal Received")
    stop_recording_event.set()
if __name__ == '__main__':
    # Initialise configuration from the environment (KeyError -> fail fast)
    # and register graceful-shutdown handlers for SIGINT/SIGTERM.
    AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
    DEVICE_ENDPOINT = os.environ["DEVICE_ENDPOINT"]
    DEVICE_NAME = os.environ["DEVICE_NAME"]
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    # Spin up the awscrt networking resources backing the MQTT connection.
    event_loop_group = io.EventLoopGroup(1)
    host_resolver = io.DefaultHostResolver(event_loop_group)
    client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
    proxy_options = None
    # Initialise device object. This has most handler functions abstracted away
    device = Device(DEVICE_NAME, DEVICE_ENDPOINT)
    # Initialise mqtt connection object (mutual-TLS over port 443).
    # This does all the talking essentially.
    mqtt_connection = mqtt_connection_builder.mtls_from_path(
        endpoint=AWS_IOT_ENDPOINT,
        port=443,
        cert_filepath=device.auth.device_cert,
        pri_key_filepath=device.auth.private_key,
        client_bootstrap=client_bootstrap,
        ca_filepath=device.auth.root_ca_cert,
        on_connection_interrupted=on_connection_interrupted,
        on_connection_resumed=on_connection_resumed,
        client_id=device.name,
        clean_session=False,
        keep_alive_secs=30,
        http_proxy_options=proxy_options)
    device.set_mqtt(mqtt_connection)
    # IoT shadow service client, built on top of the MQTT connection.
    shadow_client = iotshadow.IotShadowClient(mqtt_connection)
    print(
        f"Connecting to {AWS_IOT_ENDPOINT} with client ID '{device.name}'...")
    connect_future = mqtt_connection.connect()
    connect_future.result()  # block until the connection attempt completes
    print("Connected!")
    time.sleep(30)  # wait for the other service to spin up, otherwise we risk hitting an empty endpoint
    # Conduct initial healthcheck to initialise global state/shadow.
    device.set_global_shadow(shadow_client)
    device.get_healthcheck()
    device.global_shadow.set_state(device.global_shadow.local_state, update_index=True)
    device.global_shadow.update_state(override_desired=True)
    # Iterate through sensors in the state and create individual shadows/states.
    sensors = device.global_shadow.local_state['sensors']
    for s in sensors:
        id = s['sensor_id']
        shadow = SensorShadowHandler(
            shadow_client, device.name, id, id, device.device_endpoint, device.get_healthcheck
        )
        shadow.set_state('active', s['active'])
        result = shadow.get_or_update_sensor_settings()
        if result['error']:
            print(f'failed to get initial settings of sensor {id}')
        # Need to call update_state outside of the callback functions,
        # otherwise we risk creating a thread dead-lock and the program hangs.
        shadow.update_state(override_desired=True)
        device.sensor_shadows.append(shadow)
    # Enable periodic heartbeat publishing.
    device.enable_heartbeat()
    # Listen continuously: block until signal_handler sets the stop event.
    stop_recording_event.wait()
    # Tear down shadows and disconnect cleanly.
    print("Gracefully exitting")
    device.delete_shadows()
    print("Disconnecting...")
    disconnect_future = mqtt_connection.disconnect()
    disconnect_future.result()  # block until the disconnect completes
    print("Disconnected!")
    exit()
| 34.154762 | 111 | 0.729697 |
import argparse
from ShadowHandler import SensorShadowHandler, GlobalShadowHandler
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder, iotshadow
import sys
import os
import signal
import threading
import json
import time
from datetime import datetime, timedelta
from uuid import uuid4
from Device import Device
# AWS IoT Core endpoint the device connects to (required environment variable;
# a missing variable fails fast here with a KeyError).
AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
# NOTE(review): appears unused in this module — candidate for removal.
received_count = 0
# Set by signal_handler on SIGINT/SIGTERM; the main block waits on it to
# perform a graceful shutdown.
stop_recording_event = threading.Event()
def on_connection_interrupted(connection, error, **kwargs):
    """MQTT callback: report an unexpected connection drop."""
    print(f"Connection interrupted. error: {error}")
def on_connection_resumed(connection, return_code, session_present, **kwargs):
    """MQTT callback: after an automatic reconnect, restore subscriptions
    when the broker did not persist our session."""
    print("Connection resumed. return_code: {} session_present: {}".format(
        return_code, session_present))
    session_lost = (return_code == mqtt.ConnectReturnCode.ACCEPTED
                    and not session_present)
    if session_lost:
        print("Session did not persist. Resubscribing to existing topics...")
        future, _ = connection.resubscribe_existing_topics()
        future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
    """MQTT callback: verify every topic was re-accepted by the broker.

    Exits the process if any topic's QoS came back as None (rejected).
    """
    results = resubscribe_future.result()
    print("Resubscribe results: {}".format(results))
    rejected = [topic for topic, qos in results['topics'] if qos is None]
    if rejected:
        sys.exit("Server rejected resubscribe to topic: {}".format(rejected[0]))
def signal_handler(signum, frame):
    """SIGINT/SIGTERM handler: flag the main loop to shut down gracefully.

    Fix: the first parameter was named ``signal``, shadowing the imported
    ``signal`` module inside the handler; renamed to the conventional
    ``signum``. Handlers are invoked positionally, so this is call-compatible.
    """
    print("Terminate Signal Recieved")  # (sic) message kept byte-identical
    stop_recording_event.set()
if __name__ == '__main__':
    # Required environment configuration for this device.
    AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
    DEVICE_ENDPOINT = os.environ["DEVICE_ENDPOINT"]
    DEVICE_NAME = os.environ["DEVICE_NAME"]
    # Route Ctrl-C / termination signals through the graceful-shutdown event.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    event_loop_group = io.EventLoopGroup(1)
    host_resolver = io.DefaultHostResolver(event_loop_group)
    client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
    proxy_options = None
    device = Device(DEVICE_NAME, DEVICE_ENDPOINT)
    # Mutual-TLS MQTT connection to AWS IoT Core using the device certificates.
    mqtt_connection = mqtt_connection_builder.mtls_from_path(
        endpoint=AWS_IOT_ENDPOINT,
        port=443,
        cert_filepath=device.auth.device_cert,
        pri_key_filepath=device.auth.private_key,
        client_bootstrap=client_bootstrap,
        ca_filepath=device.auth.root_ca_cert,
        on_connection_interrupted=on_connection_interrupted,
        on_connection_resumed=on_connection_resumed,
        client_id=device.name,
        clean_session=False,
        keep_alive_secs=30,
        http_proxy_options=proxy_options)
    device.set_mqtt(mqtt_connection)
    # IoT shadow service client shared by the global and per-sensor shadows.
    shadow_client = iotshadow.IotShadowClient(mqtt_connection)
    print(
        f"Connecting to {AWS_IOT_ENDPOINT} with client ID '{device.name}'...")
    connect_future = mqtt_connection.connect()
    connect_future.result()  # block until the connection attempt resolves
    print("Connected!")
    time.sleep(30)  # wait for other services to spin up, otherwise we risk hitting an empty endpoint
    # Initial healthcheck to initialise the global state/shadow.
    device.set_global_shadow(shadow_client)
    device.get_healthcheck()
    device.global_shadow.set_state(device.global_shadow.local_state, update_index=True)
    device.global_shadow.update_state(override_desired=True)
    # Iterate through the sensors in the state and create individual shadows.
    sensors = device.global_shadow.local_state['sensors']
    for s in sensors:
        id = s['sensor_id']
        shadow = SensorShadowHandler(
            shadow_client, device.name, id, id, device.device_endpoint, device.get_healthcheck
        )
        shadow.set_state('active', s['active'])
        result = shadow.get_or_update_sensor_settings()
        if result['error']:
            print(f'failed to get initial settings of sensor {id}')
        # update_state must be called outside of the shadow callbacks,
        # otherwise we risk a thread dead-lock and the program hangs.
        shadow.update_state(override_desired=True)
        device.sensor_shadows.append(shadow)
    # Start the periodic heartbeat.
    device.enable_heartbeat()
    # Block until a stop signal is received (see signal_handler above).
    stop_recording_event.wait()
    # Graceful shutdown: remove shadows, then disconnect.
    print("Gracefully exitting")
    device.delete_shadows()
    print("Disconnecting...")
    disconnect_future = mqtt_connection.disconnect()
    disconnect_future.result()
    print("Disconnected!")
    exit()
| true | true |
1c466940b6dc99ba7aabaaea9b7a0414ef2ddf39 | 13,009 | py | Python | formfactory/tests/test_base.py | AltusBarry/django-formfactory | 8da378d0952bfd0eb9a08d49b17b5b95ee7e607d | [
"BSD-3-Clause"
] | null | null | null | formfactory/tests/test_base.py | AltusBarry/django-formfactory | 8da378d0952bfd0eb9a08d49b17b5b95ee7e607d | [
"BSD-3-Clause"
] | null | null | null | formfactory/tests/test_base.py | AltusBarry/django-formfactory | 8da378d0952bfd0eb9a08d49b17b5b95ee7e607d | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
import uuid
from django.conf import settings
from formfactory import models
from formfactory.tests.models import Enum, EnumItem
def cleanup_files():
    """Delete the test upload directory and everything in it, ignoring errors."""
    shutil.rmtree(
        os.path.join(settings.MEDIA_ROOT, "uploads/test"), ignore_errors=True
    )
def load_fixtures(kls):
    """Populate *kls* (a test-case class) with a full set of formfactory
    fixtures and attach every created object (and its input data) as a
    class attribute, e.g. ``kls.form`` / ``kls.form_data``.

    Creates: a generic form with one field per registered field type, a
    "Subscribe" form with store/email/file-upload actions, a "Login" form
    with a login action, form-data records, a validator and a two-step
    wizard. Order matters — later sections reference earlier objects.
    """
    # --- Generic form with a field group attached. ---
    kls.form_data = {
        "title": "Form 1",
        "slug": "form-1"
    }
    kls.form = models.Form.objects.create(**kls.form_data)
    kls.fieldchoice_data = {
        "label": "Choice 1",
        "value": "choice-1"
    }
    kls.fieldchoice = models.FieldChoice.objects.create(**kls.fieldchoice_data)
    # Enum/EnumItem come from the test-only models app and back model_choices.
    kls.enum_data = {
        "title": "Enum 1"
    }
    kls.enum = Enum.objects.create(**kls.enum_data)
    kls.enumitem_data = {
        "enum": kls.enum,
        "label": "Choice 2",
        "value": "choice-2"
    }
    kls.enumitem = EnumItem.objects.create(**kls.enumitem_data)
    kls.fieldgroup_data = {
        "title": "Field Group 1",
        "show_title": True
    }
    kls.fieldgroup = models.FormFieldGroup.objects.create(
        **kls.fieldgroup_data
    )
    kls.fieldgroupformthrough_data = {
        "form": kls.form,
        "field_group": kls.fieldgroup,
        "order": 0
    }
    kls.fieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
        **kls.fieldgroupformthrough_data
    )
    # --- One FormField per registered field type, exposed as
    # kls.formfield_<n> / kls.formfield_data_<n>. ---
    for count, field_type in enumerate(models.FIELD_TYPES):
        data = {
            "title": "Form Field %s" % count,
            "slug": "form-field-%s" % count,
            "field_type": field_type[0],
            "label": "Form Field %s" % count,
            "placeholder": "Field Placeholder %s" % count
        }
        # Specialised fields with non-default attributes need extra data
        # before creation (paragraph text, max_length, choices).
        if field_type[0] == "formfactory.fields.ParagraphField":
            data["paragraph"] = "**formfactory.fields.ParagraphField**"
        setattr(kls, "formfield_data_%s" % count, data)
        if field_type[0] == "django.forms.fields.CharField":
            getattr(kls, "formfield_data_%s" % count)["max_length"] = 100
        setattr(kls, "formfield_%s" % count, models.FormField.objects.create(
            **getattr(kls, "formfield_data_%s" % count)
        ))
        if field_type[0] == "django.forms.fields.ChoiceField":
            getattr(kls, "formfield_%s" % count).choices.add(kls.fieldchoice)
            getattr(kls, "formfield_%s" % count).model_choices = kls.enum
        setattr(kls, "fieldgroupthrough_data_%s" % count, {
            "field_group": kls.fieldgroup,
            "field": getattr(kls, "formfield_%s" % count),
            "order": count
        })
        setattr(
            kls, "fieldgroupthrough_%s" % count,
            models.FieldGroupThrough.objects.create(
                **getattr(kls, "fieldgroupthrough_data_%s" % count)
            )
        )
    # --- "Subscribe" form with its own field group. ---
    kls.simpleform_data = {
        "title": "Subscribe Form",
        "slug": "subscribe-form",
        "success_message": "Success",
        "failure_message": "Failure"
    }
    kls.simpleform = models.Form.objects.create(**kls.simpleform_data)
    kls.simplefieldgroup_data = {
        "title": "Field Group 1",
        "show_title": False
    }
    kls.simplefieldgroup = models.FormFieldGroup.objects.create(
        **kls.simplefieldgroup_data
    )
    kls.simplefieldgroupformthrough_data = {
        "form": kls.simpleform,
        "field_group": kls.simplefieldgroup,
        "order": 0
    }
    kls.simplefieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
        **kls.simplefieldgroupformthrough_data
    )
    # --- Actions on the subscribe form: store_data (order 0),
    # send_email (order 1), file_upload (order 2). ---
    kls.action_data = {
        "action": "formfactory.actions.store_data"
    }
    kls.action = models.Action.objects.create(**kls.action_data)
    kls.formactionthrough_data = {
        "action": kls.action,
        "form": kls.simpleform,
        "order": 0
    }
    kls.formactionthrough = models.FormActionThrough.objects.create(
        **kls.formactionthrough_data
    )
    kls.emailaction_data = {
        "action": "formfactory.actions.send_email"
    }
    kls.emailaction = models.Action.objects.create(**kls.emailaction_data)
    # Action params map email-action keys to slugs of fields created below.
    kls.emailactionparam_data = [
        {
            "key": "from_email_field",
            "value": "email-address",
            "action": kls.emailaction
        }, {
            "key": "to_email_field",
            "value": "to-email",
            "action": kls.emailaction
        }, {
            "key": "subject_field",
            "value": "subject",
            "action": kls.emailaction
        }
    ]
    for param in kls.emailactionparam_data:
        setattr(
            kls, "emailactionparam_%s" % param["key"],
            models.ActionParam.objects.create(**param)
        )
    kls.emailformactionthrough_data = {
        "action": kls.emailaction,
        "form": kls.simpleform,
        "order": 1
    }
    kls.emailformactionthrough = models.FormActionThrough.objects.create(
        **kls.emailformactionthrough_data
    )
    kls.fileuploadaction_data = {
        "action": "formfactory.actions.file_upload"
    }
    kls.fileuploadaction = models.Action.objects.create(
        **kls.fileuploadaction_data
    )
    kls.fileuploadactionparam_data = [
        {
            "key": "upload_path_field",
            "value": "upload-to",
            "action": kls.fileuploadaction
        }
    ]
    for param in kls.fileuploadactionparam_data:
        setattr(
            kls, "fileuploadactionparam_%s" % param["key"],
            models.ActionParam.objects.create(**param)
        )
    kls.fileuploadformactionthrough_data = {
        "action": kls.fileuploadaction,
        "form": kls.simpleform,
        "order": 2
    }
    kls.fileuploadformactionthrough = models.FormActionThrough.objects.create(
        **kls.fileuploadformactionthrough_data
    )
    # --- Fields for the subscribe form, keyed by attribute suffix
    # (kls.simpleformfield_<key>). Hidden fields feed the actions above. ---
    kls.simpleformfield_data = {
        "salutation": {
            "title": "Salutation",
            "slug": "salutation",
            "field_type": "django.forms.fields.ChoiceField",
            "label": "Salutation",
            "required": False
        },
        "name": {
            "title": "Name",
            "slug": "name",
            "field_type": "django.forms.fields.CharField",
            "label": "Full Name",
            "required": True
        },
        "email_address": {
            "title": "Email Address",
            "slug": "email-address",
            "field_type": "django.forms.fields.EmailField",
            "label": "Email",
            "help_text": "The email you would like info to be sent to"
        },
        "accept_terms": {
            "title": "Accept Terms",
            "slug": "accept-terms",
            "field_type": "django.forms.fields.BooleanField",
            "label": "Do you accept the terms and conditions",
            "required": False
        },
        "to_email": {
            "title": "To Email",
            "slug": "to-email",
            "field_type": "django.forms.fields.CharField",
            "widget": "django.forms.widgets.HiddenInput",
            "initial": "dev@praekelt.com",
            "required": True
        },
        "id_copy": {
            "title": "ID Copy",
            "slug": "id-copy",
            "field_type": "django.forms.fields.FileField",
            "required": True
        },
        "upload_to": {
            "title": "Upload To",
            "slug": "upload-to",
            "field_type": "django.forms.fields.CharField",
            "widget": "django.forms.widgets.HiddenInput",
            "initial": "uploads/test",
            "required": True
        },
        "subject": {
            "title": "Subject",
            "slug": "subject",
            "field_type": "django.forms.fields.CharField",
            "widget": "django.forms.widgets.HiddenInput",
            "initial": "Test Email",
            "required": True
        },
        "paragraph": {
            "title": "Paragraph",
            "slug": "paragraph",
            "field_type": "formfactory.fields.ParagraphField",
            "paragraph": "**aaaa**"
        }
    }
    count = 0
    for key, value in kls.simpleformfield_data.items():
        setattr(
            kls, "simpleformfield_%s" % key,
            models.FormField.objects.create(**value)
        )
        setattr(kls, "simplefieldgroupthrough_data_%s" % key, {
            "field_group": kls.simplefieldgroup,
            "field": getattr(kls, "simpleformfield_%s" % key),
            "order": count
        })
        setattr(
            kls, "simplefieldgroupthrough_%s" % key,
            models.FieldGroupThrough.objects.create(
                **getattr(kls, "simplefieldgroupthrough_data_%s" % key)
            )
        )
        count += 1
    # Populate the salutation choice field.
    for salutation in ["Mr", "Mrs", "Dr", "Prof"]:
        choice = models.FieldChoice.objects.create(
            label=salutation, value=salutation
        )
        kls.simpleformfield_salutation.choices.add(choice)
    # --- "Login" form with a login action and username/password fields. ---
    kls.loginform_data = {
        "title": "Login Form",
        "slug": "login-form",
        "success_message": "Success",
        "failure_message": "Failure",
        "submit_button_text": "Login"
    }
    kls.loginform = models.Form.objects.create(**kls.loginform_data)
    kls.loginfieldgroup_data = {
        "title": "Field Group 1",
        "show_title": True
    }
    kls.loginfieldgroup = models.FormFieldGroup.objects.create(
        **kls.loginfieldgroup_data
    )
    kls.loginfieldgroupformthrough_data = {
        "form": kls.loginform,
        "field_group": kls.loginfieldgroup,
        "order": 0
    }
    kls.loginfieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
        **kls.loginfieldgroupformthrough_data
    )
    kls.loginaction_data = {
        "action": "formfactory.actions.login"
    }
    kls.loginaction = models.Action.objects.create(**kls.loginaction_data)
    kls.loginactionparam_data = [
        {
            "key": "username_field",
            "value": "username",
            "action": kls.loginaction
        }, {
            "key": "password_field",
            "value": "password",
            "action": kls.loginaction
        }
    ]
    for param in kls.loginactionparam_data:
        setattr(
            kls, "loginactionparam_%s" % param["key"],
            models.ActionParam.objects.create(**param)
        )
    kls.loginformactionthrough_data = {
        "action": kls.loginaction,
        "form": kls.loginform,
        "order": 0
    }
    kls.loginformactionthrough = models.FormActionThrough.objects.create(
        **kls.loginformactionthrough_data
    )
    kls.loginformfield_data = {
        "username": {
            "title": "Username",
            "slug": "username",
            "field_type": "django.forms.fields.CharField",
            "label": "Username",
            "required": True
        },
        "password": {
            "title": "Password",
            "slug": "password",
            "field_type": "django.forms.fields.CharField",
            "widget": "django.forms.widgets.PasswordInput",
            "label": "Password",
            "required": True
        }
    }
    count = 0
    for key, value in kls.loginformfield_data.items():
        setattr(
            kls, "loginformfield_%s" % key,
            models.FormField.objects.create(**value)
        )
        setattr(kls, "loginfieldgroupthrough_data_%s" % key, {
            "field_group": kls.loginfieldgroup,
            "field": getattr(kls, "loginformfield_%s" % key),
            "order": count
        })
        setattr(
            kls, "loginfieldgroupthrough_%s" % key,
            models.FieldGroupThrough.objects.create(
                **getattr(kls, "loginfieldgroupthrough_data_%s" % key)
            )
        )
        count += 1
    # --- Stored form data, validator and a two-step wizard. ---
    kls.formdata_data = {
        "uuid": str(uuid.uuid4()),
        "form": kls.form
    }
    kls.formdata = models.FormData.objects.create(**kls.formdata_data)
    kls.formdataitem_data = {
        "form_data": kls.formdata,
        "form_field": kls.formfield_1,
        "value": "Form Data Item Value 1"
    }
    kls.formdataitem = models.FormDataItem.objects.create(
        **kls.formdataitem_data
    )
    # Dotted paths to test-only callables used by validator/action tests.
    kls.dummy_validator = "formfactory.tests.validators.dummy_validator"
    kls.dummy_action = "formfactory.tests.actions.dummy_action"
    kls.wizard_data = {
        "title": "Test wizard",
        "slug": "test-wizard",
        "success_message": "Success",
        "failure_message": "Failure",
        "redirect_to": "/"
    }
    kls.validator = models.Validator.objects.create(
        validator=kls.dummy_validator
    )
    kls.wizard = models.Wizard.objects.create(**kls.wizard_data)
    kls.wizardformthrough_simple = models.WizardFormThrough.objects.create(
        wizard=kls.wizard, form=kls.simpleform, order=1
    )
    kls.wizardformthrough_login = models.WizardFormThrough.objects.create(
        wizard=kls.wizard, form=kls.loginform, order=2
    )
| 30.609412 | 82 | 0.568376 | import os
import shutil
import uuid
from django.conf import settings
from formfactory import models
from formfactory.tests.models import Enum, EnumItem
def cleanup_files():
test_file_dir = os.path.join(settings.MEDIA_ROOT, "uploads/test")
shutil.rmtree(test_file_dir, ignore_errors=True)
def load_fixtures(kls):
kls.form_data = {
"title": "Form 1",
"slug": "form-1"
}
kls.form = models.Form.objects.create(**kls.form_data)
kls.fieldchoice_data = {
"label": "Choice 1",
"value": "choice-1"
}
kls.fieldchoice = models.FieldChoice.objects.create(**kls.fieldchoice_data)
kls.enum_data = {
"title": "Enum 1"
}
kls.enum = Enum.objects.create(**kls.enum_data)
kls.enumitem_data = {
"enum": kls.enum,
"label": "Choice 2",
"value": "choice-2"
}
kls.enumitem = EnumItem.objects.create(**kls.enumitem_data)
kls.fieldgroup_data = {
"title": "Field Group 1",
"show_title": True
}
kls.fieldgroup = models.FormFieldGroup.objects.create(
**kls.fieldgroup_data
)
kls.fieldgroupformthrough_data = {
"form": kls.form,
"field_group": kls.fieldgroup,
"order": 0
}
kls.fieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.fieldgroupformthrough_data
)
for count, field_type in enumerate(models.FIELD_TYPES):
data = {
"title": "Form Field %s" % count,
"slug": "form-field-%s" % count,
"field_type": field_type[0],
"label": "Form Field %s" % count,
"placeholder": "Field Placeholder %s" % count
}
if field_type[0] == "formfactory.fields.ParagraphField":
data["paragraph"] = "**formfactory.fields.ParagraphField**"
setattr(kls, "formfield_data_%s" % count, data)
if field_type[0] == "django.forms.fields.CharField":
getattr(kls, "formfield_data_%s" % count)["max_length"] = 100
setattr(kls, "formfield_%s" % count, models.FormField.objects.create(
**getattr(kls, "formfield_data_%s" % count)
))
if field_type[0] == "django.forms.fields.ChoiceField":
getattr(kls, "formfield_%s" % count).choices.add(kls.fieldchoice)
getattr(kls, "formfield_%s" % count).model_choices = kls.enum
setattr(kls, "fieldgroupthrough_data_%s" % count, {
"field_group": kls.fieldgroup,
"field": getattr(kls, "formfield_%s" % count),
"order": count
})
setattr(
kls, "fieldgroupthrough_%s" % count,
models.FieldGroupThrough.objects.create(
**getattr(kls, "fieldgroupthrough_data_%s" % count)
)
)
kls.simpleform_data = {
"title": "Subscribe Form",
"slug": "subscribe-form",
"success_message": "Success",
"failure_message": "Failure"
}
kls.simpleform = models.Form.objects.create(**kls.simpleform_data)
kls.simplefieldgroup_data = {
"title": "Field Group 1",
"show_title": False
}
kls.simplefieldgroup = models.FormFieldGroup.objects.create(
**kls.simplefieldgroup_data
)
kls.simplefieldgroupformthrough_data = {
"form": kls.simpleform,
"field_group": kls.simplefieldgroup,
"order": 0
}
kls.simplefieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.simplefieldgroupformthrough_data
)
kls.action_data = {
"action": "formfactory.actions.store_data"
}
kls.action = models.Action.objects.create(**kls.action_data)
kls.formactionthrough_data = {
"action": kls.action,
"form": kls.simpleform,
"order": 0
}
kls.formactionthrough = models.FormActionThrough.objects.create(
**kls.formactionthrough_data
)
kls.emailaction_data = {
"action": "formfactory.actions.send_email"
}
kls.emailaction = models.Action.objects.create(**kls.emailaction_data)
kls.emailactionparam_data = [
{
"key": "from_email_field",
"value": "email-address",
"action": kls.emailaction
}, {
"key": "to_email_field",
"value": "to-email",
"action": kls.emailaction
}, {
"key": "subject_field",
"value": "subject",
"action": kls.emailaction
}
]
for param in kls.emailactionparam_data:
setattr(
kls, "emailactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.emailformactionthrough_data = {
"action": kls.emailaction,
"form": kls.simpleform,
"order": 1
}
kls.emailformactionthrough = models.FormActionThrough.objects.create(
**kls.emailformactionthrough_data
)
kls.fileuploadaction_data = {
"action": "formfactory.actions.file_upload"
}
kls.fileuploadaction = models.Action.objects.create(
**kls.fileuploadaction_data
)
kls.fileuploadactionparam_data = [
{
"key": "upload_path_field",
"value": "upload-to",
"action": kls.fileuploadaction
}
]
for param in kls.fileuploadactionparam_data:
setattr(
kls, "fileuploadactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.fileuploadformactionthrough_data = {
"action": kls.fileuploadaction,
"form": kls.simpleform,
"order": 2
}
kls.fileuploadformactionthrough = models.FormActionThrough.objects.create(
**kls.fileuploadformactionthrough_data
)
kls.simpleformfield_data = {
"salutation": {
"title": "Salutation",
"slug": "salutation",
"field_type": "django.forms.fields.ChoiceField",
"label": "Salutation",
"required": False
},
"name": {
"title": "Name",
"slug": "name",
"field_type": "django.forms.fields.CharField",
"label": "Full Name",
"required": True
},
"email_address": {
"title": "Email Address",
"slug": "email-address",
"field_type": "django.forms.fields.EmailField",
"label": "Email",
"help_text": "The email you would like info to be sent to"
},
"accept_terms": {
"title": "Accept Terms",
"slug": "accept-terms",
"field_type": "django.forms.fields.BooleanField",
"label": "Do you accept the terms and conditions",
"required": False
},
"to_email": {
"title": "To Email",
"slug": "to-email",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "dev@praekelt.com",
"required": True
},
"id_copy": {
"title": "ID Copy",
"slug": "id-copy",
"field_type": "django.forms.fields.FileField",
"required": True
},
"upload_to": {
"title": "Upload To",
"slug": "upload-to",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "uploads/test",
"required": True
},
"subject": {
"title": "Subject",
"slug": "subject",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "Test Email",
"required": True
},
"paragraph": {
"title": "Paragraph",
"slug": "paragraph",
"field_type": "formfactory.fields.ParagraphField",
"paragraph": "**aaaa**"
}
}
count = 0
for key, value in kls.simpleformfield_data.items():
setattr(
kls, "simpleformfield_%s" % key,
models.FormField.objects.create(**value)
)
setattr(kls, "simplefieldgroupthrough_data_%s" % key, {
"field_group": kls.simplefieldgroup,
"field": getattr(kls, "simpleformfield_%s" % key),
"order": count
})
setattr(
kls, "simplefieldgroupthrough_%s" % key,
models.FieldGroupThrough.objects.create(
**getattr(kls, "simplefieldgroupthrough_data_%s" % key)
)
)
count += 1
for salutation in ["Mr", "Mrs", "Dr", "Prof"]:
choice = models.FieldChoice.objects.create(
label=salutation, value=salutation
)
kls.simpleformfield_salutation.choices.add(choice)
kls.loginform_data = {
"title": "Login Form",
"slug": "login-form",
"success_message": "Success",
"failure_message": "Failure",
"submit_button_text": "Login"
}
kls.loginform = models.Form.objects.create(**kls.loginform_data)
kls.loginfieldgroup_data = {
"title": "Field Group 1",
"show_title": True
}
kls.loginfieldgroup = models.FormFieldGroup.objects.create(
**kls.loginfieldgroup_data
)
kls.loginfieldgroupformthrough_data = {
"form": kls.loginform,
"field_group": kls.loginfieldgroup,
"order": 0
}
kls.loginfieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.loginfieldgroupformthrough_data
)
kls.loginaction_data = {
"action": "formfactory.actions.login"
}
kls.loginaction = models.Action.objects.create(**kls.loginaction_data)
kls.loginactionparam_data = [
{
"key": "username_field",
"value": "username",
"action": kls.loginaction
}, {
"key": "password_field",
"value": "password",
"action": kls.loginaction
}
]
for param in kls.loginactionparam_data:
setattr(
kls, "loginactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.loginformactionthrough_data = {
"action": kls.loginaction,
"form": kls.loginform,
"order": 0
}
kls.loginformactionthrough = models.FormActionThrough.objects.create(
**kls.loginformactionthrough_data
)
kls.loginformfield_data = {
"username": {
"title": "Username",
"slug": "username",
"field_type": "django.forms.fields.CharField",
"label": "Username",
"required": True
},
"password": {
"title": "Password",
"slug": "password",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.PasswordInput",
"label": "Password",
"required": True
}
}
count = 0
for key, value in kls.loginformfield_data.items():
setattr(
kls, "loginformfield_%s" % key,
models.FormField.objects.create(**value)
)
setattr(kls, "loginfieldgroupthrough_data_%s" % key, {
"field_group": kls.loginfieldgroup,
"field": getattr(kls, "loginformfield_%s" % key),
"order": count
})
setattr(
kls, "loginfieldgroupthrough_%s" % key,
models.FieldGroupThrough.objects.create(
**getattr(kls, "loginfieldgroupthrough_data_%s" % key)
)
)
count += 1
kls.formdata_data = {
"uuid": str(uuid.uuid4()),
"form": kls.form
}
kls.formdata = models.FormData.objects.create(**kls.formdata_data)
kls.formdataitem_data = {
"form_data": kls.formdata,
"form_field": kls.formfield_1,
"value": "Form Data Item Value 1"
}
kls.formdataitem = models.FormDataItem.objects.create(
**kls.formdataitem_data
)
kls.dummy_validator = "formfactory.tests.validators.dummy_validator"
kls.dummy_action = "formfactory.tests.actions.dummy_action"
kls.wizard_data = {
"title": "Test wizard",
"slug": "test-wizard",
"success_message": "Success",
"failure_message": "Failure",
"redirect_to": "/"
}
kls.validator = models.Validator.objects.create(
validator=kls.dummy_validator
)
kls.wizard = models.Wizard.objects.create(**kls.wizard_data)
kls.wizardformthrough_simple = models.WizardFormThrough.objects.create(
wizard=kls.wizard, form=kls.simpleform, order=1
)
kls.wizardformthrough_login = models.WizardFormThrough.objects.create(
wizard=kls.wizard, form=kls.loginform, order=2
)
| true | true |
1c466974e1deea98828676448579173ed8d0bcef | 357 | py | Python | rusel/config.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | rusel/config.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | rusel/config.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | from task.const import *
# Configuration for the site-wide "search results" pseudo-app consumed by the
# rusel framework (constants come from task.const).
app_config = {
    'name': APP_ALL,
    'app_title': 'search results',
    'icon': 'search',  # icon name shown next to the app title
    'role': ROLE_SEARCH_RESULTS,
    # Available sort options as (field, human-readable label) pairs.
    'sort': [
        ('name', 'name'),
        ('created', 'create date'),
    ],
    # Per-view presentation settings, keyed by view slug.
    'views': {
        'search': {
            'icon': 'search',
            'title': 'search results',
        },
    }
}
app_config = {
'name': APP_ALL,
'app_title': 'search results',
'icon': 'search',
'role': ROLE_SEARCH_RESULTS,
'sort': [
('name', 'name'),
('created', 'create date'),
],
'views': {
'search': {
'icon': 'search',
'title': 'search results',
},
}
} | true | true |
1c466c2eb51ca67b46ef055458e6f5edc433953f | 8,546 | py | Python | handler.py | abizerlokhandwala/Cowin-Notification-Service | 4fd7fd9c3cfab37502ad4135007a6127ca4cc15f | [
"MIT"
] | 14 | 2021-05-07T13:09:03.000Z | 2022-01-10T23:24:42.000Z | handler.py | abizerlokhandwala/Cowin-Notification-Service | 4fd7fd9c3cfab37502ad4135007a6127ca4cc15f | [
"MIT"
] | 16 | 2021-05-10T16:41:21.000Z | 2021-06-09T14:49:03.000Z | handler.py | abizerlokhandwala/Cowin-Notification-Service | 4fd7fd9c3cfab37502ad4135007a6127ca4cc15f | [
"MIT"
] | 5 | 2021-05-09T12:14:03.000Z | 2021-06-08T13:56:55.000Z | import asyncio
import json
import logging
import random
from datetime import date
import boto3
from helpers.constants import ISSUE_MSG, DB_NAME
from helpers.cowin_sdk import CowinAPI
from helpers.db_handler import DBHandler, get_pin_code_location
from helpers.notificationHandler import NotifHandler
from helpers.queries import USER_PATTERN_MATCH, GET_USER_QUERY, UPDATE_USER_VERIFIED, SUBSCRIBED_DISTRICT_USERS
from helpers.utils import response_handler, get_preference_slots, send_historical_diff, get_event_loop
# Module-level logger shared by every lambda handler in this file.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_states(event, context):
    """Lambda entry point: list all states known to the CoWIN API."""
    api = CowinAPI()
    return response_handler(api.get_states(), 200)
def get_districts(event, context):
    """Lambda entry point: list the districts of the requested state
    (``state_id`` query-string parameter)."""
    state_id = event["queryStringParameters"]["state_id"]
    return response_handler(CowinAPI().get_districts(state_id), 200)
def get_centers(event, context):
    """Lambda entry point: 7-day centre availability for a district,
    starting from today's date."""
    district_id = event["queryStringParameters"]["district_id"]
    today = date.today().strftime("%d-%m-%Y")
    return response_handler(CowinAPI().get_centers_7(district_id, today), 200)
def get_district_preferences(event, context):
    """Lambda entry point: slots in a district filtered by vaccine and
    age group (all three taken from the query string)."""
    params = event["queryStringParameters"]
    slots = get_preference_slots(
        params['district_id'], params['vaccine'], params['age_group'])
    return response_handler(slots, 200)
def subscribe(event, context):
    """Lambda entry point: register a new notification subscriber.

    Returns 400 for a duplicate email or invalid pincode, otherwise 201.
    A brand-new (unverified) subscriber is sent a verification email.

    Fix: the DB handle was leaked on the two early-return error paths;
    it is now closed before every return, matching the other handlers.
    """
    body = json.loads(event['body'])
    body['email'] = body['email'].strip()
    db = DBHandler.get_instance()
    notif = NotifHandler()
    # Sentinel codes from DBHandler.subscribe: -1 duplicate, -2 bad pincode,
    # otherwise a boolean "already verified" flag.
    is_verified, verification_token = db.subscribe(body)
    if is_verified == -1:
        db.close()
        return response_handler({'message': f'Email Already exists'}, 400)
    elif is_verified == -2:  # pincode not found
        db.close()
        return response_handler({'message': f'Pincode is invalid'}, 400)
    additional_comments = ''
    if is_verified is False:
        # New, unverified subscriber: start the double-opt-in flow.
        notif.send_verification_email(body['email'], True)
        additional_comments = f'Please verify your email ID: {body["email"]}'
    db.close()
    return response_handler({'message': f'Subscribed successfully! {additional_comments}'}, 201)
def unsubscribe(event, context):
    """Lambda entry point: remove a subscriber identified by email + token."""
    qs = event["queryStringParameters"]
    user_email = qs["email"]
    token = qs["token"]
    db = DBHandler.get_instance()
    removed = db.unsubscribe(user_email, token)
    db.close()
    if removed:
        logger.info(f'{user_email} unsubscribed')
        return response_handler({'message': f'Unsubscribed successfully!'}, 200)
    return response_handler({'message': ISSUE_MSG}, status=400)
def verify_email(event, context):
    """Lambda entry point: confirm a subscriber's email via the emailed token.

    Row columns from GET_USER_QUERY: index 2 is presumably the verification
    token and index 3 the is_verified flag — confirm against the query.
    """
    qs = event["queryStringParameters"]
    user_email = qs["email"]
    token = qs["token"]
    db = DBHandler.get_instance()
    rows = db.query(GET_USER_QUERY, (user_email,))
    if rows and int(rows[0][3]) == 1:
        db.close()
        return response_handler({'message': 'User already verified'}, status=200)
    if rows and rows[0][2] == token:
        db.insert(UPDATE_USER_VERIFIED, (user_email,))
        db.close()
        return response_handler({'message': 'Successful Verification'}, status=200)
    db.close()
    return response_handler({'message': 'Unsuccessful Verification'}, status=403)
def check_district_nums(event, context):
    """Lambda entry point: report gaps in the district-id sequence from CoWIN.

    Prints ``Missing <id>`` for every integer in [0, max(id)] that CoWIN did
    not return, then returns the district list unchanged.

    Fixes: list membership inside the loop was O(n) per test (O(n^2) total),
    now a set; ``max()`` no longer raises on an empty result.
    """
    districts = CowinAPI().get_all_districts()
    if districts:  # guard: max() raises ValueError on an empty sequence
        known = set(districts)  # O(1) membership tests
        for ind in range(1 + max(known)):
            if ind not in known:
                print(f'Missing {ind}')
    return districts
# Cached list of all district ids; populated lazily on the first invocation
# and reused while the lambda container stays warm.
district_nums = []


def trigger_district_updates(event, context):
    """Lambda entry point: fan out slot-update work across districts.

    Invokes the update_district_slots lambda asynchronously with batches of
    up to 10 district ids each.

    Improvements: removed dead commented-out code and factored the duplicated
    client.invoke expression into a local helper.
    """
    global district_nums
    if not district_nums:
        district_nums = CowinAPI().get_all_districts()
    client = boto3.client('lambda', region_name='ap-south-1')
    UPDATE_FUNCTION_NAME = 'cowin-notification-service-dev-update_district_slots'

    def _dispatch(districts):
        # Fire-and-forget ('Event') invocation; the payload is serialised
        # here, before the caller clears and reuses the batch list.
        client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
                      InvocationType='Event',
                      Payload=json.dumps({'districts': districts}))

    batch = []
    for district in district_nums:
        if district:
            batch.append(district)
            if len(batch) >= 10:
                _dispatch(batch)
                batch.clear()
    if batch:
        _dispatch(batch)
    return response_handler({}, 200)
def update_district_slots(event, context):
    """Lambda entry point: refresh slot data for a batch of districts.

    Runs send_historical_diff concurrently for every id in event['districts'].
    """
    district_ids = event['districts']
    tasks = [send_historical_diff(district_id) for district_id in district_ids]
    get_event_loop().run_until_complete(asyncio.gather(*tasks))
    return response_handler({'message': f'Districts {district_ids} processed'}, 200)
def notif_dispatcher(event, context):
    """Lambda entry point: fan out availability notifications.

    Looks up every subscriber whose preferences match the slot described in
    event['message'], then invokes the send_batch_email lambda asynchronously
    with batches of up to 20 (email, token) pairs.

    Improvements: removed dead commented-out debug code and factored the
    duplicated client.invoke expression into a local helper.
    """
    message = event['message']
    location = get_pin_code_location(message['pincode'])
    db = DBHandler.get_instance()
    user_info = [(row[0], row[1]) for row in db.query(USER_PATTERN_MATCH, (
        'email', message['age_group'], message['vaccine'], message['dose_1'], message['dose_2'],
        message['district_id'], location))]
    db.close()
    # Rewrite e.g. 'above_18' -> '18+' for display in the email template.
    message['age_group'] += '+'
    message['age_group'] = message['age_group'].replace('above_', '')
    client = boto3.client('lambda', region_name='ap-south-1')
    SEND_EMAIL_FUNCTION_NAME = 'cowin-notification-service-dev-send_batch_email'

    def _dispatch(users):
        # Fire-and-forget ('Event') invocation; payload serialised before the
        # caller clears and reuses the batch list.
        client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
                      InvocationType='Event',
                      Payload=json.dumps({'users': users, 'message': message}))

    batch = []
    for user in user_info:
        if user:
            batch.append(user)
            if len(batch) >= 20:
                _dispatch(batch)
                batch.clear()
    if batch:
        _dispatch(batch)
    return response_handler({}, 200)
def send_batch_email(event, context):
    """Lambda entry point: send the availability template email to one batch
    of (email, token) pairs from event['users']."""
    NotifHandler().send_template_emails(event['users'], event['message'])
    return response_handler({'message': f'All notifs processed'}, 200)
def test_email(event, context):
    """Manual smoke test: send the slot-availability template email with
    dummy centre data to a hard-coded list of developer addresses."""
    notif = NotifHandler()
    notif.send_template_emails(
        [('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
         ('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')], {
            'center_name': 'test_center',
            'slots': '[1-2]',
            'district_name': 'test_district',
            'date': '1-1-1',
            'age_group': '45',
            'vaccine': 'covishield',
            'address': 'abc, pqr, xyz',
            'pincode': '411040',
            'capacity': '40',
            'capacity_dose_1': '20',
            'capacity_dose_2': '20',
            'fee_amount': '₹200'
        })
    return
def notify_pincode_email(event, context):
    """One-off broadcast to all subscribed district users about pincode support."""
    db = DBHandler.get_instance()
    # BUGFIX: the query parameters must be a sequence of values.  The
    # original passed the bare string ('email') — parentheses without a
    # trailing comma do not make a tuple — which DB-API drivers treat as a
    # 5-character sequence and reject for a single-placeholder query.
    # Every sibling query in this module passes a real tuple.
    user_info = [(row[0], row[1]) for row in db.query(SUBSCRIBED_DISTRICT_USERS,
                                                      ('email',))]
    db.close()
    notif = NotifHandler()
    notif.send_pincode_one_time_email(user_info)
    return
def test_email_pincode(event, context):
    """Manual smoke test: send the one-time pincode email to the dev team."""
    dev_recipients = [
        ('abizerL123@gmail.com', 'abc'),
        ('sharangpai123@gmail.com', 'abc'),
        ('pujan.iceman@gmail.com', 'abc'),
        ('shloksingh10@gmail.com', 'abc'),
        ('arsenal.arpit11@gmail.com', 'abc'),
    ]
    NotifHandler().send_pincode_one_time_email(dev_recipients)
    return
def send_verification_email_manual(event, context):
    """Re-send verification emails to unverified users.

    ``event`` may optionally carry ``min_id`` to choose the starting user id;
    it defaults to 4923 (the previously hard-coded cut-off) so existing
    invocations behave exactly as before.
    """
    # Lambda events are normally dicts, but guard against other payloads.
    min_id = event.get('min_id', 4923) if isinstance(event, dict) else 4923
    db = DBHandler.get_instance()
    users = db.query(
        f"SELECT email FROM {DB_NAME}.users where id>=%s and is_verified = 0",
        (min_id,))
    db.close()
    notif = NotifHandler()
    for user in users:
        # False => do not treat this as a brand-new signup email.
        # NOTE(review): second arg presumed to be a "first send" flag —
        # confirm against NotifHandler.send_verification_email.
        notif.send_verification_email(user[0], False)
    return response_handler({'message': 'Sent'}, 200)
def poller_service_endpoint(event, context):
    """Log the raw payload posted by the external poller and acknowledge it."""
    payload = event['body']
    logger.info(payload)
    return response_handler({'message': 'success'}, 200)
import json
import logging
import random
from datetime import date
import boto3
from helpers.constants import ISSUE_MSG, DB_NAME
from helpers.cowin_sdk import CowinAPI
from helpers.db_handler import DBHandler, get_pin_code_location
from helpers.notificationHandler import NotifHandler
from helpers.queries import USER_PATTERN_MATCH, GET_USER_QUERY, UPDATE_USER_VERIFIED, SUBSCRIBED_DISTRICT_USERS
from helpers.utils import response_handler, get_preference_slots, send_historical_diff, get_event_loop
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_states(event, context):
cowin = CowinAPI()
states = cowin.get_states()
return response_handler(states, 200)
def get_districts(event, context):
cowin = CowinAPI()
state_id = event["queryStringParameters"]["state_id"]
districts = cowin.get_districts(state_id)
return response_handler(districts, 200)
def get_centers(event, context):
cowin = CowinAPI()
district_id = event["queryStringParameters"]["district_id"]
date_today = date.today().strftime("%d-%m-%Y")
centers = cowin.get_centers_7(district_id, date_today)
return response_handler(centers, 200)
def get_district_preferences(event, context):
district_id = event["queryStringParameters"]['district_id']
vaccine = event["queryStringParameters"]['vaccine']
age_group = event["queryStringParameters"]['age_group']
return response_handler(get_preference_slots(district_id, vaccine, age_group), 200)
def subscribe(event, context):
body = json.loads(event['body'])
body['email'] = body['email'].strip()
db = DBHandler.get_instance()
notif = NotifHandler()
is_verified, verification_token = db.subscribe(body)
if is_verified == -1:
return response_handler({'message': f'Email Already exists'}, 400)
elif is_verified == -2:
return response_handler({'message': f'Pincode is invalid'}, 400)
additional_comments = ''
if is_verified is False:
notif.send_verification_email(body['email'], True)
additional_comments = f'Please verify your email ID: {body["email"]}'
db.close()
return response_handler({'message': f'Subscribed successfully! {additional_comments}'}, 201)
def unsubscribe(event, context):
user_email = event["queryStringParameters"]["email"]
token = event["queryStringParameters"]["token"]
db = DBHandler.get_instance()
if db.unsubscribe(user_email, token):
logger.info(f'{user_email} unsubscribed')
db.close()
return response_handler({'message': f'Unsubscribed successfully!'}, 200)
else:
db.close()
return response_handler({'message': ISSUE_MSG}, status=400)
def verify_email(event, context):
user_email = event["queryStringParameters"]["email"]
token = event["queryStringParameters"]["token"]
db = DBHandler.get_instance()
user = db.query(GET_USER_QUERY, (user_email,))
if user and int(user[0][3]) == 1:
db.close()
return response_handler({'message': 'User already verified'}, status=200)
if user and user[0][2] == token:
db.insert(UPDATE_USER_VERIFIED, (user_email,))
db.close()
return response_handler({'message': 'Successful Verification'}, status=200)
db.close()
return response_handler({'message': 'Unsuccessful Verification'}, status=403)
def check_district_nums(event, context):
cowin = CowinAPI()
districts = cowin.get_all_districts()
for ind in range(0,1+max(districts)):
if ind not in districts:
print(f'Missing {ind}')
return districts
district_nums = []
def trigger_district_updates(event, context):
global district_nums
if not district_nums:
cowin = CowinAPI()
district_nums = cowin.get_all_districts()
client = boto3.client('lambda', region_name='ap-south-1')
UPDATE_FUNCTION_NAME = 'cowin-notification-service-dev-update_district_slots'
batch = []
for district in district_nums:
if district:
batch.append(district)
if len(batch) >= 10:
client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'districts': batch}))
batch.clear()
if len(batch) > 0:
client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'districts': batch}))
return response_handler({}, 200)
def update_district_slots(event, context):
district_ids = event['districts']
get_event_loop().run_until_complete(asyncio.gather(*[send_historical_diff(district_id) for district_id in
district_ids]))
return response_handler({'message': f'Districts {district_ids} processed'}, 200)
def notif_dispatcher(event, context):
message = event['message']
location = get_pin_code_location(message['pincode'])
db = DBHandler.get_instance()
user_info = [(row[0], row[1]) for row in db.query(USER_PATTERN_MATCH, (
'email', message['age_group'], message['vaccine'], message['dose_1'], message['dose_2'],
message['district_id'], location))]
db.close()
message['age_group'] += '+'
message['age_group'] = message['age_group'].replace('above_', '')
client = boto3.client('lambda', region_name='ap-south-1')
SEND_EMAIL_FUNCTION_NAME = 'cowin-notification-service-dev-send_batch_email'
batch = []
for user in user_info:
if user:
batch.append(user)
if len(batch) >= 20:
client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'users': batch, 'message': message}))
batch.clear()
if len(batch) > 0:
client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'users': batch, 'message': message}))
return response_handler({},200)
def send_batch_email(event, context):
notif = NotifHandler()
users = event['users']
message = event['message']
notif.send_template_emails(users, message)
return response_handler({'message': f'All notifs processed'}, 200)
def test_email(event, context):
notif = NotifHandler()
notif.send_template_emails(
[('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')], {
'center_name': 'test_center',
'slots': '[1-2]',
'district_name': 'test_district',
'date': '1-1-1',
'age_group': '45',
'vaccine': 'covishield',
'address': 'abc, pqr, xyz',
'pincode': '411040',
'capacity': '40',
'capacity_dose_1': '20',
'capacity_dose_2': '20',
'fee_amount': '₹200'
})
return
def notify_pincode_email(event, context):
db = DBHandler.get_instance()
user_info = [(row[0], row[1]) for row in db.query(SUBSCRIBED_DISTRICT_USERS, (
'email'))]
db.close()
notif = NotifHandler()
notif.send_pincode_one_time_email(user_info)
return
def test_email_pincode(event, context):
notif = NotifHandler()
notif.send_pincode_one_time_email(
[('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')])
return
def send_verification_email_manual(event, context):
db = DBHandler.get_instance()
users = db.query(f"SELECT email FROM {DB_NAME}.users where id>=%s and is_verified = 0",(4923,))
db.close()
notif = NotifHandler()
for user in users:
notif.send_verification_email(user[0], False)
return response_handler({'message': f'Sent'}, 200)
def poller_service_endpoint(event, context):
body = event['body']
logger.info(body)
return response_handler({'message': 'success'},200) | true | true |
1c466c5148c6b829d6eaaf73cbd026659824bb69 | 1,098 | py | Python | 2021/25/day25.py | AlbertVeli/AdventOfCode | 3d3473695318a0686fac720a1a21dd3629f09e33 | [
"Unlicense"
] | null | null | null | 2021/25/day25.py | AlbertVeli/AdventOfCode | 3d3473695318a0686fac720a1a21dd3629f09e33 | [
"Unlicense"
] | null | null | null | 2021/25/day25.py | AlbertVeli/AdventOfCode | 3d3473695318a0686fac720a1a21dd3629f09e33 | [
"Unlicense"
] | 1 | 2021-12-04T10:37:09.000Z | 2021-12-04T10:37:09.000Z | #!/usr/bin/env python3
import sys
import numpy as np
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
a = []
for line in open(sys.argv[1]):
a.append(list(line.rstrip()))
a = np.array(a)
sy, sx = a.shape
# Could probably move without copying a
# but logic is simpler if keeping a copy
def move(a):
moved = False
# Move right
a_org = np.array(a)
for y in range(sy):
ny = y
for x in range(sx):
if a_org[y][x] != '>':
continue
nx = (x + 1) % sx
if a_org[ny][nx] == '.':
a[ny][nx] = '>'
a[y][x] = '.'
moved = True
# Move down
a_org = np.array(a)
for y in range(sy):
ny = (y + 1) % sy
for x in range(sx):
nx = x
if a_org[y][x] != 'v':
continue
if a_org[ny][nx] == '.':
a[ny][nx] = 'v'
a[y][x] = '.'
moved = True
return moved
moves = 0
while move(a):
moves += 1
#print(a)
print(moves + 1)
| 21.115385 | 47 | 0.441712 |
import sys
import numpy as np
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
a = []
for line in open(sys.argv[1]):
a.append(list(line.rstrip()))
a = np.array(a)
sy, sx = a.shape
def move(a):
moved = False
a_org = np.array(a)
for y in range(sy):
ny = y
for x in range(sx):
if a_org[y][x] != '>':
continue
nx = (x + 1) % sx
if a_org[ny][nx] == '.':
a[ny][nx] = '>'
a[y][x] = '.'
moved = True
a_org = np.array(a)
for y in range(sy):
ny = (y + 1) % sy
for x in range(sx):
nx = x
if a_org[y][x] != 'v':
continue
if a_org[ny][nx] == '.':
a[ny][nx] = 'v'
a[y][x] = '.'
moved = True
return moved
moves = 0
while move(a):
moves += 1
print(moves + 1)
| true | true |
1c466c8c0bd43fad411e356a53e99baf9f31c048 | 987 | py | Python | 2019/25_Josepus_Survivor/my_solution.py | erik-kristofer-anderson/codewars | fda780f40d1a2d8c5210cfd6ccf81148444bc9e8 | [
"MIT"
] | null | null | null | 2019/25_Josepus_Survivor/my_solution.py | erik-kristofer-anderson/codewars | fda780f40d1a2d8c5210cfd6ccf81148444bc9e8 | [
"MIT"
] | 1 | 2019-07-27T15:42:25.000Z | 2019-07-27T15:42:25.000Z | 2019/25_Josepus_Survivor/my_solution.py | erik-kristofer-anderson/Codewars | fda780f40d1a2d8c5210cfd6ccf81148444bc9e8 | [
"MIT"
] | null | null | null | def josephus_survivor(n,k):
my_array = list(range(1, n+1))
# print(my_array)
i = 0
while len(my_array) > 1:
length = len(my_array)
# print(my_array)
# print(length)
# print('i', i)
while not i < length:
i -= length
i += k - 1
while not i < length:
i -= length
# print(my_array)
# print('length', length)
# print('i', i)
_ = my_array.pop(i)
# print('pop out', _)
# print(my_array)
# print()
# if i < length:
# i += k
# if i >= length:
# i -= length
# result = my_array.pop(i)
# print(result)
# print(my_array)
# if i > length -1:
# i -= length
# # print('result', result)
return my_array[0]
# n, k = (7,3) # 4 expected
# print (josephus_survivor(n, k))
n, k = (11,19) # 10 expected
print (josephus_survivor(n, k))
| 22.953488 | 38 | 0.449848 | def josephus_survivor(n,k):
my_array = list(range(1, n+1))
i = 0
while len(my_array) > 1:
length = len(my_array)
while not i < length:
i -= length
i += k - 1
while not i < length:
i -= length
_ = my_array.pop(i)
1,19)
print (josephus_survivor(n, k))
| true | true |
1c466d37b86970653faf38b62fdf0da523eb0c8b | 284 | py | Python | src/Distiller/textbrewer/distillers.py | haroldNLP/Distiller | f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd | [
"MIT"
] | 2 | 2022-03-21T08:02:02.000Z | 2022-03-21T08:29:07.000Z | src/Distiller/textbrewer/distillers.py | haroldNLP/Distiller | f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd | [
"MIT"
] | null | null | null | src/Distiller/textbrewer/distillers.py | haroldNLP/Distiller | f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd | [
"MIT"
] | null | null | null | from .distiller_train import BasicTrainer
from .distiller_basic import BasicDistiller
from .distiller_general import GeneralDistiller
from .distiller_multitask import MultiTaskDistiller
from .distiller_multiteacher import MultiTeacherDistiller
from .distiller_emd import EMDDistiller
| 40.571429 | 57 | 0.894366 | from .distiller_train import BasicTrainer
from .distiller_basic import BasicDistiller
from .distiller_general import GeneralDistiller
from .distiller_multitask import MultiTaskDistiller
from .distiller_multiteacher import MultiTeacherDistiller
from .distiller_emd import EMDDistiller
| true | true |
1c466db29b0f06ae975488369e9165135775e8e7 | 4,842 | py | Python | src/models/sp_classifier_model_test.py | bartek-wojcik/graph_agregations | 12305e4ffdf4db60da041689f04d96b48e9e72e5 | [
"MIT"
] | null | null | null | src/models/sp_classifier_model_test.py | bartek-wojcik/graph_agregations | 12305e4ffdf4db60da041689f04d96b48e9e72e5 | [
"MIT"
] | null | null | null | src/models/sp_classifier_model_test.py | bartek-wojcik/graph_agregations | 12305e4ffdf4db60da041689f04d96b48e9e72e5 | [
"MIT"
] | null | null | null | from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.metrics.classification import Accuracy
from src.models.modules import gat, jumping_knowledge, graph_sage, gcn, vector_sage_module, jumping_knowledge_test
class SuperpixelClassifierModelTest(LightningModule):
"""LightningModule for image classification from superpixels."""
def __init__(
self,
architecture: str = "GraphSAGE",
aggregation_method: str = "concat",
num_node_features: int = 1,
add_pos_to_features: bool = False,
num_conv_layers: int = 3,
conv_size: int = 128,
lin_size: int = 128,
output_size: int = 10,
lr: float = 0.001,
weight_decay: float = 0,
**kwargs
):
super().__init__()
self.save_hyperparameters()
self.add_pos_to_features = add_pos_to_features
# init network architecture
if self.hparams.architecture == "GraphSAGE":
self.model = graph_sage.GraphSage(hparams=self.hparams)
elif self.hparams.architecture == "GAT":
self.model = gat.GAT(hparams=self.hparams)
elif self.hparams.architecture == "JumpingKnowledge":
self.model = jumping_knowledge_test.JK(hparams=self.hparams)
elif self.hparams.architecture == "GCN":
self.model = gcn.GCN(hparams=self.hparams)
elif self.hparams.architecture == "VectorSAGE":
self.model = vector_sage_module.VectorSAGEModule(hparams=self.hparams)
else:
raise Exception("Incorrect architecture name!")
# loss function
self.criterion = torch.nn.CrossEntropyLoss()
# use separate metric instance for train, val and test step
# to ensure a proper reduction over the epoch
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.metric_hist = {
"train/acc": [],
"val/acc": [],
"train/loss": [],
"val/loss": [],
}
def forward(self, x, edge_index, batch, pos):
return self.model(x, edge_index, batch, pos)
def step(self, data: Any):
x, edge_index, batch, pos, y = data.x, data.edge_index, data.batch, data.pos, data.y
if self.add_pos_to_features:
x = torch.cat((x, pos), 1)
logits = self.forward(x, edge_index, batch, pos)
loss = self.criterion(logits, y)
preds = torch.argmax(logits, dim=1)
return loss, preds, y
def training_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.train_accuracy(preds, targets)
self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("train/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def training_epoch_end(self, outputs: List[Any]):
# log best so far train acc and train loss
self.metric_hist["train/acc"].append(self.trainer.callback_metrics["train/acc"])
self.metric_hist["train/loss"].append(self.trainer.callback_metrics["train/loss"])
self.log("train/acc_best", max(self.metric_hist["train/acc"]), prog_bar=False)
self.log("train/loss_best", min(self.metric_hist["train/loss"]), prog_bar=False)
def validation_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.val_accuracy(preds, targets)
self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("val/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def validation_epoch_end(self, outputs: List[Any]):
# log best so far val acc and val loss
self.metric_hist["val/acc"].append(self.trainer.callback_metrics["val/acc"])
self.metric_hist["val/loss"].append(self.trainer.callback_metrics["val/loss"])
self.log("val/acc_best", max(self.metric_hist["val/acc"]), prog_bar=False)
self.log("val/loss_best", min(self.metric_hist["val/loss"]), prog_bar=False)
def test_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.test_accuracy(preds, targets)
self.log("test/loss", loss, on_step=False, on_epoch=True)
self.log("test/acc", acc, on_step=False, on_epoch=True)
return {"loss": loss, "preds": preds, "targets": targets}
def test_epoch_end(self, outputs: List[Any]):
pass
def configure_optimizers(self):
return torch.optim.Adam(
params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay,
) | 42.104348 | 114 | 0.644981 | from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.metrics.classification import Accuracy
from src.models.modules import gat, jumping_knowledge, graph_sage, gcn, vector_sage_module, jumping_knowledge_test
class SuperpixelClassifierModelTest(LightningModule):
def __init__(
self,
architecture: str = "GraphSAGE",
aggregation_method: str = "concat",
num_node_features: int = 1,
add_pos_to_features: bool = False,
num_conv_layers: int = 3,
conv_size: int = 128,
lin_size: int = 128,
output_size: int = 10,
lr: float = 0.001,
weight_decay: float = 0,
**kwargs
):
super().__init__()
self.save_hyperparameters()
self.add_pos_to_features = add_pos_to_features
if self.hparams.architecture == "GraphSAGE":
self.model = graph_sage.GraphSage(hparams=self.hparams)
elif self.hparams.architecture == "GAT":
self.model = gat.GAT(hparams=self.hparams)
elif self.hparams.architecture == "JumpingKnowledge":
self.model = jumping_knowledge_test.JK(hparams=self.hparams)
elif self.hparams.architecture == "GCN":
self.model = gcn.GCN(hparams=self.hparams)
elif self.hparams.architecture == "VectorSAGE":
self.model = vector_sage_module.VectorSAGEModule(hparams=self.hparams)
else:
raise Exception("Incorrect architecture name!")
self.criterion = torch.nn.CrossEntropyLoss()
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.metric_hist = {
"train/acc": [],
"val/acc": [],
"train/loss": [],
"val/loss": [],
}
def forward(self, x, edge_index, batch, pos):
return self.model(x, edge_index, batch, pos)
def step(self, data: Any):
x, edge_index, batch, pos, y = data.x, data.edge_index, data.batch, data.pos, data.y
if self.add_pos_to_features:
x = torch.cat((x, pos), 1)
logits = self.forward(x, edge_index, batch, pos)
loss = self.criterion(logits, y)
preds = torch.argmax(logits, dim=1)
return loss, preds, y
def training_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.train_accuracy(preds, targets)
self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("train/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def training_epoch_end(self, outputs: List[Any]):
self.metric_hist["train/acc"].append(self.trainer.callback_metrics["train/acc"])
self.metric_hist["train/loss"].append(self.trainer.callback_metrics["train/loss"])
self.log("train/acc_best", max(self.metric_hist["train/acc"]), prog_bar=False)
self.log("train/loss_best", min(self.metric_hist["train/loss"]), prog_bar=False)
def validation_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.val_accuracy(preds, targets)
self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("val/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def validation_epoch_end(self, outputs: List[Any]):
self.metric_hist["val/acc"].append(self.trainer.callback_metrics["val/acc"])
self.metric_hist["val/loss"].append(self.trainer.callback_metrics["val/loss"])
self.log("val/acc_best", max(self.metric_hist["val/acc"]), prog_bar=False)
self.log("val/loss_best", min(self.metric_hist["val/loss"]), prog_bar=False)
def test_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.test_accuracy(preds, targets)
self.log("test/loss", loss, on_step=False, on_epoch=True)
self.log("test/acc", acc, on_step=False, on_epoch=True)
return {"loss": loss, "preds": preds, "targets": targets}
def test_epoch_end(self, outputs: List[Any]):
pass
def configure_optimizers(self):
return torch.optim.Adam(
params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay,
) | true | true |
1c466dc07772bb7c04ea801073ca12ed12884e52 | 7,794 | py | Python | torchvision/datasets/celeba.py | kirilkoroves/torchvision-0.3.0 | 39f46d141f6a7ac2b094545c33936ad4500d3c7d | [
"BSD-3-Clause"
] | 125 | 2020-06-17T19:58:56.000Z | 2022-03-28T12:54:43.000Z | datasets/celeba.py | ANLGBOY/ddim | 34d640e5180cc5ab378f84af6ed596cb0c810e6c | [
"MIT"
] | 6 | 2021-03-19T15:30:28.000Z | 2022-03-12T00:51:16.000Z | datasets/celeba.py | ANLGBOY/ddim | 34d640e5180cc5ab378f84af6ed596cb0c810e6c | [
"MIT"
] | 29 | 2020-06-18T19:24:04.000Z | 2022-03-11T11:20:47.000Z | import torch
import os
import PIL
from .vision import VisionDataset
from .utils import download_file_from_google_drive, check_integrity
class CelebA(VisionDataset):
"""`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
split (string): One of {'train', 'valid', 'test'}.
Accordingly dataset is selected.
target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
or ``landmarks``. Can also be a list to output a tuple with all specified target types.
The targets represent:
``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes
``identity`` (int): label for each person (data points with the same identity are the same person)
``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)
``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)
Defaults to ``attr``.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = "celeba"
# There currently does not appear to be a easy way to extract 7z in python (without introducing additional
# dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
# right now.
file_list = [
# File ID MD5 Hash Filename
("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
# ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
# ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
# ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
]
def __init__(self, root,
split="train",
target_type="attr",
transform=None, target_transform=None,
download=False):
import pandas
super(CelebA, self).__init__(root)
self.split = split
if isinstance(target_type, list):
self.target_type = target_type
else:
self.target_type = [target_type]
self.transform = transform
self.target_transform = target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.transform = transform
self.target_transform = target_transform
if split.lower() == "train":
split = 0
elif split.lower() == "valid":
split = 1
elif split.lower() == "test":
split = 2
else:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="valid" or split="test"')
with open(os.path.join(self.root, self.base_folder, "list_eval_partition.txt"), "r") as f:
splits = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "identity_CelebA.txt"), "r") as f:
self.identity = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_bbox_celeba.txt"), "r") as f:
self.bbox = pandas.read_csv(f, delim_whitespace=True, header=1, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_landmarks_align_celeba.txt"), "r") as f:
self.landmarks_align = pandas.read_csv(f, delim_whitespace=True, header=1)
with open(os.path.join(self.root, self.base_folder, "list_attr_celeba.txt"), "r") as f:
self.attr = pandas.read_csv(f, delim_whitespace=True, header=1)
mask = (splits[1] == split)
self.filename = splits[mask].index.values
self.identity = torch.as_tensor(self.identity[mask].values)
self.bbox = torch.as_tensor(self.bbox[mask].values)
self.landmarks_align = torch.as_tensor(self.landmarks_align[mask].values)
self.attr = torch.as_tensor(self.attr[mask].values)
self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}
def _check_integrity(self):
for (_, md5, filename) in self.file_list:
fpath = os.path.join(self.root, self.base_folder, filename)
_, ext = os.path.splitext(filename)
# Allow original archive to be deleted (zip and 7z)
# Only need the extracted images
if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
return False
# Should check a hash of the images
return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))
def download(self):
import zipfile
if self._check_integrity():
print('Files already downloaded and verified')
return
for (file_id, md5, filename) in self.file_list:
download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)
with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
f.extractall(os.path.join(self.root, self.base_folder))
def __getitem__(self, index):
X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
target = []
for t in self.target_type:
if t == "attr":
target.append(self.attr[index, :])
elif t == "identity":
target.append(self.identity[index, 0])
elif t == "bbox":
target.append(self.bbox[index, :])
elif t == "landmarks":
target.append(self.landmarks_align[index, :])
else:
raise ValueError("Target type \"{}\" is not recognized.".format(t))
target = tuple(target) if len(target) > 1 else target[0]
if self.transform is not None:
X = self.transform(X)
if self.target_transform is not None:
target = self.target_transform(target)
return X, target
def __len__(self):
return len(self.attr)
def extra_repr(self):
lines = ["Target type: {target_type}", "Split: {split}"]
return '\n'.join(lines).format(**self.__dict__)
| 47.52439 | 120 | 0.628689 | import torch
import os
import PIL
from .vision import VisionDataset
from .utils import download_file_from_google_drive, check_integrity
class CelebA(VisionDataset):
base_folder = "celeba"
file_list = [
("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
]
def __init__(self, root,
split="train",
target_type="attr",
transform=None, target_transform=None,
download=False):
import pandas
super(CelebA, self).__init__(root)
self.split = split
if isinstance(target_type, list):
self.target_type = target_type
else:
self.target_type = [target_type]
self.transform = transform
self.target_transform = target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.transform = transform
self.target_transform = target_transform
if split.lower() == "train":
split = 0
elif split.lower() == "valid":
split = 1
elif split.lower() == "test":
split = 2
else:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="valid" or split="test"')
with open(os.path.join(self.root, self.base_folder, "list_eval_partition.txt"), "r") as f:
splits = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "identity_CelebA.txt"), "r") as f:
self.identity = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_bbox_celeba.txt"), "r") as f:
self.bbox = pandas.read_csv(f, delim_whitespace=True, header=1, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_landmarks_align_celeba.txt"), "r") as f:
self.landmarks_align = pandas.read_csv(f, delim_whitespace=True, header=1)
with open(os.path.join(self.root, self.base_folder, "list_attr_celeba.txt"), "r") as f:
self.attr = pandas.read_csv(f, delim_whitespace=True, header=1)
mask = (splits[1] == split)
self.filename = splits[mask].index.values
self.identity = torch.as_tensor(self.identity[mask].values)
self.bbox = torch.as_tensor(self.bbox[mask].values)
self.landmarks_align = torch.as_tensor(self.landmarks_align[mask].values)
self.attr = torch.as_tensor(self.attr[mask].values)
self.attr = (self.attr + 1) // 2
def _check_integrity(self):
for (_, md5, filename) in self.file_list:
fpath = os.path.join(self.root, self.base_folder, filename)
_, ext = os.path.splitext(filename)
if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
return False
return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))
def download(self):
import zipfile
if self._check_integrity():
print('Files already downloaded and verified')
return
for (file_id, md5, filename) in self.file_list:
download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)
with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
f.extractall(os.path.join(self.root, self.base_folder))
def __getitem__(self, index):
X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
target = []
for t in self.target_type:
if t == "attr":
target.append(self.attr[index, :])
elif t == "identity":
target.append(self.identity[index, 0])
elif t == "bbox":
target.append(self.bbox[index, :])
elif t == "landmarks":
target.append(self.landmarks_align[index, :])
else:
raise ValueError("Target type \"{}\" is not recognized.".format(t))
target = tuple(target) if len(target) > 1 else target[0]
if self.transform is not None:
X = self.transform(X)
if self.target_transform is not None:
target = self.target_transform(target)
return X, target
    def __len__(self):
        # Dataset size = number of rows in the attribute tensor
        # (one row per selected sample after the split mask).
        return len(self.attr)
def extra_repr(self):
lines = ["Target type: {target_type}", "Split: {split}"]
return '\n'.join(lines).format(**self.__dict__)
| true | true |
1c466ddb0d67ce19e3f6d28c4f2f173c575e35e2 | 16,649 | py | Python | netapp/santricity/api/symbol/p_api.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | netapp/santricity/api/symbol/p_api.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | netapp/santricity/api/symbol/p_api.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | #!/usr/bin/env python
# coding: utf-8
"""
PApi.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ....santricity.configuration import Configuration
from ....santricity.api_client import ApiClient
class PApi(object):
    """SYMbol RPC endpoints whose names start with 'P' (ping / power
    operations) for a SANtricity storage system.

    All three endpoints share identical request plumbing (kwarg validation,
    required-parameter checks, header construction, POST dispatch), so it is
    factored into :meth:`_call_symbol`; the public methods only supply the
    endpoint specifics (resource path, response type and optional body).
    """

    # Sentinel distinguishing "endpoint takes no body" from "body is None".
    _NO_BODY = object()

    def __init__(self, api_client=None):
        """Use *api_client* when given, otherwise the shared client from the
        global Configuration (created lazily with the '/devmgr/v2' context)."""
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient(context_path='/devmgr/v2')
            self.api_client = config.api_client

    def _call_symbol(self, method_name, resource_path, response_type,
                     system_id, kwargs, body=_NO_BODY):
        """Validate arguments and POST a SYMbol request.

        :param str method_name: public method name, used in error messages.
        :param str resource_path: endpoint path containing a {system-id} slot.
        :param str response_type: type name the response is deserialized to.
        :param str system_id: storage-system id or WWN (required).
        :param dict kwargs: optional 'controller', 'verbose_error_response'
            and 'callback' (async request) arguments from the caller.
        :param body: request body, or ``_NO_BODY`` for body-less endpoints.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is None.
        """
        for key in kwargs:
            if key not in ('controller', 'verbose_error_response', 'callback'):
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
        if system_id is None:
            raise ValueError("Missing the required parameter `system_id` when calling `%s`" % method_name)
        # The sentinel is never None, so a plain None check catches a
        # missing required body without affecting body-less endpoints.
        if body is None:
            raise ValueError("Missing the required parameter `body` when calling `%s`" % method_name)

        query_params = {}
        if 'controller' in kwargs:
            query_params['controller'] = kwargs['controller']
        if 'verbose_error_response' in kwargs:
            query_params['verboseErrorResponse'] = kwargs['verbose_error_response']

        header_params = {}
        accept = self.api_client.select_header_accept(['application/json'])
        if accept:
            header_params['Accept'] = accept
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

        # NOTE: the legacy "{format}" -> "json" replace was a no-op here;
        # none of these resource paths contain a {format} placeholder.
        return self.api_client.call_api(resource_path, 'POST',
                                        {'system-id': system_id},
                                        query_params,
                                        header_params,
                                        body=None if body is self._NO_BODY else body,
                                        post_params=[],
                                        files={},
                                        response_type=response_type,
                                        auth_settings=['basicAuth'],
                                        callback=kwargs.get('callback'))

    def symbol_ping_controller(self, system_id, **kwargs):
        """Verify that the controller is responsive and operating properly.

        Documented return codes: ok.

        :param str system_id: unique identifier of the storage-system
            (id or WWN). (required)
        :param str controller: controller selection (kwarg).
        :param bool verbose_error_response: (kwarg).
        :param callback: function invoked with the response for an
            asynchronous request; the call then returns the request thread.
        :return: str
        :raises ValueError: if ``system_id`` is None.
        :raises TypeError: on an unexpected keyword argument.
        """
        return self._call_symbol(
            'symbol_ping_controller',
            '/storage-systems/{system-id}/symbol/pingController',
            'str', system_id, kwargs)

    def symbol_power_cycle_physical_drive(self, system_id, body, **kwargs):
        """Power cycle an individual physical drive.

        Documented return codes: ok, driveNotUnassigned, volumeReconfiguring,
        volumeNotOptimal, downloadInProgress, parityScanInProgress,
        volumeGroupNotComplete, dpcVolumeGroupNotRedundant,
        dpcVolumeNotInitialized, dpcExclusiveOperationActive, dpcFormatActive,
        dpcUnreadableSectorsPresent, dpcPowerCycleAlreadyInProgress,
        dpcEnclosureHardwareUnsupported, dpcEnclosureFwDownlevel.

        :param str system_id: unique identifier of the storage-system
            (id or WWN). (required)
        :param PowerCyclePhysicalDriveDescriptor body: descriptor for the
            drive to be power cycled. (required)
        :param str controller: controller selection (kwarg).
        :param bool verbose_error_response: (kwarg).
        :param callback: function for an asynchronous request (kwarg).
        :return: PowerCyclePhysicalDriveDataReturn
        :raises ValueError: if ``system_id`` or ``body`` is None.
        :raises TypeError: on an unexpected keyword argument.
        """
        return self._call_symbol(
            'symbol_power_cycle_physical_drive',
            '/storage-systems/{system-id}/symbol/powerCyclePhysicalDrive',
            'PowerCyclePhysicalDriveDataReturn', system_id, kwargs, body=body)

    def symbol_power_down_array(self, system_id, **kwargs):
        """Power off a storage array as gracefully as possible.

        New SYMbol commands receive an RPC error once this is accepted;
        in-process "active" commands are allowed to complete.  Status is
        returned just prior to the actual power-down event.  Authentication
        is required for this command.

        Documented return codes: ok, noHeap, background, cacheSyncFailure,
        quiescenceFailed, controllerInServiceMode.

        :param str system_id: unique identifier of the storage-system
            (id or WWN). (required)
        :param str controller: controller selection (kwarg).
        :param bool verbose_error_response: (kwarg).
        :param callback: function for an asynchronous request (kwarg).
        :return: str
        :raises ValueError: if ``system_id`` is None.
        :raises TypeError: on an unexpected keyword argument.
        """
        return self._call_symbol(
            'symbol_power_down_array',
            '/storage-systems/{system-id}/symbol/powerDownArray',
            'str', system_id, kwargs)
| 41.210396 | 845 | 0.568983 |
from __future__ import absolute_import
import sys
import os
from six import iteritems
from ....santricity.configuration import Configuration
from ....santricity.api_client import ApiClient
class PApi(object):
    """SYMbol RPC endpoints whose names start with 'P' (ping / power
    operations) for a SANtricity storage system.

    All three endpoints share identical request plumbing (kwarg validation,
    required-parameter checks, header construction, POST dispatch), so it is
    factored into :meth:`_call_symbol`; the public methods only supply the
    endpoint specifics (resource path, response type and optional body).
    """

    # Sentinel distinguishing "endpoint takes no body" from "body is None".
    _NO_BODY = object()

    def __init__(self, api_client=None):
        """Use *api_client* when given, otherwise the shared client from the
        global Configuration (created lazily with the '/devmgr/v2' context)."""
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient(context_path='/devmgr/v2')
            self.api_client = config.api_client

    def _call_symbol(self, method_name, resource_path, response_type,
                     system_id, kwargs, body=_NO_BODY):
        """Validate arguments and POST a SYMbol request.

        :param str method_name: public method name, used in error messages.
        :param str resource_path: endpoint path containing a {system-id} slot.
        :param str response_type: type name the response is deserialized to.
        :param str system_id: storage-system id or WWN (required).
        :param dict kwargs: optional 'controller', 'verbose_error_response'
            and 'callback' (async request) arguments from the caller.
        :param body: request body, or ``_NO_BODY`` for body-less endpoints.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is None.
        """
        for key in kwargs:
            if key not in ('controller', 'verbose_error_response', 'callback'):
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
        if system_id is None:
            raise ValueError("Missing the required parameter `system_id` when calling `%s`" % method_name)
        # The sentinel is never None, so a plain None check catches a
        # missing required body without affecting body-less endpoints.
        if body is None:
            raise ValueError("Missing the required parameter `body` when calling `%s`" % method_name)

        query_params = {}
        if 'controller' in kwargs:
            query_params['controller'] = kwargs['controller']
        if 'verbose_error_response' in kwargs:
            query_params['verboseErrorResponse'] = kwargs['verbose_error_response']

        header_params = {}
        accept = self.api_client.select_header_accept(['application/json'])
        if accept:
            header_params['Accept'] = accept
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

        # NOTE: the legacy "{format}" -> "json" replace was a no-op here;
        # none of these resource paths contain a {format} placeholder.
        return self.api_client.call_api(resource_path, 'POST',
                                        {'system-id': system_id},
                                        query_params,
                                        header_params,
                                        body=None if body is self._NO_BODY else body,
                                        post_params=[],
                                        files={},
                                        response_type=response_type,
                                        auth_settings=['basicAuth'],
                                        callback=kwargs.get('callback'))

    def symbol_ping_controller(self, system_id, **kwargs):
        """Verify that the controller is responsive and operating properly.

        :param str system_id: unique identifier of the storage-system
            (id or WWN). (required)
        :param str controller: controller selection (kwarg).
        :param bool verbose_error_response: (kwarg).
        :param callback: function invoked with the response for an
            asynchronous request; the call then returns the request thread.
        :return: str
        :raises ValueError: if ``system_id`` is None.
        :raises TypeError: on an unexpected keyword argument.
        """
        return self._call_symbol(
            'symbol_ping_controller',
            '/storage-systems/{system-id}/symbol/pingController',
            'str', system_id, kwargs)

    def symbol_power_cycle_physical_drive(self, system_id, body, **kwargs):
        """Power cycle an individual physical drive.

        :param str system_id: unique identifier of the storage-system
            (id or WWN). (required)
        :param PowerCyclePhysicalDriveDescriptor body: descriptor for the
            drive to be power cycled. (required)
        :param str controller: controller selection (kwarg).
        :param bool verbose_error_response: (kwarg).
        :param callback: function for an asynchronous request (kwarg).
        :return: PowerCyclePhysicalDriveDataReturn
        :raises ValueError: if ``system_id`` or ``body`` is None.
        :raises TypeError: on an unexpected keyword argument.
        """
        return self._call_symbol(
            'symbol_power_cycle_physical_drive',
            '/storage-systems/{system-id}/symbol/powerCyclePhysicalDrive',
            'PowerCyclePhysicalDriveDataReturn', system_id, kwargs, body=body)

    def symbol_power_down_array(self, system_id, **kwargs):
        """Power off a storage array as gracefully as possible.

        :param str system_id: unique identifier of the storage-system
            (id or WWN). (required)
        :param str controller: controller selection (kwarg).
        :param bool verbose_error_response: (kwarg).
        :param callback: function for an asynchronous request (kwarg).
        :return: str
        :raises ValueError: if ``system_id`` is None.
        :raises TypeError: on an unexpected keyword argument.
        """
        return self._call_symbol(
            'symbol_power_down_array',
            '/storage-systems/{system-id}/symbol/powerDownArray',
            'str', system_id, kwargs)
| true | true |
1c466e24cb956d06875072696560d66b6ce8e400 | 4,071 | py | Python | alipay/aop/api/request/KoubeiMarketingCampaignItemBatchqueryRequest.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/KoubeiMarketingCampaignItemBatchqueryRequest.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/KoubeiMarketingCampaignItemBatchqueryRequest.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignItemBatchqueryModel import KoubeiMarketingCampaignItemBatchqueryModel
class KoubeiMarketingCampaignItemBatchqueryRequest(object):
    """Request object for the Alipay OpenAPI method
    ``koubei.marketing.campaign.item.batchquery``.

    Holds the business payload (``biz_model``/``biz_content``) plus the
    common gateway parameters, and flattens them via :meth:`get_params`
    into the key/value dict posted to the gateway.
    """
    def __init__(self, biz_model=None):
        # Typed business-model object; serialized into the request when set.
        self._biz_model = biz_model
        # KoubeiMarketingCampaignItemBatchqueryModel payload for this call.
        self._biz_content = None
        # Gateway method version.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        # Async-notification and synchronous-redirect URLs.
        self._notify_url = None
        self._return_url = None
        # Extra user-defined text parameters merged into the request dict.
        self._udf_params = None
        # Whether the request content should be encrypted before sending.
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to coerce.
        if isinstance(value, KoubeiMarketingCampaignItemBatchqueryModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiMarketingCampaignItemBatchqueryModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values: udf_params must stay a dict.
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra text parameter (stored in udf_params)."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Flatten this request into the parameter dict sent to the gateway.

        biz_model (when set) is JSON-serialized with compact separators and
        stable key order; biz_content (when set) is serialized the same way
        under the 'biz_content' key.  Only non-empty optional fields are
        included; udf_params are merged in last.
        """
        params = dict()
        params[P_METHOD] = 'koubei.marketing.campaign.item.batchquery'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            # A model instance is serialized; a raw string passes through.
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this method has none."""
        multipart_params = dict()
        return multipart_params
| 28.075862 | 166 | 0.651437 |
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignItemBatchqueryModel import KoubeiMarketingCampaignItemBatchqueryModel
class KoubeiMarketingCampaignItemBatchqueryRequest(object):
    """Request object for the Alipay OpenAPI method
    ``koubei.marketing.campaign.item.batchquery``.

    Holds the business payload (``biz_model``/``biz_content``) plus the
    common gateway parameters, and flattens them via :meth:`get_params`
    into the key/value dict posted to the gateway.
    """
    def __init__(self, biz_model=None):
        # Typed business-model object; serialized into the request when set.
        self._biz_model = biz_model
        # KoubeiMarketingCampaignItemBatchqueryModel payload for this call.
        self._biz_content = None
        # Gateway method version.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        # Async-notification and synchronous-redirect URLs.
        self._notify_url = None
        self._return_url = None
        # Extra user-defined text parameters merged into the request dict.
        self._udf_params = None
        # Whether the request content should be encrypted before sending.
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to coerce.
        if isinstance(value, KoubeiMarketingCampaignItemBatchqueryModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiMarketingCampaignItemBatchqueryModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values: udf_params must stay a dict.
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra text parameter (stored in udf_params)."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Flatten this request into the parameter dict sent to the gateway.

        biz_model (when set) is JSON-serialized with compact separators and
        stable key order; biz_content (when set) is serialized the same way
        under the 'biz_content' key.  Only non-empty optional fields are
        included; udf_params are merged in last.
        """
        params = dict()
        params[P_METHOD] = 'koubei.marketing.campaign.item.batchquery'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            # A model instance is serialized; a raw string passes through.
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this method has none."""
        multipart_params = dict()
        return multipart_params
| true | true |
1c466e5cad5ca16ff031b3575687a75e615161ff | 1,926 | py | Python | tests/terraform/checks/resource/aws/test_S3MFADelete.py | cclauss/checkov | 60a385fcaff1499cf00c2d0018575fe5ab71f556 | [
"Apache-2.0"
] | 5 | 2021-07-29T18:08:40.000Z | 2022-03-21T04:39:32.000Z | tests/terraform/checks/resource/aws/test_S3MFADelete.py | cclauss/checkov | 60a385fcaff1499cf00c2d0018575fe5ab71f556 | [
"Apache-2.0"
] | 16 | 2021-03-09T07:38:38.000Z | 2021-06-09T03:53:55.000Z | tests/terraform/checks/resource/aws/test_S3MFADelete.py | cclauss/checkov | 60a385fcaff1499cf00c2d0018575fe5ab71f556 | [
"Apache-2.0"
] | 2 | 2021-08-23T13:25:36.000Z | 2021-11-05T21:44:52.000Z | import unittest
from checkov.terraform.checks.resource.aws.S3MFADelete import scanner
from checkov.common.models.enums import CheckResult
class TestS3MFADelete(unittest.TestCase):
    """Unit tests for the S3 MFA-delete Terraform resource check."""

    @staticmethod
    def _bucket_conf(**overrides):
        """Baseline aws_s3_bucket resource config, extendable per test."""
        conf = {"region": ["us-west-2"],
                "bucket": ["my_bucket"],
                "acl": ["public-read"],
                "force_destroy": [True],
                "tags": [{"Name": "my-bucket"}]}
        conf.update(overrides)
        return conf

    def test_failure(self):
        # No versioning block at all -> the check must fail.
        result = scanner.scan_resource_conf(conf=self._bucket_conf())
        self.assertEqual(CheckResult.FAILED, result)

    def test_failure_versioning_enabled(self):
        # Versioning without mfa_delete is still a failure.
        conf = self._bucket_conf(versioning=[{"enabled": [True]}])
        self.assertEqual(CheckResult.FAILED, scanner.scan_resource_conf(conf=conf))

    def test_success(self):
        # Versioning enabled together with mfa_delete passes the check.
        conf = self._bucket_conf(
            logging=[{"target_bucket": "logging-bucket",
                      "target_prefix": "log/"}],
            versioning=[{"enabled": [True]},
                        {"mfa_delete": [True]}])
        self.assertEqual(CheckResult.PASSED, scanner.scan_resource_conf(conf=conf))
# Allow running this test module directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()
| 40.125 | 72 | 0.475597 | import unittest
from checkov.terraform.checks.resource.aws.S3MFADelete import scanner
from checkov.common.models.enums import CheckResult
class TestS3MFADelete(unittest.TestCase):
    """Unit tests for the S3 MFA-delete Terraform resource check."""

    @staticmethod
    def _bucket_conf(**overrides):
        """Baseline aws_s3_bucket resource config, extendable per test."""
        conf = {"region": ["us-west-2"],
                "bucket": ["my_bucket"],
                "acl": ["public-read"],
                "force_destroy": [True],
                "tags": [{"Name": "my-bucket"}]}
        conf.update(overrides)
        return conf

    def test_failure(self):
        # No versioning block at all -> the check must fail.
        result = scanner.scan_resource_conf(conf=self._bucket_conf())
        self.assertEqual(CheckResult.FAILED, result)

    def test_failure_versioning_enabled(self):
        # Versioning without mfa_delete is still a failure.
        conf = self._bucket_conf(versioning=[{"enabled": [True]}])
        self.assertEqual(CheckResult.FAILED, scanner.scan_resource_conf(conf=conf))

    def test_success(self):
        # Versioning enabled together with mfa_delete passes the check.
        conf = self._bucket_conf(
            logging=[{"target_bucket": "logging-bucket",
                      "target_prefix": "log/"}],
            versioning=[{"enabled": [True]},
                        {"mfa_delete": [True]}])
        self.assertEqual(CheckResult.PASSED, scanner.scan_resource_conf(conf=conf))
# Allow running this test module directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()
| true | true |
1c46706d423b7f39a9afbea8fb6279b3a302d7b9 | 3,384 | py | Python | test/functional/test_framework/coverage.py | AlvaStudio/vtl2 | 0d0eeeaeb45d841086a9fabaeb77d3cad14c5f87 | [
"MIT"
] | null | null | null | test/functional/test_framework/coverage.py | AlvaStudio/vtl2 | 0d0eeeaeb45d841086a9fabaeb77d3cad14c5f87 | [
"MIT"
] | null | null | null | test/functional/test_framework/coverage.py | AlvaStudio/vtl2 | 0d0eeeaeb45d841086a9fabaeb77d3cad14c5f87 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
    """Wraps an AuthServiceProxy and records every RPC method invoked
    through it, optionally appending each method name to a log file."""

    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the wrapped proxy.
            coverage_logfile (str): when set, each called service name is
                appended to this file.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, name):
        attr = getattr(self.auth_service_proxy_instance, name)
        if isinstance(attr, type(self.auth_service_proxy_instance)):
            # Sub-proxies are wrapped too so nested calls are also logged.
            return AuthServiceProxyWrapper(attr, self.coverage_logfile)
        return attr

    def __call__(self, *args, **kwargs):
        """Delegate to the wrapped proxy, then record the RPC method used."""
        result = self.auth_service_proxy_instance(*args, **kwargs)
        self._log_call()
        return result

    def _log_call(self):
        rpc_method = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as log:
                log.write(rpc_method + "\n")

    def __truediv__(self, relative_uri):
        return AuthServiceProxyWrapper(
            self.auth_service_proxy_instance / relative_uri,
            self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
    """Return a coverage-file path unique to this process ID and node.

    The file will contain the list of RPC commands covered by that node.
    """
    basename = "coverage.pid%s.node%s.txt" % (os.getpid(), n_node)
    return os.path.join(dirname, basename)
def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `vitalium-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)
    if os.path.isfile(filename):
        return False
    help_output = node.help().split('\n')
    commands = set()
    for line in help_output:
        line = line.strip()
        # Ignore blanks and section headers such as '== Blockchain =='.
        if line and not line.startswith('='):
            commands.add("%s\n" % line.split()[0])
    with open(filename, 'w', encoding='utf8') as f:
        # Sort so the reference file is deterministic across runs (sets
        # have no stable order); consumers treat it as an unordered set
        # of command names, so this is backward-compatible.
        f.writelines(sorted(commands))
    return True
| 30.763636 | 87 | 0.661052 |
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
    """
    An object that wraps AuthServiceProxy to record specific RPC calls
    for coverage tracking.
    """
    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to this file when called.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile
    def __getattr__(self, name):
        return_val = getattr(self.auth_service_proxy_instance, name)
        # If proxy getattr returned an unwrapped (non-proxy) value, return
        # it as-is; nested proxies are wrapped so their calls are logged too.
        if not isinstance(return_val, type(self.auth_service_proxy_instance)):
            return return_val
        return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
    def __call__(self, *args, **kwargs):
        """
        Delegates to AuthServiceProxy, then records the particular RPC
        method that was called.
        """
        return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return return_val
    def _log_call(self):
        # Append the proxy's current RPC method name to the coverage log.
        rpc_method = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
                f.write("%s\n" % rpc_method)
    def __truediv__(self, relative_uri):
        """Mirror AuthServiceProxy's '/' composition, keeping the wrapper."""
        return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
                                       self.coverage_logfile)
    def get_request(self, *args, **kwargs):
        """Log the call, then forward get_request to the wrapped proxy."""
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.
    This file will contain a list of RPC commands covered.
    """
    pid = str(os.getpid())
    return os.path.join(
        dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC commands the client advertises, for
    coverage comparison. This will only happen once per coverage
    directory (the file acts as the marker).

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool: whether the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)
    if os.path.isfile(filename):
        return False
    help_output = node.help().split('\n')
    commands = set()
    for line in help_output:
        line = line.strip()
        # Skip blank lines and section headers like '== Blockchain =='.
        if line and not line.startswith('='):
            commands.add("%s\n" % line.split()[0])
    with open(filename, 'w', encoding='utf8') as f:
        f.writelines(list(commands))
    return True
| true | true |
1c467098f9187f933e5f2190a330b0d788c96cba | 524 | py | Python | lib/gensim/summarization/commons.py | duyetdev/api.duyetdev.com | 4c33cc2cfb43ad6c4089873230e7b657659bff15 | [
"MIT"
] | 4 | 2018-11-27T01:35:30.000Z | 2022-01-27T01:17:11.000Z | lib/gensim/summarization/commons.py | duyetdev/api.duyetdev.com | 4c33cc2cfb43ad6c4089873230e7b657659bff15 | [
"MIT"
] | 12 | 2020-07-11T01:42:51.000Z | 2020-08-12T17:17:35.000Z | lib/gensim/summarization/commons.py | duyetdev/api.duyetdev.com | 4c33cc2cfb43ad6c4089873230e7b657659bff15 | [
"MIT"
] | 1 | 2018-11-27T01:35:33.000Z | 2018-11-27T01:35:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from gensim.summarization.graph import Graph
def build_graph(sequence):
    """Return a Graph with one node for each distinct item of `sequence`.

    Duplicate items are skipped, so each item appears as at most one node.
    """
    result = Graph()
    for item in sequence:
        if result.has_node(item):
            continue
        result.add_node(item)
    return result
def remove_unreachable_nodes(graph):
    """Delete every node whose edges to its neighbors all have zero total
    weight (including isolated nodes with no neighbors at all).

    Args:
        graph: graph object exposing `nodes`, `neighbors`, `edge_weight`
            and `del_node`.
    """
    # Snapshot the node list: deleting nodes while iterating a live view
    # of graph.nodes() would corrupt the iteration.
    for node in list(graph.nodes()):
        if sum(graph.edge_weight((node, other)) for other in graph.neighbors(node)) == 0:
            graph.del_node(node)
| 24.952381 | 89 | 0.658397 |
from gensim.summarization.graph import Graph
def build_graph(sequence):
    """Build and return a Graph with one node per distinct item of
    `sequence`; duplicates are skipped.
    """
    graph = Graph()
    for item in sequence:
        # Only add items not already present as nodes.
        if not graph.has_node(item):
            graph.add_node(item)
    return graph
def remove_unreachable_nodes(graph):
    """Delete nodes whose edges to all their neighbors carry zero total
    weight (isolated nodes, having no neighbors, are deleted too).
    """
    for node in graph.nodes():
        # NOTE(review): nodes are deleted while iterating graph.nodes();
        # this is safe only if nodes() returns a fresh list -- confirm.
        if sum(graph.edge_weight((node, other)) for other in graph.neighbors(node)) == 0:
            graph.del_node(node)
| true | true |
1c4670dad12c93b48d43314ce50b40cb389322c4 | 1,554 | py | Python | python/script/run_vmaf_cross_validation.py | christosbampis/vmaf | 33e8dc675ace44dd1412b318c31eb3378612744c | [
"Apache-2.0"
] | null | null | null | python/script/run_vmaf_cross_validation.py | christosbampis/vmaf | 33e8dc675ace44dd1412b318c31eb3378612744c | [
"Apache-2.0"
] | 1 | 2018-09-05T16:33:08.000Z | 2018-09-05T16:33:08.000Z | python/script/run_vmaf_cross_validation.py | christosbampis/vmaf | 33e8dc675ace44dd1412b318c31eb3378612744c | [
"Apache-2.0"
] | 2 | 2018-09-05T03:59:46.000Z | 2018-09-18T03:57:52.000Z | __copyright__ = "Copyright 2016-2017, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import matplotlib.pyplot as plt
import numpy as np
from vmaf.config import VmafConfig
from vmaf.routine import run_vmaf_cv, run_vmaf_kfold_cv
if __name__ == '__main__':
    # ==== Run simple cross validation: one training and one testing dataset ====
    # Train a VMAF model on the public Netflix dataset, test on VQEGHD3,
    # and save the resulting model to the workspace.
    run_vmaf_cv(
        train_dataset_filepath=VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py'),
        test_dataset_filepath=VmafConfig.resource_path('dataset', 'VQEGHD3_dataset.py'),
        param_filepath=VmafConfig.resource_path('param', 'vmaf_v3.py'),
        output_model_filepath=VmafConfig.workspace_path('model', 'test_model1.pkl'),
    )
    # ==== Run cross validation across genres (tough test) ====
    # k-fold CV where each fold holds out one genre of content entirely.
    nflx_dataset_path = VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py')
    # Content ids grouped by genre; each group is one CV fold.
    contentid_groups = [
        [0, 5],  # cartoon: BigBuckBunny, FoxBird
        [1],  # CG: BirdsInCage
        [2, 6, 7],  # complex: CrowdRun, OldTownCross, Seeking
        [3, 4],  # ElFuente: ElFuente1, ElFuente2
        [8],  # sports: Tennis
    ]
    param_filepath = VmafConfig.resource_path('param', 'vmaf_v3.py')
    # Per-fold scores are aggregated with the plain mean; the commented
    # alternatives below are other aggregation strategies to experiment with.
    aggregate_method = np.mean
    # aggregate_method = ListStats.harmonic_mean
    # aggregate_method = partial(ListStats.lp_norm, p=2.0)
    run_vmaf_kfold_cv(
        dataset_filepath=nflx_dataset_path,
        contentid_groups=contentid_groups,
        param_filepath=param_filepath,
        aggregate_method=aggregate_method,
    )
    # Display the plots produced by the CV routines above.
    plt.show()
print 'Done.' | 33.782609 | 93 | 0.695624 | __copyright__ = "Copyright 2016-2017, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import matplotlib.pyplot as plt
import numpy as np
from vmaf.config import VmafConfig
from vmaf.routine import run_vmaf_cv, run_vmaf_kfold_cv
if __name__ == '__main__':
    # Simple cross validation: train a VMAF model on the public Netflix
    # dataset, test it on VQEGHD3, and save the trained model.
    run_vmaf_cv(
        train_dataset_filepath=VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py'),
        test_dataset_filepath=VmafConfig.resource_path('dataset', 'VQEGHD3_dataset.py'),
        param_filepath=VmafConfig.resource_path('param', 'vmaf_v3.py'),
        output_model_filepath=VmafConfig.workspace_path('model', 'test_model1.pkl'),
    )
    # Cross validation across content genres: each group of content ids
    # below forms one hold-out fold of a k-fold CV.
    nflx_dataset_path = VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py')
    contentid_groups = [
        [0, 5],  # cartoon: BigBuckBunny, FoxBird
        [1],  # CG: BirdsInCage
        [2, 6, 7],  # complex: CrowdRun, OldTownCross, Seeking
        [3, 4],  # ElFuente: ElFuente1, ElFuente2
        [8],  # sports: Tennis
    ]
    param_filepath = VmafConfig.resource_path('param', 'vmaf_v3.py')
    # Per-fold scores are aggregated with the plain mean.
    aggregate_method = np.mean
    run_vmaf_kfold_cv(
        dataset_filepath=nflx_dataset_path,
        contentid_groups=contentid_groups,
        param_filepath=param_filepath,
        aggregate_method=aggregate_method,
    )
    # Display the plots produced by the CV routines above.
    plt.show()
print 'Done.' | false | true |
1c467153e4dd6d941d4fa3ba61730d6360584541 | 271 | py | Python | lab_6.py | goni21-meet/meet2019y1lab6 | c75fab3b544a13d1ad8bef13e5675ff5ea165a0d | [
"MIT"
] | null | null | null | lab_6.py | goni21-meet/meet2019y1lab6 | c75fab3b544a13d1ad8bef13e5675ff5ea165a0d | [
"MIT"
] | null | null | null | lab_6.py | goni21-meet/meet2019y1lab6 | c75fab3b544a13d1ad8bef13e5675ff5ea165a0d | [
"MIT"
] | null | null | null | #import turtle
#x = 0
#while x<300:
# y = x**2/300
# turtle.goto(x,y)
# print( turtle.pos())
#x = x+100
#turtle.mainloop()
import turtle
num_pts = 5
for i in range(num_pts):
turtle.left(360/num_pts)
turtle.forward(100)
turtle.mainloop()
| 11.291667 | 28 | 0.597786 |
# Draw a regular five-sided figure with turtle graphics.
import turtle
# Number of sides; the equal turns below sum to 360 degrees, closing the path.
num_pts = 5
for i in range(num_pts):
    turtle.left(360/num_pts)
    turtle.forward(100)
# Keep the graphics window open until the user closes it.
turtle.mainloop()
| true | true |
1c46723f0a2b6f567ce353e54cc92039a142acf3 | 119 | py | Python | h/groups/__init__.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | 2 | 2021-11-07T23:14:54.000Z | 2021-11-17T10:11:55.000Z | h/groups/__init__.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | null | null | null | h/groups/__init__.py | ssin122/test-h | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | [
"MIT"
] | 1 | 2017-03-12T00:18:33.000Z | 2017-03-12T00:18:33.000Z | # -*- coding: utf-8 -*-
def includeme(config):
    """Pyramid-style include hook: register the groups search filter
    with the memex search machinery.
    """
    filter_path = 'h.groups.search.GroupAuthFilter'
    config.memex_add_search_filter(filter_path)
| 19.833333 | 69 | 0.714286 |
def includeme(config):
    """Pyramid-style include hook: register the GroupAuthFilter search
    filter with the memex search machinery.
    """
    config.memex_add_search_filter('h.groups.search.GroupAuthFilter')
| true | true |
1c467277bdb5e4e30d2dce15c741769d571a9408 | 3,454 | py | Python | lines/Segment.py | AlexTaguchi/lines | d091d52350d0bedc3c8af0aa5438b6a1da95151d | [
"MIT"
] | null | null | null | lines/Segment.py | AlexTaguchi/lines | d091d52350d0bedc3c8af0aa5438b6a1da95151d | [
"MIT"
] | null | null | null | lines/Segment.py | AlexTaguchi/lines | d091d52350d0bedc3c8af0aa5438b6a1da95151d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
class EvalPointError(ValueError):
    """Raised when a candidate fit cannot be evaluated (e.g. evaluating a
    point against an empty segment)."""


class Segment:
    """Incremental weighted least-squares line fit ``y = a*x + b``.

    Points (objects with ``x``, ``y`` and weight ``w`` attributes) are
    appended one at a time; running weighted sums make each refit O(1).
    When all points share a single x value the fit is a vertical line,
    reported with ``a == b == inf``.
    """
    def __init__(self):
        self.a = 0.0  # fitted slope (inf for a vertical fit)
        self.b = 0.0  # fitted intercept (inf for a vertical fit)
        self._points = []
        self._rss = 0.0  # Residual sum of squares of the current fit
        # Running weighted sums used by the closed-form regression formulas.
        self._wixiyi = 0.0  # sum of w_i * x_i * y_i
        self._wixi = 0.0    # sum of w_i * x_i
        self._wiyi = 0.0    # sum of w_i * y_i
        self._wixi2 = 0.0   # sum of w_i * x_i^2
        self._wi = 0.0      # sum of w_i

    def getLength(self):
        """Return the number of points currently in the segment."""
        return len(self._points)

    def appendPoint(self, p):
        """Append point ``p`` to the segment and update the line-fit
        parameters ``a``/``b`` and the cached residual sum of squares.
        """
        self._points.append(p)
        self._wixiyi += p.w * p.x * p.y
        self._wixi += p.w * p.x
        self._wiyi += p.w * p.y
        self._wixi2 += p.w * p.x * p.x
        self._wi += p.w
        if len(self._points) > 1:
            # If all points share exactly the same x, the line is vertical;
            # slope and intercept would be infinite.
            if self._wixi2 == self._wixi**2 / self._wi:
                self.a = float("inf")
                self.b = float("inf")
            else:
                self.a = (self._wixiyi - self._wixi * self._wiyi / self._wi)\
                    / (self._wixi2 - self._wixi**2 / self._wi)
                self.b = (self._wiyi - self.a * self._wixi) / self._wi
            self._rss = self.calcRSS(self.a, self.b)

    def calcRSS(self, a, b):
        """Calculate the weighted Residual Sum of Squares of the line
        ``y = a*x + b`` over the segment's points.

        Args:
            a (float): slope
            b (float): intercept

        Returns:
            float: residual sum of squares (0.0 with fewer than 2 points)
        """
        rss = 0.0
        if len(self._points) < 2:
            return rss
        # Fix: branch on the *arguments* a/b (previously on self.a/self.b),
        # so evaluating a candidate vertical fit while the current fit is
        # finite no longer falls into the linear branch and yields inf/nan.
        if a == float("inf") or b == float("inf"):
            # Vertical line: residuals are horizontal distances from the
            # weighted mean of x.  Fix: use the true weighted mean
            # sum(w*x)/sum(w) (previously divided by the point count,
            # which is wrong for non-uniform weights).
            avg_x = self._wixi / self._wi
            for p in self._points:
                rss += p.w * (p.x - avg_x)**2
            return rss
        for p in self._points:
            rss += p.w * (p.y - a * p.x - b)**2
        return rss

    def evalPoint(self, p):
        """Compute the (slope, intercept) the fit would have after adding
        ``p``, without mutating the segment.

        Args:
            p (Point): a point not in the segment

        Returns:
            (a, b): slope and intercept of the hypothetical fit;
                (inf, inf) for a vertical fit.

        Raises:
            EvalPointError: if the segment is empty (a line needs 2 points).
        """
        if not self._points:
            raise EvalPointError("""Cannot calculate slope
            and intercept with a single point.
            """)
        x = p.x
        y = p.y
        w = float(p.w)
        wixiyi = self._wixiyi + w*x*y
        wixi = self._wixi + w*x
        wiyi = self._wiyi + w*y
        wixi2 = self._wixi2 + w*x*x
        wi = self._wi + w
        if wixi2 == wixi**2/wi:
            a = float("inf")
            b = float("inf")
            return (a, b)
        a = (wixiyi - wixi*wiyi/wi)/(wixi2 - wixi**2/wi)
        b = (wiyi - a * wixi)/wi
        return (a, b)

    def evalRSS(self, p):
        """Evaluate the Residual Sum of Squares before including point ``p``.

        NOTE(review): combines the candidate fit's RSS over the existing
        points with ``p``'s residual against the *current* fit -- looks
        intentional as a segmentation cost, but confirm.
        """
        if len(self._points) < 2:
            return self._rss
        new_a, new_b = self.evalPoint(p)
        rss = self.calcRSS(new_a, new_b)
        rss += p.w * (p.y - self.a * p.x - self.b)**2
        return rss

    def getPoints(self):
        """Return the list of Point objects stored inside the Segment."""
        return self._points
| 29.271186 | 77 | 0.486972 |
class EvalPointError(ValueError):
    """Raised when a candidate fit cannot be evaluated (e.g. evaluating a
    point against an empty segment)."""
    pass
class Segment:
    """Incremental weighted least-squares line fit y = a*x + b.

    Points (objects with ``x``, ``y`` and weight ``w`` attributes) are
    appended one at a time; running weighted sums make each refit O(1).
    When all points share a single x value, the fit is a vertical line,
    reported with a == b == inf.
    """
    def __init__(self):
        self.a = 0.0  # fitted slope (inf for a vertical fit)
        self.b = 0.0  # fitted intercept (inf for a vertical fit)
        self._points = []  # points appended so far
        self._rss = 0.0  # residual sum of squares of the current fit
        # Running weighted sums used by the closed-form regression.
        self._wixiyi = 0.0  # sum of w_i * x_i * y_i
        self._wixi = 0.0  # sum of w_i * x_i
        self._wiyi = 0.0  # sum of w_i * y_i
        self._wixi2 = 0.0  # sum of w_i * x_i^2
        self._wi = 0.0  # sum of w_i
    def getLength(self):
        """Return the number of points currently in the segment."""
        return len(self._points)
    def appendPoint(self, p):
        """Append point ``p`` and refresh the fit parameters a/b and the
        cached residual sum of squares.
        """
        self._points.append(p)
        self._wixiyi += p.w * p.x * p.y
        self._wixi += p.w * p.x
        self._wiyi += p.w * p.y
        self._wixi2 += p.w * p.x * p.x
        self._wi += p.w
        if len(self._points) > 1:
            # All points at exactly the same x: vertical line, so slope
            # and intercept would be infinite.
            if self._wixi2 == self._wixi**2/self._wi:
                self.a = float("inf")
                self.b = float("inf")
            else:
                self.a = (self._wixiyi - self._wixi*self._wiyi/self._wi)\
                    / (self._wixi2 - self._wixi**2/self._wi)
                self.b = (self._wiyi - self.a * self._wixi)/self._wi
            self._rss = self.calcRSS(self.a, self.b)
    def calcRSS(self, a, b):
        """Calculate the weighted Residual Sum of Squares of line y=a*x+b
        over the segment's points.

        Args:
            a (float): slope
            b (float): intercept

        Returns:
            float: residual sum of squares (0.0 with fewer than 2 points)
        """
        rss = 0.0
        if len(self._points) < 2:
            return rss
        # NOTE(review): this branches on self.a/self.b rather than the a/b
        # arguments -- for a candidate vertical fit with a finite current
        # fit this takes the wrong branch; confirm intent.
        if self.a == float("inf") or self.b == float("inf"):
            avg_x = 0.0
            for p in self._points:
                avg_x += p.x * p.w
            # NOTE(review): divides by the point count, not by sum(w) --
            # not the weighted mean when weights are non-uniform; confirm.
            avg_x /= float(len(self._points))
            for p in self._points:
                rss += p.w * (p.x - avg_x)**2
            return rss
        for p in self._points:
            rss += p.w * (p.y - a * p.x - b)**2
        return rss
    def evalPoint(self, p):
        """Compute the (slope, intercept) the fit would have after adding
        ``p``, without mutating the segment.

        Raises:
            EvalPointError: if the segment is empty.
        """
        if not self._points:
            raise EvalPointError("""Cannot calculate slope
            and intercept with a single point.
            """)
        x = p.x
        y = p.y
        w = float(p.w)
        wixiyi = self._wixiyi + w*x*y
        wixi = self._wixi + w*x
        wiyi = self._wiyi + w*y
        wixi2 = self._wixi2 + w*x*x
        wi = self._wi + w
        # Candidate points all share one x: vertical line.
        if wixi2 == wixi**2/wi:
            a = float("inf")
            b = float("inf")
            return (a, b)
        a = (wixiyi - wixi*wiyi/wi)/(wixi2 - wixi**2/wi)
        b = (wiyi - a * wixi)/wi
        return (a, b)
    def evalRSS(self, p):
        """Evaluate the Residual Sum of Squares before including ``p``.

        NOTE(review): combines the candidate fit's RSS over existing points
        with p's residual against the *current* fit -- confirm intent.
        """
        if len(self._points) < 2:
            return self._rss
        new_a, new_b = self.evalPoint(p)
        rss = self.calcRSS(new_a, new_b)
        rss += p.w * (p.y - self.a * p.x - self.b)**2
        return rss
    def getPoints(self):
        """Return the list of Point objects stored inside the Segment."""
        return self._points
| true | true |
1c46739728bc3decdb8284725d1278b660ceff72 | 826 | py | Python | pirates/creature/Monstrous.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/creature/Monstrous.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/creature/Monstrous.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.creature.Monstrous
class Monstrous:
    """Mixin that tags its scene-graph nodes as 'monstrous' objects so
    collision code can identify them."""
    __module__ = __name__

    def initializeMonstrousTags(self, rootNodePath):
        """Attach the MonstrousObject python tag and the objType tag to
        both ``rootNodePath`` and ``self``."""
        # Imported locally, as in the original (presumably to avoid an
        # import cycle -- confirm).
        from pirates.piratesbase import PiratesGlobals
        targets = (rootNodePath, self)
        for target in targets:
            target.setPythonTag('MonstrousObject', self)
        objTypeValue = str(PiratesGlobals.COLL_MONSTROUS)
        for target in targets:
            target.setTag('objType', objTypeValue)

    def cleanupMontstrousTags(self, rootNodePath):
        """Remove the MonstrousObject python tags set above."""
        for target in (rootNodePath, self):
            target.clearPythonTag('MonstrousObject')

    def initializeBattleCollisions(self):
        # Placeholder: currently does nothing.
        pass
class Monstrous:
    """Mixin that tags its scene-graph nodes as 'monstrous' objects so
    collision code can identify them."""
    __module__ = __name__
    def initializeMonstrousTags(self, rootNodePath):
        """Attach the MonstrousObject python tag and the objType tag to
        both ``rootNodePath`` and ``self``."""
        # Local import, as in the original (presumably to avoid an import
        # cycle -- confirm).
        from pirates.piratesbase import PiratesGlobals
        rootNodePath.setPythonTag('MonstrousObject', self)
        self.setPythonTag('MonstrousObject', self)
        rootNodePath.setTag('objType', str(PiratesGlobals.COLL_MONSTROUS))
        self.setTag('objType', str(PiratesGlobals.COLL_MONSTROUS))
    def cleanupMontstrousTags(self, rootNodePath):
        """Remove the MonstrousObject python tags set above."""
        rootNodePath.clearPythonTag('MonstrousObject')
        self.clearPythonTag('MonstrousObject')
    def initializeBattleCollisions(self):
        # Placeholder: currently does nothing.
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.