code
stringlengths 1
199k
|
|---|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for this app: the CostRecord model plus two composite indexes
    used for per-project/per-payer queries (optionally filtered on `calculated`)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CostRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('calculated', models.BooleanField(db_index=True, default=False)),
                ('name', models.CharField(max_length=30)),
                ('cost', models.FloatField()),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('comment', models.TextField(blank=True)),
                ('category', models.CharField(choices=[('FO', 'Food'), ('CL', 'Clothing'), ('HO', 'Housing'), ('TR', 'Transportation'), ('ED', 'Education'), ('EN', 'Entertainment'), ('OT', 'Others')], db_index=True, default='OT', max_length=2)),
                ('payer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Project')),
            ],
        ),
        migrations.AddIndex(
            model_name='costrecord',
            index=models.Index(fields=['project', 'payer'], name='sheets_cost_project_8cabe8_idx'),
        ),
        migrations.AddIndex(
            model_name='costrecord',
            index=models.Index(fields=['project', 'payer', 'calculated'], name='sheets_cost_project_c57fc5_idx'),
        ),
    ]
|
from ..map_resource.utility import Utility
from .. import tools
import pandas as pd
# HERE API credentials and endpoint templates shared by every test below.
settings = {
'app_id': 'F8aPRXcW3MmyUvQ8Z3J9',
'app_code' : 'IVp1_zoGHdLdz0GvD_Eqsw',
'map_tile_base_url': 'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/',
'json_tile_base_url': 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?'
}
# Single Utility instance under test (module-level so all tests share it).
util = Utility(settings)
def test_get_tile():
    """Column/row of the Berlin tile at zoom 12.

    Official example from HERE:
    https://developer.here.com/rest-apis/documentation/enterprise-map-tile/topics/key-concepts.html
    """
    tile = util.get_tile(52.525439, 13.38727, 12)
    assert tile == [2200, 1343]
def test_get_quadkeys():
    """Quadkey for a known column/row/zoom triple.

    Official example from HERE:
    https://developer.here.com/rest-apis/documentation/traffic/common/map_tile/topics/quadkeys.html
    """
    quadkey = util.get_quadkeys(35210, 21493, 16)
    assert quadkey == "1202102332221212"
def test_get_map_tile_resource():
    """latlon and colrow coordinate forms must resolve to the same map-tile URL."""
    expected = 'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/14/4354/6562/512/png8?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw'
    assert util.get_map_tile_resource((33.670156, -84.325984), "latlon", 14, 512) == expected
    assert util.get_map_tile_resource((4354, 6562), "colrow", 14, 512) == expected
def test_get_traffic_json_resource():
    """latlon and colrow coordinate forms must resolve to the same flow-JSON URL."""
    expected = 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?app_id=F8aPRXcW3MmyUvQ8Z3J9&app_code=IVp1_zoGHdLdz0GvD_Eqsw&quadkey=03200303033202&responseattributes=sh,fc'
    assert util.get_traffic_json_resource((34.9237, -82.4383), "latlon", 14) == expected
    assert util.get_traffic_json_resource((4440, 6493), "colrow", 14) == expected
def test_get_area_tile_matrix():
    """Tile grids covering three bounding boxes at zoom 14: 3x3, 2x3 and 3x2."""
    square = pd.DataFrame([[(4350, 6557), (4351, 6557), (4352, 6557)],
                           [(4350, 6558), (4351, 6558), (4352, 6558)],
                           [(4350, 6559), (4351, 6559), (4352, 6559)]])
    wide = pd.DataFrame([[(4350, 6558), (4351, 6558), (4352, 6558)],
                         [(4350, 6559), (4351, 6559), (4352, 6559)]])
    tall = pd.DataFrame([[(4351, 6557), (4352, 6557)],
                         [(4351, 6558), (4352, 6558)],
                         [(4351, 6559), (4352, 6559)]])
    assert square.equals(util.get_area_tile_matrix([(33.766764, -84.409533), (33.740003, -84.368978)], 14))
    # asymmetrical case 1
    assert wide.equals(util.get_area_tile_matrix([(33.741455, -84.397218), (33.744203, -84.369581)], 14))
    # asymmetrical case 2
    assert tall.equals(util.get_area_tile_matrix([(33.728999, -84.395856), (33.775902, -84.363917)], 14))
def test_get_area_tile_matrix_url():
    """Compare the generated URL matrix against the pickled reference frame."""
    df = tools.load_data_object("test_data/get_area_tile_matrix_url() for map_tile.pkl")
    cor1 = (33.766764, -84.409533)
    cor2 = (33.740003, -84.368978)
    # NOTE(review): the original also computed util.get_area_tile_matrix([cor1, cor2], 14)
    # into an unused local `info`; removed as dead code (get_area_tile_matrix_url
    # presumably performs that lookup internally -- confirm it has no needed side effects).
    matrix = util.get_area_tile_matrix_url("map_tile", [cor1, cor2], 14)
    assert df.equals(matrix)
def test_get_distance():
    """Great-circle distance (meters) between two Atlanta points."""
    # Exact float equality (==) is brittle across platforms and library
    # versions; compare with a tight absolute tolerance instead.
    dist = util.get_distance((33.70524, -84.40353), (33.71337, -84.39347))
    assert abs(dist - 1297.72758534478) < 1e-6
def test_read_geojson_polygon():
# Feeds a raw GeoJSON FeatureCollection with one Polygon feature and expects
# the polygon ring back as [lat, lon] pairs (note: GeoJSON itself stores
# coordinates as [lon, lat]; the helper swaps the axis order).
 assert util.read_geojson_polygon('{ "type": "FeatureCollection", "features": [ { "type": "Feature", "geometry": { "type": "Polygon", "coordinates": [ [ [ -84.39285278320312, 33.76266589608855 ], [ -84.3738842010498, 33.770015152780125 ], [ -84.3610954284668, 33.7613101391079 ], [ -84.37019348144531, 33.74468253332004 ], [ -84.38830375671387, 33.751391054166746 ], [ -84.39705848693848, 33.758384485188 ], [ -84.39285278320312, 33.76266589608855 ] ] ] }, "properties": {} } ] }') == [[33.76266589608855,-84.39285278320312],[33.770015152780125,-84.3738842010498],[33.7613101391079,-84.3610954284668],[33.74468253332004,-84.37019348144531],[33.751391054166746,-84.38830375671387],[33.758384485188,-84.39705848693848],[33.76266589608855,-84.39285278320312]]
|
'''Compare performance between PIL and OpenCV'''
from __future__ import print_function
import os
import sys
import cv2
from PIL import Image
from time import time
from pilib import ExtendedImage as pilImage
from cvlib import ExtendedImage as cvImage
class Benchmark(object):
    """Time greyscale conversion + histogram for an ExtendedImage-style class.

    ``bmClass`` must be constructible from a file path and provide
    ``greyscale()`` and ``histogram()`` methods.
    """
    def __init__(self, bmClass):
        self.imageClass = bmClass
        self.runs = 0
        # BUG FIX: sys.maxint does not exist on Python 3; float('inf') is the
        # correct identity element for a running minimum on both 2 and 3.
        self.deltat_min = float('inf')
        self.deltat_max = 0
        self.elapsedt = 0

    def run(self, src_image_file):
        '''Compute the spent time
        converting a color image to greyscale
        and returning the pixel counts (histogram).
        '''
        self.runs = self.runs + 1
        # Image loading is deliberately excluded from the timed section.
        img = self.imageClass(src_image_file)
        deltat = time()
        img.greyscale()
        pixel_counts = img.histogram()  # result discarded; only the call is timed
        deltat = time() - deltat
        if self.deltat_min > deltat:
            self.deltat_min = deltat
        if self.deltat_max < deltat:
            self.deltat_max = deltat
        self.elapsedt = self.elapsedt + deltat

    def report(self):
        """Print run count, total elapsed time and min/max/average per-run times."""
        print('Read %d pictures in %f seconds' % (self.runs, self.elapsedt))
        print('deltat min: %fs' % self.deltat_min)
        print('deltat max: %fs' % self.deltat_max)
        print('deltat average %fs:' % (self.elapsedt / self.runs))
class Benchmark_PIL(object):
    """Time PIL's native greyscale conversion + histogram, tracking image sizes."""
    def __init__(self):
        self.runs = 0
        # BUG FIX: sys.maxint does not exist on Python 3; float('inf') works
        # on both 2 and 3 as the running-minimum identity.
        self.deltat_min = float('inf')
        self.deltat_max = 0
        self.elapsedt = 0
        # accumulated width/height, divided by self.runs in report()
        self.width_ave = 0
        self.height_ave = 0

    def run(self, src_image_file):
        '''Compute the spent time
        converting a color image to greyscale
        and returning the pixel counts (histogram).
        '''
        self.runs = self.runs + 1
        img = Image.open(src_image_file)
        width, height = img.size
        self.width_ave = self.width_ave + width
        self.height_ave = self.height_ave + height
        # Only conversion + histogram are timed, not image loading.
        deltat = time()
        img = img.convert(mode='L')
        pixel_counts = img.histogram()  # result discarded; only the call is timed
        deltat = time() - deltat
        if self.deltat_min > deltat:
            self.deltat_min = deltat
        if self.deltat_max < deltat:
            self.deltat_max = deltat
        self.elapsedt = self.elapsedt + deltat

    def report(self):
        """Print run count, average image size and min/max/average per-run times."""
        print('Read %d %dx%d pictures in %f seconds' %
            (self.runs, (self.width_ave/self.runs), (self.height_ave/self.runs), self.elapsedt))
        print('deltat min: %fs' % self.deltat_min)
        print('deltat max: %fs' % self.deltat_max)
        print('deltat average %fs:' % (self.elapsedt / self.runs))
class Benchmark_OpenCV(object):
    """Time OpenCV's native greyscale conversion + histogram, tracking image sizes."""
    def __init__(self):
        self.runs = 0
        # BUG FIX: sys.maxint does not exist on Python 3; float('inf') works
        # on both 2 and 3 as the running-minimum identity.
        self.deltat_min = float('inf')
        self.deltat_max = 0
        self.elapsedt = 0
        # accumulated width/height, divided by self.runs in report()
        self.width_ave = 0
        self.height_ave = 0

    def run(self, src_image_file):
        '''Compute the spent time
        converting a color image to greyscale
        and returning the pixel counts (histogram).
        '''
        self.runs = self.runs + 1
        img = cv2.imread(src_image_file)
        height, width = img.shape[:2]
        self.width_ave = self.width_ave + width
        self.height_ave = self.height_ave + height
        # Only conversion + histogram are timed, not image loading.
        deltat = time()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        pixel_counts = cv2.calcHist([img], [0], None, [256], [0, 256])  # discarded; timed only
        deltat = time() - deltat
        if self.deltat_min > deltat:
            self.deltat_min = deltat
        if self.deltat_max < deltat:
            self.deltat_max = deltat
        self.elapsedt = self.elapsedt + deltat

    def report(self):
        """Print run count, average image size and min/max/average per-run times."""
        print('Read %d %dx%d pictures in %f seconds' %
            (self.runs, (self.width_ave/self.runs), (self.height_ave/self.runs), self.elapsedt))
        print('deltat min: %fs' % self.deltat_min)
        print('deltat max: %fs' % self.deltat_max)
        print('deltat average %fs:' % (self.elapsedt / self.runs))
if __name__ == "__main__":
    # Path argument: a directory of .jpg files or a single .jpg (default '.').
    imagePathName = sys.argv[1] if len(sys.argv) > 1 else '.'
    loop_cnt = 0
    bmPIL = Benchmark(pilImage)
    bmPIL_native = Benchmark_PIL()
    bmCV = Benchmark(cvImage)
    bmCV_native = Benchmark_OpenCV()
    if os.path.isdir(imagePathName):
        # benchmark every .jpg in the top directory listing only
        for dirname, dirnames, filenames in os.walk(imagePathName):
            for imageFileName in filenames:
                if imageFileName.lower().endswith('.jpg'):
                    if (loop_cnt % 20) == 0:
                        print('Running loop %d...' % (loop_cnt + 1))
                    loop_cnt = loop_cnt + 1
                    fullName = os.path.join(dirname, imageFileName)
                    bmPIL.run(fullName)
                    bmPIL_native.run(fullName)
                    bmCV.run(fullName)
                    bmCV_native.run(fullName)
            break  # only top directory listing
    elif imagePathName.lower().endswith('.jpg'):
        # single file: loop 500 times over the same image
        for loop_cnt in range(500):
            if (loop_cnt % 10) == 0:
                print('Running loop %d...' % (loop_cnt + 1))
            bmPIL.run(imagePathName)
            bmPIL_native.run(imagePathName)
            bmCV.run(imagePathName)
            bmCV_native.run(imagePathName)
        loop_cnt = loop_cnt + 1
    else:
        print('JPG file required')
    if loop_cnt > 0:
        print('\nPIL stats:')
        bmPIL.report()
        print('\nOpenCV stats:')
        bmCV.report()
        print('\nPIL native stats:')
        bmPIL_native.report()
        print('\nOpenCV native stats:')
        bmCV_native.report()
|
# Read an integer n and print 0 .. n-1, one value per line.
n = int(input('digite um valor: '))
# NOTE(review): the original pre-initialized `x = 0` before the loop, but the
# for statement rebinds x and nothing reads it afterwards; dead code removed.
for x in range(n):
    print(x)
|
# Django settings for the weiboapp project (legacy pre-1.8 layout).
import os
# Development defaults; TEMPLATE_DEBUG mirrors DEBUG (legacy setting).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Absolute path of the directory one level above this settings module.
PROJECT_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), os.pardir))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Database backend intentionally left blank -- fill in before deploying.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
ALLOWED_HOSTS = []
# Internationalization / localization.
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Media and static file locations.
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# NOTE(review): SECRET_KEY is committed to source control -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'pvdzwgho*_9vsng#n+&mu$)01d7-ov+4y*-b^lzs)yi%4_szst'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'weiboapp.urls'
WSGI_APPLICATION = 'weiboapp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'weiboapp.app',
'gunicorn',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Email the ADMINS on ERROR-level request failures when DEBUG is False.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Pull app-specific secrets from a local, uncommitted weibo_settings module;
# fail loudly with setup instructions when it is missing.
try:
    from weibo_settings import *
except ImportError:
    raise Exception("""Init weibo_settings.py for your app first!
Set the app's address in http://open.weibo.com/apps
to run this demo local, just use http://localhost:8000/auth
And replace the following variables accordingly
Eg:
APP_KEY='1234567890'
APP_SECRET='a1234567b12345c123d1ef123gh1i1j1'
APP_URL='http://apps.weibo.com/SOMEAPP'
WEIBO_ID=12345678
""")
|
from __future__ import absolute_import, division
import argparse
import os
import platform
import re
import shutil
import subprocess
import sys
import warnings
import setuptools
from distutils.core import Command
from Cython.Distutils import build_ext as _build_ext
# Package name; also the on-disk source directory for all modules below.
__package__ = 'pydriver'
# When True, build_ext skips compiling the pcl_helper C++ library.
SKIP_PCL_HELPER = False
if platform.system() == 'Windows':
# requires manual compilation on Windows
SKIP_PCL_HELPER = True
# Project layout: setup.py directory, pcl_helper sources, build and lib dirs.
cwd = os.path.abspath(os.path.dirname(__file__))
pcl_helper_dir = os.path.join(__package__, 'pcl', 'pcl_helper')
pcl_helper_dir_build = os.path.join(pcl_helper_dir, 'build')
pcl_helper_dir_lib = os.path.join(pcl_helper_dir, 'lib')
version_py_path = os.path.join(cwd, __package__, 'version.py')
# Template written into version.py by update_version_py() below.
version_py_src = """# this file was created automatically by setup.py
__version__ = '{version}'
__version_info__ = {{
'full': __version__,
'short': '.'.join(__version__.split('.')[:2])
}}
"""
def read(fname):
    """Return the text content of *fname*, resolved relative to the setup.py dir."""
    # Context manager closes the handle deterministically; the original
    # open(...).read() leaked the file object until garbage collection.
    with open(os.path.join(cwd, fname)) as f:
        return f.read()
def update_version_py():
    """Update version.py using "git describe" command.

    Returns True when version.py was (re)written, False when this is not a
    Git checkout or Git could not be run.
    """
    if not os.path.isdir('.git'):
        print('This does not appear to be a Git repository, leaving version.py unchanged.')
        return False
    try:
        describe_output = subprocess.check_output(['git', 'describe', '--long', '--dirty']).decode('ascii').strip()
    except (OSError, subprocess.CalledProcessError):
        # narrowed from a bare except: only "git missing" and "git failed"
        # are expected failure modes here
        print('Unable to run Git, leaving version.py unchanged.')
        return False
    # output looks like <version tag>-<commits since tag>-g<hash> and can end with '-dirty', e.g. v0.1.0-14-gd9f10e2-dirty
    # our version tags look like 'v0.1.0' or 'v0.1' and optionally additional segments (e.g. v0.1.0rc1), see PEP 0440
    # raw strings: the original non-raw patterns relied on Python passing
    # unknown escapes like '\S' through (a DeprecationWarning on Python 3.6+)
    describe_parts = re.match(r'^v([0-9]+\.[0-9]+(?:\.[0-9]+)?\S*)-([0-9]+)-g([0-9a-f]+)(?:-(dirty))?$', describe_output)
    assert describe_parts is not None, 'Unexpected output from "git describe": {}'.format(describe_output)
    version_tag, n_commits, commit_hash, dirty_flag = describe_parts.groups()
    version_parts = re.match(r'^([0-9]+)\.([0-9]+)(?:\.([0-9]+))?(\S*)$', version_tag)
    assert version_parts is not None, 'Unexpected version format: {}'.format(version_tag)
    version_major, version_minor, version_micro, version_segments = version_parts.groups()
    version_major = int(version_major)
    version_minor = int(version_minor)
    version_micro = int(version_micro) if version_micro is not None else 0
    n_commits = int(n_commits)
    if dirty_flag is not None:
        print('WARNING: Uncommitted changes detected.')
    if n_commits > 0:
        # non-exact match: bump micro and append a PEP 440 dev segment with commit info
        version_micro += 1
        version_segments += '.dev{}+{}'.format(n_commits, commit_hash)
    # final version string
    if version_micro > 0:
        version = '{}.{}.{}{}'.format(version_major, version_minor, version_micro, version_segments)
    else:
        version = '{}.{}{}'.format(version_major, version_minor, version_segments)
    with open(version_py_path, 'w') as f:
        f.write(version_py_src.format(version=version))
    print('Set version to: {}'.format(version))
    # success
    return True
# Regenerate version.py when possible, then load __version__ / __version_info__
# into this module's namespace for use in setup_args below.
update_version_py()
exec(open(version_py_path).read())
class build_pcl_helper(Command):
    """Custom command that builds the pcl_helper C++ library in place."""
    description = 'build pcl_helper library (inplace)'
    user_options = []

    def initialize_options(self):
        self.cwd_pcl_helper_dir_build = None

    def finalize_options(self):
        # build inplace, inside the source tree
        self.cwd_pcl_helper_dir_build = os.path.join(cwd, pcl_helper_dir_build)

    def run(self):
        # make sure the build directory exists
        if not os.path.exists(self.cwd_pcl_helper_dir_build):
            os.makedirs(self.cwd_pcl_helper_dir_build)
        # dispatch on platform; the Windows build is not automated
        if platform.system() == 'Windows':
            self._build_pcl_helper_windows(self.cwd_pcl_helper_dir_build)
        else:
            self._build_pcl_helper_linux(self.cwd_pcl_helper_dir_build)

    def _build_pcl_helper_linux(self, build_dir):
        # out-of-source CMake build followed by make
        subprocess.check_call(['cmake', '..'], cwd=build_dir)
        subprocess.check_call('make', cwd=build_dir)

    def _build_pcl_helper_windows(self, build_dir):
        raise NotImplementedError
class build_ext(_build_ext):
# Cython's build_ext extended to (a) build/copy the pcl_helper library,
# (b) add numpy headers and (c) merge per-compiler flags from the module
# level `extra_args` dict into every extension.
user_options = _build_ext.user_options + [
('skip-pcl-helper', None, 'skip pcl_helper compilation (assume manual compilation)'),
]
boolean_options = _build_ext.boolean_options + ['skip-pcl-helper']
def initialize_options(self):
_build_ext.initialize_options(self)
# don't skip pcl helper by default
self.skip_pcl_helper = False
# pcl_helper location in source directory
self.cwd_pcl_helper_dir_lib = None
# pcl_helper location in package build directory
self.build_pcl_helper_dir_lib = None
def finalize_options(self):
_build_ext.finalize_options(self)
# prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy as np
self.include_dirs.append(np.get_include())
# finalize pcl_helper directories
self.cwd_pcl_helper_dir_lib = os.path.join(cwd, pcl_helper_dir_lib)
self.build_pcl_helper_dir_lib = os.path.join(self.build_lib, pcl_helper_dir_lib)
# check global flag SKIP_PCL_HELPER
self.skip_pcl_helper = self.skip_pcl_helper or SKIP_PCL_HELPER
def build_extensions(self, *args, **kwargs):
compiler_type = self.compiler.compiler_type
if compiler_type not in extra_args:
compiler_type = 'unix' # probably some unix-like compiler
# merge compile and link arguments with global arguments for current compiler
# (set() de-duplicates flags but does not preserve order -- acceptable here)
for e in self.extensions:
e.extra_compile_args = list(set(e.extra_compile_args + extra_args[compiler_type]['extra_compile_args']))
e.extra_link_args = list(set(e.extra_link_args + extra_args[compiler_type]['extra_link_args']))
_build_ext.build_extensions(self, *args, **kwargs)
def run(self):
if not self.skip_pcl_helper:
# build pcl_helper first
try:
self.run_command('build_pcl_helper')
except:
print('Error: pcl_helper could not be compiled automatically')
print('Please compile pcl_helper manually (see %s/pcl/pcl_helper/README.rst for instructions)' % __package__ + \
' and set SKIP_PCL_HELPER in setup.py to True.')
raise
# copy pcl_helper library to package build directory
# NOTE(review): whether this copy also runs when skip_pcl_helper is set
# (i.e. for manually compiled libs) cannot be recovered from this source's
# lost indentation -- confirm against upstream before restructuring.
self.copy_tree(self.cwd_pcl_helper_dir_lib, self.build_pcl_helper_dir_lib)
_build_ext.run(self)
def get_outputs(self):
# add contents of pcl_helper library directory to outputs (so they can be uninstalled)
outputs = []
for dirpath, dirnames, filenames in os.walk(self.build_pcl_helper_dir_lib):
outputs.extend([os.path.join(dirpath, f) for f in filenames])
return _build_ext.get_outputs(self) + outputs
class CleanCommand(Command):
    """Custom clean command to tidy up the project root."""
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # remove build artifacts, caches and compiled files under the project root
        self._remove_dirs('__pycache__')
        self._remove_dir(cwd, 'build')
        self._remove_dir(cwd, 'build_c')
        self._remove_dir(cwd, 'dist')
        self._remove_dir(cwd, '.eggs')
        self._remove_dir(cwd, '{}.egg-info'.format(__package__))
        self._remove_dir(cwd, pcl_helper_dir_build)
        self._remove_dir(cwd, pcl_helper_dir_lib)
        self._remove_files('pyc')
        self._remove_files('pyo')
        self._remove_files('pyd')
        self._remove_files('so')

    def _remove_dirs(self, dirname, parent_dir=None):
        """Remove every directory named *dirname* below *parent_dir* (default: project root)."""
        base = cwd if parent_dir is None else os.path.join(cwd, parent_dir)
        doomed = []
        for dirpath, dirnames, filenames in os.walk(base):
            doomed.extend(os.path.join(dirpath, d) for d in dirnames if d == dirname)
        for d in doomed:
            self._remove_dir(d)

    def _remove_dir(self, *args):
        """Remove the directory at os.path.join(*args) after checking it lives inside the project."""
        dirpath = os.path.abspath(os.path.join(*args))
        if not os.path.exists(dirpath):
            # nothing to do
            return
        if not os.path.isdir(dirpath):
            print('"{}" is not a directory, aborting...'.format(dirpath))
            sys.exit()
        # refuse to delete anything outside the project root
        path_check = dirpath.startswith(cwd)
        if path_check and len(dirpath) > len(cwd):
            # first character after cwd should be a slash or a backslash
            if dirpath[len(cwd)] != os.sep:
                path_check = False
        if not path_check:
            print('The directory "{}" appears to be outside of main directory ({}), aborting...'.format(dirpath, cwd))
            sys.exit()
        # all sanity checks ok
        if not os.path.islink(dirpath):
            print('Removing directory: ' + dirpath)
            shutil.rmtree(dirpath, ignore_errors=True)
        else:
            print("Can't remove symlink to directory: " + dirpath)

    def _remove_files(self, ext, parent_dir=None):
        """Remove every file with extension *ext* below *parent_dir* (default: project root)."""
        base = cwd if parent_dir is None else os.path.join(cwd, parent_dir)
        doomed = []
        for dirpath, dirnames, filenames in os.walk(base):
            doomed.extend(os.path.join(dirpath, f) for f in filenames if f.endswith('.' + ext))
        for f in doomed:
            self._remove_file(f)

    def _remove_file(self, *args):
        """Remove the file at os.path.join(*args) after checking it lives inside the project."""
        filepath = os.path.abspath(os.path.join(*args))
        if not os.path.exists(filepath):
            # nothing to do
            return
        if not os.path.isfile(filepath):
            print('"{}" is not a file, aborting...'.format(filepath))
            sys.exit()
        filepath_dir = os.path.abspath(os.path.dirname(filepath))
        # refuse to delete anything outside the project root
        path_check = filepath_dir.startswith(cwd)
        if path_check and len(filepath_dir) > len(cwd):
            # first character after cwd should be a slash or a backslash
            if filepath_dir[len(cwd)] != os.sep:
                path_check = False
        if not path_check:
            print('The file "{}" appears to be outside of main directory ({}), aborting...'.format(filepath, cwd))
            sys.exit()
        # all sanity checks ok
        print('Removing file: ' + filepath)
        os.remove(filepath)
class lazy_cythonize(list):
    """List-like wrapper that defers cythonize() until the extensions are first accessed.

    Keeps commands that never build (e.g. "clean") from paying the Cython
    compilation cost or requiring Cython at all.
    """
    def __init__(self, extensions, *args, **kwargs):
        self._list = None  # cached cythonize() result, populated lazily
        self.extensions = extensions
        self.args = args
        self.kwargs = kwargs

    def c_list(self):
        # materialize on first use only
        if self._list is None:
            self._list = self._cythonize()
        return self._list

    def __iter__(self):
        return iter(self.c_list())

    def __getitem__(self, ii):
        return self.c_list()[ii]

    def __len__(self):
        return len(self.c_list())

    def _cythonize(self):
        from Cython.Build import cythonize
        return cythonize(self.extensions, *self.args, **self.kwargs)
# Command-line front end: a few pydriver-specific options wrapped around
# setuptools; unrecognized arguments are forwarded untouched.
parser = argparse.ArgumentParser(
    description='%s setup script, basic install: python setup.py install' % __package__,
    epilog='Other arguments will be passed to setuptools, use --help-setup for more information.',
)
parser.add_argument('command', nargs='?',
                    help='command to pass to setuptools, use "install" to install package')
parser.add_argument('--debug', '-g', action='store_true',
                    help='compile/link with debugging information')
parser.add_argument('--force', '-f', action='store_true',
                    help='forcibly build everything (ignore file timestamps)')
parser.add_argument('--help-setup', action='store_true',
                    help='show setuptools help and exit')
parser.add_argument('--annotate', action='store_true',
                    help='let Cython generate HTML files with performance information')
parser.add_argument('--cython-build-dir', default='build_c',
                    help='directory for C/C++ sources and HTML files generated by Cython (default: build_c)')
parser.add_argument('--inplace', action='store_true',
                    help='build inplace')
parser.add_argument('--no-openmp', dest='openmp', action='store_false',
                    help='compile/link without OpenMP support')
parser.add_argument('--profile', action='store_true',
                    help='enable profiling with cProfile')
parser.add_argument('--skip-pcl-helper', action='store_true',
                    help='skip pcl_helper compilation (assume manual compilation)')
cmdargs, unknown_args = parser.parse_known_args()
if cmdargs.help_setup:
    # show setuptools help and exit
    sys.argv = [sys.argv[0], '--help']
    setuptools.setup()
    sys.exit()
# rebuild sys.argv for setuptools from the recognized flags plus pass-through args
setuptools_argv = [sys.argv[0]]
if cmdargs.command:
    setuptools_argv.append(cmdargs.command)
if cmdargs.force:
    setuptools_argv.append('--force')
if cmdargs.debug:
    setuptools_argv.append('--debug')
setuptools_argv += unknown_args
sys.argv = setuptools_argv
# Metadata and requirements handed to setuptools.setup() at the bottom of the file.
setup_args = {
'name': __package__,
'version': __version__,
'url': 'http://github.com/lpltk/pydriver',
'license': 'MIT',
'author': 'Leonard Plotkin',
'author_email': 'git@leonard-plotkin.de',
'description': 'A framework for training and evaluating object detectors and classifiers in road traffic environment.',
'long_description': read('README.rst'),
'zip_safe': False,
'package_dir': {__package__: __package__},
'packages': setuptools.find_packages(),
'package_data': {__package__+'.pcl': ['pcl_helper/lib/*']},
'include_package_data': True,
'platforms': 'any',
'setup_requires': [
'numpy>=1.8.1',
'cython>=0.22.1',
],
'install_requires': [
'numpy>=1.8.1',
'cython>=0.22.1',
'scipy>=0.13.3',
'scikit-image',
'scikit-learn',
],
'classifiers': [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Cython',
'Programming Language :: C++',
'Topic :: Scientific/Engineering :: Image Recognition',
],
}
# Header directory shared by all Cython extensions.
setup_args['include_dirs'] = [
os.path.join(__package__, 'common'), # common constants, structs and typedefs
]
# Per-compiler compile/link flags, merged into every extension by
# build_ext.build_extensions(); keyed by distutils compiler type.
extra_args = {}
extra_args['unix'] = {'extra_compile_args': [], 'extra_link_args': []}
if cmdargs.openmp:
    extra_args['unix']['extra_compile_args'].append('-fopenmp')
    extra_args['unix']['extra_link_args'].append('-fopenmp')
if not cmdargs.debug:
    extra_args['unix']['extra_compile_args'].append('-O3')  # maximum optimization
    extra_args['unix']['extra_compile_args'].append('-w')   # suppress warnings
extra_args['msvc'] = {'extra_compile_args': [], 'extra_link_args': []}
extra_args['msvc']['extra_compile_args'].append('/EHsc')  # exception handling option
if cmdargs.openmp:
    extra_args['msvc']['extra_compile_args'].append('/openmp')
if not cmdargs.debug:
    extra_args['msvc']['extra_compile_args'].append('/O2')  # optimize for speed
    extra_args['msvc']['extra_compile_args'].append('/W0')  # suppress warnings
def create_extension(*args, **kwargs):
    """Build a setuptools.Extension with the package prefix applied to its
    module name and to its sources/include_dirs/library_dirs paths.

    Generates C++ (not C) unless an explicit 'language' kwarg is given.
    """
    def qualify(kwarg_key):
        # prepend package directory to every element in list in kwargs[kwarg_key]
        kwargs[kwarg_key] = [os.path.join(__package__, entry) for entry in kwargs.get(kwarg_key, [])]

    # add package name to extension module name and paths
    args = (__package__ + '.' + args[0],) + args[1:]
    qualify('sources')
    qualify('include_dirs')
    qualify('library_dirs')
    # generate C++ code (instead of C) by default
    kwargs.setdefault('language', 'c++')
    return setuptools.Extension(*args, **kwargs)
# Cython extension modules, grouped by subpackage; names and paths are
# package-relative and get prefixed by create_extension() above.
extensions = [
# common
create_extension(
'common.constants',
sources = ['common/constants.pyx'],
),
create_extension(
'common.functions',
sources = ['common/functions.pyx'],
),
# geometry
create_extension(
'geometry.geometry',
sources = ['geometry/geometry.pyx'],
),
# stereo
create_extension(
'stereo.stereo',
sources = ['stereo/stereo.pyx'],
),
# pcl
# The only extension linking against the pcl_helper C++ library; rpath lets
# the installed module find the bundled shared library on non-Windows.
create_extension(
'pcl.pcl',
sources = ['pcl/pcl.pyx'],
language = 'c++',
include_dirs = ['pcl/pcl_helper'],
libraries = ['pcl_helper'],
library_dirs = ['pcl/pcl_helper/lib'],
extra_link_args = ['-Wl,-rpath,$ORIGIN/pcl_helper/lib'] if platform.system() != 'Windows' else [], # handle paths in __init__.py on Windows
),
# preprocessing
create_extension(
'preprocessing.preprocessing',
sources = ['preprocessing/preprocessing.pyx'],
),
# keypoints
create_extension(
'keypoints.base',
sources = ['keypoints/base.pyx'],
),
create_extension(
'keypoints.harris',
sources = ['keypoints/harris.pyx'],
),
create_extension(
'keypoints.iss',
sources = ['keypoints/iss.pyx'],
),
# features
create_extension(
'features.shot',
sources = ['features/shot.pyx'],
),
create_extension(
'features.base',
sources = ['features/base.pyx'],
),
# detectors
create_extension(
'detectors.vocabularies',
sources = ['detectors/vocabularies.pyx'],
),
create_extension(
'detectors.detectors',
sources = ['detectors/detectors.pyx'],
),
]
# Register the custom commands defined above.
setup_args['cmdclass'] = {
    'build_pcl_helper': build_pcl_helper,
    'build_ext': build_ext,
    'clean': CleanCommand,
}
# Options forwarded to cythonize() (lazily, via lazy_cythonize).
cython_kwargs = {
    'build_dir': cmdargs.cython_build_dir,  # build directory
    'compiler_directives': {
        'embedsignature': True,  # embed signatures for documentation tools
    },
}
if cmdargs.force:
    cython_kwargs['force'] = True  # enforce full recompilation
if cmdargs.annotate:
    cython_kwargs['annotate'] = True  # generate HTML reports (in cmdargs.cython_build_dir)
if cmdargs.profile:
    if cmdargs.debug:
        warnings.warn(UserWarning('You should only profile in release mode with full optimization.'))
    cython_kwargs['profile'] = True  # globally enable profiling with cProfile
setup_args['ext_modules'] = lazy_cythonize(extensions, **cython_kwargs)
setup_args['options'] = {
    'build_ext': {
        'inplace': cmdargs.inplace,
        'skip_pcl_helper': cmdargs.skip_pcl_helper,
    },
}
try:
    setuptools.setup(**setup_args)
except Exception as e:
    print('Compilation errors encountered, aborting...')
    print('Exception information:')
    print(e)
    sys.exit(1)
|
from __future__ import absolute_import, print_function
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup
# Standard setuptools packaging for the builder component (src/ layout:
# packages live under src/, top-level modules are picked up by glob).
setup(
name="builder",
version="0.1.0",
license="MIT",
description="Builder Component.",
long_description="Builder Component.",
author="Renat Zhilkibaev",
author_email="rzhilkibaev@gmail.com",
url="https://github.com/rzhilkibaev/shmenkins",
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
install_requires=[
"boto3", "shmenkins"
],
)
|
from __future__ import absolute_import, print_function
import logging
from tornado import gen
import re
import requests
import kazoo.client
import kazoo.exceptions
import requests
import requests.exceptions
from . import exceptions
from . import zookeeper
from .. import log, util
from modules.util import flat_list
from modules.cache import CURRENT as cache
from config.config import marathon_zks, CACHE as cache_cfg, envs, TEST, env_config_for_zk
logger = logging.getLogger('tutu.modules.mesos.' + __name__)
class MarathonResolver(object):
    """Resolves Marathon leader addresses from ZooKeeper with a class-level cache.

    ``bucket`` maps a zk connection string ("zk://hosts/path") to the list of
    resolved Marathon addresses and is shared by all resolver instances.
    """
    bucket = {}

    def __init__(self, zk):
        # zk: connection string of the form "zk://host1:port,host2:port/path"
        self.zk = zk

    @classmethod
    def is_cached(cls, zk):
        return zk in cls.bucket

    @classmethod
    def cache(cls, zk, addresses):
        cls.bucket[zk] = addresses

    @classmethod
    def clear_cache_for_zk(cls, zk):
        del cls.bucket[zk]

    @classmethod
    def get_addresses_from_cache(cls, zk):
        return cls.bucket[zk]

    def resolve(self):
        """Return the Marathon addresses for this resolver's zk string.

        ZooKeeper is only queried on a cache miss; in TEST mode nothing is
        resolved (returns None, matching the original implicit behavior).
        """
        if not TEST:
            if not self.is_cached(self.zk):
                hosts, path = self.zk[5:].split("/", 1)
                path = "/" + path + "/leader"
                with zookeeper.client(hosts=hosts, read_only=True) as zk:
                    try:
                        # assumes children of `path` are node names under the
                        # hard-coded '/marathon-cluster/leader/' prefix -- TODO
                        # confirm whether this should derive from `path` instead
                        marathon_addresses = [zk.get('/marathon-cluster/leader/' + n)[0]
                                              for n in zk.get_children(path)]
                    except kazoo.exceptions.NoNodeError:
                        # BUG FIX: the original called log.fatal(INVALID_PATH.format(cfg))
                        # with two undefined names (guaranteed NameError) and then fell
                        # through to use `marathon_addresses` unbound; log and re-raise.
                        log.fatal('Invalid ZooKeeper path: {0}'.format(path))
                        raise
                self.cache(self.zk, marathon_addresses)
                return marathon_addresses
            else:
                # BUG FIX: the original looked up `zk`, an undefined local name
                # in this scope; the cache key is self.zk.
                return self.get_addresses_from_cache(self.zk)

    def refresh(self):
        """Drop the cached addresses for this zk string and resolve again."""
        self.clear_cache_for_zk(self.zk)
        return self.resolve()
class Marathon(object):
    """Client for a Marathon cluster located through ZooKeeper.

    Uses a MarathonResolver for leader discovery and transparently refreshes
    the address list once before giving up when no address answers /ping.
    """
    def __init__(self, zk):
        self.zk = zk
        self.resolver = MarathonResolver(zk)
        self.addresses = self.resolver.resolve()
        # allow exactly one transparent re-resolution per instance
        self.refreshed = False

    def is_addressa_alive(self, address):
        # name kept for caller compatibility ("addressa" looks like a typo)
        return requests.get('http://' + address + '/ping').ok

    def get_marathon_address(self):
        """Return 'http://<addr>' for the first address that answers /ping.

        On total failure, refresh the resolver cache once and retry; raises
        when no address responds after the refresh either.
        """
        for address in self.addresses:
            if self.is_addressa_alive(address):
                return 'http://' + address
        if not self.refreshed:
            self.refreshed = True
            self.addresses = self.resolver.refresh()
            return self.get_marathon_address()
        # BUG FIX: the original raised the unqualified, undefined name
        # MarathonConnectionException (NameError); it presumably lives in the
        # sibling `exceptions` module imported above -- confirm the class name.
        raise exceptions.MarathonConnectionException("None of these address works `{0}`, please check whether marathon is alive".format(','.join(self.addresses)))

    def resolve_apps(self):
        # list(...): under Python 3 a bare map() is a one-shot iterator, which
        # would be exhausted after the first read of the cached value in apps()
        return list(map(lambda mi: MarathonApp(self, mi), requests.get(self.get_marathon_address() + '/v2/apps').json()['apps'])) if not TEST else []

    def apps_by_id_pattern(self, pattern):
        """Return apps whose id matches `pattern` (regex or substring)."""
        logger.debug('Filtering apps by app id pattern: {0}'.format(str(pattern)))
        # anchor-only patterns get a wildcard added on the unanchored side
        pattern = '.*' + pattern if pattern.endswith('$') else pattern + '.*' if pattern.startswith('^') else pattern
        # list(...) for Python 3: callers may index/len/re-iterate the result
        return list(filter(lambda app: re.match(pattern, app.id) or pattern in app.id, self.apps()))

    def apps_by_id_patterns(self, patterns):
        return flat_list(map(lambda p: self.apps_by_id_pattern(p), patterns))

    @gen.coroutine
    def register_callback(self, callback):
        """Subscribe `callback` URL to Marathon's event stream."""
        registry_url = self.get_marathon_address().rstrip('/') + '/v2/eventSubscriptions?callbackUrl=' + callback
        logger.debug('Registerring callback: {0}'.format(registry_url))
        requests.post(registry_url, headers={'Content-Type': 'application/json; charset=utf-8'})

    @property
    def id(self):
        return self.zk

    @property
    def cache_key(self):
        return cache_cfg.get('APPS_PREFIX') + self.id

    def contains_ip(self, ip):
        # compare host parts only, stripping any ':port' suffix
        return ip in map(lambda address: address.split(':')[0], self.addresses)

    def apps(self):
        """Return the (possibly cached) list of MarathonApp objects."""
        if not cache.is_cached(self.cache_key):
            cache.set_cache(self.cache_key, self.resolve_apps())
        return cache.get_cache(self.cache_key)
class BaseInfo(object):
    """Mixin for wrappers around a raw marathon json document.

    Subclasses expose their underlying payload through an ``info`` property;
    the helpers below perform dotted-key lookups into that payload.
    """

    def _val_of_key(self, key):
        # Delegate dotted-path extraction (e.g. 'docker.image') to the shared util.
        return util.val_from_json(self.info, key)

    def _has_val(self, key):
        # "Present" means the dotted key resolves to a non-None value.
        value = self._val_of_key(key)
        return value is not None
class MarathonTask(BaseInfo):
    """Read-only view over a single task entry of a marathon app."""

    def __init__(self, app, task_info):
        self.app = app              # owning MarathonApp
        self.task_info = task_info  # raw task dict from /v2/apps/<id>/tasks

    @property
    def info(self):
        # BaseInfo hook: the raw json document the dotted-key lookups read.
        return self.task_info

    @property
    def app_id(self):
        return self._val_of_key('appId')

    @property
    def id(self):
        return self._val_of_key('id')

    @property
    def host(self):
        # Host the task is scheduled on; None if not present in the payload.
        return self._val_of_key('host')

    @property
    def ports(self):
        # Host ports allocated to the task (list), or None when absent.
        return self._val_of_key('ports')

    @property
    def started_at(self):
        return self._val_of_key('startedAt')

    @property
    def staged_at(self):
        return self._val_of_key('stagedAt')

    def __str__(self):
        # Human-readable summary: one http url per task port plus start time.
        return """
        Actual Service Address:
                {0}
        Started at: {1} """.format('\n\t\t'.join(map(lambda port: 'http://' + self.host + ":" + str(port), self.ports)), self.started_at)
class MarathonApp(BaseInfo):
    """Read-only view over one app entry returned by marathon's /v2/apps."""

    def __init__(self, marathon, app_info):
        self.marathon = marathon  # owning Marathon cluster
        self.app_info = app_info  # raw app dict from /v2/apps

    @property
    def info(self):
        # BaseInfo hook: the raw json document the dotted-key lookups read.
        return self.app_info

    @property
    def id(self):
        # Marathon app ids start with '/'; strip it for display and matching.
        return self.app_info['id'][1:]

    def env_config(self):
        """Environment config for this app: matched by app-id prefix, else by zk url."""
        evs = filter(lambda e: e.get('app-prefix') and self.id.startswith(e.get('app-prefix')), envs)
        return evs[0] if len(evs) > 0 else env_config_for_zk(self.marathon.zk)

    def api_gateway_address(self):
        """Return the public API-Gateway urls fronting this app.

        Queries the gateway admin port (8001) for all registered apis and keeps
        those whose upstream_url points at one of this app's bamboo addresses.
        Returns a list of urls, or a single-element list carrying a diagnostic
        message when the gateway cannot be queried or answers unexpectedly.
        """
        api_gateway = self.env_config()['api_gateway']
        apis_url = api_gateway + ':8001/apis'
        try:
            logger.debug('Fetching all api data from: {0}'.format(apis_url))
            apis_data = requests.get(apis_url).json()
            # Fixed: apis_data['data'] was previously dereferenced *before* the
            # key check, raising KeyError on gateway error responses. Also
            # replaced py2-only has_key() with the `in` operator.
            if 'data' in apis_data:
                all_apis = apis_data['data']
                bamboo_addresses = self.bamboo_addresses()
                apis = filter(lambda api: api.get('upstream_url') in bamboo_addresses, all_apis)
                api_info = [api_gateway + ':8000' + api['request_path'] for api in apis]
                logger.debug('Found api address: {0}'.format(str(api_info)))
                return api_info
            elif 'message' in apis_data:
                logger.warning("Seems something wrong's happening on API-Gateway: {0}".format(apis_url))
                return ['Message got from API-Gateway: {0}'.format(apis_url)]
            else:
                logger.warning("Response from API-Gateway: {0}".format(str(apis_data)))
                return ['Unexcepted response from API-Gateway: {0}'.format(str(apis_data))]
        except requests.ConnectionError as e:
            # `as` syntax works on py2.6+ and py3 (the old comma form is py2-only).
            logger.exception(e)
            logger.warning('Cannt connect to api-gateway: {0}'.format(apis_url))
            return ["Cann't connect to API-Gateway :{0}".format(apis_url)]

    def bamboo_addresses(self):
        """This app's bamboo frontends, one per entry in env.BAMBOO_HTTP_PORTS."""
        bamboo_address = self.env_config()['bamboo_url']
        if self._has_val('env.BAMBOO_HTTP_PORTS'):
            bamboo_ports = self._val_of_key('env.BAMBOO_HTTP_PORTS').split(',')
            return [bamboo_address + ':' + port for port in bamboo_ports]
        return []

    def str_api_gateway_address(self):
        api_gateway = self.api_gateway_address()
        return '\n\t'.join(api_gateway) if api_gateway else 'Not configured behinded API-Gateway yet.'

    def str_bamboo_address(self):
        """Pair each docker container port with its bamboo address, for display."""
        if self._has_val('env.BAMBOO_HTTP_PORTS'):
            bamboo_addresses = self.bamboo_addresses()
            docker_port_mappings = map(lambda dp: dp['containerPort'], self._val_of_key("container.docker.portMappings"))
            # Fixed: the index-based loop raised IndexError whenever fewer
            # bamboo ports than port mappings were configured; zip pairs them
            # up to the shorter list instead.
            mappings = ["Service on Port: " + str(port) + " is on Bamboo: " + address
                        for port, address in zip(docker_port_mappings, bamboo_addresses)]
            return '\n\t'.join(mappings)
        else:
            return "No bamboo config found"

    @property
    def tasks(self):
        """Live tasks of this app, fetched fresh from marathon on each access."""
        # TODO: consider cache task info to speed up.
        tasks_url = self.marathon.get_marathon_address().rstrip('/') + '/v2/apps/' + self.id + '/tasks'
        logger.debug('Fetching task info for {0} : {1}'.format(self.id, tasks_url))
        return map(lambda t: MarathonTask(self, t), requests.get(tasks_url).json()['tasks'])

    def task_info(self):
        return '\n\t'.join(map(str, self.tasks))

    @property
    def cpus(self):
        return self._val_of_key('cpus')

    @property
    def mem(self):
        return self._val_of_key('mem')

    @property
    def instances(self):
        return self._val_of_key('instances')

    def is_dockerized(self):
        """True when the app runs inside a docker container."""
        return self._has_val('container') and self._val_of_key('container.type') == 'DOCKER'

    def docker_container_info(self):
        """DockerContainerInfo for dockerized apps, otherwise None."""
        if self.is_dockerized():
            return DockerContainerInfo(self._val_of_key('container'))
        return None

    def container_info(self, verbose=False):
        ci = self.docker_container_info()
        return ci.to_str(verbose=verbose) if ci else "<No Container Info>"

    @property
    def version(self):
        return self._val_of_key('version')
class DockerVolumeInfo(BaseInfo):
    """Read-only view over a single docker volume mapping of a container spec."""

    def __init__(self, volume_info):
        self.volume_info = volume_info  # raw volume dict

    @property
    def info(self):
        # BaseInfo hook for dotted-key lookups.
        return self.volume_info

    @property
    def container_path(self):
        return self._val_of_key('containerPath')

    @property
    def host_path(self):
        return self._val_of_key('hostPath')

    @property
    def mode(self):
        return self._val_of_key('mode')

    def __str__(self):
        template = "{0} -> {1}, Mode: {2}"
        return template.format(self.container_path, self.host_path, self.mode)
class DockerContainerInfo(BaseInfo):
    """Read-only view over the ``container`` section of a dockerized app."""

    def __init__(self, docker_info):
        self.docker_info = docker_info  # raw 'container' dict from the app spec

    @property
    def info(self):
        # BaseInfo hook for dotted-key lookups.
        return self.docker_info

    @property
    def volumes(self):
        # Fixed: 'volumes' may be absent (lookup yields None); default to []
        # so iteration in str_volumes() cannot blow up.
        return map(lambda v: DockerVolumeInfo(v), self._val_of_key('volumes') or [])

    @property
    def image(self):
        return self._val_of_key('docker.image')

    @property
    def network(self):
        return self._val_of_key('docker.network')

    @property
    def port_mappings(self):
        return self._val_of_key('docker.portMappings')

    def str_port_mappings(self):
        """One "container -> host" line per mapping; hostPort 0 means random."""
        if self.port_mappings:
            pm = map(lambda pm: "{0} -> {1}".format(pm['containerPort'], "Random Port Mapping" if pm['hostPort'] == 0 else pm['hostPort']), self.port_mappings)
            return '\n\t\t'.join(pm)
        else:
            return "No port mappings found"

    def str_volumes(self):
        return '\n\t\t'.join(map(str, self.volumes))

    @property
    def privileged(self):
        # Fixed: the flag arrives as a json boolean in current marathon
        # payloads, so the string-only comparison was always False; accept the
        # legacy string form as well (strictly widens the truthy set).
        return self._val_of_key('docker.privileged') in (True, 'true')

    @property
    def parameters(self):
        # Raw docker parameter list, or None when the app defines none.
        return self._val_of_key('docker.parameters')

    def str_parameters(self):
        # Fixed: parameters may be None and __str__() calls this
        # unconditionally; guard against iterating None.
        return '\n\t\t'.join(["{0} : {1}".format(x['key'], x['value']) for x in self.parameters or []])

    @property
    def force_pull_image(self):
        # Same boolean-vs-string handling as ``privileged``.
        return self._val_of_key('docker.forcePullImage') in (True, 'true')

    def __str__(self):
        return """
        Image: {0}
        Network: {1}
        Privileged: {2}
        Force pull image: {3}
        Port Mappings:
                {4}
        Volumes:
                {5}
        Parameters:
                {6}
        """.format(self.image, self.network, self.privileged, self.force_pull_image, self.str_port_mappings(), self.str_volumes(), self.str_parameters())

    def to_str(self, verbose=False):
        """Short summary by default; the full __str__ dump when verbose."""
        return """
        Image: {0}
        Port Mappings:
                {1}
        Volumes:
                {2}
        """.format(self.image, self.str_port_mappings(), self.str_volumes()) if not verbose else str(self)
# One Marathon handle per configured zookeeper url.
marathons = map(lambda zk: Marathon(zk), marathon_zks)


def marathon_of_env(env):
    """Look up the Marathon cluster configured for the named environment.

    Returns None when no environment entry carries that name.
    """
    wanted = env.lower()
    for env_entry in envs:
        if env_entry.get('name') == wanted:
            return filter(lambda m: m.zk == env_entry.get('marathon_url'), marathons)[0]
|
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional per-call response hook: receives the raw PipelineResponse, the
# deserialized body, and the response headers; its return value replaces the
# operation's normal return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations:
    """SecurityRulesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _delete_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        **kwargs: Any
    ) -> None:
        # Initial (non-polling) DELETE request; long-running polling is driven
        # by begin_delete, which wraps this call.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 mean deletion accepted/in progress; 204 means already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                security_rule_name=security_rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; only invoke the custom hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        **kwargs: Any
    ) -> "_models.SecurityRule":
        """Get the specified network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecurityRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_12_01.models.SecurityRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('SecurityRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        security_rule_parameters: "_models.SecurityRule",
        **kwargs: Any
    ) -> "_models.SecurityRule":
        # Initial PUT request of the create-or-update LRO; polling is driven by
        # begin_create_or_update, which wraps this call.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing rule, 201 = created new rule.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('SecurityRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        security_rule_parameters: "_models.SecurityRule",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.SecurityRule"]:
        """Creates or updates a security rule in the specified network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :param security_rule_parameters: Parameters supplied to the create or update network security
         rule operation.
        :type security_rule_parameters: ~azure.mgmt.network.v2018_12_01.models.SecurityRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_12_01.models.SecurityRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                security_rule_name=security_rule_name,
                security_rule_parameters=security_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final-state deserialization once polling reports completion.
            deserialized = self._deserialize('SecurityRule', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.SecurityRuleListResult"]:
        """Gets all security rules in a network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.SecurityRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: build the url from metadata; subsequent pages: the
            # service-provided next_link already carries the query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, items).
            deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'}  # type: ignore
|
import unicodecsv
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.insertbuffer import BulkInsertBuffer
from openelex.lib.text import ocd_type_id
from .datasource import Datasource
"""
Pennsylvania elections have pre-processed CSV results files for elections beginning in 2000. These files contain precinct-level data for each of the state's
counties, and include all contests in that county. Special election results are contained in election-specific files. The CSV versions of those are contained in the
https://github.com/openelections/openelections-data-pa repository.
"""
class LoadResults(object):
    """Entry point for data loading.

    Determines appropriate loader for file and triggers load process.
    """

    def run(self, mapping):
        # Special elections ship in dedicated result files with their own
        # layout; everything else goes through the generic CSV loader.
        election_id = mapping['election']
        chosen = CSVSpecialLoader() if 'special' in election_id else CSVLoader()
        chosen.run(mapping)
class PABaseLoader(BaseLoader):
    """Shared configuration for the Pennsylvania result loaders."""

    datasource = Datasource()

    # Offices whose results are loaded; rows for anything else are skipped.
    target_offices = {
        'USP',
        'USS',
        'USC',
        'STS',
        'STH',
        'AUD',
        'TRE',
        'ATT',
        'GOV',
        'State Senate',
        'State House',
        'State Representative',
    }

    # The subset of offices elected per district.
    district_offices = {
        'USC',
        'STS',
        'STH',
        'State Senate',
        'State House',
        'State Representative',
    }

    def _skip_row(self, row):
        """
        Should this row be skipped?

        This should be implemented in subclasses.
        """
        return False
class CSVSpecialLoader(PABaseLoader):
    """
    Loads Pennsylvania special election results from 2000-2012.

    Format:

    Converted CSVs with county-level results.
    """

    def load(self):
        """Parse the mapped CSV file and bulk-insert county-level RawResults."""
        headers = [
            'candidate',
            'office',
            'district',
            'party',
            'county',
            'votes',
            'winner'
        ]
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'county'
        # Store result instances for bulk loading
        # We use a BulkInsertBuffer because the load process was running out of
        # memory on prod-1
        results = BulkInsertBuffer(RawResult)

        with self._file_handle as csvfile:
            reader = unicodecsv.DictReader(csvfile, fieldnames=headers, encoding='latin-1')
            for row in reader:
                if self._skip_row(row):
                    continue
                if row['county'].strip() == '':
                    # Contest-summary row (statewide totals / winner flag):
                    # there is no county result to record, so skip it.
                    # (Previously the totals were read into unused locals.)
                    continue
                rr_kwargs = self._common_kwargs.copy()
                rr_kwargs['primary_party'] = row['party'].strip()
                rr_kwargs.update(self._build_contest_kwargs(row))
                rr_kwargs.update(self._build_candidate_kwargs(row))
                jurisdiction = row['county'].strip()
                rr_kwargs.update({
                    'party': row['party'].strip(),
                    'jurisdiction': jurisdiction,
                    'ocd_id': "{}/county:{}".format(self.mapping['ocd_id'],
                                                    ocd_type_id(jurisdiction)),
                    'office': row['office'].strip(),
                    'district': row['district'].strip(),
                    'votes': int(row['votes'].strip())
                })
                results.append(RawResult(**rr_kwargs))
        # Flush any remaining results that are still in the buffer and need
        # to be inserted.
        results.flush()

    def _skip_row(self, row):
        """Keep only rows for the offices this project tracks."""
        return row['office'].strip() not in self.target_offices

    def _build_contest_kwargs(self, row):
        """Contest identity: office plus (possibly empty) district string."""
        return {
            'office': row['office'].strip(),
            'district': row['district'].strip(),
        }

    def _build_candidate_kwargs(self, row):
        return {
            'full_name': row['candidate'].strip()
        }
class CSVLoader(PABaseLoader):
    """
    Loads Pennsylvania primary and general election results for 2000-2012.

    Format:

    Pennsylvania has tab-delimited files that have been converted to CSV files.
    """

    # (state office code, canonical office name) pairs used to translate
    # the cand_office_code column.
    offices = [
        ('USP', 'President'),
        ('USS', 'U.S. Senate'),
        ('GOV', 'Governor'),
        ('LTG', 'Lieutenant Governor'),
        ('ATT', 'Attorney General'),
        ('AUD', 'Auditor General'),
        ('TRE', 'State Treasurer'),
        ('USC', 'U.S. House'),
        ('STS', 'State Senate'),
        ('STH', 'State Representative')
    ]

    def load(self):
        """Parse the mapped CSV file and bulk-insert precinct-level RawResults."""
        headers = [
            'year',
            'election_type',
            'county_code',
            'precinct_code',
            'cand_office_rank',
            'cand_district',
            'cand_party_rank',
            'cand_ballot_position',
            'cand_office_code',
            'cand_party_code',
            'cand_number',
            'cand_last_name',
            'cand_first_name',
            'cand_middle_name',
            'cand_suffix',
            'votes',
            'congressional_district',
            'state_senate_district',
            'state_house_district',
            'municipality_type_code',
            'municipality',
            'municipality_breakdown_code_1',
            'municipality_breakdown_name_1',
            'municipality_breakdown_code_2',
            'municipality_breakdown_name_2',
            'bicounty_code',
            'mcd_code',
            'fips_code',
            'vtd_code',
            'previous_precinct_code',
            'previous_congressional_district',
            'previous_state_senate_district',
            'previous_state_house_district'
        ]
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        # Store result instances for bulk loading
        results = BulkInsertBuffer(RawResult)
        # Hoisted out of the row loop: county code -> ocd_id. The previous
        # per-row list scan over _jurisdictions() made loading quadratic.
        ocd_id_by_county = {c['state_id']: c['ocd_id'] for c in self.datasource._jurisdictions()}

        with self._file_handle as csvfile:
            if '2014' in self.election_id:
                # The 2014 files contain stray NUL bytes that break csv parsing.
                reader = unicodecsv.DictReader((line.replace('\0', '') for line in csvfile), fieldnames=headers, encoding='latin-1')
            else:
                reader = unicodecsv.DictReader(csvfile, fieldnames=headers, encoding='latin-1')
            for row in reader:
                if self._skip_row(row):
                    continue
                rr_kwargs = self._common_kwargs.copy()
                if 'primary' in self.mapping['election']:
                    rr_kwargs['primary_party'] = row['cand_party_code'].strip()
                rr_kwargs.update(self._build_contest_kwargs(row))
                rr_kwargs.update(self._build_candidate_kwargs(row))
                county_ocd_id = ocd_id_by_county[str(row['county_code'])]
                rr_kwargs.update({
                    'party': row['cand_party_code'].strip(),
                    'jurisdiction': str(row['precinct_code']),
                    'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(str(row['precinct_code']))),
                    'votes': int(row['votes'].strip()),
                    # PA-specific data
                    'congressional_district': row['congressional_district'],
                    'state_senate_district': row['state_senate_district'],
                    'state_house_district': row['state_house_district'],
                    'municipality_type_code': row['municipality_type_code'],
                    'municipality': row['municipality'],
                    'previous_precinct_code': row['previous_precinct_code'],
                    'previous_congressional_district': row['previous_congressional_district'],
                    'previous_state_senate_district': row['previous_state_senate_district'],
                    'previous_state_house_district': row['previous_state_house_district']
                })
                results.append(RawResult(**rr_kwargs))
        results.flush()

    def _skip_row(self, row):
        """Keep only rows for the offices this project tracks."""
        return row['cand_office_code'].strip() not in self.target_offices

    def _build_contest_kwargs(self, row):
        """Translate the office code and normalize at-large districts to None."""
        office = [o for o in self.offices if o[0] == row['cand_office_code']][0][1]
        # Fixed: csv values are strings, so the old `== 0` comparison against
        # an int never matched and statewide contests kept a district of "0".
        district = row['cand_district']
        if not district or district.strip() == '0':
            district = None
        return {
            'office': office,
            'district': district,
        }

    def _build_candidate_kwargs(self, row):
        return {
            'given_name': row['cand_first_name'],
            'family_name': row['cand_last_name'],
            'additional_name': row['cand_middle_name'],
            'suffix': row['cand_suffix']
        }
|
from modeltranslation.translator import translator, TranslationOptions
from .models import NotificationTemplate
class NotificationTemplateTranslationOptions(TranslationOptions):
    # django-modeltranslation options: these NotificationTemplate fields
    # get a per-language column in the database.
    fields = ('subject', 'body', 'html_body')

# Register the model with modeltranslation at import time.
translator.register(NotificationTemplate, NotificationTemplateTranslationOptions)
|
from django.core.management.base import BaseCommand
from bongo.apps.bongo.models import Issue
class Command(BaseCommand):
    """Management command that re-saves every Issue in the database."""

    def handle(self, *args, **options):
        # Calling save() on each issue re-runs whatever logic fires on save.
        for stored_issue in Issue.objects.all():
            stored_issue.save()
|
from time import sleep, time
def f():
    """Block for roughly 0.3 seconds."""
    sleep(0.3)
def g():
    """Block for roughly 0.5 seconds."""
    sleep(0.5)
# Measure each helper's wall-clock duration via time() deltas.
t = time()
f()
print('f took: ', time() - t) # f took: 0.3003859519958496
t = time()
g()
print('g took:', time() - t) # g took: 0.5005719661712646
|
# Test fixture: an HTTP POST with a chunked Transfer-Encoding header but an
# already de-chunked body — presumably consumed by an HTTP-parser test.
# `uri` is a helper defined elsewhere in this file (TODO: confirm).
request = {
    "method": "POST",
    "uri": uri("/post_chunked_all_your_base"),
    "version": (1, 1),
    "headers": [
        ("TRANSFER-ENCODING", "chunked"),
    ],
    "body": "all your base are belong to us"
}
|
"""
main_no_background.py: Start visualization, no background.
This script runs a visualization of the electricity prices over Europe
for the period of April 2014, with controls to increase the installed wind and solar capacity,
and change scenarios for the rest of the system.
Wind and solar penetration are relative numbers (0-150%), with 100% corresponding to the scenario
where the average gross production of renewables matches average demand. The installed capacities
are 2015 numbers.
Some commented out code refers to wind and solar backgrounds, which cannot be distributed due to
licensing issues. Sorry.
"""
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import mpl_toolkits.basemap as bm
import defaults
from matplotlib import animation
from matplotlib.widgets import Slider, RadioButtons, Button
from helper_funcs import DiscreteSlider
from plot_classes import Production_Consumption_Plot, WindMap, Network_Plot, Pieplots, Priceplot
# Use seaborn's 'ticks' style for all plots.
sns.set_style('ticks')

__author__ = "Tue V. Jensen"
__copyright__ = "Copyright 2016"
__credits__ = ["Tue V. Jensen"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Tue V. Jensen"
__email__ = "tvjens@elektro.dtu.dk"
__status__ = "Eternal Prototype"


class expando(object):
    """Empty namespace object used as a mutable attribute container."""
    pass


# Plot/animation constants.
NUM_TIMESTEPS = defaults.NUM_TIMESTEPS
MINWIND = 0  # lower bound of the wind contour levels
MAXWIND = 20  # upper bound of the wind contour levels
WIND_LEVELS = 21  # number of wind contour levels
MINPRICE = -10
MAXPRICE = 10
WIND_CAP = 1200. # GW of wind capacity at 100\% penetration
WIND_TODAY = 142. # Installed wind capacity today
SOLAR_CAP = 1048. # GW of solar capacity at 100\% penetration
SOLAR_TODAY = 95. # Installed solar capacity today
class formatspec:
    """Formats slider values as absolute capacity plus percentages.

    Instances are passed as ``valfmt`` to the sliders.  The slider widget
    renders its builtin value label via ``valfmt % val``, so ``__mod__``
    deliberately returns an empty string to blank that label; the
    visualization draws its own label text through :meth:`format` instead.
    """

    def __init__(self, baseval, valfmt='{0:.00f} GW ({2:.00f}%)\n ({1:.00f}% vs today)', valtoday=1):
        # baseval: capacity (GW) corresponding to a slider value of 1.0
        # valtoday: today's installed capacity, for the "vs today" ratio
        self.baseval = baseval
        self.valfmt = valfmt
        self.valtoday = valtoday

    def format(self, x):
        # x is the raw slider value (fraction of baseval).
        return self.valfmt.format(self.baseval*x, 100.*x*self.baseval/self.valtoday, 100*x)

    def __mod__(self, x):
        # return self.format(x)
        return ''
# Discrete slider positions (apparently unused in this script — TODO confirm).
WIND_SETTINGS = np.linspace(0, 1, 11)
SOLAR_SETTINGS = np.linspace(0, 1, 11)
wind_formatspec = formatspec(WIND_CAP, valtoday=WIND_TODAY)
solar_formatspec = formatspec(SOLAR_CAP, valtoday=SOLAR_TODAY)

# Cylindrical-projection basemap over the region configured in defaults.
mymap = bm.Basemap(
    projection='cyl',
    llcrnrlat=defaults.LLCRNRLAT, llcrnrlon=defaults.LLCRNRLON,
    urcrnrlat=defaults.URCRNRLAT, urcrnrlon=defaults.URCRNRLON,
    resolution='l')

fig = plt.figure(figsize=(16, 9), dpi=80)
fig.patch.set_facecolor('white')

# Main map axes: price-colored network plot with coastlines overlaid.
ax1 = plt.subplot2grid((9, 6), (0, 0), colspan=4, rowspan=6)
contourholder = expando()
windticks = np.linspace(MINWIND, MAXWIND, WIND_LEVELS)
networkplot = Network_Plot(ax1)
pricecb = plt.colorbar(networkplot.nodeplot, ax=ax1, orientation='vertical', pad=0.05, aspect=30, extend='both', format='%.1f')
pricecb.set_label(U'Electricity price [€/MWh]')
coastlines = mymap.drawcoastlines(ax=ax1)
coastlines.set_alpha(0.5)
coastlines.set_zorder(10)

# Mean-price time-series panel.
ax2 = plt.subplot2grid((9, 6), (0, 4), rowspan=3, colspan=2)
thePriceplot = Priceplot(ax2)
ax2.set_ylabel(u'Mean European Price [€/MWh]')
ax2.set_ylim((defaults.MINPRICE, defaults.MAXPRICE*1.25))
sns.despine(ax=ax2, offset=3)

# Production/consumption panel.
ax3 = plt.subplot2grid((9, 6), (3, 4), rowspan=3, colspan=2)
ProdConPlot = Production_Consumption_Plot(ax3)
ax3.set_ylabel(u'Production/consumption [MWh]')
sns.despine(ax=ax3, offset=3)

# Pie-chart panel (not animated; redrawn manually by the slider callbacks).
ax6 = plt.subplot2grid((9, 6), (6, 2), rowspan=3, colspan=4)
ax6.set_aspect(1)
ax6.axis('off')
pp = Pieplots(ax6)
plt.tight_layout()
r = fig.canvas.get_renderer()
def wind_slider_change(*args, **kwargs):
    """Propagate a wind-slider change to all plots and blit the updates."""
    networkplot.update_wind(*args, **kwargs)
    ProdConPlot.update_wind(*args, **kwargs)
    thePriceplot.update_wind(*args, **kwargs)
    pp.update_wind(*args, **kwargs)
    # The pie plots are outside the animation loop, so redraw them manually.
    for a in pp.get_artists():
        ax6.draw_artist(a)
    fig.canvas.blit(ax6.bbox)
    fig.canvas.blit(wind_slider_ax.bbox)
    # Redraw the hand-made slider label (the widget's own label is blanked
    # by formatspec.__mod__).
    wind_slider_text.set_text(wind_formatspec.format(wind_slider.discrete_val))
    wind_slider_text_ax.draw_artist(wind_slider_text)
    fig.canvas.blit(wind_slider_text_ax.bbox)
def solar_slider_change(*args, **kwargs):
    """Propagate a solar-slider change to all plots and blit the updates."""
    networkplot.update_solar(*args, **kwargs)
    ProdConPlot.update_solar(*args, **kwargs)
    thePriceplot.update_solar(*args, **kwargs)
    pp.update_solar(*args, **kwargs)
    # The pie plots are outside the animation loop, so redraw them manually.
    for a in pp.get_artists():
        ax6.draw_artist(a)
    fig.canvas.blit(ax6.bbox)
    fig.canvas.blit(solar_slider_ax.bbox)
    # Redraw the hand-made slider label (the widget's own label is blanked
    # by formatspec.__mod__).
    solar_slider_text.set_text(solar_formatspec.format(solar_slider.discrete_val))
    solar_slider_text_ax.draw_artist(solar_slider_text)
    fig.canvas.blit(solar_slider_text_ax.bbox)
# Wind slider plus a hand-drawn value label (the slider's builtin label is
# blanked by formatspec.__mod__).
wind_slider_ax = plt.axes([0.08, 2.0/9, 1./3-0.16, 0.04])
wind_slider = DiscreteSlider(wind_slider_ax, 'Installed Wind', 0.0, 1.5, valinit=0.0, increment=0.1, valfmt=wind_formatspec, facecolor=sns.xkcd_rgb['sky blue'], dragging=True)
wind_slider.on_changed(wind_slider_change)
wind_slider_text_ax = plt.axes([1./3-0.07, 2.0/9, 0.1, 0.04])
wind_slider_text_ax.axis('off')
wind_slider_text = wind_slider_text_ax.text(
    0.01, 0.02, wind_formatspec.format(wind_slider.discrete_val),
    verticalalignment='bottom', horizontalalignment='left',
    transform=wind_slider_text_ax.transAxes,
    color='black', fontsize=12, bbox=dict(facecolor='white'))

# Solar slider, same layout one row below the wind slider.
solar_slider_ax = plt.axes([0.08, 1.4/9, 1./3-0.16, 0.04])
solar_slider = DiscreteSlider(solar_slider_ax, 'Installed Solar', 0.0, 0.5, valinit=0.0, increment=0.05, valfmt=solar_formatspec, facecolor=sns.xkcd_rgb['pale yellow'], dragging=True)
solar_slider.on_changed(solar_slider_change)
solar_slider_text_ax = plt.axes([1./3-0.07, 1.4/9, 0.1, 0.04])
solar_slider_text_ax.axis('off')
solar_slider_text = solar_slider_text_ax.text(
    0.01, 0.02, solar_formatspec.format(solar_slider.discrete_val),
    verticalalignment='bottom', horizontalalignment='left',
    transform=solar_slider_text_ax.transAxes,
    color='black', fontsize=12, bbox=dict(facecolor='white'))

# Scenario radio buttons: user-visible label -> internal scenario key.
scenario_dict = {
    'Today\'s system': 'base',
    'Nuclear is shut down': 'nuclear',
    'Demand increases by 15\%': 'demandincrease'
}
scenario_list = [
    'Today\'s system',
    'Nuclear is shut down',
    # u'CO2 price at 100 €/Ton',
    # 'Gas and Oil at 3x today\'s price',
    'Demand increases by 15\%'
]
scenario_select_ax = plt.axes([0.005, 0.1/9, 1./6, 1.1/9], aspect='equal', frameon=False)
scenario_select_radio = RadioButtons(scenario_select_ax, scenario_list, activecolor=sns.xkcd_rgb['dark grey'])
def scenario_change(val):
    """Switch every plot to the scenario selected in the radio buttons.

    :param val: the clicked radio-button label, mapped to an internal
        scenario key through ``scenario_dict``.
    """
    newscen = scenario_dict[val]
    networkplot.update_scenario(newscen)
    ProdConPlot.update_scenario(newscen)
    thePriceplot.update_scenario(newscen)
    pp.update_scenario(newscen)
    # The pie plots are outside the animation loop, so redraw them manually.
    for a in pp.get_artists():
        ax6.draw_artist(a)
    fig.canvas.blit(ax6.bbox)
    # Bug fix: canvas.blit() takes a bounding box, not an Axes — every
    # other handler in this file blits `<axes>.bbox`.
    fig.canvas.blit(scenario_select_ax.bbox)
scenario_select_radio.on_clicked(scenario_change)

# Background selection options — apparently unused here, since background
# plotting is disabled (see the licensing note in the module docstring).
bg_list = ['Plot Wind', 'Plot Solar', 'Leave Blank']
bg_dict = {
    'Plot Wind': 'wind',
    'Plot Solar': 'solar',
    'Leave Blank': 'blank'}
def init():
    """No-op init function for FuncAnimation; draws nothing."""
    return None
def animate(i):
    """Advance all animated plots to frame *i*.

    Returns the list of changed artists, which FuncAnimation uses for
    blitting.
    """
    # windout = windcontour.animate(i)
    ProdConPlot.animate(i)
    netout = networkplot.animate(i)
    thePriceplot.animate(i)
    return ProdConPlot.areas + [ProdConPlot.curtime_line] + \
        [coastlines] + netout + thePriceplot.areas + thePriceplot.lines + [thePriceplot.curtime_line] # windout + \
# Run the animation at 100 ms per frame, looping forever with a 1 s pause
# between repeats; blitting is on, so animate() must return changed artists.
ani = animation.FuncAnimation(fig, animate, frames=NUM_TIMESTEPS, interval=100, repeat=True, repeat_delay=1000, blit=True)
plt.show()
|
"""All the builtin operations (operators) are defined here
Also, for the moment, some builtin functions will also be defined here
"""
from __future__ import unicode_literals, print_function

import itertools
import operator
import sys
from functools import reduce

import six

from whispy_lispy import keywords, types, exceptions
if six.PY3:
    # Python 3 has no `unicode` builtin; alias it so the isinstance checks
    # and neutral-value tables below work on both major versions.
    unicode = str
def to_internal(value):
    """Convert Python types to Whispy Lispy types."""
    # bool must be checked before int: bool is a subclass of int.
    conversions = (
        (bool, types.Bool),
        (int, types.Int),
        (float, types.Float),
        (six.string_types, types.String),
    )
    for python_type, lispy_type in conversions:
        if isinstance(value, python_type):
            return lispy_type((value,))
def to_python(value):
    """Extract the single wrapped Python value from a Whispy Lispy type."""
    wrapped = value.values
    return wrapped[0]
# Sentinel: an operator applied to a single value reuses that value as the
# second operand (e.g. comparing a value with itself).
VALUE_SELF_REFERENCE = object()
# Per-type padding operands for addition-like operators.
ADDITION_NEUTRAL_VALUES = {
    int: 0, float: 0.0, unicode: '', bool: True}
# Per-type padding operands for multiplication-like operators.
MULTIPLICATION_NEUTRAL_VALUES = {
    int: 1, float: 1, unicode: 1, bool: True
}
# Sentinel: the operator has no default operand at all.
NO_DEFAULT_VALUE = object()
class Operator(object):
    """Template for creating Whispy Lispy operations from Python operators

    Operators in Python don't work exactly as their intended analogues in
    Whispy Lispy. For instance `reduce(operators.eq, ["asdf"]` will return
    "asdf" in python.
    We'd like this to return True in Whispy Lispy, because this operation
    is reflective (don't know if that's a word)
    For the same reason, the '>' operation applied to only one value should
    always return False. In Python it again returns the value
    """
    def __init__(self, operator_,
                 default_value=VALUE_SELF_REFERENCE,
                 type_fallbacks=None,
                 incompatible_types=lambda *args: False):
        """
        :param operator_: a python operator from module operator.*
        :param dict default_value: a value to be used as the default for this
            operator
        :param dict[type, str|unicode] type_fallbacks: For the provided
            parameter types, use these operators instead
        :param incompatible_types: lambda that takes a set of python types
            and returns True if they can't be considered compatible in the
            context of this operator
        """
        # Default to None instead of a mutable `{}` default argument, and
        # copy so instances never share fallback state.
        self.type_fallbacks = dict(type_fallbacks or {})
        self.operator = operator_
        self.defaults_dict = default_value
        self.incompatible_types_check = incompatible_types

    def __call__(self, interpreter, scope, *values):
        """Evaluate *values* in *scope* and fold them with the operator."""
        processable_values = self.get_values_with_defaults(values)
        # One pass for the type check, one for the reduction.
        iter_for_type_check, iter_for_reduce = itertools.tee(
            to_python(interpreter(val, scope)) for val in processable_values)
        types_ = set(type(item) for item in iter_for_type_check)
        if self.incompatible_types_check(types_):
            raise exceptions.EvaluationError(
                'Incompatible types: {}'.format(processable_values)
            )
        values_type = types_.pop()
        # Some operators delegate to another operation for certain operand
        # types (e.g. '+' on booleans behaves like 'or').
        if values_type in self.type_fallbacks:
            return OPERATIONS[self.type_fallbacks[values_type]](interpreter,
                                                                scope, *values)
        # `reduce` comes from functools (see imports): it is not a builtin
        # on Python 3, which this module explicitly supports.
        return to_internal(
            reduce(self.operator, iter_for_reduce))

    def get_values_with_defaults(self, values):
        """Pad *values* with this operator's default operand, if any."""
        if self.defaults_dict is VALUE_SELF_REFERENCE:
            # The list can't be empty. Should have blown up at the AST
            if len(values) == 1:
                processable_values = values + (values[0],)
            else:
                processable_values = values
        elif self.defaults_dict is not NO_DEFAULT_VALUE:
            processable_values = values + (
                to_internal(
                    self.defaults_dict[type(to_python(values[0]))]),)
        else:
            processable_values = values
        return processable_values
def internal_sum(interpreter, scope, *nums):
    """Sum the evaluated operands and wrap the result.

    :param interpreter: the interpreter2.interpret_ast function or
        something that interprets the *nums list
    :param scope: a scope (usually dict)
    :param nums: internal numbers to add
    :return: the sum, converted back to an internal type
    """
    # The previous `try: ... except: raise` wrapper was a no-op (a bare
    # re-raise adds nothing) and has been removed.
    return to_internal(
        sum(
            to_python(interpreter(num, scope))
            for num in nums)
    )
def internal_sub(interpreter, scope, *nums):
    """Subtract the evaluated operands left-to-right and wrap the result.

    `reduce` comes from functools (see imports) so this also works on
    Python 3, where it is no longer a builtin.
    """
    return to_internal(
        reduce(operator.sub,
               [to_python(interpreter(val, scope))
                for val in nums])
    )
def get_input(interpreter, scope, *values):
    """Read a line from stdin and convert it to an internal type.

    Tries float, then int, then boolean (#t/#f); anything else is wrapped
    as a quoted string.

    :rtype: str | float | int | bool | None
    """
    # Hardcode the message, because we don't have strings yet
    # `raw_input` only exists on Python 2; `input` is the Python 3 spelling.
    read_line = input if six.PY3 else raw_input
    user_input = read_line('input: ')
    # float?
    if '.' in user_input:
        try:
            return types.Float((float(user_input),))
        except ValueError:
            pass
    # int?
    try:
        return types.Int((int(user_input),))
    except ValueError:
        pass
    # bool?
    result = (True if user_input == '#t' else
              False if user_input == '#f' else None)
    # string?
    if result is None:
        result = '"{}"'.format(user_input)
    return to_internal(result)
builtin_print = lambda i, s, *args: print(*args)
def operation_quit(interpreter, scope, *args):
    """Just quits and avoids funny values.

    With an Int argument, exits with that status code; with any other
    argument, prints it and exits with status 1; with none, exits cleanly.
    """
    print('Thank you! Come again!')
    if not args:
        sys.exit()
    first = args[0]
    if isinstance(first, types.Int):
        sys.exit(int(to_python(first)))
    print(first)
    sys.exit(1)
def _incompatible_all_except_int_with_float(types_):
return types_ != {float, int} and len(types_) > 1
# Map operator tokens to implementations by zipping positionally against
# keywords.OPERATORS; `None` entries are operators not implemented yet, so
# the list lengths/positions must stay aligned with keywords.OPERATORS.
OPERATIONS = dict(zip(
    keywords.OPERATORS,
    [
        Operator(  # Operator +
            operator_=operator.add,
            default_value=ADDITION_NEUTRAL_VALUES,
            type_fallbacks={bool: 'or'},
            incompatible_types=_incompatible_all_except_int_with_float
        )
    ] +
    [None] +
    [
        Operator(  # Operator *
            operator_=operator.mul,
            default_value=MULTIPLICATION_NEUTRAL_VALUES,
            type_fallbacks={bool: 'and'},
            incompatible_types=_incompatible_all_except_int_with_float
        )
    ] +
    [None] * 8 +
    [Operator(operator.eq)] +
    [None] * 9 +
    [Operator(operator.or_)] +
    [None] * 13))
|
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class Access(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Access to be allowed or denied.
"""
ALLOW = "Allow"
DENY = "Deny"
class ApplicationGatewayBackendHealthServerHealth(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Health of backend server.
"""
UNKNOWN = "Unknown"
UP = "Up"
DOWN = "Down"
PARTIAL = "Partial"
DRAINING = "Draining"
class ApplicationGatewayCookieBasedAffinity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Cookie based affinity.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ApplicationGatewayCustomErrorStatusCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status code of the application gateway customer error.
"""
HTTP_STATUS403 = "HttpStatus403"
HTTP_STATUS502 = "HttpStatus502"
class ApplicationGatewayFirewallMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Web application firewall mode.
"""
DETECTION = "Detection"
PREVENTION = "Prevention"
class ApplicationGatewayOperationalState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operational state of the application gateway resource.
"""
STOPPED = "Stopped"
STARTING = "Starting"
RUNNING = "Running"
STOPPING = "Stopping"
class ApplicationGatewayProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Application Gateway protocol.
"""
HTTP = "Http"
HTTPS = "Https"
class ApplicationGatewayRedirectType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Redirect type enum.
"""
PERMANENT = "Permanent"
FOUND = "Found"
SEE_OTHER = "SeeOther"
TEMPORARY = "Temporary"
class ApplicationGatewayRequestRoutingRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Rule type.
"""
BASIC = "Basic"
PATH_BASED_ROUTING = "PathBasedRouting"
class ApplicationGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of an application gateway SKU.
"""
STANDARD_SMALL = "Standard_Small"
STANDARD_MEDIUM = "Standard_Medium"
STANDARD_LARGE = "Standard_Large"
WAF_MEDIUM = "WAF_Medium"
WAF_LARGE = "WAF_Large"
STANDARD_V2 = "Standard_v2"
WAF_V2 = "WAF_v2"
class ApplicationGatewaySslCipherSuite(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Ssl cipher suites enums.
"""
TLS_ECDHE_RSA_WITH_AES256_CBC_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"
TLS_ECDHE_RSA_WITH_AES128_CBC_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
TLS_ECDHE_RSA_WITH_AES256_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
TLS_ECDHE_RSA_WITH_AES128_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
TLS_DHE_RSA_WITH_AES256_GCM_SHA384 = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"
TLS_DHE_RSA_WITH_AES128_GCM_SHA256 = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
TLS_DHE_RSA_WITH_AES256_CBC_SHA = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"
TLS_DHE_RSA_WITH_AES128_CBC_SHA = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"
TLS_RSA_WITH_AES256_GCM_SHA384 = "TLS_RSA_WITH_AES_256_GCM_SHA384"
TLS_RSA_WITH_AES128_GCM_SHA256 = "TLS_RSA_WITH_AES_128_GCM_SHA256"
TLS_RSA_WITH_AES256_CBC_SHA256 = "TLS_RSA_WITH_AES_256_CBC_SHA256"
TLS_RSA_WITH_AES128_CBC_SHA256 = "TLS_RSA_WITH_AES_128_CBC_SHA256"
TLS_RSA_WITH_AES256_CBC_SHA = "TLS_RSA_WITH_AES_256_CBC_SHA"
TLS_RSA_WITH_AES128_CBC_SHA = "TLS_RSA_WITH_AES_128_CBC_SHA"
TLS_ECDHE_ECDSA_WITH_AES256_GCM_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
TLS_ECDHE_ECDSA_WITH_AES128_GCM_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
TLS_ECDHE_ECDSA_WITH_AES256_CBC_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"
TLS_ECDHE_ECDSA_WITH_AES128_CBC_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
TLS_ECDHE_ECDSA_WITH_AES256_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
TLS_ECDHE_ECDSA_WITH_AES128_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
TLS_DHE_DSS_WITH_AES256_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"
TLS_DHE_DSS_WITH_AES128_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"
TLS_DHE_DSS_WITH_AES256_CBC_SHA = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"
TLS_DHE_DSS_WITH_AES128_CBC_SHA = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"
TLS_RSA_WITH3_DES_EDE_CBC_SHA = "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
TLS_DHE_DSS_WITH3_DES_EDE_CBC_SHA = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"
TLS_ECDHE_RSA_WITH_AES128_GCM_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
TLS_ECDHE_RSA_WITH_AES256_GCM_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
class ApplicationGatewaySslPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Ssl predefined policy name enums.
"""
APP_GW_SSL_POLICY20150501 = "AppGwSslPolicy20150501"
APP_GW_SSL_POLICY20170401 = "AppGwSslPolicy20170401"
APP_GW_SSL_POLICY20170401_S = "AppGwSslPolicy20170401S"
class ApplicationGatewaySslPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of Ssl Policy.
"""
PREDEFINED = "Predefined"
CUSTOM = "Custom"
class ApplicationGatewaySslProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Ssl protocol enums.
"""
TL_SV1_0 = "TLSv1_0"
TL_SV1_1 = "TLSv1_1"
TL_SV1_2 = "TLSv1_2"
class ApplicationGatewayTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Tier of an application gateway.
"""
STANDARD = "Standard"
WAF = "WAF"
STANDARD_V2 = "Standard_v2"
WAF_V2 = "WAF_v2"
class AssociationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The association type of the child resource to the parent resource.
"""
ASSOCIATED = "Associated"
CONTAINS = "Contains"
class AuthenticationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VPN client authentication method.
"""
EAPTLS = "EAPTLS"
EAPMSCHA_PV2 = "EAPMSCHAPv2"
class AuthorizationUseStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The authorization use status.
"""
AVAILABLE = "Available"
IN_USE = "InUse"
class AzureFirewallApplicationRuleProtocolType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol type of a Application Rule resource.
"""
HTTP = "Http"
HTTPS = "Https"
MSSQL = "Mssql"
class AzureFirewallNatRCActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action type of a NAT rule collection.
"""
SNAT = "Snat"
DNAT = "Dnat"
class AzureFirewallNetworkRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol of a Network Rule resource.
"""
TCP = "TCP"
UDP = "UDP"
ANY = "Any"
ICMP = "ICMP"
class AzureFirewallRCActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action type of a rule collection.
"""
ALLOW = "Allow"
DENY = "Deny"
class AzureFirewallSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of an Azure Firewall SKU.
"""
AZFW_VNET = "AZFW_VNet"
AZFW_HUB = "AZFW_Hub"
class AzureFirewallSkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Tier of an Azure Firewall.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
class AzureFirewallThreatIntelMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The operation mode for Threat Intel.
"""
ALERT = "Alert"
DENY = "Deny"
OFF = "Off"
class BastionConnectProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol used to connect to the target.
"""
SSH = "SSH"
RDP = "RDP"
class BgpPeerState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The BGP peer state.
"""
UNKNOWN = "Unknown"
STOPPED = "Stopped"
IDLE = "Idle"
CONNECTING = "Connecting"
CONNECTED = "Connected"
class CircuitConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Express Route Circuit connection state.
"""
CONNECTED = "Connected"
CONNECTING = "Connecting"
DISCONNECTED = "Disconnected"
class ConnectionMonitorEndpointFilterItemType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of item included in the filter. Currently only 'AgentAddress' is supported.
"""
AGENT_ADDRESS = "AgentAddress"
class ConnectionMonitorEndpointFilterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The behavior of the endpoint filter. Currently only 'Include' is supported.
"""
INCLUDE = "Include"
class ConnectionMonitorSourceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of connection monitor source.
"""
UNKNOWN = "Unknown"
ACTIVE = "Active"
INACTIVE = "Inactive"
class ConnectionMonitorTestConfigurationProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol to use in test evaluation.
"""
TCP = "Tcp"
HTTP = "Http"
ICMP = "Icmp"
class ConnectionMonitorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of connection monitor.
"""
MULTI_ENDPOINT = "MultiEndpoint"
SINGLE_SOURCE_DESTINATION = "SingleSourceDestination"
class ConnectionState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The connection state.
"""
REACHABLE = "Reachable"
UNREACHABLE = "Unreachable"
UNKNOWN = "Unknown"
class ConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The connection status.
"""
UNKNOWN = "Unknown"
CONNECTED = "Connected"
DISCONNECTED = "Disconnected"
DEGRADED = "Degraded"
class DdosCustomPolicyProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol for which the DDoS protection policy is being customized.
"""
TCP = "Tcp"
UDP = "Udp"
SYN = "Syn"
class DdosCustomPolicyTriggerSensitivityOverride(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The customized DDoS protection trigger rate sensitivity degrees. High: Trigger rate set with
most sensitivity w.r.t. normal traffic. Default: Trigger rate set with moderate sensitivity
w.r.t. normal traffic. Low: Trigger rate set with less sensitivity w.r.t. normal traffic.
Relaxed: Trigger rate set with least sensitivity w.r.t. normal traffic.
"""
RELAXED = "Relaxed"
LOW = "Low"
DEFAULT = "Default"
HIGH = "High"
class DdosSettingsProtectionCoverage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The DDoS protection policy customizability of the public IP. Only standard coverage will have
the ability to be customized.
"""
BASIC = "Basic"
STANDARD = "Standard"
class DhGroup(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The DH Groups used in IKE Phase 1 for initial SA.
"""
NONE = "None"
DH_GROUP1 = "DHGroup1"
DH_GROUP2 = "DHGroup2"
DH_GROUP14 = "DHGroup14"
DH_GROUP2048 = "DHGroup2048"
ECP256 = "ECP256"
ECP384 = "ECP384"
DH_GROUP24 = "DHGroup24"
class Direction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The direction of the traffic.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class EffectiveRouteSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Who created the route.
"""
UNKNOWN = "Unknown"
USER = "User"
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
DEFAULT = "Default"
class EffectiveRouteState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The value of effective route.
"""
ACTIVE = "Active"
INVALID = "Invalid"
class EffectiveSecurityRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The network protocol this rule applies to.
"""
TCP = "Tcp"
UDP = "Udp"
ALL = "All"
class EvaluationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Connectivity analysis evaluation state.
"""
NOT_STARTED = "NotStarted"
IN_PROGRESS = "InProgress"
COMPLETED = "Completed"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The advertised public prefix state of the Peering resource.
"""
NOT_CONFIGURED = "NotConfigured"
CONFIGURING = "Configuring"
CONFIGURED = "Configured"
VALIDATION_NEEDED = "ValidationNeeded"
class ExpressRouteCircuitPeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of peering.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class ExpressRouteCircuitSkuFamily(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The family of the SKU.
"""
UNLIMITED_DATA = "UnlimitedData"
METERED_DATA = "MeteredData"
class ExpressRouteCircuitSkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The tier of the SKU.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
BASIC = "Basic"
LOCAL = "Local"
class ExpressRouteLinkAdminState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Administrative state of the physical port.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ExpressRouteLinkConnectorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Physical fiber port type.
"""
LC = "LC"
SC = "SC"
class ExpressRouteLinkMacSecCipher(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Mac security cipher.
"""
GCM_AES128 = "gcm-aes-128"
GCM_AES256 = "gcm-aes-256"
class ExpressRoutePeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of peering.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class ExpressRoutePeeringType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The peering type.
"""
AZURE_PUBLIC_PEERING = "AzurePublicPeering"
AZURE_PRIVATE_PEERING = "AzurePrivatePeering"
MICROSOFT_PEERING = "MicrosoftPeering"
class ExpressRoutePortsEncapsulation(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Encapsulation method on physical ports.
"""
DOT1_Q = "Dot1Q"
QIN_Q = "QinQ"
class FirewallPolicyFilterRuleActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action type of a rule.
"""
ALLOW = "Allow"
DENY = "Deny"
class FirewallPolicyNatRuleActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action type of a rule.
"""
DNAT = "DNAT"
class FirewallPolicyRuleConditionApplicationProtocolType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The application protocol type of a Rule condition.
"""
HTTP = "Http"
HTTPS = "Https"
class FirewallPolicyRuleConditionNetworkProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Network protocol of a Rule condition.
"""
TCP = "TCP"
UDP = "UDP"
ANY = "Any"
ICMP = "ICMP"
class FirewallPolicyRuleConditionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Rule Condition Type.
"""
APPLICATION_RULE_CONDITION = "ApplicationRuleCondition"
NETWORK_RULE_CONDITION = "NetworkRuleCondition"
NAT_RULE_CONDITION = "NatRuleCondition"
class FirewallPolicyRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the rule.
"""
FIREWALL_POLICY_NAT_RULE = "FirewallPolicyNatRule"
FIREWALL_POLICY_FILTER_RULE = "FirewallPolicyFilterRule"
class FlowLogFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The file type of flow log.
"""
JSON = "JSON"
class HTTPConfigurationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The HTTP method to use.
"""
GET = "Get"
POST = "Post"
class HTTPMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""HTTP method.
"""
GET = "Get"
class HubVirtualNetworkConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the VirtualHub to vnet connection.
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class IkeEncryption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IKE encryption algorithm (IKE phase 2).
"""
DES = "DES"
DES3 = "DES3"
AES128 = "AES128"
AES192 = "AES192"
AES256 = "AES256"
GCMAES256 = "GCMAES256"
GCMAES128 = "GCMAES128"
class IkeIntegrity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IKE integrity algorithm (IKE phase 2).
"""
MD5 = "MD5"
SHA1 = "SHA1"
SHA256 = "SHA256"
SHA384 = "SHA384"
GCMAES256 = "GCMAES256"
GCMAES128 = "GCMAES128"
class IPAllocationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""IP address allocation method.
"""
STATIC = "Static"
DYNAMIC = "Dynamic"
class IpAllocationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""IpAllocation type.
"""
UNDEFINED = "Undefined"
HYPERNET = "Hypernet"
class IpFlowProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol to be verified on.
"""
TCP = "TCP"
UDP = "UDP"
class IpsecEncryption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IPSec encryption algorithm (IKE phase 1).
"""
NONE = "None"
DES = "DES"
DES3 = "DES3"
AES128 = "AES128"
AES192 = "AES192"
AES256 = "AES256"
GCMAES128 = "GCMAES128"
GCMAES192 = "GCMAES192"
GCMAES256 = "GCMAES256"
# --- Auto-generated, case-insensitive string enums (Azure SDK style). ---
# Each enum subclasses `str` via `with_metaclass(_CaseInsensitiveEnumMeta, ...)`
# (both defined earlier in this file), so members compare equal to their string
# values and name lookup is case-insensitive. Do not edit by hand: these mirror
# the REST API's allowed string values.
class IpsecIntegrity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The IPSec integrity algorithm (IKE phase 1).
    """

    MD5 = "MD5"
    SHA1 = "SHA1"
    SHA256 = "SHA256"
    GCMAES128 = "GCMAES128"
    GCMAES192 = "GCMAES192"
    GCMAES256 = "GCMAES256"

class IPVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """IP address version.
    """

    I_PV4 = "IPv4"
    I_PV6 = "IPv6"

class IssueType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of issue.
    """

    UNKNOWN = "Unknown"
    AGENT_STOPPED = "AgentStopped"
    GUEST_FIREWALL = "GuestFirewall"
    DNS_RESOLUTION = "DnsResolution"
    SOCKET_BIND = "SocketBind"
    NETWORK_SECURITY_RULE = "NetworkSecurityRule"
    USER_DEFINED_ROUTE = "UserDefinedRoute"
    PORT_THROTTLED = "PortThrottled"
    PLATFORM = "Platform"

class LoadBalancerOutboundRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The protocol for the outbound rule in load balancer.
    """

    TCP = "Tcp"
    UDP = "Udp"
    ALL = "All"

class LoadBalancerSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Name of a load balancer SKU.
    """

    BASIC = "Basic"
    STANDARD = "Standard"

class LoadDistribution(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The load distribution policy for this rule.
    """

    DEFAULT = "Default"
    SOURCE_IP = "SourceIP"
    SOURCE_IP_PROTOCOL = "SourceIPProtocol"

class ManagedRuleEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The state of the managed rule. Defaults to Disabled if not specified.
    """

    DISABLED = "Disabled"

class NatGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Name of Nat Gateway SKU.
    """

    STANDARD = "Standard"

class NetworkOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the Azure async operation.
    """

    IN_PROGRESS = "InProgress"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"

class NextHopType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Next hop type.
    """

    INTERNET = "Internet"
    VIRTUAL_APPLIANCE = "VirtualAppliance"
    VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
    VNET_LOCAL = "VnetLocal"
    HYPER_NET_GATEWAY = "HyperNetGateway"
    NONE = "None"

class OfficeTrafficCategory(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The office traffic category.
    """

    OPTIMIZE = "Optimize"
    OPTIMIZE_AND_ALLOW = "OptimizeAndAllow"
    ALL = "All"
    NONE = "None"

class Origin(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The origin of the issue.
    """

    LOCAL = "Local"
    INBOUND = "Inbound"
    OUTBOUND = "Outbound"

class OutputType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Connection monitor output destination type. Currently, only "Workspace" is supported.
    """

    WORKSPACE = "Workspace"

class OwaspCrsExclusionEntryMatchVariable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The variable to be excluded.
    """

    REQUEST_HEADER_NAMES = "RequestHeaderNames"
    REQUEST_COOKIE_NAMES = "RequestCookieNames"
    REQUEST_ARG_NAMES = "RequestArgNames"

class OwaspCrsExclusionEntrySelectorMatchOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """When matchVariable is a collection, operate on the selector to specify which elements in the
    collection this exclusion applies to.
    """

    EQUALS = "Equals"
    CONTAINS = "Contains"
    STARTS_WITH = "StartsWith"
    ENDS_WITH = "EndsWith"
    EQUALS_ANY = "EqualsAny"

class PcError(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    # Packet-capture error codes (no docstring in the generated source).

    INTERNAL_ERROR = "InternalError"
    AGENT_STOPPED = "AgentStopped"
    CAPTURE_FAILED = "CaptureFailed"
    LOCAL_FILE_FAILED = "LocalFileFailed"
    STORAGE_FAILED = "StorageFailed"

class PcProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Protocol to be filtered on.
    """

    TCP = "TCP"
    UDP = "UDP"
    ANY = "Any"

class PcStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The status of the packet capture session.
    """

    NOT_STARTED = "NotStarted"
    RUNNING = "Running"
    STOPPED = "Stopped"
    ERROR = "Error"
    UNKNOWN = "Unknown"

class PfsGroup(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The Pfs Groups used in IKE Phase 2 for new child SA.
    """

    NONE = "None"
    PFS1 = "PFS1"
    PFS2 = "PFS2"
    PFS2048 = "PFS2048"
    ECP256 = "ECP256"
    ECP384 = "ECP384"
    PFS24 = "PFS24"
    PFS14 = "PFS14"
    PFSMM = "PFSMM"

class PreferredIPVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The preferred IP version to use in test evaluation. The connection monitor may choose to use a
    different version depending on other parameters.
    """

    I_PV4 = "IPv4"
    I_PV6 = "IPv6"

class ProbeProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The protocol of the end point. If 'Tcp' is specified, a received ACK is required for the probe
    to be successful. If 'Http' or 'Https' is specified, a 200 OK response from the specifies URI
    is required for the probe to be successful.
    """

    HTTP = "Http"
    TCP = "Tcp"
    HTTPS = "Https"

class ProcessorArchitecture(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """VPN client Processor Architecture.
    """

    AMD64 = "Amd64"
    X86 = "X86"

class Protocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Network protocol.
    """

    TCP = "Tcp"
    HTTP = "Http"
    HTTPS = "Https"
    ICMP = "Icmp"

class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The current provisioning state.
    """

    SUCCEEDED = "Succeeded"
    UPDATING = "Updating"
    DELETING = "Deleting"
    FAILED = "Failed"

class PublicIPAddressSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Name of a public IP address SKU.
    """

    BASIC = "Basic"
    STANDARD = "Standard"

class PublicIPPrefixSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Name of a public IP prefix SKU.
    """

    STANDARD = "Standard"

class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes
    both an implicitly created identity and a set of user assigned identities. The type 'None' will
    remove any identities from the virtual machine.
    """

    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
    NONE = "None"

class RouteFilterRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The rule type of the rule.
    """

    COMMUNITY = "Community"

class RouteNextHopType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of Azure hop the packet should be sent to.
    """

    VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
    VNET_LOCAL = "VnetLocal"
    INTERNET = "Internet"
    VIRTUAL_APPLIANCE = "VirtualAppliance"
    NONE = "None"

class SecurityPartnerProviderConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The current state of the connection with Security Partner Provider.
    """

    UNKNOWN = "Unknown"
    PARTIALLY_CONNECTED = "PartiallyConnected"
    CONNECTED = "Connected"
    NOT_CONNECTED = "NotConnected"

class SecurityProviderName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The Security Providers.
    """

    Z_SCALER = "ZScaler"
    I_BOSS = "IBoss"
    CHECKPOINT = "Checkpoint"

class SecurityRuleAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Whether network traffic is allowed or denied.
    """

    ALLOW = "Allow"
    DENY = "Deny"

class SecurityRuleDirection(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The direction of the rule. The direction specifies if rule will be evaluated on incoming or
    outgoing traffic.
    """

    INBOUND = "Inbound"
    OUTBOUND = "Outbound"

class SecurityRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Network protocol this rule applies to.
    """

    TCP = "Tcp"
    UDP = "Udp"
    ICMP = "Icmp"
    ESP = "Esp"
    ASTERISK = "*"
    AH = "Ah"

class ServiceProviderProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The ServiceProviderProvisioningState state of the resource.
    """

    NOT_PROVISIONED = "NotProvisioned"
    PROVISIONING = "Provisioning"
    PROVISIONED = "Provisioned"
    DEPROVISIONING = "Deprovisioning"

class Severity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The severity of the issue.
    """

    ERROR = "Error"
    WARNING = "Warning"

class TransportProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The transport protocol for the endpoint.
    """

    UDP = "Udp"
    TCP = "Tcp"
    ALL = "All"

class TunnelConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The current state of the tunnel.
    """

    UNKNOWN = "Unknown"
    CONNECTING = "Connecting"
    CONNECTED = "Connected"
    NOT_CONNECTED = "NotConnected"

class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """An enum describing the unit of measurement.
    """

    COUNT = "Count"

class VerbosityLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Verbosity level.
    """

    NORMAL = "Normal"
    MINIMUM = "Minimum"
    FULL = "Full"

class VirtualNetworkGatewayConnectionProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Gateway connection protocol.
    """

    IK_EV2 = "IKEv2"
    IK_EV1 = "IKEv1"

class VirtualNetworkGatewayConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Virtual Network Gateway connection status.
    """

    UNKNOWN = "Unknown"
    CONNECTING = "Connecting"
    CONNECTED = "Connected"
    NOT_CONNECTED = "NotConnected"

class VirtualNetworkGatewayConnectionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Gateway connection type.
    """

    I_PSEC = "IPsec"
    VNET2_VNET = "Vnet2Vnet"
    EXPRESS_ROUTE = "ExpressRoute"
    VPN_CLIENT = "VPNClient"

class VirtualNetworkGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Gateway SKU name.
    """

    BASIC = "Basic"
    HIGH_PERFORMANCE = "HighPerformance"
    STANDARD = "Standard"
    ULTRA_PERFORMANCE = "UltraPerformance"
    VPN_GW1 = "VpnGw1"
    VPN_GW2 = "VpnGw2"
    VPN_GW3 = "VpnGw3"
    VPN_GW4 = "VpnGw4"
    VPN_GW5 = "VpnGw5"
    VPN_GW1_AZ = "VpnGw1AZ"
    VPN_GW2_AZ = "VpnGw2AZ"
    VPN_GW3_AZ = "VpnGw3AZ"
    VPN_GW4_AZ = "VpnGw4AZ"
    VPN_GW5_AZ = "VpnGw5AZ"
    ER_GW1_AZ = "ErGw1AZ"
    ER_GW2_AZ = "ErGw2AZ"
    ER_GW3_AZ = "ErGw3AZ"

class VirtualNetworkGatewaySkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Gateway SKU tier.
    """

    BASIC = "Basic"
    HIGH_PERFORMANCE = "HighPerformance"
    STANDARD = "Standard"
    ULTRA_PERFORMANCE = "UltraPerformance"
    VPN_GW1 = "VpnGw1"
    VPN_GW2 = "VpnGw2"
    VPN_GW3 = "VpnGw3"
    VPN_GW4 = "VpnGw4"
    VPN_GW5 = "VpnGw5"
    VPN_GW1_AZ = "VpnGw1AZ"
    VPN_GW2_AZ = "VpnGw2AZ"
    VPN_GW3_AZ = "VpnGw3AZ"
    VPN_GW4_AZ = "VpnGw4AZ"
    VPN_GW5_AZ = "VpnGw5AZ"
    ER_GW1_AZ = "ErGw1AZ"
    ER_GW2_AZ = "ErGw2AZ"
    ER_GW3_AZ = "ErGw3AZ"

class VirtualNetworkGatewayType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of this virtual network gateway.
    """

    VPN = "Vpn"
    EXPRESS_ROUTE = "ExpressRoute"

class VirtualNetworkPeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The status of the virtual network peering.
    """

    INITIATED = "Initiated"
    CONNECTED = "Connected"
    DISCONNECTED = "Disconnected"

class VirtualWanSecurityProviderType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The virtual wan security provider type.
    """

    EXTERNAL = "External"
    NATIVE = "Native"

class VpnAuthenticationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """VPN authentication types enabled for the VpnServerConfiguration.
    """

    CERTIFICATE = "Certificate"
    RADIUS = "Radius"
    AAD = "AAD"

class VpnClientProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """VPN client protocol enabled for the virtual network gateway.
    """

    IKE_V2 = "IkeV2"
    SSTP = "SSTP"
    OPEN_VPN = "OpenVPN"

class VpnConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The current state of the vpn connection.
    """

    UNKNOWN = "Unknown"
    CONNECTING = "Connecting"
    CONNECTED = "Connected"
    NOT_CONNECTED = "NotConnected"

class VpnGatewayGeneration(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The generation for this VirtualNetworkGateway. Must be None if gatewayType is not VPN.
    """

    NONE = "None"
    GENERATION1 = "Generation1"
    GENERATION2 = "Generation2"

class VpnGatewayTunnelingProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """VPN protocol enabled for the VpnServerConfiguration.
    """

    IKE_V2 = "IkeV2"
    OPEN_VPN = "OpenVPN"

class VpnType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of this virtual network gateway.
    """

    POLICY_BASED = "PolicyBased"
    ROUTE_BASED = "RouteBased"

class WebApplicationFirewallAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of Actions.
    """

    ALLOW = "Allow"
    BLOCK = "Block"
    LOG = "Log"

class WebApplicationFirewallEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The state of the policy.
    """

    DISABLED = "Disabled"
    ENABLED = "Enabled"

class WebApplicationFirewallMatchVariable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Match Variable.
    """

    REMOTE_ADDR = "RemoteAddr"
    REQUEST_METHOD = "RequestMethod"
    QUERY_STRING = "QueryString"
    POST_ARGS = "PostArgs"
    REQUEST_URI = "RequestUri"
    REQUEST_HEADERS = "RequestHeaders"
    REQUEST_BODY = "RequestBody"
    REQUEST_COOKIES = "RequestCookies"

class WebApplicationFirewallMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The mode of the policy.
    """

    PREVENTION = "Prevention"
    DETECTION = "Detection"

class WebApplicationFirewallOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The operator to be matched.
    """

    IP_MATCH = "IPMatch"
    EQUAL = "Equal"
    CONTAINS = "Contains"
    LESS_THAN = "LessThan"
    GREATER_THAN = "GreaterThan"
    LESS_THAN_OR_EQUAL = "LessThanOrEqual"
    GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
    BEGINS_WITH = "BeginsWith"
    ENDS_WITH = "EndsWith"
    REGEX = "Regex"
    GEO_MATCH = "GeoMatch"

class WebApplicationFirewallPolicyResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Resource status of the policy.
    """

    CREATING = "Creating"
    ENABLING = "Enabling"
    ENABLED = "Enabled"
    DISABLING = "Disabling"
    DISABLED = "Disabled"
    DELETING = "Deleting"

class WebApplicationFirewallRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The rule type.
    """

    MATCH_RULE = "MatchRule"
    INVALID = "Invalid"

class WebApplicationFirewallTransform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Transforms applied before matching.
    """

    LOWERCASE = "Lowercase"
    TRIM = "Trim"
    URL_DECODE = "UrlDecode"
    URL_ENCODE = "UrlEncode"
    REMOVE_NULLS = "RemoveNulls"
    HTML_ENTITY_DECODE = "HtmlEntityDecode"
|
from components.propertyeditor.QPropertyModel import QPropertyModel
from components.propertyeditor.Property import Property
from components.ConnectorsInfoWnd import ConnectorsInfoWnd
class BlockPropTreeModel(QPropertyModel):
def __init__(self, mainWnd, rb, parent=None):
super(BlockPropTreeModel, self).__init__(parent)
self.rb = rb
self.block = rb.getBlock()
self.properties = {}
self.mainWnd = mainWnd
self.setupModelData(rb, self.rootItem)
self.lang_root = None
def setupModelData(self, rb, parent):
parents = [parent]
self.properties['genusName'] = Property('Genus Name', self.block.getGenusName(), parents[-1])
self.properties['genusName'].readOnly = True
self.properties['label'] = Property('Label', self.block.getBlockLabel(), parents[-1])
self.properties['label'].readOnly = True
self.lang_root = Property('Language','', parents[-1],Property.ADVANCED_EDITOR)
#for key in self.block.properties:
# self.properties[key] = Property(key,self.block.properties[key], self.lang_root)
module_name = ''
if( 'module_name' in self.block.properties):
module_name = self.block.properties['module_name']
elif( 'module_name' in self.block.getGenus().properties):
module_name = self.block.getGenus().properties['module_name']
self.properties['module_name'] = Property('module',module_name, self.lang_root,Property.ADVANCED_EDITOR)
self.properties['module_name'].onAdvBtnClick = self.getModuleName
function_name = ''
if( 'function_name' in self.block.properties):
function_name = self.block.properties['function_name']
elif( 'function_name' in self.block.getGenus().properties):
function_name = self.block.getGenus().properties['function_name']
self.properties['function_name'] = Property('function',function_name, self.lang_root,Property.COMBO_BOX_EDITOR , self.getModuleFuncList(module_name))
def onShowConnectorsInfo(self):
dlg = ConnectorsInfoWnd(self.mainWnd, self.all_connectors)
dlg.exec_()
print('onShowConnectorsInfo')
def setData(self, index, value, role):
ret = super(BlockPropTreeModel, self).setData(index, value, role)
if(ret == True):
item = index.internalPointer()
property_name = item.objectName()
if(property_name == 'module'):
self.block.properties['module_name'] = value
if(property_name == 'function'):
self.block.properties['function_name'] = value
return ret
|
import sys

# Python 2 competitive-programming script: reads t test cases; for each
# integer n, prints how many of n's own (non-zero) decimal digits divide n.
t = int(raw_input())
for i in range(t):
    n = int(raw_input())
    # Decompose n into its decimal digits.
    digits = [int(j) for j in list(str(n))]
    count = 0
    for d in digits:
        # Skip zero digits to avoid division by zero.
        if (d != 0) and (n%d == 0):
            count += 1
    print count
|
import os
import shutil
import sys
from os.path import expanduser
# True when running under nose; tests get a throwaway data directory.
TESTING = 'nosetests' in sys.argv[0]
if TESTING:
    DATA_DIR = '/tmp/tsgtest123aeiae31ea/'
    # Start every test run from an empty directory.
    shutil.rmtree(DATA_DIR, ignore_errors=True)
else:
    # DATA_DIR = os.path.dirname(os.path.abspath(__file__))+'/../data/'
    DATA_DIR = expanduser("~") + '/tsgdata/'

# Pipeline stage directories; created as an import-time side effect.
RAW_DIR = DATA_DIR + 'raw/'
PARSED_DIR = DATA_DIR + 'parsed/'
INTERMEDIATE_DIR = DATA_DIR + 'intermediate/'
os.makedirs(RAW_DIR, exist_ok=True)
os.makedirs(PARSED_DIR, exist_ok=True)
os.makedirs(INTERMEDIATE_DIR, exist_ok=True)

# Document fields and their relative ranking weights (parallel lists).
FIELDS = ['type', 'title', 'isbn', 'content']
FIELD_WEIGHTS = [2.5, 2.5, 5, 1]
CSV_HEADER = ','.join(['uuid'] + FIELDS)

# Derived data files stored alongside the raw/parsed directories.
DICTIONARY_PATH = DATA_DIR + 'dictionary.dat'
PAGERANK_PATH = DATA_DIR + 'pagerank.csv'
INDEXINFO_PATH = DATA_DIR + 'indexinfo.json'
QSCORE_PATH = DATA_DIR + 'qscores.csv'

THROTTLE_SECONDS = 1.6 # default throttle time, gets replaced by robots.txt
ALLOWED_SITES = []
DISALLOWED_SITES = []
MIN_RESULTS = 10
RANKER_K = 100000
|
# -*- coding: utf-8 -*-
import projecteuler as pe
def main():
    """Placeholder entry point; Project Euler solutions are wired in here."""
    return None


if __name__ == "__main__":
    main()
|
from __future__ import absolute_import
import os
from .base import *
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "CHANGEME!!!")
|
import os
from datetime import datetime
from collections import defaultdict
from subprocess import Popen, PIPE
# Import-time sanity check: this module shells out to the `cql2` binary,
# so fail fast if `which` cannot find it on PATH.
sout,serr = Popen(["which", "cql2"], stdout=PIPE, stderr=PIPE).communicate()
if len(serr) > 0:
    raise Exception(serr)
if len(sout) == 0:
    raise Exception("Could not find 'cql2' in path. Library requires this binary.")
def _parse_dt(x):
if isinstance(x, list):
x = ' '.join(x)
return datetime.strptime(x, "%Y-%m-%d %H:%M:%S+00:00")
def query(zone, op=None, service=None, host=None, metric=None, args=None, query=None, latest_only=False):
    """ Run a cql2 metrics query and parse its output.

        Usage:
        op = sum/avg/min/max
        service = kestrel
        host = <hostname>/colony/members
        metric = <metric name>
        OR
        query = <cql query>

        latest_only restricts the query to the last 5 minutes (note: this
        overrides any caller-supplied `args`) and keeps only the newest
        sample per data set.

        Return:
        If multiple data sets then data is returned as:
        {<dataset>: [(datetime, value)]}
        If single data set:
        [(datetime, value)]

        Raises Exception if cql2 writes to stderr or a record cannot be
        parsed.
    """
    cmd = ['cql2','-z',zone,'q']
    if latest_only:
        args = ['-d', '300'] # 5 min history only
    elif args is None:
        args = []
    cmd.extend(args)
    if query:
        cmd.append(query)
    else:
        cmd.extend(['-o', op, service, host, metric])
    sout, serr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
    if len(serr) > 0:
        raise Exception(serr)
    nest_data = defaultdict(list)
    flat_data = []
    for line in (l.strip() for l in sout.split('\n') if l.strip() != ''):
        try:
            sdata = line.split()
            if len(sdata) == 4: # nested data
                # <dataset>: <date> <time> <value>
                key = sdata[0].strip(':')
                dt = _parse_dt(sdata[1:3])
                value = float(sdata[3])
                if latest_only:
                    nest_data[key] = [(dt, value)]
                else:
                    nest_data[key].append((dt, value))
            elif len(sdata) == 3: # single data set
                # <date> <time> <value>
                dt = _parse_dt(sdata[0:2])
                value = float(sdata[2])
                if host is not None: # nest for single host data
                    if latest_only:
                        nest_data[host] = [(dt, value)]
                    else:
                        nest_data[host].append((dt, value))
                else: # otherwise flat
                    flat_data.append((dt, value))
            else: # invalid format
                # Bug fix: the placeholders were previously passed as extra
                # Exception args instead of being substituted into the text.
                raise Exception("Unexpected data format: {}".format(sdata))
        except Exception as e:  # bug fix: `except X, e` is Python-2-only syntax
            raise Exception("Failed parsing record {}\n{}".format(sdata, e))
    if len(nest_data) > 0:
        return nest_data
    else:
        return flat_data
|
import os
import yaml
from mtm.util.Assert import *
def loadYamlFilesThatExist(*paths):
    """Load every YAML file in *paths* that exists on disk.

    Missing paths and documents that parse to None (e.g. empty files) are
    skipped silently.

    Returns:
        List of parsed YAML objects, in the order the paths were given.
    """
    configs = []
    for path in paths:
        if os.path.isfile(path):
            config = loadYamlFile(path)
            # `is not None` rather than `!= None`: identity comparison is the
            # idiomatic (and __eq__-proof) way to test for None.
            if config is not None:
                configs.append(config)
    return configs
def loadYamlFile(path):
    """Parse the YAML file at *path* and return the resulting object."""
    # NOTE(review): yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from untrusted documents; switch to
    # yaml.safe_load if these config files never rely on custom tags.
    return yaml.load(readAllTextFromFile(path))
def readAllTextFromFile(filePath):
    """Return the full contents of *filePath* decoded as UTF-8 text."""
    with open(filePath, 'r', encoding='utf-8') as handle:
        contents = handle.read()
    return contents
|
from __future__ import print_function
import xml.etree.ElementTree as ET
from glob import glob
from pprint import PrettyPrinter as PP
# Tests known to run too long under valgrind/debug builds:
# (module, gtest name or pattern, observed duration). Entries with the flat
# value 1000 are excluded for other resource reasons (see inline notes).
LONG_TESTS_DEBUG_VALGRIND = [
    ('calib3d', 'Calib3d_InitUndistortRectifyMap.accuracy', 2017.22),
    ('dnn', 'Reproducibility*', 1000), # large DNN models
    ('features2d', 'Features2d/DescriptorImage.no_crash/3', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/4', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/5', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/6', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/7', 1000),
    ('imgcodecs', 'Imgcodecs_Png.write_big', 1000), # memory limit
    ('imgcodecs', 'Imgcodecs_Tiff.decode_tile16384x16384', 1000), # memory limit
    ('ml', 'ML_RTrees.regression', 1423.47),
    ('optflow', 'DenseOpticalFlow_DeepFlow.ReferenceAccuracy', 1360.95),
    ('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/0', 1881.59),
    ('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/1', 5608.75),
    ('optflow', 'DenseOpticalFlow_GlobalPatchColliderDCT.ReferenceAccuracy', 5433.84),
    ('optflow', 'DenseOpticalFlow_GlobalPatchColliderWHT.ReferenceAccuracy', 5232.73),
    ('optflow', 'DenseOpticalFlow_SimpleFlow.ReferenceAccuracy', 1542.1),
    ('photo', 'Photo_Denoising.speed', 1484.87),
    ('photo', 'Photo_DenoisingColoredMulti.regression', 2447.11),
    ('rgbd', 'Rgbd_Normals.compute', 1156.32),
    ('shape', 'Hauss.regression', 2625.72),
    ('shape', 'ShapeEMD_SCD.regression', 61913.7),
    ('shape', 'Shape_SCD.regression', 3311.46),
    ('tracking', 'AUKF.br_mean_squared_error', 10764.6),
    ('tracking', 'UKF.br_mean_squared_error', 5228.27),
    ('videoio', 'Videoio_Video.ffmpeg_writebig', 1000),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_BoostDesc_LBGM.regression', 1124.51),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG120.regression', 2198.1),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG48.regression', 1958.52),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG64.regression', 2113.12),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG80.regression', 2167.16),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_BoostDesc_LBGM.regression', 1511.39),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG120.regression', 1222.07),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG48.regression', 1059.14),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG64.regression', 1163.41),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG80.regression', 1179.06),
    ('ximgproc', 'L0SmoothTest.SplatSurfaceAccuracy', 6382.26),
    ('ximgproc', 'L0SmoothTest_perf.perf/17', 2052.16),
    ('ximgproc', 'RollingGuidanceFilterTest_perf.perf/59', 2760.29),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/5', 1086.33),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/7', 1405.05),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/5', 1253.07),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/7', 1599.98),
]
def longTestFilter(data, module = None):
    """Build a --gtest_filter argument excluding the long-running tests.

    Args:
        data: list of (module_name, test_name, duration) tuples, e.g.
            LONG_TESTS_DEBUG_VALGRIND.
        module: if given, only exclude tests belonging to this module;
            otherwise exclude every test in *data*.

    Returns:
        A '--gtest_filter=*:-:<name>:<name>...' string.
    """
    # Bug fix: entries are (module, test_name, duration). The original
    # unpacked them as (_, v, m), which compared the *duration* against
    # `module`, so a per-module filter never matched anything.
    res = ['*', '-'] + [name for mod, name, _ in data if module is None or mod == module]
    return '--gtest_filter={}'.format(':'.join(res))
def parseOneFile(filename, timeLimit):
    """Collect slow-test entries from a single gtest XML report.

    The module name is guessed from the report filename (the part before the
    first '_posix_', '_nt_' or '__' separator) or, failing that, from the
    root element's 'cv_module_name' attribute.

    Returns:
        (module, entries) where entries is a list of (module, test_name,
        duration) tuples for every testcase at or above *timeLimit*, or
        (None, None) if the module name cannot be determined.
    """
    root = ET.parse(filename).getroot()

    def module_from_name(name, separators):
        # Return the prefix before the first separator actually present.
        for sep in separators:
            head, found, _tail = name.partition(sep)
            if found:
                return head
        return None

    module = module_from_name(filename, ['_posix_', '_nt_', '__']) or root.get('cv_module_name')
    if not module:
        return (None, None)
    slow = []
    for case in root.findall('.//testcase'):
        test_name = '{}.{}'.format(case.get('classname'), case.get('name'))
        duration = float(case.get('time'))
        if duration >= timeLimit:
            slow.append((module, test_name, duration))
    return (module, slow)
if __name__ == '__main__':
    # Scan every gtest XML report in the current directory and print the
    # tests that took at least LIMIT (same unit as the reports' time field).
    LIMIT = 1000
    res = []
    xmls = glob('*.xml')
    for xml in xmls:
        print('Parsing file', xml, '...')
        module, testinfo = parseOneFile(xml, LIMIT)
        if not module:
            print('SKIP')
            continue
        res.extend(testinfo)
    print('========= RESULTS =========')
    PP(indent=4, width=100).pprint(sorted(res))
|
__author__ = 'riko'

import calculations as calc
import numpy as np

# Python 2 demo: run the project's smooth_time_series over a short series
# with parameter 0.5 and print the result.
# NOTE(review): smooth_time_series lives in the project-local `calculations`
# module; its exact semantics are not visible here.
A = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
A = calc.smooth_time_series(A, 0.5)
print A
|
import cv2
import numpy as np
from PIL import Image
import zbarlight
cap = cv2.VideoCapture(1)
def QRCODE():
    """Scan frames from the global `cap` camera for a QR code.

    Finder patterns are detected via contour nesting depth, the code region
    is rectified with a perspective transform, written to 'QR000.jpg' and
    decoded with zbarlight. Returns the decoded payload as a UTF-8 string,
    or loops until Esc is pressed (then releases the camera, returning None).
    """
    while(cap.read()):
        # Read a frame; grayscale + histogram equalization, binary threshold,
        # Gaussian blur, then Canny edges and contour extraction.
        _,frame = cap.read()
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        img_gray = cv2.equalizeHist(img_gray)
        th, bi_img = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY)
        img_gb = cv2.GaussianBlur(img_gray, (5, 5), 0)
        edges = cv2.Canny(img_gb, 100 , 200)
        img_fc, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Walk each contour's child chain; keep contours nested at least five
        # levels deep (characteristic of QR finder patterns).
        # NOTE(review): the bare `except: pass` closing this `try` swallows
        # every error in the whole detection pipeline below.
        try:
            hierarchy = hierarchy[0]
            found = []
            for i in range(len(contours)):
                k = i
                c = 0
                while hierarchy[k][2] != -1:
                    k = hierarchy[k][2]
                    c = c + 1
                if c >= 5:
                    found.append(i)
            draw_img = frame.copy()
            # Wrap each candidate contour in a minimum-area rotated rectangle.
            boxes = []
            # Flatten the four rectangle corners into xx = [x0,y0,...,x3,y3]
            # and draw them for visual debugging.
            for i in found:
                rect = cv2.minAreaRect(contours[i])
                box = cv2.boxPoints(rect)
                box.tolist()
                for i in range(len(box)):
                    xx = []
                    for x in box:
                        for y in x:
                            xx.append(y)
                box = np.int0(box)
                cv2.drawContours(draw_img, [box], 0, (0, 255, 0), 2)
                p1 = xx[0], xx[1]
                p1 = tuple(p1)
                p2 = xx[2], xx[3]
                p2 = tuple(p2)
                p3 = xx[4], xx[5]
                p3 = tuple(p3)
                p4 = xx[6], xx[7]
                p4 = tuple(p4)
                cv2.circle(draw_img, p1, 5, (255, 0, 0), -1)
                cv2.circle(draw_img, p2, 5, (0, 255, 0), -1)
                cv2.circle(draw_img, p3, 5, (0, 0, 255), -1)
                cv2.circle(draw_img, p4, 5, (255, 0, 255), -1)
                boxes.append(box)
            def Vector2angle(v1, v2=(1, 0),degrees360 = False):
                # Angle in degrees between numpy vector v1 and v2; when
                # degrees360 is set, map to [0, 360) using the sign of v1[1].
                # Returns None on any numeric failure.
                try:
                    v2 = np.array(v2)
                    cos_angle = v1.dot(v2) / (np.sqrt(v1.dot(v1)) * np.sqrt(v2.dot(v2)))
                    Diametermetric = np.arccos(cos_angle)
                    angle = Diametermetric * 360 / 2 / np.pi
                    if degrees360:
                        if v1[1]>0:
                            angle = 360-angle
                    return angle
                except:
                    return None
            # Middle-school geometry: straight-line distance between points.
            def outerrectangle(pointlist):
                # Given three finder-pattern centers, reconstruct the fourth
                # corner, enlarge the rectangle by 50% around its center, and
                # return the four corners reordered (by the angle of the
                # fourth corner) for the perspective transform.
                # Returns None if no near-right angle is found.
                pointlist = np.array(pointlist)
                for i in range(len(pointlist)):
                    l1 = pointlist[i - 1] - pointlist[i]
                    l2 = pointlist[i - 2] - pointlist[i]
                    angle = Vector2angle(l1, l2)
                    if angle > 80 and angle < 100:
                        p4 = pointlist[i] + l1 + l2
                        cv2.circle(draw_img, (p4[0], p4[1]), 5, (0, 255, 255), -1)
                        fourpoint = np.array([pointlist[i - 1], pointlist[i], pointlist[i - 2], p4])
                        centerpoint = np.mean(fourpoint, axis=0)
                        o1 = (pointlist[i - 1] - centerpoint) * .5 + pointlist[i - 1]
                        o2 = (pointlist[i] - centerpoint) * .5 + pointlist[i]
                        o3 = (pointlist[i - 2] - centerpoint) * .5 + pointlist[i - 2]
                        o4 = (p4 - centerpoint) * .5 + p4
                        outerpoint = [o1, o2, o3, o4]
                        degree = Vector2angle(p4 - centerpoint, degrees360=True)
                        if degree>90 and degree<135:
                            outerpoint = [o3, o2, o1, o4]
                        elif degree>225 and degree<270:
                            outerpoint = [o3, o2, o1, o4]
                        elif degree>0 and degree<45:
                            outerpoint = [o3, o2, o1, o4]
                        elif degree>315 and degree<360:
                            outerpoint = [o3, o2, o1, o4]
                        else:
                            pass
                        outerpoint = np.int0(outerpoint)
                        cv2.drawContours(draw_img, [outerpoint], 0, (0, 0, 255), 2)
                        return outerpoint
            def cv_distance(P, Q):
                # Euclidean distance between 2-D points P and Q, as int.
                return int(np.sqrt(pow((P[0] - Q[0]), 2) + pow((P[1] - Q[1]), 2)))
            def createLineIterator(P1, P2, img):
                """
                Produces and array that consists of the coordinates and intensities of each pixel in a line between two points
                Parameters:
                    -P1: a numpy array that consists of the coordinate of the first point (x,y)
                    -P2: a numpy array that consists of the coordinate of the second point (x,y)
                    -img: the image being processed
                Returns:
                    -it: a numpy array that consists of the coordinates and intensities of each pixel in the radii (shape: [numPixels, 3], row = [x,y,intensity])
                NOTE(review): as written, the final indexing step replaces the
                buffer with the pixel intensities only, so the returned array
                is 1-D intensities rather than [x, y, intensity] rows.
                """
                # define local variables for readability
                imageH = img.shape[0]
                imageW = img.shape[1]
                P1X = P1[0]
                P1Y = P1[1]
                P2X = P2[0]
                P2Y = P2[1]
                # difference and absolute difference between points
                # used to calculate slope and relative location between points
                dX = P2X - P1X
                dY = P2Y - P1Y
                dXa = np.abs(dX)
                dYa = np.abs(dY)
                # predefine numpy array for output based on distance between points
                itbuffer = np.empty(shape=(np.maximum(dYa, dXa), 3), dtype=np.float32)
                itbuffer.fill(np.nan)
                # Obtain coordinates along the line using a form of Bresenham's algorithm
                negY = P1Y > P2Y
                negX = P1X > P2X
                if P1X == P2X: # vertical line segment
                    itbuffer[:, 0] = P1X
                    if negY:
                        itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
                    else:
                        itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
                elif P1Y == P2Y: # horizontal line segment
                    itbuffer[:, 1] = P1Y
                    if negX:
                        itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
                    else:
                        itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
                else: # diagonal line segment
                    steepSlope = dYa > dXa
                    if steepSlope:
                        slope = dX.astype(np.float32) / dY.astype(np.float32)
                        if negY:
                            itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
                        else:
                            itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
                        itbuffer[:, 0] = (slope * (itbuffer[:, 1] - P1Y)).astype(np.int) + P1X
                    else:
                        slope = dY.astype(np.float32) / dX.astype(np.float32)
                        if negX:
                            itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
                        else:
                            itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
                        itbuffer[:, 1] = (slope * (itbuffer[:, 0] - P1X)).astype(np.int) + P1Y
                # Remove points outside of image
                colX = itbuffer[:, 0]
                colY = itbuffer[:, 1]
                itbuffer = itbuffer[(colX >= 0) & (colY >= 0) & (colX < imageW) & (colY < imageH)]
                # Get intensities from img ndarray
                itbuffer = img[itbuffer[:, 1].astype(np.uint), itbuffer[:, 0].astype(np.uint)]
                return itbuffer
            def isTimingPattern(line):
                # Strip white pixels from both ends of the sampled line.
                while line[0] != 0:
                    line = line[1:]
                while line[-1] != 0:
                    line = line[:-1]
                # Count runs of consecutive equal-valued pixels.
                c = []
                count = 1
                l = line[0]
                for p in line[1:]:
                    if p == l:
                        count = count + 1
                    else:
                        c.append(count)
                        count = 1
                        l = p
                c.append(count)
                # Too few black/white alternations: reject immediately.
                if len(c) < 5:
                    return False
                # Low variance of the run lengths means an evenly alternating
                # black/white sequence, i.e. a QR timing pattern.
                threshold = 5
                return np.var(c) < threshold
            def check(a, b,img):
                # Closest two pairs of corner points between boxes a and b.
                s1_ab = ()
                s2_ab = ()
                # Their distances; np.iinfo('i').max (INT max) as sentinel.
                s1 = np.iinfo('i').max
                s2 = s1
                for ai in a:
                    for bi in b:
                        d = cv_distance(ai, bi)
                        if d < s2:
                            if d < s1:
                                s1_ab, s2_ab = (ai, bi), s1_ab
                                s1, s2 = d, s1
                            else:
                                s2_ab = (ai, bi)
                                s2 = d
                try:
                    dis = 20
                    a1, a2 = s1_ab[0], s2_ab[0]
                    b1, b2 = s1_ab[1], s2_ab[1]
                    a1 = (a1[0] + (a2[0] - a1[0]) // dis, a1[1] + (a2[1] - a1[1]) // dis)
                    b1 = (b1[0] + (b2[0] - b1[0]) // dis, b1[1] + (b2[1] - b1[1]) // dis)
                    a2 = (a2[0] + (a1[0] - a2[0]) // dis, a2[1] + (a1[1] - a2[1]) // dis)
                    b2 = (b2[0] + (b1[0] - b2[0]) // dis, b2[1] + (b1[1] - b2[1]) // dis)
                    # Reminder (original): convert the points to tuples.
                    # a1 = tuple(a1)
                    # a2 = tuple(a2)
                    # b1 = tuple(b1)
                    # b2 = tuple(b2)
                    # Sample both candidate connecting lines; draw and accept
                    # whichever looks like a timing pattern.
                    line1 = createLineIterator(a1, b1, img)
                    line2 = createLineIterator(a2, b2, img)
                    if isTimingPattern(line1):
                        cv2.line(draw_img, a1, b1, (0, 0, 255), 3)
                        return True
                    if isTimingPattern(line2):
                        cv2.line(draw_img, a2, b2, (0, 0, 255), 3)
                        return True
                except:
                    pass
            # Compare every pair of candidate boxes; mark both as valid
            # finder patterns when a timing pattern connects them.
            valid = set()
            for i in range(len(boxes)):
                for j in range(i+1, len(boxes)):
                    if check(boxes[i], boxes[j],bi_img):
                        valid.add(i)
                        valid.add(j)
            point_all = []
            center_all = []
            # Exactly three finder patterns found: compute each one's center.
            while len(valid)==3:
                for i in range(len(valid)):
                    rect = cv2.minAreaRect(contours[found[valid.pop()]])
                    box = cv2.boxPoints(rect)
                    box.tolist()
                    for i in range(len(box)):
                        xx = []
                        for x in box:
                            for y in x:
                                xx.append(y)
                    box = np.int0(box)
                    cv2.drawContours(draw_img, [box], 0, (0, 255, 0), 2)
                    center = [int((xx[0] + xx[2] + xx[4] + xx[6]) // 4), int((xx[1] + xx[3] + xx[5] + xx[7]) // 4)]
                    center_all.append(center)
                    center = tuple(center)
                    cv2.circle(draw_img, center, 5, (0, 255, 255), -1)
                break
            # Rectify the code region to 300x300, save it to disk, and try to
            # decode the saved image with zbarlight.
            while outerrectangle(center_all) is not None:
                pts1 = outerrectangle(center_all)
                # if pts1[0][0] < pts1[2][0] :
                # pts1[0], pts1[1],pts1[2], pts1[3] = pts1[1], pts1[2],pts1[3], pts1[4]
                pts1 = np.float32(pts1)
                pts2 = np.float32([[300,0],[0,0],[0,300],[300,300]])
                M = cv2.getPerspectiveTransform(pts1, pts2)
                dst = cv2.warpPerspective(bi_img, M, (300, 300))
                cv2.imshow("xxxxx", dst)
                cv2.imwrite("QR000.jpg",dst)
                file_path = "QR000.jpg"
                with open(file_path, 'rb')as image_file:
                    image = Image.open(image_file)
                    image.load()
                codes = zbarlight.scan_codes("qrcode", image)
                for i in codes:
                    if i!=None:
                        cap.release()
                        cv2.destroyAllWindows()
                        return i.decode("utf-8")
                break
            cv2.imshow("xx",draw_img)
            if cv2.waitKey(1) & 0xFF == 27:
                break
        except:
            pass
    cap.release()
    cv2.destroyAllWindows()
if __name__=="__main__":
    # Run the scanner loop directly when executed as a script.
    QRCODE()
|
from flask import Flask, render_template, redirect
import rethinkdb as r
app = Flask(__name__)
@app.route('/')
def main():
    """Serve the landing page."""
    template_name = 'main.html'
    return render_template(template_name)
@app.route('/<user>')
def bio(user):
    """Render the public bio page for *user*, or a not-found message."""
    rconn = r.connect('localhost')
    try:
        useri = r.db('bittybio').table('users').get(user).run(rconn)
    finally:
        # Release the connection even if the query raises.
        rconn.close()
    # `is None` rather than `== None`: idiomatic identity check.
    if useri is None:
        return 'Sorry! We couldn\'t find '+user+'.'
    else:
        return render_template('bio.html', **useri)
@app.route('/<user>/edit')
def editbio(user):
    """Render the bio editing page for *user*, or a not-found message."""
    rconn = r.connect('localhost')
    try:
        useri = r.db('bittybio').table('users').get(user).run(rconn)
    finally:
        # Release the connection even if the query raises.
        rconn.close()
    # `is None` rather than `== None`: idiomatic identity check.
    if useri is None:
        return 'Sorry! We couldn\'t find '+user+'.'
    else:
        return render_template('edit.html', **useri)
@app.route('/<user>/bb')
def touser(user):
    """Legacy alias: forward '/<user>/bb' to the plain '/<user>' page."""
    destination = user
    return redirect(destination)
@app.route('/<user>/<net>')
def usersnet(user, net):
    """Redirect to *user*'s profile on the social network *net*.

    The network record's 'prefix' flag decides whether the user's handle is
    prepended as a subdomain or appended to the network URL.
    """
    rconn = r.connect('localhost')
    try:
        userdata = r.db('bittybio').table('users').get(user).run(rconn)
        netdata = r.db('bittybio').table('nets').get(net).run(rconn)
    finally:
        # Bug fix: the original never closed this connection (resource leak;
        # the sibling routes bio/editbio do close theirs).
        rconn.close()
    if userdata is None or netdata is None:
        return 'User or network undefined!'
    goto_name = []
    for dnet in userdata['nets']:
        if dnet['net'] == net:
            goto_name.append(dnet['url'])
    try:
        url = netdata['user_url']
    except KeyError:
        url = netdata['url']
    if len(goto_name) > 1:
        return "Multiple potential URLs found."
    if not goto_name:
        # Robustness: the user has no entry for this network; the original
        # raised IndexError (HTTP 500) here.
        return 'User or network undefined!'
    elif netdata['prefix']:
        return redirect('http://'+goto_name[0]+'.'+url)
    else:
        return redirect('http://'+url+goto_name[0])
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger (arbitrary code
    # execution) and should never be combined with binding to 0.0.0.0:80;
    # confirm this configuration is development-only.
    app.run(host='0.0.0.0', port=80, debug=True)
|
# Demonstrate str.isalpha(): False for a string containing a space,
# True for a letters-only string. Prints "False" then "True".
str1 = "Hello World"
str2 = "MyFavoriteSite"
for sample in (str1, str2):
    print(sample.isalpha())
|
from __future__ import division, unicode_literals, print_function
import json
import glob
import itertools
import logging
import math
import os
import re
import warnings
import xml.etree.cElementTree as ET
from collections import defaultdict
from io import StringIO
import numpy as np
from monty.io import zopen, reverse_readfile
from monty.json import MSONable
from monty.json import jsanitize
from monty.re import regrep
from six import string_types
from six.moves import map, zip
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import BandStructure, \
BandStructureSymmLine, get_reconstructed_band_structure
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType, Magmom
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import \
ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar
from pymatgen.util.io_utils import clean_lines, micro_pyawk
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
"""
Classes for reading/manipulating/writing VASP ouput files.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Ioannis Petousis, Stephen Dacek, Mark Turiansky"
__credits__ = "Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 30, 2012"
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
"""
if val_type == "logical":
return val == "T"
elif val_type == "int":
return int(val)
elif val_type == "string":
return val.strip()
else:
return float(val)
def _parse_v_parameters(val_type, val, filename, param_name):
"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains \\*\\*\\* for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
return val
def _parse_varray(elem):
if elem.get("type", None) == 'logical':
m = [[True if i == 'T' else False for i in v.text.split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listdir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
else:
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as np.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == '*' * len(f):
warnings.warn('Float overflow (*******) encountered in vasprun')
return np.nan
raise e
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over Dom is at least 2x for smallish files (~1Mb) to orders of
magnitude for larger files (~10Mb).
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. Main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps.
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True,
where no hashes will be determined and the potcar_spec dictionaries
will read {"symbol": ElSymbol, "hash": None}. By Default, looks in
the same directory as the vasprun.xml, with same extensions as
Vasprun.xml. If a string is provided, looks at that filepath.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Default to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: structures
List of Structure objects for the structure at each ionic step.
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atomindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-array}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: dielectric
The real and imaginary part of the dielectric constant (e.g., computed
by RPA) in function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
the real and imaginary part of the dielectric constant (e.g., computed
by RPA) in function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: epsilon_static
The static part of the dielectric constant. Present when it's a DFPT run
(LEPSILON=TRUE)
.. attribute:: epsilon_static_wolfe
The static part of the dielectric constant without any local field
effects. Present when it's a DFPT run (LEPSILON=TRUE)
.. attribute:: epsilon_ionic
The ionic part of the static dielectric constant. Present when it's a
DFPT run (LEPSILON=TRUE) and IBRION=5, 6, 7 or 8
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in phonon DFPT run(IBRION = 8).
The data is a 4D numpy array of shape (natoms, natoms, 3, 3).
.. attribute:: normalmode_eigenvals
Normal mode frequencies.
1D numpy array of size 3*natoms.
.. attribute:: normalmode_eigenvecs
Normal mode eigen vectors.
3D numpy array of shape (3*natoms, natoms, 3).
**Vasp inputs**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actually used, including all
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: Shyue Ping Ong
"""
    def __init__(self, filename, ionic_step_skip=None,
                 ionic_step_offset=0, parse_dos=True,
                 parse_eigen=True, parse_projected_eigen=False,
                 parse_potcar_file=True, occu_tol=1e-8,
                 exception_on_bad_xml=True):
        """See the class docstring for the meaning of each argument."""
        self.filename = filename
        self.ionic_step_skip = ionic_step_skip
        self.ionic_step_offset = ionic_step_offset
        self.occu_tol = occu_tol
        self.exception_on_bad_xml = exception_on_bad_xml
        with zopen(filename, "rt") as f:
            if ionic_step_skip or ionic_step_offset:
                # remove parts of the xml file and parse the string
                # NOTE(review): int(ionic_step_skip) raises TypeError when
                # ionic_step_offset is set but ionic_step_skip is None.
                run = f.read()
                steps = run.split("<calculation>")
                # The text before the first <calculation> is the preamble!
                preamble = steps.pop(0)
                # nionic_steps counts ALL steps in the file, not just the
                # subset selected below.
                self.nionic_steps = len(steps)
                new_steps = steps[ionic_step_offset::int(ionic_step_skip)]
                # re-attach the trailing information from the last step of the run
                to_parse = "<calculation>".join(new_steps)
                if steps[-1] != new_steps[-1]:
                    to_parse = "{}<calculation>{}{}".format(
                        preamble, to_parse,
                        steps[-1].split("</calculation>")[-1])
                else:
                    to_parse = "{}<calculation>{}".format(preamble, to_parse)
                self._parse(StringIO(to_parse), parse_dos=parse_dos,
                            parse_eigen=parse_eigen,
                            parse_projected_eigen=parse_projected_eigen)
            else:
                self._parse(f, parse_dos=parse_dos, parse_eigen=parse_eigen,
                            parse_projected_eigen=parse_projected_eigen)
                self.nionic_steps = len(self.ionic_steps)
        if parse_potcar_file:
            self.update_potcar_spec(parse_potcar_file)
            self.update_charge_from_potcar(parse_potcar_file)
        # Warn on unconverged runs (convergence is meaningless for BSE).
        if self.incar.get("ALGO", "") != "BSE" and (not self.converged):
            msg = "%s is an unconverged VASP run.\n" % filename
            msg += "Electronic convergence reached: %s.\n" % \
                   self.converged_electronic
            msg += "Ionic convergence reached: %s." % self.converged_ionic
            warnings.warn(msg, UnconvergedVASPWarning)
    def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
        """
        Stream-parse the vasprun XML via ET.iterparse, populating run
        attributes tag by tag. Header tags (generator, incar, kpoints,
        parameters, initial structure, atominfo) are consumed until the
        first <calculation> appears; after that only per-step and output
        tags are handled. Tag order in the file therefore matters.
        """
        self.efermi = None
        self.eigenvalues = None
        self.projected_eigenvalues = None
        self.dielectric_data = {}
        self.other_dielectric = {}
        ionic_steps = []
        parsed_header = False
        try:
            for event, elem in ET.iterparse(stream):
                tag = elem.tag
                if not parsed_header:
                    if tag == "generator":
                        self.generator = self._parse_params(elem)
                    elif tag == "incar":
                        self.incar = self._parse_params(elem)
                    elif tag == "kpoints":
                        self.kpoints, self.actual_kpoints, \
                        self.actual_kpoints_weights = self._parse_kpoints(
                            elem)
                    elif tag == "parameters":
                        self.parameters = self._parse_params(elem)
                    elif tag == "structure" and elem.attrib.get("name") == \
                            "initialpos":
                        self.initial_structure = self._parse_structure(elem)
                    elif tag == "atominfo":
                        self.atomic_symbols, self.potcar_symbols = \
                            self._parse_atominfo(elem)
                        # Placeholder spec; hashes are filled in later by
                        # update_potcar_spec when a POTCAR is available.
                        self.potcar_spec = [{"titel": p,
                                             "hash": None} for
                                            p in self.potcar_symbols]
                if tag == "calculation":
                    parsed_header = True
                    # LCHIMAG (chemical-shielding) runs use a dedicated parser.
                    if not self.parameters.get("LCHIMAG", False):
                        ionic_steps.append(self._parse_calculation(elem))
                    else:
                        ionic_steps.extend(self._parse_chemical_shielding_calculation(elem))
                elif parse_dos and tag == "dos":
                    try:
                        self.tdos, self.idos, self.pdos = self._parse_dos(elem)
                        self.efermi = self.tdos.efermi
                        self.dos_has_errors = False
                    except Exception as ex:
                        # Best-effort: a broken DOS section is only flagged.
                        self.dos_has_errors = True
                elif parse_eigen and tag == "eigenvalues":
                    self.eigenvalues = self._parse_eigen(elem)
                elif parse_projected_eigen and tag == "projected":
                    self.projected_eigenvalues = self._parse_projected_eigen(
                        elem)
                elif tag == "dielectricfunction":
                    if ("comment" not in elem.attrib) or \
                            elem.attrib["comment"] == "INVERSE MACROSCOPIC DIELECTRIC TENSOR (including local field effects in RPA (Hartree))":
                        if not 'density' in self.dielectric_data:
                            self.dielectric_data['density'] = self._parse_diel(elem)
                        # "velocity-velocity" is also named "current-current"
                        # in OUTCAR
                        elif not 'velocity' in self.dielectric_data:
                            self.dielectric_data['velocity'] = self._parse_diel(elem)
                        else:
                            raise NotImplementedError('This vasprun.xml has >2 unlabelled dielectric functions')
                    else:
                        comment = elem.attrib["comment"]
                        self.other_dielectric[comment] = self._parse_diel(elem)
                elif tag == "varray" and elem.attrib.get("name") == 'opticaltransitions':
                    self.optical_transition = np.array(_parse_varray(elem))
                elif tag == "structure" and elem.attrib.get("name") == \
                        "finalpos":
                    self.final_structure = self._parse_structure(elem)
                elif tag == "dynmat":
                    # Phonon DFPT output: build force constants and normal
                    # modes from the 3N x 3N hessian.
                    hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
                    natoms = len(self.atomic_symbols)
                    hessian = np.array(hessian)
                    self.force_constants = np.zeros((natoms, natoms, 3, 3), dtype='double')
                    for i in range(natoms):
                        for j in range(natoms):
                            self.force_constants[i, j] = hessian[i * 3:(i + 1) * 3, j * 3:(j + 1) * 3]
                    phonon_eigenvectors = []
                    for ev in eigenvectors:
                        phonon_eigenvectors.append(np.array(ev).reshape(natoms, 3))
                    self.normalmode_eigenvals = np.array(eigenvalues)
                    self.normalmode_eigenvecs = np.array(phonon_eigenvectors)
        except ET.ParseError as ex:
            if self.exception_on_bad_xml:
                raise ex
            else:
                # NOTE(review): message is missing a space between "data"
                # and "is" due to implicit string concatenation.
                warnings.warn(
                    "XML is malformed. Parsing has stopped but partial data"
                    "is available.", UserWarning)
        self.ionic_steps = ionic_steps
        self.vasp_version = self.generator["version"]
@property
def structures(self):
return [step["structure"] for step in self.ionic_steps]
@property
def epsilon_static(self):
"""
Property only available for DFPT calculations.
"""
return self.ionic_steps[-1].get("epsilon", [])
@property
def epsilon_static_wolfe(self):
"""
Property only available for DFPT calculations.
"""
return self.ionic_steps[-1].get("epsilon_rpa", [])
@property
def epsilon_ionic(self):
"""
Property only available for DFPT calculations and when IBRION=5, 6, 7 or 8.
"""
return self.ionic_steps[-1].get("epsilon_ion", [])
@property
def dielectric(self):
return self.dielectric_data['density']
    @property
    def optical_absorption_coeff(self):
        """
        Calculate the optical absorption coefficient
        from the dielectric constants. Note that this method is only
        implemented for optical properties calculated with GGA and BSE.

        Returns:
            optical absorption coefficient in list; implicitly None when
            no "density" dielectric data is present.
        """
        if self.dielectric_data["density"]:
            # Average the three diagonal tensor components at each energy.
            real_avg = [sum(self.dielectric_data["density"][1][i][0:3]) / 3
                        for i in range(len(self.dielectric_data["density"][0]))]
            imag_avg = [sum(self.dielectric_data["density"][2][i][0:3]) / 3
                        for i in range(len(self.dielectric_data["density"][0]))]
            def f(freq, real, imag):
                """
                The optical absorption coefficient calculated in terms of
                equation
                """
                hbar = 6.582119514e-16  # reduced Planck constant in eV*s (the original "eV/K" label was wrong)
                coeff = np.sqrt(
                    np.sqrt(real ** 2 + imag ** 2) - real) * \
                    np.sqrt(2) / hbar * freq
                return coeff
            absorption_coeff = [f(freq, real, imag) for freq, real, imag in
                                zip(self.dielectric_data["density"][0],
                                    real_avg, imag_avg)]
            return absorption_coeff
@property
def lattice(self):
return self.final_structure.lattice
@property
def lattice_rec(self):
return self.final_structure.lattice.reciprocal_lattice
@property
def converged_electronic(self):
"""
Checks that electronic step convergence has been reached in the final
ionic step
"""
final_esteps = self.ionic_steps[-1]["electronic_steps"]
if 'LEPSILON' in self.incar and self.incar['LEPSILON']:
i = 1
to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def converged_ionic(self):
"""
Checks that ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw
@property
def converged(self):
"""
Returns true if a relaxation run is converged.
"""
return self.converged_electronic and self.converged_ionic
    @property
    @unitized("eV")
    def final_energy(self):
        """
        Final energy from the vasp run (in eV via the @unitized decorator).

        Returns the last ionic step's e_wo_entrp, warning when it differs
        from the last electronic step's e_0_energy (e.g. when VASP applied
        corrections such as vdW). Returns inf when no total energy exists
        (e.g. GW-type runs).
        """
        try:
            final_istep = self.ionic_steps[-1]
            if final_istep["e_wo_entrp"] != final_istep[
                'electronic_steps'][-1]["e_0_energy"]:
                warnings.warn("Final e_wo_entrp differs from the final "
                              "electronic step. VASP may have included some "
                              "corrections, e.g., vdw. Vasprun will return "
                              "the final e_wo_entrp, i.e., including "
                              "corrections in such instances.")
                return final_istep["e_wo_entrp"]
            return final_istep['electronic_steps'][-1]["e_0_energy"]
        except (IndexError, KeyError):
            warnings.warn("Calculation does not have a total energy. "
                          "Possibly a GW or similar kind of run. A value of "
                          "infinity is returned.")
            return float('inf')
@property
def complete_dos(self):
"""
A complete dos object which incorporates the total dos and all
projected dos.
"""
final_struct = self.final_structure
pdoss = {final_struct[i]: pdos for i, pdos in enumerate(self.pdos)}
return CompleteDos(self.final_structure, self.tdos, pdoss)
@property
def hubbards(self):
"""
Hubbard U values used if a vasprun is a GGA+U run. {} otherwise.
"""
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
if not self.incar.get("LDAU", False):
return {}
us = self.incar.get("LDAUU", self.parameters.get("LDAUU"))
js = self.incar.get("LDAUJ", self.parameters.get("LDAUJ"))
if len(js) != len(us):
js = [0] * len(us)
if len(us) == len(symbols):
return {symbols[i]: us[i] - js[i] for i in range(len(symbols))}
elif sum(us) == 0 and sum(js) == 0:
return {}
else:
raise VaspParserError("Length of U value parameters and atomic "
"symbols are mismatched")
@property
def run_type(self):
"""
Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs.
TODO: Fix for other functional types like PW91, other vdW types, etc.
"""
if self.parameters.get("LHFCALC", False):
rt = "HF"
elif self.parameters.get("LUSE_VDW", False):
vdw_gga = {"RE": "DF", "OR": "optPBE", "BO": "optB88",
"MK": "optB86b", "ML": "DF2"}
gga = self.parameters.get("GGA").upper()
rt = "vdW-" + vdw_gga[gga]
elif self.potcar_symbols[0].split()[0] == 'PAW':
rt = "LDA"
else:
rt = "GGA"
if self.is_hubbard:
rt += "+U"
return rt
@property
def is_hubbard(self):
"""
True if run is a DFT+U run.
"""
if len(self.hubbards) == 0:
return False
return sum(self.hubbards.values()) > 1e-8
@property
def is_spin(self):
"""
True if run is spin-polarized.
"""
return self.parameters.get("ISPIN", 1) == 2
def get_computed_entry(self, inc_structure=True, parameters=None,
data=None):
"""
Returns a ComputedStructureEntry from the vasprun.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. If
parameters is None, a default set of parameters that are
necessary for typical post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
Returns:
ComputedStructureEntry/ComputedEntry
"""
param_names = {"is_hubbard", "hubbards", "potcar_symbols",
"potcar_spec", "run_type"}
if parameters:
param_names.update(parameters)
params = {p: getattr(self, p) for p in param_names}
data = {p: getattr(self, p) for p in data} if data is not None else {}
if inc_structure:
return ComputedStructureEntry(self.final_structure,
self.final_energy, parameters=params,
data=data)
else:
return ComputedEntry(self.final_structure.composition,
self.final_energy, parameters=params,
data=data)
    def get_band_structure(self, kpoints_filename=None, efermi=None,
                           line_mode=False):
        """
        Returns the band structure as a BandStructure object

        Args:
            kpoints_filename (str): Full path of the KPOINTS file from which
                the band structure is generated.
                If none is provided, the code will try to intelligently
                determine the appropriate KPOINTS file by substituting the
                filename of the vasprun.xml with KPOINTS.
                The latter is the default behavior.
            efermi (float): If you want to specify manually the fermi energy
                this is where you should do it. By default, the None value
                means the code will get it from the vasprun.
            line_mode (bool): Force the band structure to be considered as
                a run along symmetry lines.

        Returns:
            a BandStructure object (or more specifically a
            BandStructureSymmLine object if the run is detected to be a run
            along symmetry lines)

            Two types of runs along symmetry lines are accepted: non-sc with
            Line-Mode in the KPOINT file or hybrid, self-consistent with a
            uniform grid+a few kpoints along symmetry lines (explicit KPOINTS
            file) (it's not possible to run a non-sc band structure with hybrid
            functionals). The explicit KPOINTS file needs to have data on the
            kpoint label as commentary.
        """
        if not kpoints_filename:
            kpoints_filename = self.filename.replace('vasprun.xml', 'KPOINTS')
        if not os.path.exists(kpoints_filename) and line_mode is True:
            raise VaspParserError('KPOINTS needed to obtain band structure '
                                  'along symmetry lines.')
        if efermi is None:
            efermi = self.efermi
        kpoint_file = None
        if os.path.exists(kpoints_filename):
            kpoint_file = Kpoints.from_file(kpoints_filename)
        lattice_new = Lattice(self.lattice_rec.matrix)
        kpoints = [np.array(self.actual_kpoints[i])
                   for i in range(len(self.actual_kpoints))]
        p_eigenvals = defaultdict(list)
        eigenvals = defaultdict(list)
        nkpts = len(kpoints)
        # NOTE(review): min_eigenvalues is computed but only used by the
        # commented-out code below.
        neigenvalues = [len(v) for v in self.eigenvalues[Spin.up]]
        min_eigenvalues = min(neigenvalues)
        for spin, v in self.eigenvalues.items():
            # Reorder (kpoint, band) -> (band, kpoint); column 0 holds the
            # eigenvalue, column 1 the occupation.
            v = np.swapaxes(v, 0, 1)
            eigenvals[spin] = v[:, :, 0]
            if self.projected_eigenvalues:
                peigen = self.projected_eigenvalues[spin]
                # Original axes for self.projected_eigenvalues are kpoints,
                # band, ion, orb.
                # For BS input, we need band, kpoints, orb, ion.
                peigen = np.swapaxes(peigen, 0, 1)  # Swap kpoint and band axes
                peigen = np.swapaxes(peigen, 2, 3)  # Swap ion and orb axes
                p_eigenvals[spin] = peigen
                # for b in range(min_eigenvalues):
                #     p_eigenvals[spin].append(
                #         [{Orbital(orb): v for orb, v in enumerate(peigen[b, k])}
                #          for k in range(nkpts)])
        # check if we have an hybrid band structure computation
        # for this we look at the presence of the LHFCALC tag
        hybrid_band = False
        if self.parameters.get('LHFCALC', False):
            hybrid_band = True
        if kpoint_file is not None:
            if kpoint_file.style == Kpoints.supported_modes.Line_mode:
                line_mode = True
        if line_mode:
            labels_dict = {}
            if hybrid_band:
                # Hybrid runs mix a uniform grid with zero-weight line
                # kpoints; find where the line segment starts.
                start_bs_index = 0
                for i in range(len(self.actual_kpoints)):
                    if self.actual_kpoints_weights[i] == 0.0:
                        start_bs_index = i
                        break
                for i in range(start_bs_index, len(kpoint_file.kpts)):
                    if kpoint_file.labels[i] is not None:
                        labels_dict[kpoint_file.labels[i]] = \
                            kpoint_file.kpts[i]
                # remake the data only considering line band structure k-points
                # (weight = 0.0 kpoints)
                nbands = len(eigenvals[Spin.up])
                kpoints = kpoints[start_bs_index:nkpts]
                up_eigen = [eigenvals[Spin.up][i][start_bs_index:nkpts]
                            for i in range(nbands)]
                if self.projected_eigenvalues:
                    p_eigenvals[Spin.up] = [p_eigenvals[Spin.up][i][
                                            start_bs_index:nkpts]
                                            for i in range(nbands)]
                if self.is_spin:
                    down_eigen = [eigenvals[Spin.down][i][start_bs_index:nkpts]
                                  for i in range(nbands)]
                    eigenvals = {Spin.up: up_eigen, Spin.down: down_eigen}
                    if self.projected_eigenvalues:
                        p_eigenvals[Spin.down] = [p_eigenvals[Spin.down][i][
                                                  start_bs_index:nkpts]
                                                  for i in range(nbands)]
                else:
                    eigenvals = {Spin.up: up_eigen}
            else:
                if '' in kpoint_file.labels:
                    raise Exception("A band structure along symmetry lines "
                                    "requires a label for each kpoint. "
                                    "Check your KPOINTS file")
                labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts))
                labels_dict.pop(None, None)
            return BandStructureSymmLine(kpoints, eigenvals, lattice_new,
                                         efermi, labels_dict,
                                         structure=self.final_structure,
                                         projections=p_eigenvals)
        else:
            return BandStructure(kpoints, eigenvals, lattice_new, efermi,
                                 structure=self.final_structure,
                                 projections=p_eigenvals)
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
    def get_potcars(self, path):
        """
        Locate and return a Potcar object matching this run's POTCAR symbols.

        Args:
            path: Either a string (an explicit POTCAR file path, or a
                directory to scan for POTCAR* files), or True (scan the
                vasprun's own directory), or anything else (returns None).

        Returns:
            Potcar object or None.
        """
        def get_potcar_in_path(p):
            # Scan directory p for any POTCAR* file whose entries match
            # this run's POTCAR symbols.
            for fn in os.listdir(os.path.abspath(p)):
                if fn.startswith('POTCAR'):
                    pc = Potcar.from_file(os.path.join(p, fn))
                    # NOTE(review): this branch compares `d.header` while the
                    # explicit-path branch below compares `d.TITEL` — confirm
                    # both attributes expose the same field on PotcarSingle.
                    if {d.header for d in pc} == \
                            {sym for sym in self.potcar_symbols}:
                        return pc
            warnings.warn("No POTCAR file with matching TITEL fields"
                          " was found in {}".format(os.path.abspath(p)))
        if isinstance(path, string_types):
            if "POTCAR" in path:
                potcar = Potcar.from_file(path)
                if {d.TITEL for d in potcar} != \
                        {sym for sym in self.potcar_symbols}:
                    raise ValueError("Potcar TITELs do not match Vasprun")
            else:
                potcar = get_potcar_in_path(path)
        elif isinstance(path, bool) and path:
            potcar = get_potcar_in_path(os.path.split(self.filename)[0])
        else:
            potcar = None
        return potcar
    def update_potcar_spec(self, path):
        """
        Replace self.potcar_spec with {"titel", "hash"} entries computed
        from a matching POTCAR (see get_potcars); no-op when none is found.
        """
        potcar = self.get_potcars(path)
        if potcar:
            # Pair each vasprun POTCAR symbol with the hash of the POTCAR
            # entry whose element symbol matches the symbol's second token.
            self.potcar_spec = [{"titel": sym, "hash": ps.get_potcar_hash()}
                                for sym in self.potcar_symbols
                                for ps in potcar if
                                ps.symbol == sym.split()[1]]
    def update_charge_from_potcar(self, path):
        """
        Set the net charge on all structures to NELECT minus the neutral
        electron count implied by the POTCAR ZVALs. Skipped when no POTCAR
        is found or when ALGO is one of GW0/G0W0/GW/BSE.
        """
        potcar = self.get_potcars(path)
        if potcar and self.incar.get("ALGO", "") not in ["GW0", "G0W0", "GW", "BSE"]:
            nelect = self.parameters["NELECT"]
            # Electron count of the neutral cell: sum of ZVAL * element count.
            potcar_nelect = int(round(sum([self.initial_structure.composition.element_composition[
                ps.element] * ps.ZVAL for ps in potcar])))
            charge = nelect - potcar_nelect
            if charge:
                for s in self.structures:
                    # NOTE(review): writes the private Structure._charge
                    # attribute directly.
                    s._charge = charge
    def as_dict(self):
        """
        Json-serializable dict representation, with "input" and "output"
        sub-dicts, sanitized via jsanitize.
        """
        d = {"vasp_version": self.vasp_version,
             "has_vasp_completed": self.converged,
             "nsites": len(self.final_structure)}
        comp = self.final_structure.composition
        d["unit_cell_formula"] = comp.as_dict()
        d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
        d["pretty_formula"] = comp.reduced_formula
        # NOTE(review): `symbols` is computed here but never used in this
        # method (the same derivation is used by the hubbards property).
        symbols = [s.split()[1] for s in self.potcar_symbols]
        symbols = [re.split(r"_", s)[0] for s in symbols]
        d["is_hubbard"] = self.is_hubbard
        d["hubbards"] = self.hubbards
        unique_symbols = sorted(list(set(self.atomic_symbols)))
        d["elements"] = unique_symbols
        d["nelements"] = len(unique_symbols)
        d["run_type"] = self.run_type
        vin = {"incar": {k: v for k, v in self.incar.items()},
               "crystal": self.initial_structure.as_dict(),
               "kpoints": self.kpoints.as_dict()}
        actual_kpts = [{"abc": list(self.actual_kpoints[i]),
                        "weight": self.actual_kpoints_weights[i]}
                       for i in range(len(self.actual_kpoints))]
        vin["kpoints"]["actual_points"] = actual_kpts
        vin["nkpoints"] = len(actual_kpts)
        vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
        vin["potcar_spec"] = self.potcar_spec
        vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
        vin["parameters"] = {k: v for k, v in self.parameters.items()}
        vin["lattice_rec"] = self.lattice_rec.as_dict()
        d["input"] = vin
        nsites = len(self.final_structure)
        try:
            vout = {"ionic_steps": self.ionic_steps,
                    "final_energy": self.final_energy,
                    "final_energy_per_atom": self.final_energy / nsites,
                    "crystal": self.final_structure.as_dict(),
                    "efermi": self.efermi}
        except (ArithmeticError, TypeError):
            # final_energy can be inf / non-numeric (e.g. GW runs).
            vout = {"ionic_steps": self.ionic_steps,
                    "final_energy": self.final_energy,
                    "final_energy_per_atom": None,
                    "crystal": self.final_structure.as_dict(),
                    "efermi": self.efermi}
        if self.eigenvalues:
            eigen = {str(spin): v.tolist()
                     for spin, v in self.eigenvalues.items()}
            vout["eigenvalues"] = eigen
            (gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
            vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
                             is_gap_direct=is_direct))
            if self.projected_eigenvalues:
                vout['projected_eigenvalues'] = {
                    str(spin): v.tolist()
                    for spin, v in self.projected_eigenvalues.items()}
        vout['epsilon_static'] = self.epsilon_static
        vout['epsilon_static_wolfe'] = self.epsilon_static_wolfe
        vout['epsilon_ionic'] = self.epsilon_ionic
        d['output'] = vout
        return jsanitize(d, strict=True)
    def _parse_params(self, elem):
        """
        Recursively parse an <incar>/<parameters>-style element into an
        Incar object, converting leaf <i> (scalar) and <v> (array) tags via
        _parse_parameters / _parse_v_parameters.
        """
        params = {}
        for c in elem:
            name = c.attrib.get("name")
            if c.tag not in ("i", "v"):
                # Non-leaf node: recurse into the nested parameter group.
                p = self._parse_params(c)
                if name == "response functions":
                    # Delete duplicate fields from "response functions",
                    # which overrides the values in the root params.
                    p = {k: v for k, v in p.items() if k not in params}
                params.update(p)
            else:
                ptype = c.attrib.get("type")
                val = c.text.strip() if c.text else ""
                if c.tag == "i":
                    params[name] = _parse_parameters(ptype, val)
                else:
                    params[name] = _parse_v_parameters(ptype, val,
                                                       self.filename, name)
        elem.clear()
        return Incar(params)
    def _parse_atominfo(self, elem):
        """
        Parse the <atominfo> element.

        Returns:
            (atomic_symbols, potcar_symbols) as two lists of strings.
        """
        for a in elem.findall("array"):
            if a.attrib["name"] == "atoms":
                atomic_symbols = [rc.find("c").text.strip()
                                  for rc in a.find("set")]
            elif a.attrib["name"] == "atomtypes":
                # Column 4 of the atomtypes table holds the POTCAR symbol.
                potcar_symbols = [rc.findall("c")[4].text.strip()
                                  for rc in a.find("set")]
        # ensure atomic symbols are valid elements
        def parse_atomic_symbol(symbol):
            try:
                return str(Element(symbol))
            # vasprun.xml uses X instead of Xe for xenon
            except ValueError as e:
                if symbol == "X":
                    return "Xe"
                elif symbol == "r":
                    # "r" is mapped to Zr (presumably a truncated symbol).
                    return "Zr"
                raise e
        elem.clear()
        return [parse_atomic_symbol(sym) for
                sym in atomic_symbols], potcar_symbols
    def _parse_kpoints(self, elem):
        """Parse a <kpoints> element.

        Returns:
            (Kpoints, actual_kpoints, weights): a Kpoints object describing
            the generation scheme (or explicit listing), the explicit list
            of k-points from the "kpointlist" varray, and their weights
            from the "weights" varray.
        """
        e = elem
        # NOTE(review): truthiness of an ElementTree Element depends on it
        # having children; a childless <generation> tag would be skipped.
        if elem.find("generation"):
            e = elem.find("generation")
        k = Kpoints("Kpoints from vasprun.xml")
        k.style = Kpoints.supported_modes.from_string(
            e.attrib["param"] if "param" in e.attrib else "Reciprocal")
        for v in e.findall("v"):
            name = v.attrib.get("name")
            toks = v.text.split()
            if name == "divisions":
                k.kpts = [[int(i) for i in toks]]
            elif name == "usershift":
                k.kpts_shift = [float(i) for i in toks]
            elif name in {"genvec1", "genvec2", "genvec3", "shift"}:
                setattr(k, name, [float(i) for i in toks])
        for va in elem.findall("varray"):
            name = va.attrib["name"]
            if name == "kpointlist":
                actual_kpoints = _parse_varray(va)
            elif name == "weights":
                weights = [i[0] for i in _parse_varray(va)]
        elem.clear()
        # For an explicit (Reciprocal) listing, rebuild the Kpoints object
        # from the actual k-point list instead of the generation scheme.
        if k.style == Kpoints.supported_modes.Reciprocal:
            k = Kpoints(comment="Kpoints from vasprun.xml",
                        style=Kpoints.supported_modes.Reciprocal,
                        num_kpts=len(k.kpts),
                        kpts=actual_kpoints, kpts_weights=weights)
        return k, actual_kpoints, weights
def _parse_structure(self, elem):
latt = _parse_varray(elem.find("crystal").find("varray"))
pos = _parse_varray(elem.find("varray"))
struct = Structure(latt, self.atomic_symbols, pos)
sdyn = elem.find("varray/[@name='selective']")
if sdyn:
struct.add_site_property('selective_dynamics',
_parse_varray(sdyn))
return struct
def _parse_diel(self, elem):
imag = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("imag").find("array")
.find("set").findall("r")]
real = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("real")
.find("array").find("set").findall("r")]
elem.clear()
return [e[0] for e in imag], \
[e[1:] for e in real], [e[1:] for e in imag]
def _parse_optical_transition(self, elem):
for va in elem.findall("varray"):
if va.attrib.get("name") == "opticaltransitions":
# opticaltransitions array contains oscillator strength and probability of transition
oscillator_strength = np.array(_parse_varray(va))[0:, ]
probability_transition = np.array(_parse_varray(va))[0:, 1]
return oscillator_strength, probability_transition
    def _parse_chemical_shielding_calculation(self, elem):
        """Parse a <calculation> element from an NMR chemical-shielding run.

        Unlike _parse_calculation, the scsteps are grouped into separate
        pseudo-ionic steps whenever the free energy jumps by >= 1.0 between
        consecutive steps.

        Returns:
            A list of step dicts; the last one is additionally updated with
            the energies of its final electronic step.
        """
        calculation = []
        istep = {}
        try:
            s = self._parse_structure(elem.find("structure"))
        except AttributeError:  # not all calculations have a structure
            s = None
            pass
        for va in elem.findall("varray"):
            istep[va.attrib["name"]] = _parse_varray(va)
        istep["structure"] = s
        istep["electronic_steps"] = []
        calculation.append(istep)
        for scstep in elem.findall("scstep"):
            try:
                d = {i.attrib["name"]: _vasprun_float(i.text)
                     for i in scstep.find("energy").findall("i")}
                cur_ene = d['e_fr_energy']
                # NOTE(review): "calculation" always has >= 1 entry here, so
                # min_steps is always 1; the NELMIN branch looks unreachable
                # — confirm intent before changing.
                min_steps = 1 if len(calculation) >= 1 else self.parameters.get("NELMIN", 5)
                if len(calculation[-1]["electronic_steps"]) <= min_steps:
                    calculation[-1]["electronic_steps"].append(d)
                else:
                    # Large energy jump => treat as the start of a new group.
                    last_ene = calculation[-1]["electronic_steps"][-1]["e_fr_energy"]
                    if abs(cur_ene - last_ene) < 1.0:
                        calculation[-1]["electronic_steps"].append(d)
                    else:
                        calculation.append({"electronic_steps": [d]})
            except AttributeError:  # not all calculations have an energy
                pass
        calculation[-1].update(calculation[-1]["electronic_steps"][-1])
        return calculation
def _parse_calculation(self, elem):
try:
istep = {i.attrib["name"]: float(i.text)
for i in elem.find("energy").findall("i")}
except AttributeError: # not all calculations have an energy
istep = {}
pass
esteps = []
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
esteps.append(d)
except AttributeError: # not all calculations have an energy
pass
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
pass
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["electronic_steps"] = esteps
istep["structure"] = s
elem.clear()
return istep
    def _parse_dos(self, elem):
        """Parse a <dos> element.

        Returns:
            (total Dos, integrated Dos, pdoss) where pdoss is a list (one
            entry per site, in site order) of {orbital: {Spin: densities}}.
        """
        efermi = float(elem.find("i").text)
        energies = None
        tdensities = {}
        idensities = {}
        for s in elem.find("total").find("array").find("set").findall("set"):
            data = np.array(_parse_varray(s))
            # Columns: energy, density, integrated density.
            energies = data[:, 0]
            spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
            tdensities[spin] = data[:, 1]
            idensities[spin] = data[:, 2]
        pdoss = []
        partial = elem.find("partial")
        if partial is not None:
            orbs = [ss.text for ss in partial.find("array").findall("field")]
            # The first field is the energy column, not an orbital label.
            orbs.pop(0)
            # lm-decomposed output uses labels like "px"/"dxy" (contain "x").
            lm = any(["x" in s for s in orbs])
            for s in partial.find("array").find("set").findall("set"):
                pdos = defaultdict(dict)
                for ss in s.findall("set"):
                    spin = Spin.up if ss.attrib["comment"] == "spin 1" else \
                        Spin.down
                    data = np.array(_parse_varray(ss))
                    nrow, ncol = data.shape
                    # Column j maps to Orbital(j-1) (lm) or OrbitalType(j-1).
                    for j in range(1, ncol):
                        if lm:
                            orb = Orbital(j - 1)
                        else:
                            orb = OrbitalType(j - 1)
                        pdos[orb][spin] = data[:, j]
                pdoss.append(pdos)
        elem.clear()
        return Dos(efermi, energies, tdensities), \
            Dos(efermi, energies, idensities), pdoss
def _parse_eigen(self, elem):
eigenvalues = defaultdict(list)
for s in elem.find("array").find("set").findall("set"):
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
for ss in s.findall("set"):
eigenvalues[spin].append(_parse_varray(ss))
eigenvalues = {spin: np.array(v) for spin, v in eigenvalues.items()}
elem.clear()
return eigenvalues
def _parse_projected_eigen(self, elem):
root = elem.find("array").find("set")
proj_eigen = defaultdict(list)
for s in root.findall("set"):
spin = int(re.match(r"spin(\d+)", s.attrib["comment"]).group(1))
# Force spin to be +1 or -1
spin = Spin.up if spin == 1 else Spin.down
for kpt, ss in enumerate(s.findall("set")):
dk = []
for band, sss in enumerate(ss.findall("set")):
db = _parse_varray(sss)
dk.append(db)
proj_eigen[spin].append(dk)
proj_eigen = {spin: np.array(v) for spin, v in proj_eigen.items()}
elem.clear()
return proj_eigen
def _parse_dynmat(self, elem):
hessian = []
eigenvalues = []
eigenvectors = []
for v in elem.findall("v"):
if v.attrib["name"] == "eigenvalues":
eigenvalues = [float(i) for i in v.text.split()]
for va in elem.findall("varray"):
if va.attrib["name"] == "hessian":
for v in va.findall("v"):
hessian.append([float(i) for i in v.text.split()])
elif va.attrib["name"] == "eigenvectors":
for v in va.findall("v"):
eigenvectors.append([float(i) for i in v.text.split()])
return hessian, eigenvalues, eigenvectors
class BSVasprun(Vasprun):
    """
    A highly optimized version of Vasprun that parses only eigenvalues for
    bandstructures. All other properties like structures, parameters,
    etc. are ignored.
    """

    def __init__(self, filename, parse_projected_eigen=False,
                 parse_potcar_file=False, occu_tol=1e-8):
        """
        Args:
            filename (str): vasprun.xml filename to parse.
            parse_projected_eigen (bool): Whether to parse the projected
                eigenvalues. Defaults to False.
            parse_potcar_file (bool/str): Whether to run update_potcar_spec
                after parsing (value is forwarded to it). Defaults to False.
            occu_tol (float): Occupancy tolerance; stored for use by
                inherited Vasprun properties (e.g. band-edge detection).
        """
        self.filename = filename
        self.occu_tol = occu_tol
        with zopen(filename, "rt") as f:
            self.efermi = None
            parsed_header = False
            self.eigenvalues = None
            self.projected_eigenvalues = None
            # Stream the XML; header sections are parsed once, then only
            # efermi / eigenvalues / projections / final structure are read.
            for event, elem in ET.iterparse(f):
                tag = elem.tag
                if not parsed_header:
                    if tag == "generator":
                        self.generator = self._parse_params(elem)
                    elif tag == "incar":
                        self.incar = self._parse_params(elem)
                    elif tag == "kpoints":
                        self.kpoints, self.actual_kpoints, \
                            self.actual_kpoints_weights = self._parse_kpoints(
                                elem)
                    elif tag == "parameters":
                        self.parameters = self._parse_params(elem)
                    elif tag == "atominfo":
                        self.atomic_symbols, self.potcar_symbols = \
                            self._parse_atominfo(elem)
                        self.potcar_spec = [{"titel": p,
                                             "hash": None} for
                                            p in self.potcar_symbols]
                        # atominfo is the last header section of interest.
                        parsed_header = True
                elif tag == "i" and elem.attrib.get("name") == "efermi":
                    self.efermi = float(elem.text)
                elif tag == "eigenvalues":
                    self.eigenvalues = self._parse_eigen(elem)
                elif parse_projected_eigen and tag == "projected":
                    self.projected_eigenvalues = self._parse_projected_eigen(
                        elem)
                elif tag == "structure" and elem.attrib.get("name") == \
                        "finalpos":
                    self.final_structure = self._parse_structure(elem)
        self.vasp_version = self.generator["version"]
        if parse_potcar_file:
            self.update_potcar_spec(parse_potcar_file)

    def as_dict(self):
        """
        Json-serializable dict representation.
        """
        d = {"vasp_version": self.vasp_version,
             "has_vasp_completed": True,
             "nsites": len(self.final_structure)}
        comp = self.final_structure.composition
        d["unit_cell_formula"] = comp.as_dict()
        d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
        d["pretty_formula"] = comp.reduced_formula
        symbols = [s.split()[1] for s in self.potcar_symbols]
        symbols = [re.split(r"_", s)[0] for s in symbols]
        d["is_hubbard"] = self.is_hubbard
        d["hubbards"] = self.hubbards
        unique_symbols = sorted(list(set(self.atomic_symbols)))
        d["elements"] = unique_symbols
        d["nelements"] = len(unique_symbols)
        d["run_type"] = self.run_type
        vin = {"incar": {k: v for k, v in self.incar.items()},
               "crystal": self.final_structure.as_dict(),
               "kpoints": self.kpoints.as_dict()}
        actual_kpts = [{"abc": list(self.actual_kpoints[i]),
                        "weight": self.actual_kpoints_weights[i]}
                       for i in range(len(self.actual_kpoints))]
        vin["kpoints"]["actual_points"] = actual_kpts
        vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
        vin["potcar_spec"] = self.potcar_spec
        vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
        vin["parameters"] = {k: v for k, v in self.parameters.items()}
        vin["lattice_rec"] = self.lattice_rec.as_dict()
        d["input"] = vin
        vout = {"crystal": self.final_structure.as_dict(),
                "efermi": self.efermi}
        if self.eigenvalues:
            # Re-key eigenvalues by k-point index, then spin.
            eigen = defaultdict(dict)
            for spin, values in self.eigenvalues.items():
                for i, v in enumerate(values):
                    eigen[i][str(spin)] = v
            vout["eigenvalues"] = eigen
            (gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
            vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
                             is_gap_direct=is_direct))
            # NOTE(review): this branch relies on "eigen" from the block
            # above, so projected eigenvalues without eigenvalues would
            # raise NameError — confirm that combination cannot occur.
            if self.projected_eigenvalues:
                peigen = []
                for i in range(len(eigen)):
                    peigen.append({})
                for spin, v in self.projected_eigenvalues.items():
                    for kpoint_index, vv in enumerate(v):
                        if str(spin) not in peigen[kpoint_index]:
                            peigen[kpoint_index][str(spin)] = vv
                vout['projected_eigenvalues'] = peigen
        d['output'] = vout
        return jsanitize(d, strict=True)
class Outcar(MSONable):
"""
Parser for data in OUTCAR that is not available in Vasprun.xml
Note, this class works a bit differently than most of the other
VaspObjects, since the OUTCAR can be very different depending on which
"type of run" performed.
Creating the OUTCAR class with a filename reads "regular parameters" that
are always present.
Args:
filename (str): OUTCAR filename to parse.
.. attribute:: magnetization
Magnetization on each ion as a tuple of dict, e.g.,
({"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005}, ... )
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: chemical_shielding
chemical shielding on each ion as a dictionary with core and valence contributions
.. attribute:: unsym_cs_tensor
Unsymmetrized chemical shielding tensor matrixes on each ion as a list.
e.g.,
[[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]],
...
[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]]]
.. attribute:: unsym_cs_tensor
G=0 contribution to chemical shielding. 2D rank 3 matrix
.. attribute:: cs_core_contribution
Core contribution to chemical shielding. dict. e.g.,
{'Mg': -412.8, 'C': -200.5, 'O': -271.1}
.. attribute:: efg
Electric Field Gradient (EFG) tensor on each ion as a tuple of dict, e.g.,
({"cq": 0.1, "eta", 0.2, "nuclear_quadrupole_moment": 0.3},
{"cq": 0.7, "eta", 0.8, "nuclear_quadrupole_moment": 0.9},
...)
.. attribute:: charge
Charge on each ion as a tuple of dict, e.g.,
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: is_stopped
True if OUTCAR is from a stopped run (using STOPCAR, see Vasp Manual).
.. attribute:: run_stats
Various useful run stats as a dict including "System time (sec)",
"Total CPU time used (sec)", "Elapsed time (sec)",
"Maximum memory used (kb)", "Average memory used (kb)",
"User time (sec)".
.. attribute:: elastic_tensor
Total elastic moduli (Kbar) is given in a 6x6 array matrix.
.. attribute:: drift
Total drift for each step in eV/Atom
.. attribute:: ngf
Dimensions for the Augementation grid
.. attribute: sampling_radii
Size of the sampling radii in VASP for the test charges for
the electrostatic potential at each atom. Total array size is the number
of elements present in the calculation
.. attribute: electrostatic_potential
Average electrostatic potential at each atomic position in order
of the atoms in POSCAR.
One can then call a specific reader depending on the type of run being
performed. These are currently: read_igpar(), read_lepsilon() and
read_lcalcpol(), read_core_state_eign(), read_avg_core_pot().
See the documentation of those methods for more documentation.
Authors: Rickard Armiento, Shyue Ping Ong
"""
def __init__(self, filename):
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+("
r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
all_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
all_lines.append(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().split(":")
run_stats[tok[0].strip()] = float(tok[1].strip())
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if all([nelect, total_mag is not None, efermi is not None,
run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
all_lines.reverse()
for clean in all_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i)
for i in re.findall(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.append(dict(zip(header, toks)))
elif read_mag_x:
mag_x.append(dict(zip(header, toks)))
elif read_mag_y:
mag_y.append(dict(zip(header, toks)))
elif read_mag_z:
mag_z.append(dict(zip(header, toks)))
elif clean.startswith('tot'):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.append({
key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]])
for key in mag_x[0].keys()
})
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats['cores'] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats['cores'] = line.split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read the drift:
self.read_pattern({
"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terminate_on_match=False,
postprocess=float)
self.drift = self.data.get('drift', [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({'spin': 'ISPIN = 2'})
if self.data.get('spin', []):
self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({'noncollinear': 'LNONCOLLINEAR = T'})
if self.data.get('noncollinear', []):
self.noncollinear = False
# Check if the calculation type is DFPT
self.dfpt = False
self.read_pattern({'ibrion': "IBRION =\s+([\-\d]+)"}, terminate_on_match=True,
postprocess=int)
if self.data.get("ibrion", [[0]])[0][0] > 6:
self.dfpt = True
self.read_internal_strain_tensor()
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({'epsilon': 'LEPSILON= T'})
if self.data.get('epsilon', []):
self.lepsilon = True
self.read_lepsilon()
# only read ionic contribution if DFPT is turned on
if self.dfpt:
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({'calcpol': 'LCALCPOL = T'})
if self.data.get('calcpol', []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.read_pattern({
'electrostatic': r"average \(electrostatic\) potential at core"})
if self.data.get('electrostatic', []):
self.read_electrostatic_potential()
self.nmr_cs = False
self.read_pattern({"nmr_cs": r"LCHIMAG = (T)"})
if self.data.get("nmr_cs", None):
self.nmr_cs = True
self.read_chemical_shielding()
self.read_cs_g0_contribution()
self.read_cs_core_contribution()
self.read_cs_raw_symmetrized_tensors()
self.nmr_efg = False
self.read_pattern({"nmr_efg": r"NMR quadrupolar parameters"})
if self.data.get("nmr_efg", None):
self.nmr_efg = True
self.read_nmr_efg()
self.read_nmr_efg_tensor()
def read_pattern(self, patterns, reverse=False, terminate_on_match=False,
postprocess=str):
"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(self.filename, patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(self, header_pattern, row_pattern, footer_pattern,
postprocess=str, attribute_name=None,
last_one_only=True):
"""
Parse table-like data. A table composes of three parts: header,
main body, footer. All the data matches "row pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern matches the
table header. This pattern should match all the text
immediately before the main body of the table. For multiple
sections table match the text until the section of
interest. MULTILINE and DOTALL options are enforced, as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present the parsed data
will be attached to "data. e.g. self.data["efg"] = [...]
last_one_only (bool): All the tables will be parsed, if this option
is set to True, only the last table will be returned. The
enclosing list will be removed. i.e. Only a single table will
be returned. Default to be True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row if either a list of
attribute values in case the the capturing group is defined without name in
row_pattern, or a dict in case that named capturing groups are defined by
row_pattern.
"""
with zopen(self.filename, 'rt') as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + \
row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
def read_electrostatic_potential(self):
"""
Parses the eletrostatic potential for the last ionic step
"""
pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
self.read_pattern(pattern, postprocess=int)
self.ngf = self.data.get("ngf", [[]])[0]
pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
table_pattern = r"((?:\s+\d+\s?[\.\-\d]+)+)"
footer_pattern = r"\s+E-fermi :"
pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
pots = "".join(itertools.chain.from_iterable(pots))
pots = re.findall(r"\s+\d+\s?([\.\-\d]+)+", pots)
pots = [float(f) for f in pots]
self.electrostatic_potential = pots
    def read_freq_dielectric(self):
        """
        Parses the frequency dependent dielectric function (obtained with
        LOPTICS). Frequencies (in eV) are in self.frequencies, and dielectric
        tensor function is given as self.dielectric_tensor_function.
        """
        header_pattern = r"\s+frequency dependent\s+IMAGINARY " \
                         r"DIELECTRIC FUNCTION \(independent particle, " \
                         r"no local field effects\)(\sdensity-density)*$"
        row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 7)
        lines = []
        # Read backwards so only the last such table in the file is kept.
        for l in reverse_readfile(self.filename):
            lines.append(l)
            if re.match(header_pattern, l):
                break
        freq = []
        data = {"REAL": [], "IMAGINARY": []}
        lines.reverse()
        count = 0
        component = "IMAGINARY"
        # The imaginary part is printed first; a dashed separator line
        # switches to the real part, a second separator ends the table.
        for l in lines[3:]:  # Skip the preamble.
            if re.match(row_pattern, l.strip()):
                toks = l.strip().split()
                if component == "IMAGINARY":
                    freq.append(float(toks[0]))
                # Columns are the 6 independent tensor components; rebuild
                # the symmetric 3x3 matrix.
                xx, yy, zz, xy, yz, xz = [float(t) for t in toks[1:]]
                matrix = [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]]
                data[component].append(matrix)
            elif re.match(r"\s*-+\s*", l):
                count += 1
                if count == 1:
                    component = "REAL"
                elif count == 2:
                    break
        self.frequencies = np.array(freq)
        self.dielectric_tensor_function = np.array(data["REAL"]) + \
            1j * np.array(data["IMAGINARY"])
    def read_chemical_shielding(self):
        """
        Parse the NMR chemical shieldings data. Only the second part "absolute, valence and core"
        will be parsed. And only the three right most field (ISO_SHIELDING, SPAN, SKEW) will be retrieved.

        Stores self.data["chemical_shielding"] = {"valence_only": ...,
        "valence_and_core": ...}, each a list of [iso_shift, span, skew]
        rows in the order of atoms from the OUTCAR (Maryland notation).
        """
        header_pattern = r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, " \
                         r"285 \(1993\)\)\s+" \
                         r"\s+-{50,}\s+" \
                         r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+" \
                         r"\s+-{20,}\s+-{20,}\s+" \
                         r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+" \
                         r"-{50,}\s*$"
        # Suffixes that distinguish the two table variants in the OUTCAR.
        first_part_pattern = r"\s+\(absolute, valence only\)\s+$"
        swallon_valence_body_pattern = r".+?\(absolute, valence and core\)\s+$"
        # First 4 numeric columns are skipped; only the last 3 are captured.
        row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r'\s+'.join(
            [r"([-]?\d+\.\d+)"] * 3)
        footer_pattern = r"-{50,}\s*$"
        h1 = header_pattern + first_part_pattern
        cs_valence_only = self.read_table_pattern(
            h1, row_pattern, footer_pattern, postprocess=float,
            last_one_only=True)
        h2 = header_pattern + swallon_valence_body_pattern
        cs_valence_and_core = self.read_table_pattern(
            h2, row_pattern, footer_pattern, postprocess=float,
            last_one_only=True)
        all_cs = {}
        for name, cs_table in [["valence_only", cs_valence_only],
                               ["valence_and_core", cs_valence_and_core]]:
            all_cs[name] = cs_table
        self.data["chemical_shielding"] = all_cs
    def read_cs_g0_contribution(self):
        """
        Parse the G=0 contribution of NMR chemical shielding.

        Stores the 3x3 contribution matrix (list of lists of floats) in
        self.data["cs_g0_contribution"].
        """
        header_pattern = r'^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n' \
                         r'^\s+-{50,}$\n' \
                         r'^\s+BDIR\s+X\s+Y\s+Z\s*$\n' \
                         r'^\s+-{50,}\s*$\n'
        # Leading field-direction index is skipped; X/Y/Z values captured.
        row_pattern = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 3)
        footer_pattern = r'\s+-{50,}\s*$'
        self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
                                last_one_only=True, attribute_name="cs_g0_contribution")
    def read_cs_core_contribution(self):
        """
        Parse the core contribution of NMR chemical shielding.

        Stores self.data["cs_core_contribution"] as a dict mapping element
        symbol to its core shift in ppm, e.g.
        {'Mg': -412.8, 'C': -200.5, 'O': -271.1}.
        """
        header_pattern = r'^\s+Core NMR properties\s*$\n' \
                         r'\n' \
                         r'^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n' \
                         r'^\s+-{20,}$\n'
        row_pattern = r'\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)'
        footer_pattern = r'\s+-{20,}\s*$'
        self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=str,
                                last_one_only=True, attribute_name="cs_core_contribution")
        # Collapse the parsed table rows into an element -> shift mapping.
        core_contrib = {d['element']: float(d['shift'])
                        for d in self.data["cs_core_contribution"]}
        self.data["cs_core_contribution"] = core_contrib
    def read_cs_raw_symmetrized_tensors(self):
        """
        Parse the matrix form of the NMR tensor before symmetrization.

        Stores self.data["unsym_cs_tensor"] as a list of 3x3 matrices
        (one per ion, in atom order).

        Raises:
            ValueError: if the UNSYMMETRIZED TENSORS section is absent.
        """
        header_pattern = r"\s+-{50,}\s+" \
                         r"\s+Absolute Chemical Shift tensors\s+" \
                         r"\s+-{50,}$"
        first_part_pattern = r"\s+UNSYMMETRIZED TENSORS\s+$"
        row_pattern = r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
        unsym_footer_pattern = r"^\s+SYMMETRIZED TENSORS\s+$"

        with zopen(self.filename, 'rt') as f:
            text = f.read()
        # Capture everything between the UNSYMMETRIZED and SYMMETRIZED
        # section markers, then split it into per-ion sub-tables.
        unsym_table_pattern_text = header_pattern + first_part_pattern + \
            r"(?P<table_body>.+)" + unsym_footer_pattern
        table_pattern = re.compile(unsym_table_pattern_text,
                                   re.MULTILINE | re.DOTALL)
        rp = re.compile(row_pattern)
        m = table_pattern.search(text)
        if m:
            table_text = m.group("table_body")
            micro_header_pattern = r"ion\s+\d+"
            micro_table_pattern_text = micro_header_pattern + \
                r"\s*^(?P<table_body>(?:\s*" + \
                row_pattern + r")+)\s+"
            micro_table_pattern = re.compile(micro_table_pattern_text,
                                             re.MULTILINE | re.DOTALL)
            unsym_tensors = []
            for mt in micro_table_pattern.finditer(table_text):
                table_body_text = mt.group("table_body")
                tensor_matrix = []
                for line in table_body_text.rstrip().split("\n"):
                    ml = rp.search(line)
                    processed_line = [float(v) for v in ml.groups()]
                    tensor_matrix.append(processed_line)
                unsym_tensors.append(tensor_matrix)
            self.data["unsym_cs_tensor"] = unsym_tensors
        else:
            raise ValueError("NMR UNSYMMETRIZED TENSORS is not found")
    def read_nmr_efg_tensor(self):
        """
        Parses the NMR Electric Field Gradient Raw Tensors.

        Returns:
            A list of Electric Field Gradient Tensors in the order of atoms
            from the OUTCAR (also stored in self.data["unsym_efg_tensor"]).
        """
        header_pattern = r'Electric field gradients \(V/A\^2\)\n' \
                         r'-*\n' \
                         r' ion\s+V_xx\s+V_yy\s+V_zz\s+V_xy\s+V_xz\s+V_yz\n'\
                         r'-*\n'
        row_pattern = r'\d+\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)'
        footer_pattern = r'-*\n'
        data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
        # Each row holds the 6 independent upper-triangular components;
        # expand them into full symmetric 3x3 tensors.
        tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]
        self.data["unsym_efg_tensor"] = tensors
        return tensors
    def read_nmr_efg(self):
        """
        Parse the NMR Electric Field Gradient interpreted values.

        Stores self.data["efg"]: a list of dicts (one per atom, in OUTCAR
        order) with keys "cq", "eta" and "nuclear_quadrupole_moment".
        """
        header_pattern = r'^\s+NMR quadrupolar parameters\s+$\n' \
                         r'^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n' \
                         r'^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n' \
                         r'^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n' \
                         r'^-{50,}$\n' \
                         r'^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n' \
                         r'^-{50,}\s*$\n'
        # Named groups become the dict keys of each parsed row.
        row_pattern = r'\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+' \
                      r'(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)'
        footer_pattern = r'-{50,}\s*$'
        self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
                                last_one_only=True, attribute_name="efg")
    def read_elastic_tensor(self):
        """
        Parse the elastic tensor data.

        Stores self.data["elastic_tensor"]: a 6x6 table (list of lists)
        of total elastic moduli in kBar from the OUTCAR.
        """
        header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+"\
                         r"Direction\s+([X-Z][X-Z]\s+)+"\
                         r"\-+"
        row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
        footer_pattern = r"\-+"
        et_table = self.read_table_pattern(header_pattern, row_pattern,
                                           footer_pattern, postprocess=float)
        self.data["elastic_tensor"] = et_table
    def read_piezo_tensor(self):
        """
        Parse the piezo tensor data.

        Stores self.data["piezo_tensor"]: the piezoelectric tensor table
        (list of lists of floats, C/m^2) from the OUTCAR.
        """
        header_pattern = r"PIEZOELECTRIC TENSOR for field in x, y, " \
                         r"z\s+\(C/m\^2\)\s+([X-Z][X-Z]\s+)+\-+"
        row_pattern = r"[x-z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
        footer_pattern = r"BORN EFFECTIVE"
        pt_table = self.read_table_pattern(header_pattern, row_pattern,
                                           footer_pattern, postprocess=float)
        self.data["piezo_tensor"] = pt_table
def read_corrections(self, reverse=True, terminate_on_match=True):
patterns = {
"dipol_quadrupol_correction": r"dipol\+quadrupol energy "
r"correction\s+([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=float)
self.data["dipol_quadrupol_correction"] = self.data["dipol_quadrupol_correction"][0][0]
def read_neb(self, reverse=True, terminate_on_match=True):
"""
Reads NEB data. This only works with OUTCARs from both normal
VASP NEB calculations or from the CI NEB method implemented by
Henkelman et al.
Args:
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match. Defaults to True here since we usually
want only the final value.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern. Defaults to True here
since we usually want only the final value.
Renders accessible:
tangent_force - Final tangent force.
energy - Final energy.
These can be accessed under Outcar.data[key]
"""
patterns = {
"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)",
"tangent_force": r"(NEB: projections on to tangent \(spring, REAL\)\s+\S+|tangential force \(eV/A\))\s+([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=str)
self.data["energy"] = float(self.data["energy"][0][0])
if self.data.get("tangent_force"):
self.data["tangent_force"] = float(
self.data["tangent_force"][0][1])
def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {} # will be dict (Spin.up/down) of array(3*float)
self.er_bp = {} # will be dics (Spin.up/down) of array(3*float)
self.er_ev_tot = None # will be array(3*float)
self.er_bp_tot = None # will be array(3*float)
self.p_elec = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = np.array(map(float,
match.groups()[1:4])) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, er_ev])
def er_bp(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2, er_bp])
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)])
results.context = Spin.up
search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_up])
def er_bp_up(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results,
line: results.context == Spin.up, er_bp_up])
def er_ev_dn(results, match):
results.er_ev[Spin.down] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
results.context = Spin.down
search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_dn])
def er_bp_dn(results, match):
results.er_bp[Spin.down] = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results,
line: results.context == Spin.down, er_bp_dn])
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_elc])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^.*ionic dipole moment: "
r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_ion])
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and \
self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and \
self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
def read_internal_strain_tensor(self):
"""
Reads the internal strain tensor and populates self.interna_strain_tensor with an array of voigt notation
tensors for each site.
"""
search = []
def internal_strain_start(results, match):
results.internal_strain_ion = int(match.group(1)) - 1
results.internal_strain_tensor.append(np.zeros((3, 6)))
search.append([r"INTERNAL STRAIN TENSOR FOR ION\s+(\d+)\s+for displacements in x,y,z \(eV/Angst\):",
None, internal_strain_start])
def internal_strain_data(results, match):
if match.group(1).lower() == "x":
index = 0
elif match.group(1).lower() == "y":
index = 1
elif match.group(1).lower() == "z":
index = 2
else:
raise Exception(
"Couldn't parse row index from symbol for internal strain tensor: {}".format(match.group(1)))
results.internal_strain_tensor[results.internal_strain_ion][index] = np.array([float(match.group(i))
for i in range(2, 8)])
if index == 2:
results.internal_strain_ion = None
search.append([r"^\s+([x,y,z])\s+" + r"([-]?\d+\.\d+)\s+" * 6, lambda results,
line: results.internal_strain_ion is not None, internal_strain_data])
self.internal_strain_ion = None
self.internal_strain_tensor = []
micro_pyawk(self.filename, search, self)
    def read_lepsilon(self):
        """
        Reads a LEPSILON run, populating self.dielectric_tensor (3x3 list),
        self.piezo_tensor (3x6 list) and self.born (array of 3x3 Born
        effective charge tensors, one per ion).

        The parse is driven by micro_pyawk hooks with small index-based
        state machines: an index of None means "outside the section", -1
        means "header seen, waiting for separator", and >= 0 is the next
        row to fill.

        Raises:
            Exception: if the file cannot be parsed as a LEPSILON OUTCAR.
        """
        # variables to be filled
        try:
            search = []
            # --- Static dielectric tensor section ---
            def dielectric_section_start(results, match):
                results.dielectric_index = -1
            search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR \(", None,
                           dielectric_section_start])
            def dielectric_section_start2(results, match):
                results.dielectric_index = 0
            search.append(
                [r"-------------------------------------",
                 lambda results, line: results.dielectric_index == -1,
                 dielectric_section_start2])
            def dielectric_data(results, match):
                # Fill one row of the 3x3 tensor per matching line.
                results.dielectric_tensor[results.dielectric_index, :] = \
                    np.array([float(match.group(i)) for i in range(1, 4)])
                results.dielectric_index += 1
            search.append(
                [r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
                 lambda results, line: results.dielectric_index >= 0
                 if results.dielectric_index is not None
                 else None,
                 dielectric_data])
            def dielectric_section_stop(results, match):
                results.dielectric_index = None
            search.append(
                [r"-------------------------------------",
                 lambda results, line: results.dielectric_index >= 1
                 if results.dielectric_index is not None
                 else None,
                 dielectric_section_stop])
            self.dielectric_index = None
            self.dielectric_tensor = np.zeros((3, 3))
            # --- Piezoelectric tensor section ---
            def piezo_section_start(results, match):
                results.piezo_index = 0
            search.append([r"PIEZOELECTRIC TENSOR for field in x, y, z "
                           r"\(C/m\^2\)",
                           None, piezo_section_start])
            def piezo_data(results, match):
                # Fill one row of the 3x6 tensor per matching line.
                results.piezo_tensor[results.piezo_index, :] = \
                    np.array([float(match.group(i)) for i in range(1, 7)])
                results.piezo_index += 1
            search.append(
                [r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
                 r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
                 r" +([-0-9.Ee+]+)*$",
                 lambda results, line: results.piezo_index >= 0
                 if results.piezo_index is not None
                 else None,
                 piezo_data])
            def piezo_section_stop(results, match):
                results.piezo_index = None
            search.append(
                [r"-------------------------------------",
                 lambda results, line: results.piezo_index >= 1
                 if results.piezo_index is not None
                 else None,
                 piezo_section_stop])
            self.piezo_index = None
            self.piezo_tensor = np.zeros((3, 6))
            # --- Born effective charges section ---
            def born_section_start(results, match):
                results.born_ion = -1
            search.append([r"BORN EFFECTIVE CHARGES " +
                           r"\(in e, cummulative output\)",
                           None, born_section_start])
            def born_ion(results, match):
                # New ion: track its 0-based index and allocate a 3x3.
                results.born_ion = int(match.group(1)) - 1
                results.born.append(np.zeros((3, 3)))
            search.append([r"ion +([0-9]+)", lambda results,
                           line: results.born_ion is not None, born_ion])
            def born_data(results, match):
                # Row index (1-3) comes from the line itself.
                results.born[results.born_ion][int(match.group(1)) - 1, :] = \
                    np.array([float(match.group(i)) for i in range(2, 5)])
            search.append(
                [r"^ *([1-3]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+)$",
                 lambda results, line: results.born_ion >= 0
                 if results.born_ion is not None
                 else results.born_ion,
                 born_data])
            def born_section_stop(results, match):
                results.born_ion = None
            search.append(
                [r"-------------------------------------",
                 lambda results, line: results.born_ion >= 1
                 if results.born_ion is not None
                 else results.born_ion,
                 born_section_stop])
            self.born_ion = None
            self.born = []
            micro_pyawk(self.filename, search, self)
            self.born = np.array(self.born)
            # Convert to plain lists for serializability.
            self.dielectric_tensor = self.dielectric_tensor.tolist()
            self.piezo_tensor = self.piezo_tensor.tolist()
        except:
            raise Exception("LEPSILON OUTCAR could not be parsed.")
    def read_lepsilon_ionic(self):
        """
        Reads the ionic contributions of a LEPSILON run, populating
        self.dielectric_ionic_tensor (3x3 list) and
        self.piezo_ionic_tensor (3x6 list).

        Uses the same micro_pyawk index-based state machine as
        read_lepsilon: None = outside the section, -1 = header seen,
        >= 0 = next row to fill.

        Raises:
            Exception: if the ionic sections cannot be parsed.
        """
        # variables to be filled
        try:
            search = []
            # --- Ionic dielectric tensor section ---
            def dielectric_section_start(results, match):
                results.dielectric_ionic_index = -1
            search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC", None,
                           dielectric_section_start])
            def dielectric_section_start2(results, match):
                results.dielectric_ionic_index = 0
            search.append(
                [r"-------------------------------------",
                 lambda results, line: results.dielectric_ionic_index == -1
                 if results.dielectric_ionic_index is not None
                 else results.dielectric_ionic_index,
                 dielectric_section_start2])
            def dielectric_data(results, match):
                # Fill one row of the 3x3 tensor per matching line.
                results.dielectric_ionic_tensor[results.dielectric_ionic_index, :] = \
                    np.array([float(match.group(i)) for i in range(1, 4)])
                results.dielectric_ionic_index += 1
            search.append(
                [r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
                 lambda results, line: results.dielectric_ionic_index >= 0
                 if results.dielectric_ionic_index is not None
                 else results.dielectric_ionic_index,
                 dielectric_data])
            def dielectric_section_stop(results, match):
                results.dielectric_ionic_index = None
            search.append(
                [r"-------------------------------------",
                 lambda results, line: results.dielectric_ionic_index >= 1
                 if results.dielectric_ionic_index is not None
                 else results.dielectric_ionic_index,
                 dielectric_section_stop])
            self.dielectric_ionic_index = None
            self.dielectric_ionic_tensor = np.zeros((3, 3))
            # --- Ionic piezoelectric tensor section ---
            def piezo_section_start(results, match):
                results.piezo_ionic_index = 0
            search.append([r"PIEZOELECTRIC TENSOR IONIC CONTR for field in "
                           r"x, y, z ",
                           None, piezo_section_start])
            def piezo_data(results, match):
                # Fill one row of the 3x6 tensor per matching line.
                results.piezo_ionic_tensor[results.piezo_ionic_index, :] = \
                    np.array([float(match.group(i)) for i in range(1, 7)])
                results.piezo_ionic_index += 1
            search.append(
                [r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
                 r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
                 r" +([-0-9.Ee+]+)*$",
                 lambda results, line: results.piezo_ionic_index >= 0
                 if results.piezo_ionic_index is not None
                 else results.piezo_ionic_index,
                 piezo_data])
            def piezo_section_stop(results, match):
                results.piezo_ionic_index = None
            search.append(
                ["-------------------------------------",
                 lambda results, line: results.piezo_ionic_index >= 1
                 if results.piezo_ionic_index is not None
                 else results.piezo_ionic_index,
                 piezo_section_stop])
            self.piezo_ionic_index = None
            self.piezo_ionic_tensor = np.zeros((3, 6))
            micro_pyawk(self.filename, search, self)
            # Convert to plain lists for serializability.
            self.dielectric_ionic_tensor = self.dielectric_ionic_tensor.tolist()
            self.piezo_ionic_tensor = self.piezo_ionic_tensor.tolist()
        except:
            raise Exception(
                "ionic part of LEPSILON OUTCAR could not be parsed.")
    def read_lcalcpol(self):
        """
        Reads the dipole moments from an LCALCPOL run, populating
        self.p_elec, self.p_ion and, for spin-polarized collinear runs,
        self.p_sp1 and self.p_sp2 (each a length-3 np.array or None).

        Raises:
            Exception: if the file cannot be parsed as an LCALCPOL OUTCAR.
        """
        # variables to be filled
        self.p_elec = None
        self.p_sp1 = None
        self.p_sp2 = None
        self.p_ion = None
        try:
            search = []
            # Always present spin/non-spin
            def p_elec(results, match):
                results.p_elec = np.array([float(match.group(1)),
                                           float(match.group(2)),
                                           float(match.group(3))])
            search.append([r"^.*Total electronic dipole moment: "
                           r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                           r"*([-0-9.Ee+]*) *\)",
                           None, p_elec])
            # If spin-polarized (and not noncollinear)
            # save spin-polarized electronic values
            # NOTE(review): assumes self.spin and self.noncollinear were
            # set earlier (presumably in __init__) — confirm.
            if self.spin and not self.noncollinear:
                def p_sp1(results, match):
                    results.p_sp1 = np.array([float(match.group(1)),
                                              float(match.group(2)),
                                              float(match.group(3))])
                search.append([r"^.*p\[sp1\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                               r"*([-0-9.Ee+]*) *\)",
                               None, p_sp1])
                def p_sp2(results, match):
                    results.p_sp2 = np.array([float(match.group(1)),
                                              float(match.group(2)),
                                              float(match.group(3))])
                search.append([r"^.*p\[sp2\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                               r"*([-0-9.Ee+]*) *\)",
                               None, p_sp2])
            def p_ion(results, match):
                results.p_ion = np.array([float(match.group(1)),
                                          float(match.group(2)),
                                          float(match.group(3))])
            search.append([r"^.*Ionic dipole moment: *p\[ion\]="
                           r"\( *([-0-9.Ee+]*)"
                           r" *([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
                           None, p_ion])
            micro_pyawk(self.filename, search, self)
        except:
            raise Exception("LCALCPOL OUTCAR could not be parsed.")
def read_pseudo_zval(self):
"""
Create pseudopotential ZVAL dictionary.
"""
try:
def poscar_line(results, match):
poscar_line = match.group(1)
results.poscar_line = re.findall(r'[A-Z][a-z]?', poscar_line)
def zvals(results, match):
zvals = match.group(1)
results.zvals = map(float, re.findall(r'-?\d+\.\d*', zvals))
search = []
search.append([r'^.*POSCAR.*=(.*)', None, poscar_line])
search.append([r'^\s+ZVAL.*=(.*)', None, zvals])
micro_pyawk(self.filename, search, self)
zval_dict = {}
for x, y in zip(self.poscar_line, self.zvals):
zval_dict.update({x: y})
self.zval_dict = zval_dict
# Clean-up
del(self.poscar_line)
del(self.zvals)
except:
raise Exception("ZVAL dict could not be parsed.")
    def read_core_state_eigen(self):
        """
        Read the core state eigenenergies at each ionic step.

        Returns:
            A list of dict over the atoms such as [{"AO":[core state eig]}].
            The core state eigenenergy list for each AO is over all ionic
            steps.

        Example:
            The core state eigenenergy of the 2s AO of the 6th atom of the
            structure at the last ionic step is [5]["2s"][-1]
        """
        with zopen(self.filename, "rt") as foutcar:
            line = foutcar.readline()
            while line != "":
                line = foutcar.readline()
                if "NIONS =" in line:
                    # Allocate one {AO: [eigs]} dict per atom.
                    natom = int(line.split("NIONS =")[1])
                    cl = [defaultdict(list) for i in range(natom)]
                if "the core state eigen" in line:
                    iat = -1
                    while line != "":
                        line = foutcar.readline()
                        # don't know number of lines to parse without knowing
                        # specific species, so stop parsing when we reach
                        # "E-fermi" instead
                        if "E-fermi" in line:
                            break
                        data = line.split()
                        # data will contain odd number of elements if it is
                        # the start of a new entry, or even number of elements
                        # if it continues the previous entry
                        if len(data) % 2 == 1:
                            iat += 1  # started parsing a new ion
                            data = data[1:]  # remove element with ion number
                        # Remaining tokens are (AO label, eigenvalue) pairs.
                        for i in range(0, len(data), 2):
                            cl[iat][data[i]].append(float(data[i + 1]))
        return cl
    def read_avg_core_poten(self):
        """
        Read the core potential at each ionic step.

        Returns:
            A list for each ionic step containing a list of the average core
            potentials for each atom: [[avg core pot]].

        Example:
            The average core potential of the 2nd atom of the structure at the
            last ionic step is: [-1][1]
        """
        def pairwise(iterable):
            # Non-overlapping pairs: s -> (s0,s1), (s2,s3), (s4,s5), ...
            # (zip of one shared iterator consumes two items per pair)
            a = iter(iterable)
            return zip(a, a)
        with zopen(self.filename, "rt") as foutcar:
            line = foutcar.readline()
            aps = []
            while line != "":
                line = foutcar.readline()
                if "the norm of the test charge is" in line:
                    # Start of a new ionic step's potential listing.
                    ap = []
                    while line != "":
                        line = foutcar.readline()
                        # don't know number of lines to parse without knowing
                        # specific species, so stop parsing when we reach
                        # "E-fermi" instead
                        if "E-fermi" in line:
                            aps.append(ap)
                            break
                        data = line.split()
                        # the average core potentials of up to 5 elements are
                        # given per line; each entry is an (index, value) pair
                        for i, pot in pairwise(data):
                            ap.append(float(pot))
        return aps
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__, "efermi": self.efermi,
"run_stats": self.run_stats, "magnetization": self.magnetization,
"charge": self.charge, "total_magnetization": self.total_mag,
"nelect": self.nelect, "is_stopped": self.is_stopped,
"drift": self.drift, "ngf": self.ngf,
"sampling_radii": self.sampling_radii,
"electrostatic_potential": self.electrostatic_potential}
if self.lepsilon:
d.update({"piezo_tensor": self.piezo_tensor,
"dielectric_tensor": self.dielectric_tensor,
"born": self.born})
if self.dfpt:
d.update({"internal_strain_tensor": self.interna_strain_tensor})
if self.dfpt and self.lepsilon:
d.update({"piezo_ionic_tensor": self.piezo_ionic_tensor,
"dielectric_ionic_tensor": self.dielectric_ionic_tensor})
if self.lcalcpol:
d.update({'p_elec': self.p_elec,
'p_ion': self.p_ion})
if self.spin and not self.noncollinear:
d.update({'p_sp1': self.p_sp1,
'p_sp2': self.p_sp2})
d.update({'zval_dict': self.zval_dict})
if self.nmr_cs:
d.update({"nmr_cs": {"valence and core": self.data["chemical_shielding"]["valence_and_core"],
"valence_only": self.data["chemical_shielding"]["valence_only"],
"g0": self.data["cs_g0_contribution"],
"core": self.data["cs_core_contribution"],
"raw": self.data["unsym_cs_tensor"]}})
if self.nmr_efg:
d.update({"nmr_efg": {"raw": self.data["unsym_efg_tensor"],
"parameters": self.data["efg"]}})
return d
def read_fermi_contact_shift(self):
'''
output example:
Fermi contact (isotropic) hyperfine coupling parameter (MHz)
-------------------------------------------------------------
ion A_pw A_1PS A_1AE A_1c A_tot
-------------------------------------------------------------
1 -0.002 -0.002 -0.051 0.000 -0.052
2 -0.002 -0.002 -0.051 0.000 -0.052
3 0.056 0.056 0.321 -0.048 0.321
-------------------------------------------------------------
, which corresponds to
[[-0.002, -0.002, -0.051, 0.0, -0.052],
[-0.002, -0.002, -0.051, 0.0, -0.052],
[0.056, 0.056, 0.321, -0.048, 0.321]] from 'fch' data
'''
# Fermi contact (isotropic) hyperfine coupling parameter (MHz)
header_pattern1 = r"\s*Fermi contact \(isotropic\) hyperfine coupling parameter \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_pw\s+A_1PS\s+A_1AE\s+A_1c\s+A_tot\s+" \
r"\s*\-+"
row_pattern1 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 5)
footer_pattern = r"\-+"
fch_table = self.read_table_pattern(header_pattern1, row_pattern1,
footer_pattern, postprocess=float,
last_one_only=True)
# Dipolar hyperfine coupling parameters (MHz)
header_pattern2 = r"\s*Dipolar hyperfine coupling parameters \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+A_xy\s+A_xz\s+A_yz\s+" \
r"\s*\-+"
row_pattern2 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 6)
dh_table = self.read_table_pattern(header_pattern2, row_pattern2,
footer_pattern, postprocess=float,
last_one_only=True)
# Total hyperfine coupling parameters after diagonalization (MHz)
header_pattern3 = r"\s*Total hyperfine coupling parameters after diagonalization \(MHz\)\s+" \
r"\s*\(convention: \|A_zz\| > \|A_xx\| > \|A_yy\|\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+asymmetry \(A_yy - A_xx\)/ A_zz\s+" \
r"\s*\-+"
row_pattern3 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 4)
th_table = self.read_table_pattern(header_pattern3, row_pattern3,
footer_pattern, postprocess=float,
last_one_only=True)
fc_shift_table = {'fch': fch_table, 'dh': dh_table, 'th': th_table}
self.data["fermi_contact_shift"] = fc_shift_table
class VolumetricData(object):
    """
    Simple volumetric object for reading LOCPOT and CHGCAR type files.

    .. attribute:: structure
        Structure associated with the Volumetric Data object
    .. attribute:: is_spin_polarized
        True if run is spin polarized
    .. attribute:: dim
        Tuple of dimensions of volumetric grid in each direction (nx, ny, nz).
    .. attribute:: data
        Actual data as a dict of {string: np.array}. The string are "total"
        and "diff", in accordance to the output format of vasp LOCPOT and
        CHGCAR files where the total spin density is written first, followed
        by the difference spin density.
    .. attribute:: ngridpts
        Total number of grid points in volumetric data.
    """
    def __init__(self, structure, data, distance_matrix=None, data_aug=None):
        """
        Typically, this constructor is not used directly and the static
        from_file constructor is used. This constructor is designed to allow
        summation and other operations between VolumetricData objects.

        Args:
            structure: Structure associated with the volumetric data
            data: Actual volumetric data.
            data_aug: Any extra information associated with volumetric data
                (typically augmentation charges)
            distance_matrix: A pre-computed distance matrix if available.
                Useful so pass distance_matrices between sums,
                shortcircuiting an otherwise expensive operation.
        """
        self.structure = structure
        # Spin polarization / SOC inferred from how many datasets are present
        # (1: non-spin, 2: total+diff, 4: total+diff_x/y/z).
        self.is_spin_polarized = len(data) >= 2
        self.is_soc = len(data) >= 4
        self.dim = data["total"].shape
        self.data = data
        self.data_aug = data_aug if data_aug else {}
        self.ngridpts = self.dim[0] * self.dim[1] * self.dim[2]
        # lazy init the spin data since this is not always needed.
        self._spin_data = {}
        self._distance_matrix = {} if not distance_matrix else distance_matrix
    @property
    def spin_data(self):
        """
        The data decomposed into actual spin data as {spin: data}.
        Essentially, this provides the actual Spin.up and Spin.down data
        instead of the total and diff. Note that by definition, a
        non-spin-polarized run would have Spin.up data == Spin.down data.
        """
        if not self._spin_data:
            spin_data = dict()
            # up = (total + diff) / 2, down = (total - diff) / 2;
            # "diff" defaults to 0 for non-spin-polarized data.
            spin_data[Spin.up] = 0.5 * (self.data["total"] +
                                        self.data.get("diff", 0))
            spin_data[Spin.down] = 0.5 * (self.data["total"] -
                                          self.data.get("diff", 0))
            self._spin_data = spin_data
        return self._spin_data
    def get_axis_grid(self, ind):
        """
        Returns the grid for a particular axis.

        Args:
            ind (int): Axis index.
        """
        ng = self.dim
        num_pts = ng[ind]
        lengths = self.structure.lattice.abc
        return [i / num_pts * lengths[ind] for i in range(num_pts)]
    def __add__(self, other):
        return self.linear_add(other, 1.0)
    def __sub__(self, other):
        return self.linear_add(other, -1.0)
    def linear_add(self, other, scale_factor=1.0):
        """
        Method to do a linear sum of volumetric objects. Used by + and -
        operators as well. Returns a VolumetricData object containing the
        linear sum.

        Args:
            other (VolumetricData): Another VolumetricData object
            scale_factor (float): Factor to scale the other data by.

        Returns:
            VolumetricData corresponding to self + scale_factor * other.
        """
        if self.structure != other.structure:
            raise ValueError("Adding or subtraction operations can only be "
                             "performed for volumetric data with the exact "
                             "same structure.")
        # To add checks
        data = {}
        for k in self.data.keys():
            data[k] = self.data[k] + scale_factor * other.data[k]
        # NOTE(review): data_aug of both operands is dropped here — confirm
        # that augmentation data is not meaningful for sums.
        return VolumetricData(self.structure, data, self._distance_matrix)
    @staticmethod
    def parse_file(filename):
        """
        Convenience method to parse a generic volumetric data file in the vasp
        like format. Used by subclasses for parsing file.

        Args:
            filename (str): Path of file to parse

        Returns:
            (poscar, data)
        """
        poscar_read = False
        poscar_string = []
        dataset = []
        all_dataset = []
        # for holding any strings in input that are not Poscar
        # or VolumetricData (typically augmentation charges)
        all_dataset_aug = {}
        dim = None
        dimline = None
        read_dataset = False
        ngrid_pts = 0
        data_count = 0
        poscar = None
        with zopen(filename, "rt") as f:
            for line in f:
                original_line = line
                line = line.strip()
                if read_dataset:
                    toks = line.split()
                    for tok in toks:
                        if data_count < ngrid_pts:
                            # This complicated procedure is necessary because
                            # vasp outputs x as the fastest index, followed by y
                            # then z.
                            x = data_count % dim[0]
                            y = int(math.floor(data_count / dim[0])) % dim[1]
                            z = int(math.floor(data_count / dim[0] / dim[1]))
                            dataset[x, y, z] = float(tok)
                            data_count += 1
                    if data_count >= ngrid_pts:
                        # Current grid is complete; store it and reset.
                        read_dataset = False
                        data_count = 0
                        all_dataset.append(dataset)
                elif not poscar_read:
                    # Accumulate header lines until the first blank line,
                    # which terminates the POSCAR section.
                    if line != "" or len(poscar_string) == 0:
                        poscar_string.append(line)
                    elif line == "":
                        poscar = Poscar.from_string("\n".join(poscar_string))
                        poscar_read = True
                elif not dim:
                    # First grid-dimension line (e.g. "NGX NGY NGZ").
                    dim = [int(i) for i in line.split()]
                    ngrid_pts = dim[0] * dim[1] * dim[2]
                    dimline = line
                    read_dataset = True
                    dataset = np.zeros(dim)
                elif line == dimline:
                    # when line == dimline, expect volumetric data to follow
                    # so set read_dataset to True
                    read_dataset = True
                    dataset = np.zeros(dim)
                else:
                    # store any extra lines that were not part of the
                    # volumetric data so we know which set of data the extra
                    # lines are associated with
                    key = len(all_dataset) - 1
                    if key not in all_dataset_aug:
                        all_dataset_aug[key] = []
                    all_dataset_aug[key].append(original_line)
            if len(all_dataset) == 4:
                # Noncollinear: total + three magnetization components.
                data = {"total": all_dataset[0], "diff_x": all_dataset[1],
                        "diff_y": all_dataset[2], "diff_z": all_dataset[3]}
                data_aug = {"total": all_dataset_aug.get(0, None),
                            "diff_x": all_dataset_aug.get(1, None),
                            "diff_y": all_dataset_aug.get(2, None),
                            "diff_z": all_dataset_aug.get(3, None)}
                # construct a "diff" dict for scalar-like magnetization density,
                # referenced to an arbitrary direction (using same method as
                # pymatgen.electronic_structure.core.Magmom, see
                # Magmom documentation for justification for this)
                # TODO: re-examine this, and also similar behavior in
                # Magmom - @mkhorton
                # TODO: does CHGCAR change with different SAXIS?
                diff_xyz = np.array([data["diff_x"], data["diff_y"],
                                     data["diff_z"]])
                diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2]))
                ref_direction = np.array([1.01, 1.02, 1.03])
                ref_sign = np.sign(np.dot(ref_direction, diff_xyz))
                diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)
                data["diff"] = diff.reshape((dim[0], dim[1], dim[2]))
            elif len(all_dataset) == 2:
                # Collinear spin-polarized: total + diff.
                data = {"total": all_dataset[0], "diff": all_dataset[1]}
                data_aug = {"total": all_dataset_aug.get(0, None),
                            "diff": all_dataset_aug.get(1, None)}
            else:
                data = {"total": all_dataset[0]}
                data_aug = {"total": all_dataset_aug.get(0, None)}
            return poscar, data, data_aug
    def write_file(self, file_name, vasp4_compatible=False):
        """
        Write the VolumetricData object to a vasp compatible file.

        Args:
            file_name (str): Path to a file
            vasp4_compatible (bool): True if the format is vasp4 compatible
        """
        def _print_fortran_float(f):
            """
            Fortran codes print floats with a leading zero in scientific
            notation. When writing CHGCAR files, we adopt this convention
            to ensure written CHGCAR files are byte-to-byte identical to
            their input files as far as possible.
            :param f: float
            :return: str
            """
            s = "{:.10E}".format(f)
            # Shift the mantissa right by one digit and bump the exponent,
            # e.g. 1.2345678900E+00 -> 0.1234567890E+01.
            if f >= 0:
                return "0." + s[0] + s[2:12] + 'E' + "{:+03}".format(int(s[13:]) + 1)
            else:
                return "-." + s[1] + s[3:13] + 'E' + "{:+03}".format(int(s[14:]) + 1)
        with zopen(file_name, "wt") as f:
            p = Poscar(self.structure)
            # use original name if it's been set (e.g. from Chgcar)
            comment = getattr(self, 'name', p.comment)
            lines = comment + "\n"
            lines += "   1.00000000000000\n"
            latt = self.structure.lattice.matrix
            lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
            lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
            lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
            if not vasp4_compatible:
                # vasp5+ includes the element symbol line.
                lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
            lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
            lines += "Direct\n"
            for site in self.structure:
                lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
            lines += " \n"
            f.write(lines)
            a = self.dim
            def write_spin(data_type):
                lines = []
                count = 0
                f.write("   {}   {}   {}\n".format(a[0], a[1], a[2]))
                # x is the fastest index (innermost), matching VASP order.
                for (k, j, i) in itertools.product(list(range(a[2])),
                                                   list(range(a[1])),
                                                   list(range(a[0]))):
                    lines.append(_print_fortran_float(self.data[data_type][i, j, k]))
                    count += 1
                    # Five values per output line.
                    if count % 5 == 0:
                        f.write(" " + "".join(lines) + "\n")
                        lines = []
                    else:
                        lines.append(" ")
                f.write(" " + "".join(lines) + " \n")
                # Write back any augmentation lines verbatim.
                f.write("".join(self.data_aug.get(data_type, [])))
            write_spin("total")
            if self.is_spin_polarized and self.is_soc:
                write_spin("diff_x")
                write_spin("diff_y")
                write_spin("diff_z")
            elif self.is_spin_polarized:
                write_spin("diff")
    def get_integrated_diff(self, ind, radius, nbins=1):
        """
        Get integrated difference of atom index ind up to radius. This can be
        an extremely computationally intensive process, depending on how many
        grid points are in the VolumetricData.

        Args:
            ind (int): Index of atom.
            radius (float): Radius of integration.
            nbins (int): Number of bins. Defaults to 1. This allows one to
                obtain the charge integration up to a list of the cumulative
                charge integration values for radii for [radius/nbins,
                2 * radius/nbins, ....].

        Returns:
            Differential integrated charge as a np array of [[radius, value],
            ...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],
            data[:,1])
        """
        # For non-spin-polarized runs, this is zero by definition.
        if not self.is_spin_polarized:
            radii = [radius / nbins * (i + 1) for i in range(nbins)]
            data = np.zeros((nbins, 2))
            data[:, 0] = radii
            return data
        struct = self.structure
        a = self.dim
        # Cache the (expensive) points-in-sphere query per atom index,
        # keyed on the largest radius computed so far.
        if ind not in self._distance_matrix or\
                self._distance_matrix[ind]["max_radius"] < radius:
            coords = []
            for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
                coords.append([x / a[0], y / a[1], z / a[2]])
            sites_dist = struct.lattice.get_points_in_sphere(
                coords, struct[ind].coords, radius)
            self._distance_matrix[ind] = {"max_radius": radius,
                                          "data": np.array(sites_dist)}
        data = self._distance_matrix[ind]["data"]
        # Use boolean indexing to find all charges within the desired distance.
        inds = data[:, 1] <= radius
        dists = data[inds, 1]
        # Map fractional coordinates back to grid indices.
        data_inds = np.rint(np.mod(list(data[inds, 0]), 1) *
                            np.tile(a, (len(dists), 1))).astype(int)
        vals = [self.data["diff"][x, y, z] for x, y, z in data_inds]
        hist, edges = np.histogram(dists, bins=nbins,
                                   range=[0, radius],
                                   weights=vals)
        data = np.zeros((nbins, 2))
        data[:, 0] = edges[1:]
        # Cumulative sum normalized by the total number of grid points.
        data[:, 1] = [sum(hist[0:i + 1]) / self.ngridpts
                      for i in range(nbins)]
        return data
    def get_average_along_axis(self, ind):
        """
        Get the averaged total of the volumetric data a certain axis direction.
        For example, useful for visualizing Hartree Potentials from a LOCPOT
        file.

        Args:
            ind (int): Index of axis.

        Returns:
            Average total along axis
        """
        m = self.data["total"]
        ng = self.dim
        # Sum over the two perpendicular axes, then divide by their sizes.
        if ind == 0:
            total = np.sum(np.sum(m, axis=1), 1)
        elif ind == 1:
            total = np.sum(np.sum(m, axis=0), 1)
        else:
            total = np.sum(np.sum(m, axis=0), 0)
        return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
    def to_hdf5(self, filename):
        """
        Writes the VolumetricData to a HDF5 format, which is a highly optimized
        format for reading storing large data. The mapping of the VolumetricData
        to this file format is as follows:

        VolumetricData.data -> f["vdata"]
        VolumetricData.structure ->
            f["Z"]: Sequence of atomic numbers
            f["fcoords"]: Fractional coords
            f["lattice"]: Lattice in the pymatgen.core.lattice.Lattice matrix
                format
            f.attrs["structure_json"]: String of json representation

        Args:
            filename (str): Filename to output to.
        """
        import h5py
        with h5py.File(filename, "w") as f:
            ds = f.create_dataset("lattice", (3, 3), dtype='float')
            ds[...] = self.structure.lattice.matrix
            ds = f.create_dataset("Z", (len(self.structure.species), ),
                                  dtype="i")
            ds[...] = np.array([sp.Z for sp in self.structure.species])
            ds = f.create_dataset("fcoords", self.structure.frac_coords.shape,
                                  dtype='float')
            ds[...] = self.structure.frac_coords
            dt = h5py.special_dtype(vlen=str)
            ds = f.create_dataset("species", (len(self.structure.species), ),
                                  dtype=dt)
            ds[...] = [str(sp) for sp in self.structure.species]
            grp = f.create_group("vdata")
            for k, v in self.data.items():
                ds = grp.create_dataset(k, self.data[k].shape, dtype='float')
                ds[...] = self.data[k]
            # NOTE(review): self.name is only set by subclasses
            # (Locpot/Chgcar); a bare VolumetricData would raise here —
            # confirm intended usage.
            f.attrs["name"] = self.name
            f.attrs["structure_json"] = json.dumps(self.structure.as_dict())
    @classmethod
    def from_hdf5(cls, filename):
        # Reads back a file written by to_hdf5.
        # NOTE(review): returns the base VolumetricData rather than cls —
        # confirm whether subclass round-tripping is intended.
        import h5py
        with h5py.File(filename, "r") as f:
            data = {k: np.array(v) for k, v in f["vdata"].items()}
            structure = Structure.from_dict(json.loads(f.attrs["structure_json"]))
            return VolumetricData(structure, data)
class Locpot(VolumetricData):
    """
    Simple object for reading a LOCPOT file.

    Args:
        poscar (Poscar): Poscar object containing structure.
        data: Actual data.
    """
    def __init__(self, poscar, data):
        super(Locpot, self).__init__(poscar.structure, data)
        # Carry the POSCAR comment through as the object's name.
        self.name = poscar.comment
    @staticmethod
    def from_file(filename):
        """Parse a LOCPOT file and return a Locpot instance."""
        parsed_poscar, parsed_data, _unused_aug = \
            VolumetricData.parse_file(filename)
        return Locpot(parsed_poscar, parsed_data)
class Chgcar(VolumetricData):
    """
    Simple object for reading a CHGCAR file.

    Args:
        poscar (Poscar): Poscar object containing structure.
        data: Actual data.
    """
    def __init__(self, poscar, data, data_aug=None):
        super(Chgcar, self).__init__(poscar.structure, data,
                                     data_aug=data_aug)
        self.poscar = poscar
        # Carry the POSCAR comment through as the object's name.
        self.name = poscar.comment
        self._distance_matrix = {}
    @staticmethod
    def from_file(filename):
        """Parse a CHGCAR file and return a Chgcar instance."""
        parsed_poscar, parsed_data, parsed_aug = \
            VolumetricData.parse_file(filename)
        return Chgcar(parsed_poscar, parsed_data, data_aug=parsed_aug)
    @property
    def net_magnetization(self):
        """Sum of the 'diff' density, or None for non-spin-polarized runs."""
        if not self.is_spin_polarized:
            return None
        return np.sum(self.data['diff'])
class Procar(object):
    """
    Object for reading a PROCAR file.

    Args:
        filename: Name of file containing PROCAR.

    .. attribute:: data
        The PROCAR data of the form below. Note that VASP uses 1-based
        indexing, but all indices are converted to 0-based here.::
            {
                spin: nd.array accessed with (k-point index, band index,
                                              ion index, orbital index)
            }
    .. attribute:: weights
        The weights associated with each k-point as an nd.array of length
        nkpoints.
    ..attribute:: phase_factors
        Phase factors, where present (e.g. LORBIT = 12). A dict of the form:
        {
            spin: complex nd.array accessed with (k-point index, band index,
                                                  ion index, orbital index)
        }
    ..attribute:: nbands
        Number of bands
    ..attribute:: nkpoints
        Number of k-points
    ..attribute:: nions
        Number of ions
    """

    def __init__(self, filename):
        headers = None

        with zopen(filename, "rt") as f:
            preambleexpr = re.compile(
                r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of "
                r"ions:\s*(\d+)")
            kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
            bandexpr = re.compile(r"^band\s+(\d+)")
            ionexpr = re.compile(r"^ion.*")
            expr = re.compile(r"^([0-9]+)\s+")
            current_kpoint = 0
            current_band = 0
            done = False
            spin = Spin.down

            for l in f:
                l = l.strip()
                if bandexpr.match(l):
                    m = bandexpr.match(l)
                    current_band = int(m.group(1)) - 1  # 1-based -> 0-based
                    done = False
                elif kpointexpr.match(l):
                    m = kpointexpr.match(l)
                    current_kpoint = int(m.group(1)) - 1
                    weights[current_kpoint] = float(m.group(2))
                    # Re-encountering k-point 1 marks the start of the next
                    # spin channel in a spin-polarized PROCAR.
                    if current_kpoint == 0:
                        spin = Spin.up if spin == Spin.down else Spin.down
                    done = False
                elif headers is None and ionexpr.match(l):
                    headers = l.split()
                    headers.pop(0)   # drop the leading "ion" column label
                    headers.pop(-1)  # drop the trailing "tot" column label

                    def f():
                        return np.zeros((nkpoints, nbands, nions,
                                         len(headers)))
                    data = defaultdict(f)

                    def f2():
                        # BUG FIX: np.NaN alias removed in NumPy 2.0;
                        # np.nan is the portable spelling.
                        return np.full((nkpoints, nbands, nions,
                                        len(headers)),
                                       np.nan, dtype=np.complex128)
                    phase_factors = defaultdict(f2)
                elif expr.match(l):
                    toks = l.split()
                    index = int(toks.pop(0)) - 1
                    num_data = np.array([float(t)
                                         for t in toks[:len(headers)]])
                    if not done:
                        # Projection block (before the "tot" line).
                        data[spin][current_kpoint, current_band,
                                   index, :] = num_data
                    else:
                        # Phase-factor block: the first pass stores the real
                        # part, the second pass adds the imaginary part.
                        if np.isnan(phase_factors[spin][
                                current_kpoint, current_band, index, 0]):
                            phase_factors[spin][current_kpoint, current_band,
                                                index, :] = num_data
                        else:
                            phase_factors[spin][current_kpoint, current_band,
                                                index, :] += 1j * num_data
                elif l.startswith("tot"):
                    done = True
                elif preambleexpr.match(l):
                    m = preambleexpr.match(l)
                    nkpoints = int(m.group(1))
                    nbands = int(m.group(2))
                    nions = int(m.group(3))
                    weights = np.zeros(nkpoints)

            self.nkpoints = nkpoints
            self.nbands = nbands
            self.nions = nions
            self.weights = weights
            self.orbitals = headers
            self.data = data
            self.phase_factors = phase_factors

    def get_projection_on_elements(self, structure):
        """
        Method returning a dictionary of projections on elements.

        Args:
            structure (Structure): Input structure.

        Returns:
            a dictionary in the {Spin.up:[k index][b index][{Element:values}]]
        """
        dico = {}
        for spin in self.data.keys():
            dico[spin] = [[defaultdict(float)
                           for i in range(self.nkpoints)]
                          for j in range(self.nbands)]

        for iat in range(self.nions):
            name = structure.species[iat].symbol
            for spin, d in self.data.items():
                for k, b in itertools.product(range(self.nkpoints),
                                              range(self.nbands)):
                    # BUG FIX: accumulate over atoms of the same element;
                    # plain assignment kept only the last atom's projection
                    # (defaultdict(float) exists exactly to support +=).
                    dico[spin][b][k][name] += np.sum(d[k, b, iat, :])

        return dico

    def get_occupation(self, atom_index, orbital):
        """
        Returns the occupation for a particular orbital of a particular atom.

        Args:
            atom_index (int): Index of atom in the PROCAR. It should be noted
                that VASP uses 1-based indexing for atoms, but this is
                converted to 0-based indexing in this parser to be
                consistent with representation of structures in pymatgen.
            orbital (str): An orbital label that must exactly match one of
                the orbital column headers in the PROCAR (e.g., "s", "px",
                "dxy").

        Returns:
            dict of {spin: k-point-weighted sum of the projection of that
            orbital of that atom over all k-points and bands}.
        """
        orbital_index = self.orbitals.index(orbital)
        return {spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None])
                for spin, d in self.data.items()}
class Oszicar(object):
    """
    A basic parser for an OSZICAR output from VASP. In general, while the
    OSZICAR is useful for a quick look at the output from a VASP run, we
    recommend that you use the Vasprun parser instead, which gives far richer
    information about a run.

    Args:
        filename (str): Filename of file to parse

    .. attribute:: electronic_steps
        All electronic steps as a list of list of dict. e.g.,
        [[{"rms": 160.0, "E": 4507.24605593, "dE": 4507.2, "N": 1,
        "deps": -17777.0, "ncg": 16576}, ...], [....]
        where electronic_steps[index] refers to the list of electronic steps
        in one ionic_step, electronic_steps[index][subindex] refers to a
        particular electronic step at subindex in ionic step at index. The
        dict of properties depends on the type of VASP run, but in general,
        "E", "dE" and "rms" should be present in almost all runs.
    .. attribute:: ionic_steps:
        All ionic_steps as a list of dict, e.g.,
        [{"dE": -526.36, "E0": -526.36024, "mag": 0.0, "F": -526.36024},
        ...]
        This is the typical output from VASP at the end of each ionic step.
    """

    def __init__(self, filename):
        """Parse the given OSZICAR file into electronic and ionic steps."""
        electronic_steps = []
        ionic_steps = []
        # Ionic step summary line for a standard (non-magnetic) run:
        # "<N>  F= <...>  E0= <...>  d E = <...>"
        ionic_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
                                   r"E0=\s*([\d\-\.E\+]+)\s+"
                                   r"d\s*E\s*=\s*([\d\-\.E\+]+)$")
        # Same as above for spin-polarized runs, with a trailing "mag=".
        ionic_mag_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
                                       r"E0=\s*([\d\-\.E\+]+)\s+"
                                       r"d\s*E\s*=\s*([\d\-\.E\+]+)\s+"
                                       r"mag=\s*([\d\-\.E\+]+)")
        # Molecular dynamics variant: temperature and kinetic/potential terms.
        ionic_MD_pattern = re.compile(r"(\d+)\s+T=\s*([\d\-\.E\+]+)\s+"
                                      r"E=\s*([\d\-\.E\+]+)\s+"
                                      r"F=\s*([\d\-\.E\+]+)\s+"
                                      r"E0=\s*([\d\-\.E\+]+)\s+"
                                      r"EK=\s*([\d\-\.E\+]+)\s+"
                                      r"SP=\s*([\d\-\.E\+]+)\s+"
                                      r"SK=\s*([\d\-\.E\+]+)")
        # Electronic (SCF) step lines start with the algorithm tag,
        # e.g. "DAV:" or "RMM:"; group(1) captures the numeric columns.
        electronic_pattern = re.compile(r"\s*\w+\s*:(.*)")

        def smart_convert(header, num):
            # Convert a token to int or float depending on its column;
            # return "--" for values VASP printed unparseably (e.g. "*****").
            try:
                if header == "N" or header == "ncg":
                    v = int(num)
                    return v
                v = float(num)
                return v
            except ValueError:
                return "--"

        header = []
        with zopen(filename, "rt") as fid:
            for line in fid:
                line = line.strip()
                m = electronic_pattern.match(line)
                if m:
                    toks = m.group(1).split()
                    data = {header[i]: smart_convert(header[i], toks[i])
                            for i in range(len(toks))}
                    if toks[0] == "1":
                        # SCF step N == 1 starts a new ionic step's list.
                        electronic_steps.append([data])
                    else:
                        electronic_steps[-1].append(data)
                elif ionic_pattern.match(line.strip()):
                    m = ionic_pattern.match(line.strip())
                    ionic_steps.append({"F": float(m.group(2)),
                                        "E0": float(m.group(3)),
                                        "dE": float(m.group(4))})
                elif ionic_mag_pattern.match(line.strip()):
                    m = ionic_mag_pattern.match(line.strip())
                    ionic_steps.append({"F": float(m.group(2)),
                                        "E0": float(m.group(3)),
                                        "dE": float(m.group(4)),
                                        "mag": float(m.group(5))})
                elif ionic_MD_pattern.match(line.strip()):
                    m = ionic_MD_pattern.match(line.strip())
                    ionic_steps.append({"T": float(m.group(2)),
                                        "E": float(m.group(3)),
                                        "F": float(m.group(4)),
                                        "E0": float(m.group(5)),
                                        "EK": float(m.group(6)),
                                        "SP": float(m.group(7)),
                                        "SK": float(m.group(8))})
                elif re.match(r"^\s*N\s+E\s*", line):
                    # Column-header line; "d eps" is collapsed to one token
                    # so headers align with the whitespace-split data rows.
                    header = line.strip().replace("d eps", "deps").split()
        self.electronic_steps = electronic_steps
        self.ionic_steps = ionic_steps

    @property
    def all_energies(self):
        """
        Compilation of all energies from all electronic steps and ionic steps
        as a tuple of list of energies, e.g.,
        ((4507.24605593, 143.824705755, -512.073149912, ...), ...)
        """
        all_energies = []
        for i in range(len(self.electronic_steps)):
            energies = [step["E"] for step in self.electronic_steps[i]]
            # Append the ionic step's free energy F after its SCF energies.
            energies.append(self.ionic_steps[i]["F"])
            all_energies.append(tuple(energies))
        return tuple(all_energies)

    @property
    @unitized("eV")
    def final_energy(self):
        """
        Final energy from run.
        """
        return self.ionic_steps[-1]["E0"]

    def as_dict(self):
        """Return a JSON-serializable dict representation."""
        return {"electronic_steps": self.electronic_steps,
                "ionic_steps": self.ionic_steps}
class VaspParserError(Exception):
    """Raised when a VASP output file cannot be parsed."""
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
                                                   projections=False):
    """
    This method is used to get band structure info from a VASP directory. It
    takes into account that the run can be divided in several branches named
    "branch_x". If the run has not been divided in branches the method will
    turn to parsing vasprun.xml directly.

    The method returns None if there's a parsing error.

    Args:
        dir_name: Directory containing all bandstructure runs.
        efermi: Efermi for bandstructure.
        projections: True if you want to get the data on site projections if
            any. Note that this is sometimes very large

    Returns:
        A BandStructure Object
    """
    # TODO: Add better error handling!!!
    if os.path.exists(os.path.join(dir_name, "branch_0")):
        # get all branch dir names
        branch_dir_names = [os.path.abspath(d)
                            for d in glob.glob("{i}/branch_*"
                                               .format(i=dir_name))
                            if os.path.isdir(d)]

        # sort by the numeric suffix of the directory name (e.g. branch_10)
        sorted_branch_dir_names = sorted(
            branch_dir_names, key=lambda x: int(x.split("_")[-1]))

        # populate branches with Bandstructure instances
        branches = []
        # NOTE: use a distinct loop variable; the original shadowed dir_name.
        for branch_dir in sorted_branch_dir_names:
            xml_file = os.path.join(branch_dir, "vasprun.xml")
            if os.path.exists(xml_file):
                run = Vasprun(xml_file, parse_projected_eigen=projections)
                branches.append(run.get_band_structure(efermi=efermi))
            else:
                # It might be better to throw an exception
                # BUG FIX: positional "{}" fields cannot be filled with
                # keyword arguments; .format(d=..., f=...) raised IndexError
                # instead of emitting the warning.
                warnings.warn("Skipping {}. Unable to find {}"
                              .format(branch_dir, xml_file))
        return get_reconstructed_band_structure(branches, efermi)
    else:
        xml_file = os.path.join(dir_name, "vasprun.xml")
        # Better handling of Errors
        if os.path.exists(xml_file):
            return Vasprun(xml_file, parse_projected_eigen=projections)\
                .get_band_structure(kpoints_filename=None, efermi=efermi)
        else:
            return None
class Xdatcar(object):
    """
    Class representing an XDATCAR file. Only tested with VASP 5.x files.

    .. attribute:: structures
        List of structures parsed from XDATCAR.
    .. attribute:: comment
        Optional comment string.

    Authors: Ram Balachandran
    """

    def __init__(self, filename, ionicstep_start=1,
                 ionicstep_end=None, comment=None):
        """
        Init a Xdatcar.

        Args:
            filename (str): Filename of input XDATCAR file.
            ionicstep_start (int): Starting number of ionic step (1-based,
                inclusive).
            ionicstep_end (int): Ending number of ionic step (exclusive).
                None reads through to the end of the file.
            comment (str): Optional comment; defaults to the formula of the
                first parsed structure.
        """
        preamble = None
        coords_str = []
        structures = []
        preamble_done = False
        if ionicstep_start < 1:
            raise Exception('Start ionic step cannot be less than 1')
        # BUG FIX: the original re-tested ionicstep_start here, so an invalid
        # ionicstep_end was silently accepted.
        if ionicstep_end is not None and ionicstep_end < 1:
            raise Exception('End ionic step cannot be less than 1')
        ionicstep_cnt = 1
        with zopen(filename, "rt") as f:
            for l in f:
                l = l.strip()
                if preamble is None:
                    preamble = [l]
                elif not preamble_done:
                    if l == "" or "Direct configuration=" in l:
                        preamble_done = True
                        # Collapse repeated copies of the comment line that
                        # some XDATCAR writers emit at the top.
                        tmp_preamble = [preamble[0]]
                        for i in range(1, len(preamble)):
                            if preamble[0] != preamble[i]:
                                tmp_preamble.append(preamble[i])
                            else:
                                break
                        preamble = tmp_preamble
                    else:
                        preamble.append(l)
                elif l == "" or "Direct configuration=" in l:
                    # A new configuration marker flushes the previous one.
                    p = Poscar.from_string("\n".join(preamble +
                                                     ["Direct"] + coords_str))
                    if ionicstep_end is None:
                        if ionicstep_cnt >= ionicstep_start:
                            structures.append(p.structure)
                    else:
                        if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                            structures.append(p.structure)
                    ionicstep_cnt += 1
                    coords_str = []
                else:
                    coords_str.append(l)
        # Flush the final configuration (the file does not end with a
        # "Direct configuration=" marker).
        p = Poscar.from_string("\n".join(preamble +
                                         ["Direct"] + coords_str))
        if ionicstep_end is None:
            if ionicstep_cnt >= ionicstep_start:
                structures.append(p.structure)
        else:
            if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                structures.append(p.structure)
        self.structures = structures
        self.comment = comment or self.structures[0].formula

    @property
    def site_symbols(self):
        """
        Sequence of symbols associated with the Xdatcar. Similar to 6th line in
        vasp 5+ Xdatcar.
        """
        syms = [site.specie.symbol for site in self.structures[0]]
        return [a[0] for a in itertools.groupby(syms)]

    @property
    def natoms(self):
        """
        Sequence of number of sites of each type associated with the Poscar.
        Similar to 7th line in vasp 5+ Xdatcar.
        """
        syms = [site.specie.symbol for site in self.structures[0]]
        return [len(tuple(a[1])) for a in itertools.groupby(syms)]

    def concatenate(self, filename, ionicstep_start=1,
                    ionicstep_end=None):
        """
        Concatenate structures in file to Xdatcar.

        Args:
            filename (str): Filename of XDATCAR file to be concatenated.
            ionicstep_start (int): Starting number of ionic step (1-based,
                inclusive).
            ionicstep_end (int): Ending number of ionic step (exclusive).

        TODO(rambalachandran):
            Requires a check to ensure if the new concatenating file has the
            same lattice structure and atoms as the Xdatcar class.
        """
        preamble = None
        coords_str = []
        structures = self.structures
        preamble_done = False
        if ionicstep_start < 1:
            raise Exception('Start ionic step cannot be less than 1')
        # BUG FIX: validate ionicstep_end, not ionicstep_start again.
        if ionicstep_end is not None and ionicstep_end < 1:
            raise Exception('End ionic step cannot be less than 1')
        ionicstep_cnt = 1
        with zopen(filename, "rt") as f:
            for l in f:
                l = l.strip()
                if preamble is None:
                    preamble = [l]
                elif not preamble_done:
                    if l == "" or "Direct configuration=" in l:
                        preamble_done = True
                        tmp_preamble = [preamble[0]]
                        for i in range(1, len(preamble)):
                            if preamble[0] != preamble[i]:
                                tmp_preamble.append(preamble[i])
                            else:
                                break
                        preamble = tmp_preamble
                    else:
                        preamble.append(l)
                elif l == "" or "Direct configuration=" in l:
                    p = Poscar.from_string("\n".join(preamble +
                                                     ["Direct"] + coords_str))
                    if ionicstep_end is None:
                        if ionicstep_cnt >= ionicstep_start:
                            structures.append(p.structure)
                    else:
                        if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                            structures.append(p.structure)
                    ionicstep_cnt += 1
                    coords_str = []
                else:
                    coords_str.append(l)
        # Flush the last configuration in the file.
        p = Poscar.from_string("\n".join(preamble +
                                         ["Direct"] + coords_str))
        if ionicstep_end is None:
            if ionicstep_cnt >= ionicstep_start:
                structures.append(p.structure)
        else:
            if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                structures.append(p.structure)
        self.structures = structures

    def get_string(self, ionicstep_start=1,
                   ionicstep_end=None,
                   significant_figures=8):
        """
        Return a string representation in XDATCAR format.

        Args:
            ionicstep_start (int): Starting number of ionic step (1-based,
                inclusive).
            ionicstep_end (int): Ending number of ionic step (exclusive).
            significant_figures (int): Number of decimal places for the
                fractional coordinates.
        """
        # NOTE: removed an unused local "from pymatgen.io.vasp import Poscar".
        if ionicstep_start < 1:
            raise Exception('Start ionic step cannot be less than 1')
        # BUG FIX: validate ionicstep_end, not ionicstep_start again.
        if ionicstep_end is not None and ionicstep_end < 1:
            raise Exception('End ionic step cannot be less than 1')
        latt = self.structures[0].lattice
        # VASP requires a right-handed lattice; flip if the determinant is
        # negative.
        if np.linalg.det(latt.matrix) < 0:
            latt = Lattice(-latt.matrix)
        lines = [self.comment, "1.0", str(latt)]
        lines.append(" ".join(self.site_symbols))
        lines.append(" ".join([str(x) for x in self.natoms]))
        format_str = "{{:.{0}f}}".format(significant_figures)
        ionicstep_cnt = 1
        output_cnt = 1
        for cnt, structure in enumerate(self.structures):
            ionicstep_cnt = cnt + 1
            if ionicstep_end is None:
                if ionicstep_cnt >= ionicstep_start:
                    lines.append("Direct configuration=" +
                                 ' ' * (7 - len(str(output_cnt))) + str(output_cnt))
                    for (i, site) in enumerate(structure):
                        coords = site.frac_coords
                        line = " ".join([format_str.format(c) for c in coords])
                        lines.append(line)
                    output_cnt += 1
            else:
                if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                    lines.append("Direct configuration=" +
                                 ' ' * (7 - len(str(output_cnt))) + str(output_cnt))
                    for (i, site) in enumerate(structure):
                        coords = site.frac_coords
                        line = " ".join([format_str.format(c) for c in coords])
                        lines.append(line)
                    output_cnt += 1
        return "\n".join(lines) + "\n"

    def write_file(self, filename, **kwargs):
        """
        Write Xdatcar class into a file.

        Args:
            filename (str): Filename of output XDATCAR file.

        The supported kwargs are the same as those for the
        Xdatcar.get_string method and are passed through directly.
        """
        with zopen(filename, "wt") as f:
            f.write(self.get_string(**kwargs))

    def __str__(self):
        return self.get_string()
class Dynmat(object):
    """
    Object for reading a DYNMAT file.

    Args:
        filename: Name of file containing DYNMAT.

    .. attribute:: data
        A nested dict containing the DYNMAT data of the form::
            [atom <int>][disp <int>]['dispvec'] =
                displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
            [atom <int>][disp <int>]['dynmat'] =
                <list> list of dynmat lines for this atom and this displacement

    Authors: Patrick Huck
    """

    def __init__(self, filename):
        with zopen(filename, "rt") as f:
            lines = list(clean_lines(f.readlines()))
            # First line: number of species, atoms and displacements.
            self._nspecs, self._natoms, self._ndisps = map(int, lines[
                0].split())
            # BUG FIX: materialize the masses into a list. The original kept
            # the lazy map() iterator, which on Python 3 is exhausted after
            # the first read of the `masses` property.
            self._masses = [float(x) for x in lines[1].split()]
            self.data = defaultdict(dict)
            atom, disp = None, None
            for i, l in enumerate(lines[2:]):
                v = list(map(float, l.split()))
                if not i % (self._natoms + 1):
                    # Block header line: atom index, displacement index,
                    # then the displacement vector itself.
                    atom, disp = map(int, v[:2])
                    if atom not in self.data:
                        self.data[atom] = {}
                    if disp not in self.data[atom]:
                        self.data[atom][disp] = {}
                    self.data[atom][disp]['dispvec'] = v[2:]
                else:
                    if 'dynmat' not in self.data[atom][disp]:
                        self.data[atom][disp]['dynmat'] = []
                    self.data[atom][disp]['dynmat'].append(v)

    def get_phonon_frequencies(self):
        """calculate phonon frequencies"""
        # TODO: the following is most likely not correct or suboptimal
        # hence for demonstration purposes only
        frequencies = []
        # BUG FIX: iteritems()/itervalues() are Python 2 only and raise
        # AttributeError on Python 3; use items()/values().
        for k, v0 in self.data.items():
            for v1 in v0.values():
                vec = map(abs, v1['dynmat'][k - 1])
                frequency = math.sqrt(sum(vec)) * 2. * \
                    math.pi * 15.633302  # THz
                frequencies.append(frequency)
        return frequencies

    @property
    def nspecs(self):
        """returns the number of species"""
        return self._nspecs

    @property
    def natoms(self):
        """returns the number of atoms"""
        return self._natoms

    @property
    def ndisps(self):
        """returns the number of displacements"""
        return self._ndisps

    @property
    def masses(self):
        """returns the list of atomic masses"""
        return list(self._masses)
def get_adjusted_fermi_level(efermi, cbm, band_structure):
    """
    When running a band structure computation, the Fermi level needs to be
    taken from the static run that gave the charge density used for the
    non-self-consistent band structure run. Sometimes this Fermi level is a
    little too low because of the mismatch between the uniform grid used in
    the static run and the band structure k-points (e.g., the VBM is on Gamma
    and the Gamma point is not in the uniform mesh). Here we scan energy
    levels above the static Fermi level (but below the CBM); if any of them
    makes the band structure look insulating rather than metallic, we keep
    that adjusted Fermi level. This procedure has been shown to detect most
    insulators correctly.

    Args:
        efermi (float): Fermi energy of the static run
        cbm (float): Conduction band minimum of the static run
        band_structure: a band_structure object

    Returns:
        a new adjusted fermi level
    """
    # Work on a deep copy so the caller's band structure is untouched.
    bs = BandStructureSymmLine.from_dict(band_structure.as_dict())
    if not bs.is_metal():
        # Already insulating: no adjustment needed.
        return efermi
    trial = efermi
    while trial < cbm:
        trial += 0.01
        bs._efermi = trial
        if not bs.is_metal():
            return trial
    return efermi
class Wavecar:
    """
    This is a class that contains the (pseudo-) wavefunctions from VASP.

    Coefficients are read from the given WAVECAR file and the corresponding
    G-vectors are generated using the algorithm developed in WaveTrans (see
    acknowledgments below). To understand how the wavefunctions are evaluated,
    please see the evaluate_wavefunc docstring.

    It should be noted that the pseudopotential augmentation is not included in
    the WAVECAR file. As a result, some caution should be exercised when
    deriving value from this information.

    The usefulness of this class is to allow the user to do projections or band
    unfolding style manipulations of the wavefunction. An example of this can
    be seen in the work of Shen et al. 2017
    (https://doi.org/10.1103/PhysRevMaterials.1.065001).

    .. attribute:: filename
        String of the input file (usually WAVECAR)
    .. attribute:: nk
        Number of k-points from the WAVECAR
    .. attribute:: nb
        Number of bands per k-point
    .. attribute:: encut
        Energy cutoff (used to define G_{cut})
    .. attribute:: efermi
        Fermi energy
    .. attribute:: a
        Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
    .. attribute:: b
        Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
    .. attribute:: vol
        The volume of the unit cell in real space
    .. attribute:: kpoints
        The list of k-points read from the WAVECAR file
    .. attribute:: band_energy
        The list of band eigenenergies (and corresponding occupancies) for
        each kpoint, where the first index corresponds to the index of the
        k-point (e.g. self.band_energy[kp])
    .. attribute:: Gpoints
        The list of generated G-points for each k-point (a double list), which
        are used with the coefficients for each k-point and band to recreate
        the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
        k-point kp). The G-points depend on the k-point and reciprocal lattice
        and therefore are identical for each band at the same k-point. Each
        G-point is represented by integer multipliers (e.g. assuming
        Gpoints[kp][n] == [n_1, n_2, n_3], then
        G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
    .. attribute:: coeffs
        The list of coefficients for each k-point and band for reconstructing
        the wavefunction. The first index corresponds to the kpoint and the
        second corresponds to the band (e.g. self.coeffs[kp][b] corresponds
        to k-point kp and band b).

    Acknowledgments:
        This code is based upon the Fortran program, WaveTrans, written by
        R. M. Feenstra and M. Widom from the Dept. of Physics at Carnegie
        Mellon University. To see the original work, please visit:
        https://www.andrew.cmu.edu/user/feenstra/wavetrans/

    Author: Mark Turiansky
    """

    def __init__(self, filename='WAVECAR', verbose=False, precision='normal'):
        """
        Information is extracted from the given WAVECAR

        Args:
            filename (str): input file (default: WAVECAR)
            verbose (bool): determines whether processing information is shown
            precision (str): determines how fine the fft mesh is (normal or
                             accurate), only the first letter matters
        """
        self.filename = filename

        # c = 0.26246582250210965422
        # 2m/hbar^2 in agreement with VASP
        self._C = 0.262465831
        with open(self.filename, 'rb') as f:
            # read the header information
            # BUG FIX: the np.int alias was removed in NumPy 1.24; the
            # builtin int is the equivalent, portable dtype argument.
            recl, spin, rtag = np.fromfile(f, dtype=np.float64, count=3) \
                .astype(int)
            if verbose:
                print('recl={}, spin={}, rtag={}'.format(recl, spin, rtag))
            recl8 = int(recl / 8)

            # check that ISPIN wasn't set to 2
            if spin == 2:
                raise ValueError('spin polarization not currently supported')

            # check to make sure we have precision correct
            if rtag != 45200 and rtag != 45210:
                raise ValueError('invalid rtag of {}'.format(rtag))

            # padding to the end of the first record
            np.fromfile(f, dtype=np.float64, count=(recl8 - 3))

            # extract kpoint, bands, energy, and lattice information
            self.nk, self.nb, self.encut = np.fromfile(f, dtype=np.float64,
                                                       count=3).astype(int)
            self.a = np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3))
            self.efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
            if verbose:
                print('kpoints = {}, bands = {}, energy cutoff = {}, fermi '
                      'energy= {:.04f}\n'.format(self.nk, self.nb, self.encut,
                                                 self.efermi))
                print('primitive lattice vectors = \n{}'.format(self.a))

            self.vol = np.dot(self.a[0, :],
                              np.cross(self.a[1, :], self.a[2, :]))
            if verbose:
                print('volume = {}\n'.format(self.vol))

            # calculate reciprocal lattice
            b = np.array([np.cross(self.a[1, :], self.a[2, :]),
                          np.cross(self.a[2, :], self.a[0, :]),
                          np.cross(self.a[0, :], self.a[1, :])])
            b = 2 * np.pi * b / self.vol
            self.b = b
            if verbose:
                print('reciprocal lattice vectors = \n{}'.format(b))
                print('reciprocal lattice vector magnitudes = \n{}\n'
                      .format(np.linalg.norm(b, axis=1)))

            # calculate maximum number of b vectors in each direction
            self._generate_nbmax()
            if verbose:
                print('max number of G values = {}\n\n'.format(self._nbmax))
            self.ng = self._nbmax * 3 if precision.lower()[0] == 'n' else \
                self._nbmax * 4

            # padding to the end of the second record
            np.fromfile(f, dtype=np.float64, count=recl8 - 13)

            # reading records
            self.Gpoints = [None for _ in range(self.nk)]
            self.coeffs = [[None for i in range(self.nb)]
                           for j in range(self.nk)]
            self.kpoints = []
            self.band_energy = []
            for ispin in range(spin):
                if verbose:
                    print('reading spin {}'.format(ispin))
                for ink in range(self.nk):
                    # information for this kpoint
                    nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
                    kpoint = np.fromfile(f, dtype=np.float64, count=3)
                    self.kpoints.append(kpoint)
                    if verbose:
                        print('kpoint {: 4} with {: 5} plane waves at {}'
                              .format(ink, nplane, kpoint))

                    # energy and occupation information
                    enocc = np.fromfile(f, dtype=np.float64,
                                        count=3 * self.nb).reshape((self.nb, 3))
                    self.band_energy.append(enocc)
                    if verbose:
                        print(enocc[:, [0, 2]])

                    # padding to the end of the record
                    np.fromfile(f, dtype=np.float64, count=(recl8 - 4 - 3 * self.nb))

                    # generate G integers
                    self.Gpoints[ink] = self._generate_G_points(kpoint)
                    if len(self.Gpoints[ink]) != nplane:
                        raise ValueError('failed to generate the correct '
                                         'number of G points')

                    # extract coefficients
                    for inb in range(self.nb):
                        if rtag == 45200:
                            self.coeffs[ink][inb] = \
                                np.fromfile(f, dtype=np.complex64,
                                            count=nplane)
                            np.fromfile(f, dtype=np.float64,
                                        count=recl8 - nplane)
                        elif rtag == 45210:
                            # this should handle double precision coefficients
                            # but I don't have a WAVECAR to test it with
                            self.coeffs[ink][inb] = \
                                np.fromfile(f, dtype=np.complex128,
                                            count=nplane)
                            np.fromfile(f, dtype=np.float64,
                                        count=recl8 - 2 * nplane)

    def _generate_nbmax(self):
        """
        Helper function that determines maximum number of b vectors for
        each direction.

        This algorithm is adapted from WaveTrans (see Class docstring). There
        should be no reason for this function to be called outside of
        initialization.
        """
        bmag = np.linalg.norm(self.b, axis=1)
        b = self.b

        # calculate maximum integers in each direction for G
        phi12 = np.arccos(np.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
        sphi123 = np.dot(b[2, :], np.cross(b[0, :], b[1, :])) / \
            (bmag[2] * np.linalg.norm(np.cross(b[0, :], b[1, :])))
        nbmaxA = np.sqrt(self.encut * self._C) / bmag
        nbmaxA[0] /= np.abs(np.sin(phi12))
        nbmaxA[1] /= np.abs(np.sin(phi12))
        nbmaxA[2] /= np.abs(sphi123)
        nbmaxA += 1

        phi13 = np.arccos(np.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
        sphi123 = np.dot(b[1, :], np.cross(b[0, :], b[2, :])) / \
            (bmag[1] * np.linalg.norm(np.cross(b[0, :], b[2, :])))
        nbmaxB = np.sqrt(self.encut * self._C) / bmag
        nbmaxB[0] /= np.abs(np.sin(phi13))
        nbmaxB[1] /= np.abs(sphi123)
        nbmaxB[2] /= np.abs(np.sin(phi13))
        nbmaxB += 1

        phi23 = np.arccos(np.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
        sphi123 = np.dot(b[0, :], np.cross(b[1, :], b[2, :])) / \
            (bmag[0] * np.linalg.norm(np.cross(b[1, :], b[2, :])))
        nbmaxC = np.sqrt(self.encut * self._C) / bmag
        nbmaxC[0] /= np.abs(sphi123)
        nbmaxC[1] /= np.abs(np.sin(phi23))
        nbmaxC[2] /= np.abs(np.sin(phi23))
        nbmaxC += 1

        # BUG FIX: astype(np.int) -> astype(int) (alias removed in NumPy 1.24)
        self._nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0) \
            .astype(int)

    def _generate_G_points(self, kpoint):
        """
        Helper function to generate G-points based on nbmax.

        This function iterates over possible G-point values and determines
        if the energy is less than G_{cut}. Valid values are appended to
        the output array. This function should not be called outside of
        initialization.

        Args:
            kpoint (np.array): the array containing the current k-point value

        Returns:
            a list containing valid G-points
        """
        gpoints = []
        for i in range(2 * self._nbmax[2] + 1):
            i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
            for j in range(2 * self._nbmax[1] + 1):
                j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
                for k in range(2 * self._nbmax[0] + 1):
                    k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
                    G = np.array([k1, j2, i3])
                    v = kpoint + G
                    g = np.linalg.norm(np.dot(v, self.b))
                    E = g**2 / self._C
                    if E < self.encut:
                        gpoints.append(G)
        return np.array(gpoints, dtype=np.float64)

    def evaluate_wavefunc(self, kpoint, band, r):
        r"""
        Evaluates the wavefunction for a given position, r.

        The wavefunction is given by the k-point and band. It is evaluated
        at the given position by summing over the components. Formally,

        \psi_n^k (r) = \sum_{i=1}^N c_i^{n,k} \exp (i (k + G_i^{n,k}) \cdot r)

        where \psi_n^k is the wavefunction for the nth band at k-point k, N is
        the number of plane waves, c_i^{n,k} is the ith coefficient that
        corresponds to the nth band and k-point k, and G_i^{n,k} is the ith
        G-point corresponding to k-point k.

        NOTE: This function is very slow; a discrete fourier transform is the
        preferred method of evaluation (see Wavecar.fft_mesh).

        Args:
            kpoint (int): the index of the kpoint where the wavefunction
                          will be evaluated
            band (int): the index of the band where the wavefunction will be
                        evaluated
            r (np.array): the position where the wavefunction will be evaluated

        Returns:
            a complex value corresponding to the evaluation of the wavefunction
        """
        v = self.Gpoints[kpoint] + self.kpoints[kpoint]
        u = np.dot(np.dot(v, self.b), r)
        c = self.coeffs[kpoint][band]
        return np.sum(np.dot(c, np.exp(1j * u, dtype=np.complex64))) / \
            np.sqrt(self.vol)

    def fft_mesh(self, kpoint, band, shift=True):
        """
        Places the coefficients of a wavefunction onto an fft mesh.

        Once the mesh has been obtained, a discrete fourier transform can be
        used to obtain real-space evaluation of the wavefunction. The output
        of this function can be passed directly to numpy's fft function. For
        example:

            mesh = Wavecar().fft_mesh(kpoint, band)
            evals = np.fft.fft(mesh)

        Args:
            kpoint (int): the index of the kpoint where the wavefunction
                          will be evaluated
            band (int): the index of the band where the wavefunction will be
                        evaluated
            shift (bool): determines if the zero frequency coefficient is
                          placed at index (0, 0, 0) or centered

        Returns:
            a numpy ndarray representing the 3D mesh of coefficients
        """
        # BUG FIX: np.complex/np.int aliases removed in NumPy 1.24;
        # use the builtin complex/int types.
        mesh = np.zeros(tuple(self.ng), dtype=complex)
        for gp, coeff in zip(self.Gpoints[kpoint], self.coeffs[kpoint][band]):
            t = tuple(gp.astype(int) + (self.ng / 2).astype(int))
            mesh[t] = coeff
        if shift:
            return np.fft.ifftshift(mesh)
        else:
            return mesh
class Wavederf(object):
    """
    Object for reading a WAVEDERF file.

    Note: This file is only produced when LOPTICS is true AND vasp has been
    recompiled after uncommenting the line that calls
    WRT_CDER_BETWEEN_STATES_FORMATTED in linear_optics.F

    Args:
        filename: Name of file containing WAVEDERF.

    .. attribute:: data
        A numpy array containing the WAVEDERF data of the form below. It should
        be noted that VASP uses 1-based indexing for bands, but this is
        converted to 0-based numpy array indexing.
        For each kpoint (in the same order as in IBZKPT), and for each pair of
        bands:
            [ #kpoint index
                [ #band 1 index
                    [ #band 2 index
                        [cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
                    ]
                ]
            ]
        This structure follows the file format. Numpy array methods can be used
        to fetch data in a more useful way (e.g., get matrix elements between
        two specific bands at each kpoint, fetch x/y/z components,
        real/imaginary parts, abs/phase, etc. )

    Author: Miguel Dias Costa
    """

    def __init__(self, filename):
        with zopen(filename, "rt") as f:
            header = f.readline().split()
            ispin = int(header[0])
            n_kpts = int(header[1])
            n_bands = int(header[2])
            data = np.zeros((n_kpts, n_bands, n_bands, 6))
            for ik in range(n_kpts):
                for ib1 in range(n_bands):
                    for ib2 in range(n_bands):
                        # Each line repeats the band indices, energies and
                        # occupations (all redundant / available elsewhere);
                        # keep only the six matrix-element columns after them.
                        row = f.readline().split()[6:]
                        data[ik][ib1][ib2] = [float(tok) for tok in row]
        self.data = data
        self._nb_kpoints = n_kpts
        self._nb_bands = n_bands

    @property
    def nb_bands(self):
        """
        returns the number of bands in the band structure
        """
        return self._nb_bands

    @property
    def nb_kpoints(self):
        """
        Returns the number of k-points in the band structure calculation
        """
        return self._nb_kpoints

    def get_elements_between_bands(self, band_i, band_j):
        """
        Method returning a numpy array with elements

        [cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]

        between bands band_i and band_j (vasp 1-based indexing) for all kpoints.

        Args:
            band_i (Integer): Index of band i
            band_j (Integer): Index of band j

        Returns:
            a numpy list of elements for each kpoint
        """
        in_range = (1 <= band_i <= self.nb_bands
                    and 1 <= band_j <= self.nb_bands)
        if not in_range:
            raise ValueError("Band index out of bounds")
        return self.data[:, band_i - 1, band_j - 1, :]
class UnconvergedVASPWarning(Warning):
    """Warning issued when a parsed VASP run did not converge."""
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
import numpy as np
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import scipy.sparse as sp
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
import HartmanChannel
import ExactSol
# Number of refinement levels + 1; the bookkeeping arrays below hold one
# row per level (m-1 rows in total).
m = 3
set_log_active(False)
# Per-level error norms: velocity (L2/H1), pressure (L2), magnetic field
# (L2/curl) and the Lagrange multiplier (L2/H1).
errL2u = np.zeros((m-1,1))
errH1u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
errL2b = np.zeros((m-1,1))
errCurlb = np.zeros((m-1,1))
errL2r = np.zeros((m-1,1))
errH1r = np.zeros((m-1,1))
# Convergence orders estimated from consecutive error entries in the loop.
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder = np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
# Mesh size and degree-of-freedom counts per level.
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
# Solver statistics: Picard iteration counts, timings and average inner
# iteration counts per level.
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
DimSave = np.zeros((m-1,4))
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
# NOTE(review): MU is rebound to a scalar viscosity inside the level loop
# below, so this array entry is never read back.
MU[0] = 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx + 3
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
L = 10.
y0 = 2.
z0 = 1.
# mesh, boundaries, domains = HartmanChannel.Domain(nn)
mesh = UnitSquareMesh(nn, nn)
parameters['form_compiler']['quadrature_degree'] = -1
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorElement("CG", mesh.ufl_cell(), order)
Pressure = FiniteElement("CG", mesh.ufl_cell(), order-1)
Magnetic = FiniteElement("N1curl", mesh.ufl_cell(), order-1)
Lagrange = FiniteElement("CG", mesh.ufl_cell(), order-1)
VelocityF = VectorFunctionSpace(mesh, "CG", order)
PressureF = FunctionSpace(mesh, "CG", order-1)
MagneticF = FunctionSpace(mesh, "N1curl", order-1)
LagrangeF = FunctionSpace(mesh, "CG", order-1)
W = FunctionSpace(mesh, MixedElement([Velocity, Pressure, Magnetic,Lagrange]))
Velocitydim[xx-1] = W.sub(0).dim()
Pressuredim[xx-1] = W.sub(1).dim()
Magneticdim[xx-1] = W.sub(2).dim()
Lagrangedim[xx-1] = W.sub(3).dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [W.sub(0).dim(), W.sub(1).dim(), W.sub(2).dim(), W.sub(3).dim()]
def boundary(x, on_boundary):
return on_boundary
FSpaces = [VelocityF,PressureF,MagneticF,LagrangeF]
DimSave[xx-1,:] = np.array(dim)
kappa = 1.0
Mu_m = 10.0
MU = 1.0
N = FacetNormal(mesh)
IterType = 'Full'
params = [kappa,Mu_m,MU]
n = FacetNormal(mesh)
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4, 1)
MO.PrintStr("Seting up initial guess matricies",2,"=","\n\n","\n")
BCtime = time.time()
BC = MHDsetup.BoundaryIndices(mesh)
MO.StrTimePrint("BC index function, time: ", time.time()-BCtime)
Hiptmairtol = 1e-6
HiptmairMatrices = PrecondSetup.MagneticSetup(mesh, Magnetic, Lagrange, b0, r0, Hiptmairtol, params)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
F_NS = -MU*Laplacian + Advection + gradPres - kappa*NS_Couple
if kappa == 0.0:
F_M = Mu_m*CurlCurl + gradR - kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl + gradR - kappa*M_Couple
u_k = Function(VelocityF)
p_k = Function(PressureF)
b_k = Function(MagneticF)
r_k = Function(LagrangeF)
(u, p, b, r) = TrialFunctions(W)
(v, q, c, s) = TestFunctions(W)
if kappa == 0.0:
m11 = params[1]*inner(curl(b),curl(c))*dx
else:
m11 = params[1]*params[0]*inner(curl(b),curl(c))*dx
m21 = inner(c,grad(r))*dx
m12 = inner(b,grad(s))*dx
a11 = params[2]*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1./2)*div(u_k)*inner(u,v)*dx - (1./2)*inner(u_k,n)*inner(u,v)*ds
a12 = -div(v)*p*dx
a21 = -div(u)*q*dx
CoupleT = params[0]*(v[0]*b_k[1]-v[1]*b_k[0])*curl(b)*dx
Couple = -params[0]*(u[0]*b_k[1]-u[1]*b_k[0])*curl(c)*dx
a = m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT
Lns = inner(v, F_NS)*dx
Lmaxwell = inner(c, F_M)*dx
L = Lns + Lmaxwell
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(PressureF, MU, mesh)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(PressureF, MU, u_k, mesh)
IS = MO.IndexSet(W, 'Blocks')
ones = Function(PressureF)
ones.vector()[:]=(0*ones.vector().array()+1)
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 20 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
u_is = PETSc.IS().createGeneral(W.sub(0).dofmap().dofs())
p_is = PETSc.IS().createGeneral(W.sub(1).dofmap().dofs())
b_is = PETSc.IS().createGeneral(W.sub(2).dofmap().dofs())
r_is = PETSc.IS().createGeneral(W.sub(3).dofmap().dofs())
NS_is = PETSc.IS().createGeneral(range(VelocityF.dim()+PressureF.dim()))
M_is = PETSc.IS().createGeneral(range(VelocityF.dim()+PressureF.dim(),W.dim()))
bcu = DirichletBC(W.sub(0), u0, boundary)
bcp = DirichletBC(W.sub(1),Expression(("0.0"), degree=4), boundary)
bcb = DirichletBC(W.sub(2), b0, boundary)
bcr = DirichletBC(W.sub(3), r0, boundary)
bcs = [bcu, bcb, bcr]
OuterTol = 1e-4
InnerTol = 1e-4
NSits = 0
Mits = 0
TotalStart = time.time()
SolutionTime = 0
errors = np.array([])
bcu1 = DirichletBC(VelocityF,Expression(("0.0","0.0"), degree=4), boundary)
U = x
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
A, b = assemble_system(a, L, bcs)
A, b = CP.Assemble(A,b)
u = x.duplicate()
print " Max rhs = ",np.max(b.array)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(PressureF, MU, u_k, mesh)
ShiftedMass = A.getSubMatrix(u_is, u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(ShiftedMass)
Options = 'p4'
norm = (b-A*U).norm()
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Direct',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
U = u
Soltime = time.time() - stime
MO.StrTimePrint("MHD solve, time: ", Soltime)
Mits += mits
NSits += mits
SolutionTime += Soltime
# u = IO.arrayToVec(u)
f = Function(W)
f.vector()[:] = u.array
f = f.vector()
for bc in bcs:
bc.apply(f)
u = IO.arrayToVec(f.array())
u1 = Function(VelocityF)
p1 = Function(PressureF)
b1 = Function(MagneticF)
r1 = Function(LagrangeF)
u1.vector()[:] = u.getSubVector(u_is).array
p1.vector()[:] = u.getSubVector(p_is).array
b1.vector()[:] = u.getSubVector(b_is).array
r1.vector()[:] = u.getSubVector(r_is).array
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld = np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
X = x
x = IO.arrayToVec(uOld)
err = np.divide((x-X).array, x.array,out=np.zeros_like(X.array), where=x.array!=0)
eps = np.linalg.norm(err)
errors = np.append(errors,eps)
eps = eps/errors[0]
print ' ssss ', eps, ' ', (x-X).norm(), " ", np.linalg.norm(norm)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
ExactSolution = [u0,p0,b0,r0]
errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "CG")
print float(Wdim[xx-1][0])/Wdim[xx-2][0]
if xx > 1:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])/np.log2((float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])**(1./2)))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])/np.log2((float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])**(1./2)))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])/np.log2((float(Pressuredim[xx-1][0])/Pressuredim[xx-2][0])**(1./2)))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1])/np.log2((float(Magneticdim[xx-1][0])/Magneticdim[xx-2][0])**(1./2)))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1])/np.log2((float(Magneticdim[xx-1][0])/Magneticdim[xx-2][0])**(1./2)))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1])/np.log2((float(Lagrangedim[xx-1][0])/Lagrangedim[xx-2][0])**(1./2)))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1])/np.log2((float(Lagrangedim[xx-1][0])/Lagrangedim[xx-2][0])**(1./2)))
import pandas as pd
LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable.to_latex()
print "\n\n Magnetic convergence"
MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
print MagneticTable.to_latex()
print "\n\n Lagrange convergence"
LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
LagrangeValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
pd.set_option('precision',3)
LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,"L2-order","%1.2f")
LagrangeTable = MO.PandasFormat(LagrangeTable,'H1-order',"%1.2f")
print LagrangeTable.to_latex()
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
# Persist the dimension table for post-processing.
MO.StoreMatrix(DimSave, "dim")
# Write the final Picard iterates to ParaView (.pvd) files.
file = File("u_k.pvd")
file << u_k
file = File("p_k.pvd")
file << p_k
file = File("b_k.pvd")
file << b_k
file = File("r_k.pvd")
file << r_k
# Write the interpolated exact solutions for side-by-side comparison.
file = File("u0.pvd")
file << interpolate(u0, VelocityF)
file = File("p0.pvd")
file << interpolate(p0, PressureF)
file = File("b0.pvd")
file << interpolate(b0, MagneticF)
file = File("r0.pvd")
file << interpolate(r0, LagrangeF)
# Pointwise error fields (computed minus interpolated exact) per unknown.
file = File("uError.pvd")
error = Function(VelocityF)
error.vector()[:] = u_k.vector().array()-interpolate(u0, VelocityF).vector().array()
file << error
file = File("pError.pvd")
error = Function(PressureF)
error.vector()[:] = p_k.vector().array()-interpolate(p0, PressureF).vector().array()
file << error
file = File("bError.pvd")
error = Function(MagneticF)
error.vector()[:] = b_k.vector().array()-interpolate(b0, MagneticF).vector().array()
file << error
file = File("rError.pvd")
error = Function(LagrangeF)
error.vector()[:] = r_k.vector().array()-interpolate(r0, LagrangeF).vector().array()
file << error
# Open the legacy FEniCS plot window (blocks until it is closed).
interactive()
|
import difflib
import os
import multiprocessing
import utils
import jdecode
import cardlib
# Directory containing this module, and the sibling ../data directory that
# holds the AllSets.json card database.
libdir = os.path.dirname(os.path.realpath(__file__))
datadir = os.path.realpath(os.path.join(libdir, '../data'))
# Default worker count for the parallel nearest-match queries.
cores = multiprocessing.cpu_count()
def list_split(l, n):
    """Split list l into n roughly equal contiguous chunks.

    Returns l unchanged when n <= 0, and [] for an empty list (the
    original raised ValueError from range(0, 0, 0) in that case). Uses
    floor division so the chunk size stays an int under Python 3 as well.
    """
    if n <= 0:
        return l
    if not l:
        return []
    split_size = len(l) // n
    if len(l) % n > 0:
        split_size += 1
    return [l[i:i+split_size] for i in range(0, len(l), split_size)]
def list_flatten(l):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def f_nearest(name, matchers, n):
    """Score name against every prepared matcher and return the best hits.

    Each matcher's second sequence is a known name. Returns (ratio, name)
    pairs sorted best-first; a perfect match short-circuits to a single
    result, otherwise the top n are returned.
    """
    for matcher in matchers:
        matcher.set_seq1(name)
    scored = sorted(((matcher.ratio(), matcher.b) for matcher in matchers), reverse=True)
    if scored[0][0] >= 1:
        return scored[:1]
    return scored[:n]
def f_nearest_per_thread(workitem):
    """Worker entry point: score one chunk of query names against all names.

    workitem is a (worknames, names, n) tuple. Each worker process builds
    its own SequenceMatcher list, since matcher objects cannot be shared
    across processes.
    """
    worknames, names, n = workitem
    matchers = [difflib.SequenceMatcher(b=known, autojunk=False) for known in names]
    return map(lambda query: f_nearest(query, matchers, n), worknames)
class Namediff:
    """Fuzzy lookup of card names and encoded cards via difflib.

    Reads the MTG JSON database once, then answers nearest-name and
    nearest-card queries either serially (prebuilt matchers) or via a
    multiprocessing pool (matchers rebuilt per worker).
    """
    def __init__(self, verbose = True,
                 json_fname = os.path.join(datadir, 'AllSets.json')):
        self.verbose = verbose
        # name -> original JSON name
        self.names = {}
        # name -> 'setcode/collector-number.jpg' image path, or '' if unknown
        self.codes = {}
        # name -> encoded card text (cardlib format)
        self.cardstrings = {}
        if self.verbose:
            print 'Setting up namediff...'
        if self.verbose:
            print ' Reading names from: ' + json_fname
        json_srcs = jdecode.mtg_open_json(json_fname, verbose)
        namecount = 0
        for json_cardname in sorted(json_srcs):
            if len(json_srcs[json_cardname]) > 0:
                jcards = json_srcs[json_cardname]
                # just use the first one
                idx = 0
                card = cardlib.Card(jcards[idx])
                name = card.name
                jname = jcards[idx]['name']
                jcode = jcards[idx][utils.json_field_info_code]
                if 'number' in jcards[idx]:
                    jnum = jcards[idx]['number']
                else:
                    jnum = ''
                if name in self.names:
                    print ' Duplicate name ' + name + ', ignoring.'
                else:
                    self.names[name] = jname
                    self.cardstrings[name] = card.encode()
                    if jcode and jnum:
                        self.codes[name] = jcode + '/' + jnum + '.jpg'
                    else:
                        self.codes[name] = ''
                    namecount += 1
        print ' Read ' + str(namecount) + ' unique cardnames'
        print ' Building SequenceMatcher objects.'
        # Prebuilt matchers reused by the serial nearest()/nearest_card()
        # queries; the parallel variants rebuild matchers in each worker.
        self.matchers = [difflib.SequenceMatcher(b=n, autojunk=False) for n in self.names]
        self.card_matchers = [difflib.SequenceMatcher(b=self.cardstrings[n], autojunk=False) for n in self.cardstrings]
        print '... Done.'
    def nearest(self, name, n=3):
        # Serial nearest-name query; see f_nearest for the return format.
        return f_nearest(name, self.matchers, n)
    def nearest_par(self, names, n=3, threads=cores):
        # Fan the queries out over a process pool, one chunk per worker.
        workpool = multiprocessing.Pool(threads)
        proto_worklist = list_split(names, threads)
        worklist = map(lambda x: (x, self.names, n), proto_worklist)
        donelist = workpool.map(f_nearest_per_thread, worklist)
        return list_flatten(donelist)
    def nearest_card(self, card, n=5):
        # Serial nearest-card query against the encoded card strings.
        return f_nearest(card.encode(), self.card_matchers, n)
    def nearest_card_par(self, cards, n=5, threads=cores):
        # Parallel variant: cards are encoded before being shipped to the
        # workers, which match against all stored card strings.
        workpool = multiprocessing.Pool(threads)
        proto_worklist = list_split(cards, threads)
        worklist = map(lambda x: (map(lambda c: c.encode(), x), self.cardstrings.values(), n), proto_worklist)
        donelist = workpool.map(f_nearest_per_thread, worklist)
        return list_flatten(donelist)
|
import os
import errno
def list_directory(path):
    """Return the names of the entries in the directory at path."""
    return os.listdir(path)
def list_dir(path):
    """Return the entries of directory path (duplicate of list_directory)."""
    return os.listdir(path)
def change_dir(path):
    """Make path the current working directory (thin os.chdir wrapper)."""
    os.chdir(path)
# Work from the raw data directory for the rest of the script.
change_dir("/home/mancube/Downloads/data/")
#p1 ...
# NOTE(review): this rebinds the name list_directory from the function
# defined above to a list of subject directories (a1, a2, ...).
list_directory= list(list_dir("/home/mancube/Downloads/data")) #a1 a2 ...
def mkdir_p(path):
    """Create path and any missing parents, like `mkdir -p`.

    An already-existing directory is not an error; every other OSError
    is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def write_file(inputfile, outputfile):
    """Convert one raw whitespace-separated sensor file to CSV.

    Writes a header row naming the 45 sensor channels plus a leading
    1-based row-number column; each input line is then written through
    unchanged, prefixed with its row number.

    Fixes vs. the original: files are opened in text mode (the old
    'rb'/'wb+' modes wrote str to binary handles, which fails on
    Python 3), and the dead header_list/columns/list_dir statements are
    removed.
    """
    # Column header: row number plus x/y/z acc, gyro and mag channels for
    # torso (T), right/left arm (RA/LA) and right/left leg (RL/LL).
    list_column = 'number,T1_xacc,T2_yacc,T3_zacc,T4_xgyro,T5_ygyro,T6_zgyro,T7_xmag,T8_ymag,T9_zmag,RA_xacc,RA_yacc,RA_zacc,RA_xgyro,RA_ygyro,RA_zgyro,RA_xmag,RA_ymag,RA_zmag,LA_xacc,LA_yacc,LA_zacc,LA_xgyro,LA_ygyro,LA_zgyro,LA_xmag,LA_ymag,LA_zmag,RL_xacc,RL_yacc,RL_zacc,RL_xgyro,RL_ygyro,RL_zgyro,RL_xmag,RL_ymag,RL_zmag,LL_xacc,LL_yacc,LL_zacc,LL_xgyro,LL_ygyro,LL_zgyro,LL_xmag,LL_ymag,LL_zmag'
    with open(inputfile, 'r') as fin:
        with open(outputfile, 'w+') as fout:
            fout.write(list_column + '\n')
            # enumerate from 1 so the first data row is numbered 1
            for i, line in enumerate(fin, 1):
                fout.write("%s," % i + line)
#add first line
#add first column(len)
# Mirror the directory tree under data/ into data2/, converting every raw
# sensor file to CSV (header + row numbers) via write_file along the way.
for j in range(len(list_directory)):
    list_file = list(list_dir("/home/mancube/Downloads/data/"+list_directory[j]))
    # NOTE(review): bare name expression, has no effect (leftover from an
    # interactive session).
    list_file
    for i in range(len(list_file)):
        #print '/home/mancube/Downloads/data/'+list_directory[j]+'/'+list_file[i]
        a = os.listdir('/home/mancube/Downloads/data/'+list_directory[j]+'/'+list_file[i])
        # NOTE(review): no-op expression, leftover from interactive use.
        a
        mkdir_p('/home/mancube/Downloads/data2/'+list_directory[j])
        for k in range(len(a)):
            #print '/home/mancube/Downloads/data/'+list_directory[j]+'dup'
            #print os.listdir('/home/mancube/Downloads/data2/')
            mkdir_p('/home/mancube/Downloads/data2/'+list_directory[j]+'/'+list_file[i])
            #os.mkdir('/home/mancube/Downloads/data2/'+list_directory[j] )
            write_file('/home/mancube/Downloads/data/'+list_directory[j]+'/'+list_file[i]+'/'+a[k],'/home/mancube/Downloads/data2/'+list_directory[j]+'/'+list_file[i]+'/'+a[k])
|
import os
import datetime
import time
class Utilities():
''' Encapsulates the utility methods used by other parts of the tool '''
ONE_DAY = 24 * 60 * 60
def __init__(self):
pass
def execute(self, call_string):
handle = os.popen(call_string)
return handle.read()
def create_directory(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
def set_directory(self, directory_name):
os.chdir(directory_name)
def convert_timestamp_to_string(self, timestamp):
return datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
def check_last_commit_date(self, commit_date, time_period):
current_milli_time = lambda: int(time.time())
return (current_milli_time() - int(commit_date) <= time_period * self.ONE_DAY)
def ensure_output_directory_exists(self, directory_location, directory_name):
self.create_directory(directory_location + directory_name)
|
from ansiblelint import AnsibleLintRule
class LineTooLong(AnsibleLintRule):
id = 'E602'
shortdesc = 'Line too long'
description = 'Line too long'
tags = ['formatting']
def match(self, file, line):
if len(line) > 80:
self.shortdesc += " ({} characters)".format(len(line))
return True
return False
|
"""
Compute Gradients of a Field
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Estimate the gradient of a scalar or vector field in a data set.
The ordering for the output gradient tuple will be
{du/dx, du/dy, du/dz, dv/dx, dv/dy, dv/dz, dw/dx, dw/dy, dw/dz} for
an input array {u, v, w}.
Showing the :func:`pyvista.DataSetFilters.compute_derivative` filter.
"""
import pyvista as pv
from pyvista import examples
import numpy as np
# Download the example carotid-artery dataset (requires network access).
mesh = examples.download_carotid()
# Bare expression: sphinx-gallery renders the object's repr in the docs.
mesh
# Estimate the per-point gradient of the 3-component 'vectors' array; the
# result is stored in mesh_g["gradient"], one 9-component row per point.
mesh_g = mesh.compute_derivative(scalars="vectors")
mesh_g["gradient"]
def gradients_to_dict(arr):
    """Label the columns of a gradient array as a {name: column} dict.

    arr is an (n_points, n_components) array as returned in
    ``mesh_g["gradient"]``: 9 components for a vector field, 3 for a
    scalar field. zip() truncates the key list to the number of columns
    actually present.

    Fix: the original returned ``mesh_g["gradient"].T`` -- the global
    array -- instead of using its own argument, so it only worked by
    coincidence when called with that exact global.
    """
    keys = np.array(["du/dx", "du/dy", "du/dz", "dv/dx", "dv/dy", "dv/dz", "dw/dx", "dw/dy", "dw/dz"])
    keys = keys.reshape((3, 3))[:, :arr.shape[1]].ravel()
    return dict(zip(keys, arr.T))
# Attach each labelled gradient component to the mesh as a point array.
gradients = gradients_to_dict(mesh_g["gradient"])
gradients
mesh_g.point_arrays.update(gradients)
mesh_g
# One linked subplot per gradient component, arranged in a 3x3 grid.
keys = np.array(list(gradients.keys())).reshape(3,3)
p = pv.Plotter(shape=keys.shape)
for i in range(keys.shape[0]):
    for j in range(keys.shape[1]):
        name = keys[i,j]
        p.subplot(i,j)
        # Contour the component and show the dataset outline for context.
        p.add_mesh(mesh_g.contour(scalars=name), scalars=name, opacity=0.75)
        p.add_mesh(mesh_g.outline(), color="k")
p.link_views()
p.view_isometric()
p.show()
# Repeat for the scalar field: its gradient has 3 components, so the key
# helper yields du/dx, du/dy, du/dz and the subplot grid is 1x3.
mesh_g = mesh.compute_derivative(scalars="scalars")
gradients = gradients_to_dict(mesh_g["gradient"])
gradients
mesh_g.point_arrays.update(gradients)
keys = np.array(list(gradients.keys())).reshape(1,3)
p = pv.Plotter(shape=keys.shape)
for i in range(keys.shape[0]):
    for j in range(keys.shape[1]):
        name = keys[i,j]
        p.subplot(i,j)
        p.add_mesh(mesh_g.contour(scalars=name), scalars=name, opacity=0.75)
        p.add_mesh(mesh_g.outline(), color="k")
p.link_views()
p.view_isometric()
p.show()
|
from PIL import Image, ImageTk, ImageDraw
from collections import namedtuple
import tkinter as tk
# Simple 2-D coordinate pair used for the overlay-region corners.
Point = namedtuple('Point', ['x', 'y'])
class Application(tk.Frame):
    """Minimal FLIGHT STONE viewer.

    Displays video frames in a Tk label and can overlay a rectangular
    region on top of them. Frame hand-off between the producer thread and
    the GUI is coordinated through the img_update/last_frame pair.
    """

    def __init__(self, master):
        super().__init__(master, width=640, height=480, bg='white')
        self.width = 640
        self.height = 480
        # Keep a handle on the Tk root so we can retitle and destroy it.
        self.root = master
        self.root.title('Minimal Application FLIGHT STONE')
        # Route window-manager close events through our own shutdown hook.
        self.root.protocol('WM_DELETE_WINDOW', self.on_quit)
        self._video_holder = tk.Label(self)
        self.pack()
        # The GUI initiated successfully.
        self.status = True
        # Cross-thread handshake flags and payloads.
        self.region_to_draw = False
        self.region = (Point(x=0, y=0), Point(x=0, y=0))
        self.img_update = False
        self.last_frame = None

    def setRegionToDraw(self, p1, p2):
        """Enable the overlay rectangle spanning corners p1 and p2."""
        self.region_to_draw = True
        self.region = (Point(x=p1[0], y=p1[1]), Point(x=p2[0], y=p2[1]))

    def updateVideoHolder(self):
        """Render the most recently delivered frame into the video label."""
        if not self.img_update:
            return
        self.img_update = False
        frame_img = Image.fromarray(self.last_frame, 'RGB')
        # All extra drawings to be applied to the current frame.
        self.applyExtraDrawings(frame_img)
        # Swap the rendered image into the label; keep a reference on self
        # so Tk does not garbage-collect the PhotoImage.
        self.photo = ImageTk.PhotoImage(frame_img)
        self._video_holder.imgtk = self.photo
        self._video_holder.config(image=self.photo)
        self._video_holder.pack()

    def updateVideoState(self, frame):
        """Hand a new frame to the GUI unless one is already pending."""
        if not self.img_update:
            self.last_frame = frame
            self.img_update = True

    def applyExtraDrawings(self, img):
        """Draw the selected region as a red rectangle outline on img."""
        if not self.region_to_draw:
            return
        top_left, bottom_right = self.region
        corners = [
            (top_left.x, top_left.y),
            (bottom_right.x, top_left.y),
            (bottom_right.x, bottom_right.y),
            (top_left.x, bottom_right.y),
            (top_left.x, top_left.y),
        ]
        ImageDraw.Draw(img).line(corners, fill=(255, 0, 0, 255))

    def on_quit(self):
        """Mark the GUI stopped and tear down the Tk root window."""
        self.status = False
        self.root.destroy()
|
"""
Module with standard logging hook.
"""
import logging
from typing import Iterable
import numpy as np
from . import AbstractHook
from ..types import EpochData
class LogVariables(AbstractHook):
    """
    Log the training results to stderr via standard :py:mod:`logging` module.

    .. code-block:: yaml
        :caption: log all the variables

        hooks:
          - LogVariables

    .. code-block:: yaml
        :caption: log only certain variables

        hooks:
          - LogVariables:
              variables: [loss]

    .. code-block:: yaml
        :caption: warn about unsupported variables

        hooks:
          - LogVariables:
              on_unknown_type: warn
    """

    UNKNOWN_TYPE_ACTIONS = ['error', 'warn', 'str', 'ignore']
    """Posible actions to take on unknown variable type."""

    def __init__(self, variables: Iterable[str]=None, on_unknown_type='ignore', **kwargs) -> None:
        """
        Create new LogVariables hook.

        :param variables: variable names to be logged; log all the variables by default
        :param on_unknown_type: an action to be taken if the variable type is not supported (e.g. a list);
            must be one of :py:attr:`UNKNOWN_TYPE_ACTIONS`
        """
        assert on_unknown_type in LogVariables.UNKNOWN_TYPE_ACTIONS
        self._variables = variables
        self._on_unknown_type = on_unknown_type
        super().__init__(**kwargs)

    def _log_variables(self, epoch_data: EpochData) -> None:
        """
        Log variables from the epoch data.

        .. warning::
            At the moment, only scalars and dicts of scalars are properly formatted and logged.
            Other value types are ignored by default.
            One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.

        :param epoch_data: epoch data to be logged
        :raise KeyError: if the specified variable is not found in the stream
        :raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
        """
        for stream_name in epoch_data.keys():
            stream_data = epoch_data[stream_name]
            # default to logging every variable present in the stream
            variables = self._variables if self._variables is not None else stream_data.keys()
            for variable in variables:
                if variable not in stream_data:
                    raise KeyError('Variable `{}` to be logged was not found in the batch data for stream `{}`. '
                                   'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
                value = stream_data[variable]
                if np.isscalar(value):
                    logging.info('\t%s %s: %f', stream_name, variable, value)
                elif isinstance(value, dict):
                    keys = list(value.keys())
                    if len(keys) == 1:
                        # a single-entry dict fits on one line
                        logging.info('\t%s %s %s: %f', stream_name, variable, keys[0], value[keys[0]])
                    else:
                        # multi-entry dict: header line plus one line per key
                        logging.info('\t%s %s:', stream_name, variable)
                        for key, val in value.items():
                            logging.info('\t\t%s: %f', key, val)
                else:
                    # unsupported value type: act per self._on_unknown_type
                    # ('ignore' falls through all branches and logs nothing)
                    if self._on_unknown_type == 'error':
                        raise TypeError('Variable type `{}` can not be logged. Variable name: `{}`.'
                                        .format(type(value).__name__, variable))
                    elif self._on_unknown_type == 'warn':
                        logging.warning('Variable type `%s` can not be logged. Variable name: `%s`.',
                                        type(value).__name__, variable)
                    elif self._on_unknown_type == 'str':
                        logging.info('\t%s %s: %s', stream_name, variable, value)

    def after_epoch(self, epoch_id: int, epoch_data: EpochData) -> None:
        """
        Log the epoch data via :py:mod:`logging` API.

        :param epoch_id: number of processed epoch (unused here)
        :param epoch_data: epoch data to be logged
        """
        # NOTE(review): an earlier docstring claimed a delimiting blank line
        # is printed directly to stderr, but no such print exists in this
        # body -- confirm against upstream before relying on it.
        self._log_variables(epoch_data)
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``layout.scene.xaxis.tickfont.color`` property."""

    def __init__(
        self, plotly_name="color", parent_name="layout.scene.xaxis.tickfont", **kwargs
    ):
        # Default the edit type and role unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
import codecs
import json
from bottle import request
from conans import DEFAULT_REVISION_V1
from conans.errors import NotFoundException, RecipeNotFoundException, PackageNotFoundException
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import CONAN_MANIFEST
from conans.server.rest.bottle_routes import BottleRoutes
from conans.server.service.v1.service import ConanService
class ConanController(object):
    """
    Serve requests related with Conan
    """
    @staticmethod
    def attach_to(app):
        # Register all v1 recipe/package HTTP endpoints on the bottle app.
        # Each nested handler closes over `app`, builds a fresh ConanService
        # per request authorized as `auth_user`, and normalises Windows path
        # separators in the returned file maps.
        r = BottleRoutes()

        @app.route(r.v1_recipe_digest, method=["GET"])
        def get_recipe_manifest_url(name, version, username, channel, auth_user):
            """
            Get a dict with all files and the download url
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel)
            # only the manifest file (CONAN_MANIFEST) URL is requested here
            urls = conan_service.get_conanfile_download_urls(ref, [CONAN_MANIFEST])
            if not urls:
                raise RecipeNotFoundException(ref)
            return urls

        @app.route(r.v1_package_digest, method=["GET"])
        def get_package_manifest_url(name, version, username, channel, package_id, auth_user):
            """
            Get a dict with all files and the download url
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel)
            pref = PackageReference(ref, package_id)
            urls = conan_service.get_package_download_urls(pref, [CONAN_MANIFEST])
            if not urls:
                raise PackageNotFoundException(pref)
            # normalise backslashes so clients always see forward-slash paths
            urls_norm = {filename.replace("\\", "/"): url for filename, url in urls.items()}
            return urls_norm

        @app.route(r.recipe, method=["GET"])
        def get_recipe_snapshot(name, version, username, channel, auth_user):
            """
            Get a dictionary with all files and their each md5s
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel)
            snapshot = conan_service.get_recipe_snapshot(ref)
            snapshot_norm = {filename.replace("\\", "/"): the_md5
                             for filename, the_md5 in snapshot.items()}
            return snapshot_norm

        @app.route(r.package, method=["GET"])
        def get_package_snapshot(name, version, username, channel, package_id, auth_user):
            """
            Get a dictionary with all files and their each md5s
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel)
            pref = PackageReference(ref, package_id)
            snapshot = conan_service.get_package_snapshot(pref)
            snapshot_norm = {filename.replace("\\", "/"): the_md5
                             for filename, the_md5 in snapshot.items()}
            return snapshot_norm

        @app.route(r.v1_recipe_download_urls, method=["GET"])
        def get_conanfile_download_urls(name, version, username, channel, auth_user):
            """
            Get a dict with all files and the download url
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel)
            try:
                urls = conan_service.get_conanfile_download_urls(ref)
            except NotFoundException:
                # translate the generic not-found into the recipe-specific one
                raise RecipeNotFoundException(ref)
            urls_norm = {filename.replace("\\", "/"): url for filename, url in urls.items()}
            return urls_norm

        @app.route(r.v1_package_download_urls, method=["GET"])
        def get_package_download_urls(name, version, username, channel, package_id,
                                      auth_user):
            """
            Get a dict with all packages files and the download url for each one
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel)
            pref = PackageReference(ref, package_id)
            try:
                urls = conan_service.get_package_download_urls(pref)
            except NotFoundException:
                raise PackageNotFoundException(pref)
            urls_norm = {filename.replace("\\", "/"): url for filename, url in urls.items()}
            return urls_norm

        @app.route(r.v1_recipe_upload_urls, method=["POST"])
        def get_conanfile_upload_urls(name, version, username, channel, auth_user):
            """
            Get a dict with all files and the upload url
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            # uploads through the v1 API always target DEFAULT_REVISION_V1
            ref = ConanFileReference(name, version, username, channel, DEFAULT_REVISION_V1)
            # request body is a JSON map of filename -> size in bytes
            reader = codecs.getreader("utf-8")
            filesizes = json.load(reader(request.body))
            urls = conan_service.get_conanfile_upload_urls(ref, filesizes)
            urls_norm = {filename.replace("\\", "/"): url for filename, url in urls.items()}
            # side effect: bump the recipe's last-revision timestamp
            app.server_store.update_last_revision(ref)
            return urls_norm

        @app.route(r.v1_package_upload_urls, method=["POST"])
        def get_package_upload_urls(name, version, username, channel, package_id, auth_user):
            """
            Get a dict with all files and the upload url
            """
            conan_service = ConanService(app.authorizer, app.server_store, auth_user)
            ref = ConanFileReference(name, version, username, channel, DEFAULT_REVISION_V1)
            pref = PackageReference(ref, package_id, DEFAULT_REVISION_V1)
            reader = codecs.getreader("utf-8")
            filesizes = json.load(reader(request.body))
            urls = conan_service.get_package_upload_urls(pref, filesizes)
            urls_norm = {filename.replace("\\", "/"): url for filename, url in urls.items()}
            # side effect: bump the package's last-revision timestamp
            app.server_store.update_last_package_revision(pref)
            return urls_norm
|
#!flask/bin/python
"""
Author: StackFocus
File: app.py
Purpose: runs the app!
"""
from swagip import app
# Layer the deployment settings from config.py on top of the defaults
# baked into the swagip package.
app.config.from_object('config')
# Start the Flask development server only when executed directly.
if __name__ == "__main__":
    app.run()
|
from logger import logger
from cache import Cache
import db
class Costume:
    """Wraps the cached info dict for one costume of a given job/level.

    All accessors read from self._info, which is seeded from the Cache
    and augmented with the identifying fields in __init__.
    """
    def __init__(self, costume_id, job, level):
        # Cache.costume returns the cached base info dict for this costume;
        # the identifying fields are stored alongside it.
        self._info = Cache.costume(job, costume_id)
        self._info['costume_id'] = costume_id
        self._info['job'] = job
        self._info['level'] = level
    def costume_id(self): return self._info['costume_id']
    def job(self): return self._info['job']
    def level(self): return self._info['level']
    def market_price(self): return self._info['market_price']
    # FIX: the parameter was misspelled 'selF', so the body's reference to
    # 'self' raised NameError whenever this accessor was called.
    def make_honbul(self): return self._info['make_honbul']
    def ug_honbul(self):
        # not implemented yet
        pass
    def level_up(self, user_id):
        # not implemented yet
        pass
    def get_attrs(self):
        # not implemented yet
        pass
|
'''
Created on Jul 29, 2012
@author: rafaelolaechea
'''
import subprocess
from AppendPartialInstanceAndGoals import generate_and_append_partial_instances_and_goals
from spl_claferanalyzer import SPL_ClaferAnalyzer
def execute_main():
spl_names = ["apacheicse212", "berkeleydbqualityjournal", "berkeleydbsplc2011",
"linkedlistsplc2011", "pkjabsplc2011", "prevaylersplc2011", "sqlitesplc2011",
"zipmesplc2011"]
satisfiable_partialconfigurations = [
"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_1.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_2.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_3.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_7.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_9.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_10.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_11.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_12.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_13.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/apacheicse212_14.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_5.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_16.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_17.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_19.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_20.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_25.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_27.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_30.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_32.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbqualityjournal_40.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_1.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_2.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_4.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_5.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_6.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_8.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_11.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_13.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_14.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/berkeleydbsplc2011_15.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_14.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_18.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_19.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_24.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_33.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_37.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_38.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_40.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_46.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/pkjabsplc2011_50.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_1.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_3.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_4.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_5.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_6.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_10.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_11.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_12.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_13.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/prevaylersplc2011_15.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_22.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_27.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_31.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_52.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_94.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_104.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_123.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_129.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_148.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/sqlitesplc2011_192.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_1.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_2.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_6.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_8.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_11.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_12.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_13.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_17.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_31.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/zipmesplc2011_32.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_9.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_16.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_42.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_46.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_98.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_137.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_170.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_239.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_276.cfr"
, "/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/satisfiable_partial_configurations_dataset/linkedlistsplc2011_283.cfr"
]
satisfiable_partialconfigurations = [
"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/apacheicse212.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/berkeleydbqualityjournal.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/berkeleydbsplc2011.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/linkedlistsplc2011.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/pkjabsplc2011.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/prevaylersplc2011.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/sqlitesplc2011.cfr"
,"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/zipmesplc2011.cfr"]
satisfiable_partialconfigurations = [
"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/non_configured_dataset/sqlitesplc2011_no_omit.cfr"]
for clafer_fname in satisfiable_partialconfigurations:
print ", \"%s\"" % clafer_fname
subprocess.check_output(["clafer", '--mode=xml','--nr', clafer_fname], stderr=subprocess.STDOUT)
subprocess.check_output(["clafer", '--nr', clafer_fname], stderr=subprocess.STDOUT)
als_fp = open(clafer_fname[:-4] + ".als", "a")
generate_and_append_partial_instances_and_gaols(clafer_fname[:-4] + ".xml", als_fp)
als_fp.close()
"""
REWRITE ZIPME
for clafer_fname in satisfiable_partialconfigurations:
if clafer_fname.find("zipmesplc2011")!=-1:
fd_clafer = open(clafer_fname, "r")
zipme_contents = fd_clafer.readlines()
fd_clafer.close()
fd_rewrite_clafer = open(clafer_fname, "w")
for line in zipme_contents:
if line.find("simpleZip : simpleConfig") != -1:
fd_rewrite_clafer.write("simpleConfig : ZipMeSPL\n")
elif line.find("<< min simpleZip.total_footprint >>") != -1:
fd_rewrite_clafer.write("<< min simpleConfig.total_footprint >>\n")
else :
fd_rewrite_clafer.write(line)
fd_rewrite_clafer.close()
"""
execute_main()
|
"""
:Program: WAFNinja
:ModuleName: db
:Version: 1.0
:Revision: 1.0.0
:Author: Khalil Bijjou
:Description: The db module is responsible for the interaction with the database.
"""
import sqlite3
def getPayload(type, waf):
"""
:Description: This function retrieves Payloads from the database.
:param type: Type of the Payload [sql | xss].
:type type: String
:param waf: Payloads linked to a WAF
:type waf: String
:return: List of payloads
"""
conn = sqlite3.connect("db/db.sqlite")
c = conn.cursor()
list = [type]
sql = '''SELECT payload from payload where type=? '''
if waf is not None:
list.append(waf)
sql = sql + 'and waf=? '
c.execute(sql, (list))
output = []
for value in c.fetchall(): #the first item is the real payload
output.append(value[0])
try:
return output
finally:
conn.close()
def setPayload(input, type, waf):
"""
:Description: This function adds a Payload to the database.
:param input: The Payload
:type input: String
:param type: Type of the Payload [sql | xss].
:type type: String
:param waf: The WAF the payload is going to be linked with
:type waf: String
"""
conn = sqlite3.connect("db/db.sqlite")
c = conn.cursor()
if waf is None:
waf = 'generic'
list = [input, type, waf]
sql = '''Insert into payload (payload, type, waf) VALUES (?, ?, ?)'''
c.execute(sql, (list))
conn.commit()
conn.close()
print 'Payload inserted successfully!'
def getFuzz(type):
"""
:Description: This function retrieves Fuzzing strings from the database.
:param type: Type of the Fuzzing string [sql | xss].
:type type: String
:return: List of Fuzzing strings
"""
conn = sqlite3.connect("db/db.sqlite")
c = conn.cursor()
list = [type]
sql = '''SELECT fuzz, expected from fuzz where type=?'''
c.execute(sql, list)
output = []
for value in c.fetchall(): #the first item is the real payload
output.append([value[0], value[1]])
try:
return output
finally:
conn.close()
def setFuzz(input, expected, type):
"""
:Description: This function adds a Fuzzing string to the database.
:param input: The Fuzzing string
:type input: String
:param expected: The expected output if the fuzzing string is included in a web server's response. Useful if the input is encoded in any way.
:type expected: String
:param type: Type of the Fuzzing string [sql | xss].
:type type: String
"""
conn = sqlite3.connect("db/db.sqlite")
c = conn.cursor()
list = [input, expected, type]
sql = '''Insert into fuzz (fuzz, expected, type) VALUES (?, ?, ?)'''
c.execute(sql, (list))
conn.commit()
conn.close()
print 'Fuzz inserted successfully!'
|
import re
import io
import csv
def CSVDEFILTER(inputfilename, outputfilename):
"Change CSVDE output CSV file to simple user based CSV file for import"
inputfile = open(inputfilename, "r")
outputfile = open(outputfilename, "w")
scsv = ''
for line in inputfile:
if re.search(",user,", line):
scsv += line + '\n'
f = io.StringIO(scsv)
reader = csv.reader(f, delimiter=',')
outputfile.write('objectClass,sAMAccountName,dn')
for row in reader:
if (len(row) > 0):
if (row[50] != ''):
if (not row[50].startswith('CN=')):
outputfile.write('\n' + row[1] + ',' + row[82] + '," ' + row[0] + '"')
inputfile.close()
outputfile.close()
return;
|
from contextlib import closing
from django.db import connection
import csv
import re
def dump_table(table_name, pk_column, batch_size, dest_file):
with closing(connection.cursor()) as cursor:
cursor.execute('SELECT COUNT(*) FROM {}'.format(table_name))
count, = cursor.fetchone()
for offset in range(0, count, batch_size):
with closing(connection.cursor()) as cursor:
cursor.execute('SELECT * FROM {} ORDER BY {} LIMIT {} OFFSET {}'.format(table_name, pk_column, batch_size, offset))
dump_cursor(
cursor,
dest_file,
append=(offset > 0)
)
def dump_cursor(cursor, dest_file, append=False):
headers = [re.sub(r'_id$', '', col[0]) for col in cursor.description]
with open(dest_file, 'a' if append else 'w') as csvfile:
writer = csv.writer(csvfile)
if not append:
writer.writerow(headers)
for row in cursor:
row = [str(val) for val in row]
writer.writerow(row)
def is_on_postgresql():
return connection.settings_dict['ENGINE'] == 'django.db.backends.postgresql_psycopg2'
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='imageralbum',
name='published',
field=models.CharField(default=b'public', max_length=7, choices=[(b'public', b'Public'), (b'private', b'Private'), (b'shared', b'Shared')]),
preserve_default=True,
),
migrations.AlterField(
model_name='imageralbum',
name='title',
field=models.CharField(default=b'MyAlbum', max_length=63),
preserve_default=True,
),
migrations.AlterField(
model_name='imagerphoto',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='imagerphoto',
name='published',
field=models.CharField(default=b'public', max_length=31, choices=[(b'public', b'Public'), (b'private', b'Private'), (b'shared', b'Shared')]),
preserve_default=True,
),
migrations.AlterField(
model_name='imagerphoto',
name='title',
field=models.CharField(default=b'MyPhoto', max_length=31),
preserve_default=True,
),
]
|
from __future__ import unicode_literals
from .base import *
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = True
ADMINS = (('John', 'john@example.com'), ) # Log email to console when DEBUG = False
SECRET_KEY = "DEV"
ALLOWED_HOSTS = ['127.0.0.1', '.quantzen.cn', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES.update({
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'st_rate_limit': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'spirit_rl_cache',
'TIMEOUT': None
}
})
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
"""
Backend for test environment.
"""
from django.core import mail
from django.core.mail.backends.locmem import EmailBackend
class SMSBackend(EmailBackend):
"""A SMS backend for use during test sessions.
The test connection stores SMS messages in a dummy outbox,
rather than sending them out on the wire.
The dummy outbox is accessible through the outbox instance attribute.
"""
def __init__(self, *args, **kwargs):
super(SMSBackend, self).__init__(*args, **kwargs)
if not hasattr(mail, 'outbox'):
mail.outbox = []
def send_messages(self, messages):
"""Redirect messages to the dummy outbox"""
msg_count = 0
for message in messages: # .message() triggers header validation
message.message()
msg_count += 1
mail.outbox.extend(messages)
return msg_count
|
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import numpy as np
from numpy.testing import assert_array_equal
from .. import converters
from .. import exceptions
from .. import tree
from ....tests.helper import raises, catch_warnings
@raises(exceptions.E13)
def test_invalid_arraysize():
field = tree.Field(
None, name='broken', datatype='char', arraysize='foo')
converters.get_converter(field)
def test_oversize_char():
config = {'pedantic': True}
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert len(w) == 1
with catch_warnings(exceptions.W46) as w:
c.parse("XXX")
assert len(w) == 1
def test_char_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
def test_oversize_unicode():
config = {'pedantic': True}
with catch_warnings(exceptions.W46) as w:
field = tree.Field(
None, name='c2', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
c.parse("XXX")
assert len(w) == 1
def test_unicode_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
@raises(exceptions.E02)
def test_wrong_number_of_elements():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='int', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
@raises(ValueError)
def test_float_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('') == (c.null, True)
c.parse('null')
def test_float_mask_permissive():
config = {'pedantic': False}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('null') == (c.null, True)
@raises(exceptions.E02)
def test_complex_array_vararray():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
def test_complex_array_vararray2():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("")
assert len(x[0]) == 0
def test_complex_array_vararray3():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12")
assert len(x) == 2
assert np.all(x[0][0][0] == complex(1, 2))
def test_complex_vararray():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4")
assert len(x) == 2
assert x[0][0] == complex(1, 2)
@raises(exceptions.E03)
def test_complex():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3")
@raises(exceptions.E04)
def test_bit():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("T")
def test_bit_mask(recwarn):
config = {'pedantic': True}
with catch_warnings(exceptions.W39) as w:
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
c.output(True, True)
assert len(w) == 1
@raises(exceptions.E05)
def test_boolean():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='boolean',
config=config)
c = converters.get_converter(field, config=config)
c.parse('YES')
def test_boolean_array():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='boolean', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
r, mask = c.parse('TRUE FALSE T F 0 1')
assert_array_equal(r, [True, False, True, False, False, True])
@raises(exceptions.E06)
def test_invalid_type():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='foobar',
config=config)
c = converters.get_converter(field, config=config)
def test_precision():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float', precision="E4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2'
field = tree.Field(
None, name='c', datatype='float', precision="F4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2480'
@raises(exceptions.W51)
def test_integer_overflow():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='int', config=config)
c = converters.get_converter(field, config=config)
c.parse('-2208988800', config=config)
def test_float_default_precision():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float', arraysize="4",
config=config)
c = converters.get_converter(field, config=config)
assert (c.output([1, 2, 3, 8.999999], [False, False, False, False]) ==
'1 2 3 8.9999990000000007')
def test_vararray():
votable = tree.VOTableFile()
resource = tree.Resource()
votable.resources.append(resource)
table = tree.Table(votable)
resource.tables.append(table)
tabarr = []
heads = ['headA', 'headB', 'headC']
types = ["char", "double", "int"]
vals = [["A", 1.0, 2],
["B", 2.0, 3],
["C", 3.0, 4]]
for i in range(len(heads)):
tabarr.append(tree.Field(
votable, name=heads[i], datatype=types[i], arraysize="*"))
table.fields.extend(tabarr)
table.create_arrays(len(vals))
for i in range(len(vals)):
values = tuple(vals[i])
table.array[i] = values
buff = io.BytesIO()
votable.to_xml(buff)
|
from abc import abstractmethod
import logging
import zmq
from zmq.eventloop import zmqstream
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class Connector:
def __init__(self, name, end_point, context=None):
self._context = context or zmq.Context.instance()
self._name = name
self._end_point = end_point
@abstractmethod
def connect(self):
pass
@abstractmethod
def _on_recv(self, stream, msg):
pass
@abstractmethod
def send(self, data):
pass
class CommandConnector(Connector):
def __init__(self, name, end_point, context=None):
Connector.__init__(self, name, end_point, context)
self._sockt = self._context.socket(zmq.REQ)
self._sockt.setsockopt(zmq.IDENTITY, self._name)
self._stream = zmqstream.ZMQStream(self._sockt)
self.log = logger.getChild('command-connector')
def connect(self):
self.log.info(' try connect to {0} '.format(self._end_point))
self._sockt.connect(str(self._end_point))
self.log.info('connected to {0} '.format(self._end_point))
def send(self, data):
self._sockt.send(data, copy=False)
class QueryHandlerConnector(Connector):
def __init__(self, name, end_point, context=None):
Connector.__init__(self, name, end_point, context)
|
"""
get data from a Geras time-series database.
There are two main options:
--list lists all time series in the Geras database
--get gets a specified time series
In all cases a Geras key must be given using the --key option. If no
key if given on the command line, the user will be prompted for it, in
which case it will not be visible on the screen.
--list requires one parameter:
--list all lists all time series in the database
-- list <pattern> lists all time series with a string that matches <pattern>
--get can optionally be supplied with a start and end time, using:
--start <time>
--end <time>
The time parameter must be in the format: '18-10-2014 11:05:02'. The quotes
are required.
Examples (you need to use your own key for these to work):
./getgeras.py --key c685297d8c0f710e3bd1c8e771eb8d3d --list all
./getgeras.py --key c685297d8c0f710e3bd1c8e771eb8d3d --list BID8
./getgeras.py --key c485f97d8c0f410e3bdbc8e771eb8d2d --get /BID8/Kitchen/binary
./getgeras.py --key c485f97d8c0f410e3bdbc8e771eb8d2d --get /BID8/Kitchen/binary --start '2014-10-15 09:00:00' --end '2014-10-18 09:00:00'
"""
gerasurl = 'http://geras.1248.io/'
import requests
import json
import time
import click
import os, sys
def nicetime(timeStamp):
localtime = time.localtime(timeStamp)
milliseconds = '%03d' % int((timeStamp - int(timeStamp)) * 1000)
now = time.strftime('%Y-%m-%d %H:%M:%S', localtime)
return now
def epochtime(date_time):
pattern = '%Y-%m-%d %H:%M:%S'
epoch = int(time.mktime(time.strptime(date_time, pattern)))
return epoch
@click.command()
@click.option('--list', nargs=1, help='Lists time series on Geras. --list all or --list <pattern>.')
@click.option('--get', nargs=1, help='Gets time series from Geras. --get <name of time series.')
@click.option('--start', nargs=1, help='Start time for getting time series in the format: 18-10-2014 11:05:02.')
@click.option('--end', nargs=1, help='End time for getting time series in the format: 18-10-2014 11:05:02.')
@click.option('--key', prompt='Geras API key', help='Your Geras API key. See http://geras.1248.io/user/apidoc.')
def pulldata(list, key, get, start, end):
    """List the series stored on Geras, or fetch one series and print its samples.

    Exactly one of --list / --get should be used; --start/--end optionally
    restrict the fetched time range.  (Fix: the original used Python-2-only
    `print x` statements and bare `exit()`; `print(x)` behaves identically
    on Python 2 for a single expression and is valid Python 3.)
    """
    if list:
        r = requests.get('http://geras.1248.io/serieslist', auth=(key, ''))
        timeseries = json.loads(r.content)
        if list.lower() == "all":
            print(json.dumps(timeseries, indent=4))
        else:
            # Substring match against every series name.
            for t in timeseries:
                if list in t:
                    print(t)
    elif get:
        if start:
            startTime = epochtime(start)
            # Default the end of the range to "now" when only --start is given.
            if end:
                endTime = epochtime(end)
            else:
                endTime = time.time()
            url = gerasurl + 'series/' + get + '?start=' + str(startTime) + '&end=' + str(endTime)
        elif end:
            print("If you specify an end time, you must also specify a start time")
            sys.exit()
        else:
            url = gerasurl + 'series/' + get
        r = requests.get(url, auth=(key, ''))
        timeseries = json.loads(r.content)
        #print(json.dumps(timeseries, indent=4))
        for t in timeseries["e"]:
            print(nicetime(t["t"]) + ' ' + str("%2.1f" % t["v"]))
    else:
        print("You must use --list, --get or --help.")


if __name__ == '__main__':
    pulldata()
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from panda3d.core import SamplerState, Vec4
from rpcore.render_stage import RenderStage
from rpcore.image import Image
class CloudVoxelStage(RenderStage):
    """ This stage generates the volumetric cloud voxel grid """

    required_pipes = ["ScatteringIBLDiffuse"]

    def __init__(self, pipeline):
        RenderStage.__init__(self, pipeline)
        # Horizontal (x/y) and vertical (z) resolution of the voxel grid.
        self._voxel_res_xy = 256
        self._voxel_res_z = 32

    @property
    def produced_pipes(self):
        """Expose the voxel grid texture to downstream stages."""
        return {"CloudVoxels": self._cloud_voxels}

    @property
    def produced_defines(self):
        """Shader defines describing the voxel grid resolution."""
        return {
            "CLOUD_RES_XY": self._voxel_res_xy,
            "CLOUD_RES_Z": self._voxel_res_z
        }

    def create(self):
        """Construct the voxel texture and the two targets that fill it."""
        # Voxel texture: tiles in x/y, clamps to transparent black along z.
        self._cloud_voxels = Image.create_3d(
            "CloudVoxels", self._voxel_res_xy, self._voxel_res_xy, self._voxel_res_z, "RGBA8")
        self._cloud_voxels.set_wrap_u(SamplerState.WM_repeat)
        self._cloud_voxels.set_wrap_v(SamplerState.WM_repeat)
        self._cloud_voxels.set_wrap_w(SamplerState.WM_border_color)
        self._cloud_voxels.set_border_color(Vec4(0, 0, 0, 0))

        # Target which populates the voxel texture, one instance per z-slice.
        self._grid_target = self._make_slice_target("CreateVoxels")
        self._grid_target.set_shader_input("CloudVoxels", self._cloud_voxels)

        # Target which shades the generated voxels in place.
        self._shade_target = self._make_slice_target("ShadeVoxels")
        self._shade_target.set_shader_input("CloudVoxels", self._cloud_voxels)
        self._shade_target.set_shader_input("CloudVoxelsDest", self._cloud_voxels)

    def _make_slice_target(self, name):
        """Create a buffer target rendering one instanced quad per z-slice."""
        target = self.create_target(name)
        target.size = self._voxel_res_xy, self._voxel_res_xy
        target.prepare_buffer()
        target.quad.set_instance_count(self._voxel_res_z)
        return target

    def reload_shaders(self):
        """(Re)load the voxel generation and shading shaders."""
        vertex = "/$$rp/shader/default_post_process_instanced.vert.glsl"
        self._grid_target.shader = self.load_plugin_shader(
            vertex, "generate_clouds.frag.glsl")
        self._shade_target.shader = self.load_plugin_shader(
            vertex, "shade_clouds.frag.glsl")
|
'''
'''
from .base import (
ToolBase,
# execute_in_main_thread
)
class RotateTool(ToolBase):
    """Tool wrapper that activates the host application's rotate manipulator."""
    # default settings override
    # polling = 0.066 # in seconds
    # threshold = 0.2

    # NOTE(review): capitalized "RotateSuperContext" differs from the
    # lowercase move/scale contexts below; this matches Maya's built-in
    # context names, but confirm against the target application.
    tool_context = "RotateSuperContext"
class MoveTool(ToolBase):
    """Tool wrapper that activates the host application's move manipulator."""
    # default settings override
    # polling = 0.066 # in seconds
    # threshold = 0.2

    # Name of the built-in move manipulator context to activate.
    tool_context = "moveSuperContext"
class ScaleTool(ToolBase):
    """Tool wrapper that activates the host application's scale manipulator."""
    # default settings override
    # polling = 0.066 # in seconds
    # threshold = 0.2

    # Name of the built-in scale manipulator context to activate.
    tool_context = "scaleSuperContext"
|
"""
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
Solution comments:
Answer is the product of all primes in all the divisors, with each
prime beeing included only as many times as the maximum multiplisity of that
prime in the divisors.
"""
def primes(n):
    """Return dict mapping each prime factor of n to its multiplicity.

    primes(20) -> {2: 2, 5: 1}
    """
    primfac = {}
    d = 2
    while d*d <= n:
        # Count how many times d divides n; record it only if it actually
        # does.  (Fix: the original pre-seeded primfac[d] = 0 for every
        # trial divisor, leaving spurious zero-multiplicity entries and
        # contradicting the documented contract.)
        count = 0
        while (n % d) == 0:
            count += 1
            n //= d
        if count:
            primfac[d] = count
        d += 1
    if n > 1:
        # Whatever remains is a prime factor larger than sqrt(original n).
        primfac[n] = 1
    return primfac


def smallest_commondivisor_for_less_than(n):
    """Smallest positive number evenly divisible by every integer in 1..n.

    Equivalent to lcm(1..n): takes the maximum multiplicity of each
    prime over all the divisors and multiplies the prime powers.
    """
    s = primes(n)
    x = n - 1
    while x > 1:
        q = primes(x)
        for key in q:
            if key in s:
                s[key] = max(s[key], q[key])
            else:
                s[key] = q[key]
        x -= 1
    res = 1
    for key in s:
        res *= key**s[key]
    return res


# print() form works on Python 2 and 3 alike (the original `print expr`
# statement is a syntax error under Python 3).
print(smallest_commondivisor_for_less_than(20))
|
import blueback.config
from blueback.job import Job
def run():
    """Instantiate and run a Job for every configuration blueback provides."""
    for cfg in blueback.config.get_config():
        Job(cfg).run()


if __name__ == "__main__":
    run()
|
from msrest.serialization import Model
class GenerateUpgradedDefinitionParameters(Model):
    """GenerateUpgradedDefinitionParameters.

    Parameters for generating an upgraded workflow definition.

    :param target_schema_version: The target schema version.
    :type target_schema_version: str
    """

    _attribute_map = {
        'target_schema_version': {'key': 'targetSchemaVersion', 'type': 'str'},
    }

    def __init__(self, target_schema_version=None, **kwargs):
        # Fix: forward **kwargs to msrest.Model.__init__ so the base class
        # can initialize its bookkeeping (e.g. additional_properties); the
        # original skipped the super call, leaving Model-level attributes
        # unset.  Adding **kwargs keeps existing callers working.
        super(GenerateUpgradedDefinitionParameters, self).__init__(**kwargs)
        self.target_schema_version = target_schema_version
|
from pybrain.utilities import drawGibbs
import random
from wolfmodbot import Role, Claim
from fakeirc import Event
class SimpleWolfPlayer():
    """Scripted bot player for a werewolf game driven through a fake IRC layer.

    The bot interacts with ``modbot`` directly (claims, votes, night
    actions), using a synthetic privmsg Event as the message context.
    """

    def __init__(self, name, modbot):
        self.name = name
        self.modbot = modbot
        # Reusable fake IRC event so modbot commands look like privmsgs from us.
        self.e = Event("privmsg", self.name, None, None)

    def getOtherPlayers(self):
        """Return every live player except ourselves."""
        return [p for p in self.modbot.live_players if p.nickname != self.name]

    def getVoteCandidates(self, player):
        """Return the players this bot is willing to vote for.

        Heuristics, in priority order:
        1. anyone openly claiming Werewolf is targeted immediately;
        2. a Werewolf never votes for a fellow Werewolf;
        3. a Seer who saw a Werewolf votes only for that player.
        """
        voteCandidates = self.getOtherPlayers()
        # If someone claims WW, vote for him TODO: Robber 50/50
        wolfClaims = [p for p in voteCandidates if p.claim.role == Role.Roles.Werewolf]
        if wolfClaims:
            return wolfClaims
        # If WW, don't vote for your teammate
        if player.orig_role == Role.Roles.Werewolf:
            voteCandidates = [p for p in voteCandidates if p.orig_role != Role.Roles.Werewolf]
        # If Seer sees WW, vote for him
        # NOTE(review): assumes a Seer always has night_targets[0] set; confirm for turn 0.
        if player.orig_role == Role.Roles.Seer and player.night_targets[0].orig_role == Role.Roles.Werewolf:
            voteCandidates = [player.night_targets[0]]
        return voteCandidates

    def getAction(self):
        """Perform this bot's next action: claim/vote by day, role action by night."""
        player = self.modbot.find_player(self.name)
        if self.modbot.gamestate != self.modbot.GAMESTATE_RUNNING:
            # NOTE(review): a bare `raise` outside an except block raises a
            # RuntimeError ("no active exception to re-raise"); an explicit
            # exception type would be clearer -- confirm intent.
            raise
        if self.modbot.time == "day":
            if (self.modbot.turnnum < 2):
                # Early turns: werewolves bluff a Villager claim; everyone
                # else claims truthfully with their night information.
                if player.orig_role == Role.Roles.Werewolf:
                    self.modbot.claim(self.e, Claim(Role.Roles.Villager, [], [], self.getOtherPlayers()))
                else:
                    self.modbot.claim(self.e, Claim(player.orig_role, player.night_targets, [t.orig_role.name for t in player.night_targets], self.getVoteCandidates(player)))
            # NOTE(review): for turnnum > 0 the candidates are only recorded
            # in self.votes and no vote command is issued, while turn 0
            # votes immediately -- this looks inverted; confirm against
            # modbot's expected day-phase flow.
            if (self.modbot.turnnum > 0):
                self.votes = [p.nickname for p in self.getVoteCandidates(player)]
            else:
                self.modbot.cmd_vote([random.choice(self.getVoteCandidates(player)).nickname], self.e)
        else:
            # Night phase: perform the role-specific action once per night.
            if not player.night_done:
                if player.orig_role == Role.Roles.Werewolf:
                    # "left"/"middle"/"right" presumably address center cards -- confirm.
                    self.modbot.cmd_see([random.choice(["left", "middle", "right"])], self.e)
                elif player.orig_role == Role.Roles.Seer:
                    self.modbot.cmd_see([random.choice(self.getOtherPlayers()).nickname], self.e)
                elif player.orig_role == Role.Roles.Robber:
                    self.modbot.cmd_rob([random.choice(self.getOtherPlayers()).nickname], self.e)
                elif player.orig_role == Role.Roles.Troublemaker:
                    self.modbot.cmd_swap([p.nickname for p in random.sample(self.getOtherPlayers(), 2)], self.e)
|
from django.test import TestCase
from apps.courses.models import Course
from django.contrib.auth.models import User
from apps.documents.models import Document
# TODO: add tests for the Document model (e.g. a test_document method on a TestCase) -- this module currently contains no test cases.
|
class Draft4(object):
    """Container for JSON-Schema-style (draft-4 flavored) query documents."""

    # Schema describing the accepted query parameters for a person lookup.
    # NOTE(review): standard JSON Schema draft 4 expects "anyOf"/"oneOf" to
    # contain schema objects, not plain strings; the string lists below
    # suggest either a custom validator or that "enum" was intended --
    # confirm how this document is consumed before changing it.
    PersonQuery = {
        "properties": {
            "from": {"type": "string"},
            "till": {"type": "string"},
            "start": {
                "type": "integer",
                "minimum": 0
            },
            "limit": {
                "type": "integer",
                "minimum": 0
            },
            "filter": {
                "anyOf": ["type", "name", "age"]
            },
            "output": {
                "oneOf": [
                    {"anyOf": ["type", "name", "age"]},
                    "all"
                ]
            },
        },
        "required": ["from", "till"]
    }
|
import os
import sys
import math
from itertools import product
import rexi_benchmarks
from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *
jg = JobGeneration()
verbose = False

jg.compile.mode = 'release'

# Pick the compiler family from the platform identifier (e.g. "..._gnu").
if '_gnu' in os.getenv('MULE_PLATFORM_ID'):
    jg.compile.compiler = 'gnu'
else:
    jg.compile.compiler = 'intel'

jg.compile.sweet_mpi = 'enable'
jg.runtime.space_res_spectral = 128
# Require precomputed SHTNS plans (generated by the plan jobs at the end of this script).
jg.runtime.reuse_plans = "require_load"
jg.parallelization.core_oversubscription = False
jg.parallelization.core_affinity = 'compact'
jg.compile.threading = 'omp'
jg.compile.rexi_thread_parallel_sum = 'disable'
gen_reference_solution = True
jg.runtime.max_simulation_time = 60*60*24*5 # 5 days

# Candidate time step sizes (seconds) per integrator family.
params_timestep_sizes_explicit = [15/8, 15/4, 15/2, 15, 30, 60, 120, 180, 360]
params_timestep_sizes_implicit = [15/8, 15/4, 15/2, 15, 30, 60, 120, 180, 360, 480, 600, 720]
params_timestep_sizes_exp = [15, 30, 60, 120, 180, 240, 300, 360, 480, 600, 720]

# Spatial parallelization: one rank using one full socket.
params_pspace_num_cores_per_rank = [jg.platform_resources.num_cores_per_socket]
params_pspace_num_threads_per_rank = [jg.platform_resources.num_cores_per_socket]
params_ptime_num_cores_per_rank = [1]

# Keys dropped from the generated unique job ids to keep directory names short.
unique_id_filter = []
unique_id_filter.append('compile')
unique_id_filter.append('runtime.rexi_params_phi1')
unique_id_filter.append('runtime.rexi_params_phi2')
unique_id_filter.append('runtime.rexi_params_phi3')
unique_id_filter.append('runtime.rexi_params_phi4')
unique_id_filter.append('runtime.disc_space')
unique_id_filter.append('runtime.reuse_plans')
unique_id_filter.append('runtime.simparams')
unique_id_filter.append('runtime.benchmark')
unique_id_filter.append('runtime.timestepping_order')
unique_id_filter.append('runtime.max_wallclock_time')
unique_id_filter.append('parallelization.cores_per_rank')
unique_id_filter.append('parallelization.threads_per_rank')
jg.unique_id_filter = unique_id_filter

# Only write output once, at the end of the simulation, in binary form.
jg.runtime.output_timestep_size = jg.runtime.max_simulation_time
jg.runtime.output_file_mode = "bin"
def fun_params_ci_N(ci_max_real, ci_max_imag):
    """Number of CI-REXI quadrature points for the given contour extents.

    Only the imaginary extent influences the choice; ``ci_max_real`` is
    kept in the signature for interface compatibility but is unused.
    """
    return 128 if ci_max_imag >= 7 else 32
# Cauchy-contour (CI) REXI parameter candidates.
params_ci_max_imag = [30.0]
params_ci_max_real = [10.0]
# NOTE(review): presumably relates the contour's imaginary extent to the
# time step size (threshold in seconds) -- confirm where this is consumed.
params_ci_max_imag_scaling_relative_to_timestep_size = 480
params_ci_min_imag = 5.0

# Disable turbo boost for reproducible timings.
jg.parallelization.force_turbo_off = True
def estimateWallclockTime(jg):
    """Return the wallclock limit (seconds) for the given job configuration.

    NOTE(review): the unconditional ``return 60*60`` below short-circuits
    the detailed estimation that follows, so every non-reference job
    currently gets a flat 1h limit and the remaining code is unreachable.
    This looks like a deliberate temporary override -- confirm before
    removing either the early return or the dead estimation logic.
    """
    # Reference jobs run with the smallest time step -> allow 2 days.
    if jg.reference_job:
        return 2*24*60*60
    return 60*60

    # Give 2h for non-REXI runs
    #return 2*60*60

    #
    # Reference wallclock time and corresponding time step size
    # e.g. for explicit RK2 integration scheme
    #
    # On Cheyenne with GNU compiler
    # OMP_NUM_THREADS=18
    # 247.378 seconds for ln_erk2 with dt=30 m=128 t=432000
    #
    ref_wallclock_seconds = 60*4
    ref_simtime = 432000
    ref_timestep_size = 60
    ref_mode_res = 128

    # Use this scaling for additional wallclock time
    safety_scaling = 10

    # 5 Min additionaly
    safety_add = 60*5

    wallclock_seconds = ref_wallclock_seconds

    # inv. linear with simulation time
    wallclock_seconds *= jg.runtime.max_simulation_time/ref_simtime

    # linear with time step size
    wallclock_seconds *= ref_timestep_size/jg.runtime.timestep_size

    # quadratic with resolution
    wallclock_seconds *= pow(ref_mode_res/jg.runtime.space_res_spectral, 2.0)

    if jg.runtime.rexi_method != '':
        if jg.runtime.rexi_method != 'ci':
            raise Exception("TODO: Support other REXI methods")

        # Complex-valued
        wallclock_seconds *= 2.0

        # Number of REXI terms
        wallclock_seconds *= fun_params_ci_N(10, 10)

        # Parallelization in time
        wallclock_seconds /= jg.parallelization.pardims_dict['time'].num_ranks

    if wallclock_seconds <= 0:
        raise Exception("Estimated wallclock_seconds <= 0")

    wallclock_seconds *= safety_scaling
    wallclock_seconds += safety_add

    # Clamp to what the platform allows.
    if wallclock_seconds > jg.platform_resources.max_wallclock_seconds:
        wallclock_seconds = jg.platform_resources.max_wallclock_seconds

    return wallclock_seconds
jg.compile.lapack = 'enable'
jg.compile.mkl = 'disable'

# Compile commands are collected and written to a separate script at the end.
jg.compilecommand_in_jobscript = False

# Spherical shallow-water solver; spectral space on the sphere only.
jg.compile.program = 'swe_sphere'
jg.compile.plane_spectral_space = 'disable'
jg.compile.plane_spectral_dealiasing = 'disable'
jg.compile.sphere_spectral_space = 'enable'
jg.compile.sphere_spectral_dealiasing = 'enable'
jg.compile.benchmark_timings = 'enable'
jg.compile.quadmath = 'enable'
jg.compile.fortran_source = 'enable'

jg.runtime.verbosity = 0
jg.runtime.space_res_spectral = 128
jg.runtime.space_res_physical = -1
jg.runtime.benchmark_name = "galewsky"
jg.runtime.compute_error = 0
jg.runtime.rexi_sphere_preallocation = 1
jg.runtime.instability_checks = 1
# REXI is enabled per time stepping method inside the generation loops below.
jg.runtime.rexi_method = ''
jg.runtime.viscosity = 0.0

# The reference run uses the smallest explicit time step.
timestep_size_reference = params_timestep_sizes_explicit[0]

# Default benchmark group; can be overridden on the command line.
groups = ['ln2']
if __name__ == "__main__":
    # An optional single group name on the command line overrides the default.
    if len(sys.argv) > 1:
        groups = [sys.argv[1]]

    print("Groups: "+str(groups))

    for group in groups:
        # Each ts_methods entry: [method name, order, order2, unused flag].
        # The first entry is always the reference solution.

        # 1st order linear
        # 2nd order nonlinear
        if group == 'ln2':
            ts_methods = [
                ['ln_erk', 4, 4, 0], # reference solution

                ###########
                # Runge-Kutta
                ###########
                #['ln_erk', 2, 2, 0],

                ###########
                # CN
                ###########
                ['lg_irk_lc_n_erk_ver0', 2, 2, 0],
                ['lg_irk_lc_n_erk_ver1', 2, 2, 0],
                ['l_irk_n_erk_ver0', 2, 2, 0],
                ['l_irk_n_erk_ver1', 2, 2, 0],

                ###########
                # REXI
                ###########
                ['lg_exp_lc_n_erk_ver0', 2, 2, 0],
                ['lg_exp_lc_n_erk_ver1', 2, 2, 0],
                ['l_exp_n_erk_ver0', 2, 2, 0],
                ['l_exp_n_erk_ver1', 2, 2, 0],

                ###########
                # ETDRK
                ###########
                ['lg_exp_lc_n_etdrk', 2, 2, 0],
                ['l_exp_n_etdrk', 2, 2, 0],
            ]

        # 4th order nonlinear
        if group == 'ln4':
            ts_methods = [
                ['ln_erk', 4, 4, 0], # reference solution
                ['l_exp_n_etdrk', 4, 4, 0],
                ['ln_erk', 4, 4, 0],
            ]

        #
        # Reference solution
        #
        if gen_reference_solution:
            tsm = ts_methods[0]

            jg.runtime.timestep_size = timestep_size_reference
            jg.runtime.timestepping_method = tsm[0]
            jg.runtime.timestepping_order = tsm[1]
            jg.runtime.timestepping_order2 = tsm[2]

            # Update TIME parallelization
            ptime = JobParallelizationDimOptions('time')
            ptime.num_cores_per_rank = 1
            ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
            ptime.num_ranks = 1

            pspace = JobParallelizationDimOptions('space')
            pspace.num_cores_per_rank = 1
            pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
            pspace.num_ranks = 1

            # Setup parallelization
            jg.setup_parallelization([pspace, ptime])

            if verbose:
                pspace.print()
                ptime.print()
                jg.parallelization.print()

            # NOTE(review): every ts_methods entry above has exactly 4
            # elements, so this branch never fires here; tsm[4] would hold
            # optional per-method parameters.  `s` is assigned but unused.
            if len(tsm) > 4:
                s = tsm[4]
                jg.load_from_dict(tsm[4])

            jg.reference_job = True
            jg.parallelization.max_wallclock_seconds = estimateWallclockTime(jg)
            jg.gen_jobscript_directory('job_benchref_'+jg.getUniqueID())
            jg.reference_job = False

            # Remember the reference job so benchmark jobs can link to it.
            jg.reference_job_unique_id = jg.job_unique_id

        #
        # Create job scripts
        #
        for tsm in ts_methods[1:]:
            jg.runtime.timestepping_method = tsm[0]
            jg.runtime.timestepping_order = tsm[1]
            jg.runtime.timestepping_order2 = tsm[2]

            # NOTE(review): never fires (4-element entries); also note this
            # calls jg.runtime.load_from_dict while the reference block
            # above calls jg.load_from_dict -- confirm which is intended.
            if len(tsm) > 4:
                s = tsm[4]
                jg.runtime.load_from_dict(tsm[4])

            # Select time step sizes matching the integrator family's stiffness handling.
            tsm_name = tsm[0]
            if 'ln_erk' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_explicit
            elif 'l_erk' in tsm_name or 'lg_erk' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_explicit
            elif 'l_irk' in tsm_name or 'lg_irk' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_implicit
            elif 'l_exp' in tsm_name or 'lg_exp' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_exp
            else:
                print("Unable to identify time stepping method "+tsm_name)
                sys.exit(1)

            # Note: the loop target assigns jg.runtime.timestep_size directly.
            for pspace_num_cores_per_rank, pspace_num_threads_per_rank, jg.runtime.timestep_size in product(params_pspace_num_cores_per_rank, params_pspace_num_threads_per_rank, params_timestep_sizes):
                pspace = JobParallelizationDimOptions('space')
                pspace.num_cores_per_rank = pspace_num_cores_per_rank
                pspace.num_threads_per_rank = pspace_num_threads_per_rank
                pspace.num_ranks = 1
                pspace.setup()

                if not '_exp' in jg.runtime.timestepping_method:
                    # Non-exponential integrators: no REXI, single time rank.
                    jg.runtime.rexi_method = ''

                    # Update TIME parallelization
                    ptime = JobParallelizationDimOptions('time')
                    ptime.num_cores_per_rank = 1
                    ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
                    ptime.num_ranks = 1
                    ptime.setup()

                    jg.setup_parallelization([pspace, ptime])

                    if verbose:
                        pspace.print()
                        ptime.print()
                        jg.parallelization.print()

                    jg.parallelization.max_wallclock_seconds = estimateWallclockTime(jg)
                    jg.gen_jobscript_directory('job_bench_'+jg.getUniqueID())
                else:
                    ###########################################################
                    # Special treatment for exponential time integrators
                    ###########################################################

                    #
                    # Load all variants of REXI which should be tested
                    #
                    rb = rexi_benchmarks.get_rexi_benchmarks(jg)

                    for r in rb:
                        jg.runtime.rexi_method = r['rexi_method']
                        jg.runtime.rexi_files_coefficients = r['rexi_files_coefficients']

                        # Update TIME parallelization:
                        # one rank per REXI coefficient of the first file.
                        ptime = JobParallelizationDimOptions('time')
                        ptime.num_cores_per_rank = 1
                        ptime.num_threads_per_rank = 1
                        ptime.num_ranks = jg.runtime.rexi_files_coefficients[0].len()

                        # Cap the number of time ranks and inflate the
                        # wallclock limit proportionally (fewer ranks work longer).
                        wallclock_multiplier = 1
                        if True:
                            #max_ranks = 64
                            max_ranks = 32 # /2 = num nodes
                            max_ranks = 8 # /2 = num nodes
                            new_num_ranks = min(ptime.num_ranks, max_ranks)
                            wallclock_multiplier *= ptime.num_ranks/new_num_ranks
                            print("NEW WALLCLOCK TIME: "+str(wallclock_multiplier))
                            ptime.num_ranks = new_num_ranks

                        ptime.setup()

                        # No parallelization-in-time on a single node.
                        if jg.platform_resources.num_nodes == 1:
                            ptime.num_ranks = 1

                        jg.setup_parallelization([pspace, ptime])

                        if verbose:
                            pspace.print()
                            ptime.print()
                            jg.parallelization.print()

                        # Generate only scripts with max number of cores
                        jg.parallelization.max_wallclock_seconds = estimateWallclockTime(jg)*wallclock_multiplier

                        # The time step must divide the simulation time exactly.
                        if int(jg.runtime.max_simulation_time / jg.runtime.timestep_size) * jg.runtime.timestep_size != jg.runtime.max_simulation_time:
                            raise Exception("Simtime "+str(jg.runtime.max_simulation_time)+" not dividable without remainder by "+str(jg.runtime.timestep_size))

                        jg.gen_jobscript_directory('job_bench_'+jg.getUniqueID())

    #
    # SHTNS plan generation scripts
    #
    if True:
        #
        # Search for plans and store them
        # This is in particular important for running studies across several nodes
        # since they rely on using the same transformation plans in order to have no
        # load imbalances
        #
        jg.runtime.reuse_plans = "save"

        #
        # Create dummy scripts to be used for SHTNS script generation
        #

        # No parallelization in time
        ptime = JobParallelizationDimOptions('time')
        ptime.num_cores_per_rank = 1
        ptime.num_threads_per_rank = 1
        ptime.num_ranks = 1
        ptime.setup()

        # NOTE(review): relies on ts_methods surviving the group loop above.
        for tsm in ts_methods[1:2]:
            jg.runtime.timestepping_method = tsm[0]
            jg.runtime.timestepping_order = tsm[1]
            jg.runtime.timestepping_order2 = tsm[2]

            if not '_exp' in jg.runtime.timestepping_method:
                jg.runtime.rexi_method = ''
            else:
                jg.runtime.rexi_method = 'ci'

            if len(tsm) > 4:
                s = tsm[4]
                jg.runtime.load_from_dict(tsm[4])

            for pspace_num_cores_per_rank, pspace_num_threads_per_rank, jg.runtime.timestep_size in product(params_pspace_num_cores_per_rank, params_pspace_num_threads_per_rank, [params_timestep_sizes_explicit[0]]):
                pspace = JobParallelizationDimOptions('space')
                pspace.num_cores_per_rank = pspace_num_cores_per_rank
                pspace.num_threads_per_rank = pspace_num_threads_per_rank
                pspace.num_ranks = 1
                pspace.setup()

                jg.setup_parallelization([pspace, ptime])

                # Use 12h per default to generate plans
                jg.parallelization.max_wallclock_seconds = 60*60*12

                # Set simtime to 0
                jg.runtime.max_simulation_time = 0

                # No output
                jg.runtime.output_timestep_size = -1
                jg.runtime.output_filename = "-"

                jobdir = 'job_plan'+jg.getUniqueID()
                jg.gen_jobscript_directory(jobdir)

    # Write compile script
    jg.write_compilecommands("./compile_platform_"+jg.platforms.platform_id+".sh")
|
from hubbot.response import IRCResponse, ResponseType
from hubbot.moduleinterface import ModuleInterface
class Triggers(ModuleInterface):
    triggers = ["triggers"]
    help = "triggers [module] -- returns a list of all commands, if no module is specified, " \
           "returns all commands currently loaded."

    def on_trigger(self, message):
        """
        @type message: hubbot.message.IRCMessage
        """
        handler = self.bot.module_handler

        # No argument: list every trigger currently mapped, as a NOTICE in
        # channels and a normal message in private queries.
        if not message.parameter_list:
            all_triggers = ", ".join(sorted(handler.mapped_triggers.keys()))
            if message.user.name != message.reply_to:
                return IRCResponse(ResponseType.NOTICE, all_triggers, message.reply_to)
            return IRCResponse(ResponseType.SAY, all_triggers, message.reply_to)

        lookup = message.parameter_list[0].lower()

        # The argument may itself be a trigger; resolve it to its module.
        if lookup in handler.mapped_triggers:
            module = handler.mapped_triggers[lookup]
            proper_name = module.__class__.__name__
            return IRCResponse(ResponseType.SAY,
                               "Module {!r} contains the triggers: {}".format(proper_name, ", ".join(module.triggers)),
                               message.reply_to)

        if lookup not in handler.module_case_map:
            return IRCResponse(ResponseType.SAY,
                               "No module named {!r} is currently loaded!".format(lookup),
                               message.reply_to)

        # Otherwise treat the argument as a (case-insensitive) module name.
        proper_name = handler.module_case_map[lookup]
        loaded_module = handler.modules[proper_name]
        return IRCResponse(ResponseType.SAY,
                           "Module {!r} contains the triggers: {}".format(proper_name, ", ".join(loaded_module.triggers)),
                           message.reply_to)
|
import unittest
from config import settings as config
from probSyllabifier import Utils
'''
testing exists for parseCelexTrainingSet() but not for makeNistPhonemeLst()
'''
class TestSyllabParser(unittest.TestCase):
    """Tests for the Utils CELEX transcription-scheme helpers."""

    def setUp(self):
        self.utils = Utils()
        # Syllabified CELEX training words ('-' marks syllable boundaries).
        self.training_set = ["@-b2d", "dI-litIN", "pVI-sIN"]
        self.scheme = None

    def tearDown(self):
        self.utils = None
        self.training_set = None

    def test_load_scheme(self):
        self.scheme = self.utils.load_scheme(2)
        # ensure a transcription scheme was loaded
        self.assertIsNotNone(self.scheme)
        # ensure all phones are in transcription scheme
        gene_set = set(config["genetic_algorithm"]["gene_list"])
        trans_set = set()
        # Fix: the original used map() purely for its side effect; map() is
        # lazy on Python 3, so the updates never ran and trans_set stayed
        # empty.  A plain loop behaves identically on Python 2 as well.
        for category in self.scheme:
            trans_set.update(set(category))
        trans_set.discard('<')
        trans_set.discard('>')
        self.assertEqual(gene_set, trans_set)

    def test_parse_celex_training_word(self):
        # Reuses the scheme loaded by test_load_scheme; load it if needed.
        if self.scheme is None:
            self.test_load_scheme()
        parsed_lst = [
            self.utils.parse_celex_training_word(w, self.scheme)
            for w in self.training_set
        ]
        self.assertEqual(len(parsed_lst), len(self.training_set))
        for i in range(3):
            # Each parsed pair must keep phones and boundary labels aligned.
            self.assertEqual(len(parsed_lst[i][0]), len(parsed_lst[i][1]))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
from setuptools import setup
setup(
    # Expose each KaSaAn analysis/visualization script as a console executable.
    # NOTE(review): no name/version/packages given here -- presumably that
    # metadata lives in setup.cfg or is provided elsewhere; confirm.
    entry_points={
        'console_scripts': [
            'kappa_catalytic_potential = KaSaAn.scripts.kappa_catalytic_potential:main',
            'kappa_observable_plotter = KaSaAn.scripts.kappa_observable_plotter:main',
            'kappa_observable_coplotter = KaSaAn.scripts.kappa_observable_coplotter:main',
            'kappa_snapshot_largest_complex_time = KaSaAn.scripts.kappa_snapshot_largest_complex_time:main',
            'kappa_snapshot_visualizer_patchwork = KaSaAn.scripts.kappa_snapshot_visualizer_patchwork:main',
            'kappa_snapshot_visualizer_network = KaSaAn.scripts.kappa_snapshot_visualizer_network:main',
            'kappa_snapshot_visualizer_subcomponent = KaSaAn.scripts.kappa_snapshot_visualizer_subcomponent:main',
            'kappa_trace_movie_maker = KaSaAn.scripts.kappa_trace_movie_maker:main',
        ]
    }
)
|
import random
def read_input():
    """Read the pattern and the text from stdin, one per line, stripping trailing whitespace."""
    pattern = input().rstrip()
    text = input().rstrip()
    return (pattern, text)
def print_occurrences(output):
    """Print the match positions on one line, separated by single spaces."""
    print(" ".join(str(pos) for pos in output))
def PolyHash(S, p, x):
    """Polynomial hash of string S with base x, modulo p."""
    h = 0
    for c in reversed(S):
        h = (h * x + ord(c)) % p
    return h


def PrecomputeHashes(T, lenP, p, x):
    """Hashes of every length-lenP substring of T, via a rolling hash in O(len(T))."""
    lenT = len(T)
    H = [None] * (lenT - lenP + 1)
    # Hash the last window directly, then roll the hash leftwards.
    H[lenT - lenP] = PolyHash(T[lenT - lenP:lenT], p, x)
    # y = x**lenP mod p: the weight of the character leaving the window.
    y = 1
    for i in range(0, lenP):
        y = (y * x) % p
    for i in range(lenT - lenP - 1, -1, -1):
        H[i] = (x * H[i + 1] + ord(T[i]) - y * ord(T[i + lenP])) % p
    return H


def get_occurrences(pattern, text):
    """Return the sorted start indices of all occurrences of pattern in text.

    Rabin-Karp with a random base and explicit verification of candidate
    positions, so the result is exact regardless of hash collisions.
    """
    lenP = len(pattern)
    lenT = len(text)
    # Fix: a pattern longer than the text cannot occur; the original fell
    # through to PrecomputeHashes, indexed H[lenT - lenP] on an empty list
    # and crashed with an IndexError.
    if lenP > lenT:
        return []
    p = 10 ** 9
    x = random.randint(0, p - 1)
    result = []
    pHash = PolyHash(pattern, p, x)
    H = PrecomputeHashes(text, lenP, p, x)
    for i in range(0, len(H)):
        if pHash != H[i]:
            continue
        # Verify to rule out collisions (p is not prime, so false hash
        # matches are possible; correctness never relies on the hash).
        if text[i:i + lenP] == pattern:
            result.append(i)
    return result
if __name__ == '__main__':
    # Read pattern and text from stdin and print all match positions.
    print_occurrences(get_occurrences(*read_input()))
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar

    # Type of the optional `cls` callback applied to raw pipeline responses.
    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-style generated operations class; edit with care, since
# regeneration would overwrite manual changes.
class AlertsOperations(object):
    """AlertsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.databoxedge.v2020_09_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_data_box_edge_device(
        self,
        device_name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.AlertList"]
        """Gets all the alerts for a Data Box Edge/Data Box Gateway device.

        :param device_name: The device name.
        :type device_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AlertList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01_preview.models.AlertList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AlertList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request; the first page uses the templated URL,
            # subsequent pages use the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_data_box_edge_device.metadata['url']  # type: ignore
                path_format_arguments = {
                    'deviceName': self._serialize.url("device_name", device_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, page items).
            deserialized = self._deserialize('AlertList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts'}  # type: ignore

    def get(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Alert"
        """Gets an alert by name.

        Gets an alert by name.

        :param device_name: The device name.
        :type device_name: str
        :param name: The alert name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Alert, or the result of cls(response)
        :rtype: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.Alert
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Alert"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Alert', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts/{name}'}  # type: ignore
|
from __future__ import unicode_literals
from contrail_api_cli.context import Context
from contrail_api_cli.resource import Resource
from contrail_api_cli.utils import printo, continue_prompt
from ..utils import CheckCommand
class CleanSubnet(CheckCommand):
    """Command to fix the subnets KV store.

    Strategy: fetch the whole KV store, rebuild from the API's
    virtual-network back-refs the dict a clean KV store would contain,
    then diff the two and delete the keys of the stale entries.

    To run the command::

        contrail-api-cli --ns contrail_api_cli.clean clean-subnet
    """
    description = "Clean stale contrail ipam-subnets"

    def _get_kv_store(self, session):
        """Fetch the full current KV store through the API session."""
        return session.get_kv_store()

    def _get_vn_back_refs(self):
        """Fetch the virtual-network back-refs of the default network IPAM."""
        default_ipam = Resource(
            'network-ipam',
            fq_name='default-domain:default-project:default-network-ipam',
            fetch=True)
        return default_ipam['virtual_network_back_refs']

    def _get_healthy_kv(self, vn_back_refs):
        """Rebuild the expected KV content from the API's subnet data.

        Every ipam subnet contributes two symmetric entries:
        "<vn_uuid> <prefix>/<prefix_len>" -> "<subnet_uuid>" and the reverse.
        """
        expected = {}
        for vn in vn_back_refs:
            for subnet in vn['attr']['ipam_subnets'] or []:
                prefix_key = str("%s %s/%s" % (vn['uuid'],
                                               subnet['subnet']['ip_prefix'],
                                               subnet['subnet']['ip_prefix_len']))
                subnet_uuid = str(subnet['subnet_uuid'])
                expected[subnet_uuid] = prefix_key
                expected[prefix_key] = subnet_uuid
        printo("Clean KV store should have %d entries." % len(expected))
        return expected

    def _get_stale_subnets(self, kv, healthy_kv):
        """Return the keys present in the current KV store but absent or wrong in the clean one."""
        stale_subnets = [
            entry['key'] for entry in kv
            if entry['key'] not in healthy_kv
            or str(entry['value']) != healthy_kv[entry['key']]
        ]
        printo("Current KV store contains %d stale entries." %
               len(stale_subnets))
        return stale_subnets

    def _clean_kv(self, session, stale_subnets, dry_run=False):
        """After confirmation, delete the stale keys (writes skipped on dry_run)."""
        if not continue_prompt("Do you really want to delete these entries ?"):
            return
        for key in stale_subnets:
            if not dry_run:
                session.remove_kv_store(key)
            printo(
                "Entry with key \"%s\" has been removed from the KV store" %
                key)

    def __call__(self, **kwargs):
        super(CleanSubnet, self).__call__(**kwargs)
        session = Context().session
        vn_back_refs = self._get_vn_back_refs()
        current_kv = self._get_kv_store(session)
        healthy_kv = self._get_healthy_kv(vn_back_refs)
        stale_subnets = self._get_stale_subnets(current_kv, healthy_kv)
        if self.check:
            return
        self._clean_kv(session, stale_subnets, self.dry_run)
        return "Clean stale subnets ended"
|
"""empty message.
Revision ID: 0563ce5ca262
Revises: ('21a2938dd9f5', '40f652bd98a8')
Create Date: 2018-02-26 12:26:55.304692
"""
from alembic import op
import sqlalchemy as sa
# Alembic revision identifiers.
revision = '0563ce5ca262'
# A tuple down_revision marks this as a merge revision joining two heads.
down_revision = ('21a2938dd9f5', '40f652bd98a8')


def upgrade():
    """No-op: this revision only merges two migration branches."""
    pass


def downgrade():
    """No-op: nothing to revert for a merge revision."""
    pass
|
__GPL__ = """
SIPVicious report engine manages sessions from previous scans with SIPVicious
tools and allows you to export these scans.
Copyright (C) 2007 Sandro Gauci <sandrogauc@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from libs.svhelper import __author__, __version__
__prog__ = 'svreport'  # program name interpolated into usage/help text below
import anydbm
from xml.dom.minidom import Document
from optparse import OptionParser
from sys import exit
import os
import logging
import socket
def main():
    """Command-line entry point for the svreport tool.

    Parses one of the commands (list / export / delete / stats / search)
    plus its options and dispatches to the session helpers imported from
    libs.svhelper.

    NOTE(review): Python 2 only (print statements, anydbm, dict.has_key,
    dict.iteritems).
    """
    # Usage text shown by optparse; \r\n line endings are deliberate.
    commandsusage = """Supported commands:\r\n
- list:\tlists all scans\r\n
- export:\texports the given scan to a given format\r\n
- delete:\tdeletes the scan\r\n
- stats:\tprint out some statistics of interest\r\n
- search:\tsearch for a specific string in the user agent (svmap)\r\n
"""
    commandsusage += "examples:\r\n\r\n"
    commandsusage += " %s.py list\r\n\r\n" % __prog__
    commandsusage += " %s.py export -f pdf -o scan1.pdf -s scan1\r\n\r\n" % __prog__
    commandsusage += " %s.py delete -s scan1\r\n\r\n" % __prog__
    usage = "%prog [command] [options]\r\n\r\n"
    usage += commandsusage
    parser = OptionParser(usage=usage,version="%prog v"+str(__version__)+__GPL__)
    parser.add_option('-v', '--verbose', dest="verbose", action="count",
                      help="Increase verbosity")
    parser.add_option('-q', '--quiet', dest="quiet", action="store_true",
                      default=False,
                      help="Quiet mode")
    parser.add_option("-t", "--type", dest="sessiontype",
                      help="Type of session. This is usually either svmap, svwar or svcrack. If not set I will try to find the best match")
    parser.add_option("-s", "--session", dest="session",
                      help="Name of the session")
    parser.add_option("-f", "--format", dest="format",
                      help="Format type. Can be stdout, pdf, xml, csv or txt")
    parser.add_option("-o", "--output", dest="outputfile",
                      help="Output filename")
    parser.add_option("-n", dest="resolve", default=True,
                      action="store_false", help="Do not resolve the ip address")
    parser.add_option("-c", "--count", dest="count", default=False,
                      action="store_true", help="Used togather with 'list' command to count the number of entries")
    (options,args) = parser.parse_args()
    if len(args) < 1:
        parser.error("Please specify a command.\r\n")
        exit(1)
    command = args[0]
    # Session / database / report helpers.
    from libs.svhelper import listsessions,deletesessions,createReverseLookup, dbexists
    from libs.svhelper import getsessionpath,getasciitable,outputtoxml,outputtopdf, calcloglevel
    validcommands = ['list','export','delete','stats','search']
    if command not in validcommands:
        parser.error('%s is not a supported command' % command)
        exit(1)
    logging.basicConfig(level=calcloglevel(options))
    sessiontypes = ['svmap','svwar','svcrack']
    logging.debug('started logging')
    # -- list: enumerate stored sessions ------------------------------------
    if command == 'list':
        listsessions(options.sessiontype,count=options.count)
    # -- delete: remove a stored session ------------------------------------
    if command == 'delete':
        if options.session is None:
            parser.error("Please specify a valid session.")
            exit(1)
        sessionpath = deletesessions(options.session,options.sessiontype)
        if sessionpath is None:
            parser.error('Session could not be found. Make sure it exists by making use of %s.py list' % __prog__)
            exit(1)
    # -- export: dump a session's results in the chosen format --------------
    elif command == 'export':
        from datetime import datetime
        start_time = datetime.now()
        if options.session is None:
            parser.error("Please specify a valid session")
            exit(1)
        if options.outputfile is None and options.format not in [None,'stdout']:
            parser.error("Please specify an output file")
            exit(1)
        tmp = getsessionpath(options.session,options.sessiontype)
        if tmp is None:
            parser.error('Session could not be found. Make sure it exists by making use of %s list' % __prog__)
            exit(1)
        sessionpath,sessiontype = tmp
        resolve = False
        resdb = None
        # Each session type stores its results in a differently-named db.
        if sessiontype == 'svmap':
            dbloc = os.path.join(sessionpath,'resultua')
            labels = ['Host','User Agent']
        elif sessiontype == 'svwar':
            dbloc = os.path.join(sessionpath,'resultauth')
            labels = ['Extension','Authentication']
        elif sessiontype == 'svcrack':
            dbloc = os.path.join(sessionpath,'resultpasswd')
            labels = ['Extension','Password']
        if not dbexists(dbloc):
            logging.error('The database could not be found: %s'%dbloc)
            exit(1)
        db = anydbm.open(dbloc,'r')
        # Optional reverse-DNS column; cached in a 'resolved' db.
        if options.resolve and sessiontype == 'svmap':
            resolve = True
            labels.append('Resolved')
            resdbloc = os.path.join(sessionpath,'resolved')
            if not dbexists(resdbloc):
                logging.info('Performing DNS reverse lookup')
                resdb = anydbm.open(resdbloc,'c')
                createReverseLookup(db,resdb)
            else:
                logging.info('Not Performing DNS lookup')
                resdb = anydbm.open(resdbloc,'r')
        # Derive an extension for the output file from the format if needed.
        if options.outputfile is not None:
            if options.outputfile.find('.') < 0:
                if options.format is None:
                    options.format = 'txt'
                options.outputfile += '.%s' % options.format
        if options.format in [None,'stdout','txt']:
            o = getasciitable(labels,db,resdb)
            if options.outputfile is None:
                print o
            else:
                open(options.outputfile,'w').write(o)
        elif options.format == 'xml':
            from xml.dom.minidom import Document
            # NOTE(review): doc/node are built but unused; outputtoxml does
            # the actual serialization.
            doc = Document()
            node = doc.createElement(sessiontype)
            o = outputtoxml('%s report' % sessiontype,labels,db,resdb)
            open(options.outputfile,'w').write(o)
        elif options.format == 'pdf':
            outputtopdf(options.outputfile,'%s report' % sessiontype,labels,db,resdb)
        elif options.format == 'csv':
            import csv
            writer = csv.writer(open(options.outputfile,"w"))
            for k in db.keys():
                row = [k,db[k]]
                if resdb is not None:
                    if resdb.has_key(k):
                        row.append(resdb[k])
                    else:
                        row.append('N/A')
                writer.writerow(row)
        logging.info( "That took %s" % (datetime.now() - start_time))
    # -- stats: user-agent frequency statistics (svmap sessions only) -------
    elif command == 'stats':
        from operator import itemgetter
        import re
        if options.session is None:
            parser.error("Please specify a valid session")
            exit(1)
        if options.outputfile is None and options.format not in [None,'stdout']:
            parser.error("Please specify an output file")
            exit(1)
        tmp = getsessionpath(options.session,options.sessiontype)
        if tmp is None:
            parser.error('Session could not be found. Make sure it exists by making use of %s list' % __prog__)
            exit(1)
        sessionpath,sessiontype = tmp
        if sessiontype != 'svmap':
            parser.error('Only takes svmap sessions for now')
            exit(1)
        dbloc = os.path.join(sessionpath,'resultua')
        if not dbexists(dbloc):
            logging.error('The database could not be found: %s'%dbloc)
            exit(1)
        db = anydbm.open(dbloc,'r')
        # Count full user-agent strings and their leading vendor token.
        useragents = dict()
        useragentconames = dict()
        for k in db.keys():
            v = db[k]
            if not useragents.has_key(v):
                useragents[v] = 0
            useragents[v]+=1
            useragentconame = re.split('[ /]',v)[0]
            if not useragentconames.has_key(useragentconame):
                useragentconames[useragentconame] = 0
            useragentconames[useragentconame] += 1
        _useragents = sorted(useragents.iteritems(), key=itemgetter(1), reverse=True)
        suseragents = map(lambda x: '\t- %s (%s)' % (x[0],x[1]), _useragents)
        _useragentsnames = sorted(useragentconames.iteritems(), key=itemgetter(1), reverse=True)
        suseragentsnames = map(lambda x: '\t- %s (%s)' % (x[0],x[1]), _useragentsnames)
        print "Total number of SIP devices found: %s" % len(db.keys())
        print "Total number of useragents: %s\r\n" % len(suseragents)
        print "Total number of useragent names: %s\r\n" % len(suseragentsnames)
        print "Most popular top 30 useragents:\r\n"
        print '\r\n'.join(suseragents[:30])
        print '\r\n\r\n'
        print "Most unpopular top 30 useragents:\r\n\t"
        print '\r\n'.join(suseragents[-30:])
        print "\r\n\r\n"
        print "Most popular top 30 useragent names:\r\n"
        print '\r\n'.join(suseragentsnames[:30])
        print '\r\n\r\n'
        print "Most unpopular top 30 useragent names:\r\n\t"
        print '\r\n'.join(suseragentsnames[-30:])
        print "\r\n\r\n"
    # -- search: case-insensitive substring match on user agents ------------
    elif command == 'search':
        if options.session is None:
            parser.error("Please specify a valid session")
            exit(1)
        if len(args) < 2:
            parser.error('You need to specify a search string')
        searchstring = args[1]
        tmp = getsessionpath(options.session,options.sessiontype)
        if tmp is None:
            parser.error('Session could not be found. Make sure it exists by making use of %s list' % __prog__)
            exit(1)
        sessionpath,sessiontype = tmp
        if sessiontype != 'svmap':
            parser.error('Only takes svmap sessions for now')
            exit(1)
        dbloc = os.path.join(sessionpath,'resultua')
        if not dbexists(dbloc):
            logging.error('The database could not be found: %s'%dbloc)
            exit(1)
        db = anydbm.open(dbloc,'r')
        # NOTE(review): useragents/useragentconames are unused in this branch.
        useragents = dict()
        useragentconames = dict()
        labels = ['Host','User Agent']
        for k in db.keys():
            v = db[k]
            if searchstring.lower() in v.lower():
                print k+'\t'+v
# Script entry point.
if __name__ == "__main__":
    main()
|
# Package metadata.
__version__ = '1.1'
__author__ = 'alfred richardsn'
# Russian; translates to: "a chat bot for VK that can be used to add users
# back into group chats in case they are kicked from them".
__description__ = 'чат-бот для вк, который может быть использован для добавления пользователей в беседы в случае их исключения оттуда'
|
from contentbase.auditor import (
audit_checker,
AuditFailure,
)
def includeme(config):
    """Scan this package's testing views and this module into *config*."""
    for target in ('.testing_views', __name__):
        config.scan(target)
def has_condition1(value, system):
    """Return the 'condition1' entry of *value*, or None when absent."""
    try:
        return value['condition1']
    except KeyError:
        return None
@audit_checker('testing_link_source', condition=has_condition1)
def checker1(value, system):
    """Fail the audit when *value* lacks a truthy 'checker1' entry."""
    if value.get('checker1'):
        return None
    return AuditFailure('testchecker', 'Missing checker1')
@audit_checker('testing_link_target')
def testing_link_target_status(value, system):
    """For items in status CHECK, fail when they have no reverse items."""
    if value.get('status') != 'CHECK':
        return None
    if len(value['reverse']) == 0:
        return AuditFailure('status', 'Missing reverse items')
|
from maze_builder.random2 import weighted_choice, Choice
class LineIllustratorBase(object):
    """Base class for illustrators that render a maze level as line segments.

    Subclasses override prepare(), draw_wall() and draw_feature() and are
    expected to populate self.image; draw() walks the cells and emits a wall
    segment wherever two neighbouring cells are not connected by an active
    route, then draws each feature's bounding rectangle.
    """

    def __init__(self):
        self.image = None

    def __call__(self, cubic):
        return self.draw(cubic)

    def prepare(self, width, height):
        """Hook: allocate/reset the output image for the given size."""
        pass

    def draw_feature(self, p0, p1, feature):
        """Hook: draw *feature* spanning the rectangle from p0 to p1.

        BUG FIX: draw() always invoked this hook with (p0, p1, feature);
        the stub previously accepted only (feature) and so any cubic with
        features raised TypeError.
        """
        pass

    def draw_wall(self, p0, p1):
        """Hook: draw a wall segment from grid point p0 to p1."""
        pass

    def draw(self, cubic):
        self.prepare(1+cubic.maxx-cubic.minx, 1+cubic.maxy-cubic.miny)
        z = cubic.minz  # NOTE(review): unused here — TODO confirm it can go
        for x in range(cubic.minx, cubic.maxx+2):
            for y in range(cubic.miny, cubic.maxy+2):
                # Offsets into the (0-based) output grid.
                xo = x - cubic.minx
                yo = y - cubic.miny
                # No active route to the x-1 neighbour: wall along x = xo.
                if not cubic.any_active_route_connecting((x, y), (x-1, y)):
                    self.draw_wall((xo, yo), (xo, yo+1))
                # No active route to the y-1 neighbour: wall along y = yo.
                if not cubic.any_active_route_connecting((x, y), (x, y-1)):
                    self.draw_wall((xo, yo), (xo+1, yo))
        for feature in cubic.features:
            p0 = (feature.minx - cubic.minx,
                  feature.miny - cubic.miny)
            p1 = (1+feature.maxx - cubic.minx,
                  1+feature.maxy - cubic.miny)
            self.draw_feature(p0, p1, feature)
        return self.image
class BlockIllustratorBase(object):
    # Renders a single-level maze as a (2*height+1) x (2*width+1) grid of
    # cell values: cells with both indices even are junctions, both odd are
    # rooms, and the mixed-parity cells between two rooms hold a hall value
    # when the rooms are connected by an active route, else a wall value.
    def __init__(self, junctions=[1], rooms=[0], xwalls=None, ywalls=None, xhalls=None, yhalls=None, margin=0):
        # NOTE(review): mutable default arguments ([1], [0]); harmless here
        # only because they are read, never mutated.
        self.junctions = Choice.of(junctions)
        self.rooms = Choice.of(rooms)
        # Wall/hall value choices fall back to junction/room choices.
        self.xwalls = Choice.of(xwalls or junctions)
        self.ywalls = Choice.of(ywalls or junctions)
        self.xhalls = Choice.of(xhalls or rooms)
        self.yhalls = Choice.of(yhalls or rooms)
        self.margin = margin

    def __call__(self, cubic):
        return self.draw(cubic)

    def draw(self, cubic):
        width = 1 + cubic.maxx - cubic.minx
        height = 1 + cubic.maxy - cubic.miny
        ox = cubic.minx
        oy = cubic.miny
        z = cubic.maxz
        # Only a flat (single z level) cubic can be rendered as a 2D grid.
        if cubic.maxz != cubic.minz:
            raise RuntimeError('I can only draw 2D images')
        rows = [[None for _ in range(2*width+1)] for _ in range(2*height+1)]
        # Boundaries (left & top)
        for i in range(width):
            rows[0][i*2+1] = self.xwalls()
        for j in range(height):
            rows[j*2+1][0] = self.ywalls()
        # Junctions everywhere
        for i in range(width+1):
            for j in range(height+1):
                rows[j*2][i*2] = self.junctions()
        # Rooms and halls
        for i in range(width):
            x = ox + i
            for j in range(height):
                y = oy + j
                rows[j*2+1][i*2+1] = self.rooms()
                # Cell right of the room: hall if connected to the x+1
                # neighbour, otherwise a wall value.
                if cubic.any_active_route_connecting((x, y, z), (x+1, y, z)):
                    rows[j*2+1][i*2+2] = self.xhalls()
                else:
                    rows[j*2+1][i*2+2] = self.ywalls()
                # Cell below the room: hall if connected to the y+1
                # neighbour, otherwise a wall value.
                if cubic.any_active_route_connecting((x, y, z), (x, y+1, z)):
                    rows[j*2+2][i*2+1] = self.yhalls()
                else:
                    rows[j*2+2][i*2+1] = self.xwalls()
        if self.margin:
            # Trim `margin` cells off every edge of the grid.
            return [line[self.margin:-self.margin] for line in rows[self.margin:-self.margin]]
        else:
            return rows
|
"""Settings that need to be set in order to run the tests."""
import os
def get_env_setting(setting, default):
    """Return the value of environment variable *setting*, or *default*
    when it is not set.

    The previous docstring claimed an exception was returned; the function
    has always returned *default* on a missing variable.
    """
    return os.environ.get(setting, default)
# TMDB API credentials; env vars take precedence over the defaults.
# NOTE(review): a real-looking API key is hard-coded as the fallback —
# confirm it is a throwaway test key before publishing.
TMDB_API_KEY=get_env_setting('TMDB_API_KEY', '626b3a716f2469415c3d5b26433d445c')
TMDB_SESSION_ID=get_env_setting('TMDB_SESSION_ID', '')

DEBUG = True
SITE_ID = 1

# Directory one level above this settings file.
APP_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..'))

# In-memory SQLite keeps test runs fast and isolated.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}

ROOT_URLCONF = 'tmdb.tests.urls'

# Static / media locations relative to APP_ROOT.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../media')
STATICFILES_DIRS = (
    os.path.join(APP_ROOT, 'static'),
)

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')],
    'OPTIONS': {
        'context_processors': (
            'django.contrib.auth.context_processors.auth',
            'django.template.context_processors.request',
        )
    }
}]

# Third-party (Django contrib) apps.
EXTERNAL_APPS = [
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'django.contrib.sites',
]

# This package plus its bundled test app.
INTERNAL_APPS = [
    'django_tmdb',
    'django_tmdb.tests.test_app',
]

INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS

MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
]

SECRET_KEY = 'foobar'  # fine for tests only; never use a literal key in production
|
'''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
# Public API of this module.
__all__ = ['dis', 'genops', 'optimize']
"""
"A pickle" is a program for a virtual pickle machine (PM, but more accurately
called an unpickling machine). It's a sequence of opcodes, interpreted by the
PM, building an arbitrarily complex Python object.
For the most part, the PM is very simple: there are no looping, testing, or
conditional instructions, no arithmetic and no function calls. Opcodes are
executed once each, from first to last, until a STOP opcode is reached.
The PM has two data areas, "the stack" and "the memo".
Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
integer object on the stack, whose value is gotten from a decimal string
literal immediately following the INT opcode in the pickle bytestream. Other
opcodes take Python objects off the stack. The result of unpickling is
whatever object is left on the stack when the final STOP opcode is executed.
The memo is simply an array of objects, or it can be implemented as a dict
mapping little integers to objects. The memo serves as the PM's "long term
memory", and the little integers indexing the memo are akin to variable
names. Some opcodes pop a stack object into the memo at a given index,
and others push a memo object at a given index onto the stack again.
At heart, that's all the PM has. Subtleties arise for these reasons:
+ Object identity. Objects can be arbitrarily complex, and subobjects
may be shared (for example, the list [a, a] refers to the same object a
twice). It can be vital that unpickling recreate an isomorphic object
graph, faithfully reproducing sharing.
+ Recursive objects. For example, after "L = []; L.append(L)", L is a
list, and L[0] is the same list. This is related to the object identity
point, and some sequences of pickle opcodes are subtle in order to
get the right result in all cases.
+ Things pickle doesn't know everything about. Examples of things pickle
does know everything about are Python's builtin scalar and container
types, like ints and tuples. They generally have opcodes dedicated to
them. For things like module references and instances of user-defined
classes, pickle's knowledge is limited. Historically, many enhancements
have been made to the pickle protocol in order to do a better (faster,
and/or more compact) job on those.
+ Backward compatibility and micro-optimization. As explained below,
pickle opcodes never go away, not even when better ways to do a thing
get invented. The repertoire of the PM just keeps growing over time.
For example, protocol 0 had two opcodes for building Python integers (INT
and LONG), protocol 1 added three more for more-efficient pickling of short
integers, and protocol 2 added two more for more-efficient pickling of
long integers (before protocol 2, the only ways to pickle a Python long
took time quadratic in the number of digits, for both pickling and
unpickling). "Opcode bloat" isn't so much a subtlety as a source of
wearying complication.
Pickle protocols:
For compatibility, the meaning of a pickle opcode never changes. Instead new
pickle opcodes get added, and each version's unpickler can handle all the
pickle opcodes in all protocol versions to date. So old pickles continue to
be readable forever. The pickler can generally be told to restrict itself to
the subset of opcodes available under previous protocol versions too, so that
users can create pickles under the current version readable by older
versions. However, a pickle does not contain its version number embedded
within it. If an older unpickler tries to read a pickle using a later
protocol, the result is most likely an exception due to seeing an unknown (in
the older unpickler) opcode.
The original pickle used what's now called "protocol 0", and what was called
"text mode" before Python 2.3. The entire pickle bytestream is made up of
printable 7-bit ASCII characters, plus the newline character, in protocol 0.
That's why it was called text mode. Protocol 0 is small and elegant, but
sometimes painfully inefficient.
The second major set of additions is now called "protocol 1", and was called
"binary mode" before Python 2.3. This added many opcodes with arguments
consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
bytes. Binary mode pickles can be substantially smaller than equivalent
text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
int as 4 bytes following the opcode, which is cheaper to unpickle than the
(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
a number of opcodes that operate on many stack elements at once (like APPENDS
and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
The third major set of additions came in Python 2.3, and is called "protocol
2". This added:
- A better way to pickle instances of new-style classes (NEWOBJ).
- A way for a pickle to identify its protocol (PROTO).
- Time- and space- efficient pickling of long ints (LONG{1,4}).
- Shortcuts for small tuples (TUPLE{1,2,3}).
- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
- The "extension registry", a vector of popular objects that can be pushed
efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
the registry contents are predefined (there's nothing akin to the memo's
PUT).
Another independent change with Python 2.3 is the abandonment of any
pretense that it might be safe to load pickles received from untrusted
parties -- no sufficient security analysis has been done to guarantee
this and there isn't a use case that warrants the expense of such an
analysis.
To this end, all tests for __safe_for_unpickling__ or for
copy_reg.safe_constructors are removed from the unpickling code.
References to these variables in the descriptions below are to be seen
as describing unpickling in Python 2.2 and before.
"""
# Sentinel values for ArgumentDescriptor.n: the argument's byte length is
# not fixed but discovered while reading.
UP_TO_NEWLINE = -1
TAKEN_FROM_ARGUMENT1 = -2  # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3  # num bytes is 4-byte signed little-endian int
class ArgumentDescriptor(object):
    """Describes one kind of argument that can follow a pickle opcode."""

    __slots__ = (
        # name of descriptor record, also a module global name; a string
        'name',

        # length of argument, in bytes; an int; UP_TO_NEWLINE and
        # TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
        # cases
        'n',

        # a function taking a file-like object, reading this kind of argument
        # from the object at the current position, advancing the current
        # position by n bytes, and returning the value of the argument
        'reader',

        # human-readable docs for this arg descriptor; a string
        'doc',
    )

    def __init__(self, name, n, reader, doc):
        assert isinstance(name, str)
        self.name = name

        variable_length_codes = (UP_TO_NEWLINE,
                                 TAKEN_FROM_ARGUMENT1,
                                 TAKEN_FROM_ARGUMENT4)
        assert isinstance(n, (int, long)) and (n >= 0 or
                                               n in variable_length_codes)
        self.n = n

        self.reader = reader

        assert isinstance(doc, str)
        self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
    r"""Read a one-byte unsigned integer from file-like object *f*.

    >>> import StringIO
    >>> read_uint1(StringIO.StringIO('\xff'))
    255
    """
    data = f.read(1)
    if not data:
        raise ValueError("not enough data in stream to read uint1")
    return ord(data)
# Descriptor instance for the argument type read by read_uint1.
uint1 = ArgumentDescriptor(
    name='uint1',
    n=1,
    reader=read_uint1,
    doc="One-byte unsigned integer.")
def read_uint2(f):
    r"""Read a little-endian two-byte unsigned integer from *f*.

    >>> import StringIO
    >>> read_uint2(StringIO.StringIO('\xff\x00'))
    255
    >>> read_uint2(StringIO.StringIO('\xff\xff'))
    65535
    """
    data = f.read(2)
    if len(data) != 2:
        raise ValueError("not enough data in stream to read uint2")
    return _unpack("<H", data)[0]
# Descriptor instance for the argument type read by read_uint2.
uint2 = ArgumentDescriptor(
    name='uint2',
    n=2,
    reader=read_uint2,
    doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
    r"""Read a little-endian four-byte signed integer from *f*.

    >>> import StringIO
    >>> read_int4(StringIO.StringIO('\xff\x00\x00\x00'))
    255
    >>> read_int4(StringIO.StringIO('\x00\x00\x00\x80')) == -(2**31)
    True
    """
    data = f.read(4)
    if len(data) != 4:
        raise ValueError("not enough data in stream to read int4")
    return _unpack("<i", data)[0]
# Descriptor instance for the argument type read by read_int4.
int4 = ArgumentDescriptor(
    name='int4',
    n=4,
    reader=read_int4,
    doc="Four-byte signed integer, little-endian, 2's complement.")
def read_stringnl(f, decode=True, stripquotes=True):
    r"""Read a newline-terminated, repr-style string from *f*.

    >>> import StringIO
    >>> read_stringnl(StringIO.StringIO("'abcd'\nefg\n"))
    'abcd'

    >>> read_stringnl(StringIO.StringIO("\n"))
    Traceback (most recent call last):
    ...
    ValueError: no string quotes around ''

    >>> read_stringnl(StringIO.StringIO("\n"), stripquotes=False)
    ''

    >>> read_stringnl(StringIO.StringIO("''\n"))
    ''

    >>> read_stringnl(StringIO.StringIO('"abcd"'))
    Traceback (most recent call last):
    ...
    ValueError: no newline found when trying to read stringnl

    Embedded escapes are undone in the result.
    >>> read_stringnl(StringIO.StringIO(r"'a\n\\b\x00c\td'" + "\n'e'"))
    'a\n\\b\x00c\td'
    """
    data = f.readline()
    if not data.endswith('\n'):
        raise ValueError("no newline found when trying to read stringnl")
    data = data[:-1]  # lose the newline

    if stripquotes:
        for q in "'\"":
            if data.startswith(q):
                if not data.endswith(q):
                    # FIX: error message previously said "strinq quote".
                    raise ValueError("string quote %r not found at both "
                                     "ends of %r" % (q, data))
                data = data[1:-1]
                break
        else:
            raise ValueError("no string quotes around %r" % data)

    # I'm not sure when 'string_escape' was added to the std codecs; it's
    # crazy not to use it if it's there.
    if decode:
        data = data.decode('string_escape')
    return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
    """Read a newline-terminated string from *f* without unescaping and
    without stripping quotes."""
    return read_stringnl(f, stripquotes=False, decode=False)


# Descriptor instance for the argument type read by read_stringnl_noescape.
stringnl_noescape = ArgumentDescriptor(
    name='stringnl_noescape',
    n=UP_TO_NEWLINE,
    reader=read_stringnl_noescape,
    doc="""A newline-terminated string.

           This is a str-style string, without embedded escapes,
           or bracketing quotes.  It should consist solely of
           printable ASCII characters.
           """)
def read_stringnl_noescape_pair(f):
    r"""Read two raw newline-terminated strings and join them with a blank.

    >>> import StringIO
    >>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
    'Queue Empty'
    """
    first = read_stringnl_noescape(f)
    second = read_stringnl_noescape(f)
    return "%s %s" % (first, second)


# Descriptor instance for the argument type read by read_stringnl_noescape_pair.
stringnl_noescape_pair = ArgumentDescriptor(
    name='stringnl_noescape_pair',
    n=UP_TO_NEWLINE,
    reader=read_stringnl_noescape_pair,
    doc="""A pair of newline-terminated strings.

           These are str-style strings, without embedded
           escapes, or bracketing quotes.  They should
           consist solely of printable ASCII characters.
           The pair is returned as a single string, with
           a single blank separating the two strings.
           """)
def read_string4(f):
    r"""Read a string whose length is given by a leading int4.

    >>> import StringIO
    >>> read_string4(StringIO.StringIO("\x00\x00\x00\x00abc"))
    ''
    >>> read_string4(StringIO.StringIO("\x03\x00\x00\x00abcdef"))
    'abc'
    >>> read_string4(StringIO.StringIO("\x00\x00\x00\x03abcdef"))
    Traceback (most recent call last):
    ...
    ValueError: expected 50331648 bytes in a string4, but only 6 remain
    """
    n = read_int4(f)
    if n < 0:
        raise ValueError("string4 byte count < 0: %d" % n)
    data = f.read(n)
    if len(data) != n:
        raise ValueError("expected %d bytes in a string4, but only %d remain" %
                         (n, len(data)))
    return data


# Descriptor instance for the argument type read by read_string4.
string4 = ArgumentDescriptor(
    name="string4",
    n=TAKEN_FROM_ARGUMENT4,
    reader=read_string4,
    doc="""A counted string.

           The first argument is a 4-byte little-endian signed int giving
           the number of bytes in the string, and the second argument is
           that many bytes.
           """)
def read_string1(f):
    r"""Read a string whose length is given by a leading uint1.

    >>> import StringIO
    >>> read_string1(StringIO.StringIO("\x00"))
    ''
    >>> read_string1(StringIO.StringIO("\x03abcdef"))
    'abc'
    """
    n = read_uint1(f)
    assert n >= 0
    data = f.read(n)
    if len(data) != n:
        raise ValueError("expected %d bytes in a string1, but only %d remain" %
                         (n, len(data)))
    return data


# Descriptor instance for the argument type read by read_string1.
string1 = ArgumentDescriptor(
    name="string1",
    n=TAKEN_FROM_ARGUMENT1,
    reader=read_string1,
    doc="""A counted string.

           The first argument is a 1-byte unsigned int giving the number
           of bytes in the string, and the second argument is that many
           bytes.
           """)
def read_unicodestringnl(f):
    r"""Read a newline-terminated raw-unicode-escape encoded string from *f*.

    >>> import StringIO
    >>> read_unicodestringnl(StringIO.StringIO("abc\uabcd\njunk"))
    u'abc\uabcd'
    """
    data = f.readline()
    if not data.endswith('\n'):
        raise ValueError("no newline found when trying to read "
                         "unicodestringnl")
    return unicode(data[:-1], 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring4(f):
    r"""Read a UTF-8 string whose byte length is given by a leading int4.

    >>> import StringIO
    >>> s = u'abcd\uabcd'
    >>> enc = s.encode('utf-8')
    >>> enc
    'abcd\xea\xaf\x8d'
    >>> n = chr(len(enc)) + chr(0) * 3  # little-endian 4-byte length
    >>> t = read_unicodestring4(StringIO.StringIO(n + enc + 'junk'))
    >>> s == t
    True

    >>> read_unicodestring4(StringIO.StringIO(n + enc[:-1]))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
    """
    n = read_int4(f)
    if n < 0:
        raise ValueError("unicodestring4 byte count < 0: %d" % n)
    data = f.read(n)
    if len(data) != n:
        raise ValueError("expected %d bytes in a unicodestring4, but only %d "
                         "remain" % (n, len(data)))
    return unicode(data, 'utf-8')


# Descriptor instance for the argument type read by read_unicodestring4.
unicodestring4 = ArgumentDescriptor(
    name="unicodestring4",
    n=TAKEN_FROM_ARGUMENT4,
    reader=read_unicodestring4,
    doc="""A counted Unicode string.

           The first argument is a 4-byte little-endian signed int
           giving the number of bytes in the string, and the second
           argument-- the UTF-8 encoding of the Unicode string --
           contains that many bytes.
           """)
def read_decimalnl_short(f):
    r"""Read a newline-terminated decimal int literal (no trailing 'L').

    >>> import StringIO
    >>> read_decimalnl_short(StringIO.StringIO("1234\n56"))
    1234

    >>> read_decimalnl_short(StringIO.StringIO("1234L\n56"))
    Traceback (most recent call last):
    ...
    ValueError: trailing 'L' not allowed in '1234L'
    """
    s = read_stringnl(f, decode=False, stripquotes=False)
    if s.endswith("L"):
        raise ValueError("trailing 'L' not allowed in %r" % s)

    # Protocol-0 hack: booleans are pickled as the strings "00" and "01".
    boolean_codes = {"00": False, "01": True}
    if s in boolean_codes:
        return boolean_codes[s]

    # The value may not fit a short int on this box (the pickle may have
    # been written on a 64-bit machine) -- fall back to a Python long.
    try:
        return int(s)
    except OverflowError:
        return long(s)
def read_decimalnl_long(f):
    r"""Read a newline-terminated decimal long literal (trailing 'L' required).

    >>> import StringIO
    >>> read_decimalnl_long(StringIO.StringIO("1234\n56"))
    Traceback (most recent call last):
    ...
    ValueError: trailing 'L' required in '1234'

    Someday the trailing 'L' will probably go away from this output.

    >>> read_decimalnl_long(StringIO.StringIO("1234L\n56"))
    1234L

    >>> read_decimalnl_long(StringIO.StringIO("123456789012345678901234L\n6"))
    123456789012345678901234L
    """
    s = read_stringnl(f, decode=False, stripquotes=False)
    if s.endswith("L"):
        return long(s)
    raise ValueError("trailing 'L' required in %r" % s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
    r"""Read a newline-terminated decimal float literal from *f*.

    >>> import StringIO
    >>> read_floatnl(StringIO.StringIO("-1.25\n6"))
    -1.25
    """
    return float(read_stringnl(f, decode=False, stripquotes=False))


# Descriptor instance for the argument type read by read_floatnl.
floatnl = ArgumentDescriptor(
    name='floatnl',
    n=UP_TO_NEWLINE,
    reader=read_floatnl,
    doc="""A newline-terminated decimal floating literal.

           In general this requires 17 significant digits for roundtrip
           identity, and pickling then unpickling infinities, NaNs, and
           minus zero doesn't work across boxes, or on some boxes even
           on itself (e.g., Windows can't read the strings it produces
           for infinities or NaNs).
           """)
def read_float8(f):
    r"""Read an 8-byte big-endian binary float from *f*.

    >>> import StringIO, struct
    >>> raw = struct.pack(">d", -1.25)
    >>> raw
    '\xbf\xf4\x00\x00\x00\x00\x00\x00'
    >>> read_float8(StringIO.StringIO(raw + "\n"))
    -1.25
    """
    data = f.read(8)
    if len(data) != 8:
        raise ValueError("not enough data in stream to read float8")
    return _unpack(">d", data)[0]
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and cPickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
from pickle import decode_long
def read_long1(f):
    r"""Read a LONG1 argument: a 1-byte unsigned count, then that many
    bytes of little-endian 2's-complement long data.

    >>> import StringIO
    >>> read_long1(StringIO.StringIO("\x00"))
    0L
    >>> read_long1(StringIO.StringIO("\x02\xff\x00"))
    255L
    >>> read_long1(StringIO.StringIO("\x02\xff\x7f"))
    32767L
    >>> read_long1(StringIO.StringIO("\x02\x00\xff"))
    -256L
    >>> read_long1(StringIO.StringIO("\x02\x00\x80"))
    -32768L
    """
    nbytes = read_uint1(f)
    payload = f.read(nbytes)
    if len(payload) != nbytes:
        # The count byte promised more data than the stream contains.
        raise ValueError("not enough data in stream to read long1")
    return decode_long(payload)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
    r"""Read a LONG4 argument: a 4-byte little-endian signed byte count,
    then that many bytes of little-endian 2's-complement long data.

    Raises ValueError if the count is negative or the stream is too short.

    >>> import StringIO
    >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x00"))
    255L
    >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x7f"))
    32767L
    >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\xff"))
    -256L
    >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\x80"))
    -32768L
    >>> read_long4(StringIO.StringIO("\x00\x00\x00\x00"))
    0L
    """
    # BUG FIX: the last doctest previously called read_long1, so the
    # "count of 0 means long 0" path of *this* function was never tested.
    n = read_int4(f)
    if n < 0:
        # The count is read as a *signed* int4, so a negative value means
        # a corrupt pickle, not a huge long.
        raise ValueError("long4 byte count < 0: %d" % n)
    data = f.read(n)
    if len(data) != n:
        raise ValueError("not enough data in stream to read long4")
    return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the long 0L, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
class StackObject(object):
    """Descriptor for one kind of object that can sit on the unpickling
    stack; opcode records reference these in stack_before/stack_after."""

    __slots__ = (
        'name',    # name of descriptor record, for info only
        'obtype',  # a type, or a tuple of acceptable types
        'doc',     # human-readable description; a string
    )

    def __init__(self, name, obtype, doc):
        assert isinstance(name, str)
        self.name = name

        # obtype is either a single type or a tuple of types.
        assert isinstance(obtype, type) or isinstance(obtype, tuple)
        if isinstance(obtype, tuple):
            for member in obtype:
                assert isinstance(member, type)
        self.obtype = obtype

        assert isinstance(doc, str)
        self.doc = doc

    def __repr__(self):
        # The bare name is the most useful display in disassembly output.
        return self.name
pyint = StackObject(
name='int',
obtype=int,
doc="A short (as opposed to long) Python integer object.")
pylong = StackObject(
name='long',
obtype=long,
doc="A long (as opposed to short) Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, long, bool),
doc="A Python integer object (short or long), or "
"a Python bool.")
pybool = StackObject(
name='bool',
obtype=(bool,),
doc="A Python bool object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pystring = StackObject(
name='str',
obtype=str,
doc="A Python string object.")
pyunicode = StackObject(
name='unicode',
obtype=unicode,
doc="A Python Unicode string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many object are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
class OpcodeInfo(object):
    """Descriptive record for a single pickle opcode.

    Instances are pure data; all validation happens in __init__ via
    asserts.  See the `opcodes` list below for every instance.
    """

    __slots__ = (
        # symbolic name of opcode; a string
        'name',

        # the code used in a bytestream to represent the opcode; a
        # one-character string
        'code',

        # If the opcode has an argument embedded in the byte string, an
        # instance of ArgumentDescriptor specifying its type. Note that
        # arg.reader(s) can be used to read and decode the argument from
        # the bytestream s, and arg.doc documents the format of the raw
        # argument bytes. If the opcode doesn't have an argument embedded
        # in the bytestream, arg should be None.
        'arg',

        # what the stack looks like before this opcode runs; a list
        'stack_before',

        # what the stack looks like after this opcode runs; a list
        'stack_after',

        # the protocol number in which this opcode was introduced; an int
        'proto',

        # human-readable docs for this opcode; a string
        'doc',
    )

    def __init__(self, name, code, arg,
                 stack_before, stack_after, proto, doc):
        assert isinstance(name, str)
        self.name = name

        # Opcodes are single characters in the pickle bytestream.
        assert isinstance(code, str)
        assert len(code) == 1
        self.code = code

        assert arg is None or isinstance(arg, ArgumentDescriptor)
        self.arg = arg

        # Both stack descriptions are lists of StackObject instances.
        assert isinstance(stack_before, list)
        for x in stack_before:
            assert isinstance(x, StackObject)
        self.stack_before = stack_before

        assert isinstance(stack_after, list)
        for x in stack_after:
            assert isinstance(x, StackObject)
        self.stack_after = stack_after

        # Only protocols 0, 1 and 2 exist in this (Python 2) module.
        assert isinstance(proto, (int, long)) and 0 <= proto <= 2
        self.proto = proto

        assert isinstance(doc, str)
        self.doc = doc
# `I` is a short-lived alias that keeps the big table below readable;
# it is deleted again immediately after the table.
I = OpcodeInfo
opcodes = [

    # Ways to spell integers.

    I(name='INT',
      code='I',
      arg=decimalnl_short,
      stack_before=[],
      stack_after=[pyinteger_or_bool],
      proto=0,
      doc="""Push an integer or bool.
      The argument is a newline-terminated decimal literal string.
      The intent may have been that this always fit in a short Python int,
      but INT can be generated in pickles written on a 64-bit box that
      require a Python long on a 32-bit box. The difference between this
      and LONG then is that INT skips a trailing 'L', and produces a short
      int whenever possible.
      Another difference is due to that, when bool was introduced as a
      distinct type in 2.3, builtin names True and False were also added to
      2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
      True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
      Leading zeroes are never produced for a genuine integer. The 2.3
      (and later) unpicklers special-case these and return bool instead;
      earlier unpicklers ignore the leading "0" and return the int.
      """),

    I(name='BININT',
      code='J',
      arg=int4,
      stack_before=[],
      stack_after=[pyint],
      proto=1,
      doc="""Push a four-byte signed integer.
      This handles the full range of Python (short) integers on a 32-bit
      box, directly as binary bytes (1 for the opcode and 4 for the integer).
      If the integer is non-negative and fits in 1 or 2 bytes, pickling via
      BININT1 or BININT2 saves space.
      """),

    I(name='BININT1',
      code='K',
      arg=uint1,
      stack_before=[],
      stack_after=[pyint],
      proto=1,
      doc="""Push a one-byte unsigned integer.
      This is a space optimization for pickling very small non-negative ints,
      in range(256).
      """),

    I(name='BININT2',
      code='M',
      arg=uint2,
      stack_before=[],
      stack_after=[pyint],
      proto=1,
      doc="""Push a two-byte unsigned integer.
      This is a space optimization for pickling small positive ints, in
      range(256, 2**16). Integers in range(256) can also be pickled via
      BININT2, but BININT1 instead saves a byte.
      """),

    I(name='LONG',
      code='L',
      arg=decimalnl_long,
      stack_before=[],
      stack_after=[pylong],
      proto=0,
      doc="""Push a long integer.
      The same as INT, except that the literal ends with 'L', and always
      unpickles to a Python long. There doesn't seem a real purpose to the
      trailing 'L'.
      Note that LONG takes time quadratic in the number of digits when
      unpickling (this is simply due to the nature of decimal->binary
      conversion). Proto 2 added linear-time (in C; still quadratic-time
      in Python) LONG1 and LONG4 opcodes.
      """),

    I(name="LONG1",
      code='\x8a',
      arg=long1,
      stack_before=[],
      stack_after=[pylong],
      proto=2,
      doc="""Long integer using one-byte length.
      A more efficient encoding of a Python long; the long1 encoding
      says it all."""),

    I(name="LONG4",
      code='\x8b',
      arg=long4,
      stack_before=[],
      stack_after=[pylong],
      proto=2,
      doc="""Long integer using four-byte length.
      A more efficient encoding of a Python long; the long4 encoding
      says it all."""),

    # Ways to spell strings (8-bit, not Unicode).

    I(name='STRING',
      code='S',
      arg=stringnl,
      stack_before=[],
      stack_after=[pystring],
      proto=0,
      doc="""Push a Python string object.
      The argument is a repr-style string, with bracketing quote characters,
      and perhaps embedded escapes. The argument extends until the next
      newline character.
      """),

    I(name='BINSTRING',
      code='T',
      arg=string4,
      stack_before=[],
      stack_after=[pystring],
      proto=1,
      doc="""Push a Python string object.
      There are two arguments: the first is a 4-byte little-endian signed int
      giving the number of bytes in the string, and the second is that many
      bytes, which are taken literally as the string content.
      """),

    I(name='SHORT_BINSTRING',
      code='U',
      arg=string1,
      stack_before=[],
      stack_after=[pystring],
      proto=1,
      doc="""Push a Python string object.
      There are two arguments: the first is a 1-byte unsigned int giving
      the number of bytes in the string, and the second is that many bytes,
      which are taken literally as the string content.
      """),

    # Ways to spell None.

    I(name='NONE',
      code='N',
      arg=None,
      stack_before=[],
      stack_after=[pynone],
      proto=0,
      doc="Push None on the stack."),

    # Ways to spell bools, starting with proto 2. See INT for how this was
    # done before proto 2.

    I(name='NEWTRUE',
      code='\x88',
      arg=None,
      stack_before=[],
      stack_after=[pybool],
      proto=2,
      doc="""True.
      Push True onto the stack."""),

    I(name='NEWFALSE',
      code='\x89',
      arg=None,
      stack_before=[],
      stack_after=[pybool],
      proto=2,
      doc="""False.
      Push False onto the stack."""),

    # Ways to spell Unicode strings.

    I(name='UNICODE',
      code='V',
      arg=unicodestringnl,
      stack_before=[],
      stack_after=[pyunicode],
      proto=0,  # this may be pure-text, but it's a later addition
      doc="""Push a Python Unicode string object.
      The argument is a raw-unicode-escape encoding of a Unicode string,
      and so may contain embedded escape sequences. The argument extends
      until the next newline character.
      """),

    I(name='BINUNICODE',
      code='X',
      arg=unicodestring4,
      stack_before=[],
      stack_after=[pyunicode],
      proto=1,
      doc="""Push a Python Unicode string object.
      There are two arguments: the first is a 4-byte little-endian signed int
      giving the number of bytes in the string. The second is that many
      bytes, and is the UTF-8 encoding of the Unicode string.
      """),

    # Ways to spell floats.

    I(name='FLOAT',
      code='F',
      arg=floatnl,
      stack_before=[],
      stack_after=[pyfloat],
      proto=0,
      doc="""Newline-terminated decimal float literal.
      The argument is repr(a_float), and in general requires 17 significant
      digits for roundtrip conversion to be an identity (this is so for
      IEEE-754 double precision values, which is what Python float maps to
      on most boxes).
      In general, FLOAT cannot be used to transport infinities, NaNs, or
      minus zero across boxes (or even on a single box, if the platform C
      library can't read the strings it produces for such things -- Windows
      is like that), but may do less damage than BINFLOAT on boxes with
      greater precision or dynamic range than IEEE-754 double.
      """),

    I(name='BINFLOAT',
      code='G',
      arg=float8,
      stack_before=[],
      stack_after=[pyfloat],
      proto=1,
      doc="""Float stored in binary form, with 8 bytes of data.
      This generally requires less than half the space of FLOAT encoding.
      In general, BINFLOAT cannot be used to transport infinities, NaNs, or
      minus zero, raises an exception if the exponent exceeds the range of
      an IEEE-754 double, and retains no more than 53 bits of precision (if
      there are more than that, "add a half and chop" rounding is used to
      cut it back to 53 significant bits).
      """),

    # Ways to build lists.

    I(name='EMPTY_LIST',
      code=']',
      arg=None,
      stack_before=[],
      stack_after=[pylist],
      proto=1,
      doc="Push an empty list."),

    I(name='APPEND',
      code='a',
      arg=None,
      stack_before=[pylist, anyobject],
      stack_after=[pylist],
      proto=0,
      doc="""Append an object to a list.
      Stack before: ... pylist anyobject
      Stack after: ... pylist+[anyobject]
      although pylist is really extended in-place.
      """),

    I(name='APPENDS',
      code='e',
      arg=None,
      stack_before=[pylist, markobject, stackslice],
      stack_after=[pylist],
      proto=1,
      doc="""Extend a list by a slice of stack objects.
      Stack before: ... pylist markobject stackslice
      Stack after: ... pylist+stackslice
      although pylist is really extended in-place.
      """),

    I(name='LIST',
      code='l',
      arg=None,
      stack_before=[markobject, stackslice],
      stack_after=[pylist],
      proto=0,
      doc="""Build a list out of the topmost stack slice, after markobject.
      All the stack entries following the topmost markobject are placed into
      a single Python list, which single list object replaces all of the
      stack from the topmost markobject onward. For example,
      Stack before: ... markobject 1 2 3 'abc'
      Stack after: ... [1, 2, 3, 'abc']
      """),

    # Ways to build tuples.

    I(name='EMPTY_TUPLE',
      code=')',
      arg=None,
      stack_before=[],
      stack_after=[pytuple],
      proto=1,
      doc="Push an empty tuple."),

    I(name='TUPLE',
      code='t',
      arg=None,
      stack_before=[markobject, stackslice],
      stack_after=[pytuple],
      proto=0,
      doc="""Build a tuple out of the topmost stack slice, after markobject.
      All the stack entries following the topmost markobject are placed into
      a single Python tuple, which single tuple object replaces all of the
      stack from the topmost markobject onward. For example,
      Stack before: ... markobject 1 2 3 'abc'
      Stack after: ... (1, 2, 3, 'abc')
      """),

    I(name='TUPLE1',
      code='\x85',
      arg=None,
      stack_before=[anyobject],
      stack_after=[pytuple],
      proto=2,
      doc="""Build a one-tuple out of the topmost item on the stack.
      This code pops one value off the stack and pushes a tuple of
      length 1 whose one item is that value back onto it. In other
      words:
      stack[-1] = tuple(stack[-1:])
      """),

    I(name='TUPLE2',
      code='\x86',
      arg=None,
      stack_before=[anyobject, anyobject],
      stack_after=[pytuple],
      proto=2,
      doc="""Build a two-tuple out of the top two items on the stack.
      This code pops two values off the stack and pushes a tuple of
      length 2 whose items are those values back onto it. In other
      words:
      stack[-2:] = [tuple(stack[-2:])]
      """),

    I(name='TUPLE3',
      code='\x87',
      arg=None,
      stack_before=[anyobject, anyobject, anyobject],
      stack_after=[pytuple],
      proto=2,
      doc="""Build a three-tuple out of the top three items on the stack.
      This code pops three values off the stack and pushes a tuple of
      length 3 whose items are those values back onto it. In other
      words:
      stack[-3:] = [tuple(stack[-3:])]
      """),

    # Ways to build dicts.

    I(name='EMPTY_DICT',
      code='}',
      arg=None,
      stack_before=[],
      stack_after=[pydict],
      proto=1,
      doc="Push an empty dict."),

    I(name='DICT',
      code='d',
      arg=None,
      stack_before=[markobject, stackslice],
      stack_after=[pydict],
      proto=0,
      doc="""Build a dict out of the topmost stack slice, after markobject.
      All the stack entries following the topmost markobject are placed into
      a single Python dict, which single dict object replaces all of the
      stack from the topmost markobject onward. The stack slice alternates
      key, value, key, value, .... For example,
      Stack before: ... markobject 1 2 3 'abc'
      Stack after: ... {1: 2, 3: 'abc'}
      """),

    I(name='SETITEM',
      code='s',
      arg=None,
      stack_before=[pydict, anyobject, anyobject],
      stack_after=[pydict],
      proto=0,
      doc="""Add a key+value pair to an existing dict.
      Stack before: ... pydict key value
      Stack after: ... pydict
      where pydict has been modified via pydict[key] = value.
      """),

    I(name='SETITEMS',
      code='u',
      arg=None,
      stack_before=[pydict, markobject, stackslice],
      stack_after=[pydict],
      proto=1,
      doc="""Add an arbitrary number of key+value pairs to an existing dict.
      The slice of the stack following the topmost markobject is taken as
      an alternating sequence of keys and values, added to the dict
      immediately under the topmost markobject. Everything at and after the
      topmost markobject is popped, leaving the mutated dict at the top
      of the stack.
      Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
      Stack after: ... pydict
      where pydict has been modified via pydict[key_i] = value_i for i in
      1, 2, ..., n, and in that order.
      """),

    # Stack manipulation.

    I(name='POP',
      code='0',
      arg=None,
      stack_before=[anyobject],
      stack_after=[],
      proto=0,
      doc="Discard the top stack item, shrinking the stack by one item."),

    I(name='DUP',
      code='2',
      arg=None,
      stack_before=[anyobject],
      stack_after=[anyobject, anyobject],
      proto=0,
      doc="Push the top stack item onto the stack again, duplicating it."),

    I(name='MARK',
      code='(',
      arg=None,
      stack_before=[],
      stack_after=[markobject],
      proto=0,
      doc="""Push markobject onto the stack.
      markobject is a unique object, used by other opcodes to identify a
      region of the stack containing a variable number of objects for them
      to work on. See markobject.doc for more detail.
      """),

    I(name='POP_MARK',
      code='1',
      arg=None,
      stack_before=[markobject, stackslice],
      stack_after=[],
      proto=1,
      doc="""Pop all the stack objects at and above the topmost markobject.
      When an opcode using a variable number of stack objects is done,
      POP_MARK is used to remove those objects, and to remove the markobject
      that delimited their starting position on the stack.
      """),

    # Memo manipulation. There are really only two operations (get and put),
    # each in all-text, "short binary", and "long binary" flavors.

    I(name='GET',
      code='g',
      arg=decimalnl_short,
      stack_before=[],
      stack_after=[anyobject],
      proto=0,
      doc="""Read an object from the memo and push it on the stack.
      The index of the memo object to push is given by the newline-terminated
      decimal string following. BINGET and LONG_BINGET are space-optimized
      versions.
      """),

    I(name='BINGET',
      code='h',
      arg=uint1,
      stack_before=[],
      stack_after=[anyobject],
      proto=1,
      doc="""Read an object from the memo and push it on the stack.
      The index of the memo object to push is given by the 1-byte unsigned
      integer following.
      """),

    I(name='LONG_BINGET',
      code='j',
      arg=int4,
      stack_before=[],
      stack_after=[anyobject],
      proto=1,
      doc="""Read an object from the memo and push it on the stack.
      The index of the memo object to push is given by the 4-byte signed
      little-endian integer following.
      """),

    I(name='PUT',
      code='p',
      arg=decimalnl_short,
      stack_before=[],
      stack_after=[],
      proto=0,
      doc="""Store the stack top into the memo. The stack is not popped.
      The index of the memo location to write into is given by the newline-
      terminated decimal string following. BINPUT and LONG_BINPUT are
      space-optimized versions.
      """),

    I(name='BINPUT',
      code='q',
      arg=uint1,
      stack_before=[],
      stack_after=[],
      proto=1,
      doc="""Store the stack top into the memo. The stack is not popped.
      The index of the memo location to write into is given by the 1-byte
      unsigned integer following.
      """),

    I(name='LONG_BINPUT',
      code='r',
      arg=int4,
      stack_before=[],
      stack_after=[],
      proto=1,
      doc="""Store the stack top into the memo. The stack is not popped.
      The index of the memo location to write into is given by the 4-byte
      signed little-endian integer following.
      """),

    # Access the extension registry (predefined objects). Akin to the GET
    # family.

    I(name='EXT1',
      code='\x82',
      arg=uint1,
      stack_before=[],
      stack_after=[anyobject],
      proto=2,
      doc="""Extension code.
      This code and the similar EXT2 and EXT4 allow using a registry
      of popular objects that are pickled by name, typically classes.
      It is envisioned that through a global negotiation and
      registration process, third parties can set up a mapping between
      ints and object names.
      In order to guarantee pickle interchangeability, the extension
      code registry ought to be global, although a range of codes may
      be reserved for private use.
      EXT1 has a 1-byte integer argument. This is used to index into the
      extension registry, and the object at that index is pushed on the stack.
      """),

    I(name='EXT2',
      code='\x83',
      arg=uint2,
      stack_before=[],
      stack_after=[anyobject],
      proto=2,
      doc="""Extension code.
      See EXT1. EXT2 has a two-byte integer argument.
      """),

    I(name='EXT4',
      code='\x84',
      arg=int4,
      stack_before=[],
      stack_after=[anyobject],
      proto=2,
      doc="""Extension code.
      See EXT1. EXT4 has a four-byte integer argument.
      """),

    # Push a class object, or module function, on the stack, via its module
    # and name.

    I(name='GLOBAL',
      code='c',
      arg=stringnl_noescape_pair,
      stack_before=[],
      stack_after=[anyobject],
      proto=0,
      doc="""Push a global object (module.attr) on the stack.
      Two newline-terminated strings follow the GLOBAL opcode. The first is
      taken as a module name, and the second as a class name. The class
      object module.class is pushed on the stack. More accurately, the
      object returned by self.find_class(module, class) is pushed on the
      stack, so unpickling subclasses can override this form of lookup.
      """),

    # Ways to build objects of classes pickle doesn't know about directly
    # (user-defined classes). I despair of documenting this accurately
    # and comprehensibly -- you really have to read the pickle code to
    # find all the special cases.

    I(name='REDUCE',
      code='R',
      arg=None,
      stack_before=[anyobject, anyobject],
      stack_after=[anyobject],
      proto=0,
      doc="""Push an object built from a callable and an argument tuple.
      The opcode is named to remind of the __reduce__() method.
      Stack before: ... callable pytuple
      Stack after: ... callable(*pytuple)
      The callable and the argument tuple are the first two items returned
      by a __reduce__ method. Applying the callable to the argtuple is
      supposed to reproduce the original object, or at least get it started.
      If the __reduce__ method returns a 3-tuple, the last component is an
      argument to be passed to the object's __setstate__, and then the REDUCE
      opcode is followed by code to create setstate's argument, and then a
      BUILD opcode to apply __setstate__ to that argument.
      If type(callable) is not ClassType, REDUCE complains unless the
      callable has been registered with the copy_reg module's
      safe_constructors dict, or the callable has a magic
      '__safe_for_unpickling__' attribute with a true value. I'm not sure
      why it does this, but I've sure seen this complaint often enough when
      I didn't want to <wink>.
      """),

    I(name='BUILD',
      code='b',
      arg=None,
      stack_before=[anyobject, anyobject],
      stack_after=[anyobject],
      proto=0,
      doc="""Finish building an object, via __setstate__ or dict update.
      Stack before: ... anyobject argument
      Stack after: ... anyobject
      where anyobject may have been mutated, as follows:
      If the object has a __setstate__ method,
      anyobject.__setstate__(argument)
      is called.
      Else the argument must be a dict, the object must have a __dict__, and
      the object is updated via
      anyobject.__dict__.update(argument)
      This may raise RuntimeError in restricted execution mode (which
      disallows access to __dict__ directly); in that case, the object
      is updated instead via
      for k, v in argument.items():
      anyobject[k] = v
      """),

    I(name='INST',
      code='i',
      arg=stringnl_noescape_pair,
      stack_before=[markobject, stackslice],
      stack_after=[anyobject],
      proto=0,
      doc="""Build a class instance.
      This is the protocol 0 version of protocol 1's OBJ opcode.
      INST is followed by two newline-terminated strings, giving a
      module and class name, just as for the GLOBAL opcode (and see
      GLOBAL for more details about that). self.find_class(module, name)
      is used to get a class object.
      In addition, all the objects on the stack following the topmost
      markobject are gathered into a tuple and popped (along with the
      topmost markobject), just as for the TUPLE opcode.
      Now it gets complicated. If all of these are true:
      + The argtuple is empty (markobject was at the top of the stack
      at the start).
      + It's an old-style class object (the type of the class object is
      ClassType).
      + The class object does not have a __getinitargs__ attribute.
      then we want to create an old-style class instance without invoking
      its __init__() method (pickle has waffled on this over the years; not
      calling __init__() is current wisdom). In this case, an instance of
      an old-style dummy class is created, and then we try to rebind its
      __class__ attribute to the desired class object. If this succeeds,
      the new instance object is pushed on the stack, and we're done. In
      restricted execution mode it can fail (assignment to __class__ is
      disallowed), and I'm not really sure what happens then -- it looks
      like the code ends up calling the class object's __init__ anyway,
      via falling into the next case.
      Else (the argtuple is not empty, it's not an old-style class object,
      or the class object does have a __getinitargs__ attribute), the code
      first insists that the class object have a __safe_for_unpickling__
      attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
      it doesn't matter whether this attribute has a true or false value, it
      only matters whether it exists (XXX this is a bug; cPickle
      requires the attribute to be true). If __safe_for_unpickling__
      doesn't exist, UnpicklingError is raised.
      Else (the class object does have a __safe_for_unpickling__ attr),
      the class object obtained from INST's arguments is applied to the
      argtuple obtained from the stack, and the resulting instance object
      is pushed on the stack.
      NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
      """),

    I(name='OBJ',
      code='o',
      arg=None,
      stack_before=[markobject, anyobject, stackslice],
      stack_after=[anyobject],
      proto=1,
      doc="""Build a class instance.
      This is the protocol 1 version of protocol 0's INST opcode, and is
      very much like it. The major difference is that the class object
      is taken off the stack, allowing it to be retrieved from the memo
      repeatedly if several instances of the same class are created. This
      can be much more efficient (in both time and space) than repeatedly
      embedding the module and class names in INST opcodes.
      Unlike INST, OBJ takes no arguments from the opcode stream. Instead
      the class object is taken off the stack, immediately above the
      topmost markobject:
      Stack before: ... markobject classobject stackslice
      Stack after: ... new_instance_object
      As for INST, the remainder of the stack above the markobject is
      gathered into an argument tuple, and then the logic seems identical,
      except that no __safe_for_unpickling__ check is done (XXX this is
      a bug; cPickle does test __safe_for_unpickling__). See INST for
      the gory details.
      NOTE: In Python 2.3, INST and OBJ are identical except for how they
      get the class object. That was always the intent; the implementations
      had diverged for accidental reasons.
      """),

    I(name='NEWOBJ',
      code='\x81',
      arg=None,
      stack_before=[anyobject, anyobject],
      stack_after=[anyobject],
      proto=2,
      doc="""Build an object instance.
      The stack before should be thought of as containing a class
      object followed by an argument tuple (the tuple being the stack
      top). Call these cls and args. They are popped off the stack,
      and the value returned by cls.__new__(cls, *args) is pushed back
      onto the stack.
      """),

    # Machine control.

    I(name='PROTO',
      code='\x80',
      arg=uint1,
      stack_before=[],
      stack_after=[],
      proto=2,
      doc="""Protocol version indicator.
      For protocol 2 and above, a pickle must start with this opcode.
      The argument is the protocol version, an int in range(2, 256).
      """),

    I(name='STOP',
      code='.',
      arg=None,
      stack_before=[anyobject],
      stack_after=[],
      proto=0,
      doc="""Stop the unpickling machine.
      Every pickle ends with this opcode. The object at the top of the stack
      is popped, and that's the result of unpickling. The stack should be
      empty then.
      """),

    # Ways to deal with persistent IDs.

    I(name='PERSID',
      code='P',
      arg=stringnl_noescape,
      stack_before=[],
      stack_after=[anyobject],
      proto=0,
      doc="""Push an object identified by a persistent ID.
      The pickle module doesn't define what a persistent ID means. PERSID's
      argument is a newline-terminated str-style (no embedded escapes, no
      bracketing quote characters) string, which *is* "the persistent ID".
      The unpickler passes this string to self.persistent_load(). Whatever
      object that returns is pushed on the stack. There is no implementation
      of persistent_load() in Python's unpickler: it must be supplied by an
      unpickler subclass.
      """),

    I(name='BINPERSID',
      code='Q',
      arg=None,
      stack_before=[anyobject],
      stack_after=[anyobject],
      proto=1,
      doc="""Push an object identified by a persistent ID.
      Like PERSID, except the persistent ID is popped off the stack (instead
      of being a string embedded in the opcode bytestream). The persistent
      ID is passed to self.persistent_load(), and whatever object that
      returns is pushed on the stack. See PERSID for more detail.
      """),
]
del I
# Verify that no opcode name and no opcode character is defined twice in
# `opcodes`; the two index maps exist only for this check and are deleted.
name2i = {}
code2i = {}

for i, d in enumerate(opcodes):
    if d.name in name2i:
        raise ValueError("repeated name %r at indices %d and %d" %
                         (d.name, name2i[d.name], i))
    if d.code in code2i:
        raise ValueError("repeated code %r at indices %d and %d" %
                         (d.code, code2i[d.code], i))
    name2i[d.name] = i
    code2i[d.code] = i

# Clean the loop temporaries out of the module namespace.
del name2i, code2i, i, d

# code2op: maps an opcode character to its OpcodeInfo record; this is the
# lookup table genops() uses to drive disassembly.
code2op = {}
for d in opcodes:
    code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
    """Cross-check code2op against the opcode constants in pickle.py.

    Every ALL-CAPS one-character-string name exported by the pickle module
    must match an entry in code2op (same code, same name), and vice versa;
    any mismatch raises ValueError.  Pass verbose=True to trace the
    comparison.  Runs once at import time (see the call below).
    """
    import pickle, re

    copy = code2op.copy()
    for name in pickle.__all__:
        # Opcode constants are ALL-CAPS identifiers; skip everything else.
        if not re.match("[A-Z][A-Z0-9_]+$", name):
            if verbose:
                print "skipping %r: it doesn't look like an opcode name" % name
            continue
        picklecode = getattr(pickle, name)
        # An opcode constant's value is a one-character string.
        if not isinstance(picklecode, str) or len(picklecode) != 1:
            if verbose:
                print ("skipping %r: value %r doesn't look like a pickle "
                       "code" % (name, picklecode))
            continue
        if picklecode in copy:
            if verbose:
                print "checking name %r w/ code %r for consistency" % (
                    name, picklecode)
            d = copy[picklecode]
            if d.name != name:
                raise ValueError("for pickle code %r, pickle.py uses name %r "
                                 "but we're using name %r" % (picklecode,
                                                              name,
                                                              d.name))
            # Forget this one. Any left over in copy at the end are a problem
            # of a different kind.
            del copy[picklecode]
        else:
            raise ValueError("pickle.py appears to have a pickle opcode with "
                             "name %r and code %r, but we don't" %
                             (name, picklecode))
    if copy:
        msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
        for code, d in copy.items():
            msg.append(" name %r with code %r" % (d.name, code))
        raise ValueError("\n".join(msg))

# Run the consistency check at import time, then remove the helper so it
# doesn't linger in the module namespace.
assure_pickle_consistency()
del assure_pickle_consistency
def genops(pickle):
    """Generate all the opcodes in a pickle.

    'pickle' is a file-like object, or string, containing the pickle.

    Each opcode in the pickle is generated, from the current pickle position,
    stopping after a STOP opcode is delivered. A triple is generated for
    each opcode:

        opcode, arg, pos

    opcode is an OpcodeInfo record, describing the current opcode.

    If the opcode has an argument embedded in the pickle, arg is its decoded
    value, as a Python object. If the opcode doesn't have an argument, arg
    is None.

    If the pickle has a tell() method, pos was the value of pickle.tell()
    before reading the current opcode. If the pickle is a string object,
    it's wrapped in a StringIO object, and the latter's tell() result is
    used. Else (the pickle doesn't have a tell(), and it's not obvious how
    to query its current position) pos is None.
    """
    import cStringIO as StringIO

    if isinstance(pickle, str):
        pickle = StringIO.StringIO(pickle)

    # Capture a position-reporting callable once, outside the loop.
    if hasattr(pickle, "tell"):
        getpos = pickle.tell
    else:
        getpos = lambda: None

    while True:
        pos = getpos()
        code = pickle.read(1)
        opcode = code2op.get(code)
        if opcode is None:
            # read(1) returning "" means end-of-stream: the pickle ended
            # without a STOP opcode.
            if code == "":
                raise ValueError("pickle exhausted before seeing STOP")
            else:
                raise ValueError("at position %s, opcode %r unknown" % (
                    pos is None and "<unknown>" or pos,
                    code))
        if opcode.arg is None:
            arg = None
        else:
            # The descriptor's reader consumes and decodes the embedded
            # argument, advancing the stream past it.
            arg = opcode.arg.reader(pickle)
        yield opcode, arg, pos
        if code == '.':
            assert opcode.name == 'STOP'
            break
def optimize(p):
    'Optimize a pickle string by removing unused PUT opcodes'
    gets = set()        # memo keys referenced by some GET opcode
    puts = []           # (arg, startpos, stoppos) for each PUT opcode
    pending_pos = None  # position of a PUT whose end we haven't seen yet
    for opcode, arg, pos in genops(p):
        # The previous opcode was a PUT; now that we know where it ended,
        # record its byte span.
        if pending_pos is not None:
            puts.append((pending_arg, pending_pos, pos))
            pending_pos = None
        if 'PUT' in opcode.name:
            pending_arg, pending_pos = arg, pos
        elif 'GET' in opcode.name:
            gets.add(arg)

    # Copy the pickle, skipping the byte span of every PUT whose memo key
    # is never fetched by a GET.
    pieces = []
    cursor = 0
    for arg, start, stop in puts:
        keep_until = stop if arg in gets else start
        pieces.append(p[cursor:keep_until])
        cursor = stop
    pieces.append(p[cursor:])
    return ''.join(pieces)
def dis(pickle, out=None, memo=None, indentlevel=4):
    """Produce a symbolic disassembly of a pickle.

    'pickle' is a file-like object, or string, containing a (at least one)
    pickle.  The pickle is disassembled from the current position, through
    the first STOP opcode encountered.

    Optional arg 'out' is a file-like object to which the disassembly is
    printed.  It defaults to sys.stdout.

    Optional arg 'memo' is a Python dict, used as the pickle's memo.  It
    may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
    Passing the same memo object to another dis() call then allows disassembly
    to proceed across multiple pickles that were all created by the same
    pickler with the same memo.  Ordinarily you don't need to worry about this.

    Optional arg indentlevel is the number of blanks by which to indent
    a new MARK level.  It defaults to 4.

    In addition to printing the disassembly, some sanity checks are made:

    + All embedded opcode arguments "make sense".

    + Explicit and implicit pop operations have enough items on the stack.

    + When an opcode implicitly refers to a markobject, a markobject is
      actually on the stack.

    + A memo entry isn't referenced before it's defined.

    + The markobject isn't stored in the memo.

    + A memo entry isn't redefined.
    """
    # Most of the hair here is for sanity checks, but most of it is needed
    # anyway to detect when a protocol 0 POP takes a MARK off the stack
    # (which in turn is needed to indent MARK blocks correctly).
    stack = []          # crude emulation of unpickler stack
    if memo is None:
        memo = {}       # crude emulation of unpickler memo
    maxproto = -1       # max protocol number seen
    markstack = []      # bytecode positions of MARK opcodes
    indentchunk = ' ' * indentlevel
    errormsg = None
    for opcode, arg, pos in genops(pickle):
        if pos is not None:
            print >> out, "%5d:" % pos,
        # Opcode character (repr with quotes stripped), MARK-depth indent,
        # then the symbolic opcode name.
        line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
                              indentchunk * len(markstack),
                              opcode.name)
        maxproto = max(maxproto, opcode.proto)
        before = opcode.stack_before    # don't mutate
        after = opcode.stack_after      # don't mutate
        numtopop = len(before)
        # See whether a MARK should be popped.
        # NOTE: markobject and stackslice are sentinel objects defined
        # earlier in this module (not shown here).
        markmsg = None
        if markobject in before or (opcode.name == "POP" and
                                    stack and
                                    stack[-1] is markobject):
            assert markobject not in after
            if __debug__:
                if markobject in before:
                    assert before[-1] is stackslice
            if markstack:
                markpos = markstack.pop()
                if markpos is None:
                    markmsg = "(MARK at unknown opcode offset)"
                else:
                    markmsg = "(MARK at %d)" % markpos
                # Pop everything at and after the topmost markobject.
                while stack[-1] is not markobject:
                    stack.pop()
                stack.pop()
                # Stop later code from popping too much.
                try:
                    numtopop = before.index(markobject)
                except ValueError:
                    assert opcode.name == "POP"
                    numtopop = 0
            else:
                errormsg = markmsg = "no MARK exists on stack"
        # Check for correct memo usage.
        if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
            assert arg is not None
            if arg in memo:
                errormsg = "memo key %r already defined" % arg
            elif not stack:
                errormsg = "stack is empty -- can't store into memo"
            elif stack[-1] is markobject:
                errormsg = "can't store markobject in the memo"
            else:
                memo[arg] = stack[-1]
        elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
            if arg in memo:
                assert len(after) == 1
                after = [memo[arg]]     # for better stack emulation
            else:
                errormsg = "memo key %r has never been stored into" % arg
        if arg is not None or markmsg:
            # make a mild effort to align arguments
            line += ' ' * (10 - len(opcode.name))
            if arg is not None:
                line += ' ' + repr(arg)
            if markmsg:
                line += ' ' + markmsg
        print >> out, line
        if errormsg:
            # Note that we delayed complaining until the offending opcode
            # was printed.
            raise ValueError(errormsg)
        # Emulate the stack effects.
        if len(stack) < numtopop:
            raise ValueError("tries to pop %d items from stack with "
                             "only %d items" % (numtopop, len(stack)))
        if numtopop:
            del stack[-numtopop:]
        if markobject in after:
            assert markobject not in before
            markstack.append(pos)
        stack.extend(after)
    print >> out, "highest protocol among opcodes =", maxproto
    if stack:
        raise ValueError("stack not empty after STOP: %r" % stack)
class _Example:
    """Tiny helper class for the INST/OBJ/BUILD doctests below."""
    def __init__(self, value):
        # Stored so pickling the instance produces a state dict
        # {'value': ...} for the doctests to disassemble.
        self.value = value
# Doctest data for dis(): expected disassembly of several representative
# pickles (protocols 0, 1 and 2; recursive objects; INST/OBJ/BUILD).
# NOTE(review): the column alignment inside these expected-output strings is
# significant to doctest and appears to have been whitespace-mangled in
# transit -- restore spacing from CPython's pickletools.py before running.
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {'abc': u"def"}]
>>> pkl = pickle.dumps(x, 0)
>>> dis(pkl)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: I INT 1
8: a APPEND
9: I INT 2
12: a APPEND
13: ( MARK
14: I INT 3
17: I INT 4
20: t TUPLE (MARK at 13)
21: p PUT 1
24: a APPEND
25: ( MARK
26: d DICT (MARK at 25)
27: p PUT 2
30: S STRING 'abc'
37: p PUT 3
40: V UNICODE u'def'
45: p PUT 4
48: s SETITEM
49: a APPEND
50: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl = pickle.dumps(x, 1)
>>> dis(pkl)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: U SHORT_BINSTRING 'abc'
24: q BINPUT 3
26: X BINUNICODE u'def'
34: q BINPUT 4
36: s SETITEM
37: e APPENDS (MARK at 3)
38: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: i INST 'pickletools _Example' (MARK at 5)
28: p PUT 1
31: ( MARK
32: d DICT (MARK at 31)
33: p PUT 2
36: S STRING 'value'
45: p PUT 3
48: I INT 42
52: s SETITEM
53: b BUILD
54: a APPEND
55: g GET 1
58: a APPEND
59: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: ( MARK
5: c GLOBAL 'pickletools _Example'
27: q BINPUT 1
29: o OBJ (MARK at 4)
30: q BINPUT 2
32: } EMPTY_DICT
33: q BINPUT 3
35: U SHORT_BINSTRING 'value'
42: q BINPUT 4
44: K BININT1 42
46: s SETITEM
47: b BUILD
48: h BINGET 2
50: e APPENDS (MARK at 3)
51: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
"""

# Doctest data for dis()'s 'memo' argument: two dumps of the same object by
# one pickler, disassembled with a shared memo dict so the second pickle's
# BINGET resolves.  Same whitespace caveat as _dis_test above.
_memo_test = r"""
>>> import pickle
>>> from StringIO import StringIO
>>> f = StringIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""

# Register the doctest strings so doctest.testmod() picks them up.
__test__ = {'disassembler_test': _dis_test,
            'disassembler_memo_test': _memo_test,
            }
def _test():
    """Run this module's doctests (the __test__ strings above)."""
    import doctest
    return doctest.testmod()
# Run the doctest suite when executed as a script.
if __name__ == "__main__":
    _test()
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class PhoneNumber(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum values accepted for the `type` attribute.
    allowed_values = {
        ('type',): {
            'HOME': "home",
            'WORK': "work",
            'OFFICE': "office",
            'MOBILE': "mobile",
            'MOBILE1': "mobile1",
            'OTHER': "other",
        },
    }

    # No per-attribute validations (length/range/regex) for this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # This model itself may not be serialized as JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'data': (str,),  # noqa: E501
            'primary': (bool,),  # noqa: E501
            'type': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No polymorphic discriminator for this schema.
        return None

    # Maps pythonic attribute names to their JSON keys in the OpenAPI spec
    # (identical here, but the generator always emits the table).
    attribute_map = {
        'data': 'data',  # noqa: E501
        'primary': 'primary',  # noqa: E501
        'type': 'type',  # noqa: E501
    }

    # Not composed of allOf/oneOf/anyOf schemas.
    _composed_schemas = {}

    # Internal bookkeeping attributes that must always be settable on the
    # instance regardless of the spec's declared properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, data, primary, type, *args, **kwargs):  # noqa: E501
        """PhoneNumber - a model defined in OpenAPI

        Args:
            data (str): The phone number.
            primary (bool): When `true`, identifies the phone number as the primary number on an account.
            type (str): The type of phone number.

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """

        # Pop the framework-level keyword options before treating the rest
        # of kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Generated models accept no positional extras beyond the declared
        # required properties.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required properties from the spec.
        self.data = data
        self.primary = primary
        self.type = type
        # Remaining kwargs become additional (dynamic) properties, unless the
        # configuration says to discard unknown keys.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
from KBParallel.utils.job import Job
class Task:
    """
    An object representing a task to be run either locally or remotely.

    Contains module_name, method_name, and parameters for a task.
    Also contains a list of jobs that were run, a count of run failures, error messages, and
    responses.

    If a task is running, then it will have a .current_job field that points to the currently
    running Job object.
    """

    def __init__(self, args, task_manager):
        """
        Create a new task, passing in the parameters for a Task (see KBParallel.spec)

        keys in `args`:
            module_name - required - module name to run
            function_name - required - method name in the module to run
            params - required - parameters to pass to the method
            task_manager - required - task manager that created this task

        Also pass in a TaskManager instance as the second parameter
        """
        self.task_manager = task_manager
        self.method_name = args['function_name']
        self.module_name = args['module_name']
        self.full_name = self.module_name + '.' + self.method_name
        self.service_ver = args['version']
        self.params = args['parameters']
        self.failures = 0        # number of failed runs so far
        self.jobs = []           # every Job ever spawned for this task
        self.results = None      # final results dict once the task settles
        self.current_job = None  # the currently running Job, if any
        self.completed = False   # True once finished or retries exhausted

    def start_job(self, run_location):
        """Spawn a new job for this task and make it the current job."""
        job = Job(
            task=self,
            task_manager=self.task_manager,
            run_location=run_location
        )
        self.current_job = job
        self.jobs.append(job)

    def handle_job_results(self, results):
        """Update the results for a finished current_job.

        `results` is a dict; a truthy 'error' key counts as a failed run,
        a truthy 'finished' key marks the task successfully completed.
        """
        if results.get('error'):
            self.failures += 1
            # Use >= rather than == so the task still settles when
            # max_retries is 0, or if failures ever overshoot the limit;
            # with == such a task would never be marked complete.
            if self.failures >= self.task_manager.max_retries:
                self.completed = True
                self.results = results
        elif results.get('finished'):
            self.completed = True
            self.results = results
|
import unittest
from livefyre import Livefyre
from livefyre.tests import LfTest
from livefyre.src.cursor.factory import CursorFactory
from livefyre.src.cursor import TimelineCursor
from livefyre.src.cursor.model import CursorData
import datetime
from livefyre.src.utils import pyver
class TimelineCursorTestCase(LfTest, unittest.TestCase):
    """Tests for TimelineCursor construction and (live) API paging."""

    def test_build_cursor(self):
        # Build a cursor directly from CursorData and confirm the cursor
        # time can be (re)set.
        network = Livefyre.get_network(self.NETWORK_NAME, self.NETWORK_KEY)
        date = datetime.datetime.now()
        cursor = TimelineCursor(network, CursorData("resource", 50, date))
        self.assertTrue(cursor)
        cursor.data.set_cursor_time(date)
        self.assertTrue(cursor.data.cursor_time)
        # TimelineCursor.init asserts on missing arguments.  The regex-assert
        # helper is spelled assertRaisesRegexp on Python 2.7 and
        # assertRaisesRegex on Python 3; it does not exist before 2.7, so
        # those interpreters skip this part.
        if pyver < 2.7:
            pass
        elif pyver < 3.0:
            with self.assertRaisesRegexp(AssertionError, 'resource is missing'):
                TimelineCursor.init(network, None, 50, datetime.datetime.now())
            with self.assertRaisesRegexp(AssertionError, 'limit is missing'):
                TimelineCursor.init(network, 'resource', None, datetime.datetime.now())
            with self.assertRaisesRegexp(AssertionError, 'cursor_time is missing'):
                TimelineCursor.init(network, 'resource', 50, None)
        else:
            with self.assertRaisesRegex(AssertionError, 'resource is missing'):
                TimelineCursor.init(network, None, 50, datetime.datetime.now())
            with self.assertRaisesRegex(AssertionError, 'limit is missing'):
                TimelineCursor.init(network, 'resource', None, datetime.datetime.now())
            with self.assertRaisesRegex(AssertionError, 'cursor_time is missing'):
                TimelineCursor.init(network, 'resource', 50, None)

    def test_api_calls(self):
        # Page a personal-stream cursor forward then back; only the
        # previous_items payload is asserted truthy.
        network = Livefyre.get_network(self.NETWORK_NAME, self.NETWORK_KEY)
        cursor = CursorFactory.get_personal_stream_cursor(network, self.USER_ID)
        cursor.next_items()
        json = cursor.previous_items()
        self.assertTrue(json)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import sys
from os.path import abspath, dirname
import django
from django.conf import settings
from django.core.management.utils import get_random_secret_key
# Make this script's directory importable so the email_log package is found
# when the runner is executed directly.
sys.path.insert(0, abspath(dirname(__file__)))

# Configure a minimal standalone Django project for the test suite, unless
# the caller configured settings already.
if not settings.configured:
    settings.configure(
        SECRET_KEY=get_random_secret_key(),
        INSTALLED_APPS=(
            "django.contrib.contenttypes",
            "django.contrib.sessions",
            "django.contrib.messages",
            "django.contrib.auth",
            "django.contrib.admin",
            "email_log",
            "email_log.tests",
        ),
        # NOTE(review): no NAME given for the sqlite3 database -- presumably
        # the test runner's in-memory fallback is intended; confirm.
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
            }
        },
        # Outgoing mail during tests is captured in memory.
        EMAIL_LOG_BACKEND="django.core.mail.backends.locmem.EmailBackend",
        MIDDLEWARE=[
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.middleware.common.CommonMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware",
        ],
        ROOT_URLCONF="email_log.tests.urls",
        TEMPLATES=[
            {
                "BACKEND": "django.template.backends.django.DjangoTemplates",
                "APP_DIRS": True,
                "OPTIONS": {
                    "context_processors": [
                        "django.contrib.auth.context_processors.auth",
                        "django.contrib.messages.context_processors.messages",
                        "django.template.context_processors.request",
                    ]
                },
            },
        ],
    )
def runtests():
    """Run the email_log test suite and exit with the number of failures."""
    # django.setup() exists on newer Django versions and must run before
    # any test discovery.
    if hasattr(django, "setup"):
        django.setup()
    try:
        # Modern Django: discovery-based runner.
        from django.test.runner import DiscoverRunner as runner_class
        test_args = ["email_log.tests"]
    except ImportError:
        # Fallback for very old Django releases.
        from django.test.simple import DjangoTestSuiteRunner as runner_class
        test_args = ["tests"]
    runner = runner_class(failfast=False)
    sys.exit(runner.run_tests(test_args))
# Entry point when invoked as a script.
if __name__ == "__main__":
    runtests()
|
from __future__ import absolute_import, division, print_function
from itertools import product
from math import ceil
from numbers import Number
from operator import getitem, add, itemgetter
import numpy as np
from toolz import merge, accumulate, pluck, memoize
from ..base import tokenize
from ..compatibility import long
# Canonical "take everything" slice, i.e. the `:` in x[:].
colon = slice(None, None, None)
def sanitize_index(ind):
""" Sanitize the elements for indexing along one axis
>>> sanitize_index([2, 3, 5])
[2, 3, 5]
>>> sanitize_index([True, False, True, False])
[0, 2]
>>> sanitize_index(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index(np.array([False, True, True]))
[1, 2]
>>> type(sanitize_index(np.int32(0)))
<type 'int'>
>>> sanitize_index(1.0)
1
>>> sanitize_index(0.5)
Traceback (most recent call last):
...
IndexError: Bad index. Must be integer-like: 0.5
"""
if isinstance(ind, Number):
ind2 = int(ind)
if ind2 != ind:
raise IndexError("Bad index. Must be integer-like: %s" % ind)
else:
return ind2
if hasattr(ind, 'tolist'):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
if isinstance(ind, list):
return [sanitize_index(i) for i in ind]
if isinstance(ind, slice):
return slice(sanitize_index(ind.start),
sanitize_index(ind.stop),
sanitize_index(ind.step))
if ind is None:
return ind
try:
return sanitize_index(np.array(ind).tolist())
except:
raise TypeError("Invalid index type", type(ind), ind)
def slice_array(out_name, in_name, blockdims, index):
    """
    Master function for array slicing

    This function makes a new dask that slices blocks along every
    dimension and aggregates (via cartesian product) each dimension's
    slices so that the resulting block slices give the same results
    as the original slice on the original structure

    Index must be a tuple.  It may contain the following types

        int, slice, list (at most one list), None

    Parameters
    ----------
    in_name - string
      This is the dask variable name that will be used as input
    out_name - string
      This is the dask variable output name
    blockshape - iterable of integers
    index - iterable of integers, slices, lists, or None

    Returns
    -------
    Dict where the keys are tuples of

        (out_name, dim_index[, dim_index[, ...]])

    and the values are

        (function, (in_name, dim_index, dim_index, ...),
                   (slice(...), [slice()[,...]])

    Also new blockdims with shapes of each block

        ((10, 10, 10, 10), (20, 20))

    Examples
    --------
    >>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],
    ...                              (slice(10, 35),))  # doctest: +SKIP
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, ('x', 0), (slice(10, 20),)),
     ('y', 1): (getitem, ('x', 1), (slice(0, 15),))}
    >>> blockdims  # doctest: +SKIP
    ((10, 15),)

    See Also
    --------
    This function works by successively unwrapping cases and passing down
    through a sequence of functions.

    slice_with_newaxis - handle None/newaxis case
    slice_wrap_lists - handle fancy indexing with lists
    slice_slices_and_integers - handle everything else
    """
    index = replace_ellipsis(len(blockdims), index)
    index = tuple(map(sanitize_index, index))
    blockdims = tuple(map(tuple, blockdims))

    # x[:, :, :] - Punt and return old value
    # (the original generator expression shadowed the `index` parameter with
    #  its own loop variable; use a distinct name)
    if all(ind == slice(None, None, None) for ind in index):
        suffixes = product(*[range(len(bd)) for bd in blockdims])
        dsk = dict(((out_name,) + s, (in_name,) + s)
                   for s in suffixes)
        return dsk, blockdims

    # Add in missing colons at the end as needed.  x[5] -> x[5, :, :]
    # (None entries don't consume an input axis, hence the subtraction.)
    missing = len(blockdims) - (len(index) - index.count(None))
    index += (slice(None, None, None),) * missing

    # Pass down to next function
    dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)

    bd_out = tuple(map(tuple, bd_out))
    return dsk_out, bd_out
def slice_with_newaxes(out_name, in_name, blockdims, index):
    """
    Handle indexing with Nones

    Strips out Nones then hands off to slice_wrap_lists, afterwards
    re-inserting a length-1 axis (and a 0 key coordinate) for every None.
    """
    # Strip Nones from index
    index2 = tuple([ind for ind in index if ind is not None])
    where_none = [i for i, ind in enumerate(index) if ind is None]
    where_none_orig = list(where_none)
    # Integer indices collapse their axis in the *output*, so shift each
    # None position left by the number of ints that precede it.
    for i, x in enumerate(where_none):
        n = sum(isinstance(ind, int) for ind in index[:x])
        if n:
            where_none[i] -= n

    # Pass down and do work
    dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)

    if where_none:
        # expander() builds a function that re-inserts filler entries at the
        # recorded positions (defined elsewhere in this module).
        expand = expander(where_none)
        expand_orig = expander(where_none_orig)

        # Insert ",0" into the key:  ('x', 2, 3) -> ('x', 0, 2, 0, 3)
        dsk2 = {(out_name,) + expand(k[1:], 0):
                (v[:2] + (expand_orig(v[2], None),))
                for k, v in dsk.items()
                if k[0] == out_name}

        # Add back intermediate parts of the dask that weren't the output
        dsk3 = merge(dsk2, {k: v for k, v in dsk.items() if k[0] != out_name})

        # Insert (1,) into blockdims:  ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))
        blockdims3 = expand(blockdims2, (1,))

        return dsk3, blockdims3
    else:
        return dsk, blockdims2
def slice_wrap_lists(out_name, in_name, blockdims, index):
    """
    Fancy indexing along blocked array dasks

    Handles index of type list.  Calls slice_slices_and_integers for the rest

    See Also
    --------
    take - handle slicing with lists ("fancy" indexing)
    slice_slices_and_integers - handle slicing with slices and integers
    """
    shape = tuple(map(sum, blockdims))
    assert all(isinstance(i, (slice, list, int, long)) for i in index)
    if not len(blockdims) == len(index):
        raise IndexError("Too many indices for array")
    # check_index (defined elsewhere in this module) validates each element
    # against the axis length.
    for bd, i in zip(blockdims, index):
        check_index(i, sum(bd))

    # Change indices like -1 to 9
    index2 = posify_index(shape, index)

    # Do we have more than one list in the index?
    where_list = [i for i, ind in enumerate(index) if isinstance(ind, list)]
    if len(where_list) > 1:
        raise NotImplementedError("Don't yet support nd fancy indexing")

    # No lists, hooray! just use slice_slices_and_integers
    if not where_list:
        return slice_slices_and_integers(out_name, in_name, blockdims, index2)

    # Replace all lists with full slices  [3, 1, 0] -> slice(None, None, None)
    index_without_list = tuple(slice(None, None, None)
                               if isinstance(i, list) else i
                               for i in index2)

    # lists and full slices.  Just use take
    if all(isinstance(i, list) or i == slice(None, None, None)
           for i in index2):
        axis = where_list[0]
        blockdims2, dsk3 = take(out_name, in_name, blockdims,
                                index2[where_list[0]], axis=axis)
    # Mixed case.  Both slices/integers and lists.  slice/integer then take
    else:
        # Do first pass without lists
        # (tokenize makes the intermediate name deterministic per input)
        tmp = 'slice-' + tokenize((out_name, in_name, blockdims, index))
        dsk, blockdims2 = slice_slices_and_integers(tmp, in_name, blockdims, index_without_list)

        # After collapsing some axes due to int indices, adjust axis parameter
        axis = where_list[0]
        axis2 = axis - sum(1 for i, ind in enumerate(index2)
                           if i < axis and isinstance(ind, (int, long)))

        # Do work
        blockdims2, dsk2 = take(out_name, tmp, blockdims2, index2[axis],
                                axis=axis2)
        dsk3 = merge(dsk, dsk2)

    return dsk3, blockdims2
def slice_slices_and_integers(out_name, in_name, blockdims, index):
    """
    Dask array indexing with slices and integers

    Builds, per output block, a getitem task over the matching input block,
    by taking the cartesian product of each dimension's block slices.

    See Also
    --------
    _slice_1d
    """
    shape = tuple(map(sum, blockdims))

    assert all(isinstance(ind, (slice, int, long)) for ind in index)
    assert len(index) == len(blockdims)

    # Get a list (for each dimension) of dicts{blocknum: slice()}
    block_slices = list(map(_slice_1d, shape, blockdims, index))
    sorted_block_slices = [sorted(i.items()) for i in block_slices]

    # (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
    in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))

    # (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
    # Output block numbering runs backwards along axes sliced with a
    # negative step; integer-indexed axes disappear from the key entirely.
    out_names = list(product([out_name],
                             *[range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
                               for d, i in zip(block_slices, index)
                               if not isinstance(i, (int, long))]))

    all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))

    dsk_out = {out_name: (getitem, in_name, slices)
               for out_name, in_name, slices
               in zip(out_names, in_names, all_slices)}

    # new_blockdim (defined elsewhere in this module) computes each kept
    # axis's resulting block sizes.
    new_blockdims = [new_blockdim(d, db, i)
                     for d, i, db in zip(shape, index, blockdims)
                     if not isinstance(i, (int, long))]

    return dsk_out, new_blockdims
def _slice_1d(dim_shape, lengths, index):
    """Returns a dict of {blocknum: slice}

    This function figures out where each slice should start in each
    block for a single dimension.  If the slice won't return any elements
    in the block, that block will not be in the output.

    Parameters
    ----------
    dim_shape - the number of elements in this dimension.
      This should be a positive, non-zero integer
    blocksize - the number of elements per block in this dimension
      This should be a positive, non-zero integer
    index - a description of the elements in this dimension that we want
      This might be an integer, a slice(), or an Ellipsis

    Returns
    -------
    dictionary where the keys are the integer index of the blocks that
      should be sliced and the values are the slices

    Examples
    --------
    Trivial slicing

    >>> _slice_1d(100, [60, 40], slice(None, None, None))
    {0: slice(None, None, None), 1: slice(None, None, None)}

    100 length array cut into length 20 pieces, slice 0:35

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
    {0: slice(None, None, None), 1: slice(0, 15, 1)}

    Support irregular blocks and various slices

    >>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
    {0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}

    Support step sizes

    >>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
    {0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40))  # step > blocksize
    {0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}

    Also support indexing single elements

    >>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
    {1: 5}

    And negative slicing

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3))
    {0: slice(-2, -20, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3))
    {0: slice(-2, -8, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
    {4: slice(-1, -12, -3)}
    """
    # Integer index: walk blocks until the offset falls inside one.
    if isinstance(index, (int, long)):
        i = 0
        ind = index
        lens = list(lengths)
        while ind >= lens[0]:
            i += 1
            ind -= lens.pop(0)
        return {i: ind}
    assert isinstance(index, slice)

    # Full slice: every block is taken whole.
    if index == colon:
        return {k: colon for k in range(len(lengths))}

    step = index.step or 1
    if step > 0:
        start = index.start or 0
        stop = index.stop if index.stop is not None else dim_shape
    else:
        start = index.start or dim_shape - 1
        start = dim_shape - 1 if start >= dim_shape else start
        # Sentinel meaning "run past element 0" for a negative step.
        stop = -(dim_shape + 1) if index.stop is None else index.stop

    # posify start and stop
    if start < 0:
        start += dim_shape
    if stop < 0:
        stop += dim_shape

    d = dict()
    if step > 0:
        # Forward pass: express start/stop relative to each block in turn.
        for i, length in enumerate(lengths):
            if start < length and stop > 0:
                d[i] = slice(start, min(stop, length), step)
                # Phase of the step carried into the next block.
                start = (start - length) % step
            else:
                start = start - length
            stop -= length
    else:
        # Backward pass over blocks for negative steps; slices are expressed
        # with negative offsets relative to the end of each block.
        rstart = start  # running start
        chunk_boundaries = list(accumulate(add, lengths))
        for i, chunk_stop in reversed(list(enumerate(chunk_boundaries))):
            # create a chunk start and stop
            if i == 0:
                chunk_start = 0
            else:
                chunk_start = chunk_boundaries[i - 1]

            # if our slice is in this chunk
            if (chunk_start <= rstart < chunk_stop) and (rstart > stop):
                d[i] = slice(rstart - chunk_stop,
                             max(chunk_start - chunk_stop - 1,
                                 stop - chunk_stop),
                             step)

                # compute the next running start point,
                offset = (rstart - (chunk_start - 1)) % step
                rstart = chunk_start + offset - 1

    # replace 0:20:1 with : if appropriate
    for k, v in d.items():
        if v == slice(0, lengths[k], 1):
            d[k] = slice(None, None, None)

    if not d:  # special case x[:0]
        d[0] = slice(0, 0, 1)

    return d
def partition_by_size(sizes, seq):
    """Split the sorted values in ``seq`` into per-chunk relative offsets.

    ``sizes`` are consecutive chunk lengths; each value in ``seq`` lands in
    the chunk whose span contains it and is rebased to that chunk's start.

    >>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])
    [[1, 5, 9], [2, 19], [5]]
    """
    values = np.array(seq)
    # Exclusive upper boundary of each chunk in the global coordinate space.
    boundaries = np.cumsum(sizes)
    # Where each boundary cuts the (sorted) value sequence.
    cut_points = [0] + np.searchsorted(values, boundaries).tolist()
    # Global offset at which each chunk begins.
    chunk_starts = [0] + boundaries.tolist()
    result = []
    for i in range(len(cut_points) - 1):
        chunk = values[cut_points[i]:cut_points[i + 1]] - chunk_starts[i]
        result.append(chunk.tolist())
    return result
def issorted(seq):
    """ Is sequence sorted?

    >>> issorted([1, 2, 3])
    True
    >>> issorted([3, 1, 2])
    False
    """
    # Vacuously true for empty and single-element sequences; otherwise every
    # adjacent pair must be non-decreasing.
    if not seq:
        return True
    return all(a <= b for a, b in zip(seq, seq[1:]))
def take_sorted(outname, inname, blockdims, index, axis=0):
    """ Index array with sorted list index

    Forms a dask for the following case

        x[:, [1, 3, 5, 10], ...]

    where the index, ``[1, 3, 5, 10]`` is sorted in non-decreasing order.

    >>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
    >>> blockdims
    ((3, 1),)
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
     ('y', 1): (getitem, ('x', 2), ([7],))}

    See Also
    --------
    take - calls this function
    """
    sizes = blockdims[axis]  # the blocksizes on the axis that we care about

    # Split the index into per-block relative offsets along `axis`.
    index_lists = partition_by_size(sizes, sorted(index))

    # Keep only the input blocks that are actually hit by the index.
    where_index = [i for i, il in enumerate(index_lists) if il]
    index_lists = [il for il in index_lists if il]

    dims = [range(len(bd)) for bd in blockdims]

    # Output keys: hit blocks are renumbered 0..k-1 along `axis`.
    indims = list(dims)
    indims[axis] = list(range(len(where_index)))
    keys = list(product([outname], *indims))

    # Input keys keep the original block numbers of the hit blocks.
    outdims = list(dims)
    outdims[axis] = where_index
    slices = [[colon] * len(bd) for bd in blockdims]
    slices[axis] = index_lists
    slices = list(product(*slices))
    inkeys = list(product([inname], *outdims))
    values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]

    blockdims2 = list(blockdims)
    blockdims2[axis] = tuple(map(len, index_lists))

    return tuple(blockdims2), dict(zip(keys, values))
def take(outname, inname, blockdims, index, axis=0):
    """ Index array with an iterable of index
    Handles a single index by a single list
    Mimics ``np.take``
    >>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
    >>> blockdims
    ((4,),)
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
                                           (getitem, ('x', 2), ([7],))],
                          0),
                (2, 0, 4, 1))}
    When list is sorted we retain original block structure
    >>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
    >>> blockdims
    ((3, 1),)
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
     ('y', 2): (getitem, ('x', 2), ([7],))}
    """
    # Sorted indices preserve block boundaries -- delegate to the cheap path.
    if issorted(index):
        return take_sorted(outname, inname, blockdims, index, axis)
    n = len(blockdims)
    sizes = blockdims[axis]  # the blocksizes on the axis that we care about
    # Gather in sorted order first, then permute back to the caller's order.
    index_lists = partition_by_size(sizes, sorted(index))
    # The indexed axis collapses to a single output block ([0]); other axes
    # keep their original block numbering.
    dims = [[0] if axis == i else list(range(len(bd)))
            for i, bd in enumerate(blockdims)]
    keys = list(product([outname], *dims))
    # Permutation that restores the original (unsorted) order of ``index``.
    rev_index = list(map(sorted(index).index, index))
    # Each output task: concatenate the per-block sorted gathers along
    # ``axis``, then apply ``rev_index`` to undo the sort.  ``colon`` and
    # ``getitem`` are module-level helpers defined outside this view.
    vals = [(getitem, (np.concatenate,
                       [(getitem, ((inname, ) + d[:axis] + (i, ) + d[axis + 1:]),
                         ((colon, ) * axis + (IL, ) + (colon, ) * (n - axis - 1)))
                        for i, IL in enumerate(index_lists) if IL], axis),
             ((colon, ) * axis + (rev_index, ) + (colon, ) * (n - axis - 1)))
            for d in product(*dims)]
    # The indexed axis becomes a single chunk holding every requested index.
    blockdims2 = list(blockdims)
    blockdims2[axis] = (len(index), )
    return tuple(blockdims2), dict(zip(keys, vals))
def posify_index(shape, ind):
    """ Flip negative indices around to positive ones
    >>> posify_index(10, 3)
    3
    >>> posify_index(10, -3)
    7
    >>> posify_index(10, [3, -3])
    [3, 7]
    >>> posify_index((10, 20), (3, -3))
    (3, 17)
    >>> posify_index((10, 20), (3, [3, 4, -3]))
    (3, [3, 4, 17])
    """
    # A tuple pairs one index per axis: recurse element-wise.
    if isinstance(ind, tuple):
        return tuple(map(posify_index, shape, ind))
    # NOTE: ``long`` is the Python 2 integer type (this module predates py3).
    if isinstance(ind, (int, long)):
        return shape + ind if ind < 0 else ind
    if isinstance(ind, list):
        return [shape + i if i < 0 else i for i in ind]
    # Slices and anything else pass through untouched.
    return ind
def insert_many(seq, where, val):
    """ Insert value at many locations in sequence
    >>> insert_many(['a', 'b', 'c'], [0, 2], 'z')
    ('z', 'a', 'z', 'b', 'c')
    """
    remaining = list(seq)
    out = []
    # The result has one slot per original element plus one per insertion;
    # insertion positions take ``val``, the rest consume ``seq`` in order.
    for position in range(len(where) + len(remaining)):
        out.append(val if position in where else remaining.pop(0))
    return tuple(out)
@memoize
def _expander(where):
    # Build (and cache, via the module-level ``memoize``) a function that
    # inserts ``val`` at each position in ``where`` -- a compiled equivalent
    # of insert_many() for a fixed, hashable ``where``.
    if not where:
        # Nothing to insert: the identity is the fastest expander.
        def expand(seq, val):
            return seq
        return expand
    else:
        # Generate source of the form:
        #   def expand(seq, val):
        #       return (val, seq[0], val, seq[1], ...) + tuple(seq[j:])
        decl = """def expand(seq, val):
    return ({left}) + tuple({right})
"""
        left = []
        j = 0
        # Walk positions up to the last insertion point; each position is
        # either an inserted ``val`` or the next untouched element of seq.
        for i in range(max(where) + 1):
            if i in where:
                left.append("val, ")
            else:
                left.append("seq[%d], " % j)
                j += 1  # advance only when an original element is consumed
        # Everything after the last insertion point is passed through whole.
        right = "seq[%d:]" % j
        left = "".join(left)
        decl = decl.format(**locals())
        ns = {}
        # Compile the generated source and pull the function out of its
        # private namespace.
        exec(compile(decl, "<dynamic>", "exec"), ns, ns)
        return ns['expand']
def expander(where):
    """Return a cached, specialized equivalent of insert_many() for *where*.

    >>> expander([0, 2])(['a', 'b', 'c'], 'z')
    ('z', 'a', 'z', 'b', 'c')
    """
    # Normalize to a hashable key so the memoized builder can cache it.
    positions = tuple(where)
    return _expander(positions)
def new_blockdim(dim_shape, lengths, index):
    """
    Compute the new chunk lengths along one axis after applying ``index``.

    >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))
    [10, 5, 10, 5, 15]
    >>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])
    [4]
    >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))
    [16, 5, 10, 5, 4]
    """
    # A list index collapses the axis into a single chunk of that length.
    if isinstance(index, list):
        return [len(index)]
    # Integer indexing drops the axis entirely, so it must not reach here
    # (``long`` is the Python 2 integer type).
    assert not isinstance(index, (int, long))
    # _slice_1d (defined earlier in this module) maps block number -> the
    # block-local slice; sort by block number for a deterministic order.
    pairs = sorted(_slice_1d(dim_shape, lengths, index).items(),
                   key=itemgetter(0))
    # A full slice(None) stands for "the whole block": materialize it so
    # the length arithmetic below has concrete start/stop/step.
    slices = [slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc
              for i, slc in pairs]
    # Negative steps traverse blocks in reverse order.
    if isinstance(index, slice) and index.step and index.step < 0:
        slices = slices[::-1]
    # ceil((stop - start) / step) elements survive from each block.
    return [int(ceil((1. * slc.stop - slc.start) / slc.step)) for slc in slices]
def replace_ellipsis(n, index):
    """ Replace ... with slices, :, : ,:
    >>> replace_ellipsis(4, (3, Ellipsis, 2))
    (3, slice(None, None, None), slice(None, None, None), 2)
    >>> replace_ellipsis(2, (Ellipsis, None))
    (slice(None, None, None), slice(None, None, None), None)
    """
    # Compare with ``is`` rather than ``in``/``==`` because index entries
    # may be arrays, whose __eq__ does not return a plain bool.
    ellipsis_positions = [pos for pos, item in enumerate(index)
                          if item is Ellipsis]
    if not ellipsis_positions:
        return index
    # Axes the ellipsis must cover: total minus explicit entries, ignoring
    # None (newaxis) entries which consume no input axis, and the ellipsis
    # itself.
    n_missing = n - (len(index) - sum(item is None for item in index) - 1)
    pos = ellipsis_positions[0]
    fill = (slice(None, None, None),) * n_missing
    return index[:pos] + fill + index[pos + 1:]
def check_index(ind, dimension):
    """ Check validity of index for a given dimension
    Examples
    --------
    >>> check_index(3, 5)
    >>> check_index(5, 5)
    Traceback (most recent call last):
    ...
    IndexError: Index is not smaller than dimension 5 >= 5
    >>> check_index(6, 5)
    Traceback (most recent call last):
    ...
    IndexError: Index is not smaller than dimension 6 >= 5
    >>> check_index(-1, 5)
    >>> check_index(-6, 5)
    Traceback (most recent call last):
    ...
    IndexError: Negative index is not greater than negative dimension -6 <= -5
    >>> check_index([1, 2], 5)
    >>> check_index([6, 3], 5)
    Traceback (most recent call last):
    ...
    IndexError: Index out of bounds 5
    >>> check_index(slice(0, 3), 5)
    """
    # Fancy (list) indices: vectorize the bounds check over all entries.
    if isinstance(ind, list):
        arr = np.array(ind)
        if (arr >= dimension).any() or (arr <= -dimension).any():
            raise IndexError("Index out of bounds %s" % dimension)
        return
    # Slices are clipped by Python semantics and can never be out of range.
    if isinstance(ind, slice):
        return
    # Scalar index: valid range is [-dimension, dimension).
    if ind >= dimension:
        raise IndexError("Index is not smaller than dimension %d >= %d" %
                         (ind, dimension))
    if ind < -dimension:
        msg = "Negative index is not greater than negative dimension %d <= -%d"
        raise IndexError(msg % (ind, dimension))
|
# Django settings for a Pinax-based project (legacy Django/Pinax layout).
import os.path
import posixpath
import pinax

# Filesystem anchors for locating Pinax-bundled and project-local assets.
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

# Name of the Pinax theme whose media and templates are used.
PINAX_THEME = 'default'

# NOTE(review): DEBUG defaults to True here -- must be overridden to False
# in production (typically via local_settings, imported at the bottom).
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Serve static/media through Django itself -- development convenience only.
SERVE_MEDIA = DEBUG

# Addresses allowed to see debug output (e.g. the debug toolbar).
INTERNAL_IPS = (
    '127.0.0.1',
)

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Legacy (pre-Django-1.2) single-database settings format.
DATABASE_ENGINE = 'sqlite3'    # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = 'dev.db'       # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.

TIME_ZONE = 'US/Eastern'
LANGUAGE_CODE = 'en'
SITE_ID = 1
USE_I18N = True

# User-uploaded media and collected static assets, served under /site_media/.
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'media')
MEDIA_URL = '/site_media/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'static')
STATIC_URL = '/site_media/static/'
# Extra locations scanned by the (pre-contrib) staticfiles app.
STATICFILES_DIRS = (
    ('cms_project_company', os.path.join(PROJECT_ROOT, 'media')),
    ('pinax', os.path.join(PINAX_ROOT, 'media', PINAX_THEME)),
)
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")

# NOTE(review): empty SECRET_KEY -- a unique, secret value must be set
# (e.g. in local_settings) before this project is deployed.
SECRET_KEY = ''

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    'dbtemplates.loader.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django_openid.consumer.SessionConsumer',
    'account.middleware.LocaleMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'pagination.middleware.PaginationMiddleware',
    'pinax.middleware.security.HideSensistiveFieldsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)

ROOT_URLCONF = 'cms_project_company.urls'

# Project templates first, then the Pinax theme's templates as fallback.
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), "templates"),
    os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "pinax.core.context_processors.pinax_settings",
    "notification.context_processors.notification",
    "account.context_processors.openid",
    "account.context_processors.account",
)

INSTALLED_APPS = (
    # included
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.humanize',
    'pinax.templatetags',
    # external
    'notification',  # must be first
    'django_openid',
    'emailconfirmation',
    'mailer',
    'pagination',
    'timezones',
    'ajax_validation',
    'uni_form',
    'dbtemplates',
    'staticfiles',
    'debug_toolbar',
    # internal (for now)
    'basic_profiles',
    'account',
    'django.contrib.admin',
    # project specific
    'sorl.thumbnail',
    'frontendadmin',
    'attachments',
    'django_generic_flatblocks',
    'django_generic_flatblocks.contrib.gblocks',
    'django.contrib.markup',
)

FIXTURE_DIRS = [
    os.path.join(PROJECT_ROOT, "fixtures"),
]

# Route auth.User.get_absolute_url() to the basic_profiles profile page.
ABSOLUTE_URL_OVERRIDES = {
    "auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}

MARKUP_FILTER_FALLBACK = 'none'
MARKUP_CHOICES = (
    ('restructuredtext', u'reStructuredText'),
    ('textile', u'Textile'),
    ('markdown', u'Markdown'),
    ('creole', u'Creole'),
)
WIKI_MARKUP_CHOICES = MARKUP_CHOICES

AUTH_PROFILE_MODULE = 'basic_profiles.Profile'
NOTIFICATION_LANGUAGE_MODULE = 'account.Account'

# Account/registration policy knobs for the pinax ``account`` app.
ACCOUNT_OPEN_SIGNUP = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False

EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
CONTACT_EMAIL = "feedback@example.com"
SITE_NAME = "Acme, Inc."
LOGIN_URL = "/account/login/"
LOGIN_REDIRECT_URLNAME = "home"

SERIALIZATION_MODULES = {
    "jsonfk": "pinax.core.serializers.jsonfk",
}

# Allow per-machine overrides without editing this file; absence of
# local_settings is not an error.
try:
    from local_settings import *
except ImportError:
    pass
|
from flask import Flask
from flask_webapi import WebAPI, authenticate, authorize, route
from flask_webapi.authenticators import Authenticator, AuthenticateResult
from flask_webapi.permissions import Permission, IsAuthenticated
from unittest import TestCase
class TestView(TestCase):
    """End-to-end checks of the authenticate/authorize view decorators."""

    def setUp(self):
        self.app = Flask(__name__)
        self.api = WebAPI(self.app)
        self.client = self.app.test_client()

    def test_permission_granted(self):
        # Authenticated request plus a satisfied permission -> 204 No Content.
        @route('/view')
        @authenticate(FakeAuthenticator)
        @authorize(IsAuthenticated)
        def view():
            pass

        self.api.add_view(view)
        res = self.client.get('/view')
        self.assertEqual(204, res.status_code)

    def test_permission_denied(self):
        # A permission that always refuses must yield 403 Forbidden.
        class AlwaysDeny(Permission):
            def has_permission(self):
                return False

        @route('/view')
        @authenticate(FakeAuthenticator)
        @authorize(AlwaysDeny)
        def view():
            pass

        self.api.add_view(view)
        res = self.client.get('/view')
        self.assertEqual(403, res.status_code)

    def test_unauthenticated(self):
        # No authenticator registered: IsAuthenticated rejects with 401.
        @route('/view')
        @authorize(IsAuthenticated)
        def view():
            pass

        self.api.add_view(view)
        res = self.client.get('/view')
        self.assertEqual(401, res.status_code)
class FakeAuthenticator(Authenticator):
    """Test double that unconditionally authenticates as ``user1``."""

    def authenticate(self):
        # Always report a successful login with a fixed user/token pair.
        user, token = 'user1', '1234'
        return AuthenticateResult.success(user, token)
|
"""Figure generating functions to accompany behavior_analysis,
used by automatic scripts
All functions should return either a figure or list of figures.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import itertools as it
from scipy.misc import comb
try:
from bottleneck import nanmean, nanstd
except ImportError:
from numpy import nanmean, nanstd
from warnings import warn
import pandas as pd
import seaborn.apionly as sns
from collections import defaultdict
from copy import copy
import lab
from ..analysis import imaging_analysis as ia
from ..analysis import signals_analysis as sa
from ..analysis import reward_analysis as ra
from ..analysis import intervals as inter
from ..analysis import filters as af
from ..classes.classes import ExperimentGroup as eg
from .. import plotting
from .. import misc
from ..plotting import plot_metric, plot_paired_metrics, color_cycle
import lab.plotting.analysis_plotting as ap
def activityByExposureFigure(exptGrp, rasterized=False, **kwargs):
    """Plot six activity statistics as a function of context exposure.

    Parameters
    ----------
    exptGrp : lab.ExperimentGroup
        Experiments to analyze.
    rasterized : bool
        Rasterize plot elements (smaller vector-graphics output).
    **kwargs
        Passed through to ``ap.activityByExposure``.

    Returns
    -------
    matplotlib.figure.Figure
        A single 2x3 figure, one statistic per axis.
    """
    fig, axs = plt.subplots(2, 3, figsize=(15, 8))
    # One statistic per axis, laid out row-major on the 2x3 grid (same
    # order as the original six copy-pasted calls).
    stats = ('mean', 'responseMagnitude', 'norm transient auc2',
             'amplitude', 'duration', 'frequency')
    for ax, stat in zip(axs.flat, stats):
        ap.activityByExposure(
            exptGrp, ax=ax, stat=stat, rasterized=rasterized, **kwargs)
    return fig
def activityComparisonFigure(exptGrp, method='mean', rasterized=False):
    """Pairwise activity comparison plots for matched experiment pairs.

    Returns a list of figures, each holding up to ``nCols`` pairs; the top
    row compares each pair in one direction and the bottom row in reverse.
    """
    nCols = 4
    exposure = exptGrp.priorDaysOfExposure(ignoreContext=False)
    pairs = it.combinations(exptGrp, 2)
    nPairs = 0
    valid_pairs = []
    # A valid experiment pair is from the same mouse and either the same
    # context or the same day of exposure
    for pair in pairs:
        if (pair[0].parent == pair[1].parent) \
                and (pair[0].sameContext(pair[1]) or
                     (exposure[pair[0]] == exposure[pair[1]])):
            valid_pairs.append(pair)
            nPairs += 1
    # One column per pair; each column holds a forward/reverse axis pair.
    nFigs = int(np.ceil(nPairs / float(nCols)))
    figs = []
    axs = []
    for f in range(nFigs):
        fig, ax = plt.subplots(2, nCols, figsize=(15, 8), squeeze=False)
        ax_pairs = [(ax[0][x], ax[1][x]) for x in range(nCols)]
        axs.extend(ax_pairs)
        figs.append(fig)
    # Hide the unused trailing axes on the last figure.
    n_extras = (nFigs * nCols) - nPairs
    if n_extras > 0:
        for a in axs[-n_extras:]:
            a[0].set_visible(False)
            a[1].set_visible(False)
    for pair, ax in it.izip(valid_pairs, axs):
        grp = lab.ExperimentGroup(pair)
        label1 = 'Day {}, Ctx {}'.format(
            exposure[grp[0]] + 1, grp[0].get('environment'))
        label2 = 'Day {}, Ctx {}'.format(
            exposure[grp[1]] + 1, grp[1].get('environment'))
        # Top row: pair in original order.
        ap.activityComparisonPlot(
            grp, method=method, ax=ax[0], mask1=None, mask2=None, label1=label1,
            label2=label2, roiNamesToLabel=None, normalize=False,
            rasterized=rasterized, dF='from_file')
        # Bottom row: same pair with the roles swapped.
        grp2 = lab.ExperimentGroup(pair[::-1])
        ap.activityComparisonPlot(
            grp2, method=method, ax=ax[1], mask1=None, mask2=None, label1=label2,
            label2=label1, roiNamesToLabel=None, normalize=False,
            rasterized=rasterized, dF='from_file')
    return figs
def salience_responses_figures(
        exptGrp, stimuli, pre_time=None, post_time=None, channel='Ch2',
        label=None, roi_filter=None, exclude_running=False, rasterized=False):
    """Plot each ROI's response to each stim in stimuli.

    Returns a list of figures: one axis per ROI (PSTH traces, one color per
    stimulus), plus a final axis used solely as a color legend.
    """
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
    # Stims labeled 'off' just flip the tail of the responsive distribution
    # but are actually the same PSTH as the 'on' version
    # No need to plot both
    stimuli = [stim for stim in stimuli if 'off' not in stim]
    if not len(stimuli):
        warn("No stimuli to analyze, aborting.")
        return []
    # One distinct color per stimulus, sampled across the Spectral map.
    cmap = matplotlib.cm.get_cmap(name='Spectral')
    color_cycle = [cmap(i) for i in np.linspace(0, 0.9, len(stimuli))]
    psths = []
    for stim in stimuli:
        psth, rois, x_ranges = ia.PSTH(
            exptGrp, stimulus=stim, channel=channel, label=label, roi_filter=roi_filter,
            pre_time=pre_time, post_time=post_time,
            exclude='running' if exclude_running else None)
        psths.append(psth)
    # One axis per ROI plus one extra axis reserved for the legend.
    figs, axs, axs_to_label = plotting.layout_subplots(
        n_plots=len(psths[0]) + 1, rows=3, cols=4, polar=False,
        sharex=False, figsize=(15, 8), rasterized=rasterized)
    for fig in figs:
        fig.suptitle('Salience Responses: {}'.format(
            'running excluded' if exclude_running else 'running included'))
    for psth, color, stim in it.izip(psths, color_cycle, stimuli):
        for ax, roi_psth, roi, x_range in it.izip(axs, psth, rois, x_ranges):
            ax.plot(x_range, roi_psth, color=color)
            # roi is (experiment, location, roi_id)-like; join for the title.
            ax.set_title(roi[0].get('mouseID') + ', ' + roi[1] + ', ' + roi[2])
            # Mark stimulus onset.
            ax.axvline(0, linestyle='dashed', color='k')
            ax.set_xlim(x_range[0], x_range[-1])
            # Keep only informative y-ticks (0 and the non-zero extreme).
            ylims = np.round(ax.get_ylim(), 2)
            if ylims[1] != 0:
                ax.set_yticks([0, ylims[1]])
            elif ylims[0] != 0:
                ax.set_yticks([ylims[0], 0])
            else:
                ax.set_yticks([0])
            if ax not in axs_to_label:
                ax.tick_params(labelbottom=False)
        # Last axis will just be for labels: one horizontal line per
        # stimulus at a distinct height, so legend() picks up all colors.
        axs[-1].plot([0, 1],
                     [-color_cycle.index(color), -color_cycle.index(color)],
                     color=color, label=stim)
    axs[-1].set_xlim(0, 1)
    axs[-1].set_ylim(-len(stimuli), 1)
    axs[-1].tick_params(labelbottom=False, labelleft=False, bottom=False,
                        left=False, top=False, right=False)
    axs[-1].legend()
    for ax in axs_to_label:
        ax.set_ylabel(r'Average $\Delta$F/F')
        ax.set_xlabel('Time (s)')
    return figs
def salience_expt_summary_figure(
        expt, stimuli, method='responsiveness', pre_time=None, post_time=None,
        channel='Ch2', label=None, roi_filter=None, exclude_running=False,
        rasterized=False, n_processes=1):
    """Summary of salience responses.
    Includes trialAverageHeatmap, psth of responsive ROIs and image overlay of
    responsive ROIs.

    Returns a single figure with one column per stimulus and three rows:
    heatmap (top), responsive-cell PSTH (middle), ROI overlay (bottom).
    """
    fig, axs = plt.subplots(3, len(stimuli), figsize=(15, 8), squeeze=False,
                            subplot_kw={'rasterized': rasterized})
    fig.suptitle('Salience Experiment Summary: {}'.format(
        'running excluded' if exclude_running else 'running included'))
    # Convert the requested pre/post windows from seconds to frames.
    frame_period = expt.frame_period()
    pre_frames = None if pre_time is None else int(pre_time / frame_period)
    post_frames = None if post_time is None else int(post_time / frame_period)
    for stim_idx, stim in enumerate(stimuli):
        # Row 0: trial-averaged response heatmap for every ROI.
        expt.trialAverageHeatmap(
            stimulus=stim, ax=axs[0, stim_idx], sort=False, smoothing=None,
            window_length=5, channel=channel, label=label,
            roi_filter=roi_filter, exclude_running=exclude_running)
        axs[0, stim_idx].set_title(stim)
        # Row 1: bootstrap test for stim-responsive cells; the helper also
        # plots the mean PSTH of responders onto the supplied axis.
        responsive_filter = af.identify_stim_responsive_cells(
            expt, stimulus=stim, method=method, pre_frames=pre_frames,
            post_frames=post_frames, data=None, ax=axs[1, stim_idx],
            conf_level=95, sig_tail='upper', transients_conf_level=95,
            plot_mean=True, exclude='running' if exclude_running else None,
            channel=channel, label=label, roi_filter=roi_filter,
            n_bootstraps=10000, save_to_expt=True, n_processes=n_processes)
        # Row 2: outline the responsive ROIs over the anatomical image.
        rois = expt.roiVertices(
            channel=channel, label=label, roi_filter=responsive_filter)
        plotting.roiDataImageOverlay(
            ax=axs[2, stim_idx],
            background=expt.returnFinalPrototype(channel=channel),
            rois=rois, values=None, vmin=0, vmax=.8)
    return fig
def salience_exptGrp_summary_figure(
        exptGrp, stimuli, method='responsiveness', pre_time=None,
        post_time=None, channel='Ch2', label=None, roi_filter=None,
        exclude_running=False, rasterized=False, save_data=False,
        n_processes=1, n_bootstraps=10000):
    """Summary of stim-responsive ROIs across an experiment group.

    One column per stimulus, paginated ``STIMS_PER_FIG`` to a figure, with
    three rows: mean PSTH of responsive cells, per-cell response magnitudes,
    and the fraction of imaged cells that responded.

    Returns
    -------
    list of matplotlib.figure.Figure

    Raises
    ------
    NotImplementedError
        If ``save_data`` is truthy -- data export has not been updated for
        the multi-figure (paginated) layout.
    """
    STIMS_PER_FIG = 6
    data_to_save = {}
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
    if not len(stimuli):
        warn("No stimuli to analyze, aborting.")
        return []
    # Paginate: STIMS_PER_FIG columns per figure, rows = psth/response/fraction.
    n_figs = int(np.ceil(len(stimuli) / float(STIMS_PER_FIG)))
    figs, psth_axs, response_axs, fraction_axs, first_col_axs = \
        [], [], [], [], []
    for n in range(n_figs):
        fig, axs = plt.subplots(
            3, STIMS_PER_FIG, figsize=(15, 8), squeeze=False,
            subplot_kw={'rasterized': rasterized})
        fig.suptitle('Responsive ROIs summary: {}'.format(
            'running excluded' if exclude_running else 'running included'))
        figs.append(fig)
        psth_axs.append(axs[0, :])
        response_axs.append(axs[1, :])
        fraction_axs.append(axs[2, :])
        first_col_axs.append(axs[:, 0])
    # Flatten the per-figure axis rows into one sequence per row type.
    psth_axs = np.hstack(psth_axs)
    response_axs = np.hstack(response_axs)
    fraction_axs = np.hstack(fraction_axs)
    first_col_axs = np.hstack(first_col_axs)
    min_psth_y_lim = np.inf
    max_psth_y_lim = -np.inf
    # Row 0: bootstrap-identify responsive cells per stimulus; the helper
    # plots the mean responder PSTH onto the supplied axis.
    responsive_cells = {}
    for ax, stimulus in it.izip(psth_axs, stimuli):
        responsive_cells[stimulus] = ia.identify_stim_responsive_cells(
            exptGrp, stimulus=stimulus, method=method, ax=ax, pre_time=pre_time,
            post_time=post_time, data=None, conf_level=95, sig_tail='upper',
            plot_mean=True, exclude='running' if exclude_running else None,
            channel=channel, label=label, roi_filter=roi_filter,
            n_bootstraps=n_bootstraps, save_to_expt=True,
            n_processes=n_processes)
        ax.set_title(stimulus)
        min_psth_y_lim = np.amin([min_psth_y_lim, ax.get_ylim()[0]])
        max_psth_y_lim = np.amax([max_psth_y_lim, ax.get_ylim()[1]])
    # Row 1: scatter-bar of |response magnitude| for responsive cells;
    # non-responder magnitudes are collected only for optional export.
    max_bar_y_lim = 0
    n_responsive_rois = {}
    data_to_save['responsive_responses'] = []
    data_to_save['non_responsive_responses'] = []
    for ax, stimulus in it.izip(response_axs, stimuli):
        responses = ia.response_magnitudes(
            exptGrp, stimulus, method=method, pre_time=pre_time, post_time=post_time,
            data=None, exclude='running' if exclude_running else None,
            channel=channel, label=label,
            roi_filter=responsive_cells[stimulus])
        data_to_save['responsive_responses'].append(
            [stimulus] + ['{:f}'.format(val) for val in responses])
        plotting.scatter_bar(
            ax, [np.abs(responses)], labels=[''], jitter_x=True)
        max_bar_y_lim = np.amax([max_bar_y_lim, ax.get_ylim()[1]])
        ax.tick_params(bottom=False, labelbottom=False)
        n_responsive_rois[stimulus] = len(responses)
        non_responses = ia.response_magnitudes(
            exptGrp, stimulus, method=method, pre_time=pre_time, post_time=post_time,
            data=None, exclude='running' if exclude_running else None,
            channel=channel, label=label,
            roi_filter=misc.invert_filter(responsive_cells[stimulus]))
        data_to_save['non_responsive_responses'].append(
            [stimulus] + ['{:f}'.format(val) for val in non_responses])
    # Row 2: fraction of imaged ROIs that were responsive, per stimulus.
    fractions = []
    n_rois = {}
    for ax, stimulus in it.izip(fraction_axs, stimuli):
        all_psths, _, _ = ia.PSTH(
            exptGrp, stimulus=stimulus, pre_time=pre_time, post_time=post_time,
            data=None, exclude='running' if exclude_running else None,
            channel=channel, label=label, roi_filter=roi_filter)
        # Find how many of the ROIs were imaged with the current stimulus
        n_rois[stimulus] = np.sum(
            [not np.all(np.isnan(psth)) for psth in all_psths])
        if n_rois[stimulus] > 0:
            fractions.append(
                n_responsive_rois[stimulus] / float(n_rois[stimulus]))
            plotting.scatter_bar(
                ax, [[fractions[-1]]],
                labels=['{} / {}'.format(
                    n_responsive_rois[stimulus], n_rois[stimulus])],
                jitter_x=False)
        else:
            fractions.append(np.nan)
        ax.set_ylim(0, 1)
        ax.tick_params(bottom=False)
    # Cosmetics: shared limits, and labels only on each row's first column.
    for ax in set(psth_axs).difference(first_col_axs):
        ax.set_xlabel('')
        ax.set_ylabel('')
        ax.tick_params(labelleft=False, labelbottom=False)
    for ax in psth_axs:
        ax.set_ylim(min_psth_y_lim, max_psth_y_lim)
    for ax in set(response_axs).intersection(first_col_axs):
        ax.set_ylabel('Stim response')
    for ax in set(response_axs).difference(first_col_axs):
        ax.tick_params(labelleft=False)
    for ax in response_axs:
        ax.set_ylim(0, max_bar_y_lim)
    for ax in set(fraction_axs).intersection(first_col_axs):
        ax.set_ylabel('Responsive cell fraction')
    for ax in set(fraction_axs).difference(first_col_axs):
        ax.tick_params(labelleft=False)
    # Hide unused trailing columns on the last page.
    if len(stimuli) % STIMS_PER_FIG:
        extra_axs = len(stimuli) - n_figs * STIMS_PER_FIG
        for ax in it.chain(psth_axs[extra_axs:], response_axs[extra_axs:],
                           fraction_axs[extra_axs:]):
            ax.set_visible(False)
    if save_data:
        # Need to update for multiple pages
        # FIX: was ``raise NotImplemented`` -- NotImplemented is a sentinel
        # value, not an exception, so raising it was itself a TypeError.
        raise NotImplementedError(
            'save_data is not yet supported with the paginated layout')
        # Unreachable pending multi-page support; kept for reference.
        psths = {}
        non_responsive_psths = {}
        for stimulus in stimuli:
            # Responders
            psth, x_range = ia.PSTH(
                exptGrp, stimulus=stimulus, pre_time=pre_time, post_time=post_time,
                channel=channel, label=label,
                roi_filter=responsive_cells[stimulus], return_full='norm',
                exclude='running' if exclude_running else None)
            psth_list = [x_range]
            for roi in psth:
                psth_list.append(['{:f}'.format(val) for val in roi])
            label_strs = np.array(
                ['Time (s)'] + ['ROI ' + str(x) for x in range(psth.shape[0])])
            psths[stimulus] = np.hstack([label_strs[:, None], psth_list])
            # Non-responders
            psth, x_range = ia.PSTH(
                exptGrp, stimulus=stimulus, pre_time=pre_time, post_time=post_time,
                channel=channel, label=label,
                roi_filter=misc.invert_filter(responsive_cells[stimulus]),
                return_full='norm', exclude='running' if exclude_running else None)
            psth_list = [x_range]
            for roi in psth:
                psth_list.append(['{:f}'.format(val) for val in roi])
            label_strs = np.array(
                ['Time (s)'] + ['ROI ' + str(x) for x in range(psth.shape[0])])
            non_responsive_psths[stimulus] = np.hstack([label_strs[:, None], psth_list])
        data_to_save['psths'] = psths
        data_to_save['non_responsive_psths'] = non_responsive_psths
        data_to_save['fractions'] = [stimuli, fractions]
        data_to_save['n_responding'] = [
            stimuli, [n_responsive_rois[stim] for stim in stimuli]]
        data_to_save['n_rois'] = [stimuli, [n_rois[stim] for stim in stimuli]]
        misc.save_data(
            data_to_save, fig=fig, label='salience_summary', method=save_data)
    return figs
def salience_expt_grp_dataframe_figure(
        expt_grps, stimuli, plotby, method='responsiveness', pre_time=None,
        post_time=None, channel='Ch2', label=None, roi_filters=None,
        colors=None, exclude_running=False, rasterized=False, save_data=False,
        n_bootstraps=10000, n_processes=1):
    """Compare stimulus responsiveness across groups, split by ``plotby``.

    One column per stimulus (paginated ``STIMS_PER_FIG`` per figure), with
    two rows: mean response of responsive cells, and fraction of cells
    responding.  Returns a list of figures.
    """
    # data_to_save = {}
    STIMS_PER_FIG = 4
    if roi_filters is None:
        roi_filters = [None] * len(expt_grps)
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
    if not len(stimuli):
        warn("No stimuli to analyze, aborting.")
        return []
    # Paginate columns across figures; rows = response / fraction.
    n_figs = int(np.ceil(len(stimuli) / float(STIMS_PER_FIG)))
    figs, response_axs, fraction_axs, first_col_axs = [], [], [], []
    for n in range(n_figs):
        fig, axs = plt.subplots(
            2, STIMS_PER_FIG, figsize=(15, 8), squeeze=False,
            subplot_kw={'rasterized': rasterized})
        fig.suptitle('Responsive ROIs by {}: {}'.format(
            plotby,
            'running excluded' if exclude_running else 'running included'))
        figs.append(fig)
        response_axs.append(axs[0, :])
        fraction_axs.append(axs[1, :])
        first_col_axs.append(axs[:, 0])
    response_axs = np.hstack(response_axs)
    fraction_axs = np.hstack(fraction_axs)
    first_col_axs = np.hstack(first_col_axs)
    if method == 'responsiveness':
        activity_label = 'Responsiveness (dF/F)'
    elif method == 'peak':
        activity_label = 'Peak responsiveness (dF/F)'
    else:
        raise ValueError("Unrecognized 'method' value")
    # Per stimulus: one responsive-cell filter and one response dataframe
    # per experiment group (lists are parallel to ``expt_grps``).
    responsive_cells = {}
    responsive_dfs = {}
    for stimulus in stimuli:
        responsive_cells[stimulus] = []
        responsive_dfs[stimulus] = []
        # NOTE(review): these two dict initializations are dead code -- they
        # are immediately shadowed by the list re-bindings just below.
        stimulus_filters = {}
        stimulus_dfs = {}
        for expt_grp, roi_filter in it.izip(expt_grps, roi_filters):
            stimulus_filters = []
            stimulus_dfs = []
            # One filter/df per plotby subgroup of this experiment group.
            for key, grp in expt_grp.groupby(plotby):
                stimulus_filters.append(
                    ia.identify_stim_responsive_cells(
                        grp, stimulus=stimulus, method=method, pre_time=pre_time,
                        post_time=post_time, data=None, conf_level=95,
                        sig_tail='upper',
                        exclude='running' if exclude_running else None,
                        channel=channel, label=label, roi_filter=roi_filter,
                        n_bootstraps=n_bootstraps, save_to_expt=True,
                        n_processes=n_processes))
                df = ia.response_magnitudes(
                    grp, stimulus, method=method, pre_time=pre_time,
                    post_time=post_time, data=None,
                    exclude='running' if exclude_running else None,
                    channel=channel, label=label,
                    roi_filter=stimulus_filters[-1], return_df=True)
                # Put the grouping info back in the dataframe
                # For example:
                # plotby = ['condition_day']
                # keys will be ['A_0', 'A_1', 'B_0', etc...]
                # So df['condition_day'] == 'A_0' for the first group, etc.
                for key_value, grouping in zip(key, plotby):
                    df[grouping] = key_value
                stimulus_dfs.append(df)
            responsive_dfs[stimulus].append(pd.concat(
                stimulus_dfs, ignore_index=True))
            responsive_cells[stimulus].append(misc.filter_union(
                stimulus_filters))
    #
    # Plot mean PSTH for each stim/group
    #
    pass
    #
    # Plot the mean response of responsive cells
    #
    max_response_y_lim = 0
    for ax, stimulus in it.izip(response_axs, stimuli):
        plotting.plot_dataframe(
            ax, responsive_dfs[stimulus],
            labels=[expt_grp.label() for expt_grp in expt_grps],
            activity_label=activity_label, groupby=None, plotby=plotby,
            orderby=None, plot_method='line', plot_shuffle=False,
            shuffle_plotby=False, pool_shuffle=False,
            agg_fn=np.mean, colors=colors)
        max_response_y_lim = np.amax([max_response_y_lim, ax.get_ylim()[1]])
        ax.set_title(stimulus)
        plt.setp(ax.get_xticklabels(), rotation='40',
                 horizontalalignment='right')
    #
    # Plot fraction of responsive ROIs
    #
    # First groupby level identifies a unique cell; second pools per mouse.
    groupby = [['mouseID', 'uniqueLocationKey', 'roi_id'] + plotby,
               ['mouseID'] + plotby]
    activity_kwargs = [
        {'channel': channel, 'label': label, 'include_roi_filter': inc_filter}
        for inc_filter in roi_filters]
    for ax, stimulus in it.izip(fraction_axs, stimuli):
        plot_metric(
            ax, expt_grps, eg.filtered_rois, 'line',
            roi_filters=responsive_cells[stimulus], groupby=groupby,
            plotby=plotby, orderby=None, plot_shuffle=False,
            shuffle_plotby=False, pool_shuffle=False, plot_abs=False,
            activity_kwargs=activity_kwargs,
            activity_label='Fraction responding', label_every_n=1,
            rotate_labels=True, colors=colors)
        # ax.set_ylim(0, 1)
        ax.set_ylim(-0.05, 1.05)
        ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
    # Cosmetics: labels only on the first column; shared y-limits per row.
    for ax in set(response_axs).difference(first_col_axs):
        ax.set_ylabel('')
    for ax in response_axs:
        ax.set_ylim(0, max_response_y_lim)
        ax.set_xlabel('')
    for ax in set(fraction_axs).difference(first_col_axs):
        ax.set_ylabel('')
        ax.set_title('')
    # Hide unused trailing columns on the last page.
    if len(stimuli) % STIMS_PER_FIG:
        extra_axs = len(stimuli) - n_figs * STIMS_PER_FIG
        for ax in it.chain(response_axs[extra_axs:], fraction_axs[extra_axs:]):
            ax.set_visible(False)
    return figs
def compare_psth_summary_figure(
        expt_grps, stimuli, pre_time=None, post_time=None, channel='Ch2',
        label=None, roi_filters=None, colors=None, exclude_running=False,
        rasterized=False):
    """Compare mean PSTHs and response magnitudes across experiment groups.

    One column per stimulus (paginated ``STIMS_PER_FIG`` per figure); one
    PSTH row per group plus a bottom row of grouped response-magnitude bars.
    Returns a list of figures.
    """
    STIMS_PER_FIG = 6
    if colors is None:
        colors = sns.color_palette()
    # FIX: normalize the documented roi_filters=None default, matching the
    # sibling summary functions -- previously None crashed the zip() below.
    if roi_filters is None:
        roi_filters = [None] * len(expt_grps)
    data_to_save = {}
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
    if not len(stimuli):
        warn("No stimuli to analyze, aborting.")
        return []
    # Paginate columns; each figure has len(expt_grps) PSTH rows + 1 bar row.
    n_figs = int(np.ceil(len(stimuli) / float(STIMS_PER_FIG)))
    figs, response_axs, first_col_axs = [], [], []
    psth_axs = defaultdict(list)
    for n in range(n_figs):
        fig, axs = plt.subplots(
            len(expt_grps) + 1, STIMS_PER_FIG, figsize=(15, 8), squeeze=False,
            subplot_kw={'rasterized': rasterized})
        fig.suptitle('All ROIs summary: {}'.format(
            'running excluded' if exclude_running else 'running included'))
        figs.append(fig)
        for expt_grp, grp_axs in zip(expt_grps, axs):
            psth_axs[expt_grp].append(grp_axs)
            plotting.right_label(grp_axs[-1], expt_grp.label())
        response_axs.append(axs[-1, :])
        first_col_axs.append(axs[:, 0])
    for expt_grp in expt_grps:
        psth_axs[expt_grp] = np.hstack(psth_axs[expt_grp])
    response_axs = np.hstack(response_axs)
    first_col_axs = np.hstack(first_col_axs)
    min_psth_y_lim = np.inf
    max_psth_y_lim = -np.inf
    # PSTH rows: one mean trace per group per stimulus.
    for expt_grp, roi_filter, color in zip(expt_grps, roi_filters, colors):
        for ax, stimulus in it.izip(psth_axs[expt_grp], stimuli):
            ia.PSTH(
                expt_grp, stimulus, ax=ax, pre_time=pre_time, post_time=post_time,
                exclude='running' if exclude_running else None, data=None,
                shade_ste=False, plot_mean=True, channel=channel, label=label,
                roi_filter=roi_filter, color=color)
            ax.set_title(stimulus)
            min_psth_y_lim = np.amin([min_psth_y_lim, ax.get_ylim()[0]])
            max_psth_y_lim = np.amax([max_psth_y_lim, ax.get_ylim()[1]])
    # Bottom row: grouped bars of |response magnitude| per group.
    max_bar_y_lim = 0
    data_to_save['responses'] = {expt_grp.label(): [] for expt_grp in expt_grps}
    for ax, stimulus in it.izip(response_axs, stimuli):
        responses = []
        for expt_grp, roi_filter, color in zip(expt_grps, roi_filters, colors):
            responses.append(ia.response_magnitudes(
                expt_grp, stimulus, method='responsiveness', pre_time=pre_time,
                post_time=post_time, data=None,
                exclude='running' if exclude_running else None,
                channel=channel, label=label, roi_filter=roi_filter))
            data_to_save['responses'][expt_grp.label()].append(
                [stimulus] + ['{:f}'.format(val) for val in responses[-1]])
        plotting.grouped_bar(
            ax, values=[[np.abs(r)] for r in responses], cluster_labels=[''],
            condition_labels=[expt_grp.label() for expt_grp in expt_grps],
            bar_colors=colors, scatter_points=True, jitter_x=True, s=20)
        max_bar_y_lim = np.amax([max_bar_y_lim, ax.get_ylim()[1]])
        ax.tick_params(bottom=False, labelbottom=False)
    # Cosmetics: shared limits; labels/legends only on the first column.
    for ax in set(it.chain(*psth_axs.itervalues())).difference(first_col_axs):
        ax.set_xlabel('')
        ax.set_ylabel('')
        ax.tick_params(labelleft=False, labelbottom=False)
    for ax in it.chain(*psth_axs.itervalues()):
        ax.set_ylim(min_psth_y_lim, max_psth_y_lim)
    for ax in set(response_axs).intersection(first_col_axs):
        ax.set_ylabel('Stim response')
    for ax in set(response_axs).difference(first_col_axs):
        ax.tick_params(labelleft=False)
        legend = ax.get_legend()
        if legend is not None:
            legend.set_visible(False)
    for ax in response_axs:
        ax.set_ylim(0, max_bar_y_lim)
    # for ax in set(fraction_axs).intersection(first_col_axs):
    #     ax.set_ylabel('Responsive cell fraction')
    # for ax in set(fraction_axs).difference(first_col_axs):
    #     ax.tick_params(labelleft=False)
    # Hide unused trailing columns on the last page.
    if len(stimuli) % STIMS_PER_FIG:
        extra_axs = len(stimuli) - n_figs * STIMS_PER_FIG
        for ax in it.chain(response_axs[extra_axs:], *[
                grp_axs[extra_axs:] for grp_axs in psth_axs.itervalues()]):
            ax.set_visible(False)
    return figs
def plotRoisOverlay(expt, channel='Ch2', label=None, roi_filter=None,
                    rasterized=False):
    """Generate a figure of the imaging location with all ROIs overlaid.

    Parameters
    ----------
    expt : experiment object whose prototype image and ROIs are drawn.
    channel : str
        Imaging channel used for both the background image and the ROIs.
    label, roi_filter : forwarded to the ROI lookups to restrict which ROIs
        are drawn.
    rasterized : bool
        If True, rasterize the axes (smaller vector-format output).

    Returns
    -------
    list of matplotlib figures, one per imaging plane.
    """
    figs = []
    background_image = expt.returnFinalPrototype(channel=channel)
    roiVerts = expt.roiVertices(
        channel=channel, label=label, roi_filter=roi_filter)
    labels = expt.roi_ids(channel=channel, label=label, roi_filter=roi_filter)
    imaging_parameters = expt.imagingParameters()
    # Pixels are not necessarily square: match the display aspect to the
    # acquisition geometry.
    aspect_ratio = imaging_parameters['pixelsPerLine'] \
        / imaging_parameters['linesPerFrame']
    for plane in xrange(background_image.shape[0]):
        fig = plt.figure()
        ax = fig.add_subplot(111, rasterized=rasterized)
        # Keep only ROIs whose first vertex lies in this plane (the third
        # coordinate of each vertex is the z/plane index).
        roi_inds = [i for i, v in enumerate(roiVerts) if v[0][0][2] == plane]
        # plane_verts = np.array(roiVerts)[roi_inds].tolist()
        plane_verts = [roiVerts[x] for x in roi_inds]
        # Drop the z-coordinate; the overlay is drawn in 2D.
        twoD_verts = []
        for roi in plane_verts:
            roi_polys = []
            for poly in roi:
                roi_polys.append(poly[:, :2])
            twoD_verts.append(roi_polys)
        plotting.roiDataImageOverlay(
            ax, background_image[plane, :, :], twoD_verts, values=None,
            vmin=0, vmax=1, labels=np.array(labels)[roi_inds].tolist(),
            cax=None, alpha=0.2, aspect=aspect_ratio)
        ax.set_title('{}_{}: plane {}'.format(
            expt.parent.get('mouseID'), expt.get('startTime'), plane))
        figs.append(fig)
    return figs
def trial_responses(
        exptGrp, stimuli, channel='Ch2', label=None, roi_filter=None,
        exclude_running=False, rasterized=False, plot_mean=False,
        gray_traces=False, **psth_kwargs):
    """Plots the response to each stim in 'stimuli' for all rois and trials in
    'exptGrp'

    Generator: yields one figure at a time, each holding up to 4 ROIs (rows)
    by len(stimuli) columns of per-trial PSTH traces.

    Parameters
    ----------
    plot_mean : bool
        If True, overlay the across-trial mean trace in black.
    gray_traces : bool
        If True, draw the individual trial traces in light gray.
    psth_kwargs : forwarded to ia.PSTH.
    """
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
        if not len(stimuli):
            warn("No stimuli to analyze, aborting.")
            return
    # Stims labeled 'off' just flip the tail of the responsive distribution
    # but are actually the same PSTH as the 'on' version
    # No need to plot both
    stimuli = [stim for stim in stimuli if 'off' not in stim]
    # psths[stim][roi_idx] holds the per-trial traces for one ROI.
    psths = {}
    for stimulus in stimuli:
        psths[stimulus], rois, x_range = ia.PSTH(
            exptGrp, stimulus, channel=channel, label=label, roi_filter=roi_filter,
            return_full=True, exclude='running' if exclude_running else None,
            **psth_kwargs)
    # 4 ROI-rows per figure, one column per stimulus.
    figs, axs, axs_to_label = plotting.layout_subplots(
        n_plots=len(rois) * len(stimuli), rows=4, cols=len(stimuli),
        polar=False, sharex=False, figsize=(15, 8), rasterized=rasterized)
    for fig in figs:
        fig.suptitle('Trial Responses: {}'.format(
            'running excluded' if exclude_running else 'running included'))
    for ax in axs_to_label:
        ax.set_ylabel(r'Average $\Delta$F/F')
        ax.set_xlabel('Time (s)')
    ax_idx = 0
    for roi_idx in xrange(len(rois)):
        for stimulus in stimuli:
            ax = axs[ax_idx]
            # If there are no trial psths for this roi, just move along
            if psths[stimulus][roi_idx].shape[1] > 0:
                if gray_traces:
                    ax.plot(x_range[roi_idx], psths[stimulus][roi_idx],
                            color='0.8')
                else:
                    ax.plot(x_range[roi_idx], psths[stimulus][roi_idx])
                if plot_mean:
                    ax.plot(
                        x_range[roi_idx],
                        np.nanmean(psths[stimulus][roi_idx], axis=1),
                        lw=2, color='k')
            # Mark stimulus onset at t=0.
            ax.axvline(0, linestyle='dashed', color='k')
            ax.set_xlim(x_range[roi_idx][0], x_range[roi_idx][-1])
            # Show at most two y-ticks: zero plus the non-zero extreme(s).
            ylims = np.round(ax.get_ylim(), 2)
            if ylims[1] != 0:
                ax.set_yticks([0, ylims[1]])
            elif ylims[0] != 0:
                ax.set_yticks([ylims[0], 0])
            else:
                ax.set_yticks([0])
            # get_geometry() returns (n_rows, n_cols, index); index is 1-based.
            ax_geometry = ax.get_geometry()
            # If ax is in top row add a stim title
            if ax_geometry[2] <= ax_geometry[1]:
                ax.set_title(stimulus)
            # If ax is in last column add an roi label
            if ax_geometry[2] % ax_geometry[1] == 0:
                roi_label = rois[roi_idx][0].get('mouseID') + '\n' + \
                    rois[roi_idx][1] + '\n' + rois[roi_idx][2]
                # Bbox = ax.figbox
                # ax.figure.text(Bbox.p1[0] + 0.02,
                #                (Bbox.p1[1] + Bbox.p0[1]) / 2,
                #                roi_label, rotation='vertical',
                #                verticalalignment='center')
                plotting.right_label(
                    ax, roi_label, rotation='vertical',
                    verticalalignment='center', horizontalalignment='center')
            # Remove extra labels
            if ax not in axs_to_label:
                ax.tick_params(labelbottom=False)
            ax_idx += 1
        # Yield each figure once its 4 ROI-rows are filled.
        # NOTE(review): if len(rois) is not a multiple of 4 the final,
        # partially-filled figure is never yielded -- confirm intended.
        if np.mod(roi_idx, 4) == 3:
            yield figs[roi_idx / 4]
def compare_stim_responses(
        exptGrp, stimuli, channel='Ch2', label=None, roi_filter=None,
        exclude_running=False, rasterized=False, plot_method='scatter',
        z_score=True, **kwargs):
    """Plot of each pair of stims in stimuli against each other.

    For every pair of stimuli, plots each ROI's mean response to one stim
    against its mean response to the other.

    Parameters
    ----------
    plot_method : {'scatter', 'ellipse'}
        'ellipse' is currently not implemented and raises
        NotImplementedError.
    z_score : bool
        Forwarded to ia.response_magnitudes; also noted in the fig title.
    kwargs : forwarded to ia.response_magnitudes.

    Returns
    -------
    list of matplotlib figures.

    Raises
    ------
    NotImplementedError : if plot_method == 'ellipse'.
    ValueError : for any other unrecognized plot_method.
    """
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
        if not len(stimuli):
            warn("No stimuli to analyze, aborting.")
            return []
    figs, axs, _ = plotting.layout_subplots(
        comb(len(stimuli), 2), rows=2, cols=4, figsize=(15, 8),
        sharex=False, rasterized=rasterized)
    # Per-stim response statistics, keyed by stimulus name.
    rois = {}
    means = {}
    stds = {}
    for stimulus in stimuli:
        means[stimulus], stds[stimulus], _, rois[stimulus], _ = \
            ia.response_magnitudes(
                exptGrp, stimulus, channel=channel, label=label,
                roi_filter=roi_filter,
                return_full=True, z_score=z_score,
                exclude='running' if exclude_running else None, **kwargs)
    for ax, (stim1, stim2) in zip(axs, it.combinations(stimuli, 2)):
        if plot_method == 'ellipse':
            # BUG FIX: was `raise NotImplemented`, which raises the
            # NotImplemented singleton rather than an exception class.
            raise NotImplementedError
            # The code below is retained but unreachable until the ellipse
            # plot is implemented.
            means_1 = []
            means_2 = []
            stds_1 = []
            stds_2 = []
            all_rois = rois[stim1] + rois[stim2]
            # Only ROIs with responses to both stims are plotted.
            for roi in set(all_rois):
                if roi in rois[stim1] and roi in rois[stim2]:
                    idx_1 = rois[stim1].index(roi)
                    idx_2 = rois[stim2].index(roi)
                    means_1.append(means[stim1][idx_1])
                    means_2.append(means[stim2][idx_2])
                    stds_1.append(stds[stim1][idx_1])
                    stds_2.append(stds[stim2][idx_2])
            # Clip axis limits to mean +/- 4 std so outliers don't dominate.
            max_x = np.nanmax(np.array(means_1) + np.array(stds_1))
            x_std = 4 * nanstd(means_1)
            max_x = min([max_x, nanmean(means_1) + x_std])
            max_y = np.nanmax(np.array(means_2) + np.array(stds_2))
            y_std = 4 * nanstd(means_2)
            max_y = min([max_y, nanmean(means_2) + y_std])
            min_x = np.nanmin(np.array(means_1) - np.array(stds_1))
            min_x = max([min_x, nanmean(means_1) - x_std])
            min_y = np.nanmin(np.array(means_2) - np.array(stds_2))
            min_y = max([min_y, nanmean(means_2) - y_std])
            finite_means = np.isfinite(means_1) & np.isfinite(means_2)
            if not np.any(finite_means):
                continue
            plotting.ellipsePlot(ax, means_1, means_2, stds_1, stds_2,
                                 axesCenter=False, print_stats=True)
            ax.set_xlim(min_x, max_x)
            ax.set_ylim(min_y, max_y)
            ax.axvline(0, linestyle=':', color='k')
            ax.axhline(0, linestyle=':', color='k')
            ax.set_xlabel(stim1)
            ax.set_ylabel(stim2)
        elif plot_method == 'scatter':
            means_1 = []
            means_2 = []
            all_rois = rois[stim1] + rois[stim2]
            # Only ROIs with responses to both stims are plotted.
            for roi in set(all_rois):
                if roi in rois[stim1] and roi in rois[stim2]:
                    idx_1 = rois[stim1].index(roi)
                    idx_2 = rois[stim2].index(roi)
                    means_1.append(means[stim1][idx_1])
                    means_2.append(means[stim2][idx_2])
            # Skip the pair entirely when no ROI has finite values for both.
            finite_means = np.isfinite(means_1) & np.isfinite(means_2)
            if not np.any(finite_means):
                continue
            plotting.scatterPlot(
                ax, [means_1, means_2], [stim1, stim2], s=1.5,
                print_stats=True)
            ax.axvline(0, linestyle=':', color='k')
            ax.axhline(0, linestyle=':', color='k')
        else:
            raise ValueError(
                'Unrecognized plot method: {}'.format(plot_method))
    for fig in figs:
        fig.suptitle('Stim response {}comparison: {}'.format(
            'z-score ' if z_score else '',
            'running excluded' if exclude_running else 'running included'))
    return figs
def quantify_multi_responses(
        exptGrp, stimuli, method='responsiveness', channel='Ch2', label=None,
        roi_filter=None, pre_time=None, post_time=None, rasterized=False,
        n_processes=1, n_bootstraps=10000):
    """Quantifies the number of stimuli that each ROI responds to,
    plots as a histogram.

    Left panel includes running periods; right panel excludes them.

    Returns
    -------
    matplotlib figure
    """
    fig, axs = plt.subplots(1, 2, subplot_kw={'rasterized': rasterized},
                            figsize=(15, 8))
    panels = zip(axs, (None, 'running'),
                 ('Running included', 'Running excluded'))
    for panel_ax, exclude, panel_title in panels:
        ia.plot_number_of_stims_responsive(
            exptGrp, panel_ax, stimuli, method=method, pre_time=pre_time,
            post_time=post_time, exclude=exclude, channel=channel,
            label=label, roi_filter=roi_filter, n_processes=n_processes,
            n_bootstraps=n_bootstraps)
        panel_ax.set_title(panel_title)
    return fig
def response_linearity(
        exptGrp, paired_stimuli, channel='Ch2', label=None, roi_filter=None,
        exclude_running=False, responsive_method=None, rasterized=False,
        plot_method='ellipse', **kwargs):
    """Histogram of response linearities

    Calculated as combined_response / (single_response_1 + single_response_2)

    Parameters
    ----------
    paired_stimuli : list of paired stimuli to analyze
    responsive_method : None, to include all rois, or a method for identifying
        stim responsive rois
    plot_method : {'hist', 'ellipse', 'scatter'}

    Returns
    -------
    list of matplotlib figures, one axis per paired stimulus.
    """
    # Only stims named like 'Paired stimA stimB' can be analyzed.
    paired_stimuli = [stim for stim in paired_stimuli if 'Paired' in stim]
    if not paired_stimuli:
        return []
    figs, axs, _ = plotting.layout_subplots(
        len(paired_stimuli), rows=2, cols=4, figsize=(15, 8),
        sharex=False, rasterized=rasterized)
    for stimulus, ax in zip(paired_stimuli, axs):
        # The individual stim names follow the 'Paired' prefix.
        stims = stimulus.split()[1:]
        if responsive_method:
            # Restrict analysis to cells responsive to the paired stimulus.
            stimulus_filter = ia.identify_stim_responsive_cells(
                exptGrp, stimulus=stimulus, method=responsive_method,
                channel=channel, label=label, roi_filter=roi_filter,
                exclude='running' if exclude_running else None,
                **kwargs)
        else:
            stimulus_filter = roi_filter
        # Per-trial response = mean post-stim minus mean pre-stim signal.
        psth1, rois_1, x_ranges1 = ia.PSTH(
            exptGrp, stims[0], channel=channel, label=label, roi_filter=stimulus_filter,
            return_full=True, exclude='running' if exclude_running else None,
            **kwargs)
        responses_1 = []
        for roi_psth, roi_x_range in zip(psth1, x_ranges1):
            responses_1.append(nanmean(roi_psth[roi_x_range > 0], axis=0) -
                               nanmean(roi_psth[roi_x_range < 0], axis=0))
        psth2, rois_2, x_ranges2 = ia.PSTH(
            exptGrp, stims[1], channel=channel, label=label, roi_filter=stimulus_filter,
            return_full=True, exclude='running' if exclude_running else None,
            **kwargs)
        responses_2 = []
        for roi_psth, roi_x_range in zip(psth2, x_ranges2):
            responses_2.append(nanmean(roi_psth[roi_x_range > 0], axis=0) -
                               nanmean(roi_psth[roi_x_range < 0], axis=0))
        psth_combo, rois_combo, x_ranges_combo = ia.PSTH(
            exptGrp, stimulus, channel=channel, label=label, roi_filter=stimulus_filter,
            return_full=True, exclude='running' if exclude_running else None,
            **kwargs)
        responses_combo = []
        for roi_psth, roi_x_range in zip(psth_combo, x_ranges_combo):
            responses_combo.append(
                nanmean(roi_psth[roi_x_range > 0], axis=0) -
                nanmean(roi_psth[roi_x_range < 0], axis=0))
        # Only compare ROIs present in all three PSTHs.
        shared_rois = set(rois_1).intersection(rois_2).intersection(rois_combo)
        combined_mean = []
        combined_std = []
        summed_mean = []
        summed_std = []
        linearity_ratios = []
        for roi in shared_rois:
            combo = responses_combo[rois_combo.index(roi)]
            stim1 = responses_1[rois_1.index(roi)]
            stim2 = responses_2[rois_2.index(roi)]
            combined_mean.append(nanmean(combo))
            combined_std.append(nanstd(combo))
            summed_mean.append(nanmean(stim1) + nanmean(stim2))
            # Propagate summed std
            summed_std.append(
                np.sqrt(nanstd(stim1) ** 2 + nanstd(stim2) ** 2))
            linearity_ratios.append(combined_mean[-1] / summed_mean[-1])
        # Nothing measurable for this stimulus: hide its axis and move on.
        if np.all(np.isnan(linearity_ratios)):
            ax.set_visible(False)
            continue
        if plot_method == 'hist':
            linearity_ratios = [ratio for ratio in linearity_ratios
                                if not np.isnan(ratio)]
            # NOTE(review): this early return abandons any remaining paired
            # stimuli (and all figures) -- confirm this is intended rather
            # than a `continue`.
            if len(linearity_ratios) == 0:
                return []
            plotting.histogram(ax, linearity_ratios, bins=10, plot_mean=True)
            ax.set_title(stimulus)
            ax.set_xlabel('combined / (stim1 + stim2)')
            ax.set_ylabel('Number')
        elif plot_method == 'ellipse':
            plotting.ellipsePlot(
                ax, summed_mean, combined_mean, summed_std, combined_std,
                axesCenter=False, print_stats=True)
            ax.set_title(stimulus)
            ax.set_xlabel('stim1 + stim2')
            ax.set_ylabel('combined')
            combined_mean = np.array(combined_mean)
            combined_std = np.array(combined_std)
            summed_mean = np.array(summed_mean)
            summed_std = np.array(summed_std)
            # Clip axis limits to mean +/- 4 std so outliers don't dominate.
            max_x = np.nanmax(summed_mean + summed_std)
            x_std = 4 * nanstd(summed_mean)
            max_x = min([max_x, nanmean(summed_mean) + x_std])
            max_y = np.nanmax(combined_mean + combined_std)
            y_std = 4 * nanstd(combined_mean)
            max_y = min([max_y, nanmean(combined_mean) + y_std])
            min_x = np.nanmin(summed_mean - summed_std)
            min_x = max([min_x, nanmean(summed_mean) - x_std])
            min_y = np.nanmin(combined_mean - combined_std)
            min_y = max([min_y, nanmean(combined_mean) - y_std])
            ax.set_xlim(min_x, max_x)
            ax.set_ylim(min_y, max_y)
            ax.axvline(0, linestyle=':', color='k')
            ax.axhline(0, linestyle=':', color='k')
        elif plot_method == 'scatter':
            plotting.scatterPlot(
                ax, [summed_mean, combined_mean],
                ['stim1 + stim2', 'combined'], s=1, print_stats=True)
            ax.axvline(0, linestyle=':', color='k')
            ax.axhline(0, linestyle=':', color='k')
            ax.set_title(stimulus)
        else:
            raise ValueError(
                'Unrecognized plot method: {}'.format(plot_method))
    for fig in figs:
        fig.suptitle('Stim response linearity, {}: {}'.format(
            'all ROIs' if responsive_method is None else 'responsive ROIs only',
            'running excluded' if exclude_running else 'running included'))
    return figs
def run_duration_responsiveness(
        exptGrp, channel='Ch2', label=None, roi_filter=None, rasterized=False,
        method='responsiveness', **psth_kwargs):
    """Create figure comparing the magnitude of running responses versus
    duration of running bout.

    Top row uses the 'responsiveness' response metric, bottom row uses
    'mean'; columns compare running-start vs running-stop intervals.

    Returns
    -------
    list containing the single matplotlib figure
    """
    fig, axs = plt.subplots(2, 2, subplot_kw={'rasterized': rasterized},
                            figsize=(15, 8))
    for row, resp_method in enumerate(('responsiveness', 'mean')):
        for col, intervals in enumerate(('running_start', 'running_stop')):
            ia.compare_run_response_by_running_duration(
                exptGrp, axs[row, col], run_intervals=intervals,
                response_method=resp_method, plot_method='scatter',
                channel=channel, label=label, roi_filter=roi_filter,
                responsive_method=method, **psth_kwargs)
    return [fig]
def imaging_and_behavior_summary(
        exptGrp, channel='Ch2', label=None, roi_filter=None):
    """Creates a summary figure of imaging data and behavior data.

    One axis per trial; salience-experiment trials additionally mark the
    stimulus time and relabel the x-axis relative to it.
    """
    nTrials = sum(len(expt.findall('trial')) for expt in exptGrp)
    figs, axs, _ = plotting.layout_subplots(
        nTrials, rows=1, cols=2, figsize=(15, 8), sharex=False)
    trials = it.chain.from_iterable(
        expt.findall('trial') for expt in exptGrp)
    for ax, trial in it.izip(axs, trials):
        if not isinstance(trial.parent, lab.classes.SalienceExperiment):
            ap.plot_imaging_and_behavior(
                trial, ax, channel=channel, label=label,
                roi_filter=roi_filter, include_empty=False)
            continue
        stim = trial.get('stimulus')
        if stim == 'air':
            stim = 'airpuff'
        stim_time = trial.parent.stimulusTime()
        if 'Paired' in stim:
            keys = stim.split(' ')[1:] + ['running', 'licking']
        else:
            keys = [stim, 'running', 'licking']
        ap.plot_imaging_and_behavior(
            trial, ax, keys=keys, channel=channel, label=label,
            roi_filter=roi_filter, include_empty=True)
        # Mark the stimulus and express the time axis relative to it.
        ax.axvline(stim_time, linestyle='dashed', color='k')
        ax.set_xticklabels(ax.get_xticks() - stim_time)
        ax.set_title('{}_{}: {}'.format(
            trial.parent.parent.get('mouseID'),
            trial.parent.get('uniqueLocationKey'), trial.get('time')))
    return figs
def response_cdfs(
        exptGrp, stimuli, method='responsiveness', pre_time=None,
        post_time=None, channel='Ch2', label=None, roi_filter=None,
        rasterized=False):
    """Plot cdfs across all rois for each stim in stimuli.

    Three panels: all stims except running/licking, all running/licking
    stims, and all stims with running excluded.

    Returns
    -------
    matplotlib figure
    """
    fig, axs = plt.subplots(
        1, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
    cmap = matplotlib.cm.get_cmap(name='Spectral')

    def _plot_cdf_panel(ax, stims, exclude, title):
        # One cdf per stim, colored across the Spectral colormap; stims
        # with no finite responses are skipped (but kept in the legend).
        ax.set_title(title)
        colors = [cmap(i) for i in np.linspace(0, 0.9, len(stims))]
        for stim, color in zip(stims, colors):
            responses = ia.response_magnitudes(
                exptGrp, stim, method=method, pre_time=pre_time,
                post_time=post_time, channel=channel, label=label,
                roi_filter=roi_filter, return_full=False, exclude=exclude)
            non_nan_responses = responses[np.isfinite(responses)]
            if len(non_nan_responses):
                plotting.cdf(ax, non_nan_responses, bins='exact', color=color)
        ax.legend(stims, loc='lower right')

    _plot_cdf_panel(
        axs[0],
        [stim for stim in stimuli
         if 'running' not in stim and 'licking' not in stim],
        None, 'All stims (except running/licking)')
    _plot_cdf_panel(
        axs[1],
        [stim for stim in stimuli
         if 'running' in stim or 'licking' in stim],
        None, 'Running/licking responses')
    _plot_cdf_panel(
        axs[2],
        [stim for stim in stimuli if 'running' not in stim],
        'running', 'All stims, running excluded')
    for ax in axs:
        ax.set_xlabel('Responsiveness')
    return fig
def paired_stims_response_heatmaps(
        exptGrp, stimuli, exclude_running=False, rasterized=False,
        **response_kwargs):
    """Plot heatmaps of response magnitude of paired stims versus
    single stims

    Generator yielding two figures: one with rows sorted by the paired
    stim's response, one sorted by the single stims within each pair.
    """
    paired_stims = [stim for stim in stimuli if 'Paired' in stim]
    # The two figures differ only in the sort key and the title.
    for sort_by_pair, sort_label in (
            (True, 'paired stim'), (False, 'single stims in pair')):
        fig, axs = plt.subplots(
            1, len(paired_stims), subplot_kw={'rasterized': rasterized})
        fig.subplots_adjust(wspace=0.5)
        for ax, paired_stim in it.izip(axs, paired_stims):
            stims_in_pair = paired_stim.split()[1:]
            # Column order: paired stim, its members, then all other
            # (non-paired) stims.
            stims_to_plot = [paired_stim] + stims_in_pair + \
                [stim for stim in exptGrp.stimuli()
                 if 'Paired' not in stim and stim not in stims_in_pair]
            ap.stim_response_heatmap(
                exptGrp, ax, stims_to_plot,
                sort_by=paired_stim if sort_by_pair else stims_in_pair,
                exclude='running' if exclude_running else None,
                aspect_ratio=0.2, **response_kwargs)
            # Separators after the paired stim and after the pair members.
            ax.axvline(0.5, linewidth=3, color='k')
            ax.axvline(2.5, linewidth=3, color='k')
            for tick_label in ax.get_yticklabels():
                tick_label.set_fontsize(7)
            # Abbreviate x labels to the initials of each word.
            x_labels = []
            for tick_label in ax.get_xticklabels():
                tick_label.set_fontsize(5)
                x_labels.append(
                    ''.join([s[0] for s in tick_label.get_text().split()]))
            ax.set_xticklabels(x_labels)
        title = fig.suptitle(
            'Paired stim heatmap, sort by {}, running {}'.format(
                sort_label, 'excluded' if exclude_running else 'included'))
        title.set_fontsize(7)
        yield fig
def compare_bouton_response_figure(
        exptGrp, stimuli, plot_method='cdf', save_data=False, rasterized=False,
        **response_kwargs):
    """Figure to compare different types of boutons.

    One panel per comparison method; the unused bottom-right axis is
    repurposed as a standalone legend.
    """
    fig, axs = plt.subplots(2, 3, subplot_kw={'rasterized': rasterized})
    # (comparison method, axis position) in the original plotting order.
    panel_layout = (
        ('angle', (0, 0)),
        ('abs angle', (1, 0)),
        ('corr', (0, 1)),
        ('abs corr', (1, 1)),
        ('mean diff', (0, 2)),
    )
    data_to_save = {}
    for comp_method, (row, col) in panel_layout:
        data_to_save[comp_method] = ap.compare_bouton_responses(
            exptGrp, axs[row, col], stimuli, comp_method=comp_method,
            plot_method=plot_method, **response_kwargs)
    # Rebuild each line's color/label as horizontal lines in the spare
    # axis, so its legend can serve the whole figure.
    legend_ax = axs[1, 2]
    for line_idx, line in enumerate(axs[0, 2].lines):
        legend_ax.axhline(
            line_idx, color=line.get_color(), label=line.get_label())
    legend_ax.set_ylim(-1, len(axs[0, 2].lines))
    legend_ax.tick_params(labelbottom=False, labelleft=False, bottom=False,
                          left=False, top=False, right=False)
    legend_ax.legend()
    if save_data:
        misc.save_data(data_to_save, fig=fig,
                       label='compare_bouton_responses', method=save_data)
    return fig
def hidden_rewards_learning_summary(
        exptGrps, save_data=False, rasterized=False, groupby=None, plotby=None,
        orderby=None, colors=None, label_every_n=1):
    """Generates a summary figure of hidden reward analysis plots.

    Produces two 2x4 figures: the first covers lap/reward-rate metrics,
    the second covers licking metrics. Each panel is a plot_metric line
    plot over `plotby` after aggregating within `groupby`.

    Parameters
    ----------
    save_data : False, or a save method understood by misc.save_data.
    label_every_n : int
        Thin the x-axis labels to every n-th entry.

    Returns
    -------
    list of the two matplotlib figures.
    """
    if groupby is None:
        groupby = [['expt', 'condition_day_session']]
    if plotby is None:
        plotby = ['condition_day_session']
    data_to_save = {}
    figs = []
    # Figure 1: lap timing, reward rates, and lap counts.
    fig, axs = plt.subplots(
        2, 4, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
    fig.subplots_adjust(hspace=0.3)
    data_to_save['time_per_lap'] = plot_metric(
        axs[0, 0], exptGrps, metric_fn=eg.time_per_lap,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Time per lap (sec)',
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['fraction_rewarded_laps'] = plot_metric(
        axs[0, 1], exptGrps, metric_fn=ra.fraction_of_laps_rewarded,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Fraction of laps rewarded',
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['rewards_per_lap'] = plot_metric(
        axs[0, 2], exptGrps, metric_fn=eg.stims_per_lap,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Number of rewards per lap',
        activity_kwargs={'stimulus': 'water'},
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['n_laps'] = plot_metric(
        axs[0, 3], exptGrps, metric_fn=eg.number_of_laps,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Number of laps',
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['water_rate'] = plot_metric(
        axs[1, 0], exptGrps, metric_fn=ra.rate_of_water_obtained,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Rate of water obtained (ms/min)',
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['rewarded_lick_duration'] = plot_metric(
        axs[1, 1], exptGrps, metric_fn=eg.lick_bout_duration,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        activity_kwargs={'bouts_to_include': 'rewarded', 'threshold': 0.5},
        activity_label='Duration of rewarded lick bouts (s)',
        plot_method='line', label_every_n=label_every_n, label_groupby=False)
    # Total lick count: aggregate behavior events with sum rather than mean.
    data_to_save['n_licks'] = plot_metric(
        axs[1, 2], exptGrps, metric_fn=eg.behavior_dataframe,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        activity_kwargs={'key': 'licking'}, activity_label='Number of licks',
        plot_method='line', agg_fn=np.sum, label_every_n=label_every_n,
        label_groupby=False)
    fig.suptitle('groupby = {}'.format(groupby))
    figs.append(fig)
    if save_data:
        misc.save_data(data_to_save, fig=figs, method=save_data,
                       label='hidden_rewards_behavior_1')
    data_to_save = {}
    # Figure 2: spatial licking metrics.
    fig, axs = plt.subplots(
        2, 4, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
    fig.subplots_adjust(hspace=0.3)
    data_to_save['rewarded_lick_intervals'] = plot_metric(
        axs[0, 0], exptGrps, metric_fn=ra.fraction_rewarded_lick_intervals,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Fraction of lick intervals rewarded', colors=colors,
        activity_kwargs={'threshold': 0.5}, label_every_n=label_every_n,
        label_groupby=False)
    data_to_save['licks_in_rewarded_intervals'] = plot_metric(
        axs[1, 0], exptGrps,
        metric_fn=ra.fraction_licks_in_rewarded_intervals,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Fraction of licks in rewarded intervals',
        colors=colors, activity_kwargs={'threshold': 0.5},
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['licks_in_reward_zone'] = plot_metric(
        axs[0, 1], exptGrps, metric_fn=ra.fraction_licks_in_reward_zone,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Fraction of licks in reward zone', colors=colors,
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['licks_near_rewards'] = plot_metric(
        axs[1, 1], exptGrps, metric_fn=ra.fraction_licks_near_rewards,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Fraction of licks near rewards', colors=colors,
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['licking_spatial_information'] = plot_metric(
        axs[0, 2], exptGrps, metric_fn=ra.licking_spatial_information,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Licking spatial information (bits/sec)', colors=colors,
        label_every_n=label_every_n, label_groupby=False)
    # Licking circular variance
    data_to_save['lick_to_reward_distance'] = plot_metric(
        axs[0, 3], exptGrps, metric_fn=ra.lick_to_reward_distance,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Lick distance to reward (norm units)', colors=colors,
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['licks_outside_reward_vicinity'] = plot_metric(
        axs[1, 2], exptGrps, metric_fn=ra.licks_outside_reward_vicinity,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Fraction of licks outside reward vicinity', colors=colors,
        label_every_n=label_every_n, label_groupby=False)
    # data_to_save['anticipatory_licks'] = plot_metric(
    #     axs[1, 3], exptGrps, metric_fn=ra.anticipatory_licking,
    #     groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
    #     activity_label='Anticipatory licking', colors=colors,
    #     label_every_n=label_every_n, label_groupby=False)
    data_to_save['anticipatory_lick_fraction'] = plot_metric(
        axs[1, 3], exptGrps, metric_fn=ra.fraction_licks_near_rewards,
        groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
        activity_label='Anticipatory lick fraction', colors=colors,
        label_every_n=label_every_n, label_groupby=False,
        activity_kwargs={'pre_window_cm': 5, 'exclude_reward': True})
    fig.suptitle('groupby = {}'.format(groupby))
    figs.append(fig)
    if save_data:
        misc.save_data(data_to_save, fig=figs, method=save_data,
                       label='hidden_rewards_behavior_2')
    return figs
def hidden_reward_behavior_control_summary(
        exptGrps, save_data=False, rasterized=False, groupby=None, plotby=None,
        orderby=None, colors=None, label_every_n=1):
    """Generate a control figure for hidden rewards behavior experiments.

    Panels: total number of sessions, number of laps, reward windows per
    lap, and mean reward position (with the expected positions overlaid
    as red lines when available).

    Returns
    -------
    matplotlib figure
    """
    if groupby is None:
        groupby = [['expt', 'condition_day_session']]
    if plotby is None:
        plotby = ['condition_day_session']
    data_to_save = {}
    fig, axs = plt.subplots(
        2, 2, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
    fig.subplots_adjust(hspace=0.3)
    # Grouping by expt, trial, or mouse defeats the purpose of n_sessions plot
    n_ses_groupby = []
    for group in groupby:
        # NOTE: Python 2 filter() returns a list here, so len() below works.
        new_groupby = filter(
            lambda x: x not in ['expt', 'trial', 'mouseID'], group)
        if len(new_groupby):
            n_ses_groupby.append(new_groupby)
    if not len(n_ses_groupby):
        n_ses_groupby = None
    data_to_save['n_sessions'] = plot_metric(
        axs[0, 0], exptGrps, metric_fn=eg.dataframe,
        groupby=n_ses_groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Total number of sessions',
        label_every_n=label_every_n, agg_fn=np.sum)
    data_to_save['n_laps'] = plot_metric(
        axs[0, 1], exptGrps, metric_fn=eg.number_of_laps,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Number of laps',
        label_every_n=label_every_n)
    data_to_save['reward_windows_per_lap'] = plot_metric(
        axs[1, 0], exptGrps, metric_fn=eg.stims_per_lap,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Number of reward windows per lap',
        activity_kwargs={'stimulus': 'reward'},
        label_every_n=label_every_n)
    data_to_save['reward_position'] = plot_metric(
        axs[1, 1], exptGrps, metric_fn=eg.stim_position,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_label='Mean reward location',
        activity_kwargs={'stimulus': 'reward', 'normalized': False},
        label_every_n=label_every_n)
    # Overlay the expected reward positions; skip silently for experiment
    # types that don't define rewardPositions.
    try:
        expected_positions = [expt.rewardPositions(units=None)
                              for exptGrp in exptGrps for expt in exptGrp]
        expected_positions = set(it.chain(*expected_positions))
    except AttributeError:
        pass
    else:
        for position in expected_positions:
            axs[1, 1].axhline(position, color='red')
    if save_data:
        misc.save_data(data_to_save, fig=fig, method=save_data,
                       label='hidden_rewards_control')
    return fig
def hidden_rewards_move_rewards_learning(
        exptGrps, groupby=None, plotby=None, orderby=None, colors=None,
        label_every_n=1, rasterized=False, save_data=False,
        rewards='combined', by_condition=False):
    """Plot hidden-reward learning metrics split out by reward position.

    Each experiment group is expanded into one sub-group per reward
    position (or per condition label when by_condition is True), and the
    licking metrics are plotted per sub-group.

    Parameters
    ----------
    rewards : {'combined', 'separate'}
        'combined' pools reward positions across all exptGrps;
        'separate' tracks the positions of each group individually.
    by_condition : bool
        If True, split by condition label instead of physical position.

    Returns
    -------
    matplotlib figure
    """
    if groupby is None:
        groupby = [['expt', 'condition_day_session']]
    if plotby is None:
        plotby = ['condition_day_session']
    data_to_save = {}
    if rewards == 'combined':
        # One pooled, sorted collection of positions for all groups.
        reward_positions = set()
        for exptGrp in exptGrps:
            if by_condition:
                conditions, _ = exptGrp.condition_label(by_mouse=True)
                reward_positions = reward_positions.union(conditions.values())
            else:
                for expt in exptGrp:
                    for pos in expt.rewardPositions(units=None):
                        reward_positions.add(pos)
        reward_positions = sorted(reward_positions)
    elif rewards == 'separate':
        # Per-group sorted positions, keyed by the group.
        reward_positions = {}
        for exptGrp in exptGrps:
            reward_positions[exptGrp] = set()
            if by_condition:
                conditions, _ = exptGrp.condition_label(by_mouse=True)
                # BUG FIX: this previously rebound `reward_positions` itself
                # (clobbering the dict with a set), which made the sorted()
                # call below raise a TypeError.
                reward_positions[exptGrp] = reward_positions[exptGrp].union(
                    conditions.values())
            else:
                for expt in exptGrp:
                    for pos in expt.rewardPositions(units=None):
                        reward_positions[exptGrp].add(pos)
            reward_positions[exptGrp] = sorted(reward_positions[exptGrp])
    if colors is None:
        if rewards == 'combined':
            colors = sns.color_palette(
                "Paired", len(exptGrps) * len(reward_positions))
        if rewards == 'separate':
            # NOTE(review): summing len() over the dict iterates its KEYS
            # (the groups), giving experiment counts rather than position
            # counts -- confirm intended.
            colors = sns.color_palette(
                "Paired", len(exptGrps) * sum(map(len, reward_positions)))
        else:
            # Lightest is too light, so add an extra color that we'll ignore
            # NOTE(review): this else pairs with the 'separate' check, so it
            # also runs after the 'combined' branch above -- confirm intended.
            colors = [sns.light_palette(
                color, len(reward_positions) + 1,
                reverse=True)[:len(reward_positions)] for color in colors]
            colors = list(it.chain(*colors))
    # Expand each group into one sub-group per reward position, labeled
    # with the position, and record matching metric kwargs.
    new_exptGrps = []
    activity_kwargs = []
    for exptGrp in exptGrps:
        if rewards == 'combined':
            pos_iter = reward_positions
        elif rewards == 'separate':
            pos_iter = reward_positions[exptGrp]
        for pos in pos_iter:
            new_exptGrp = lab.classes.HiddenRewardExperimentGroup(exptGrp)
            if by_condition:
                new_exptGrp.label(exptGrp.label() + '_{}'.format(pos))
                activity_kwargs.append({'rewardPositions': pos})
            else:
                new_exptGrp.label(exptGrp.label() + '_{:0.1f}'.format(pos))
                activity_kwargs.append({'rewardPositions': [pos]})
            new_exptGrps.append(new_exptGrp)
    fig, axs = plt.subplots(
        1, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized},
        squeeze=False)
    fig.subplots_adjust(hspace=0.3)
    data_to_save['lick_to_reward_distance'] = plot_metric(
        axs[0, 0], new_exptGrps, metric_fn=ra.lick_to_reward_distance,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_kwargs=activity_kwargs,
        activity_label='lick distance to reward (norm units)',
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['licks_near_rewards'] = plot_metric(
        axs[0, 1], new_exptGrps, metric_fn=ra.fraction_licks_near_rewards,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_kwargs=activity_kwargs,
        activity_label='fraction of licks near rewards',
        label_every_n=label_every_n, label_groupby=False)
    data_to_save['fraction_laps_licking'] = plot_metric(
        axs[0, 2], new_exptGrps,
        metric_fn=ra.fraction_of_laps_with_licking_near_reward,
        groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
        plot_method='line', activity_kwargs=activity_kwargs,
        activity_label='fraction of laps w/ licks near rewards',
        label_every_n=label_every_n, label_groupby=False)
    fig.suptitle('groupby = {}'.format(groupby))
    if save_data:
        misc.save_data(data_to_save, fig=fig, method=save_data,
                       label='hidden_rewards_behavior')
    return fig
def stim_response_summary(
        expt_grp, stimuli, pre_time=None, post_time=None, channel='Ch2',
        label=None, roi_filter=None):
    """Plot a two-row PSTH summary, one column per stimulus.

    Top row: individual gray traces with the mean overlaid.
    Bottom row: mean trace with an SEM shade.
    Y-limits are matched within each row.

    Returns
    -------
    mpl.pyplot.Figure
    """
    fig, axs = plt.subplots(2, len(stimuli), figsize=(15, 8))
    for stim, (top_ax, bottom_ax) in zip(stimuli, axs.T):
        ia.PSTH(
            expt_grp, stim, ax=top_ax, pre_time=pre_time,
            post_time=post_time, shade_ste=False, plot_mean=True,
            channel=channel, label=label, roi_filter=roi_filter,
            gray_traces=True)
        ia.PSTH(
            expt_grp, stim, ax=bottom_ax, pre_time=pre_time,
            post_time=post_time, shade_ste='sem', plot_mean=True,
            channel=channel, label=label, roi_filter=roi_filter)
        top_ax.set_title(stim)
        top_ax.set_xlabel('')
        top_ax.tick_params(axis='x', labelbottom=False)
    # Share y-limits across each row so stimuli are visually comparable.
    for row in (axs[0, :], axs[1, :]):
        low = min(ax.get_ylim()[0] for ax in row)
        high = max(ax.get_ylim()[1] for ax in row)
        for ax in row:
            ax.set_ylim(low, high)
    # Only the first column keeps its y-axis label.
    for ax_row in axs[:, 1:]:
        for ax in ax_row:
            ax.set_ylabel('')
    return fig
def licktogram_summary(expt_grps, rasterized=False, polar=False):
    """Plots licktograms for every condition/day by mouse.

    One figure per mouse: rows are conditions (sorted), columns are
    sessions. Cells with no unique matching experiment are hidden.

    Returns
    -------
    list of mpl.pyplot.Figure, ordered by mouse ID.
    """
    # Pool per-group experiment metadata into a single dataframe.
    dataframe = pd.concat(
        [grp.dataframe(
            grp, include_columns=['mouseID', 'expt', 'condition', 'session'])
         for grp in expt_grps])
    # Map each mouse ID to the label of the group containing it.
    mouse_grp_dict = {}
    for grp in expt_grps:
        for mouse in set(expt.parent.get('mouseID') for expt in grp):
            mouse_grp_dict[mouse] = grp.label()
    fig_dict = {}
    for mouse_id, df in dataframe.groupby('mouseID'):
        conditions = sorted(set(df['condition']))
        n_rows = len(conditions)
        n_cols = df['session'].max() + 1
        fig, axs = plt.subplots(
            n_rows, n_cols, figsize=(15, 8), sharey=not polar,
            subplot_kw={'rasterized': rasterized, 'polar': polar},
            squeeze=False)
        for c_idx, condition in enumerate(conditions):
            for session in range(n_cols):
                ax = axs[c_idx, session]
                df_slice = df[(df['condition'] == condition) &
                              (df['session'] == session)]
                if len(df_slice) != 1:
                    # No (or ambiguous) experiment for this cell.
                    ax.set_visible(False)
                    continue
                expt = df_slice['expt'].iloc[0]
                if polar:
                    expt.polar_lick_plot(ax=ax)
                else:
                    expt.licktogram(ax=ax, plot_belt=False)
        for ax, condition in zip(axs[:, -1], conditions):
            plotting.right_label(ax, condition)
        for session, ax in enumerate(axs[0, :], 1):
            ax.set_title('Session {}'.format(session))
        # Strip duplicated labels/titles off interior axes.
        for ax in axs[:, 1:].flat:
            ax.set_ylabel('')
        for ax in axs[1:, :].flat:
            ax.set_title('')
        for ax in axs[:-1, :].flat:
            ax.set_xlabel('')
        fig.suptitle('{}: {}'.format(mouse_grp_dict[mouse_id], mouse_id))
        fig_dict[mouse_id] = fig
    return [fig_dict[mouse] for mouse in sorted(fig_dict.keys())]
def behavior_cross_correlation(
        expt_grps, roi_filters, behavior_key, channel='Ch2', label=None,
        rasterized=False, max_lag=10, thresh=0.5, colors=None):
    """Plot the cross-correlation between imaging activity and a behavior.

    Layout: 3 x (n_groups + 1) grid. One column per group, plus a final
    column overlaying all groups. Row 0: per-ROI cross-correlograms with
    the mean overlaid; row 1: histogram of zero-lag correlations; row 2:
    histogram of time-to-peak offsets.

    Parameters
    ----------
    expt_grps, roi_filters, channel, label : standard analysis arguments.
    behavior_key : str
        Behavior data key, forwarded to sa.xcorr_imaging_behavior.
    max_lag : float
        Maximum lag over which to correlate (seconds, per the axis labels).
    thresh : float
        Threshold forwarded to sa.xcorr_imaging_behavior -- see that
        function for exact semantics.
    colors : list, optional
        One color per group; defaults to the seaborn palette.

    Returns
    -------
    mpl.pyplot.Figure
    """
    if colors is None:
        colors = sns.color_palette()
    fig, axs = plt.subplots(
        3, len(expt_grps) + 1, squeeze=False,
        subplot_kw={'rasterized': rasterized}, figsize=(15, 8))
    fig.suptitle('Imaging-behavior cross-correlation: {}'.format(behavior_key))
    corrs = {}
    # Per-group summary stats, wrapped in single-element lists because
    # plotting.grouped_bar below expects one cluster per condition.
    zero_lag, peak_offset = [], []
    for grp_axs, color, expt_grp, roi_filter in zip(
            axs.T, colors, expt_grps, roi_filters):
        # corr: index = lag (s), one column per ROI (per usage below).
        corr = sa.xcorr_imaging_behavior(
            expt_grp, behavior_key, max_lag=max_lag, thresh=thresh,
            return_full=False, channel=channel, label=label,
            roi_filter=roi_filter)
        assert 0. in corr.index
        corrs[expt_grp] = corr
        # Correlation value of every ROI at lag 0.
        zero_lag.append([np.array(corr[corr.index == 0])[0]])
        # Lag at which |corr| peaks, per ROI.
        peak_offset.append([np.array(
            [corr.index[i] for i in np.argmax(
                np.abs(np.array(corr)), axis=0)])])
        light_color = sns.light_palette(color)[1]
        grp_axs[0].plot(corr.index, corr, color=light_color)
        grp_axs[0].plot(corr.index, corr.mean(1), color=color)
        grp_axs[0].set_xlim(corr.index[0], corr.index[-1])
        grp_axs[0].set_title(expt_grp.label())
        grp_axs[0].set_xlabel('Lag (s)')
        grp_axs[0].set_ylabel('Cross-correlation')
        plotting.histogram(
            grp_axs[1], zero_lag[-1][0], bins=10,
            range=(-1, 1), color=color, normed=False,
            plot_mean=True, label=None, orientation='vertical', filled=True,
            mean_kwargs=None)
        grp_axs[1].set_xlabel('zero-lag cross-correlation')
        grp_axs[1].set_ylabel('ROIs')
        plotting.histogram(
            grp_axs[2], peak_offset[-1][0], bins=10,
            range=(-max_lag, max_lag), color=color, normed=False,
            plot_mean=True, label=None, orientation='vertical', filled=True,
            mean_kwargs=None)
        grp_axs[2].set_xlabel('Time to peak (s)')
        grp_axs[2].set_ylabel('ROIs')
    #
    # Directly compare
    #
    for expt_grp, color in zip(expt_grps, colors):
        corr = corrs[expt_grp]
        axs[0, -1].plot(
            corr.index, corr.mean(1), color=color, label=expt_grp.label())
        axs[0, -1].fill_between(
            corr.index, corr.mean(1) - corr.sem(1), corr.mean(1) + corr.sem(1),
            color=color, alpha=0.5)
    # NOTE(review): 'corr' here is the last group's frame; assumes all
    # groups share the same lag index -- TODO confirm.
    axs[0, -1].set_xlim(corr.index[0], corr.index[-1])
    axs[0, -1].set_xlabel('Lag (s)')
    axs[0, -1].set_ylabel('Cross-correlation')
    # Match y-limits across the cross-correlogram row.
    min_y, max_y = np.inf, - np.inf
    for ax in axs[0, :]:
        min_y = min(min_y, ax.get_ylim()[0])
        max_y = max(max_y, ax.get_ylim()[1])
    for ax in axs[0, :]:
        ax.set_ylim(min_y, max_y)
    axs[0, -1].legend(frameon=False, loc='best')
    plotting.grouped_bar(
        axs[1, -1], values=zero_lag, cluster_labels=[''],
        condition_labels=[expt_grp.label() for expt_grp in expt_grps],
        bar_colors=colors, scatter_points=True, jitter_x=True, s=20)
    axs[1, -1].set_ylabel('zero-lag cross-correlation')
    plotting.grouped_bar(
        axs[2, -1], values=peak_offset, cluster_labels=[''],
        condition_labels=[expt_grp.label() for expt_grp in expt_grps],
        bar_colors=colors, scatter_points=True, jitter_x=True, s=20)
    axs[2, -1].set_ylabel('Time to peak (s)')
    return fig
def plotControlSummary(
        exptGrps, roi_filters=None, channel='Ch2', label=None,
        rasterized=False, groupby=None, plotby=None, **plot_kwargs):
    """Plot a series of potentially control analysis, looking at similarity of
    data over time.

    Draws a 2x3 grid of line plots: mean dF/F, transient amplitude,
    duration, and frequency, trace sigma, and mean raw fluorescence.

    Returns
    -------
    mpl.pyplot.Figure
    """
    fig, axs = plt.subplots(
        2, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
    fig.subplots_adjust(hspace=0.3)
    base_kwargs = {'channel': channel, 'label': label}
    # (grid position, metric function, extra activity kwargs, y-axis label)
    panels = [
        ((0, 0), ia.population_activity, {'stat': 'mean'}, "Mean dF/F"),
        ((0, 1), ia.population_activity, {'stat': 'amplitude'},
         "Mean transient amplitude"),
        ((1, 0), ia.population_activity, {'stat': 'duration'},
         "Mean transient duration"),
        ((1, 1), ia.population_activity, {'stat': 'frequency'},
         "Mean transient frequency"),
        ((0, 2), ia.trace_sigma, {}, 'Trace sigma'),
        ((1, 2), ia.mean_fluorescence, {}, 'Mean raw fluorescence'),
    ]
    for (row, col), metric_fn, extra_kwargs, y_label in panels:
        activity_kwargs = base_kwargs.copy()
        activity_kwargs.update(extra_kwargs)
        plot_metric(
            ax=axs[row, col], exptGrps=exptGrps, roi_filters=roi_filters,
            metric_fn=metric_fn, plot_method='line', groupby=groupby,
            plotby=plotby, activity_kwargs=activity_kwargs,
            activity_label=y_label, **plot_kwargs)
    return fig
def plot_calcium_dynamics_summary(
        expt_grps, roi_filters=None, channel='Ch2', label=None,
        rasterized=False, groupby=None, plotby=None, plot_method='cdf',
        **plot_kwargs):
    """A set of control plots designed to compare baseline calcium properties
    between genotypes.

    Panels: transient amplitude, duration, and frequency, plus trace sigma
    and mean raw fluorescence. axs[0, 0] is left unused -- it appears to
    be reserved for a transient PSTH (see the 'Trans psth' note below).

    Each group may override the imaging channel and label via its ``args``
    dict (keys 'channel' / 'imaging_label'); otherwise the ``channel`` and
    ``label`` arguments are used.

    Returns
    -------
    mpl.pyplot.Figure
    """
    fig, axs = plt.subplots(
        2, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
    fig.subplots_adjust(hspace=0.3)
    # Trans psth
    # Build one base kwargs dict per group, preferring per-group overrides.
    base_kwargs = []
    for expt_grp in expt_grps:
        grp_kwargs = {}
        try:
            grp_kwargs['channel'] = expt_grp.args['channel']
        except KeyError:
            grp_kwargs['channel'] = channel
        try:
            grp_kwargs['label'] = expt_grp.args['imaging_label']
        except KeyError:
            grp_kwargs['label'] = label
        # BUG FIX: grp_kwargs was previously built and then discarded
        # (never appended), so every activity_kwargs list below was empty.
        base_kwargs.append(grp_kwargs)

    def _stat_kwargs(stat):
        # One kwargs dict per group, with the requested statistic added.
        return [dict(bkw, stat=stat) for bkw in base_kwargs]

    plot_metric(
        ax=axs[0, 1], exptGrps=expt_grps, roi_filters=roi_filters,
        metric_fn=ia.population_activity, plot_method=plot_method,
        groupby=groupby, plotby=plotby,
        activity_kwargs=_stat_kwargs('amplitude'),
        activity_label="Mean transient amplitude", **plot_kwargs)
    plot_metric(
        ax=axs[1, 0], exptGrps=expt_grps, roi_filters=roi_filters,
        metric_fn=ia.population_activity, plot_method=plot_method,
        groupby=groupby, plotby=plotby,
        activity_kwargs=_stat_kwargs('duration'),
        activity_label="Mean transient duration", **plot_kwargs)
    plot_metric(
        ax=axs[1, 1], exptGrps=expt_grps, roi_filters=roi_filters,
        metric_fn=ia.population_activity, plot_method=plot_method,
        groupby=groupby, plotby=plotby,
        activity_kwargs=_stat_kwargs('frequency'),
        activity_label="Mean transient frequency", **plot_kwargs)
    plot_metric(
        ax=axs[0, 2], exptGrps=expt_grps, roi_filters=roi_filters,
        metric_fn=ia.trace_sigma, plot_method=plot_method,
        groupby=groupby, plotby=plotby, activity_kwargs=base_kwargs,
        activity_label='Trace sigma', **plot_kwargs)
    plot_metric(
        ax=axs[1, 2], exptGrps=expt_grps, roi_filters=roi_filters,
        metric_fn=ia.mean_fluorescence, plot_method=plot_method,
        groupby=groupby, plotby=plotby, activity_kwargs=base_kwargs,
        activity_label='Mean raw fluorescence', **plot_kwargs)
    return fig
def transient_summary(
        expt_grps, plot_method, intervals='running', roi_filters=None,
        groupby=None, plotby=None, label_every_n=1, save_data=False,
        rasterized=False, interval_kwargs=None, channel='Ch2', label=None,
        **plot_kwargs):
    """Generate a summary plot of place field transient statistics.

    Lays out a 3x5 grid: one column per transient statistic (amplitude,
    duration, responseMagnitude, norm transient auc2, frequency) and one
    row each for all transients, transients within the requested
    intervals, and transients outside of them.

    Parameters
    ----------
    expt_grps, roi_filters, channel, label : standard analysis arguments.
    plot_method : str
        Forwarded to plot_metric (e.g. 'cdf').
    intervals : {'running', 'place field', 'reward'}
        Which intervals define the 'in'/'out' rows.
    interval_kwargs : dict, optional
        Extra kwargs for the interval-generating function.
    save_data : falsy or str
        If truthy, passed as the method to misc.save_data.

    Returns
    -------
    mpl.pyplot.Figure
    """
    if interval_kwargs is None:
        interval_kwargs = {}
    if roi_filters is None:
        roi_filters = [None] * len(expt_grps)
    # Determine per-group 'in' intervals; 'out' is the complement.
    kwargs = {}
    if intervals == 'running':
        kwargs.update(interval_kwargs)
        in_intervals = [inter.running_intervals(expt_grp, **kwargs)
                        for expt_grp in expt_grps]
    elif intervals == 'place field':
        kwargs.update(interval_kwargs)
        in_intervals = [
            inter.place_fields(expt_grp, roi_filter=roi_filter, **kwargs)
            for expt_grp, roi_filter in zip(expt_grps, roi_filters)]
    elif intervals == 'reward':
        kwargs = {'nearness': 0.1}
        kwargs.update(interval_kwargs)
        in_intervals = [inter.near_rewards(expt_grp, **kwargs)
                        for expt_grp in expt_grps]
    else:
        raise ValueError("Unrecognized value for 'intervals' argument")
    out_intervals = [~ints for ints in in_intervals]
    data_to_save = {}
    fig, axs = plt.subplots(
        3, 5, figsize=(15, 8), subplot_kw={'rasterized': rasterized},
        sharey='col')
    fig.subplots_adjust(hspace=0.3)
    # (statistic passed to the metric, key prefix used in data_to_save)
    stat_columns = [
        ('amplitude', 'amplitude'),
        ('duration', 'duration'),
        ('responseMagnitude', 'magnitude'),
        ('norm transient auc2', 'auc'),
        ('frequency', 'frequency'),
    ]
    # (row index, data_to_save suffix, per-group intervals or None for all)
    interval_rows = [
        (0, 'all', None),
        (1, 'in', in_intervals),
        (2, 'out', out_intervals),
    ]
    for col, (stat, save_prefix) in enumerate(stat_columns):
        for row, suffix, grp_intervals in interval_rows:
            if grp_intervals is None:
                # 'all' row: a single kwargs dict shared by every group.
                activity_kwargs = {'stat': stat, 'interval': None,
                                   'channel': channel, 'label': label}
            else:
                # One kwargs dict per group, carrying that group's intervals.
                activity_kwargs = [
                    {'stat': stat, 'interval': grp_interval,
                     'channel': channel, 'label': label}
                    for grp_interval in grp_intervals]
            data_to_save['{}_{}'.format(save_prefix, suffix)] = plot_metric(
                axs[row, col], expt_grps,
                metric_fn=ia.population_activity_new,
                plot_method=plot_method, roi_filters=roi_filters,
                groupby=groupby, plotby=plotby,
                activity_kwargs=activity_kwargs, activity_label='',
                label_every_n=label_every_n, **plot_kwargs)
        axs[0, col].set_title(stat)
    # Remove extra labels
    for ax in axs[:2, :].flat:
        ax.set_xlabel('')
    for ax in axs[:, 1:].flat:
        ax.set_ylabel('')
    for ax in axs[1:, :].flat:
        ax.set_title('')
    plotting.right_label(axs[0, -1], 'all trans')
    plotting.right_label(axs[1, -1], 'trans in')
    plotting.right_label(axs[2, -1], 'trans out')
    fig.suptitle('Activity by {}\ngroupby={}'.format(intervals, groupby))
    if save_data:
        misc.save_data(data_to_save, fig=fig, label='transient_summary',
                       method=save_data)
    return fig
def thresholded_metric_vs_metric_figure(
        exptGrps, x_metric, y_metric, filter_metric, thresholds, roi_filters=None,
        x_metric_kwargs=None, y_metric_kwargs=None, filter_metric_kwargs=None,
        xlabel=None, ylabel=None, plot_method='scatter', groupby=None,
        colorby=None, filter_on=('roi',), title='', save_data=None, filter_fn=None,
        **plot_kwargs):
    """Plot y_metric against x_metric, gated by a third (filter) metric.

    One column per threshold; three rows per column: all values, values
    with filter metric below the threshold, and values above it.

    Returns
    -------
    mpl.pyplot.Figure
    """
    fig, axs = plt.subplots(3, len(thresholds), figsize=(15, 8))
    data_to_save = {}
    if xlabel is None:
        xlabel = 'Metric 1'
    if ylabel is None:
        ylabel = 'Metric 2'
    filter_labels = ['all', 'less_than', 'greater_than']
    for col, threshold in enumerate(thresholds):
        # BUG FIX (robustness): the row filters were previously created
        # once, before this loop, as closures over the loop variable
        # 'threshold' -- relying on late binding and on the filters being
        # evaluated within the same iteration. Bind the current threshold
        # explicitly via a default argument instead, and avoid shadowing
        # the 'filter_fn' parameter.
        def _below(df, _thresh=threshold):
            return df['filter_metric_value'] < _thresh

        def _above(df, _thresh=threshold):
            return df['filter_metric_value'] > _thresh

        filter_fns = [
            misc.df_filter_intersection([None, filter_fn]),
            misc.df_filter_intersection([_below, filter_fn]),
            misc.df_filter_intersection([_above, filter_fn])]
        for row, (row_filter_fn, filter_label) in enumerate(
                zip(filter_fns, filter_labels)):
            label = '{}_{}'.format(filter_label, threshold)
            data_to_save[label] = plot_paired_metrics(
                exptGrps, roi_filters=roi_filters, ax=axs[row, col],
                first_metric_fn=x_metric, second_metric_fn=y_metric,
                first_metric_kwargs=x_metric_kwargs,
                second_metric_kwargs=y_metric_kwargs,
                first_metric_label=xlabel,
                second_metric_label=ylabel,
                plot_method=plot_method,
                groupby=groupby,
                colorby=colorby,
                filter_metric_fn=filter_metric,
                filter_metric_merge_on=filter_on,
                filter_metric_fn_kwargs=filter_metric_kwargs,
                filter_fn=row_filter_fn, **plot_kwargs)
        axs[0, col].set_title('Threshold = {}'.format(threshold))
    for ax, label in zip(axs[:, -1], filter_labels):
        plotting.right_label(ax, label)
    for ax in axs[:, 1:].flat:
        ax.set_ylabel('')
    for ax in axs[:-1, :].flat:
        ax.set_xlabel('')
    fig.suptitle(title)
    if save_data:
        misc.save_data(data_to_save, fig=fig,
                       label='thresholded_metric_vs_metric', method=save_data)
    return fig
def hidden_rewards_number_of_licks(
        expt_grps, rasterized=False, groupby=None, plotby=None,
        label_every_n=1, **plot_kwargs):
    """Plots the total number of licks in vs out of reward per mouse"""
    if groupby is None:
        groupby = [['expt', 'condition_day_session']]
    if plotby is None:
        plotby = ['condition_day_session']
    # Collect the mice in each group; the widest group sets the column count.
    grp_mice = {}
    n_cols = -1
    for grp in expt_grps:
        grp_mice[grp] = {expt.parent for expt in grp}
        n_cols = max(n_cols, len(grp_mice[grp]))
    fig, axs = plt.subplots(
        len(expt_grps), n_cols, figsize=(15, 8), squeeze=False,
        subplot_kw={'rasterized': rasterized})
    fig.subplots_adjust(hspace=0.3)
    for grp, grp_axs in zip(expt_grps, axs):
        for mouse, ax in zip(sorted(grp_mice[grp]), grp_axs):
            # One sub-group per mouse; the same group object is re-labeled
            # between the two plot_metric calls ('near' then 'away').
            mouse_grp = grp.subGroup(
                [expt for expt in grp if expt.parent == mouse],
                label='near')
            colors = color_cycle()
            plot_metric(
                ax, [mouse_grp],
                metric_fn=ra.number_licks_near_rewards,
                plot_method='line', groupby=groupby, plotby=plotby,
                label_every_n=label_every_n, colors=[colors.next()],
                activity_label='Number of licks', **plot_kwargs)
            mouse_grp.label('away')
            plot_metric(
                ax, [mouse_grp],
                metric_fn=ra.number_licks_away_rewards,
                plot_method='line', groupby=groupby, plotby=plotby,
                label_every_n=label_every_n, colors=[colors.next()],
                activity_label='Number of licks', **plot_kwargs)
            ax.set_title(mouse.get('mouseID'))
    # Only the first column keeps y-axis labels/ticks.
    for ax in axs[:, 1:].flat:
        ax.set_ylabel('')
        ax.tick_params(labelleft=False)
    for ax in axs.flat:
        ax.set_xlabel('')
        ax.tick_params(top=False)
    # Match the upper y-limit across all panels.
    max_licks = max(ax.get_ylim()[1] for ax in axs.flat)
    for ax in axs.flat:
        ax.set_ylim(top=max_licks)
    # Keep the legend only on the very first panel.
    for ax in list(axs.flat)[1:]:
        legend = ax.get_legend()
        if legend is not None:
            legend.set_visible(False)
    for grp, ax in zip(expt_grps, axs[:, -1]):
        plotting.right_label(ax, grp.label())
    fig.suptitle(
        'Number of licks near/away from reward\ngroupby = {}'.format(groupby))
    return fig
def salience_responsiveness_figure_by_cell(
        expt_grp, stimuli, plotby, method='responsiveness', pre_time=None,
        post_time=None, channel='Ch2', label=None, roi_filter=None,
        exclude_running=False, rasterized=False, save_data=False,
        n_bootstraps=10000, n_processes=1):
    """Plots the stimulus responsiveness versus the 'plotby'. For example, the
    response to water rewards over days of exposure.

    Yields 1 figure per ROI with a grid of plots, 1 per stimulus in 'stimuli'.

    Parameters
    ----------
    expt_grp, channel, label, roi_filter
        Standard analysis arguments.
    stimuli : list
        List of stimuli.
    plotby : list
        List of keys that will determine the x-axis of the plot.
        See lab.plotting.plotting_helpers.prepare_dataframe
    method : 'responsiveness' or 'peak'
        Method to determine the response to the stimuli.
    pre_time, post_time : float
        Duration of baseline (pre_time) and response time (post_time).
    n_bootstraps, n_processes : int
        Currently unused here; accepted for API compatibility --
        TODO confirm against callers before removing.

    Yields
    ------
    mpl.pyplot.Figure
    """
    # data_to_save = {}
    N_COLS = 4
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
    if not len(stimuli):
        warn("No stimuli to analyze, aborting.")
        return
    # BUG FIX: the grid size must be computed *after* any stimuli are
    # excluded, and the number of unused axes is the grid size minus the
    # stimulus count. It was previously computed with '%', which
    # under-counted whenever the grid exceeded the stimulus list by a
    # full multiple (e.g. 2 stimuli in a 1x4 grid gave 0 instead of 2).
    n_rows = int(np.ceil(len(stimuli) / float(N_COLS)))
    n_extra_axs = n_rows * N_COLS - len(stimuli)
    if method == 'responsiveness':
        activity_label = 'Responsiveness (dF/F)'
    elif method == 'peak':
        activity_label = 'Peak responsiveness (dF/F)'
    else:
        raise ValueError("Unrecognized 'method' value")
    responsiveness = {}
    all_roi_tuples = set()
    for stimulus in stimuli:
        stimulus_dfs = []
        for key, grp in expt_grp.groupby(plotby):
            df = ia.response_magnitudes(
                grp, stimulus, method=method, pre_time=pre_time,
                post_time=post_time, data=None,
                exclude='running' if exclude_running else None,
                channel=channel, label=label,
                roi_filter=roi_filter, return_df=True)
            # Put the grouping info back in the dataframe
            # For example:
            #  plotby = ['condition_day']
            #  keys will be ['A_0', 'A_1', 'B_0', etc...]
            #  So df['condition_day'] == 'A_0' for the first group, etc.
            for key_value, grouping in zip(key, plotby):
                df[grouping] = key_value
            stimulus_dfs.append(df)
        joined_df = pd.concat(
            stimulus_dfs, ignore_index=True)
        # (mouseID, location, roi_id) uniquely identifies an ROI across
        # experiments.
        joined_df['roi_tuple'] = zip(
            joined_df['mouse'].apply(lambda mouse: mouse.get('mouseID')),
            joined_df['uniqueLocationKey'],
            joined_df['roi_id'])
        responsiveness[stimulus] = joined_df
        all_roi_tuples = all_roi_tuples.union(joined_df['roi_tuple'])
    for roi_tuple in sorted(all_roi_tuples):
        fig, axs = plt.subplots(
            n_rows, N_COLS, figsize=(15, 8), squeeze=False,
            subplot_kw={'rasterized': rasterized})
        fig.subplots_adjust(hspace=0.3)
        first_col_axs = axs[:, 0]
        fig.suptitle(roi_tuple)
        min_response_y_lim, max_response_y_lim = np.inf, -np.inf
        for ax, stimulus in it.izip(axs.flat, stimuli):
            data = responsiveness[stimulus]
            # Restrict to the current ROI.
            data = data[data['roi_tuple'].apply(lambda val: val == roi_tuple)]
            plotting.plot_dataframe(
                ax, [data],
                activity_label=activity_label, groupby=None, plotby=plotby,
                orderby=None, plot_method='line', plot_shuffle=False,
                shuffle_plotby=False, pool_shuffle=False,
                agg_fn=np.mean)
            min_response_y_lim = np.amin(
                [min_response_y_lim, ax.get_ylim()[0]])
            max_response_y_lim = np.amax(
                [max_response_y_lim, ax.get_ylim()[1]])
            ax.set_title(stimulus)
            plt.setp(ax.get_xticklabels(), rotation='40',
                     horizontalalignment='right')
        # Hide grid cells beyond the last stimulus.
        if n_extra_axs:
            for ax in np.array(axs.flat)[-n_extra_axs:]:
                ax.set_visible(False)
        for ax in set(axs.flat).difference(first_col_axs):
            ax.set_ylabel('')
        for ax in axs.flat:
            ax.set_ylim(min_response_y_lim, max_response_y_lim)
            ax.set_xlabel('')
            legend = ax.get_legend()
            if legend is not None:
                legend.set_visible(False)
        yield fig
def behavior_psth_figure(
        expt_grps, stimulus_key, data_key, groupby, rasterized=False,
        **behaviorPSTH_kwargs):
    """Returns a figure of behavior data PSTHS of experiment subgroups.

    Figure will be an array of plots, n_expt_grps x n_groupby_groups.
    """
    all_expts = lab.ExperimentGroup(list(it.chain(*expt_grps)))
    groupings = list(all_expts.groupby(groupby))
    fig, axs = plt.subplots(
        len(expt_grps), len(groupings), figsize=(15, 8), squeeze=False,
        subplot_kw={'rasterized': rasterized})
    for col_axs, (grp_label, subgrp) in zip(axs.T, groupings):
        col_axs[0].set_title(str(grp_label))
        for ax, expt_grp in zip(col_axs, expt_grps):
            # Restrict each group to the experiments in this subgroup.
            subset = copy(expt_grp)
            subset.filter(lambda expt: expt in subgrp)
            if not len(subset):
                ax.set_visible(False)
                continue
            lab.analysis.behavior_analysis.plotBehaviorPSTH(
                subset, stimulus_key, data_key, ax=ax,
                **behaviorPSTH_kwargs)
    for ax, expt_grp in zip(axs[:, -1], expt_grps):
        plotting.right_label(ax, expt_grp.label())
    fig.suptitle('{} triggered {} PSTH\ngroupby={}'.format(
        stimulus_key, data_key, groupby))
    return fig
|
class Solution(object):
    """LeetCode-style container for the island perimeter problem."""

    def islandPerimeter(self, grid):
        """Return the perimeter of the single island in a binary grid.

        Every land cell contributes 4 edges; each pair of adjacent land
        cells hides 2 of them, so only right/down neighbors need counting.

        :type grid: List[List[int]]
        :rtype: int
        """
        land_cells = 0
        shared_edges = 0
        total_rows = len(grid)
        for r, row in enumerate(grid):
            row_len = len(row)
            for c, cell in enumerate(row):
                if cell != 1:
                    continue
                land_cells += 1
                # Count each adjacency once: look down and right only.
                if r + 1 < total_rows and grid[r + 1][c] == 1:
                    shared_edges += 1
                if c + 1 < row_len and row[c + 1] == 1:
                    shared_edges += 1
        return 4 * land_cells - 2 * shared_edges
|
import time
import cv2
import numpy as np
from matplotlib import pyplot as plt
def empty_callback(value):
    """No-op trackbar callback; OpenCV requires one, but the loops below
    poll trackbar positions instead of reacting to events."""
    return None
def ex_1():
    """Exercise 1: interactively compare box, Gaussian, and median blur."""
    def load_image_and_filer(image_filename):
        # Show the original next to three smoothed variants; the (odd)
        # kernel size is driven by a trackbar. Press 'q' to move on.
        img = cv2.imread(image_filename, cv2.IMREAD_COLOR)
        cv2.namedWindow('img')
        cv2.createTrackbar('kernel_size', 'img', 0, 50, empty_callback)
        while True:
            # Map trackbar position 0..50 to odd kernel sizes 1, 3, 5, ...
            kernel_size = 2 * cv2.getTrackbarPos('kernel_size', 'img') + 1
            img_after_blur = cv2.blur(img, (kernel_size, kernel_size))
            img_after_gaussian = cv2.GaussianBlur(
                img, (kernel_size, kernel_size), 0)
            img_after_median = cv2.medianBlur(img, kernel_size)
            cv2.imshow('img', img)
            cv2.imshow('img_after_blur', img_after_blur)
            cv2.imshow('img_after_gaussian', img_after_gaussian)
            cv2.imshow('img_after_median', img_after_median)
            if cv2.waitKey(10) == ord('q'):
                break
        cv2.destroyAllWindows()

    load_image_and_filer('../_data/s01e03/lenna_noise.bmp')
    load_image_and_filer('../_data/s01e03/lenna_salt_and_pepper.bmp')
def ex_2():
    """Exercise 2: threshold an image, then erode or dilate the result."""
    # element =
    # [ 0 1 0
    #   1 0 1
    #   0 1 0 ]
    img = cv2.imread('../_data/no_idea.jpg', cv2.IMREAD_GRAYSCALE)
    cv2.namedWindow('img')
    cv2.createTrackbar('threshold', 'img', 0, 255, empty_callback)
    cv2.createTrackbar('kernel_size', 'img', 1, 10, empty_callback)
    cv2.createTrackbar('0: erosion, 1: dilation', 'img', 0, 1, empty_callback)
    while True:
        threshold = cv2.getTrackbarPos('threshold', 'img')
        # Odd kernel sizes only: 3, 5, ..., 21.
        kernel_size = 2 * cv2.getTrackbarPos('kernel_size', 'img') + 1
        use_dilation = cv2.getTrackbarPos('0: erosion, 1: dilation', 'img')
        _, img_after_threshold = cv2.threshold(
            img, threshold, 255, cv2.THRESH_BINARY)
        kernel = np.ones((kernel_size, kernel_size), np.uint8)
        if use_dilation:
            img_after_morphological = cv2.dilate(
                img_after_threshold, kernel, iterations=1)
        else:
            img_after_morphological = cv2.erode(
                img_after_threshold, kernel, iterations=1)
        cv2.imshow('img', img)
        cv2.imshow('img_after_threshold', img_after_threshold)
        cv2.imshow('img_after_morphological', img_after_morphological)
        if cv2.waitKey(50) == ord('q'):
            break
    cv2.destroyAllWindows()
def ex_3():
    """Exercise 3: compare a hand-written 3x3 box blur against cv2.blur
    and cv2.filter2D, timing each and checking the results agree."""
    img: np.ndarray = cv2.imread('../_data/no_idea.jpg', cv2.IMREAD_COLOR)
    if len(img.shape) == 2:
        print('grayscale')
    elif len(img.shape) == 3:
        print('color')
    else:
        raise ValueError('Too many channels')
    cpy = np.array(img)  # writable copy to hold the manual-blur output
    start_time = time.time()
    # NOTE(review): runs once (not 1000x like the OpenCV variants below) --
    # the pure-Python blur is far too slow to repeat.
    for i in range(0, 1):
        window_size = 3  # 3x3
        kernel_size = window_size // 2  # half-width of the window
        # NOTE(review): the inner 'i' shadows the outer timing-loop 'i';
        # harmless here only because the outer loop runs a single time.
        for i in range(kernel_size, cpy.shape[0] - kernel_size):
            for j in range(kernel_size, cpy.shape[1] - kernel_size):
                # Per-pixel BGR accumulator for the 3x3 neighborhood sum.
                tmp = np.full((1, 3), 0, dtype=np.float64)
                # tmp = 0
                for k in range(-kernel_size, 1+kernel_size):
                    for l in range(-kernel_size, 1+kernel_size):
                        tmp += img[i + k, j + l]
                cpy[i, j] = np.round(tmp / (window_size*window_size))
    print("--- %s seconds ---" % (time.time() - start_time))
    start_time = time.time()
    for i in range(0, 1000):
        img_blurred = cv2.blur(img, (3, 3))
    print("--- %s seconds ---" % (time.time() - start_time))
    start_time = time.time()
    # Equivalent averaging kernel for filter2D.
    kernel = np.full((3, 3), 1/9, dtype=np.float32)
    for i in range(0, 1000):
        img_filter2d = cv2.filter2D(img, -1, kernel=kernel)
    print("--- %s seconds ---" % (time.time() - start_time))
    # Compare interiors only: the manual blur leaves a 1px border untouched.
    print(np.array_equal(img_blurred, img_filter2d))
    print(np.array_equal(img_blurred[1:-1, 1:-1], cpy[1:-1, 1:-1]))
    print(np.array_equal(cpy[1:-1, 1:-1], img_filter2d[1:-1, 1:-1]))
    key = ord('a')
    while key != ord('q'):
        cv2.imshow('img', img)
        cv2.imshow('cpy', cpy)
        cv2.imshow('img_blurred', img_blurred)
        cv2.imshow('img_filter2d', img_filter2d)
        # cv2.imshow('img_own_blur', img_after_threshold)
        # cv2.imshow('img_after_morphological', img_after_morphological)
        key = cv2.waitKey(50)
    cv2.destroyAllWindows()
def main():
    """Entry point: run the currently selected exercise."""
    # ex_1()
    # ex_2()
    ex_3()
if __name__ == '__main__':
main()
|
from surveymonkey.calls.base import Call
class User(Call):
    """Wrapper for the SurveyMonkey 'get user details' API call."""
    def __get_user_details(self, **kwargs):
        # The bound method itself is passed to make_call, which can read
        # the 'allowed_params' attribute set below -- presumably to
        # validate kwargs before issuing the request; confirm against
        # Call.make_call.
        return self.make_call(self.__get_user_details, {}, kwargs)
    # This endpoint accepts no parameters.
    __get_user_details.allowed_params = []
    # Public alias for the name-mangled method.
    get_user_details = __get_user_details
|
try:
import json
except ImportError:
from django.utils import simplejson as json
from respite.serializers.jsonserializer import JSONSerializer
class JSONPSerializer(JSONSerializer):
    """JSON serializer that wraps the payload in a JSONP callback call."""

    def serialize(self, request):
        """Return the JSON payload wrapped as ``<callback>(<json>)``.

        The callback name comes from the ``callback`` query parameter,
        falling back to the literal name ``'callback'`` when absent.
        """
        payload = super(JSONPSerializer, self).serialize(request)
        callback = request.GET.get('callback', 'callback')
        return '%s(%s)' % (callback, payload)
|
import sys
import logging
from pyqtgraph.Qt import QtGui, QtCore
from PyQt4.Qt import QMutex
import pyqtgraph as pg
import template
import datetime
import qdarkstyle
import krakenex
# Module-level logging: DEBUG and above go both to app_log.log and stderr,
# using the same timestamped format.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('app_log.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
class DataGenerator(QtCore.QObject):
    """Worker object that polls the Kraken public API for XBT/EUR order-book
    depth and spread data and publishes the processed series via signals.

    Intended to be moved to a QThread; `generateData` loops until `run`
    is cleared by `stop_thread`.
    """

    # (cum. bid volume, cum. ask volume, bid prices, ask prices,
    #  bid volumes, ask volumes)
    newData1 = QtCore.pyqtSignal(object, object, object, object, object, object)
    # (bids, asks, spread, date label, unix timestamps)
    newData2 = QtCore.pyqtSignal(object, object, object, object, object)

    def __init__(self, parent=None, delay=1000):
        """`delay` is the pause between polls, in milliseconds."""
        QtCore.QObject.__init__(self)
        self.k = krakenex.API()
        self.parent = parent
        self.delay = delay
        self.mutex = QMutex()
        self.run = True
        self.depth = {}
        self.spread = {}
        self.bids_price = []
        self.bids_volume = []
        self.asks_price = []
        self.asks_volume = []
        self.unx_time_stmps = []
        self.bids = []
        self.asks = []
        self.spr = []
        self.cum_volume_a = []
        self.cum_volume_b = []
        self.last = 0
        self.bids_volume_sum = 0
        self.asks_volume_sum = 0
        self.tstmps = []
        self.tdisp = []
        self.ddisp = []
        self.days = ''
        self.ticks = {}
        # self.count_pair = 100

    def generateData(self):
        """Poll, process and emit market data until `run` is cleared."""
        while True:
            if not self.run:
                break
            try:
                logger.debug('self.run in generateData is {}'.format(self.run))
                self.mutex.lock()
                try:
                    self.depth = self.k.query_public('Depth', {'pair': 'XXBTZEUR'})
                    self.spread = self.k.query_public('Spread', {'pair': 'XXBTZEUR'})
                    self.bids_price = [float(i[0]) for i in self.depth['result']['XXBTZEUR']['bids']]
                    logger.debug('sending: '+'length of bids_price = {}'.format(len(self.bids_price)))
                    self.bids_volume = [float(i[1]) for i in self.depth['result']['XXBTZEUR']['bids']]
                    logger.debug('sending length of bids_volume = {}'.format(len(self.bids_volume)))
                    self.asks_price = [float(i[0]) for i in self.depth['result']['XXBTZEUR']['asks']]
                    self.asks_volume = [float(i[1]) for i in self.depth['result']['XXBTZEUR']['asks']]
                    self.unx_time_stmps = [int(i[0]) for i in self.spread['result']['XXBTZEUR']]
                    self.bids = [float(i[1]) for i in self.spread['result']['XXBTZEUR']]
                    self.asks = [float(i[2]) for i in self.spread['result']['XXBTZEUR']]
                    self.spr = [round(abs(i - j), 4) for i, j in zip(self.bids, self.asks)]
                    self.last = self.spread['result']['last']
                    self.cum_volume_b = [None] * len(self.bids_volume)
                    self.cum_volume_a = [None] * len(self.asks_volume)
                    self.bids_volume_sum = sum(self.bids_volume)
                    self.asks_volume_sum = sum(self.asks_volume)
                    # NOTE(review): the [i:-1] slice excludes the final level, so
                    # each entry also includes the last level's volume — verify
                    # whether [i:] was intended. Kept as-is to preserve output.
                    for i in range(len(self.bids_volume)):
                        self.cum_volume_b[i] = round(self.bids_volume_sum - sum(self.bids_volume[i:-1]), 4)
                    for i in range(len(self.asks_volume)):
                        self.cum_volume_a[i] = round(self.asks_volume_sum - sum(self.asks_volume[i:-1]), 4)
                    self.tstmps = [datetime.datetime.fromtimestamp(i) for i in self.unx_time_stmps]
                    self.tdisp = [i.strftime('%H:%M:%S') for i in self.tstmps]
                    self.ddisp = [i.strftime('%Y-%m-%d') for i in self.tstmps]
                    self.days = ' - '.join(list(set(self.ddisp)))
                finally:
                    # BUGFIX: unlock in a finally block. Previously an exception
                    # raised after lock() left the (non-recursive) QMutex locked,
                    # deadlocking the next iteration's lock() call.
                    self.mutex.unlock()
                self.newData1.emit(self.cum_volume_b, self.cum_volume_a, self.bids_price, self.asks_price,
                                   self.bids_volume, self.asks_volume)
                self.newData2.emit(self.bids, self.asks, self.spr, self.days, self.unx_time_stmps)
                QtCore.QThread.msleep(self.delay)
                QtGui.QApplication.processEvents()
            except Exception:
                # BUGFIX: log the real traceback instead of a fixed guess
                # ('Check the order of arguments') that hid the actual error.
                logger.exception('Fetching or processing Kraken data failed')

    def stop_thread(self):
        """Ask the poll loop to exit after the current iteration."""
        self.run = False
        logger.debug('self.run in generateData method is {}'.format(self.run))
        logger.info('Both threads stopped')

    def restart_thread(self):
        """Re-enable the loop and resume polling.

        NOTE(review): this runs generateData on the caller's thread (the GUI
        thread when wired to a button) — confirm that is intended.
        """
        self.run = True
        logger.debug('self.run in generateData method is {}'.format(self.run))
        self.generateData()
        logger.info('Both threads started')
def int2dt(ts):
    """Convert a unix timestamp to a naive UTC datetime (for axis labels)."""
    return datetime.datetime.utcfromtimestamp(ts)
class MyThread(QtCore.QThread):
    """QThread that simply runs its own event loop (no run-method work)."""
    def run(self):
        self.exec_()
class TimeAxisItem(pg.AxisItem):
    """Axis item that renders unix-timestamp tick values as HH:MM:SS."""
    def __init__(self, *args, **kwargs):
        super(TimeAxisItem, self).__init__(*args, **kwargs)
    def tickStrings(self, values, scale, spacing):
        # Values are unix timestamps; format them as UTC wall-clock times.
        # return [QTime().addMSecs(value).toString('mm:ss') for value in values]
        return [int2dt(value).strftime("%H:%M:%S") for value in values]
class MainWin(QtGui.QMainWindow, template.Ui_MainWindow, template.Ui_Dialog):
    """Main window: wires the generated UI to two DataGenerator workers and
    four pyqtgraph plots (depth, volume, bid/ask, spread).
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but never used — confirm.
        pg.setConfigOption('background', '#0e1011') # '#31363b' -> background-color of GUI dark theme
        QtGui.QMainWindow.__init__(self)
        self.setupUi(self)
        # Menu actions.
        self.actionExit.setStatusTip('Exit the Application')
        self.actionExit.triggered.connect(self.close_application)
        self.actionAbout.setStatusTip('About the Application')
        self.actionAbout.triggered.connect(self.show_aboutdialog)
        # Time-based plots on the second tab; plot1/plot2 presumably come
        # from the generated template.setupUi — TODO confirm.
        self.plot3 = pg.PlotWidget(self.tab_2, axisItems={'bottom': TimeAxisItem(orientation='bottom')})
        self.plot3.addLegend()
        # self.plot3.setObjectName(_fromUtf8("plot3"))
        self.verticalLayout_3.addWidget(self.plot3)
        self.plot4 = pg.PlotWidget(self.tab_2, axisItems={'bottom': TimeAxisItem(orientation='bottom')})
        # self.plot4.setObjectName(_fromUtf8("plot4"))
        self.verticalLayout_3.addWidget(self.plot4)
        # Date label shown inside plot3.
        self.text = pg.TextItem()
        self.text.setColor('#00ff00')
        self.font = QtGui.QFont()
        self.font.setPointSize(12)
        self.font.setBold(1)
        self.text.setFont(self.font)
        self.labelStyle1 = {'color': '#F42935'}
        self.labelStyle2 = {'color': '#5DA5DB'}
        # Axis labels.
        self.plot1.setLabel('left', 'Cumulative Volume', units='')
        self.plot1.setLabel('bottom', 'Price', units='EUR')
        self.plot2.setLabel('left', 'Volume', units='')
        self.plot2.setLabel('bottom', 'Price', units='EUR')
        self.plot3.setLabel('left', 'Bids/Asks', units='EUR')
        # self.plot3.setLabel('bottom', 'Time', units='')
        self.plot4.setLabel('left', 'Spread', units='EUR')
        # self.plot4.setLabel('bottom', 'Time', units='')
        self.plot1.showGrid(x=True, y=True, alpha=0.5)
        self.plot2.showGrid(x=True, y=True, alpha=0.5)
        self.plot3.showGrid(x=True, y=True, alpha=0.5)
        self.plot4.showGrid(x=True, y=True, alpha=0.5)
        self.color_b = QtGui.QColor(145, 0, 0, 0.8)
        self.color_a = QtGui.QColor(0, 0, 0, 0.7)
        self.brush_b = QtGui.QBrush(self.color_b)
        self.brush_a = QtGui.QBrush(self.color_a)
        self.level = 0
        self.pen1 = pg.mkPen(color=(255, 0, 0, 100), width=1.5)
        self.pen2 = pg.mkPen(color=(0, 0, 255, 100), width=1.5)
        # Depth Graph
        self.curve1 = self.plot1.plot(fillLevel=-0.3, brush=(255, 0, 0, 100), name='Bids')
        self.curve2 = self.plot1.plot(fillLevel=-0.3, brush=(0, 0, 255, 100), name='Asks') # (109, 187, 255, 100)
        # self.curve1.setBrush(self.brush_b)
        # self.curve2.setBrush(self.brush_a)
        # self.curve1.setFillLevel(self.level)
        # self.curve2.setFillLevel(self.level)
        self.curve3 = self.plot2.plot()
        self.curve4 = self.plot2.plot()
        # Spread Graph
        self.curve5 = self.plot3.plot(name='Bids')
        self.curve6 = self.plot3.plot(name='Asks')
        self.curve7 = self.plot4.plot(fillLevel=-0.3, brush=(114, 114, 114))
        # Two workers on two threads, one per signal/plot group.
        self.thread1 = QtCore.QThread()
        self.dgen1 = DataGenerator(self, 3000)
        self.dgen1.moveToThread(self.thread1)
        self.dgen1.newData1.connect(self.update_plot1)
        self.thread1.started.connect(self.dgen1.generateData)
        self.thread1.start()
        self.thread2 = QtCore.QThread() # QtCore.QThread()
        self.dgen2 = DataGenerator(self, 3000)
        self.dgen2.moveToThread(self.thread2)
        self.dgen2.newData2.connect(self.update_plot2)
        self.thread2.started.connect(self.dgen2.generateData)
        self.thread2.start()
        # Start/stop/quit buttons control both workers.
        self.pushButton.clicked.connect(self.dgen1.stop_thread)
        self.pushButton.clicked.connect(self.dgen2.stop_thread)
        self.pushButton_2.clicked.connect(self.dgen1.restart_thread)
        self.pushButton_2.clicked.connect(self.dgen2.restart_thread)
        self.pushButton_7.clicked.connect(self.close_application)
    def close_application(self):
        """Log and terminate the whole process."""
        logger.info('Closing the application')
        sys.exit()
    def show_aboutdialog(self):
        """Show the modal About dialog built from the generated Ui_Dialog."""
        logger.info('Showing the about dialog')
        about_dialog = QtGui.QDialog()
        u = template.Ui_Dialog()
        u.setupUi(about_dialog)
        about_dialog.exec_()
    def update_plot1(self, cum_volume_b, cum_volume_a, bids_price, asks_price, bids_volume, asks_volume):
        """Slot for newData1: refresh depth and volume curves.

        Skips the update if the worker currently holds its mutex.
        """
        if self.dgen1.mutex.tryLock():
            self.dgen1.mutex.unlock()
            logger.debug('receiving first three data points of: '+'bids_price = [{},{},{}]'.format(bids_price[0],
                                                                                                  bids_price[1], bids_price[2]))
            self.curve1.setData(bids_price, cum_volume_b, pen=self.pen1)
            self.curve2.setData(asks_price, cum_volume_a, pen=self.pen2)
            self.curve3.setData(bids_price, bids_volume, pen=self.pen1)
            self.curve4.setData(asks_price, asks_volume, pen=self.pen2)
    def update_plot2(self, bids, asks, spr, days, unx_time_stmps):
        """Slot for newData2: refresh bid/ask and spread curves and the date label."""
        if self.dgen2.mutex.tryLock():
            self.dgen2.mutex.unlock()
            logger.debug('receiving first three data points of: ' + 'bids = [{},{},{}]'.format(bids[0], bids[1],
                                                                                               bids[2]))
            self.curve5.setData(unx_time_stmps, bids, pen=self.pen1)
            self.curve6.setData(unx_time_stmps, asks, pen=self.pen2)
            self.plot3.addItem(self.text)
            self.text.setText('Date: {}'.format(days))
            # Anchor the label at the left edge, just above the higher series.
            self.text.setPos(min(unx_time_stmps), max(max(bids), max(asks)) + 1)
            self.curve7.setData(unx_time_stmps, spr)
if __name__ == '__main__':
    # Build the Qt application with the dark stylesheet and run the event loop.
    app = QtGui.QApplication(sys.argv)
    app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
    main = MainWin()
    main.show()
    sys.exit(app.exec_())
    # import sys
    # if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
    #     QtGui.QApplication.instance().exec_()
|
import six
from .dsl import BaseQuery, MetaQuery
from .exceptions import NoQueryError
# Declarative specs for every supported Elasticsearch query type, consumed by
# MetaQuery/BaseQuery. Conventions used by the entries:
#   'field': True      -> the query is keyed by a document field
#   'args'             -> positional parameters; a dict maps a name to its
#                         default/container shape ('_query' marks a sub-query)
#   'kwargs'           -> optional parameters, always a tuple of names/dicts
#   'process'/'field_process' -> post-processing of the built body
QUERIES = {
    'match_all': {
        'kwargs': ('boost',),
    },
    # Full text queries
    #
    'match': {
        'field': True,
        'args': ('query',),
        'kwargs': (
            'operator', 'zero_terms_query', 'cutoff_frequency', 'boost', 'rewrite',
            'prefix_length', 'fuzziness', 'minimum_should_match', 'analyzer',
            'max_expansions',
        ),
    },
    'multi_match': {
        'args': ({'fields': []}, 'query'),
        'kwargs': (
            'operator', 'zero_terms_query', 'cutoff_frequency', 'boost', 'rewrite',
            'prefix_length', 'fuzziness', 'minimum_should_match', 'analyzer',
            'max_expansions',
        ),
    },
    'common': {
        'args': ('query',),
        'kwargs': (
            'minimum_should_match', 'high_freq', 'low_freq', 'high_freq_operator',
            'low_freq_operator', 'cutoff_frequency',
        ),
        'process': lambda q: {'body': q},
    },
    'query_string': {
        'args': ('query',),
        'kwargs': (
            {'fields': []}, 'default_field', 'default_operator', 'analyzer',
            'allow_leading_wildcard', 'lowercase_expanded_terms',
            'enable_position_increments', 'fuzzy_max_expansions', 'fuzziness',
            'fuzzy_prefix_length', 'phrase_slop', 'boost', 'analyze_wildcard',
            'auto_generate_phrase_queries', 'max_determinized_states',
            'minimum_should_match', 'lenient', 'locale', 'time_zone',
        ),
    },
    'simple_query_string': {
        'args': ('query',),
        'kwargs': (
            {'fields': []}, 'default_operator', 'analyzer', 'flags', 'locale', 'lenient',
            'lowercase_expanded_terms', 'analyze_wildcard', 'minimum_should_match',
        ),
    },
    # Term level queries
    #
    'term': {
        'field': True,
        'args': ('value',),
        'kwargs': ('boost',),
    },
    'terms': {
        'field': True,
        'value_only': True,
        'args': ({'value': ['']},),
    },
    'range': {
        'field': True,
        'kwargs': ('gte', 'gt', 'lte', 'lt'),
    },
    'exists': {
        'args': ('field',),
    },
    'missing': {
        'args': ('field',),
    },
    'prefix': {
        'field': True,
        'args': ('value',),
        'kwargs': ('boost',),
    },
    'wildcard': {
        'field': True,
        'args': ('value',),
        'kwargs': ('boost',),
    },
    'regexp': {
        'field': True,
        'args': ('value',),
        'kwargs': ('boost', 'flags'),
    },
    'fuzzy': {
        'field': True,
        'args': ('value',),
        'kwargs': ('boost', 'fuzziness', 'prefix_length', 'max_expansions'),
    },
    'type': {
        'args': ('value',),
    },
    'ids': {
        'args': ({'values': []},),
        'kwargs': ('type',),
    },
    # Compound queries
    #
    'constant_score': {
        'kwargs': ({'query': '_query'},),
    },
    'bool': {
        'kwargs': ({('must', 'must_not', 'should'): ['_query']},),
    },
    'function_score': {
        'args': ({'functions': []},),
        'kwargs': ({'query': '_query'},),
    },
    'dis_max': {
        'args': ({'queries': ['_query']},),
    },
    'boosting': {
        # BUGFIX: the trailing comma was missing, so 'kwargs' was a bare dict
        # instead of a one-element tuple like every other entry.
        'kwargs': ({('positive', 'negative'): '_query'},),
    },
    'indices': {
        'args': ({'indices': []},),
        'kwargs': ({('query', 'no_match_query'): '_query'},),
    },
    'limit': {
        'args': ('value',),
    },
    # Joining queries
    #
    'nested': {
        'args': ('path', {'query': '_query'}),
    },
    'has_child': {
        'args': ('type',),
        'kwargs': ({'query': '_query'},),
    },
    'has_parent': {
        'args': ('parent_type',),
        'kwargs': ({'query': '_query'},),
    },
    # Geo queries
    #
    'geo_shape': {
        'field': True,
        'kwargs': ('type', {'coordinates': []}),
        'field_process': lambda q: {'shape': q},
    },
    'geo_bounding_box': {
        'field': True,
        'kwargs': ('top_left', 'bottom_right'),
    },
    'geo_distance': {
        'field': True,
        'kwargs': ('lat', 'lon'),
    },
    'geo_distance_range': {
        'field': True,
        'kwargs': ('lat', 'lon'),
    },
    'geo_polygon': {
        'field': True,
        'args': ({'points': []},),
    },
    'geohash_cell': {
        'field': True,
        'kwargs': ('lat', 'lon'),
    },
    # Specialized queries
    #
    'more_like_this': {
        'args': ({'fields': []}, 'like_text'),
    },
    'template': {
    },
    'script': {
    },
    # Span queries
    #
    'span_term': {
        'field': True,
        'args': ('value',),
        'kwargs': ('boost',),
    },
    'span_multi': {
        'args': ({'match': '_query'},),
    },
    'span_first': {
        'args': ({'match': '_query'},),
    },
    'span_near': {
        'args': ({'clauses': ['_query']},),
    },
    'span_or': {
        'args': ({'clauses': ['_query']},),
    },
    'span_not': {
        'kwargs': ({('include', 'exclude'): '_query'},),
    },
    'span_containing': {
        'args': ({('little', 'big'): '_query'},),
    },
    'span_within': {
        'args': ({('little', 'big'): '_query'},),
    },
}
@six.add_metaclass(MetaQuery)
class Query(BaseQuery):
    """Query builder driven by the QUERIES table.

    MetaQuery presumably turns each entry of `_definitions` into a builder
    method — TODO confirm against the dsl module.
    """
    _eq_type = 'query'
    _definitions = QUERIES
    _exception = NoQueryError
|
import json
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet.threads import deferToThread
from twisted.web.server import NOT_DONE_YET
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
from backend import take_screenshot, run_script, screencast
def json_response(data, request):
    """Serialize *data* as JSON, set response headers and finish *request*."""
    payload = json.dumps(data)
    headers = request.responseHeaders
    for name, value in ((b"content-type", b"application/json"),
                        (b"connection", b"close"),
                        (b"content-length", str(len(payload)))):
        headers.addRawHeader(name, value)
    request.write(payload)
    request.finish()
class TakeScreenshot(Resource):
    """GET endpoint: take a screenshot in a worker thread, reply as JSON."""
    isLeaf = True
    def render_GET(self, request):
        # Run the blocking screenshot call off the reactor thread.
        d = deferToThread(take_screenshot)
        d.addCallback(lambda screenshot: json_response({'screenshot': screenshot}, request))
        # NOTE(review): no errback — a failure leaves the request hanging.
        return NOT_DONE_YET
class Screencast(Resource):
    """Routes /start and /stop to the shared ScreencastRecorder commands."""
    isLeaf = True
    def __init__(self):
        Resource.__init__(self)
        self.screencast_recorder = screencast.ScreencastRecorder()
        # NOTE(review): putChild paths normally carry no leading slash; these
        # children appear to be looked up manually via getStaticEntity below —
        # confirm getStaticEntity exists on this Resource subclass.
        self.putChild('/start', startScreencast(self.screencast_recorder))
        self.putChild('/stop', stopScreencast(self.screencast_recorder))
    def render_GET(self, request):
        # Manual substring routing on the request path; both branches answer
        # asynchronously from a worker thread.
        if "/start" in request.path:
            self.getStaticEntity('/start').render_GET(request)
            return NOT_DONE_YET
        if "/stop" in request.path:
            self.getStaticEntity('/stop').render_GET(request)
            return NOT_DONE_YET
        # NOTE(review): unknown paths fall through returning None — confirm intended.
        return
class BaseScreencastCommand(Resource):
    """Base for the screencast start/stop resources; holds the recorder."""
    def __init__(self, recorder):
        Resource.__init__(self)
        self.recorder = recorder
class startScreencast(BaseScreencastCommand):
    """GET: start the screencast recorder on a worker thread."""
    def render_GET(self, request):
        d = deferToThread(lambda: self.recorder.start())
        # Success and failure both answer with a JSON status payload.
        d.addCallback(lambda r: json_response(
            {'status': 0, 'output': "Screencast was started with PID #{}".format(self.recorder.pid)}, request)
        )
        d.addErrback(lambda f: json_response(
            {'status': 127, 'output': "Screencast starting was failed: {}".format(f)}, request)
        )
        return NOT_DONE_YET
class stopScreencast(BaseScreencastCommand):
    """GET: stop the recorder, convert the capture, and report the artifact path."""
    def render_GET(self, request):
        d = deferToThread(self.recorder.stop_and_convert)
        d.addCallback(lambda r: json_response(
            {'status': 0, 'output': "Screencast(PID:{}) was stopped and saved artifact to {}".format(
                self.recorder.pid, self.recorder.screencast_file_abspath
            )}, request)
        )
        d.addErrback(lambda f: json_response(
            {'status': 127, 'output': "Screencast stopping was failed: {}".format(f)}, request)
        )
        return NOT_DONE_YET
class RunScript(Resource):
    """Dispatches script execution to WebSocket or plain HTTP handling,
    depending on whether the request carries an Upgrade header."""
    isLeaf = True
    def __init__(self):
        Resource.__init__(self)
        self.putChild('/websocket', RunScriptWebSocket())
        self.putChild('/http', RunScriptHTTP())
    def render(self, request):
        # NOTE(review): relies on getStaticEntity for child lookup — confirm
        # it is provided by this Resource implementation.
        if request.requestHeaders.hasHeader('Upgrade'):
            self.getStaticEntity('/websocket').render(request)
        else:
            self.getStaticEntity('/http').render_POST(request)
        return NOT_DONE_YET
class RunScriptHTTP(Resource):
    """POST {"script": ..., "command": ...?}: run it in a thread and answer
    with {"status": ..., "output": ...}."""
    def render_POST(self, request):
        data = json.loads(request.content.read())
        d = deferToThread(run_script, data.get("script"), data.get("command", None))
        # run_script presumably returns a (status, output) tuple — TODO confirm.
        d.addCallback(lambda r: json_response({'status': r[0], 'output': r[1]}, request))
        d.addErrback(lambda f: json_response({'status': 127, 'output': str(f)}, request))
        return NOT_DONE_YET
class RunScriptWebSocketProtocol(WebSocketServerProtocol):
    """WebSocket protocol: each message is a JSON script request; the socket is
    closed with a status code and reason once the run completes."""
    def onMessage(self, payload, isBinary):
        data = json.loads(payload)
        # `self` is handed to run_script, presumably so it can stream output
        # over the socket while running — TODO confirm in backend.run_script.
        d = deferToThread(run_script, data.get("script"), data.get("command", None), self)
        d.addCallback(lambda r: self.endConnection(r))
        d.addErrback(lambda f: self.endConnection(f))
    def endConnection(self, result):
        # Close reason text is capped at 120 characters. A (0, output) tuple
        # means success; anything else closes with application code 3001.
        if isinstance(result, tuple) and result[0] == 0:
            self.sendClose(code=WebSocketServerProtocol.CLOSE_STATUS_CODE_NORMAL,
                           reason=unicode(result[1])[:120])
        else:
            self.sendClose(code=3001, reason=unicode(result)[:120])
class RunScriptWebSocket(WebSocketResource):
    """WebSocket resource bound to RunScriptWebSocketProtocol."""
    isLeaf = True
    def __init__(self):
        wsfactory = WebSocketServerFactory()
        wsfactory.protocol = RunScriptWebSocketProtocol
        # Echo the close code/reason back to the client.
        wsfactory.setProtocolOptions(echoCloseCodeReason=True)
        super(RunScriptWebSocket, self).__init__(wsfactory)
class ApiServer(Site):
    """Twisted Site exposing takeScreenshot, runScript and screencast."""
    # NOTE(review): class-level attribute — the same root Resource is shared
    # by every ApiServer instance. Confirm only one instance is ever created.
    root = Resource()
    def __init__(self):
        Site.__init__(self, self.root)
        self.root.server = self
        self.root.putChild('takeScreenshot', TakeScreenshot())
        self.root.putChild('runScript', RunScript())
        self.root.putChild('screencast', Screencast())
|
class EvolucionDiaria(models.Model):
    """Daily progress note for a patient."""
    paciente = models.ForeignKey(Paciente, on_delete = models.CASCADE)
    fecha_hora = models.DateTimeField()  # date and time of the note
    # NOTE(review): capitalized and misspelled ("Descripcion"); renaming
    # would require a migration, so it is only flagged here.
    Descricion = models.TextField()
class EncabezadoHistoriaIngreso(models.Model):
    """Header section of a patient's admission history: diagnoses,
    isolation requirements and risk-scale scores."""
    paciente = models.ForeignKey(Paciente, on_delete = models.CASCADE)
    diagnostico = models.TextField()
    otros_diagnosticos_y_antecedentes_patologicos=models.TextField()
    REQUIERE_AISLAMIENTO_CHOICES = (('SI', 'SI'),('NO','NO'),)
    requiere_aislamiento = models.CharField(max_length=50,choices=REQUIERE_AISLAMIENTO_CHOICES,)
    CUAL_AISLAMIENTO_CHOICES = (('CONTACTO', 'CONTACTO'),('RESPIRATORIO', 'RESPIRATORIO'),('GOTAS', 'GOTAS'),('ESPECIAL', 'ESPECIAL'),('NO','NO'),)
    cual_aislamiento = models.CharField(max_length=50,choices=CUAL_AISLAMIENTO_CHOICES, blank=True)
    # -------- risk scales (escalas de riesgo) ----------------
    # Braden: pressure-ulcer risk.
    BRADEN_CHOICES = (('RIESGO_BAJO', 'RIESGO_BAJO'),('RIESGO_MODERADO', 'RIESGO_MODERADO'),('RIESGO_ALTO', 'RIESGO_ALTO'),)
    braden = models.CharField(max_length=50,choices=BRADEN_CHOICES)
    # Morse: fall risk.
    MORSE_CHOICES = (('RIESGO_BAJO', 'RIESGO_BAJO'),('RIESGO_MODERADO', 'RIESGO_MODERADO'),('RIESGO_ALTO', 'RIESGO_ALTO'),)
    morse = models.CharField(max_length=50,choices=MORSE_CHOICES)
    # Barthel: activities-of-daily-living index (score ranges).
    BARTHEL_CHOICES = (('MAYOR_A_20', 'MAYOR_A_20'),('20_A_35', '20_A_35'),('40_a_55', '40_a_55'),('60_A_100', '60_A_100'))
    barthel = models.CharField(max_length=50,choices=BARTHEL_CHOICES)
class FooterHistoriaIngreso(models.Model):
    """Closing section of a patient's admission history: acceptance decision,
    pending items and transport requirements."""
    paciente = models.ForeignKey(Paciente, on_delete=models.CASCADE)
    ACEPTADO_CHOICES = (('SI', 'SI'), ('NO', 'NO'),)
    aceptado = models.CharField(max_length=2, choices=ACEPTADO_CHOICES)
    motivo_de_rechazo = models.CharField(max_length=100, blank=True)
    PENDIENTES_CHOICES = (('SI', 'SI'), ('NO', 'NO'),)
    pendientes = models.CharField(max_length=2, choices=PENDIENTES_CHOICES, blank=True)
    pendiente = models.CharField(max_length=100, blank=True)
    # BUGFIX: blank=True alone only relaxes form validation; the column stays
    # NOT NULL, so saving an empty date raised an IntegrityError. null=True
    # makes the optional dates storable.
    fecha_1 = models.DateField(blank=True, null=True)
    fecha_2 = models.DateField(blank=True, null=True)
    REQUIERE_TRASLADO_EN_AMBULANCIA_CHOICES = (('SI', 'SI'), ('NO', 'NO'),)
    requiere_traslado_en_ambulancia = models.CharField(max_length=2, choices=REQUIERE_TRASLADO_EN_AMBULANCIA_CHOICES, blank=True)
    TIPO_DE_AMBULANCIA_CHOICES = (('BASICA', 'BASICA'), ('MEDICALIZADA', 'MEDICALIZADA'),)
    # BUGFIX: max_length was 2, too short to store either choice value
    # ('BASICA' = 6, 'MEDICALIZADA' = 12 characters).
    tipo_de_ambulancia = models.CharField(max_length=12, choices=TIPO_DE_AMBULANCIA_CHOICES, blank=True)
    fecha_y_hora_valoracion = models.DateTimeField()
class ModeloHistoriaIngreso(models.Model):
    """Admission-history template (a titled form that groups sections)."""
    titulo = models.CharField(max_length=60)
    comentario = models.CharField(max_length=100, blank=True)
    # BUGFIX: blank=True without null=True made these optional in forms but
    # NOT NULL in the database, so saving without a date failed.
    fecha_publicacion = models.DateField(blank=True, null=True)
    fecha_modificacion = models.DateField(blank=True, null=True)

    def __str__(self):
        return '{}'.format(self.titulo)
class SeccionHistoriaIngreso(models.Model):
    """Numbered section within an admission-history template."""
    modelo_historia=models.ForeignKey(ModeloHistoriaIngreso, on_delete=models.CASCADE)
    numero_seccion = models.IntegerField()  # ordering within the template
    nombre_seccion = models.CharField(max_length=100)
    comentario = models.CharField(max_length=100, blank=True)
    def __str__(self):
        return '{}'.format(self.nombre_seccion)
class PreguntaHistoriaIngreso(models.Model):
    """Question inside a section: either multiple choice or free text."""
    seccion=models.ForeignKey(SeccionHistoriaIngreso, on_delete=models.CASCADE)
    numero_pregunta = models.IntegerField()  # ordering within the section
    pregunta_texto = models.CharField(max_length=200)
    TIPO_PREGUNTA_CHOICES = (('SELECCION_MULTIPLE', 'SELECCION_MULTIPLE'),('RESPUESTA_LIBRE', 'RESPUESTA_LIBRE'),)
    tipo_pregunta = models.CharField(max_length=20,choices=TIPO_PREGUNTA_CHOICES)
    def __str__(self):
        return '{}'.format(self.pregunta_texto)
class RespuestaSeleccionHistoriaIngreso(models.Model):
    """Predefined answer option for a multiple-choice question."""
    pregunta = models.ForeignKey(PreguntaHistoriaIngreso, on_delete=models.CASCADE)
    respuesta_text = models.CharField(max_length=200, blank=True)
    def __str__(self):
        return '{}'.format(self.respuesta_text)
class ResultadoHistoriaIngreso(models.Model):
    """A patient's recorded answer to one admission-history question: either a
    selected option (`respuesta`) or free text (`respuesta_libre`)."""
    paciente = models.ForeignKey(Paciente, on_delete=models.CASCADE)
    seccion = models.ForeignKey(SeccionHistoriaIngreso, on_delete=models.CASCADE)
    pregunta = models.ForeignKey(PreguntaHistoriaIngreso, on_delete=models.CASCADE)
    # BUGFIX: on_delete is mandatory on Django >= 2.0 (every other FK in this
    # module passes it); null=True is required for the FK to actually be
    # optional at the database level, matching blank=True.
    respuesta = models.ForeignKey(RespuestaSeleccionHistoriaIngreso, on_delete=models.CASCADE, blank=True, null=True)
    respuesta_libre = models.CharField(max_length=200)

    def __str__(self):
        return '{}'.format(self.paciente)
|
from test_framework.test_framework import mitcoinTestFramework
from test_framework.util import *
import os.path
class ReindexTest(mitcoinTestFramework):
    """Regression test: mine a few blocks, restart the node with -reindex,
    and verify the chain height is rebuilt from disk."""
    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)
    def setup_network(self):
        # Single node, no network split.
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir))
    def run_test(self):
        # Mine 3 blocks, stop the node, then restart it reindexing from disk.
        self.nodes[0].generate(3)
        stop_node(self.nodes[0], 0)
        wait_mitcoinds()
        self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
        assert_equal(self.nodes[0].getblockcount(), 3)
        print "Success"
if __name__ == '__main__':
    ReindexTest().main()
|
import subprocess
class ProxyTools(object):
    '''Main class with methods to list/modify mac SOCKS proxy status via the
    macOS `networksetup` command-line tool.'''

    def __init__(self, interface):
        # `interface` is the network service name networksetup expects
        # (e.g. "Wi-Fi") — confirm against `networksetup -listallnetworkservices`.
        self.interface_name = interface

    def is_proxy_on(self):
        '''Determines the current state of the SOCKS proxy. Returns True if the
        proxy is enabled, False if it is disabled, and the string 'Error' when
        the tool output cannot be parsed.'''
        raw = subprocess.check_output(['networksetup', '-getsocksfirewallproxy', self.interface_name])
        # The first output line is "Enabled: Yes"/"Enabled: No"; token [1]
        # therefore holds the state.
        state = raw.split()[1].decode()
        if state == 'Yes':
            return True
        elif state == 'No':
            return False
        else:
            # NOTE(review): returning a truthy string on failure is surprising,
            # but kept for backward compatibility with existing callers.
            return 'Error'

    def proxy_on(self):
        '''Turns the proxy on'''
        subprocess.call(['networksetup', '-setsocksfirewallproxystate', self.interface_name, 'on'])

    def proxy_off(self):
        '''Turns the proxy off'''
        subprocess.call(['networksetup', '-setsocksfirewallproxystate', self.interface_name, 'off'])

    def set_SOCKS_port(self, port):
        '''Configures the socks proxy to localhost:port. Takes one argument:
        port (int or str).'''
        # BUGFIX: coerce to str — subprocess argument lists must contain
        # strings, so passing an integer port previously raised TypeError.
        subprocess.call(['networksetup', '-setsocksfirewallproxy', self.interface_name, 'localhost', str(port), 'off'])
|
"""Retriever script for direct download of vertnet data"""
from builtins import str
from retriever.lib.models import Table
from retriever.lib.templates import Script
import os
from pkg_resources import parse_version
try:
from retriever.lib.defaults import VERSION
except ImportError:
from retriever import VERSION
class main(Script):
    """Retriever script for direct download of VertNet class-level datasets
    (amphibians, birds, fishes, mammals, reptiles)."""

    # Every VertNet table shares one schema: an auto primary key, a long run
    # of text columns (in this exact order), then derived flag/measurement
    # columns. Kept as data so the list is written once instead of being
    # repeated as ~190 hand-built tuples.
    _CHAR_COLUMNS = [
        "beginrecord", "icode", "title", "citation", "contact", "email",
        "emlrights", "gbifdatasetid", "gbifpublisherid", "doi", "migrator",
        "networks", "orgcountry", "orgname", "orgstateprovince", "pubdate",
        "source_url", "iptrecordid", "associatedmedia",
        "associatedoccurrences", "associatedorganisms",
        "associatedreferences", "associatedsequences", "associatedtaxa",
        "bed", "behavior", "catalognumber", "continent",
        "coordinateprecision", "coordinateuncertaintyinmeters", "country",
        "countrycode", "county", "dateidentified", "day", "decimallatitude",
        "decimallongitude", "disposition", "earliestageorloweststage",
        "earliesteonorlowesteonothem", "earliestepochorlowestseries",
        "earliesteraorlowesterathem", "earliestperiodorlowestsystem",
        "enddayofyear", "establishmentmeans", "eventdate", "eventid",
        "eventremarks", "eventtime", "fieldnotes", "fieldnumber",
        "footprintspatialfit", "footprintsrs", "footprintwkt", "formation",
        "geodeticdatum", "geologicalcontextid", "georeferencedby",
        "georeferenceddate", "georeferenceprotocol", "georeferenceremarks",
        "georeferencesources", "georeferenceverificationstatus", "group",
        "habitat", "highergeography", "highergeographyid",
        "highestbiostratigraphiczone", "identificationid",
        "identificationqualifier", "identificationreferences",
        "identificationremarks", "identificationverificationstatus",
        "identifiedby", "individualcount", "island", "islandgroup",
        "latestageorhigheststage", "latesteonorhighesteonothem",
        "latestepochorhighestseries", "latesteraorhighesterathem",
        "latestperiodorhighestsystem", "lifestage",
        "lithostratigraphicterms", "locality", "locationaccordingto",
        "locationid", "locationremarks", "lowestbiostratigraphiczone",
        "materialsampleid", "maximumdepthinmeters",
        "maximumdistanceabovesurfaceinmeters", "maximumelevationinmeters",
        "member", "minimumdepthinmeters",
        "minimumdistanceabovesurfaceinmeters", "minimumelevationinmeters",
        "month", "municipality", "occurrenceid", "occurrenceremarks",
        "occurrencestatus", "organismid", "organismname", "organismremarks",
        "organismscope", "othercatalognumbers", "pointradiusspatialfit",
        "preparations", "previousidentifications", "recordedby",
        "recordnumber", "reproductivecondition", "samplingeffort",
        "samplingprotocol", "sex", "startdayofyear", "stateprovince",
        "typestatus", "verbatimcoordinates", "verbatimcoordinatesystem",
        "verbatimdepth", "verbatimelevation", "verbatimeventdate",
        "verbatimlatitude", "verbatimlocality", "verbatimlongitude",
        "verbatimsrs", "waterbody", "year", "dctype", "modified", "language",
        "license", "rightsholder", "accessrights", "bibliographiccitation",
        "dc_references", "institutionid", "collectionid", "datasetid",
        "institutioncode", "collectioncode", "datasetname",
        "ownerinstitutioncode", "basisofrecord", "informationwithheld",
        "datageneralizations", "dynamicproperties", "scientificnameid",
        "namepublishedinid", "scientificname", "acceptednameusage",
        "originalnameusage", "namepublishedin", "namepublishedinyear",
        "higherclassification", "kingdom", "phylum", "class", "order",
        "family", "genus", "subgenus", "specificepithet",
        "infraspecificepithet", "taxonrank", "verbatimtaxonrank",
        "scientificnameauthorship", "vernacularname", "nomenclaturalcode",
        "taxonomicstatus", "keyname",
    ]
    # Derived VertNet columns (flags and measurements) that close the schema.
    _TRAILING_COLUMNS = [
        ("haslicense", ("int",)),
        ("vntype", ("char",)),
        ("rank", ("int",)),
        ("mappable", ("int",)),
        ("hashid", ("char",)),
        ("hastypestatus", ("int",)),
        ("wascaptive", ("int",)),
        ("wasinvasive", ("int",)),
        ("hastissue", ("int",)),
        ("hasmedia", ("int",)),
        ("isfossil", ("int",)),
        ("haslength", ("int",)),
        ("haslifestage", ("int",)),
        ("hasmass", ("int",)),
        ("hassex", ("int",)),
        ("lengthinmm", ("double",)),
        ("massing", ("double",)),
        ("lengthunitsinferred", ("char",)),
        ("massunitsinferred", ("char",)),
        ("underivedlifestage", ("char",)),
        ("underivedsex", ("char",)),
    ]

    def __init__(self, **kwargs):
        Script.__init__(self, **kwargs)
        self.title = "vertnet:"
        self.name = "vertnet"
        self.retriever_minimum_version = '2.0.dev'
        self.version = '1.4.1'
        self.ref = "http://vertnet.org/resources/datatoolscode.html"
        self.urls = {
            'amphibians': 'https://de.iplantcollaborative.org/anon-files//iplant/home/shared/commons_repo/curated/Vertnet_Amphibia_Sep2016/VertNet_Amphibia_Sept2016.zip',
            'birds': 'https://de.iplantcollaborative.org/anon-files//iplant/home/shared/commons_repo/curated/Vertnet_Aves_Sep2016/VertNet_Aves_Sept2016.zip',
            'fishes': 'https://de.iplantcollaborative.org/anon-files//iplant/home/shared/commons_repo/curated/Vertnet_Fishes_Sep2016/VertNet_Fishes_Sept2016.zip',
            'mammals': 'https://de.iplantcollaborative.org/anon-files//iplant/home/shared/commons_repo/curated/Vertnet_Mammalia_Sep2016/VertNet_Mammalia_Sept2016.zip',
            'reptiles': 'https://de.iplantcollaborative.org/anon-files//iplant/home/shared/commons_repo/curated/Vertnet_Reptilia_Sep2016/VertNet_Reptilia_Sept2016.zip'
        }
        self.description = " "
        self.keywords = ['Taxon > animals']
        # Compatibility shims for retriever versions before 2.0.
        if parse_version(VERSION) <= parse_version("2.0.0"):
            self.shortname = self.name
            self.name = self.title
            self.tags = self.keywords

    def download(self, engine=None, debug=False):
        """Download each class archive (if not cached) and load its CSV."""
        Script.download(self, engine, debug)
        engine = self.engine
        file_names = [
            ('vertnet_latest_amphibians.csv', 'amphibians'),
            ('vertnet_latest_birds.csv', 'birds'),
            ('vertnet_latest_fishes.csv', 'fishes'),
            ('vertnet_latest_mammals.csv', 'mammals'),
            ('vertnet_latest_reptiles.csv', 'reptiles'),
        ]
        for filename, tablename in file_names:
            table = Table(str(tablename), delimiter=',')
            # All tables in the VertNet data share the same field names.
            table.columns = ([("record_id", ("pk-auto",))]
                             + [(name, ("char",)) for name in self._CHAR_COLUMNS]
                             + self._TRAILING_COLUMNS)
            engine.table = table
            if not os.path.isfile(engine.format_filename(filename)):
                engine.download_files_from_archive(self.urls[tablename], [filename], filetype="zip", archivename="vertnet_latest_" + str(tablename))
            engine.create_table()
            engine.insert_data_from_file(engine.format_filename(str(filename)))
SCRIPT = main()
|
"""
Atheloph: an IRC bot that writes a log.
TODOs:
DONE) implement proactive ping so bot knows if it's disconnected
DONE) implement auto-reconnect
3) HTML logs with one anchor per line (or write a separate script to convert text to html)
4) NICK and TOPIC functions
0) regularly tidy up code!
"""
import socket
import sys
import datetime
import time
import select
# Bot configuration.
BOT_QUIT = "hau*ab"  # chat command that makes the bot quit
SERVER = 'chat.freenode.net'
PORT = 6667
REALNAME = "ateloph"
NICK = ['ateltest']  # candidate nicknames, cycled on each reconnect attempt
IDENT = "posiputt"
CHAN = '#ateltest'
ENTRY_MSG = 'entry.'
INFO = 'info.'
FLUSH_INTERVAL = 3 # num of lines to wait between log buffer flushes
CON_TIMEOUT = 260.0  # presumably seconds of silence before the connection counts as dead — confirm against the select() call
PT_PAUSE = 10 # sleep time before reconnecting after ping timeout
log_enabled = False  # logging is toggled at runtime
'''
Secure shutdown:
The method closes the socket and flushes the buffer to the log.
Input: Socket socket, String msg, List buf
Outcome: Quits program.
'''
def shutdown(socket, msg, buf):
    # NOTE(review): the `socket` parameter shadows the stdlib socket module
    # within this function; renaming would change the signature, so only noted.
    socket.close()
    # flush_log returns an empty buffer; the value is unused since we exit next.
    buf = flush_log(buf)
    print msg
    sys.exit("Exiting. Log has been written.")
def flush_log(buf):
    '''
    Flush log:
    append the buffered log text to today's log file (YYYY-MM-DD.log)
    and return an empty buffer so the caller can reset its own copy.
    Input: String buf
    Return: empty String buf
    '''
    now = datetime.datetime.today()
    # FIX: removed the redundant out.close() -- the with-statement
    # already closes the file -- and the commented-out debug prints.
    with open(str(now.date()) + '.log', 'a') as out:
        out.write(buf)
    return ""
def conbot(connects):
    '''
    Open a fresh connection to the IRC server and register the bot
    (NICK + USER); the nick is picked from NICK by reconnect count.
    Input: int connects (number of connection attempts so far)
    Return: the connected socket
    '''
    sock = socket.socket()
    sock.connect((SERVER, PORT))
    sock.send('NICK ' + NICK[connects%len(NICK)] + '\n')
    sock.send('USER ' + IDENT + ' ' + SERVER +' bla: ' + REALNAME + '\n')
    return sock
def parse(line):
    # Parser to get rid of irrelevant information
    """Turn one raw IRC protocol line into a formatted log line.

    Picks a formatter by the IRC command word (PRIVMSG, JOIN, QUIT,
    NICK, TOPIC, ...) via a dispatch table and returns its result
    followed by a newline, or '' when no formatter matches.
    """
    def log_privmsg(timestamp, nickname, words):
        '''
        log_privmsg
        format IRC PRIVMSG for log
        input: string timestamp, string nickname, list words
        return: string logline
        '''
        #print "in log_privmsg"
        message = ''
        message_startindex = 3
        separator = ':'
        words[3] = words[3][1:] # remove the leading colon
        # after the colon strip a leading control character remains, so
        # [1:] tests for an ACTION (/me) message -- presumably the CTCP
        # \x01 marker; confirm against a live protocol trace
        if words[3][1:] == 'ACTION':
            separator = ''
            message_startindex = 4
            '''
            remove last character of message
            for it is a special character
            that we! shall not! show!
            '''
            words[-1] = words[-1][:-1]
        else:
            pass
        message = ' '.join(words[message_startindex:])
        logline = ' '.join([timestamp, nickname+separator , message])
        #print "log_privmsg ended"
        return logline
    def log_join(timestamp, nickname, words):
        '''
        log_join
        format IRC JOIN for log
        input: string timestamp, string nickname, list words
        return: string logline
        '''
        #print "in log_join"
        channel = words[2]
        logline = ' '.join([timestamp, nickname, 'joined', channel])
        #print "log_join ended"
        return logline
    def log_quit(timestamp, nickname, words):
        '''
        log_quit
        format IRC QUIT for log
        input: string timestamp, string nickname, list words
        return: string logline
        '''
        #print "in log_quit"
        #channel = words[2]
        # QUIT/PART carry no usable channel here, so the configured CHAN is logged
        logline = ' '.join([timestamp, nickname, 'left', CHAN])
        #print "log_part ended"
        return logline
    def log_nick(timestamp, nickname, words):
        '''
        log_nick
        format IRC NICK for log
        input: string timestamp, string nickname, list words
        return string logline
        '''
        print words
        # drop the leading character (the colon prefix, presumably)
        new_nickname = words[2][1:]
        logline = ' '.join([timestamp, nickname, 'is now known as', new_nickname])
        return logline
    def log_topic(timestamp, nickname, words):
        '''
        log_topic
        format IRC TOPIC for log
        input: string timestamp, string nickname, list words
        return string logline
        '''
        print words
        '''
        join only the words of the topic
        then remove leading colon
        '''
        topic = ' '.join(words[3:])[1:]
        logline = ' '.join([timestamp, nickname, 'changed the topic to:', topic])
        return logline
    # dispatch table: IRC command word -> formatter function
    functions = {
        'PRIVMSG': log_privmsg,
        'NOTICE': log_privmsg,
        'JOIN': log_join,
        'QUIT': log_quit,
        'PART': log_quit,
        'NICK': log_nick,
        'TOPIC': log_topic
    }
    out = ''
    print line
    words = line.split()
    timestamp = datetime.datetime.today().strftime("%H:%M:%S")
    # words[0] looks like ':nick!user@host' -> keep only the nick part
    nickname = words[0].split('!')[0][1:]
    #print nickname
    indicator = words[1]
    try:
        l = functions[indicator](timestamp, nickname, words)
        #print l
        out = l + '\n'
    except Exception as e:
        # unknown command words (server numerics etc.) produce no log output
        print 'Exception in parse - failed to pass to any appropriate function: ' + str(e)
    return out
def main():
    '''
    Main loop: connect to the IRC server, read incoming data, answer
    PINGs, buffer loggable events via parse(), and flush them to the
    daily log file. Reconnects when the connection times out.
    '''
    recv_time = time.time() # time of most recent data from socket
    s = socket.socket()
    line = '' # data from socket
    line_tail = '' # helper to deal with cropped lines
    loglines = '' # lines to be written to the log file
    reconnect = True
    connects = 0
    joined = False # True if bot is in a channel
    run = True
    buf = [] # data from socket split by EOL
    try:
        while run:
            clean_eol = False
            log_enabled = False
            timestamp = datetime.datetime.today().strftime("%H:%M:%S")
            '''
            detect connection loss
            try to reconnect
            '''
            if time.time() - recv_time > CON_TIMEOUT:
                print("connection lost.")
                reconnect = True
                print("set reconnect: True")
                joined = False
                print("set joined: False")
                loglines += timestamp + ' Connection lost.\n'
            if reconnect:
                try:
                    s.close()
                    print("socket closed")
                except Exception as recon_e:
                    # FIX: was str(e), which raised a NameError inside
                    # this handler (the bound name is recon_e)
                    print("Exception trying to reconnect: " + str(recon_e))
                print("connecting ...")
                s = conbot(connects)
                connects += 1
                s.setblocking(0) # needed for the select below
                reconnect = False
                print("set reconnect: False")
                loglines += timestamp + ' Connecting to ' + SERVER + '\n'
            '''
            wait up to 10s for data; avoids busy-looping on the
            non-blocking socket (and thus reconnect spam)
            '''
            # FIX: select() returns a 3-tuple which is ALWAYS truthy, so
            # the old `if s_ready:` could never take its else branch.
            # Test the list of readable sockets instead. The old
            # `else: reconnect = True` was dead code and is dropped --
            # connection loss is detected by the CON_TIMEOUT check above.
            readable, _, _ = select.select([s], [], [], 10)
            if readable:
                '''
                avoid 'resource temporarily not available' error
                because of s.setblocking(0) above
                '''
                try:
                    data = s.recv(2048)
                except:
                    data = ''
                # FIX: refresh recv_time only on real data; recv()
                # returning '' means EOF, and refreshing then would keep
                # the CON_TIMEOUT check above from ever firing
                if data:
                    recv_time = time.time()
                line = line_tail + data
                buf = line.split('\n')
                '''
                avoid line stubs
                '''
                if line[-1:] == '\n':
                    clean_eol = True
                    # FIX: clear the consumed tail, otherwise it would be
                    # prepended to the NEXT read a second time
                    line_tail = ''
                if not clean_eol:
                    line_tail = buf.pop(-1)
                '''
                answer PINGs from server,
                separate the internal junk from lines for the log file
                '''
                for b in buf:
                    if b == '':
                        continue
                    words = b.split(' ')
                    if words[0] == 'PING':
                        pong = 'PONG ' + words[1] + '\n'
                        s.send(pong)
                        continue
                    '''
                    anything above this point should not go into the log file
                    '''
                    log_enabled = True
                    # 376 = end of MOTD: safe to join the channel now
                    if not joined and words[1] == '376':
                        s.send('JOIN ' + CHAN + '\n')
                        joined = True
                        print("set joined: True")
                    if log_enabled:
                        loglines += parse(b)
            if log_enabled:
                loglines = flush_log(loglines)
    except Exception as main_e:
        print("Exception in main(): " + str(main_e))
        shutdown(s, main_e, loglines)
        raise main_e
# Start the bot when executed as a script.
if __name__ == '__main__':
    main()
|
# Development settings: pull in everything from the shared base module.
from .base import *
# Enable Django's debug mode (development only -- never in production).
DEBUG = True
# Directories searched for loaddata/test fixtures.
# NOTE(review): os and BASE_DIR are assumed to come from .base -- confirm.
FIXTURE_DIRS = (
    os.path.join(BASE_DIR, 'fixtures'),
)
|
import sys
def prime(n):
    """Generate all prime numbers strictly below n, in ascending order.

    Uses trial division against the primes found so far.
    """
    # FIX: guard the hard-coded first prime so prime(0)..prime(2)
    # yield nothing instead of wrongly yielding 2
    if n > 2:
        yield 2
    primes = []  # odd primes found so far
    for m in range(3, n, 2):
        # m is prime iff no smaller prime divides it
        if all(m % p for p in primes):
            # FIX: the original called primes.get(m) on a dict, which
            # never stored anything, so EVERY odd number was yielded
            primes.append(m)
            yield m
def printPrimeNumber(a):
    """Yield every value produced by the iterator *a* (pass-through).

    FIX: the original did `while True: yield next(a)`; once *a* was
    exhausted, the StopIteration escaped into this generator, which
    Python 3.7+ (PEP 479) converts into a RuntimeError. `yield from`
    terminates cleanly instead.
    """
    yield from a
def main(argv):
    """Print the list of primes below int(argv)."""
    limit = int(argv)
    generator = prime(limit)
    print(list(printPrimeNumber(generator)))
# Entry point: expects the upper limit as the first CLI argument.
if __name__ == "__main__":
    main(sys.argv[1])
|
import unittest
from nose.tools import *
from ni.core.stack import Stack
def test_new_stack():
    # A fresh stack mirrors an empty list in every representation.
    empty = []
    stack = Stack()
    assert repr(stack) == repr(empty)
    assert str(stack) == str(empty)
    assert unicode(stack) == unicode(empty)
    assert len(stack) == len(empty)
def test_new_stack_sized():
    # The size passed to the constructor is stored verbatim.
    stack = Stack(10)
    assert stack.size == 10
def test_stack_push():
    # Pushing a single item grows the stack to length one.
    stack = Stack()
    stack.push(1)
    assert len(stack) == 1
def test_stack_last():
    # last() peeks at the top value without removing it.
    stack = Stack()
    stack.push(1)
    assert stack.last() == 1
    assert len(stack) == 1  # the value is still on the stack
def test_stack_push_pop():
    # pop() hands back the pushed value and empties the stack.
    stack = Stack()
    stack.push(1)
    assert stack.pop() == 1
    assert len(stack) == 0
def test_stack_push_pop_pop():
    # Popping an already-empty stack yields None.
    stack = Stack()
    stack.push(1)
    stack.pop()
    assert stack.pop() == None  # is this really the right thing to do?
def test_stack_clear():
    # clear() removes everything that was pushed.
    stack = Stack()
    stack.push(1)
    stack.clear()
    assert len(stack) == 0
def test_stack_limit_reached():
    # A bounded stack never grows past its size limit.
    stack = Stack(5)
    for item in xrange(5):
        stack.push(item)
    assert len(stack) == 5
    stack.push(5)  # 0..4 and now 5
    assert len(stack) == 5  # length is still 5
|
"""
Created on Tue Nov 6 19:34:21 2018
@author: alek
"""
import sys
sys.path.append('.')
import pyDist.endpointSetup
# Announce startup on stdout.
print('starting a ClusterExecutorNode...')
# Build a cluster execution node allowed to use 4 worker cores.
node = pyDist.endpointSetup.setup_cluster_node(num_cores=4)
# Bind to all interfaces on port 9000 -- presumably blocks serving
# requests from here on; confirm against the pyDist API.
node.boot('0.0.0.0', 9000)
|
class Solution(object):
    def findWords(self, board, words):
        """Return the words from `words` (in input order) that can be
        traced on `board` by moving between horizontally or vertically
        adjacent cells, using each cell at most once per word.

        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        def findWordFromPos(board, m, w, i, j):
            # All characters matched -> the word exists on the board.
            if len(w) == 0:
                return True
            # Out of bounds. FIX: the original tested `j > len(board[0])`,
            # an off-by-one that could allow an IndexError.
            if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):
                return False
            # Cell already used on this path, or wrong letter.
            if m[i][j] or board[i][j] != w[0]:
                return False
            m[i][j] = True  # mark visited while exploring from here
            found = (findWordFromPos(board, m, w[1:], i - 1, j)
                     or findWordFromPos(board, m, w[1:], i + 1, j)
                     or findWordFromPos(board, m, w[1:], i, j - 1)
                     or findWordFromPos(board, m, w[1:], i, j + 1))
            m[i][j] = False  # backtrack
            return found

        def findWord(board, word):
            # Guard empty boards (the original crashed on board == []).
            if not board or not board[0]:
                return False
            # FIX: the original only searched from (0, 0); a word can
            # start at ANY cell, so try every position. The visited
            # matrix can be shared because the search fully backtracks.
            m = [[False] * len(board[0]) for _ in range(len(board))]
            for i in range(len(board)):
                for j in range(len(board[0])):
                    if findWordFromPos(board, m, word, i, j):
                        return True
            return False

        # Debug prints removed; preserve the order of `words` in the result.
        return [w for w in words if findWord(board, w)]
if __name__ == '__main__':
    # Quick manual check against the classic word-search example.
    solver = Solution()
    found = solver.findWords(["oaan","etae","ihkr","iflv"], ["oath","pea","eat","rain"])
    # single-argument print() behaves identically on Python 2 and 3
    print(found)
|
import os, csv
# Aggregate one captured row per star from every *MORB_OUTPUT.csv file
# in the current directory into a single output csv.
# FIX: the output was removed as "scrapeMORB_output.csv" but written as
# "scapeMORB_output.csv" (typo) -- one name is now used for both.
OUTPUT_NAME = "scrapeMORB_output.csv"
if OUTPUT_NAME in os.listdir(os.getcwd()):
    os.remove(OUTPUT_NAME)
# `with` guarantees both files are closed; the explicit close() calls
# (one of them redundant inside the old with-block) are gone.
with open(OUTPUT_NAME, 'a') as outfile:
    for fname in os.listdir(os.getcwd()):
        # FIX: dropped the original `enumerate(i, 1) >= 100` clause --
        # comparing an enumerate object with an int is a TypeError on
        # Python 3 and meaningless on Python 2. Its intent is unclear
        # (TODO(review): confirm whether a file-count cutoff was wanted).
        if "MORB_OUTPUT.csv" not in fname:
            continue
        with open(fname, 'r') as infile:
            reader = csv.reader(infile)
            header = next(reader)
            star_name = header[1]  # second header column: star name (presumably -- confirm)
            for row in reader:
                # find the marker row, skip one line, capture the next
                if "liquid_0" in row:
                    next(reader)
                    capture = list(next(reader))
                    outfile.write(star_name + "," + ",".join(capture) + "\n")
                    break
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.