import { expect } from 'chai';
import sinon from 'sinon';
import load from '../widget/load';
describe('widget load', () => {
afterEach(() => {
if ('restore' in Math.random) {
Math.random.restore(); // reset the Math.random fixture
}
});
describe('successful', () => {
beforeEach(() => {
sinon.stub(Math, 'random').returns(0.4);
});
it('uses the widgets from the session', () => {
return load({ session: { user: {}, widgets: ['a', 'b', 'c'] } }, undefined).then(widgets => {
expect(widgets.length).to.equal(3);
});
});
it('initializes the widgets', () => {
return load({ session: { user: {} } }, undefined).then(widgets => {
expect(widgets.length).to.equal(4);
expect(widgets[0].color).to.equal('Red');
});
});
});
describe('unsuccessful', () => {
beforeEach(() => {
sinon.stub(Math, 'random').returns(0.2);
});
it('rejects the call', () => {
return load({ session: { user: {} } }, undefined).then(
() => { throw new Error('expected the load call to reject'); },
err => {
expect(err).to.equal('Widget load fails 33% of the time. You were unlucky.');
});
});
});
});
|
const state = {
config: {
googleMaps: {
apiKey: 'AIzaSyA7PA38yK-eREk4fjhMtoxB7m3C9zMRxRE'
},
windowMatchSizeLg: '(min-width: 992px)',
},
isLoading: true
}
export default {
state
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-23 20:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0017_auto_20160822_1913'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'get_latest_by': 'post_date', 'ordering': ['-post_date'], 'verbose_name': 'Artículo', 'verbose_name_plural': 'Artículos'},
),
]
|
# Generated by Django 4.0.3 on 2022-03-09 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='USState',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('capital', models.CharField(blank=True, max_length=255, null=True)),
('nickname', models.CharField(blank=True, max_length=255, null=True)),
('abbreviation', models.CharField(blank=True, max_length=2, null=True)),
('population', models.PositiveIntegerField(default=True)),
('flag_image', models.ImageField(default=True, upload_to='')),
],
),
]
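# A hypothetical models.py reconstruction that would produce the CreateModel
# operation above. Field names and options are taken from the migration; the
# module path and everything else is an assumption for illustration only.
from django.db import models


class USState(models.Model):
    name = models.CharField(max_length=255, blank=True, null=True)
    capital = models.CharField(max_length=255, blank=True, null=True)
    nickname = models.CharField(max_length=255, blank=True, null=True)
    abbreviation = models.CharField(max_length=2, blank=True, null=True)
    # default=True (i.e. 1) is kept exactly as it appears in the migration
    population = models.PositiveIntegerField(default=True)
    flag_image = models.ImageField(default=True, upload_to='')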
|
#import <Foundation/Foundation.h>
#import <objc/runtime.h>
@interface NSObject (Category)
@property (nonatomic, strong, readonly) NSMutableArray *associatedObjectNames;
/**
* Dynamically attach an associated object (a category-like property) to the current object
*
* @param propertyName name of the associated property
* @param value value to associate
* @param policy memory-management policy for the association
*/
- (void)objc_setAssociatedObject:(NSString *)propertyName value:(id)value policy:(objc_AssociationPolicy)policy;
/**
* Retrieve an associated object that was dynamically attached to the current object
*
* @param propertyName name of the associated property
*
* @return the associated value
*/
- (id)objc_getAssociatedObject:(NSString *)propertyName;
/**
* Remove all dynamically attached associated objects
*/
- (void)objc_removeAssociatedObjects;
/**
* Get all properties of the object
*
* @return array of property names
*/
- (NSArray *)getProperties;
@end
|
#!/usr/bin/env python3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
# Dower
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.8 - changed Windows support to read version from kernel32.dll
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another typo in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some actions take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.8'
import collections
# TODO: Truffle reenable me once subprocess is supported (GR-9141)
# import sys, os, re, subprocess
import sys, os, re
import warnings
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos', 'win32', 'win16'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
# Directory to search for configuration information on Unix.
# Constant used by test_platform to test linux_distribution().
_UNIXCONFDIR = '/etc'
### Platform specific APIs
_libc_search = re.compile(b'(__libc_init)'
b'|'
b'(GLIBC_([0-9.]+))'
b'|'
br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
def libc_ver(executable=sys.executable, lib='', version='',
chunksize=16384):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
with open(executable, 'rb') as f:
binary = f.read(chunksize)
pos = 0
while 1:
if b'libc' in binary or b'GLIBC' in binary:
m = _libc_search.search(binary, pos)
else:
m = None
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit, glibc, glibcversion, so, threads, soversion = [
s.decode('latin1') if s is not None else s
for s in m.groups()]
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
return lib, version
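# Illustrative only: on a typical glibc-based Linux system a call such as
# libc_ver() is expected to return something like ('glibc', '2.27'); the exact
# value depends on the binary inspected, and the given defaults ('', '') are
# returned when nothing can be determined.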
def _dist_try_harder(distname, version, id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
distname = 'SuSE'
for line in open('/var/adm/inst-log/info'):
tv = line.split()
if len(tv) == 2:
tag, value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = value.strip()
elif tag == 'DIST_IDENT':
values = value.split('-')
id = values[2]
return distname, version, id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
for line in open('/etc/.installed'):
pkg = line.split('-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux', pkg[1], id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname, version, id
return distname, version, id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
r' release '
r'([\d.]+)'
r'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
r'(?: release )?'
r'([\d.]+)'
r'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = firstline.strip().split()
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
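# Worked examples (derived from the regexes above, values are illustrative):
#   _parse_release_file('Fedora release 32 (Thirty Two)')        -> ('Fedora', '32', 'Thirty Two')
#   _parse_release_file('CentOS Linux release 7.9.2009 (Core)')  -> ('CentOS Linux', '7.9.2009', 'Core')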
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
import warnings
warnings.warn("dist() and linux_distribution() functions are deprecated "
"in Python 3.5", PendingDeprecationWarning, stacklevel=2)
return _linux_distribution(distname, version, id, supported_dists,
full_distribution_name)
def _linux_distribution(distname, version, id, supported_dists,
full_distribution_name):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname, version, id) which default to the
args given as parameters.
"""
try:
etc = os.listdir(_UNIXCONFDIR)
except OSError:
# Probably not a Unix system
return distname, version, id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname, dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname, version, id)
# Read the first line
with open(os.path.join(_UNIXCONFDIR, file), 'r',
encoding='utf-8', errors='surrogateescape') as f:
firstline = f.readline()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='', version='', id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname, version, id) which default to the
args given as parameters.
"""
import warnings
warnings.warn("dist() and linux_distribution() functions are deprecated "
"in Python 3.5", PendingDeprecationWarning, stacklevel=2)
return _linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):
""" Portable popen() interface.
"""
import warnings
warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = version.split('.')
if build:
l.append(build)
try:
ints = map(int, l)
except ValueError:
strings = l
else:
strings = list(map(str, ints))
version = '.'.join(strings[:3])
return version
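# Worked examples (values are illustrative):
#   _norm_version('6.1.7601.17514')      -> '6.1.7601'   (truncated to major.minor.build)
#   _norm_version('6.1', build='7601')   -> '6.1.7601'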
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
r'.*'
r'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32', 'win16', 'dos')):
""" Tries to figure out the OS version used and returns
a tuple (system, release, version).
It uses the "ver" shell command for this which is known
to exist on Windows and DOS. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system, release, version
# Try some common cmd strings
for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
try:
pipe = os.popen(cmd)
info = pipe.read()
if pipe.close():
raise OSError('command failed')
# XXX How can I suppress shell errors from being written
# to stderr ?
except OSError as why:
#print 'Command %s failed: %s' % (cmd, why)
continue
else:
break
else:
return system, release, version
# Parse the output
info = info.strip()
m = _ver_output.match(info)
if m is not None:
system, release, version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system, release, version
_WIN32_CLIENT_RELEASES = {
(5, 0): "2000",
(5, 1): "XP",
# Strictly, 5.2 client is XP 64-bit, but platform.py historically
# has always called it 2003 Server
(5, 2): "2003Server",
(5, None): "post2003",
(6, 0): "Vista",
(6, 1): "7",
(6, 2): "8",
(6, 3): "8.1",
(6, None): "post8.1",
(10, 0): "10",
(10, None): "post10",
}
# Server release name lookup will default to client names if necessary
_WIN32_SERVER_RELEASES = {
(5, 2): "2003Server",
(6, 0): "2008Server",
(6, 1): "2008ServerR2",
(6, 2): "2012Server",
(6, 3): "2012ServerR2",
(6, None): "post2012ServerR2",
}
def win32_ver(release='', version='', csd='', ptype=''):
try:
from sys import getwindowsversion
except ImportError:
return release, version, csd, ptype
try:
from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
except ImportError:
from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
winver = getwindowsversion()
maj, min, build = winver.platform_version or winver[:3]
version = '{0}.{1}.{2}'.format(maj, min, build)
release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
_WIN32_CLIENT_RELEASES.get((maj, None)) or
release)
# getwindowsversion() reflects the compatibility mode Python is
# running under, and so the service pack value is only going to be
# valid if the versions match.
if winver[:2] == (maj, min):
try:
csd = 'SP{}'.format(winver.service_pack_major)
except AttributeError:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
# VER_NT_SERVER = 3
if getattr(winver, 'product_type', None) == 3:
release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
_WIN32_SERVER_RELEASES.get((maj, None)) or
release)
key = None
try:
key = OpenKeyEx(HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
ptype = QueryValueEx(key, 'CurrentType')[0]
except:
pass
finally:
if key:
CloseKey(key)
return release, version, csd, ptype
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
with open(fn, 'rb') as f:
pl = plistlib.load(f)
release = pl['ProductVersion']
versioninfo = ('', '', '')
machine = os.uname().machine
if machine in ('ppc', 'Power Macintosh'):
# Canonical name
machine = 'PowerPC'
return release, versioninfo, machine
def mac_ver(release='', versioninfo=('', '', ''), machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that also doesn't work return the default values
return release, versioninfo, machine
def _java_getprop(name, default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):
""" Version interface for Jython.
Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being
a tuple (vm_name, vm_release, vm_vendor) and osinfo being a
tuple (os_name, os_version, os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release, vendor, vminfo, osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system, release, version):
""" Returns (system, release, version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server', system+release, version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system, release, version
# Modify release (marketing release = SunOS release - 3)
l = release.split('.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = '.'.join(l)
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
# apps are also supported..
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32', 'win16'):
# In case one of the other tricks
system = 'Windows'
return system, release, version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ', '_')
platform = platform.replace('/', '-')
platform = platform.replace('\\', '-')
platform = platform.replace(':', '-')
platform = platform.replace(';', '-')
platform = platform.replace('"', '-')
platform = platform.replace('(', '-')
platform = platform.replace(')', '-')
# No need to report 'unknown' information...
platform = platform.replace('unknown', '')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--', '-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
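# Worked examples (values are illustrative):
#   _platform('Linux', '4.15.0', 'x86_64')      -> 'Linux-4.15.0-x86_64'
#   _platform('Windows', '10', '', 'unknown')   -> 'Windows-10'   ('unknown' and empty fields are dropped)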
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except OSError:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath), os.readlink(filepath)))
return filepath
def _syscmd_uname(option, default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos', 'win32', 'win16'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError, OSError):
return default
output = f.read().strip()
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target, default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output. Follow the symlinks. It returns
default in case the command should fail.
"""
if sys.platform in ('dos', 'win32', 'win16'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
# NOTE: this relies on the subprocess module, whose import is commented out
# above (see the Truffle TODO); until that import is restored this call will
# raise a NameError, which the except clause below does not catch.
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError, OSError):
return default
output = proc.communicate()[0].decode('latin-1')
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('', 'WindowsPE'),
'win16': ('', 'Windows'),
'dos': ('', 'MSDOS'),
}
def architecture(executable=sys.executable, bits='', linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits, linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary, defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
fileout = _syscmd_file(executable, '')
else:
fileout = ''
if not fileout and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b, l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits, linkage
if 'executable' not in fileout:
# Format not supported
return bits, linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits, linkage
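# Illustrative only: on a 64-bit Linux build of CPython, architecture() will
# typically return ('64bit', 'ELF'); on Windows, where the "file" command is
# usually unavailable, it falls back to the _default_architecture table and
# typically returns something like ('64bit', 'WindowsPE').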
### Portable uname() interface
uname_result = collections.namedtuple("uname_result",
"system node release version machine processor")
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system, node, release, version, machine, processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system, node, release, version, machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
# Hmm, there is either no uname or uname has returned
# 'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release, version, csd, ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system, release, version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32', 'win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release, vendor, vminfo, osinfo = java_ver()
system = 'Java'
version = ', '.join(vminfo)
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p', '')
# If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = uname_result(system, node, release, version,
machine, processor)
return _uname_cache
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname().system
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname().node
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname().release
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname().version
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname().machine
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname().processor
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*' # "version<space>"
r'\(#?([^,]+)' # "(#buildno"
r'(?:,\s*([\w ]*)' # ", builddate"
r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)<space>"
r'\[([^\]]+)\]?', re.ASCII) # "[compiler]"
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
r'([\d\.]+)'
r'(?: \(([\d\.]+)\))?'
r' on (.NET [\d\.]+)', re.ASCII)
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
r'([\d.]+)\s*'
r'\(IronPython\s*'
r'[\d.]+\s*'
r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
r'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_git'):
_, branch, revision = sys._git
elif hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
elif hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(_sys_version()[1].split('.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system, node, release, version, machine, processor = uname()
if machine == processor:
processor = ''
if aliased:
system, release, version = system_alias(system, release, version)
if system == 'Windows':
# MS platforms
rel, vers, csd, ptype = win32_ver(version)
if terse:
platform = _platform(system, release)
else:
platform = _platform(system, release, version, csd)
elif system in ('Linux',):
# Linux based systems
with warnings.catch_warnings():
# see issue #1322 for more information
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
distname, distversion, distid = dist('')
if distname and not terse:
platform = _platform(system, release, machine, processor,
'with',
distname, distversion, distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname, libcversion = libc_ver(sys.executable)
platform = _platform(system, release, machine, processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r, v, vminfo, (os_name, os_version, os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system, release, version)
else:
platform = _platform(system, release, version,
'on',
os_name, os_version, os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system, release)
else:
platform = _platform(system, release, machine)
else:
# Generic handler
if terse:
platform = _platform(system, release)
else:
bits, linkage = architecture(sys.executable)
platform = _platform(system, release, machine,
processor, bits, linkage)
_platform_cache[(aliased, terse)] = platform
return platform
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print(platform(aliased, terse))
sys.exit(0)
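# Example interactive usage (output values are illustrative and depend on the host):
#
#   >>> import platform
#   >>> platform.system()
#   'Linux'
#   >>> platform.python_version()
#   '3.6.5'
#   >>> platform.platform()
#   'Linux-4.15.0-generic-x86_64-with-debian-buster-sid'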
|
import Ember from 'ember';
import { alias, sort, oneWay } from 'ember-decorators/object/computed';
import { dropTask } from 'ember-concurrency-decorators';
import OLIPersistence from 'aeonvera/mixins/registration/order-line-item-persistence';
const { inject } = Ember;
export default Ember.Controller.extend(OLIPersistence, {
flash: inject.service('flash-notification'),
rollbar: inject.service('rollbar'),
@alias('model.registration.unpaidOrder') order: null,
@alias('model.registration') registration: null,
@alias('model.event') event: null,
@dropTask
removeShirt: function * (orderLineItem) {
// just in case it's a promise
const oli = yield orderLineItem;
try {
yield oli.destroyRecord();
} catch (e) {
this.get('flash').alert('Could not remove shirt');
this.get('rollbar').warning('deleting shirt orderLineItem', e);
}
},
@dropTask
addShirt: function * (size, shirt) {
yield this.get('addOrderLineItem').perform(shirt, {
size,
quantity: 1
});
},
actions: {
updateShirt(orderLineItem, quantity) {
orderLineItem.set('quantity', quantity);
orderLineItem.save();
},
didFinishSelectingShirts() {
this.transitionToRoute('register.event-registration.show.edit.competitions', this.get('model'));
}
}
});
|
# import unittest
# from d3m import container, utils
# from d3m.metadata import base as metadata_base
# from tods.detection_algorithm.Telemanom import TelemanomPrimitive
# class TelemanomTest(unittest.TestCase):
# def test_basic(self):
# self.maxDiff = None
# main = container.DataFrame({'a': [1., 2., 3., 4.,5,6,7,8,9], 'b': [2., 3., 4., 5.,6,7,8,9,10], 'c': [3., 4., 5., 6.,7,8,9,10,11]},
# columns=['a', 'b', 'c'],
# generate_metadata=True)
# print(main)
# self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
# 'selector': [],
# 'metadata': {
# # 'top_level': 'main',
# 'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
# 'structural_type': 'd3m.container.pandas.DataFrame',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
# 'dimension': {
# 'name': 'rows',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
# 'length': 9,
# },
# },
# }, {
# 'selector': ['__ALL_ELEMENTS__'],
# 'metadata': {
# 'dimension': {
# 'name': 'columns',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
# 'length': 3,
# },
# },
# }, {
# 'selector': ['__ALL_ELEMENTS__', 0],
# 'metadata': {'structural_type': 'numpy.float64', 'name': 'a'},
# }, {
# 'selector': ['__ALL_ELEMENTS__', 1],
# 'metadata': {'structural_type': 'numpy.float64', 'name': 'b'},
# }, {
# 'selector': ['__ALL_ELEMENTS__', 2],
# 'metadata': {'structural_type': 'numpy.float64', 'name': 'c'}
# }])
# self.assertIsInstance(main, container.DataFrame)
# hyperparams_class = TelemanomPrimitive.metadata.get_hyperparams()
# hyperparams = hyperparams_class.defaults()
# hyperparams = hyperparams.replace({'l_s': 2,'n_predictions':1,'return_result':'new','return_subseq_inds':True,'use_columns':(0,1,2)})
# # print("hyperparams",hyperparams)
# primitive = TelemanomPrimitive(hyperparams=hyperparams)
# primitive.set_training_data(inputs=main)
# primitive.fit()
# new_main = primitive.produce_score(inputs=main).value
# print("new main",new_main)
# # print(utils.to_json_structure(new_main.metadata.to_internal_simple_structure()))
# self.assertEqual(utils.to_json_structure(new_main.metadata.to_internal_simple_structure()), [{
# 'selector': [],
# 'metadata': {
# # 'top_level': 'main',
# 'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
# 'structural_type': 'd3m.container.pandas.DataFrame',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
# 'dimension': {
# 'name': 'rows',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
# 'length': 6,
# },
# },
# }, {
# 'selector': ['__ALL_ELEMENTS__'],
# 'metadata': {
# 'dimension': {
# 'name': 'columns',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
# 'length': 3,
# },
# },
# }, {
# 'selector': ['__ALL_ELEMENTS__', 0],
# 'metadata': {
# 'name': 'Telemanom0_0',
# 'structural_type': 'numpy.int64',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
# },
# }, {
# 'selector': ['__ALL_ELEMENTS__', 1],
# 'metadata': {
# 'name': 'Telemanom0_1',
# 'structural_type': 'numpy.int64',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
# },
# }, {
# 'selector': ['__ALL_ELEMENTS__', 2],
# 'metadata': {
# 'name': 'Telemanom0_2',
# 'structural_type': 'numpy.int64',
# 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
# }
# }])
# if __name__ == '__main__':
# unittest.main()
|
import React, {useState, useEffect, useContext } from 'react'
import { Form, Input, FormGroup } from 'reactstrap'
import PerfectScrollbar from 'react-perfect-scrollbar'
import 'react-perfect-scrollbar/dist/css/styles.css'
import Send from '../../svg/paper-plane.svg'
import Plus from '../../svg/plus.svg'
import ChatRoom from './chatRoom'
import { OverlayTrigger, Tooltip, Modal, Button } from 'react-bootstrap'
import Avatar from 'react-avatar'
import {
Accordion,
AccordionItem,
AccordionItemHeading,
AccordionItemButton,
AccordionItemPanel,
} from 'react-accessible-accordion'
import moment from 'moment'
import Drawer from '@material-ui/core/Drawer'
import { FirebaseContext, db, auth } from '../../Firebase'
export default function CommunIzi() {
const [info, setInfo] = useState([])
const [note, setNote] = useState('')
const [mood, setMood] = useState('')
const [room, setRoom] = useState('')
const [startDate, setStartDate] = useState(new Date())
const [expanded, setExpanded] = useState('')
const [showModal, setShowModal] = useState(false)
const [activate, setActivate] = useState(false)
const [user, setUser] = useState(auth.currentUser)
const { userDB, setUserDB } = useContext(FirebaseContext)
const handleChange = event =>{
setNote(event.currentTarget.value)
}
const handleChangeRoomName = event =>{
setRoom(event.currentTarget.value)
}
const handleClose = () => setShowModal(false)
const handleShow = () => {
if(window.innerWidth > 480) {
setShowModal(true)
}else{
setActivate(true)
}
}
const handleHideDrawer = () => {
setActivate(false)
}
let hours = new Date().getHours() + "h"
let minutes = new Date().getMinutes()
let time = hours + minutes
Date.prototype.yyyymmdd = function() {
let day = this.getDate()
let month = this.getMonth()
let calendar = ["Janvier", "Février", "Mars", "Avril", "Mai", "Juin", "Juillet", "Août", "Septembre", "Octobre", "Novembre", "Décembre"]
let year = this.getFullYear()
let date = day + " " + calendar[month] + " " + year
return date
};
let dayIn = new Date()
let today = dayIn.yyyymmdd()
const handleSubmit = (event) =>{
event.preventDefault()
setNote("")
let date = startDate.yyyymmdd()
// NOTE: `this.db`, `region`, `departement`, `hotelId`, `roomName`, `author`, `text`,
// `hour` and `userId` are not defined in this function component; this handler
// appears incomplete and presumably relies on context not shown here.
return this.db.collection('mySweetHotel')
.doc('country')
.collection('France')
.doc('collection')
.collection('hotel')
.doc('region')
.collection(region)
.doc('departement')
.collection(departement)
.doc(`${hotelId}`)
.collection('chat')
.doc(`${roomName}`)
.collection('chatRoom')
.add({
author: author,
text: text,
date: date,
hour: hour,
userId: userId,
mood: mood,
markup: Date.now()
})
}
const handleRoomnameSubmit = (event) => {
event.preventDefault()
setRoom('')
db.addChatRoom({hotelId: userDB.hotelId, region: userDB.hotelRegion, departement: userDB.hotelDept, userId: user.uid})
handleClose()
}
const handleShowMoodList = () => {
let moodList = document.getElementById('moodList')
moodList.classList.toggle('moodList')
}
const handleChangeExpanded = (title) => setExpanded(title)
useEffect(() => {
const abortController = new AbortController()
const signal = abortController.signal
db.chatOnAir({hotelId: userDB.hotelId, region: userDB.hotelRegion, departement: userDB.hotelDept, signal : signal}).onSnapshot(function(snapshot) {
const snapInfo = []
snapshot.forEach(function(doc) {
snapInfo.push({
id: doc.id,
...doc.data()
})
});
console.log(snapInfo)
setInfo(snapInfo)
});
return () => {
abortController.abort()
}
},[])
console.log(expanded)
return (
<div className="communizi-container">
<PerfectScrollbar>
<div className="communizi_notebox">
<Accordion allowZeroExpanded>
{info.map((flow) => (
<AccordionItem key={flow.id} onClick={() => handleChangeExpanded(flow.id)}>
<AccordionItemHeading style={{
backgroundColor: "rgb(33, 35, 39)",
padding: "2%",
borderTopLeftRadius: "5px",
borderTopRightRadius: "5px",
marginTop: "1vh"
}}>
<AccordionItemButton style={{outline: "none"}}>
<Avatar
round={true}
name={flow.user}
size="30"
color={'#'+(Math.random()*0xFFFFFF<<0).toString(16)}
style={{marginRight: "1vw"}} />
{flow.id}
<i style={{color: "gray", float: "right", fontSize: "13px"}}>{moment(flow.markup).format('ll')}</i>
</AccordionItemButton>
</AccordionItemHeading>
<AccordionItemPanel style={{backgroundColor: 'lightgray', marginBottom: "1vh"}}>
{/* NOTE: `firebase` is not imported in this file; it is assumed to be provided by surrounding context */}
{!!firebase &&
<ChatRoom title={flow.id} firebase={firebase} />}
</AccordionItemPanel>
</AccordionItem>
))}
</Accordion>
</div>
</PerfectScrollbar>
<div>
<Form inline className="communizi_form">
<FormGroup className="communizi_form_input_container">
<Input type="text" placeholder="Participer à la conversation..."
value={note}
onChange={handleChange}
id="dark_message_note" />
</FormGroup>
<div className="communizi-button-container">
<OverlayTrigger
placement="top"
overlay={
<Tooltip id="title">
Créer une conversation
</Tooltip>
}>
<img src={Plus} alt="plus" style={{width: "40%", cursor: "pointer"}} onClick={handleShow} />
</OverlayTrigger>
<img src={Send} alt="sendIcon" style={{width: "40%", cursor: "pointer"}} onClick={handleSubmit} />
</div>
</Form>
</div>
<Modal show={showModal}
onHide={handleClose}
aria-labelledby="contained-modal-title-vcenter"
centered>
<Modal.Header closeButton>
<Modal.Title id="example-modal-sizes-title-sm">
Créer une conversation
</Modal.Title>
</Modal.Header>
<Modal.Body>
<Input type="text" placeholder="Donnez un nom à la conversation" value={room} style={{borderTop: "none", borderLeft: "none", borderRight: "none"}} maxLength="60" onChange={handleChangeRoomName} />
</Modal.Body>
<Modal.Footer style={{borderTop: "none"}}>
<Button variant="success" onClick={handleRoomnameSubmit}>Créer</Button>
</Modal.Footer>
</Modal>
<Drawer anchor="bottom" open={activate} onClose={handleHideDrawer}>
<div id="drawer-container" style={{
display: "flex",
flexFlow: "column",
justifyContent: "flex-end",
padding: "5%",
maxHeight: "30vh"}}>
<div><Input type="text" placeholder="Donnez un nom à la conversation..." value={room} style={{borderTop: "none", borderLeft: "none", borderRight: "none", marginBottom: "3vh"}} maxLength="35" onChange={handleChangeRoomName} /></div>
<div><Button variant="success" style={{width: "100%"}} onClick={handleRoomnameSubmit}>Créer</Button></div>
</div>
</Drawer>
</div>
)
}
|
import argparse
import copy
import neuro_shooting.command_line_execution_tools as ce
import os
import glob
def setup_cmdline_parsing(cmdline_type='simple_functional_mapping',cmdline_title=None):
if cmdline_title is None:
cmdline_title = cmdline_type
supported_types = ['simple_functional_mapping','simple_functional_weighting','spiral']
if cmdline_type not in supported_types:
raise ValueError('Unsupported command line type {}'.format(cmdline_type))
if cmdline_type=='simple_functional_mapping':
parser = argparse.ArgumentParser(cmdline_title)
parser.add_argument('--gpu', type=int, default=0, help='Enable GPU computation on specified GPU.')
parser.add_argument('--path_to_python', type=str, default=os.popen('which python').read().rstrip(), help='Full path to python in your conda environment.')
parser.add_argument('--nr_of_seeds', type=int, default=1, help='Number of consecutive random seeds which we should run; i.e., number of random runs')
parser.add_argument('--starting_seed_id', type=int, default=0, help='Seed that we start with.')
parser.add_argument('--fcn', type=str, default='cubic', choices=['cubic','quadratic'])
parser.add_argument('--shooting_model', type=str, default='updown_universal', choices=['updown_universal', 'universal','periodic','dampened_updown','simple', '2nd_order', 'updown', 'general_updown'])
parser.add_argument('--output_base_directory', type=str, default='sfm_results', help='Main directory that the results will be stored in')
parser.add_argument('--force_recompute', action='store_true', default=False, help='Default behavior is not to recompute if there was a pt file in the output directory. But we can force it.')
args = parser.parse_args()
return args
if cmdline_type == 'spiral':
parser = argparse.ArgumentParser(cmdline_title)
parser.add_argument('--gpu', type=int, default=0, help='Enable GPU computation on specified GPU.')
parser.add_argument('--path_to_python', type=str, default=os.popen('which python').read().rstrip(),
help='Full path to python in your conda environment.')
parser.add_argument('--nr_of_seeds', type=int, default=1,
help='Number of consecutive random seeds which we should run; i.e., number of random runs')
parser.add_argument('--starting_seed_id', type=int, default=0, help='Seed that we start with.')
parser.add_argument('--shooting_model', type=str, default='updown_universal',
choices=['updown_universal', 'universal', 'periodic', 'dampened_updown', 'simple',
'2nd_order', 'updown', 'general_updown'])
parser.add_argument('--output_base_directory', type=str, default='sfm_results',
help='Main directory that the results will be stored in')
parser.add_argument('--force_recompute', action='store_true', default=False,
help='Default behavior is not to recompute if there was a pt file in the output directory. But we can force it.')
args = parser.parse_args()
return args
if cmdline_type=='simple_functional_weighting':
parser = argparse.ArgumentParser(cmdline_title)
parser.add_argument('--gpu', type=int, default=0, help='Enable GPU computation on specified GPU.')
parser.add_argument('--path_to_python', type=str, default=os.popen('which python').read().rstrip(), help='Full path to python in your conda environment.')
parser.add_argument('--nr_of_seeds', type=int, default=1, help='Number of consecutive random seeds which we should run; i.e., number of random runs')
parser.add_argument('--starting_seed_id', type=int, default=0, help='Seed that we start with.')
parser.add_argument('--fcn', type=str, default='cubic', choices=['cubic','quadratic'])
parser.add_argument('--sweep_updown', action='store_true', default=False)
parser.add_argument('--sweep_updown_universal', action='store_true', default=False)
parser.add_argument('--output_base_directory', type=str, default='sfm_results', help='Main directory that the results will be stored in')
parser.add_argument('--force_recompute', action='store_true', default=False, help='Default behavior is not to recompute if there was a pt file in the output directory. But we can force it.')
args = parser.parse_args()
return args
def create_experiment_name(basename,d):
name = basename
for k in d:
name += '_{}_{}'.format(k,d[k])
return name
def merge_args(run_args_template,add_args):
merged_args = copy.deepcopy(run_args_template)
for k in add_args:
v = add_args[k]
if (v is True) or (v is False): # check whether v is a boolean flag
if v:
merged_args[k] = None # just add the flag
else:
merged_args[k] = v
return merged_args
def sweep_parameters(args,run_args_to_sweep,run_args_template,python_script='simple_functional_mapping_example.py',output_dir_prefix='',do_not_recompute=True):
swept_parameter_list = ce.recursively_sweep_parameters(pars_to_sweep=run_args_to_sweep)
# base settings
seeds = list(range(0 + args.starting_seed_id,
args.nr_of_seeds + args.starting_seed_id))  # seeds we iterate over (for multiple runs); can also be given manually, e.g. [1, 20]
output_base_directory = output_dir_prefix + args.output_base_directory
if not os.path.exists(output_base_directory):
os.mkdir(output_base_directory)
# now go over all these parameter structures and run the experiments
for sidx, seed in enumerate(seeds):
for d in swept_parameter_list:
if 'shooting_model' in d: # we are sweeping over it
current_shooting_model = d['shooting_model']
elif 'shooting_model' in run_args_template:
current_shooting_model = run_args_template['shooting_model']
else:
current_shooting_model = args.shooting_model
if 'fcn' in d: # we are sweeping over it
current_fcn = d['fcn']
elif 'fcn' in run_args_template:
current_fcn = run_args_template['fcn']
else:
if hasattr(args,'fcn'):
current_fcn = args.fcn
else:
current_fcn = ''
basename = 'run_{:02d}_{}_{}'.format(sidx + args.starting_seed_id, current_fcn, current_shooting_model)
experiment_name = create_experiment_name(basename, d)
output_directory = os.path.join(output_base_directory, experiment_name)
log_file = os.path.join(output_directory, 'runlog.log')
if not os.path.exists(output_directory):
os.mkdir(output_directory)
run_args = merge_args(run_args_template=run_args_template, add_args=d)
# add the output-directory
run_args['output_directory'] = output_directory
run_args['seed'] = seed
# check if it already contains an output pt file. If so ignore.
pt_files = glob.glob(os.path.join(output_directory, '*.pt'), recursive=False)
if (len(pt_files)>0) and do_not_recompute:
print('Found the following pt files {} in {}. SKIPPING.\n'.format(pt_files,output_directory))
else:
print('Running {}'.format(experiment_name))
ce.run_command_with_args(python_script=python_script,
run_args=run_args,
path_to_python=args.path_to_python,
cuda_visible_devices=args.gpu,
log_file=log_file)
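# Illustrative driver (hypothetical sweep values; not executed on import):
# how the parsed command-line args and a parameter grid are typically fed
# to sweep_parameters.
def _demo_sweep(args):
    run_args_to_sweep = {'fcn': ['cubic', 'quadratic']}
    run_args_template = {'shooting_model': 'updown_universal'}
    sweep_parameters(args, run_args_to_sweep, run_args_template,
                     python_script='simple_functional_mapping_example.py')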
|
import unittest
import itertools
import pytest
import numpy
from csep.core.regions import CartesianGrid2D, compute_vertex, compute_vertices, _bin_catalog_spatio_magnitude_counts, \
_bin_catalog_spatial_counts, _bin_catalog_probability, Polygon
class TestPolygon(unittest.TestCase):
def setUp(self):
dh = 1
origin = (0,0)
self.polygon = Polygon(compute_vertex(origin, dh))
def test_object_creation(self):
self.assertTupleEqual(self.polygon.origin,(0,0))
numpy.testing.assert_allclose(self.polygon.points,[(0,0),(0,1),(1,1),(1,0)])
def test_contains_inside(self):
test_point = (0.5, 0.5)
self.assertTrue(self.polygon.contains(test_point))
def test_contains_outside(self):
test_point = (-0.5, -0.5)
self.assertFalse(self.polygon.contains(test_point))
def test_compute_centroid(self):
expected = (0.5, 0.5)
numpy.testing.assert_almost_equal(self.polygon.centroid(), expected)
class TestCartesian2D(unittest.TestCase):
def setUp(self):
# create some arbitrary grid
self.nx = 8
self.ny = 10
self.dh = 0.1
x_points = numpy.arange(self.nx)*self.dh
y_points = numpy.arange(self.ny)*self.dh
self.origins = list(itertools.product(x_points, y_points))
# grid is missing first and last block
self.origins.pop(0)
self.origins.pop(-1)
self.num_nodes = len(self.origins)
# this is kinda ugly, maybe we want to create this in a different way, class method?
self.cart_grid = CartesianGrid2D([Polygon(bbox) for bbox in compute_vertices(self.origins, self.dh)], self.dh)
def test_object_creation(self):
self.assertEqual(self.cart_grid.dh, self.dh, 'dh did not get initialized properly')
self.assertEqual(self.cart_grid.num_nodes, self.num_nodes, 'num nodes is not correct')
    def test_xs_and_ys_correct(self):
numpy.testing.assert_allclose(self.cart_grid.xs, numpy.arange(0,self.nx)*self.dh)
numpy.testing.assert_allclose(self.cart_grid.ys, numpy.arange(0,self.ny)*self.dh)
def test_bitmask_indices_mapping(self):
test_idx = self.cart_grid.idx_map[1,0]
numpy.testing.assert_allclose(test_idx, 0, err_msg='mapping for first polygon index (good) not correct')
test_idx = self.cart_grid.idx_map[0,1]
numpy.testing.assert_allclose(test_idx, 9, err_msg='mapping for polygon (good) not correct.')
test_idx = self.cart_grid.idx_map[2,0]
numpy.testing.assert_allclose(test_idx, 1, err_msg='mapping for polygon (good) not correct.')
test_idx = self.cart_grid.idx_map[0,2]
numpy.testing.assert_allclose(test_idx, 19, err_msg='mapping for polygon (good) not correct.')
test_idx = self.cart_grid.idx_map[-1,-1]
numpy.testing.assert_allclose(test_idx, numpy.nan, err_msg='mapping for last index (bad) not correct.')
test_idx = self.cart_grid.idx_map[0,0]
numpy.testing.assert_allclose(test_idx, numpy.nan, err_msg='mapping for first index (bad) not correct.')
def test_domain_mask(self):
test_flag = self.cart_grid.mask[0,0]
self.assertEqual(test_flag, 1)
test_flag = self.cart_grid.mask[-1,1]
self.assertEqual(test_flag, 0)
test_flag = self.cart_grid.mask[1, -1]
self.assertEqual(test_flag, 0)
test_flag = self.cart_grid.mask[2,2]
self.assertEqual(test_flag, 0)
test_flag = self.cart_grid.mask[-1, -1]
self.assertEqual(test_flag, 1)
def test_get_index_of_outside_bbox(self):
test = (-0.05, -0.05)
with pytest.raises(ValueError):
self.cart_grid.get_index_of([test[0]], [test[1]])
def test_get_index_of_inside_bbox_but_masked(self):
test = (0.05, 0.05)
with pytest.raises(ValueError):
self.cart_grid.get_index_of([test[0]], [test[1]])
def test_get_index_of_good(self):
test = (0.05, 0.15)
test_idx = self.cart_grid.get_index_of([test[0]], [test[1]])
numpy.testing.assert_allclose(test_idx, 0)
class TestCatalogBinning(unittest.TestCase):
def setUp(self):
# create some arbitrary grid
self.nx = 8
self.ny = 10
self.dh = 0.1
x_points = numpy.arange(self.nx) * self.dh
y_points = numpy.arange(self.ny) * self.dh
self.origins = list(itertools.product(x_points, y_points))
# grid is missing first and last block
self.origins.pop(0)
self.origins.pop(-1)
self.num_nodes = len(self.origins)
# this is kinda ugly, maybe we want to create this in a different way, class method?
self.cart_grid = CartesianGrid2D([Polygon(bbox) for bbox in compute_vertices(self.origins, self.dh)], self.dh)
# define an arbitrary magnitude range
self.magnitudes = numpy.arange(2.5,8.5,0.1)
def test_bin_spatial_counts(self):
""" this will test both good and bad points within the region.
1) 2 inside the domain
2) outside the bbox but not in domain
3) completely outside the domain
we will check that only 1 event is placed in the grid and ensure its location is correct.
"""
lons = numpy.array([0.05, 0.05, 0.15, -0.5])
lats = numpy.array([0.05, 0.15, 0.05, -0.5])
test_result = _bin_catalog_spatial_counts(lons, lats,
self.cart_grid.num_nodes,
self.cart_grid.mask,
self.cart_grid.idx_map,
self.cart_grid.xs,
self.cart_grid.ys)
# we have tested 2 inside the domain
self.assertEqual(numpy.sum(test_result), 2)
        # from the mapping tests above, (0.05, 0.15) maps to index 0 and (0.15, 0.05) to index 9
self.assertEqual(test_result[0], 1)
self.assertEqual(test_result[9], 1)
def test_bin_spatial_probability(self):
""" this will test both good and bad points within the region. added a point to the lons and lats
to ensure that multiple events are only being counted once
1) 2 inside the domain
2) outside the bbox but not in domain
3) completely outside the domain
we will check that only 1 event is placed in the grid and ensure its location is correct.
"""
lons = numpy.array([0.05, 0.05, 0.15, 0.15, -0.5])
lats = numpy.array([0.05, 0.15, 0.05, 0.05, -0.5])
test_result = _bin_catalog_probability(lons, lats,
self.cart_grid.num_nodes,
self.cart_grid.mask,
self.cart_grid.idx_map,
self.cart_grid.xs,
self.cart_grid.ys)
# we have tested 2 inside the domain
self.assertEqual(numpy.sum(test_result), 2)
        # from the mapping tests above, (0.05, 0.15) maps to index 0 and (0.15, 0.05) to index 9
self.assertEqual(test_result[0], 1)
self.assertEqual(test_result[9], 1)
def test_bin_spatial_magnitudes(self):
""" this will test both good and bad points within the region. added a point to the lons and lats
to ensure that multiple events are only being counted once
1) 2 inside the domain
2) outside the bbox but not in domain
3) completely outside the domain
we will check that only 1 event is placed in the grid and ensure its location is correct.
"""
lons = numpy.array([0.05, 0.05, 0.15, 0.15, -0.5])
lats = numpy.array([0.05, 0.15, 0.05, 0.05, -0.5])
mags = numpy.array([2.55, 2.65, 2.55, 2.05, 2.0])
# expected bins None, (0, 1), (9, 0), None, None
test_result, _ = _bin_catalog_spatio_magnitude_counts(lons, lats, mags,
self.cart_grid.num_nodes,
self.cart_grid.mask,
self.cart_grid.idx_map,
self.cart_grid.xs,
self.cart_grid.ys,
self.magnitudes)
# we have tested 2 inside the domain
self.assertEqual(numpy.sum(test_result), 2)
        # (0.05, 0.15) maps to spatial index 0 (magnitude bin 1) and (0.15, 0.05) to index 9 (bin 0)
self.assertEqual(test_result[0, 1], 1)
self.assertEqual(test_result[9, 0], 1)
if __name__ == '__main__':
unittest.main()
|
import nipype.pipeline.engine as pe
from dmriprep.workflows.dwi.conversions.nii_to_mif.configurations import (
INPUT_NODE_FIELDS,
LOCATE_ASSOCIATED_KWARGS,
OUTPUT_NODE_FIELDS,
)
from dmriprep.workflows.dwi.conversions.nii_to_mif.utils import (
locate_associated_files,
)
from nipype.interfaces import mrtrix3 as mrt
from nipype.interfaces import utility as niu
#: i/o
INPUT_NODE = pe.Node(
niu.IdentityInterface(fields=INPUT_NODE_FIELDS), name="inputnode"
)
OUTPUT_NODE = pe.Node(
niu.IdentityInterface(fields=OUTPUT_NODE_FIELDS), name="outputnode"
)
#: Building blocks
LOCATE_ASSOCIATED_DWI_NODE = pe.Node(
niu.Function(**LOCATE_ASSOCIATED_KWARGS, function=locate_associated_files),
name="locate_associated_dwi",
)
LOCATE_ASSOCIATED_FMAP_NODE = pe.Node(
niu.Function(**LOCATE_ASSOCIATED_KWARGS, function=locate_associated_files),
name="locate_associated_fmap",
)
DWI_CONVERSION_NODE = pe.Node(mrt.MRConvert(), name="dwi_conversion")
FMAP_CONVERSION_NODE = pe.Node(mrt.MRConvert(), name="fmap_conversion")
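#: Note: a minimal wiring sketch (field names below are illustrative, not the
#: module's actual configuration; the real connections live in the workflow
#: definition elsewhere in dmriprep):
# wf = pe.Workflow(name="nii_to_mif")
# wf.connect(INPUT_NODE, "dwi_file", LOCATE_ASSOCIATED_DWI_NODE, "in_file")
# wf.connect(DWI_CONVERSION_NODE, "out_file", OUTPUT_NODE, "dwi_file")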
|
export const navigation = {
home: "/",
login: "/login",
registration: "/registration",
movies: "/movies",
moviesFavorite: "/movies/favorite",
actors: "/actors",
};
|
const mongoose = require("mongoose");
const Schema = mongoose.Schema;
const blogSchema = new Schema({
    title: {
        type: String,
        required: true,
    },
    short: {
        type: String,
        required: true,
    },
    long: {
        type: String,
        required: true,
    },
}, { timestamps: true });
const Blog = mongoose.model("Blog", blogSchema);
module.exports = Blog;
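// Usage sketch (assumes an active mongoose connection; field values are illustrative):
// Blog.create({ title: "Hello", short: "Intro", long: "Full text" })
//     .then(post => console.log(post.createdAt));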
|
import {
signUp,
sendEmailVerification,
logIn,
logOut,
deleteUser,
reauthenticateUser,
facebookLogIn,
googleLogIn,
twitterLogIn
} from '@/services/auth'
import { actionCodeSettings } from '@utils/auth'
import {
SIGNUP_USER,
LOGIN_USER,
LOGOUT_USER,
DELETE_USER,
REAUTHENTICATE_USER,
SIGNUP_SOCIAL
} from '@/store/types/actions_types'
export default {
  /**
   * Sign up a new user
   *
   * @param {Object} newUserData - data to attach to the new user
   */
  // FIXME: properly implement the async flow and the error handling.
async [SIGNUP_USER]({ commit, dispatch }, newUserData) {
    console.log('In SIGNUP_USER')
commit('shared/LOAD_ACTION', null, { root: true })
commit('shared/CLEAR_ERROR', null, { root: true })
const userSignUpData = newUserData
await signUp(userSignUpData)
// .then(async result => {
// console.log(`user: ${JSON.stringify(result)}`)
// // const newUser = await setUser(result)
// // Set the new user at the userStore
// // commit('user/SET_USER', result, { root: true })
// const user = result
// await dispatch('user/LOAD_NEW_USER', user, { root: true })
// // return result
// })
    // // TODO: implement CouchDB
// // .then(
// // async newUser => await dispatch('user/LOAD_NEW_USER', result, { root: true })
// // )
// .then(async () => {
    // // Send the confirmation email
    // console.log('Sending the message')
// // const actionCodeSettings = actionCodeSettings
// await sendEmailVerification(actionCodeSettings)
// commit('shared/LOAD_ACTION', true, { root: true })
// })
// .catch(error => {
// console.log('SIGNUP_USER error: ' + error.message)
// commit('shared/SET_ERROR', null, { root: true })
// dispatch('errors/AUTH_ERROR', error.code, { root: true })
// })
// commit('shared/SET_ERROR', null, { root: true })
// dispatch('errors/AUTH_ERROR', 'auth/user-empty', { root: true })
// }
},
  /**
   * Log in an existing user
   *
   * @param {Object} userData - the user's credentials
   */
async [LOGIN_USER]({ commit, dispatch }, userData) {
console.log('LOGIN_USER')
commit('shared/CLEAR_ERROR', null, { root: true })
commit('shared/LOAD_ACTION', null, { root: true })
logIn(userData)
.then(user => {
commit('user/SET_USER', user, { root: true })
})
.catch(error => {
console.log('logUserIn error: ' + error.message)
commit('shared/SET_ERROR', null, { root: true })
dispatch('errors/AUTH_ERROR', error.code, { root: true })
})
},
  /**
   * Log in with the chosen social network
   *
   * @param {String} providerName - social provider name
   */
async [SIGNUP_SOCIAL] ({ commit, dispatch }, providerName) {
commit('shared/CLEAR_ERROR', null, { root: true })
commit('shared/LOAD_ACTION', null, { root: true })
const provider = providerName
switch (provider) {
case 'Facebook': {
const newUser = await facebookLogIn()
dispatch('user/LOAD_NEW_USER', newUser, { root: true })
break
}
case 'Google': {
const newUser = await googleLogIn()
dispatch('user/LOAD_NEW_USER', newUser, { root: true })
break
}
case 'Twitter': {
const newUser = await twitterLogIn()
dispatch('user/LOAD_NEW_USER', newUser, { root: true })
break
}
}
},
  /**
   * Log out the user
   */
async [LOGOUT_USER]({ commit, dispatch }) {
commit('shared/LOAD_ACTION', null, { root: true })
commit('shared/CLEAR_ERROR', null, { root: true })
await logOut()
.then(() => {
commit('user/RESET_USER', null, { root: true })
// commit('alerts/RESET_ALERTS', null, { root: true })
console.log('LOGOUT_USER')
})
      .then(() => commit('shared/LOAD_ACTION', true, { root: true }))
.catch(error => {
console.log('LOGOUT_USER error: ' + error.message)
commit('shared/SET_ERROR', null, { root: true })
dispatch('errors/AUTH_ERROR', error.code, { root: true })
})
},
  /**
   * Deletes the user account
   */
[DELETE_USER]: ({ _, commit, dispatch, rootGetters }) => {
    console.log('In DELETE_USER')
commit('shared/LOAD_ACTION', null, { root: true })
commit('shared/CLEAR_ERROR', null, { root: true })
// const providerId = rootGetters.USER_PROVIDER_ID
deleteUser()
.then(() => {
commit('user/RESET_USER', null, { root: true })
})
.catch(error => {
console.log('DELETE_USER error: ' + error.message)
commit('shared/SET_ERROR', null, { root: true })
dispatch('errors/AUTH_ERROR', error.code, { root: true })
})
},
  /**
   * Automatic re-authentication of the user
   * Used so that the user account can be deleted
   */
[REAUTHENTICATE_USER]: ({ commit, dispatch }) => {
    console.log('In REAUTHENTICATE_USER')
commit('shared/CLEAR_ERROR', null, { root: true })
reauthenticateUser()
.then(() => {
dispatch('DELETE_FIREBASE_USER_ACCOUNT')
})
.catch(error => {
console.log('REAUTHENTICATE_USER error: ' + error)
commit('shared/SET_ERROR', null, { root: true })
dispatch('errors/AUTH_ERROR', error.code, { root: true })
})
}
}
|
import numpy as np
import sys
sys.path.append('./../')
from scipy import signal as spsp
from scipy import special as spspes
from scipy import linalg as spla
from matplotlib import pyplot as plt
from copy import deepcopy
import h5py
from w3t._exp import Experiment
from w3t._functions import group_motions
#from ._exp import Experiment
class AerodynamicDerivative:
"""
    A class used to represent an aerodynamic derivative
Arguments
---------
reduced_velocities : float
reduced velocities
ad_load_cell_1 : float
contribution to aerodynamic derivative from load cell 1
ad_load_cell_2 : float
contribution to aerodynamic derivative from load cell 2
ad_load_cell_3 : float
contribution to aerodynamic derivative from load cell 3
ad_load_cell_4 : float
contribution to aerodynamic derivative from load cell 4
mean_wind_speeds : float
mean wind velocities
frequencies : float
frequencies of the motions applied to obtain ads
label : str
aerodynamic derivative label
---------
Methods:
--------
plot()
plots the aerodynamic derivative
"""
def __init__(self,label="x",reduced_velocities=[],ad_load_cell_1=[],ad_load_cell_2=[],ad_load_cell_3=[],ad_load_cell_4=[],mean_wind_speeds=[], frequencies=[]):
"""
Arguments
---------
reduced_velocities : float
reduced velocities
ad_load_cell_1 : float
contribution to aerodynamic derivative from load cell 1
ad_load_cell_2 : float
contribution to aerodynamic derivative from load cell 2
ad_load_cell_3 : float
contribution to aerodynamic derivative from load cell 3
ad_load_cell_4 : float
contribution to aerodynamic derivative from load cell 4
mean_wind_speeds : float
mean wind velocities
frequencies : float
frequencies of the motions applied to obtain ads
label : str
aerodynamic derivative label
---------
"""
self.reduced_velocities = reduced_velocities
self.ad_load_cell_1 = ad_load_cell_1
self.ad_load_cell_2 = ad_load_cell_2
self.ad_load_cell_3 = ad_load_cell_3
self.ad_load_cell_4 = ad_load_cell_4
self.mean_wind_speeds = mean_wind_speeds
self.frequencies = frequencies
self.label = label
@property
def value(self):
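        # total derivative: sum of the contributions from all four load cells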
return self.ad_load_cell_1 + self.ad_load_cell_2 + self.ad_load_cell_3 + self.ad_load_cell_4
def plot(self, mode = "all", conv = "normal", ax=[] ):
""" plots the aerodynamic derivative
The method plots the aerodynamic derivative as function of the mean
wind speed. Four optimal modes are abailable.
parameters:
----------
mode : str, optional
selects the plot mode
conv: str, optional
selects which convention to use when plotting
fig : pyplot figure instance
---------
"""
        if not ax:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
if conv == "normal":
if mode == "all":
ax.plot(self.reduced_velocities,self.ad_load_cell_1 + self.ad_load_cell_2+ self.ad_load_cell_3 + self.ad_load_cell_4, "o", label="Total")
ax.plot(self.reduced_velocities,self.ad_load_cell_1, "o", label="Load cell 1", alpha = 0.5)
ax.plot(self.reduced_velocities,self.ad_load_cell_2, "o", label="Load cell 2", alpha = 0.5)
ax.plot(self.reduced_velocities,self.ad_load_cell_3, "o", label="Load cell 3", alpha = 0.5)
ax.plot(self.reduced_velocities,self.ad_load_cell_4, "o", label="Load cell 4", alpha = 0.5)
ax.set_ylabel(("$" + self.label + "$"))
ax.set_xlabel(r"Reduced velocity $\hat{V}$")
ax.legend()
ax.grid(True)
elif mode == "decks":
ax.plot(self.reduced_velocities,self.ad_load_cell_1 + self.ad_load_cell_2+ self.ad_load_cell_3 + self.ad_load_cell_4, "o", label="Total")
ax.plot(self.reduced_velocities,self.ad_load_cell_1 + self.ad_load_cell_2, "o", label="Upwind deck", alpha = 0.5)
ax.plot(self.reduced_velocities,self.ad_load_cell_3 + self.ad_load_cell_4, "o", label="Downwind deck", alpha = 0.5)
ax.set_ylabel(("$" + self.label + "$"))
ax.set_xlabel(r"Reduced velocity $\hat{V}$")
ax.grid(True)
ax.legend()
elif mode == "total":
ax.plot(self.reduced_velocities,self.ad_load_cell_1 + self.ad_load_cell_2+ self.ad_load_cell_3 + self.ad_load_cell_4, "o", label="Total")
ax.set_ylabel(("$" + self.label + "$"))
ax.set_xlabel(r"Reduced velocity $\hat{V}$")
ax.grid(True)
#plt.tight_layout()
elif conv == "zasso" and len(self.reduced_velocities) != 0:
damping_ads =["P_1^*","P_2^*", "P_5^*", "H_1^*", "H_2^*", "H_5^*", "A_1^*", "A_2^*", "A_5^*" ]
stiffness_ads =["P_3^*","P_4^*", "P_6^*", "H_3^*", "H_4^*", "H_6^*", "A_3^*", "A_4^*", "A_6^*" ]
if self.label in damping_ads:
factor = 1.0/self.reduced_velocities
K_label = "K"
elif self.label in stiffness_ads:
factor = 1.0/self.reduced_velocities**2
K_label = "K^2"
            else:
                raise ValueError("Unknown aerodynamic derivative label: {}".format(self.label))
if mode == "all":
ax.plot(self.reduced_velocities,factor*(self.ad_load_cell_1 + self.ad_load_cell_2+ self.ad_load_cell_3 + self.ad_load_cell_4), "o", label="Total")
ax.plot(self.reduced_velocities,factor*self.ad_load_cell_1, "o", label="Load cell 1", alpha = 0.5)
ax.plot(self.reduced_velocities,factor*self.ad_load_cell_2, "o", label="Load cell 2", alpha = 0.5)
ax.plot(self.reduced_velocities,factor*self.ad_load_cell_3, "o", label="Load cell 3", alpha = 0.5)
ax.plot(self.reduced_velocities,factor*self.ad_load_cell_4, "o", label="Load cell 4", alpha = 0.5)
ax.set_ylabel(("$" + K_label + self.label + "$"))
ax.set_xlabel(r"Reduced velocity $\hat{V}$")
ax.legend()
ax.grid(True)
elif mode == "decks":
ax.plot(self.reduced_velocities,factor*(self.ad_load_cell_1 + self.ad_load_cell_2+ self.ad_load_cell_3 + self.ad_load_cell_4), "o", label="Total")
ax.plot(self.reduced_velocities,factor*(self.ad_load_cell_1 + self.ad_load_cell_2), "o", label="Upwind deck", alpha = 0.5)
ax.plot(self.reduced_velocities,factor*(self.ad_load_cell_3 + self.ad_load_cell_4), "o", label="Downwind deck", alpha = 0.5)
ax.set_ylabel(("$" + K_label + self.label + "$"))
ax.set_xlabel(r"Reduced velocity $\hat{V}$")
ax.legend()
ax.grid(True)
elif mode == "total":
ax.plot(self.reduced_velocities,factor*(self.ad_load_cell_1 + self.ad_load_cell_2+ self.ad_load_cell_3 + self.ad_load_cell_4), "o", label="Total")
ax.set_ylabel(("$" + K_label + self.label + "$"))
ax.set_xlabel(r"Reduced velocity $\hat{V}$")
ax.grid(True)
#plt.tight_layout()
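# Usage sketch for AerodynamicDerivative.plot (illustrative zero-valued data):
# ad = AerodynamicDerivative("H_1^*", reduced_velocities=np.linspace(0.5, 4, 8),
#                            ad_load_cell_1=np.zeros(8), ad_load_cell_2=np.zeros(8),
#                            ad_load_cell_3=np.zeros(8), ad_load_cell_4=np.zeros(8))
# ad.plot(mode="total", conv="normal")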
class AerodynamicDerivatives:
"""
A class used to represent all aerodynamic derivatives for a 3 dof motion
parameters:
----------
p1...p6 : obj
aerodynamic derivatives related to the horizontal self-excited force
h1...h6 : obj
aerodynamic derivatives related to the vertical self-excited force
a1...a6 : obj
    aerodynamic derivatives related to the pitching moment
---------
methods:
-------
.fromWTT()
obtains aerodynamic derivatives from a sequence of single harmonic wind tunnel tests
.append()
appends an instance of the class AerodynamicDerivtives to self
.plot()
plots all aerodynamic derivatives
"""
    def __init__(self,
                 p1=AerodynamicDerivative(label="P_1^*"), p2=AerodynamicDerivative(label="P_2^*"),
                 p3=AerodynamicDerivative(label="P_3^*"), p4=AerodynamicDerivative(label="P_4^*"),
                 p5=AerodynamicDerivative(label="P_5^*"), p6=AerodynamicDerivative(label="P_6^*"),
                 h1=AerodynamicDerivative(label="H_1^*"), h2=AerodynamicDerivative(label="H_2^*"),
                 h3=AerodynamicDerivative(label="H_3^*"), h4=AerodynamicDerivative(label="H_4^*"),
                 h5=AerodynamicDerivative(label="H_5^*"), h6=AerodynamicDerivative(label="H_6^*"),
                 a1=AerodynamicDerivative(label="A_1^*"), a2=AerodynamicDerivative(label="A_2^*"),
                 a3=AerodynamicDerivative(label="A_3^*"), a4=AerodynamicDerivative(label="A_4^*"),
                 a5=AerodynamicDerivative(label="A_5^*"), a6=AerodynamicDerivative(label="A_6^*")):
"""
parameters:
----------
p1...p6 : obj
aerodynamic derivatives related to the horizontal self-excited force
h1...h6 : obj
aerodynamic derivatives related to the vertical self-excited force
a1...a6 : obj
        aerodynamic derivatives related to the pitching moment
---------
"""
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.p4 = p4
self.p5 = p5
self.p6 = p6
self.h1 = h1
self.h2 = h2
self.h3 = h3
self.h4 = h4
self.h5 = h5
self.h6 = h6
self.a1 = a1
self.a2 = a2
self.a3 = a3
self.a4 = a4
self.a5 = a5
self.a6 = a6
@classmethod
def fromWTT(cls,experiment_in_still_air,experiment_in_wind,section_width,section_length, filter_order = 6, cutoff_frequency = 7):
""" obtains an instance of the class Aerodynamic derivatives from a wind tunnel experiment
parameters:
----------
experiment_in_still_air : instance of the class experiment
experiment_in_wind : instance of the class experiment
section_width : width of the bridge deck section model
section_length : length of the section model
---------
returns:
--------
an instance of the class AerodynamicDerivatives
to instances of the class Experiment, one for model predictions and one for data used to fit the model
"""
experiment_in_wind.align_with(experiment_in_still_air)
experiment_in_wind_still_air_forces_removed = deepcopy(experiment_in_wind)
experiment_in_wind_still_air_forces_removed.substract(experiment_in_still_air)
starts, stops = experiment_in_wind_still_air_forces_removed.harmonic_groups()
frequencies_of_motion = np.zeros(len(starts))
reduced_velocities = np.zeros(len(starts))
mean_wind_speeds = np.zeros(len(starts))
normalized_coefficient_matrix = np.zeros((2,3,len(starts),4))
forces_predicted_by_ads = np.zeros((experiment_in_wind_still_air_forces_removed.forces_global_center.shape[0],24))
#model_forces = np.zeros((experiment_in_wind_still_air_forces_removed.forces_global_center.shape[0],3))
# loop over all single harmonic test in the time series
for k in range(len(starts)):
sampling_frequency = 1/(experiment_in_still_air.time[1]- experiment_in_still_air.time[0])
sos = spsp.butter(filter_order,cutoff_frequency, fs=sampling_frequency, output="sos")
motions = experiment_in_wind_still_air_forces_removed.motion
motions = spsp.sosfiltfilt(sos,motions,axis=0)
time_derivative_motions = np.vstack((np.array([0,0,0]),np.diff(motions,axis=0)))*sampling_frequency
max_hor_vert_pitch_motion = [np.max(motions[:,0]), np.max(motions[:,1]), np.max(motions[:,2]) ]
motion_type = np.argmax(max_hor_vert_pitch_motion)
fourier_amplitudes = np.fft.fft(motions[starts[k]:stops[k],motion_type])
time_step = experiment_in_wind_still_air_forces_removed.time[1]- experiment_in_wind_still_air_forces_removed.time[0]
            peak_index = np.argmax(np.abs(fourier_amplitudes[0:len(fourier_amplitudes)//2]))
frequencies = np.fft.fftfreq(len(fourier_amplitudes),time_step)
frequency_of_motion = frequencies[peak_index]
frequencies_of_motion[k] = frequency_of_motion
regressor_matrix = np.vstack((time_derivative_motions[starts[k]:stops[k],motion_type],motions[starts[k]:stops[k],motion_type])).T
pseudo_inverse_regressor_matrix = spla.pinv(regressor_matrix)
selected_forces = np.array([0,2,4])
mean_wind_speed = np.mean(experiment_in_wind_still_air_forces_removed.wind_speed[starts[k]:stops[k]])
mean_wind_speeds[k] = mean_wind_speed
reduced_frequency = frequency_of_motion*2*np.pi*section_width/mean_wind_speed
reduced_velocities[k] = 1/reduced_frequency
#model_forces = np.zeros((experiment_in_wind_still_air_forces_removed.forces_global_center.shape))
# Loop over all load cells
for m in range(4):
forces = experiment_in_wind_still_air_forces_removed.forces_global_center[starts[k]:stops[k],selected_forces + 6*m]
                forces_mean_wind_removed = forces - np.mean(experiment_in_wind_still_air_forces_removed.forces_global_center[0:400,selected_forces + 6*m],axis= 0)
                coefficient_matrix = pseudo_inverse_regressor_matrix @ forces_mean_wind_removed
normalized_coefficient_matrix[:,:,k,m] = np.copy(coefficient_matrix)
normalized_coefficient_matrix[0,:,k,m] = normalized_coefficient_matrix[0,:,k,m]*2 / experiment_in_wind_still_air_forces_removed.air_density / mean_wind_speed / reduced_frequency / section_width / section_length
normalized_coefficient_matrix[1,:,k,m] = normalized_coefficient_matrix[1,:,k,m]*2 /experiment_in_wind_still_air_forces_removed.air_density / mean_wind_speed**2 / reduced_frequency**2 /section_length
normalized_coefficient_matrix[:,2,k,m] = normalized_coefficient_matrix[:,2,k,m]/section_width
if motion_type ==2:
normalized_coefficient_matrix[:,:,k,m] = normalized_coefficient_matrix[:,:,k,m]/section_width
forces_predicted_by_ads[starts[k]:stops[k],selected_forces + 6*m] = forces_predicted_by_ads[starts[k]:stops[k],selected_forces + 6*m] + regressor_matrix @ coefficient_matrix + np.mean(experiment_in_wind_still_air_forces_removed.forces_global_center[0:400,selected_forces + 6*m],axis= 0)
# Make Experiment object for simulation of model
obj1 = experiment_in_wind_still_air_forces_removed
obj2 = experiment_in_still_air
model_prediction = Experiment(obj1.name, obj1.time, obj1.temperature, obj1.air_density, obj1.wind_speed,[],forces_predicted_by_ads,obj2.motion)
p1 = AerodynamicDerivative()
p2 = AerodynamicDerivative()
p3 = AerodynamicDerivative()
p4 = AerodynamicDerivative()
p5 = AerodynamicDerivative()
p6 = AerodynamicDerivative()
h1 = AerodynamicDerivative()
h2 = AerodynamicDerivative()
h3 = AerodynamicDerivative()
h4 = AerodynamicDerivative()
h5 = AerodynamicDerivative()
h6 = AerodynamicDerivative()
a1 = AerodynamicDerivative()
a2 = AerodynamicDerivative()
a3 = AerodynamicDerivative()
a4 = AerodynamicDerivative()
a5 = AerodynamicDerivative()
a6 = AerodynamicDerivative()
if motion_type ==0:
row = 0
col = 0
p1 = AerodynamicDerivative("P_1^*", reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 1
h5 = AerodynamicDerivative("H_5^*", reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 2
a5 = AerodynamicDerivative("A_5^*", reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
row = 1
col = 0
p4 = AerodynamicDerivative("P_4^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 1
h6 = AerodynamicDerivative("H_6^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 2
a6 = AerodynamicDerivative("A_6^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
elif motion_type ==1:
row = 0
col = 0
p5 = AerodynamicDerivative("P_5^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 1
h1 = AerodynamicDerivative("H_1^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 2
a1 = AerodynamicDerivative("A_1^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
row = 1
col = 0
p6 = AerodynamicDerivative("P_6^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 1
h4 = AerodynamicDerivative("H_4^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 2
a4 = AerodynamicDerivative("A_4^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
elif motion_type ==2:
row = 0
col = 0
p2 = AerodynamicDerivative("P_2^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 1
h2 = AerodynamicDerivative("H_2^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 2
a2 = AerodynamicDerivative("A_2^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
row = 1
col = 0
p3 = AerodynamicDerivative("P_3^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 1
h3 = AerodynamicDerivative("H_3^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
col = 2
a3 = AerodynamicDerivative("A_3^*",reduced_velocities,normalized_coefficient_matrix[row,col,:,0],normalized_coefficient_matrix[row,col,:,1],normalized_coefficient_matrix[row,col,:,2],normalized_coefficient_matrix[row,col,:,3],mean_wind_speeds,frequencies_of_motion)
return cls(p1, p2, p3, p4, p5, p6, h1, h2, h3, h4, h5, h6, a1, a2, a3, a4, a5, a6), model_prediction, experiment_in_wind_still_air_forces_removed
@classmethod
    def from_Theodorsen(cls, vred):
        vred = np.asarray(vred, dtype=float).copy()  # copy to avoid mutating the caller's array
        vred[vred == 0] = 1.0e-10  # avoid division by zero
k = 0.5/vred
j0 = spspes.jv(0,k)
j1 = spspes.jv(1,k)
y0 = spspes.yn(0,k)
y1 = spspes.yn(1,k)
a = j1 + y0
b = y1-j0
c = a**2 + b**2
f = (j1*a + y1*b)/c
g = -(j1*j0 + y1*y0)/c
h1_value = -2*np.pi*f*vred
h2_value = np.pi/2*(1+f+4*g*vred)*vred
h3_value = 2*np.pi*(f*vred-g/4)*vred
h4_value = np.pi/2*(1+4*g*vred)
a1_value = -np.pi/2*f*vred
a2_value = -np.pi/8*(1-f-4*g*vred)*vred
a3_value = np.pi/2*(f*vred-g/4)*vred
a4_value = np.pi/2*g*vred
p1 = AerodynamicDerivative("P_1^*",vred, vred*0, vred*0, vred*0, vred*0)
p2 = AerodynamicDerivative("P_2^*",vred, vred*0, vred*0, vred*0, vred*0)
p3 = AerodynamicDerivative("P_3^*",vred, vred*0, vred*0, vred*0, vred*0)
p4 = AerodynamicDerivative("P_4^*",vred, vred*0, vred*0, vred*0, vred*0)
p5 = AerodynamicDerivative("P_5^*",vred, vred*0, vred*0, vred*0, vred*0)
p6 = AerodynamicDerivative("P_6^*",vred, vred*0, vred*0, vred*0, vred*0)
h1 = AerodynamicDerivative("H_1^*",vred, h1_value/2, h1_value/2, vred*0, vred*0)
h2 = AerodynamicDerivative("H_2^*",vred, h2_value/2, h2_value/2, vred*0, vred*0)
h3 = AerodynamicDerivative("H_3^*",vred, h3_value/2, h3_value/2, vred*0, vred*0)
h4 = AerodynamicDerivative("H_4^*",vred, h4_value/2, h4_value/2, vred*0, vred*0)
h5 = AerodynamicDerivative("H_5^*",vred, vred*0, vred*0, vred*0, vred*0)
h6 = AerodynamicDerivative("H_6^*",vred, vred*0, vred*0, vred*0, vred*0)
a1 = AerodynamicDerivative("A_1^*",vred, a1_value/2, a1_value/2, vred*0, vred*0)
a2 = AerodynamicDerivative("A_2^*",vred, a2_value/2, a2_value/2, vred*0, vred*0)
a3 = AerodynamicDerivative("A_3^*",vred, a3_value/2, a3_value/2, vred*0, vred*0)
a4 = AerodynamicDerivative("A_4^*",vred, a4_value/2, a4_value/2, vred*0, vred*0)
a5 = AerodynamicDerivative("A_5^*",vred, vred*0, vred*0, vred*0, vred*0)
a6 = AerodynamicDerivative("A_6^*",vred, vred*0, vred*0, vred*0, vred*0)
return cls(p1, p2, p3, p4, p5, p6, h1, h2, h3, h4, h5, h6, a1, a2, a3, a4, a5, a6)
@classmethod
    def from_poly_k(cls, poly_k, k_range, vred):
        vred = np.asarray(vred, dtype=float).copy()  # copy to avoid mutating the caller's array
        vred[vred == 0] = 1.0e-10  # avoid division by zero
        unit_step = lambda k, kc: 1./(1 + np.exp(-2*20*(k-kc)))  # smooth step used to clamp the fit outside the fitted k-range
        fit = lambda p, k, k1c, k2c: np.polyval(p,k)*unit_step(k,k1c)*(1-unit_step(k,k2c)) + np.polyval(p,k1c)*(1-unit_step(k,k1c)) + np.polyval(p,k2c)*unit_step(k,k2c)
damping_ad = np.array([True, True, False, False, True, False, True, True, False, False, True, False, True, True, False, False, True, False ])
labels = ["P_1^*", "P_2^*", "P_3^*", "P_4^*", "P_5^*", "P_6^*", "H_1^*", "H_2^*", "H_3^*", "H_4^*", "H_5^*", "H_6^*", "A_1^*", "A_2^*", "A_3^*", "A_4^*", "A_5^*", "A_6^*"]
ads = []
for k in range(18):
            if damping_ad[k]:
ad_value = vred*fit(poly_k[k,:],1/vred,k_range[k,0],k_range[k,1])
else:
ad_value = vred**2*fit(poly_k[k,:],1/vred,k_range[k,0],k_range[k,1])
ads.append(AerodynamicDerivative(labels[k],vred,ad_value/2 , ad_value/2 , vred*0, vred*0))
return cls(ads[0], ads[1], ads[2], ads[3], ads[4], ads[5], ads[6], ads[7], ads[8], ads[9], ads[10], ads[11], ads[12], ads[13], ads[14], ads[15], ads[16], ads[17])
def append(self,ads):
""" appends and instance of AerodynamicDerivatives to self
Arguments:
----------
ads : an instance of the class AerodynamicDerivatives
"""
objs1 = [self.p1, self.p2, self.p3, self.p4, self.p5, self.p6, self.h1, self.h2, self.h3, self.h4, self.h5, self.h6, self.a1, self.a2, self.a3, self.a4, self.a5, self.a6 ]
objs2 = [ads.p1, ads.p2, ads.p3, ads.p4, ads.p5, ads.p6, ads.h1, ads.h2, ads.h3, ads.h4, ads.h5, ads.h6, ads.a1, ads.a2, ads.a3, ads.a4, ads.a5, ads.a6 ]
for k in range(len(objs1)):
objs1[k].ad_load_cell_1 = np.append(objs1[k].ad_load_cell_1,objs2[k].ad_load_cell_1)
objs1[k].ad_load_cell_2 = np.append(objs1[k].ad_load_cell_2,objs2[k].ad_load_cell_2)
objs1[k].ad_load_cell_3 = np.append(objs1[k].ad_load_cell_3,objs2[k].ad_load_cell_3)
objs1[k].ad_load_cell_4 = np.append(objs1[k].ad_load_cell_4,objs2[k].ad_load_cell_4)
objs1[k].frequencies = np.append(objs1[k].frequencies,objs2[k].frequencies)
objs1[k].mean_wind_speeds = np.append(objs1[k].mean_wind_speeds,objs2[k].mean_wind_speeds)
objs1[k].reduced_velocities = np.append(objs1[k].reduced_velocities,objs2[k].reduced_velocities)
@property
def ad_matrix(self):
""" Returns a matrix of aerodynamic derivatives and reduced velocities
Returns
-------
ads : float
a matrix of aerodynamic derivatives [18 x N reduced velocities]
vreds : float
a matrix of reduced velocities [18 x N reduced velocities]
"""
ads = np.zeros((18,self.p1.reduced_velocities.shape[0]))
vreds = np.zeros((18,self.p1.reduced_velocities.shape[0]))
ads[0,:] = self.p1.value
ads[1,:] = self.p2.value
ads[2,:] = self.p3.value
ads[3,:] = self.p4.value
ads[4,:] = self.p5.value
ads[5,:] = self.p6.value
ads[6,:] = self.h1.value
ads[7,:] = self.h2.value
ads[8,:] = self.h3.value
ads[9,:] = self.h4.value
ads[10,:] = self.h5.value
ads[11,:] = self.h6.value
ads[12,:] = self.a1.value
ads[13,:] = self.a2.value
ads[14,:] = self.a3.value
ads[15,:] = self.a4.value
ads[16,:] = self.a5.value
ads[17,:] = self.a6.value
vreds[0,:] = self.p1.reduced_velocities
vreds[1,:] = self.p2.reduced_velocities
vreds[2,:] = self.p3.reduced_velocities
vreds[3,:] = self.p4.reduced_velocities
vreds[4,:] = self.p5.reduced_velocities
vreds[5,:] = self.p6.reduced_velocities
vreds[6,:] = self.h1.reduced_velocities
vreds[7,:] = self.h2.reduced_velocities
vreds[8,:] = self.h3.reduced_velocities
vreds[9,:] = self.h4.reduced_velocities
vreds[10,:] = self.h5.reduced_velocities
vreds[11,:] = self.h6.reduced_velocities
vreds[12,:] = self.a1.reduced_velocities
vreds[13,:] = self.a2.reduced_velocities
vreds[14,:] = self.a3.reduced_velocities
vreds[15,:] = self.a4.reduced_velocities
vreds[16,:] = self.a5.reduced_velocities
vreds[17,:] = self.a6.reduced_velocities
return ads, vreds
    def frf_mat(self, mean_wind_velocity=1.0, section_width=1.0, air_density=1.25):
        """ Returns the complex frequency response function matrix of the
        self-excited forces [3 x 3 x N reduced velocities]
        """
        frf_mat = np.zeros((3,3,len(self.p1.reduced_velocities)), dtype=complex)
        frf_mat[0,0,:] = 1/2*air_density*mean_wind_velocity**2 * (1/self.p1.reduced_velocities)**2 * (self.p1.value*1j + self.p4.value)
        frf_mat[0,1,:] = 1/2*air_density*mean_wind_velocity**2 * (1/self.p5.reduced_velocities)**2 * (self.p5.value*1j + self.p6.value)
        frf_mat[0,2,:] = 1/2*air_density*mean_wind_velocity**2 * section_width*(1/self.p2.reduced_velocities)**2 * (self.p2.value*1j + self.p3.value)
        frf_mat[1,0,:] = 1/2*air_density*mean_wind_velocity**2 * (1/self.h5.reduced_velocities)**2 * (self.h5.value*1j + self.h6.value)
        frf_mat[1,1,:] = 1/2*air_density*mean_wind_velocity**2 * (1/self.h1.reduced_velocities)**2 * (self.h1.value*1j + self.h4.value)
        frf_mat[1,2,:] = 1/2*air_density*mean_wind_velocity**2 * section_width*(1/self.h2.reduced_velocities)**2 * (self.h2.value*1j + self.h3.value)
        frf_mat[2,0,:] = 1/2*air_density*mean_wind_velocity**2 * section_width*(1/self.a5.reduced_velocities)**2 * (self.a5.value*1j + self.a6.value)
        frf_mat[2,1,:] = 1/2*air_density*mean_wind_velocity**2 * section_width*(1/self.a1.reduced_velocities)**2 * (self.a1.value*1j + self.a4.value)
        frf_mat[2,2,:] = 1/2*air_density*mean_wind_velocity**2 * section_width**2*(1/self.a2.reduced_velocities)**2 * (self.a2.value*1j + self.a3.value)
        return frf_mat
    def fit_poly_k(self, orders=np.ones(18,dtype=int)*2):
        """ Fits polynomials in reduced frequency k to each aerodynamic derivative
        Returns the polynomial coefficients [18 x max(order)+1] and the fitted
        k-range [18 x 2] for each derivative.
        """
        ad_matrix, vreds = self.ad_matrix
        poly_coeff = np.zeros((18,np.max(orders)+1))
        k_range = np.zeros((18,2))
        damping_ad = np.array([True, True, False, False, True, False, True, True, False, False, True, False, True, True, False, False, True, False ])
        for k in range(18):
            k_range[k,0] = 1/np.max(vreds)
            k_range[k,1] = 1/np.min(vreds)
            if damping_ad[k]:
                poly_coeff[k,-orders[k]-1:] = np.polyfit(1/vreds[k,:],1/vreds[k,:]*ad_matrix[k,:],orders[k])
            else:
                poly_coeff[k,-orders[k]-1:] = np.polyfit(1/vreds[k,:],(1/vreds[k,:])**2*ad_matrix[k,:],orders[k])
        return poly_coeff, k_range
def plot(self, fig_damping=[],fig_stiffness=[],conv='normal', mode='total'):
""" plots all aerodynamic derivatives
Arguments:
----------
fig_damping : figure object
fig_stiffness : figure object
conv : normal or zasso
mode : total, all or decks
"""
# Make figure objects if not given
        if not fig_damping:
fig_damping = plt.figure()
for k in range(9):
fig_damping.add_subplot(3,3,k+1)
        if not fig_stiffness:
fig_stiffness = plt.figure()
for k in range(9):
fig_stiffness.add_subplot(3,3,k+1)
axs_damping = fig_damping.get_axes()
#
self.p1.plot(mode=mode, conv=conv, ax=axs_damping[0])
self.p5.plot(mode=mode, conv=conv, ax=axs_damping[1])
self.p2.plot(mode=mode, conv=conv, ax=axs_damping[2])
self.h5.plot(mode=mode, conv=conv, ax=axs_damping[3])
self.h1.plot(mode=mode, conv=conv, ax=axs_damping[4])
self.h2.plot(mode=mode, conv=conv, ax=axs_damping[5])
self.a5.plot(mode=mode, conv=conv, ax=axs_damping[6])
self.a1.plot(mode=mode, conv=conv, ax=axs_damping[7])
self.a2.plot(mode=mode, conv=conv, ax=axs_damping[8])
axs_stiffness = fig_stiffness.get_axes()
self.p4.plot(mode=mode, conv=conv, ax=axs_stiffness[0])
self.p6.plot(mode=mode, conv=conv, ax=axs_stiffness[1])
self.p3.plot(mode=mode, conv=conv, ax=axs_stiffness[2])
self.h6.plot(mode=mode, conv=conv, ax=axs_stiffness[3])
self.h4.plot(mode=mode, conv=conv, ax=axs_stiffness[4])
self.h3.plot(mode=mode, conv=conv, ax=axs_stiffness[5])
self.a6.plot(mode=mode, conv=conv, ax=axs_stiffness[6])
self.a4.plot(mode=mode, conv=conv, ax=axs_stiffness[7])
self.a3.plot(mode=mode, conv=conv, ax=axs_stiffness[8])
for k in range(6):
axs_damping[k].set_xlabel("")
axs_stiffness[k].set_xlabel("")
fig_damping.set_size_inches(20/2.54,15/2.54)
fig_stiffness.set_size_inches(20/2.54,15/2.54)
fig_damping.tight_layout()
fig_stiffness.tight_layout()
#%%
plt.close("all")
h5_file = "TD21_S1_G1"
#%% Load all experiments
f = h5py.File((h5_file + ".hdf5"), "r")
data_set_groups = list(f)
exps = np.array([])
for data_set_group in data_set_groups:
exps = np.append(exps,Experiment.fromWTT(f[data_set_group]))
#exps.append(w3t.Experiment(f[group]))
tests_with_equal_motion = group_motions(exps)
#%%
plt.close("all")
exp0 = exps[tests_with_equal_motion[3][0]]
exp1 = exps[tests_with_equal_motion[3][1]]
exp2 = exps[tests_with_equal_motion[0][2]]
#exp0.plot_experiment(mode="decks")
#exp0.plot_experiment(mode="total")
#exp0.plot_experiment(mode="all")
filter_order = 6
filter_cutoff_frequency = 4
exp0.filt_forces(filter_order,filter_cutoff_frequency)
exp1.filt_forces(filter_order,filter_cutoff_frequency)
#exp1.plot_forces(mode="total")
#exp1.plot_forces(mode="decks")
#exp1.plot_forces(mode="all")
#%%
plt.close("all")
section_width = 750/1000
section_length = 2640/1000
#ads_list = []
#val_list = []
#expf_list = []
#
#all_ads = AerodynamicDerivatives()
#
#for k1 in range(3):
# print(k1)
# for k2 in range(2):
# exp0 = exps[tests_with_equal_motion[k1+1][0]]
# exp1 = exps[tests_with_equal_motion[k1+1][k2+1]]
# exp0.filt_forces(6,5)
# exp1.filt_forces(6,5)
#
# ads, val, expf = AerodynamicDerivatives.fromWTT(exp0,exp1,section_width,section_length)
# ads_list.append(ads)
# val_list.append(val)
# expf_list.append(expf)
# all_ads.append(ads)
# fig, _ = plt.subplots(4,2,sharex=True)
# expf.plot_experiment(fig=fig)
# val.plot_experiment(fig=fig)
#%%
#ads_list[3].h3.plot(mode="total", conv="zasso")
#ads_list[3].h3.plot(mode="decks", conv="zasso")
#ads_list[3].h3.plot(mode="all", conv="zasso")
#%%
vred = np.linspace(0.1,4,20)
ads = AerodynamicDerivatives.from_Theodorsen(vred)
poly, k_range = ads.fit_poly_k()
vred = np.linspace(0.001,8,100)
ads_fit = AerodynamicDerivatives.from_poly_k(poly, k_range, vred)
fig_k, axs_k = plt.subplots(3,3)
fig_c, axs_c = plt.subplots(3,3)
ads.plot(conv="zasso",fig_damping = fig_c, fig_stiffness = fig_k)
ads_fit.plot(conv="zasso",fig_damping = fig_c, fig_stiffness = fig_k)
#%%
|
/* P U L L B A C K C U R V E . H
* BRL-CAD
*
* Copyright (c) 2011-2022 United States Government as represented by
* the U.S. Army Research Laboratory.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this file; see the file named COPYING for more
* information.
*/
#ifndef CONV_IGES_PULLBACK_CURVE_H
#define CONV_IGES_PULLBACK_CURVE_H
#include "opennurbs.h"
/*
*
* Pull an arbitrary model-space *curve* onto the given *surface* as a
* curve within the surface's domain when, for each point c = C(t) on
* the curve and the closest point s = S(u, v) on the surface, we have:
* distance(c, s) <= *tolerance*.
*
* The resulting 2-dimensional curve will be approximated using the
* following process:
*
* 1. Adaptively sample the 3d curve in the domain of the surface
* (ensure tolerance constraint). Sampling terminates when the
* following flatness criterion is met:
* given two parameters on the curve t1 and t2 (which map to points p1 and p2 on the curve)
* let m be a parameter randomly chosen near the middle of the interval [t1, t2]
 *        then the curve between t1 and t2 is flat if the distance from C(m)
 *        to the chord p1p2 is less than *flatness*
*
* 2. Use the sampled points to perform a global interpolation using
* universal knot generation to build a B-Spline curve.
*
* 3. If the curve is a line or an arc (determined with openNURBS routines),
* return the appropriate ON_Curve subclass (otherwise, return an ON_NurbsCurve).
*
*
*/
extern ON_Curve*
pullback_curve(const ON_Surface* surface,
const ON_Curve* curve,
double tolerance = 1.0e-6,
double flatness = 1.0e-3);
#endif /* CONV_IGES_PULLBACK_CURVE_H */
/*
* Local Variables:
* tab-width: 8
* mode: C
* indent-tabs-mode: t
* c-file-style: "stroustrup"
* End:
* ex: shiftwidth=4 tabstop=8
*/
|
# -*- coding: utf-8 -*-
"""
space-whiskey.library
~~~~~~~~~~~~~~
:copyright: © 2018 by Phil Royer.
:license: BSD, see LICENSE for more details.
"""
import pygame
import logging
import utils
import json
from game import *
class Library:
def __init__(self, screen, messages):
self.screen = screen
self.messages = messages
self.games = []
self.index = 0
utils.verifyGamesDirectory()
self.path = utils.getGamesDirectory()
self.directories = utils.listDirectories()
def build(self):
logging.info('Building library from directories')
self.buildLibraryFromDirectories()
logging.info('Building library from file')
self.buildLibraryFromFile()
if self.getCount() > 0:
self.games[self.index].focus()
for idx, game in enumerate(self.games):
game.setIndex(idx)
    def buildLibraryFromDirectories(self, folder=None):
        # resolve the default at call time, not at class-definition time
        if folder is None:
            folder = utils.getGamesDirectory()
        directories = utils.listDirectories(folder)
for directory in directories:
if utils.verifyMetadata(folder + '/' + directory):
with open(folder + '/' + directory + '/metadata.json') as f:
data = json.load(f)
self.jsonToGame(folder + '/' + directory + '/', data)
def buildLibraryFromFile(self):
if utils.verifyLibraryFile():
with open(self.path + '/library.json') as f:
library_file = json.load(f)
if 'games' in library_file:
for item in library_file['games']:
self.jsonToGame('External', item)
if 'directories' in library_file:
for directory in library_file['directories']:
try:
self.buildLibraryFromDirectories(directory)
except OSError as error:
self.messages.append(Message('DIRECTORY ERROR', 'Unable to load Directory', error))
def jsonToGame(self, folder, data):
try:
if folder != 'External':
image = folder + data['image']
else:
image = data['image']
game = Game(
folder,
data['title'],
data['description'],
image,
data['command'])
game.create(self.screen, self.messages)
self.games.append(game)
except OSError as error:
self.messages.append(Message('READ ERROR', 'Unable to read game from config file.', error))
except KeyError as error:
self.messages.append(Message('READ ERROR', 'Unable to read game from config file.', error))
def nextGame(self):
if self.index < len(self.games) - 1:
self.index += 1
self.setFocus(self.index)
for game in self.games:
game.moveLeft()
def previousGame(self):
if self.index > 0:
self.index -= 1
self.setFocus(self.index)
for game in self.games:
game.moveRight()
def setFocus(self, index):
self.index = int(index)
for game in self.games:
game.unfocus()
self.games[self.index].focus()
pygame.display.flip()
def launch(self):
self.games[self.index].launch(self.messages)
def getCount(self):
return len(self.games)
|
# -*- coding: utf-8 -*-
'''
Return/control aspects of the grains data
'''
# Import python libs
from __future__ import absolute_import
import collections
import math
# Import salt libs
import salt.utils
import salt.utils.dictupdate
from salt.exceptions import SaltException
# Import 3rd-party libs
import salt.ext.six as six
# Seed the grains dict so cython will build
__grains__ = {}
def _serial_sanitizer(instr):
'''Replaces the last 1/4 of a string with X's'''
length = len(instr)
index = int(math.floor(length * .75))
return '{0}{1}'.format(instr[:index], 'X' * (length - index))
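# e.g., _serial_sanitizer('ABCD1234') -> 'ABCD12XX' (last quarter replaced)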
_FQDN_SANITIZER = lambda x: 'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: 'MINION'
_DOMAINNAME_SANITIZER = lambda x: 'DOMAINNAME'
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
'serialnumber': _serial_sanitizer,
'domain': _DOMAINNAME_SANITIZER,
'fqdn': _FQDN_SANITIZER,
'id': _FQDN_SANITIZER,
'host': _HOSTNAME_SANITIZER,
'localhost': _HOSTNAME_SANITIZER,
'nodename': _HOSTNAME_SANITIZER,
}
def get(key, default=''):
'''
Attempt to retrieve the named value from grains, if the named value is not
available return the passed default. The default return is an empty string.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict in grains looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
CLI Example:
.. code-block:: bash
salt '*' grains.get pkg:apache
'''
return salt.utils.traverse_dict_and_list(__grains__, key, default)
def items(sanitize=False):
'''
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
'''
if salt.utils.is_true(sanitize):
out = dict(__grains__)
for key, func in six.iteritems(_SANITIZERS):
if key in out:
out[key] = func(out[key])
return out
else:
return __grains__
def item(*args, **kwargs):
'''
Return one or more grains
CLI Example:
.. code-block:: bash
salt '*' grains.item os
salt '*' grains.item os osrelease oscodename
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.item host sanitize=True
'''
ret = {}
for arg in args:
try:
ret[arg] = __grains__[arg]
except KeyError:
pass
if salt.utils.is_true(kwargs.get('sanitize')):
for arg, func in six.iteritems(_SANITIZERS):
if arg in ret:
ret[arg] = func(ret[arg])
return ret
def ls(): # pylint: disable=C0103
'''
Return a list of all available grains
CLI Example:
.. code-block:: bash
salt '*' grains.ls
'''
return sorted(__grains__)
def filter_by(lookup_dict, grain='os_family', merge=None, default='default'):
'''
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
        }, default='Debian') %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
:param merge: A dictionary to merge with the ``lookup_dict`` before doing
the lookup. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
    :param default: default lookup_dict's key used if the grain does not
        exist or if the grain value has no match in lookup_dict.
.. versionadded:: 2014.1.0
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
'''
ret = lookup_dict.get(
__grains__.get(
grain, default),
lookup_dict.get(
default, None)
)
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException('filter_by merge argument must be a dictionary.')
else:
if ret is None:
ret = merge
else:
salt.utils.dictupdate.update(ret, merge)
return ret
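# Illustrative sketch (hypothetical grains): with __grains__ = {'os_family': 'Debian'},
# filter_by({'Debian': {'pkg': 'apache2'}, 'RedHat': {'pkg': 'httpd'}})
# returns {'pkg': 'apache2'}; an unmatched grain value falls back to the entry
# stored under the `default` key, or None if that key is absent.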
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_layers_twotone = void 0;
var ic_layers_twotone = {
"viewBox": "0 0 24 24",
"children": [{
"name": "path",
"attribs": {
"d": "M0 0h24v24H0V0z",
"fill": "none"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M6.26 9L12 13.47 17.74 9 12 4.53z",
"opacity": ".3"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M19.37 12.8l-7.38 5.74-7.37-5.73L3 14.07l9 7 9-7zM12 2L3 9l1.63 1.27L12 16l7.36-5.73L21 9l-9-7zm0 11.47L6.26 9 12 4.53 17.74 9 12 13.47z"
},
"children": []
}]
};
exports.ic_layers_twotone = ic_layers_twotone;
|
"""Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 19)
input_str = event.pattern_match.group(1)
if input_str == "jio":
await event.edit(input_str)
animation_chars = [
"`Connecting To Jio Network...`",
"`█ ▇ ▆ ▅ ▄ ▂ ▁`",
"`▒ ▇ ▆ ▅ ▄ ▂ ▁`",
"`▒ ▒ ▆ ▅ ▄ ▂ ▁`",
"`▒ ▒ ▒ ▅ ▄ ▂ ▁`",
"`▒ ▒ ▒ ▒ ▄ ▂ ▁`",
"`▒ ▒ ▒ ▒ ▒ ▂ ▁`",
"`▒ ▒ ▒ ▒ ▒ ▒ ▁`",
"`▒ ▒ ▒ ▒ ▒ ▒ ▒`",
"*Optimising Network...*",
"`▒ ▒ ▒ ▒ ▒ ▒ ▒`",
"`▁ ▒ ▒ ▒ ▒ ▒ ▒`",
"`▁ ▂ ▒ ▒ ▒ ▒ ▒`",
"`▁ ▂ ▄ ▒ ▒ ▒ ▒`",
"`▁ ▂ ▄ ▅ ▒ ▒ ▒`",
"`▁ ▂ ▄ ▅ ▆ ▒ ▒`",
"`▁ ▂ ▄ ▅ ▆ ▇ ▒`",
"`▁ ▂ ▄ ▅ ▆ ▇ █`",
"**Jio Network Boosted....**"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 19])
|
define([
'Underscore',
'views/topBarViewBase',
'text!templates/purchaseOrders/TopBarTemplate.html',
'constants'
], function (_, BaseView, ContentTopBarTemplate, CONSTANTS) {
'use strict';
var TopBarView = BaseView.extend({
el : '#top-bar',
contentType : CONSTANTS.PURCHASEORDERS,
contentHeader: 'Orders',
template : _.template(ContentTopBarTemplate)
});
return TopBarView;
});
|
"""mochad conftest."""
from tests.components.light.conftest import mock_light_profiles # noqa: F401
|
import _plotly_utils.basevalidators
class StartlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="startlinecolor", parent_name="carpet.baxis", **kwargs
):
super(StartlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
import React, { Component } from "react";
import { Link } from "react-router-dom";
import axios from "axios";
class App extends Component {
constructor(props) {
super(props);
this.state = {
name: "",
tasks: []
};
// bind
this.handleChange = this.handleChange.bind(this);
this.handleSubmit = this.handleSubmit.bind(this);
this.renderTasks = this.renderTasks.bind(this);
this.handleDelete = this.handleDelete.bind(this);
}
// handle change
handleChange(e) {
this.setState({
name: e.target.value
});
// console.log(e.target.value);
}
// handle submit
handleSubmit(e) {
e.preventDefault();
axios
.post("/tasks", {
name: this.state.name
})
.then(response => {
        // console.log('from handle submit', response);
this.setState({
tasks: [response.data, ...this.state.tasks],
name: ""
});
});
}
// render tasks
renderTasks() {
return this.state.tasks.map(task => (
<div key={task.id} className="media">
<div className="media-body">
<div>
{task.name}{" "}
<span className="text-muted">
<br />
by {task.user.name} |{" "}
{task.updated_at
.split(" ")
.slice(1)
.join(" ")}
</span>
<Link
to={`/${task.id}/edit`}
className="btn btn-sm btn-success float-right"
>
Update
</Link>
<button
onClick={() => this.handleDelete(task.id)}
className="btn btn-sm btn-warning float-right"
>
Delete
</button>
</div>
<hr />
</div>
</div>
));
}
// get all the tasks from backend
getTasks() {
axios.get("/tasks").then(response =>
this.setState({
tasks: [...response.data.tasks]
})
);
}
  // lifecycle method
  componentDidMount() {
this.getTasks();
}
// handle delete
handleDelete(id) {
// remove from local state
const isNotId = task => task.id !== id;
const updatedTasks = this.state.tasks.filter(isNotId);
this.setState({ tasks: updatedTasks });
// make delete request to the backend
axios.delete(`/tasks/${id}`);
}
render() {
return (
<div className="container">
<div className="row justify-content-center">
<div className="col-md-8">
<div className="card">
<div className="card-header">Create Task</div>
<div className="card-body">
<form onSubmit={this.handleSubmit}>
<div className="form-group">
<textarea
onChange={this.handleChange}
value={this.state.name}
className="form-control"
rows="5"
maxLength="255"
placeholder="Create a new task"
required
/>
</div>
<button
type="submit"
className="btn btn-primary"
>
Create Task
</button>
</form>
<hr />
{this.renderTasks()}
</div>
</div>
</div>
</div>
</div>
);
}
}
export default App;
|
const isProd = process.env.NODE_ENV === 'production';
module.exports = {
API_HOST: isProd ? 'https://example.com/1' : 'http://localhost:3000/1'
}
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
"""
A single training/test example for token classification.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
guid: str
words: List[str]
labels: Optional[List[str]]
@dataclass
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
"""
input_ids: List[int]
attention_mask: List[int]
token_type_ids: Optional[List[int]] = None
label_ids: Optional[List[int]] = None
class Split(Enum):
train = "train"
dev = "dev"
test = "test"
if is_torch_available():
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
class NerDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
# Use cross entropy ignore_index as padding label id so that only
# real label ids contribute to the loss later.
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
labels: List[str],
model_type: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
mode: Split = Split.train,
):
# Load data features from cache or dataset file
cached_features_file = os.path.join(
data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
examples = read_examples_from_file(data_dir, mode)
# TODO clean up all this to leverage built-in features of tokenizers
self.features = convert_examples_to_features(
examples,
labels,
max_seq_length,
tokenizer,
cls_token_at_end=bool(model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(tokenizer.padding_side == "left"),
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,
pad_token_label_id=self.pad_token_label_id,
)
logger.info(f"Saving features into cached file {cached_features_file}")
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFNerDataset:
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
pad_token_label_id: int = -1
# Use cross entropy ignore_index as padding label id so that only
# real label ids contribute to the loss later.
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
labels: List[str],
model_type: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
mode: Split = Split.train,
):
examples = read_examples_from_file(data_dir, mode)
# TODO clean up all this to leverage built-in features of tokenizers
self.features = convert_examples_to_features(
examples,
labels,
max_seq_length,
tokenizer,
cls_token_at_end=bool(model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(tokenizer.padding_side == "left"),
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,
pad_token_label_id=self.pad_token_label_id,
)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
self.dataset = tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
(
{"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
tf.TensorShape([None]),
),
)
else:
self.dataset = tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([None]),
),
)
def get_dataset(self):
return self.dataset
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
if isinstance(mode, Split):
mode = mode.value
file_path = os.path.join(data_dir, f"{mode}.txt")
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split(" ")
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
return examples
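# A minimal sketch (not part of the original file) of the whitespace-separated
# input that ``read_examples_from_file`` expects: one token per line with its
# label in the last column, and blank lines (or -DOCSTART- lines) separating
# sentences. The tokens below are illustrative only:
#
#     EU B-ORG
#     rejects O
#     German B-MISC
#     call O
#     . O
#
#     Peter B-PER
#     Blackburn I-PER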
def convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_seq_length: int,
tokenizer: PreTrainedTokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
) -> List[InputFeatures]:
""" Loads a data file into a list of `InputFeatures`
        `cls_token_at_end` defines the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
"""
# TODO clean up all this to leverage built-in features of tokenizers
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10_000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
            # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(word_tokens) > 0:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_special_tokens_to_add()
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
print("Input ids", len(input_ids))
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
print("max seq len", max_seq_length)
print("Label ids", len(label_ids))
print("Input ids", len(input_ids))
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
)
)
return features
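# Illustrative sketch (an assumption, not from the original file) of the label
# alignment performed above: only the first sub-token of each word keeps the
# real label id, the remaining sub-tokens receive ``pad_token_label_id``.
#
#     words:     ["Washington", "visits"]      labels: ["B-LOC", "O"]
#     tokens:    ["Wash", "##ington", "visits"]
#     label_ids: [label_map["B-LOC"], pad_token_label_id, label_map["O"]]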
def get_labels(path: str) -> List[str]:
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
|
import store from './../store';
import axios from 'axios';
axios.defaults.withCredentials = true;
// axios.defaults.baseURL = process.env.MIX_APP_URL;
const client = axios.create({
baseURL: '/api/v1/',
});
client.defaults.headers.common['X-Requested-With'] = 'XMLHttpRequest';
const configHandler = (config) => {
store.dispatch('incrementLoading');
return config;
};
const successHandler = (response) => {
store.dispatch('decrementLoading');
return response.data;
};
const errorHandler = (error) => {
store.dispatch('decrementLoading');
console.log(error);
let messageData = Object.create(null);
messageData.title = 'Ocurrió un error en la solicitud!';
messageData.content = '';
if (error.response) {
if(error.response.status === 401) {
messageData.title = 'No autorizado';
}
if(error.response.status === 403) {
messageData.title = 'No autorizado para la acción solicitada.';
}
if(error.response.status === 404) {
messageData.title = 'Dirección no encontrada!';
}
if(error.response.status === 422) {
messageData.title = 'Revisa los siguientes datos!';
messageData.content = error.response.data.errors;
}
} else if (error.request) {
messageData.title = 'Sin respuesta del servidor!';
}
return Promise.reject(messageData)
};
client.interceptors.request.use(
config => configHandler(config),
error => console.log(error)
);
client.interceptors.response.use(
response => successHandler(response),
error => errorHandler(error)
);
export default client;
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.tile = P.Tile()
def construct(self, x):
return self.tile(x, (1, 4))
# Give each dtype its own module-level array so every test exercises the dtype it names.
arr_x_int32 = np.array([[0], [1], [2], [3]]).astype(np.int32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net():
    tile = Net()
    print(arr_x_int32)
    output = tile(Tensor(arr_x_int32))
    print(output.asnumpy())
arr_x_float64 = np.array([[0], [1], [2], [3]]).astype(np.float64)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net_float64():
    tile = Net()
    print(arr_x_float64)
    output = tile(Tensor(arr_x_float64))
    print(output.asnumpy())
arr_x_bool = np.array([[0], [1], [2], [3]]).astype(np.bool_)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net_bool():
    tile = Net()
    print(arr_x_bool)
    output = tile(Tensor(arr_x_bool))
    print(output.asnumpy())
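# Hand-worked reference (not part of the original tests): tiling the (4, 1)
# column [[0], [1], [2], [3]] with multiples (1, 4) repeats it four times
# along the second axis, producing a (4, 4) array:
#
#     [[0, 0, 0, 0],
#      [1, 1, 1, 1],
#      [2, 2, 2, 2],
#      [3, 3, 3, 3]]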
|
import math
import cv2 as cv
import numpy as np
from PIL import Image
from align_faces import get_reference_facial_points, warp_and_crop_face
from config import image_h, image_w
from mtcnn.detector import detect_faces
def align_face(img_fn, facial5points):
raw = cv.imread(img_fn, True)
facial5points = np.reshape(facial5points, (2, 5))
crop_size = (image_h, image_w)
default_square = True
inner_padding_factor = 0.25
outer_padding = (0, 0)
output_size = (image_h, image_w)
# get the reference 5 landmarks position in the crop settings
reference_5pts = get_reference_facial_points(
output_size, inner_padding_factor, outer_padding, default_square)
# dst_img = warp_and_crop_face(raw, facial5points)
dst_img = warp_and_crop_face(raw, facial5points, reference_pts=reference_5pts, crop_size=crop_size)
return dst_img
def get_face_all_attributes(full_path):
try:
img = Image.open(full_path).convert('RGB')
bounding_boxes, landmarks = detect_faces(img)
if len(landmarks) > 0:
i = select_central_face(img.size, bounding_boxes)
return True, [bounding_boxes[i]], [landmarks[i]]
except KeyboardInterrupt:
raise
except:
pass
return False, None, None
def select_central_face(im_size, bounding_boxes):
width, height = im_size
nearest_index = -1
nearest_distance = 100000
for i, b in enumerate(bounding_boxes):
x_box_center = (b[0] + b[2]) / 2
        y_box_center = (b[1] + b[3]) / 2
x_img = width / 2
y_img = height / 2
distance = math.sqrt((x_box_center - x_img) ** 2 + (y_box_center - y_img) ** 2)
if distance < nearest_distance:
nearest_distance = distance
nearest_index = i
return nearest_index
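# A minimal usage sketch (the image path below is hypothetical):
#
#     found, boxes, landmarks = get_face_all_attributes("sample.jpg")
#     if found:
#         aligned = align_face("sample.jpg", landmarks[0])
#         cv.imwrite("sample_aligned.jpg", aligned)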
|
class Response:
def __init__(self, status_code: int = 200, headers: dict = None, body: str = ''):
self.status_code = status_code
self.headers = {}
self.body = b''
self._set_base_headers()
if headers is not None:
self._update_headers(headers)
self._set_body(body)
def _set_base_headers(self):
self.headers = {
'Content-Type': 'text/html; charset=utf-8',
            'Content-Length': '0'
}
def _set_body(self, raw_body: str):
self.body = raw_body.encode('utf-8')
self._update_headers(headers={
'Content-Length': str(len(self.body))
})
def _update_headers(self, headers: dict):
self.headers.update(headers)
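# A minimal usage sketch (illustrative only):
#
#     resp = Response(status_code=404, body='<h1>Not Found</h1>')
#     assert resp.body == b'<h1>Not Found</h1>'
#     assert resp.headers['Content-Length'] == str(len(resp.body))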
|
#!python
# pyright: reportUnknownVariableType=false, reportUnknownMemberType=false
"""
This is the final pre-training application that takes the original images
and cuts them into 224x224 sub-images, saving the tens of thousands
of sub-images into `true` and `false` sub-folders according to the
tagging data from `animals.json`
"""
from dataclasses import dataclass
import cv2
import random
import os
from PIL import Image as pilImage
import piexif
from pathlib import Path
from typing import Any, Callable, Union
from src import model
from src import data_serialization_json as ds
from src import grouping
from src import sub_image_regions as sir
from tagger_ui.ui_model.timer import Timer
print(f"OpenCV version: {cv2.__version__}")
# The image output directory
out_dir = r"D:\data\NRSI\__ai_training_images"
# The image dimensions that we'll produce for training an AI
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
BLOCK_SIZE = model.Size2d(IMAGE_WIDTH, IMAGE_HEIGHT)
def createOutputFilePath(
out_dir: str,
image_info: model.ImageInfo,
region: model.TaggedRegion2d,
rotation: str = "",
) -> str:
"""Creates the complete output file path for the given tagged region
Args:
out_dir (str): The output folder where we want to save the sub-images
image_info (model.ImageInfo): The original input image
region (model.TaggedRegion): The tag result
Returns:
str: The new complete output file path
"""
path = Path(image_info.filePath)
sub_folder = "true" if region.tag else "false"
# Pad the x/y dimension to keep the images sort of sorted
x = str(region.x).zfill(4)
y = str(region.y).zfill(4)
# Now we can build the complete output file path (including the file name and ext)
if rotation:
rotation = "_" + rotation
dest_file_name = f"{path.stem}_@{x}x{y}{rotation}{path.suffix}"
result = os.path.join(out_dir, sub_folder, dest_file_name)
return result
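# Worked example (an illustration, not from the original file): for an input
# image "IMG_0001.JPG", a region tagged True at x=448, y=224 and rotation
# "flipped_x", the function above builds
#     <out_dir>/true/IMG_0001_@0448x0224_flipped_x.JPG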
# The callable (function) type that the `breakUpImageIntoTaggedSubImages` function
# uses to save either positive or negatively tagged images
SaveSubImageFn = Callable[[str, model.ImageInfo, model.TaggedRegion2d, Any], None]
def saveTaggedSubImage(
out_dir: str,
image_info: model.ImageInfo,
region: model.TaggedRegion2d,
sub_image: Any,
) -> None:
"""Save a positively tagged sub-image
In the future this function will likely flip horizontally (and possibly vertically as well)
to increase the number of positively tagged images.
Args:
out_dir (str): The top-level output folder
image_info (model.ImageInfo): The original large image information
region (model.TaggedRegion): The tagged information about the sub-image being saved
sub_image (Any): The actual sub-image data
"""
output_file = createOutputFilePath(out_dir, image_info, region)
cv2.imwrite(output_file, sub_image)
def saveUntaggedSubImage(
out_dir: str,
image_info: model.ImageInfo,
region: model.TaggedRegion2d,
sub_image: Any,
) -> None:
"""Save a sub-image that was tagged False
In the future this function will likely flip horizontally (and possibly vertically as well)
to increase the number of positively tagged images.
Args:
out_dir (str): The top-level output folder
image_info (model.ImageInfo): The original large image information
region (model.TaggedRegion): The tagged information about the sub-image being saved
sub_image (Any): The actual sub-image data
"""
output_file = createOutputFilePath(out_dir, image_info, region)
cv2.imwrite(output_file, sub_image)
def create_image_exif_metadata(image_info: model.ImageInfo) -> bytes:
"""Create basic image EXIF metadata for the given image information.
Args:
image_info (model.ImageInfo): The original image path is saved into the EXIF metadata
Returns:
        bytes: The byte array containing the encoded EXIF metadata
"""
zeroth_ifd = {
piexif.ImageIFD.Make: "Stealth Cam", # NSCI seems to use this brand of camera
piexif.ImageIFD.Software: "animal-ai", # This software!
}
first_ifd = {}
exif_ifd = {
piexif.ExifIFD.ImageUniqueID: image_info.filePath,
}
gps_ifd = {}
# exif_dict = {"Exif":exif_ifd }
exif_dict = {
"0th": zeroth_ifd,
"Exif": exif_ifd,
"GPS": gps_ifd,
"1st": first_ifd,
}
exif_bytes = piexif.dump(exif_dict)
return exif_bytes
def create_directory_if_not_exists(dir: str) -> None:
"""Create the given directory if it doesn't already exist
Args:
dir (str): The full path to the directory to create
"""
if not os.path.exists(dir):
os.makedirs(dir)
@dataclass(frozen=True)
class OutputImageInfo:
"""The required information to save a resulting sub-image"""
out_dir: str
image_info: model.ImageInfo
sub_region: model.TaggedRegion2d
def save_sub_image(
output_info: OutputImageInfo, sub_image_diff: Any, rotation: str = ""
) -> None:
"""Saves a sub-image
Args:
        output_info (OutputImageInfo): The output folder, original image information and
            tagged sub-region describing where this sub-image comes from and where to save it
sub_image_diff (Any): The actual sub-image pre-calculated diff to save
rotation (str): The optional rotational information for the output file name
"""
assert output_info
# Determine which folder to save the file in
output_file = createOutputFilePath(
output_info.out_dir, output_info.image_info, output_info.sub_region, rotation
)
# Create the image metadata (which contains the original image source file path)
exif_metadata_bytes = create_image_exif_metadata(output_info.image_info)
# Create a PIL/Pillow image from our OpenCV2 image (so that we can save it with metadata)
pillow_image = pilImage.fromarray(sub_image_diff)
pillow_image.save(output_file, format="JPEG", exif=exif_metadata_bytes)
# cv2.imwrite(output_file, sub_image_diff) # Can't save metadata with OpenCV2
def save_sub_image_tagged_true(
output_info: OutputImageInfo, sub_image_diff: Any
) -> None:
"""
Saves a positively (true) tagged sub-image.
To generate more positive examples this function also saves copies of the sub-region
    flipped and rotated in various ways.
See:
https://note.nkmk.me/en/python-opencv-numpy-rotate-flip
Args:
        output_info (OutputImageInfo): The output folder, original image information and
            tagged sub-region for the sub-image being saved
sub_image_diff (Any): The actual sub-image pre-calculated diff to save
"""
# First save the original image
save_sub_image(output_info, sub_image_diff)
# Rotate the image 90° clockwise
img_rotate_90_c = cv2.rotate(sub_image_diff, cv2.ROTATE_90_CLOCKWISE)
save_sub_image(output_info, img_rotate_90_c, "rotate_90°c")
# Rotate the image 90° counter-clockwise
img_rotate_90_cc = cv2.rotate(sub_image_diff, cv2.ROTATE_90_COUNTERCLOCKWISE)
save_sub_image(output_info, img_rotate_90_cc, "rotate_90°cc")
# Same as flipping in both x and y axis
# img_rotate_180 = cv2.rotate(sub_image_diff, cv2.ROTATE_180)
# save_sub_image(output_info, img_rotate_180, "rotate_180°")
# Flip the image along the x axis
img_flip_x = cv2.flip(sub_image_diff, 1) # > 0 is flip horizontally
save_sub_image(output_info, img_flip_x, "flipped_x")
# Flip the image along the y axis
img_flip_y = cv2.flip(sub_image_diff, 0) # = 0 is flip vertically
save_sub_image(output_info, img_flip_y, "flipped_y")
# Flip the image along both the x and y axis
img_flip_xy = cv2.flip(sub_image_diff, -1) # < 0 is flip both x and y
save_sub_image(output_info, img_flip_xy, "flipped_xy")
def save_sub_image_tagged_false(
output_info: OutputImageInfo,
sub_image_diff: Any,
) -> None:
"""
    Saves a negatively (false) tagged sub-image.
To generate fewer negative examples for AI training this function may or may not
save the given sub-image.
Currently it only has a 7.5% chance of actually saving the negative example
Args:
        output_info (OutputImageInfo): The output folder, original image information and
            tagged sub-region for the sub-image being saved
sub_image_diff (Any): The actual sub-image pre-calculated diff to save
"""
value: float = random.random()
if value < 0.075:
save_sub_image(output_info, sub_image_diff)
def main():
"""Process the main images `.json` data file to create 224x224 training sub-images."""
# Load the list of animals from the animals JSON file
images_data_file: model.ImagesCollection = ds.loadImagesCollectionFromJson(
"animals.json"
)
# Group them
image_groups: list[list[model.ImageInfo]] = grouping.groupImages(
images_data_file.images
)
    # Where we will save the 224x224 training images - create true/false sub dirs if required
print("Output folder: ", out_dir)
create_directory_if_not_exists(out_dir)
create_directory_if_not_exists(os.path.join(out_dir, "true"))
create_directory_if_not_exists(os.path.join(out_dir, "false"))
# For every group
group_count = 0
for animal_group in image_groups:
# For each group we want to track the previous
group_count += 1
print(f"Group #{group_count} of {len(image_groups)}")
previous_image: Union[Any, None] = None
for image_info in animal_group:
# Load the image and grab its dimensions
current_image: Any = cv2.imread(image_info.filePath)
height, width = current_image.shape[0], current_image.shape[1]
image_size = model.Size2d(width, height)
# Check that we have a previous image to operate on
if previous_image is None:
previous_image = current_image
continue
# Check that the current image is the same shape as the previous image
# (for some reason the images are sometimes different shapes)
if previous_image.shape != current_image.shape:
print("Different image sizes - skipping")
previous_image = current_image
continue
# Calculate the difference with the previous image
print("Processing: ", image_info.filePath)
image_diff = current_image - previous_image
# Create the sub-regions that we need to break the large image into
sub_image_regions = sir.createSubImageRegions(BLOCK_SIZE, image_size)
sub_image_tagged_regions = sir.createSubImageTaggedRegions(
sub_image_regions, image_info.regions
)
# Loop over each sub-image region
for sub_region in sub_image_tagged_regions:
# Numpy uses row, col notation instead of col, row
# From: https://stackoverflow.com/questions/67353650/extract-part-of-a-image-using-opencv
# or: https://stackoverflow.com/questions/15589517/how-to-crop-an-image-in-opencv-using-python
# or: https://stackoverflow.com/questions/9084609/how-to-copy-a-image-region-using-opencv-in-python
sub_image_diff = image_diff[
sub_region.y1 : sub_region.y2, sub_region.x1 : sub_region.x2
]
output_info = OutputImageInfo(out_dir, image_info, sub_region)
if sub_region.tag:
save_sub_image_tagged_true(output_info, sub_image_diff)
else:
save_sub_image_tagged_false(output_info, sub_image_diff)
# Update the previous image for the next image subtraction
previous_image = current_image
if __name__ == "__main__":
with Timer("Extract tagged sub-images"):
main()
|
/*
* Copyright 2015-2019 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
import * as types from '../constants/action-types';
const initialState = {
isLoading: false,
trace: [],
};
const trace = (state = initialState, action) => {
switch (action.type) {
case types.FETCH_TRACE_REQUEST:
return {
...state,
isLoading: true,
trace: [], /* Initialize trace */
};
case types.FETCH_TRACE_SUCCESS:
return {
...state,
isLoading: false,
trace: action.trace,
};
case types.FETCH_TRACE_FAILURE:
return {
...state,
isLoading: false,
trace: [],
};
default:
return state;
}
};
export default trace;
|
/*!
ARIA Radio Generator R2.3
Copyright 2010-2016 Bryan Garaventa (WhatSock.com)
Part of AccDC, a Cross-Browser JavaScript accessibility API, distributed under the terms of the Open Source Initiative OSI - MIT License
*/
$A.RadioGroup=function(i,e,t,a,n){function r(i,e){isNaN(i)&&(i=0);for(var t=0;t<d.radios.length;t++)$A.setAttr(d.radios[t],{tabindex:"-1","aria-checked":"false"});$A.setAttr(d.radios[i],{tabindex:"0","aria-checked":"true"}),e&&d.radios[i].focus(),d.value=d.radios[i].id,d.index=i,n&&"function"==typeof n&&n.apply(d,[d.radios[i],d.radios])}var o="string"==typeof i?document.getElementById(i):i,s={},d=this;$A.setAttr(o,"aria-label",a);var c=[];d.radios=$A.query(e,o,function(i,e){s[e.id]=i,s.max=i+1,$A.setAttr(e,{tabindex:0===i&&-1===t?"0":"-1","aria-checked":"false","aria-posinset":s.max}),c.push(e.id)}),$A.setAttr(o,"aria-owns",c.join(" "));for(var f=0;f<d.radios.length;f++)$A.setAttr(d.radios[f],"aria-setsize",s.max);$A.unbind(d.radios,"click keydown"),$A.bind(d.radios,{click:function(i){r(s[this.id]),i.preventDefault()},keydown:function(i){var e=i.which||i.keyCode;37==e||38==e?(i.preventDefault(),i.stopPropagation(),isNaN(d.index)&&(d.index=0),0<d.index?r(d.index-1,!0):r(s.max-1,!0)):39==e||40==e?(i.preventDefault(),i.stopPropagation(),isNaN(d.index)&&(d.index=0),d.index<s.max-1?r(d.index+1,!0):r(0,!0)):13!=e&&32!=e||(i.preventDefault(),i.stopPropagation(),$A.trigger(this,"click"))}}),d.set=function(i){"string"==typeof i&&!i||"number"==typeof i&&i<0||"object"==typeof i||r("string"==typeof i?s[i]:i)},-1!==t&&r(isNaN(t)?0:t)};
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports["default"] = void 0;
var _createIcon = _interopRequireDefault(require("./util/createIcon"));
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
var _default = (0, _createIcon["default"])('M14 2H6C4.89 2 4 2.9 4 4V20C4 21.11 4.89 22 6 22H18C19.11 22 20 21.11 20 20V8L14 2M18 20H6V4H13V9H18V20M9.54 15.65L11.63 17.74L10.35 19L7 15.65L10.35 12.3L11.63 13.56L9.54 15.65M17 15.65L13.65 19L12.38 17.74L14.47 15.65L12.38 13.56L13.65 12.3L17 15.65Z');
exports["default"] = _default;
|
//===--- Cuda.h - Cuda ToolChain Implementations ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CUDA_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CUDA_H
#include "SYCL.h"
#include "clang/Basic/Cuda.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/VersionTuple.h"
#include <bitset>
#include <set>
#include <vector>
namespace clang {
namespace driver {
/// A class to find a viable CUDA installation
class CudaInstallationDetector {
private:
const Driver &D;
bool IsValid = false;
CudaVersion Version = CudaVersion::UNKNOWN;
std::string InstallPath;
std::string BinPath;
std::string LibPath;
std::string LibDevicePath;
std::string IncludePath;
llvm::StringMap<std::string> LibDeviceMap;
// CUDA architectures for which we have raised an error in
// CheckCudaVersionSupportsArch.
mutable std::bitset<(int)CudaArch::LAST> ArchsWithBadVersion;
public:
CudaInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
const llvm::opt::ArgList &Args);
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const;
/// Emit an error if Version does not support the given Arch.
///
/// If either Version or Arch is unknown, does not emit an error. Emits at
/// most one error per Arch.
void CheckCudaVersionSupportsArch(CudaArch Arch) const;
/// Check whether we detected a valid Cuda install.
bool isValid() const { return IsValid; }
/// Print information about the detected CUDA installation.
void print(raw_ostream &OS) const;
/// Get the detected Cuda install's version.
CudaVersion version() const {
return Version == CudaVersion::NEW ? CudaVersion::PARTIALLY_SUPPORTED
: Version;
}
/// Get the detected Cuda installation path.
StringRef getInstallPath() const { return InstallPath; }
/// Get the detected path to Cuda's bin directory.
StringRef getBinPath() const { return BinPath; }
/// Get the detected Cuda Include path.
StringRef getIncludePath() const { return IncludePath; }
/// Get the detected Cuda library path.
StringRef getLibPath() const { return LibPath; }
/// Get the detected Cuda device library path.
StringRef getLibDevicePath() const { return LibDevicePath; }
/// Get libdevice file for given architecture
std::string getLibDeviceFile(StringRef Gpu) const {
return LibDeviceMap.lookup(Gpu);
}
void WarnIfUnsupportedVersion();
};
namespace tools {
namespace NVPTX {
// Run ptxas, the NVPTX assembler.
class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC) : Tool("NVPTX::Assembler", "ptxas", TC) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
// Runs fatbinary, which combines GPU object files ("cubin" files) and/or PTX
// assembly into a single output file.
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const ToolChain &TC) : Tool("NVPTX::Linker", "fatbinary", TC) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
public:
OpenMPLinker(const ToolChain &TC)
: Tool("NVPTX::OpenMPLinker", "nvlink", TC) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
class LLVM_LIBRARY_VISIBILITY SYCLLinker : public Linker {
public:
SYCLLinker(const ToolChain &TC) : Linker(TC) {}
Tool* GetSYCLToolChainLinker() const {
if (!SYCLToolChainLinker)
SYCLToolChainLinker.reset(new SYCL::Linker(getToolChain()));
return SYCLToolChainLinker.get();
}
private:
mutable std::unique_ptr<Tool> SYCLToolChainLinker;
};
} // end namespace NVPTX
} // end namespace tools
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY CudaToolChain : public ToolChain {
public:
CudaToolChain(const Driver &D, const llvm::Triple &Triple,
const ToolChain &HostTC, const llvm::opt::ArgList &Args,
const Action::OffloadKind OK);
const llvm::Triple *getAuxTriple() const override {
return &HostTC.getTriple();
}
std::string getInputFilename(const InputInfo &Input) const override;
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
llvm::DenormalMode getDefaultDenormalModeForType(
const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
const llvm::fltSemantics *FPType = nullptr) const override;
// Never try to use the integrated assembler with CUDA; always fork out to
// ptxas.
bool useIntegratedAs() const override { return false; }
bool isCrossCompiling() const override { return true; }
bool isPICDefault() const override { return false; }
bool isPIEDefault() const override { return false; }
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
bool supportsDebugInfoOption(const llvm::opt::Arg *A) const override;
void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
const llvm::opt::ArgList &Args) const override;
bool IsMathErrnoDefault() const override { return false; }
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
SanitizerMask getSupportedSanitizers() const override;
VersionTuple
computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const override;
unsigned GetDefaultDwarfVersion() const override { return 2; }
// NVPTX supports only DWARF2.
unsigned getMaxDwarfVersion() const override { return 2; }
Tool *SelectTool(const JobAction &JA) const override;
const ToolChain &HostTC;
CudaInstallationDetector CudaInstallation;
protected:
Tool *buildAssembler() const override; // ptxas
Tool *buildLinker() const override; // fatbinary (ok, not really a linker)
private:
const Action::OffloadKind OK;
};
} // end namespace toolchains
} // end namespace driver
} // end namespace clang
#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CUDA_H
|
import logger from "../lib/logger";
export default (client) => {
  logger.success(`${client.user.tag} successfully online!`);
};
|
from src.augmented_reality_service import AugmentedRealityService
if __name__ == "__main__":
# Starting AR Service
ar_service = AugmentedRealityService()
ar_service.set_service_parameter_json("data/aruco_params.json")
#ar_service.set_service_parameter_json("data/nft_params.json")
ar_service.run_service()
ar_service.get_output()
|
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
#Load-Model
classifier = pickle.load(open('dtrmodel.pkl','rb'))
#Page-Design
title = '<h2 style="font-family:arial; color:black; font-size: 47px;text-align:center;"><b>Pre-owned Car Price Prediction</b></h2>'
st.markdown(title,unsafe_allow_html=True)
title2 = '<p style="font-family:Sans-serif; color:#a1a6ad; font-size: 18px;text-align:center;"><b>Not Sure at what price you want to sell your car?<br>Use our tool to predict the selling price of your car</b></p>'
st.markdown(title2,unsafe_allow_html=True)
title1 = '<h4 style="font-family:arial; color:black; font-size: 30px;text-align:center;"><b>Parameters</b></h4>'
st.markdown(title1,unsafe_allow_html=True)
#Input-Fields
col1,col2 = st.columns(2)
with col1:
cp = st.text_input("Current Price (eg: 4.56)")
with col2:
kms = st.text_input('Kilometer Driven')
col3,col4 = st.columns(2)
with col3:
seller = st.selectbox('Select Seller Type',('Dealer','Individual'))
with col4:
trans = st.selectbox('Select Transmission Type',('Manual','Automatic'))
col5,col6 = st.columns(2)
with col5:
own = st.selectbox('Select Owner Type',('First (0)','Second (1)','Third (2)'))
with col6:
age = st.text_input("Age Difference (Current Year - MFG Year of Car)")
#Encoding
if seller == 'Dealer':
y = 0
else:
y = 1
if trans == 'Manual':
z = 1
else:
z = 0
if own == 'First (0)':
m = 0
elif own == 'Second (1)':
m = 1
else:
m = 3
#Prediction
col10, col11, col12, col13, col14,col15,col16 = st.columns(7)
if col13.button('Submit'):
predict = classifier.predict([[cp,kms,y,z,m,age]])[0]
res = 'The Price of Car : {} (in Lakhs)'.format(round(predict,3))
st.success(res)
|
# Copyright Contributors to the Tapqir project.
# SPDX-License-Identifier: Apache-2.0
import os
import setuptools
import versioneer
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
VERSION = """
# This file is auto-generated with the version information during setup.py installation.
__version__ = '{}'
"""
with open("README.rst", "r") as fh:
long_description = fh.read()
# examples/tutorials
EXTRAS_REQUIRE = [
"notebook",
]
# tests
TEST_REQUIRE = [
"black[jupyter]",
"flake8",
"isort",
"pytest",
"pytest-xvfb",
]
# docs
DOCS_REQUIRE = [
"IPython",
"nbsphinx>=0.8.5",
"pydata_sphinx_theme",
"sphinx",
"sphinx-autodoc-typehints",
"sphinx-click",
"sphinx-gallery",
"sphinx-panels",
]
setuptools.setup(
name="tapqir",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="Yerdos Ordabayev",
author_email="ordabayev@brandeis.edu",
description="Bayesian analysis of the single-molecule image data",
long_description=long_description,
    long_description_content_type="text/x-rst",
url="https://tapqir.readthedocs.io",
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=[
"cmake>=3.18",
"colorama",
"funsor==0.4.1",
"future",
"ipyfilechooser",
"ipympl",
"ipywidgets",
"matplotlib",
"pandas",
"pykeops==1.5",
"pyro-ppl>=1.7.0",
"pyyaml>=6.0",
"scikit-learn",
"scipy",
"tensorboard",
"typer",
"voila",
],
extras_require={
"extras": EXTRAS_REQUIRE,
"test": EXTRAS_REQUIRE + TEST_REQUIRE,
"docs": DOCS_REQUIRE,
"dev": EXTRAS_REQUIRE + TEST_REQUIRE + DOCS_REQUIRE,
},
keywords="image-classification probabilistic-programming cosmos pyro",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
],
python_requires=">=3.7",
entry_points={
"console_scripts": ["tapqir=tapqir.main:app", "tapqir-gui=tapqir.gui:app"],
},
)
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import six
import re, copy, os, subprocess
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields, default_fields, data_fieldtypes, table_fields
from frappe.model.document import Document
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
from frappe.desk.notifications import delete_notification_count_for
from frappe.modules import make_boilerplate, get_doc_path
from frappe.database.schema import validate_column_name, validate_column_length
from frappe.model.docfield import supports_translation
from frappe.modules.import_file import get_file_path
from six import iteritems
import frappe.website.render
import json
class InvalidFieldNameError(frappe.ValidationError): pass
class UniqueFieldnameError(frappe.ValidationError): pass
class IllegalMandatoryError(frappe.ValidationError): pass
class DoctypeLinkError(frappe.ValidationError): pass
class WrongOptionsDoctypeLinkError(frappe.ValidationError): pass
class HiddenAndMandatoryWithoutDefaultError(frappe.ValidationError): pass
class NonUniqueError(frappe.ValidationError): pass
class CannotIndexedError(frappe.ValidationError): pass
class CannotCreateStandardDoctypeError(frappe.ValidationError): pass
form_grid_templates = {
"fields": "templates/form_grid/fields.html"
}
class DocType(Document):
def get_feed(self):
return self.name
def validate(self):
"""Validate DocType before saving.
- Check if developer mode is set.
- Validate series
- Check fieldnames (duplication etc)
- Clear permission table for child tables
- Add `amended_from` and `amended_by` if Amendable"""
self.check_developer_mode()
self.validate_name()
if self.issingle:
self.allow_import = 0
self.is_submittable = 0
self.istable = 0
elif self.istable:
self.allow_import = 0
self.permissions = []
self.scrub_field_names()
self.set_default_in_list_view()
self.set_default_translatable()
self.validate_series()
self.validate_document_type()
validate_fields(self)
if self.istable:
# no permission records for child table
self.permissions = []
else:
validate_permissions(self)
self.make_amendable()
self.validate_website()
if not self.is_new():
self.before_update = frappe.get_doc('DocType', self.name)
if not self.is_new():
self.setup_fields_to_fetch()
if self.default_print_format and not self.custom:
frappe.throw(_('Standard DocType cannot have default print format, use Customize Form'))
def set_default_in_list_view(self):
'''Set default in-list-view for first 4 mandatory fields'''
if not [d.fieldname for d in self.fields if d.in_list_view]:
cnt = 0
for d in self.fields:
if d.reqd and not d.hidden and not d.fieldtype in table_fields:
d.in_list_view = 1
cnt += 1
if cnt == 4: break
def set_default_translatable(self):
'''Ensure that non-translatable never will be translatable'''
for d in self.fields:
if d.translatable and not supports_translation(d.fieldtype):
d.translatable = 0
def check_developer_mode(self):
"""Throw exception if not developer mode or via patch"""
if frappe.flags.in_patch or frappe.flags.in_test:
return
if not frappe.conf.get("developer_mode") and not self.custom:
frappe.throw(_("Not in Developer Mode! Set in site_config.json or make 'Custom' DocType."), CannotCreateStandardDoctypeError)
def setup_fields_to_fetch(self):
'''Setup query to update values for newly set fetch values'''
try:
old_meta = frappe.get_meta(frappe.get_doc('DocType', self.name), cached=False)
old_fields_to_fetch = [df.fieldname for df in old_meta.get_fields_to_fetch()]
except frappe.DoesNotExistError:
old_fields_to_fetch = []
new_meta = frappe.get_meta(self, cached=False)
self.flags.update_fields_to_fetch_queries = []
if set(old_fields_to_fetch) != set([df.fieldname for df in new_meta.get_fields_to_fetch()]):
for df in new_meta.get_fields_to_fetch():
if df.fieldname not in old_fields_to_fetch:
link_fieldname, source_fieldname = df.fetch_from.split('.', 1)
link_df = new_meta.get_field(link_fieldname)
if frappe.db.db_type == 'postgres':
update_query = '''
UPDATE `tab{doctype}`
SET `{fieldname}` = source.`{source_fieldname}`
FROM `tab{link_doctype}` as source
WHERE `{link_fieldname}` = source.name
AND ifnull(`{fieldname}`, '')=''
'''
else:
update_query = '''
UPDATE `tab{doctype}` as target
INNER JOIN `tab{link_doctype}` as source
ON `target`.`{link_fieldname}` = `source`.`name`
SET `target`.`{fieldname}` = `source`.`{source_fieldname}`
WHERE ifnull(`target`.`{fieldname}`, '')=""
'''
self.flags.update_fields_to_fetch_queries.append(update_query.format(
link_doctype = link_df.options,
source_fieldname = source_fieldname,
doctype = self.name,
fieldname = df.fieldname,
link_fieldname = link_fieldname
)
)
def update_fields_to_fetch(self):
'''Update fetch values based on queries setup'''
if self.flags.update_fields_to_fetch_queries and not self.issingle:
for query in self.flags.update_fields_to_fetch_queries:
frappe.db.sql(query)
def validate_document_type(self):
if self.document_type=="Transaction":
self.document_type = "Document"
if self.document_type=="Master":
self.document_type = "Setup"
def validate_website(self):
"""Ensure that website generator has field 'route'"""
if self.has_web_view:
# route field must be present
if not 'route' in [d.fieldname for d in self.fields]:
frappe.throw(_('Field "route" is mandatory for Web Views'), title='Missing Field')
# clear website cache
frappe.website.render.clear_cache()
def change_modified_of_parent(self):
"""Change the timestamp of parent DocType if the current one is a child to clear caches."""
if frappe.flags.in_import:
return
parent_list = frappe.db.get_all('DocField', 'parent',
dict(fieldtype=['in', frappe.model.table_fields], options=self.name))
for p in parent_list:
frappe.db.sql('UPDATE `tabDocType` SET modified=%s WHERE `name`=%s', (now(), p.parent))
def scrub_field_names(self):
"""Sluggify fieldnames if not set from Label."""
restricted = ('name','parent','creation','modified','modified_by',
'parentfield','parenttype','file_list', 'flags', 'docstatus')
for d in self.get("fields"):
if d.fieldtype:
if (not getattr(d, "fieldname", None)):
if d.label:
d.fieldname = d.label.strip().lower().replace(' ','_')
if d.fieldname in restricted:
d.fieldname = d.fieldname + '1'
if d.fieldtype=='Section Break':
d.fieldname = d.fieldname + '_section'
elif d.fieldtype=='Column Break':
d.fieldname = d.fieldname + '_column'
else:
d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)
else:
if d.fieldname in restricted:
frappe.throw(_("Fieldname {0} is restricted").format(d.fieldname), InvalidFieldNameError)
d.fieldname = re.sub('''['",./%@()<>{}]''', '', d.fieldname)
# fieldnames should be lowercase
d.fieldname = d.fieldname.lower()
# unique is automatically an index
if d.unique: d.search_index = 0
def validate_series(self, autoname=None, name=None):
"""Validate if `autoname` property is correctly set."""
if not autoname: autoname = self.autoname
if not name: name = self.name
if not autoname and self.get("fields", {"fieldname":"naming_series"}):
self.autoname = "naming_series:"
# validate field name if autoname field:fieldname is used
# Create unique index on autoname field automatically.
if autoname and autoname.startswith('field:'):
field = autoname.split(":")[1]
if not field or field not in [ df.fieldname for df in self.fields ]:
frappe.throw(_("Invalid fieldname '{0}' in autoname".format(field)))
else:
for df in self.fields:
if df.fieldname == field:
df.unique = 1
break
if autoname and (not autoname.startswith('field:')) \
and (not autoname.startswith('eval:')) \
and (not autoname.lower() in ('prompt', 'hash')) \
and (not autoname.startswith('naming_series:')) \
and (not autoname.startswith('format:')):
prefix = autoname.split('.')[0]
used_in = frappe.db.sql("""
SELECT `name`
FROM `tabDocType`
WHERE `autoname` LIKE CONCAT(%s, '.%%')
AND `name`!=%s
""", (prefix, name))
if used_in:
frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))
def on_update(self):
"""Update database schema, make controller templates if `custom` is not set and clear cache."""
self.delete_duplicate_custom_fields()
try:
frappe.db.updatedb(self.name, self)
except Exception as e:
print("\n\nThere was an issue while migrating the DocType: {}\n".format(self.name))
raise e
self.change_modified_of_parent()
make_module_and_roles(self)
self.update_fields_to_fetch()
from frappe import conf
allow_doctype_export = frappe.flags.allow_doctype_export or (not frappe.flags.in_test and conf.get('developer_mode'))
if not self.custom and not frappe.flags.in_import and allow_doctype_export:
self.export_doc()
self.make_controller_template()
if self.has_web_view:
self.set_base_class_for_controller()
# update index
if not self.custom:
self.run_module_method("on_doctype_update")
if self.flags.in_insert:
self.run_module_method("after_doctype_insert")
delete_notification_count_for(doctype=self.name)
frappe.clear_cache(doctype=self.name)
if not frappe.flags.in_install and hasattr(self, 'before_update'):
self.sync_global_search()
# clear from local cache
if self.name in frappe.local.meta_cache:
del frappe.local.meta_cache[self.name]
clear_linked_doctype_cache()
def delete_duplicate_custom_fields(self):
if not (frappe.db.table_exists(self.name) and frappe.db.table_exists("Custom Field")):
return
fields = [d.fieldname for d in self.fields if d.fieldtype in data_fieldtypes]
if fields:
frappe.db.sql('''delete from
`tabCustom Field`
where
dt = {0} and fieldname in ({1})
'''.format('%s', ', '.join(['%s'] * len(fields))), tuple([self.name] + fields), as_dict=True)
def sync_global_search(self):
'''If global search settings are changed, rebuild search properties for this table'''
global_search_fields_before_update = [d.fieldname for d in
self.before_update.fields if d.in_global_search]
if self.before_update.show_name_in_global_search:
global_search_fields_before_update.append('name')
global_search_fields_after_update = [d.fieldname for d in
self.fields if d.in_global_search]
if self.show_name_in_global_search:
global_search_fields_after_update.append('name')
if set(global_search_fields_before_update) != set(global_search_fields_after_update):
now = (not frappe.request) or frappe.flags.in_test or frappe.flags.in_install
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',
now=now, doctype=self.name)
def set_base_class_for_controller(self):
		'''Updates the controller class to subclass from `WebsiteGenerator`,
if it is a subclass of `Document`'''
controller_path = frappe.get_module_path(frappe.scrub(self.module),
'doctype', frappe.scrub(self.name), frappe.scrub(self.name) + '.py')
with open(controller_path, 'r') as f:
code = f.read()
class_string = '\nclass {0}(Document)'.format(self.name.replace(' ', ''))
if '\nfrom frappe.model.document import Document' in code and class_string in code:
code = code.replace('from frappe.model.document import Document',
'from frappe.website.website_generator import WebsiteGenerator')
code = code.replace('class {0}(Document)'.format(self.name.replace(' ', '')),
'class {0}(WebsiteGenerator)'.format(self.name.replace(' ', '')))
with open(controller_path, 'w') as f:
f.write(code)
def run_module_method(self, method):
from frappe.modules import load_doctype_module
module = load_doctype_module(self.name, self.module)
if hasattr(module, method):
getattr(module, method)()
def before_rename(self, old, new, merge=False):
"""Throw exception if merge. DocTypes cannot be merged."""
if not self.custom and frappe.session.user != "Administrator":
frappe.throw(_("DocType can only be renamed by Administrator"))
self.check_developer_mode()
self.validate_name(new)
if merge:
frappe.throw(_("DocType can not be merged"))
# Do not rename and move files and folders for custom doctype
if not self.custom and not frappe.flags.in_test and not frappe.flags.in_patch:
self.rename_files_and_folders(old, new)
def after_rename(self, old, new, merge=False):
"""Change table name using `RENAME TABLE` if table exists. Or update
`doctype` property for Single type."""
if self.issingle:
frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
frappe.db.sql("""update tabSingles set value=%s
where doctype=%s and field='name' and value = %s""", (new, new, old))
else:
frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))
def rename_files_and_folders(self, old, new):
# move files
new_path = get_doc_path(self.module, 'doctype', new)
subprocess.check_output(['mv', get_doc_path(self.module, 'doctype', old), new_path])
# rename files
for fname in os.listdir(new_path):
if frappe.scrub(old) in fname:
subprocess.check_output(['mv', os.path.join(new_path, fname),
os.path.join(new_path, fname.replace(frappe.scrub(old), frappe.scrub(new)))])
self.rename_inside_controller(new, old, new_path)
frappe.msgprint(_('Renamed files and replaced code in controllers, please check!'))
def rename_inside_controller(self, new, old, new_path):
for fname in ('{}.js', '{}.py', '{}_list.js', '{}_calendar.js', 'test_{}.py', 'test_{}.js'):
fname = os.path.join(new_path, fname.format(frappe.scrub(new)))
if os.path.exists(fname):
with open(fname, 'r') as f:
code = f.read()
with open(fname, 'w') as f:
f.write(code.replace(frappe.scrub(old).replace(' ', ''), frappe.scrub(new).replace(' ', '')))
def before_reload(self):
"""Preserve naming series changes in Property Setter."""
if not (self.issingle and self.istable):
self.preserve_naming_series_options_in_property_setter()
def preserve_naming_series_options_in_property_setter(self):
"""Preserve naming_series as property setter if it does not exist"""
naming_series = self.get("fields", {"fieldname": "naming_series"})
if not naming_series:
return
# check if atleast 1 record exists
if not (frappe.db.table_exists(self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
return
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
"property": "options", "field_name": "naming_series"})
if not existing_property_setter:
make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
if naming_series[0].default:
make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)
def before_export(self, docdict):
# remove null and empty fields
def remove_null_fields(o):
to_remove = []
for attr, value in iteritems(o):
if isinstance(value, list):
for v in value:
remove_null_fields(v)
elif not value:
to_remove.append(attr)
for attr in to_remove:
del o[attr]
remove_null_fields(docdict)
# retain order of 'fields' table and change order in 'field_order'
docdict["field_order"] = [f.fieldname for f in self.fields]
path = get_file_path(self.module, "DocType", self.name)
if os.path.exists(path):
try:
with open(path, 'r') as txtfile:
olddoc = json.loads(txtfile.read())
old_field_names = [f['fieldname'] for f in olddoc.get("fields", [])]
if old_field_names:
new_field_dicts = []
remaining_field_names = [f.fieldname for f in self.fields]
for fieldname in old_field_names:
field_dict = list(filter(lambda d: d['fieldname'] == fieldname, docdict['fields']))
if field_dict:
new_field_dicts.append(field_dict[0])
remaining_field_names.remove(fieldname)
for fieldname in remaining_field_names:
field_dict = list(filter(lambda d: d['fieldname'] == fieldname, docdict['fields']))
new_field_dicts.append(field_dict[0])
docdict['fields'] = new_field_dicts
except ValueError:
pass
@staticmethod
def prepare_for_import(docdict):
# set order of fields from field_order
if docdict.get("field_order"):
new_field_dicts = []
remaining_field_names = [f['fieldname'] for f in docdict.get('fields', [])]
for fieldname in docdict.get('field_order'):
field_dict = list(filter(lambda d: d['fieldname'] == fieldname, docdict.get('fields', [])))
if field_dict:
new_field_dicts.append(field_dict[0])
remaining_field_names.remove(fieldname)
for fieldname in remaining_field_names:
field_dict = list(filter(lambda d: d['fieldname'] == fieldname, docdict.get('fields', [])))
new_field_dicts.append(field_dict[0])
docdict['fields'] = new_field_dicts
if "field_order" in docdict:
del docdict["field_order"]
def export_doc(self):
"""Export to standard folder `[module]/doctype/[name]/[name].json`."""
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['DocType', self.name]], create_init=True)
def import_doc(self):
"""Import from standard folder `[module]/doctype/[name]/[name].json`."""
from frappe.modules.import_module import import_from_files
import_from_files(record_list=[[self.module, 'doctype', self.name]])
def make_controller_template(self):
"""Make boilerplate controller template."""
make_boilerplate("controller._py", self)
if not self.istable:
make_boilerplate("test_controller._py", self.as_dict())
make_boilerplate("controller.js", self.as_dict())
#make_boilerplate("controller_list.js", self.as_dict())
if self.has_web_view:
templates_path = frappe.get_module_path(frappe.scrub(self.module), 'doctype', frappe.scrub(self.name), 'templates')
if not os.path.exists(templates_path):
os.makedirs(templates_path)
make_boilerplate('templates/controller.html', self.as_dict())
make_boilerplate('templates/controller_row.html', self.as_dict())
def make_amendable(self):
"""If is_submittable is set, add amended_from docfields."""
if self.is_submittable:
if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
self.append("fields", {
"label": "Amended From",
"fieldtype": "Link",
"fieldname": "amended_from",
"options": self.name,
"read_only": 1,
"print_hide": 1,
"no_copy": 1
})
def get_max_idx(self):
"""Returns the highest `idx`"""
max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
self.name)
return max_idx and max_idx[0][0] or 0
def validate_name(self, name=None):
if not name:
name = self.name
# a DocType's name should not start with a number or underscore
# and should only contain letters, numbers and underscore
		if six.PY2:
			is_a_valid_name = re.match(r"^(?![\W])[^\d_\s][\w ]+$", name)
		else:
			is_a_valid_name = re.match(r"^(?![\W])[^\d_\s][\w ]+$", name, flags = re.ASCII)
if not is_a_valid_name:
frappe.throw(_("DocType's name should start with a letter and it can only consist of letters, numbers, spaces and underscores"), frappe.NameError)
def validate_fields_for_doctype(doctype):
doc = frappe.get_doc("DocType", doctype)
doc.delete_duplicate_custom_fields()
validate_fields(frappe.get_meta(doctype, cached=False))
# this is separate because it is also called via custom field
def validate_fields(meta):
"""Validate doctype fields. Checks
1. There are no illegal characters in fieldnames
2. If fieldnames are unique.
3. Validate column length.
4. Fields that do have database columns are not mandatory.
5. `Link` and `Table` options are valid.
6. **Hidden** and **Mandatory** are not set simultaneously.
7. `Check` type field has default as 0 or 1.
8. `Dynamic Links` are correctly defined.
9. Precision is set in numeric fields and is between 1 & 6.
10. Fold is not at the end (if set).
11. `search_fields` are valid.
12. `title_field` and title field pattern are valid.
13. `unique` check is only valid for Data, Link and Read Only fieldtypes.
14. `unique` cannot be checked if there exist non-unique values.
:param meta: `frappe.model.meta.Meta` object to check."""
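	# Usage sketch: these checks normally run through validate_fields_for_doctype
	# above, which re-fetches meta without cache first, e.g. (illustrative doctype):
	#   validate_fields(frappe.get_meta("ToDo", cached=False))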
def check_illegal_characters(fieldname):
validate_column_name(fieldname)
def check_unique_fieldname(docname, fieldname):
duplicates = list(filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields)))
if len(duplicates) > 1:
frappe.throw(_("{0}: Fieldname {1} appears multiple times in rows {2}").format(docname, fieldname, ", ".join(duplicates)), UniqueFieldnameError)
def check_fieldname_length(fieldname):
validate_column_length(fieldname)
def check_illegal_mandatory(docname, d):
if (d.fieldtype in no_value_fields) and d.fieldtype not in table_fields and d.reqd:
frappe.throw(_("{0}: Field {1} of type {2} cannot be mandatory").format(docname, d.label, d.fieldtype), IllegalMandatoryError)
def check_link_table_options(docname, d):
if d.fieldtype in ("Link",) + table_fields:
if not d.options:
frappe.throw(_("{0}: Options required for Link or Table type field {1} in row {2}").format(docname, d.label, d.idx), DoctypeLinkError)
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent:
options = frappe.db.get_value("DocType", d.options, "name")
if not options:
frappe.throw(_("{0}: Options must be a valid DocType for field {1} in row {2}").format(docname, d.label, d.idx), WrongOptionsDoctypeLinkError)
elif not (options == d.options):
frappe.throw(_("{0}: Options {1} must be the same as doctype name {2} for the field {3}", DoctypeLinkError)
.format(docname, d.options, options, d.label))
else:
# fix case
d.options = options
def check_hidden_and_mandatory(docname, d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("{0}: Field {1} in row {2} cannot be hidden and mandatory without default").format(docname, d.label, d.idx), HiddenAndMandatoryWithoutDefaultError)
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and (d.fieldtype in not_allowed_in_list_view):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_in_global_search(d):
if d.in_global_search and d.fieldtype in no_value_fields:
frappe.throw(_("'In Global Search' not allowed for type {0} in row {1}")
.format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = list(filter(lambda df: df.fieldname==d.options, fields))
if not doctype_pointer or (doctype_pointer[0].fieldtype not in ("Link", "Select")) \
or (doctype_pointer[0].fieldtype=="Link" and doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_illegal_default(d):
if d.fieldtype == "Check" and not d.default:
d.default = '0'
if d.fieldtype == "Check" and d.default not in ('0', '1'):
frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'"))
if d.fieldtype == "Select" and d.default and (d.default not in d.options.split("\n")):
frappe.throw(_("Default for {0} must be an option").format(d.fieldname))
def check_precision(d):
if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6):
frappe.throw(_("Precision should be between 1 and 6"))
def check_unique_and_text(docname, d):
if meta.issingle:
d.unique = 0
d.search_index = 0
if getattr(d, "unique", False):
if d.fieldtype not in ("Data", "Link", "Read Only"):
frappe.throw(_("{0}: Fieldtype {1} for {2} cannot be unique").format(docname, d.fieldtype, d.label), NonUniqueError)
if not d.get("__islocal") and frappe.db.has_column(d.parent, d.fieldname):
has_non_unique_values = frappe.db.sql("""select `{fieldname}`, count(*)
from `tab{doctype}` where ifnull({fieldname}, '') != ''
group by `{fieldname}` having count(*) > 1 limit 1""".format(
doctype=d.parent, fieldname=d.fieldname))
if has_non_unique_values and has_non_unique_values[0][0]:
frappe.throw(_("{0}: Field '{1}' cannot be set as Unique as it has non-unique values").format(docname, d.label), NonUniqueError)
if d.search_index and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"):
frappe.throw(_("{0}:Fieldtype {1} for {2} cannot be indexed").format(docname, d.fieldtype, d.label), CannotIndexedError)
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break":
frappe.throw(_("Fold must come before a Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
def check_search_fields(meta, fields):
"""Throw exception if `search_fields` don't contain valid fields."""
if not meta.search_fields:
return
# No value fields should not be included in search field
search_fields = [field.strip() for field in (meta.search_fields or "").split(",")]
fieldtype_mapper = { field.fieldname: field.fieldtype \
for field in filter(lambda field: field.fieldname in search_fields, fields) }
for fieldname in search_fields:
fieldname = fieldname.strip()
if (fieldtype_mapper.get(fieldname) in no_value_fields) or \
(fieldname not in fieldname_list):
frappe.throw(_("Search field {0} is not valid").format(fieldname))
def check_title_field(meta):
"""Throw exception if `title_field` isn't a valid fieldname."""
if not meta.get("title_field"):
return
if meta.title_field not in fieldname_list:
frappe.throw(_("Title field must be a valid fieldname"), InvalidFieldNameError)
def _validate_title_field_pattern(pattern):
if not pattern:
return
for fieldname in re.findall("{(.*?)}", pattern, re.UNICODE):
if fieldname.startswith("{"):
# edge case when double curlies are used for escape
continue
if fieldname not in fieldname_list:
frappe.throw(_("{{{0}}} is not a valid fieldname pattern. It should be {{field_name}}.").format(fieldname),
InvalidFieldNameError)
df = meta.get("fields", filters={"fieldname": meta.title_field})[0]
if df:
_validate_title_field_pattern(df.options)
_validate_title_field_pattern(df.default)
def check_image_field(meta):
'''check image_field exists and is of type "Attach Image"'''
if not meta.image_field:
return
df = meta.get("fields", {"fieldname": meta.image_field})
if not df:
frappe.throw(_("Image field must be a valid fieldname"), InvalidFieldNameError)
if df[0].fieldtype != 'Attach Image':
frappe.throw(_("Image field must be of type Attach Image"), InvalidFieldNameError)
def check_is_published_field(meta):
if not meta.is_published_field:
return
if meta.is_published_field not in fieldname_list:
frappe.throw(_("Is Published Field must be a valid fieldname"), InvalidFieldNameError)
def check_timeline_field(meta):
if not meta.timeline_field:
return
if meta.timeline_field not in fieldname_list:
frappe.throw(_("Timeline field must be a valid fieldname"), InvalidFieldNameError)
df = meta.get("fields", {"fieldname": meta.timeline_field})[0]
if df.fieldtype not in ("Link", "Dynamic Link"):
frappe.throw(_("Timeline field must be a Link or Dynamic Link"), InvalidFieldNameError)
def check_sort_field(meta):
'''Validate that sort_field(s) is a valid field'''
if meta.sort_field:
sort_fields = [meta.sort_field]
if ',' in meta.sort_field:
sort_fields = [d.split()[0] for d in meta.sort_field.split(',')]
for fieldname in sort_fields:
if not fieldname in fieldname_list + list(default_fields):
frappe.throw(_("Sort field {0} must be a valid fieldname").format(fieldname),
InvalidFieldNameError)
def check_illegal_depends_on_conditions(docfield):
''' assignment operation should not be allowed in the depends on condition.'''
depends_on_fields = ["depends_on", "collapsible_depends_on"]
for field in depends_on_fields:
depends_on = docfield.get(field, None)
if depends_on and ("=" in depends_on) and \
re.match("""[\w\.:_]+\s*={1}\s*[\w\.@'"]+""", depends_on):
frappe.throw(_("Invalid {0} condition").format(frappe.unscrub(field)), frappe.ValidationError)
def check_table_multiselect_option(docfield):
'''check if the doctype provided in Option has atleast 1 Link field'''
if not docfield.fieldtype == 'Table MultiSelect': return
doctype = docfield.options
meta = frappe.get_meta(doctype)
link_field = [df for df in meta.fields if df.fieldtype == 'Link']
if not link_field:
frappe.throw(_('DocType <b>{0}</b> provided for the field <b>{1}</b> must have atleast one Link field')
.format(doctype, docfield.fieldname), frappe.ValidationError)
def scrub_options_in_select(field):
"""Strip options for whitespaces"""
if field.fieldtype == "Select" and field.options is not None:
options_list = []
for i, option in enumerate(field.options.split("\n")):
_option = option.strip()
if i==0 or _option:
options_list.append(_option)
field.options = '\n'.join(options_list)
def scrub_fetch_from(field):
if hasattr(field, 'fetch_from') and getattr(field, 'fetch_from'):
field.fetch_from = field.fetch_from.strip('\n').strip()
fields = meta.get("fields")
fieldname_list = [d.fieldname for d in fields]
not_allowed_in_list_view = list(copy.copy(no_value_fields))
not_allowed_in_list_view.append("Attach Image")
if meta.istable:
not_allowed_in_list_view.remove('Button')
for d in fields:
if not d.permlevel: d.permlevel = 0
if d.fieldtype not in table_fields: d.allow_bulk_edit = 0
		if not d.fieldname:
			frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
		d.fieldname = d.fieldname.lower()
check_illegal_characters(d.fieldname)
check_unique_fieldname(meta.get("name"), d.fieldname)
check_fieldname_length(d.fieldname)
check_illegal_mandatory(meta.get("name"), d)
check_link_table_options(meta.get("name"), d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(meta.get("name"), d)
check_in_list_view(d)
check_in_global_search(d)
check_illegal_default(d)
check_unique_and_text(meta.get("name"), d)
check_illegal_depends_on_conditions(d)
check_table_multiselect_option(d)
scrub_options_in_select(d)
scrub_fetch_from(d)
check_fold(fields)
check_search_fields(meta, fields)
check_title_field(meta)
check_timeline_field(meta)
check_is_published_field(meta)
check_sort_field(meta)
check_image_field(meta)
def validate_permissions_for_doctype(doctype, for_remove=False):
"""Validates if permissions are set correctly."""
doctype = frappe.get_doc("DocType", doctype)
validate_permissions(doctype, for_remove)
# save permissions
for perm in doctype.get("permissions"):
perm.db_update()
clear_permissions_cache(doctype.name)
def clear_permissions_cache(doctype):
frappe.clear_cache(doctype=doctype)
delete_notification_count_for(doctype)
for user in frappe.db.sql_list("""
SELECT
DISTINCT `tabHas Role`.`parent`
FROM
`tabHas Role`,
`tabDocPerm`
WHERE `tabDocPerm`.`parent` = %s
AND `tabDocPerm`.`role` = `tabHas Role`.`role`
""", doctype):
frappe.clear_cache(user=user)
def validate_permissions(doctype, for_remove=False):
permissions = doctype.get("permissions")
if not permissions:
frappe.msgprint(_('No Permissions Specified'), alert=True, indicator='orange')
issingle = issubmittable = isimportable = False
if doctype:
issingle = cint(doctype.issingle)
issubmittable = cint(doctype.is_submittable)
isimportable = cint(doctype.allow_import)
def get_txt(d):
return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)
def check_atleast_one_set(d):
if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))
def check_double(d):
has_similar = False
similar_because_of = ""
for p in permissions:
if p.role==d.role and p.permlevel==d.permlevel and p!=d:
if p.if_owner==d.if_owner:
similar_because_of = _("If Owner")
has_similar = True
break
if has_similar:
frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and {1}")\
.format(get_txt(d), similar_because_of))
def check_level_zero_is_set(d):
if cint(d.permlevel) > 0 and d.role != 'All':
has_zero_perm = False
for p in permissions:
if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
has_zero_perm = True
break
if not has_zero_perm:
frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))
for invalid in ("create", "submit", "cancel", "amend"):
if d.get(invalid): d.set(invalid, 0)
def check_permission_dependency(d):
if d.cancel and not d.submit:
frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))
if (d.submit or d.cancel or d.amend) and not d.write:
frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
		if d.amend and not d.cancel:
frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
if d.get("import") and not d.create:
frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))
def remove_rights_for_single(d):
if not issingle:
return
if d.report:
frappe.msgprint(_("Report cannot be set for Single types"))
d.report = 0
d.set("import", 0)
d.set("export", 0)
for ptype, label in [["set_user_permissions", _("Set User Permissions")]]:
if d.get(ptype):
d.set(ptype, 0)
frappe.msgprint(_("{0} cannot be set for Single types").format(label))
def check_if_submittable(d):
if d.submit and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
elif d.amend and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))
def check_if_importable(d):
if d.get("import") and not isimportable:
frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))
for d in permissions:
if not d.permlevel:
d.permlevel=0
check_atleast_one_set(d)
if not for_remove:
check_double(d)
check_permission_dependency(d)
check_if_submittable(d)
check_if_importable(d)
check_level_zero_is_set(d)
remove_rights_for_single(d)
def make_module_and_roles(doc, perm_fieldname="permissions"):
"""Make `Module Def` and `Role` records if already not made. Called while installing."""
try:
if hasattr(doc,'restrict_to_domain') and doc.restrict_to_domain and \
not frappe.db.exists('Domain', doc.restrict_to_domain):
frappe.get_doc(dict(doctype='Domain', domain=doc.restrict_to_domain)).insert()
if ("tabModule Def" in frappe.db.get_tables()
and not frappe.db.exists("Module Def", doc.module)):
m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
m.flags.ignore_mandatory = m.flags.ignore_permissions = True
m.insert()
default_roles = ["Administrator", "Guest", "All"]
roles = [p.role for p in doc.get("permissions") or []] + default_roles
for role in list(set(roles)):
if not frappe.db.exists("Role", role):
r = frappe.get_doc(dict(doctype= "Role", role_name=role, desk_access=1))
r.flags.ignore_mandatory = r.flags.ignore_permissions = True
r.insert()
except frappe.DoesNotExistError as e:
pass
except frappe.db.ProgrammingError as e:
if frappe.db.is_table_missing(e):
pass
else:
raise
def init_list(doctype):
"""Make boilerplate list views."""
doc = frappe.get_meta(doctype)
make_boilerplate("controller_list.js", doc)
make_boilerplate("controller_list.html", doc)
def check_if_fieldname_conflicts_with_methods(doctype, fieldname):
doc = frappe.get_doc({"doctype": doctype})
method_list = [method for method in dir(doc) if isinstance(method, str) and callable(getattr(doc, method))]
if fieldname in method_list:
frappe.throw(_("Fieldname {0} conflicting with meta object").format(fieldname))
def clear_linked_doctype_cache():
frappe.cache().delete_value('linked_doctypes_without_ignore_user_permissions_enabled')
|
import argparse
from itertools import chain
from typing import List
from matchms import Spectrum
from matchms.exporting import save_as_msp
from matchms.importing import load_from_msp
def read_spectra(filenames: List[str]) -> List[Spectrum]:
"""Read spectra from files.
Args:
        filenames (List[str]): Paths to MSP files from which to load each spectrum.
Returns:
List[Spectrum]: Spectra stored in the file.
"""
spectra = list(chain(*[load_from_msp(file) for file in filenames]))
return spectra
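# Usage sketch (hypothetical file names): read_spectra flattens the spectra from
# several MSP files into one list, which can then be re-exported with matchms:
#   spectra = read_spectra(["spectra_a.msp", "spectra_b.msp"])
#   save_as_msp(spectra, "combined.msp")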
if __name__ == "__main__":
    listarg = argparse.ArgumentParser()
    listarg.add_argument('--filenames', nargs='+', type=str)
    listarg.add_argument('--outfilename', type=str)
    args = listarg.parse_args()
    spectra = read_spectra(args.filenames)
    save_as_msp(spectra, args.outfilename)
|
angular.module('ui.utils', [
'ui.event',
'ui.format',
'ui.highlight',
'ui.include',
'ui.indeterminate',
'ui.inflector',
'ui.jq',
'ui.keypress',
'ui.mask',
'ui.reset',
'ui.route',
'ui.scrollfix',
'ui.scroll',
'ui.scroll.jqlite',
'ui.showhide',
'ui.unique',
'ui.validate'
]);
|
//----------------------------------------------------------------------------
// electron-vibrancy
// Copyright 2016 arkenthera
//
// MIT License
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//----------------------------------------------------------------------------
#ifndef SRC_VIBRANCYHELPER_H_
#define SRC_VIBRANCYHELPER_H_
//----------------------------------------------------------------------------
#include <map>
#include "./Common.h"
//----------------------------------------------------------------------------
namespace Vibrancy {
class VibrancyHelper {
public:
VibrancyHelper();
~VibrancyHelper() { }
bool DisableVibrancy(unsigned char* buffer);
int32_t AddView(unsigned char* buffer, v8::Local<v8::Array> options);
bool UpdateView(unsigned char* buffer, v8::Local<v8::Array> options);
bool RemoveView(unsigned char* buffer, v8::Local<v8::Array> options);
};
} // namespace Vibrancy
//----------------------------------------------------------------------------
#endif // SRC_VIBRANCYHELPER_H_
|
'use strict';
exports.__esModule = true;
exports.RootCloseWrapper = exports.Transition = exports.Position = exports.Portal = exports.Overlay = exports.Modal = exports.AutoAffix = exports.Affix = undefined;
var _Affix2 = require('./Affix');
var _Affix3 = _interopRequireDefault(_Affix2);
var _AutoAffix2 = require('./AutoAffix');
var _AutoAffix3 = _interopRequireDefault(_AutoAffix2);
var _Modal2 = require('./Modal');
var _Modal3 = _interopRequireDefault(_Modal2);
var _Overlay2 = require('./Overlay');
var _Overlay3 = _interopRequireDefault(_Overlay2);
var _Portal2 = require('./Portal');
var _Portal3 = _interopRequireDefault(_Portal2);
var _Position2 = require('./Position');
var _Position3 = _interopRequireDefault(_Position2);
var _Transition2 = require('./Transition');
var _Transition3 = _interopRequireDefault(_Transition2);
var _RootCloseWrapper2 = require('./RootCloseWrapper');
var _RootCloseWrapper3 = _interopRequireDefault(_RootCloseWrapper2);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
exports.Affix = _Affix3.default;
exports.AutoAffix = _AutoAffix3.default;
exports.Modal = _Modal3.default;
exports.Overlay = _Overlay3.default;
exports.Portal = _Portal3.default;
exports.Position = _Position3.default;
exports.Transition = _Transition3.default;
exports.RootCloseWrapper = _RootCloseWrapper3.default;
|
if(document.URL.includes('web.whatsapp.com') == true){
console.log('script added')
var p=setInterval(function(){
// console.log('1st setInterval')
if(document.querySelectorAll('.DuUXI')[0]){
var t=setInterval(function(){
// console.log('setting again')
document.querySelectorAll('.DuUXI')[0].addEventListener('keydown',function(event){
if (event.keyCode === 13) {
// console.log('entered')
var y = document.querySelector('#pane-side').scrollTop
// console.log(y)
setTimeout(function() {
console.log('done')
document.querySelector('#pane-side').scrollTop = y
}, 1500);
}
})
},1000);
clearInterval(p)
}
},500)
}
|
class mode {
}
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from aria.orchestrator.decorators import operation, workflow
from aria.orchestrator.workflows.core import engine, graph_compiler
from aria.orchestrator.workflows.executor.thread import ThreadExecutor
from aria.orchestrator.workflows import api
from aria.modeling.service_instance import NodeBase
from tests import mock, storage
global_test_dict = {} # used to capture transitional node state changes
@pytest.fixture
def ctx(tmpdir):
context = mock.context.simple(str(tmpdir))
yield context
storage.release_sqlite_storage(context.model)
# TODO another possible approach of writing these tests:
# Don't create a ctx for every test.
# Problem is, that if for every test we create a workflow that contains just one standard
# lifecycle operation, then by the time we try to run the second test, the workflow fails since
# the execution tries to go from 'terminated' to 'pending'.
# And if we write a workflow that contains all the lifecycle operations, then first we need to
# change the api of `mock.models.create_interface`, which a lot of other tests use, and second how
# do we check all the state transition during the workflow execution in a convenient way.
TYPE_URI_NAME = 'tosca.interfaces.node.lifecycle.Standard'
SHORTHAND_NAME = 'Standard'
def test_node_state_changes_as_a_result_of_standard_lifecycle_create(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=TYPE_URI_NAME, op_name='create', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'create')
def test_node_state_changes_as_a_result_of_standard_lifecycle_configure(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=TYPE_URI_NAME, op_name='configure', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'configure')
def test_node_state_changes_as_a_result_of_standard_lifecycle_start(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=TYPE_URI_NAME, op_name='start', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'start')
def test_node_state_changes_as_a_result_of_standard_lifecycle_stop(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=TYPE_URI_NAME, op_name='stop', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'stop')
def test_node_state_changes_as_a_result_of_standard_lifecycle_delete(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=TYPE_URI_NAME, op_name='delete', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'delete')
def test_node_state_changes_as_a_result_of_standard_lifecycle_create_shorthand_name(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=SHORTHAND_NAME, op_name='create', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'create')
def test_node_state_changes_as_a_result_of_standard_lifecycle_configure_shorthand_name(
ctx, executor):
node = run_operation_on_node(
ctx, interface_name=SHORTHAND_NAME, op_name='configure', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'configure')
def test_node_state_changes_as_a_result_of_standard_lifecycle_start_shorthand_name(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=SHORTHAND_NAME, op_name='start', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'start')
def test_node_state_changes_as_a_result_of_standard_lifecycle_stop_shorthand_name(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=SHORTHAND_NAME, op_name='stop', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'stop')
def test_node_state_changes_as_a_result_of_standard_lifecycle_delete_shorthand_name(ctx, executor):
node = run_operation_on_node(
ctx, interface_name=SHORTHAND_NAME, op_name='delete', executor=executor)
_assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'delete')
def test_node_state_doesnt_change_as_a_result_of_an_operation_that_is_not_standard_lifecycle1(
ctx, executor):
node = run_operation_on_node(
ctx, interface_name='interface_name', op_name='op_name', executor=executor)
assert node.state == node.INITIAL
def test_node_state_doesnt_change_as_a_result_of_an_operation_that_is_not_standard_lifecycle2(
ctx, executor):
node = run_operation_on_node(
ctx, interface_name='interface_name', op_name='create', executor=executor)
assert node.state == node.INITIAL
def run_operation_on_node(ctx, op_name, interface_name, executor):
node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
interface = mock.models.create_interface(
service=node.service,
interface_name=interface_name,
operation_name=op_name,
operation_kwargs=dict(function='{name}.{func.__name__}'.format(name=__name__, func=func)))
node.interfaces[interface.name] = interface
graph_compiler.GraphCompiler(ctx, ThreadExecutor).compile(
single_operation_workflow(ctx, node=node, interface_name=interface_name, op_name=op_name)
)
eng = engine.Engine(executor)
eng.execute(ctx)
return node
def run_standard_lifecycle_operation_on_node(ctx, op_name, executor):
return run_operation_on_node(ctx,
interface_name='aria.interfaces.lifecycle.Standard',
op_name=op_name,
executor=executor)
def _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, op_name):
assert global_test_dict['transitional_state'] == NodeBase._OP_TO_STATE[op_name]['transitional']
assert node.state == NodeBase._OP_TO_STATE[op_name]['finished']
@workflow
def single_operation_workflow(graph, node, interface_name, op_name, **_):
graph.add_tasks(api.task.OperationTask(
node,
interface_name=interface_name,
operation_name=op_name))
@operation
def func(ctx):
global_test_dict['transitional_state'] = ctx.node.state
@pytest.fixture
def executor():
result = ThreadExecutor()
try:
yield result
finally:
result.close()
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
from ryu.ofproto.ether import *
LOG = logging.getLogger('test_ether')
class TestInet(unittest.TestCase):
""" Test case for ether
"""
def test_ether_type(self):
eq_(ETH_TYPE_IP, 0x0800)
eq_(ETH_TYPE_ARP, 0x0806)
eq_(ETH_TYPE_8021Q, 0x8100)
eq_(ETH_TYPE_IPV6, 0x86dd)
eq_(ETH_TYPE_MPLS, 0x8847)
eq_(ETH_TYPE_SLOW, 0x8809)
|
//
// SerializerTests.h
// Redland Objective-C Bindings
// $Id: SerializerTests.h 1115 2005-08-23 15:38:11Z kianga $
//
// Copyright 2004 Rene Puls <http://purl.org/net/kianga/>
// Copyright 2012 Pascal Pfiffner <http://www.chip.org/>
//
// This file is available under the following three licenses:
// 1. GNU Lesser General Public License (LGPL), version 2.1
// 2. GNU General Public License (GPL), version 2
// 3. Apache License, version 2.0
//
// You may not use this file except in compliance with at least one of
// the above three licenses. See LICENSE.txt at the top of this package
// for the complete terms and further details.
//
// The most recent version of this software can be found here:
// <https://github.com/p2/Redland-ObjC>
//
// For information about the Redland RDF Application Framework, including
// the most recent version, see <http://librdf.org/>.
//
#import <Foundation/Foundation.h>
#import <SenTestingKit/SenTestingKit.h>
@class RedlandModel, RedlandURI;
@interface SerializerTests : SenTestCase {
RedlandModel *model;
RedlandURI *uri;
}
@end
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
basedir = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(basedir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# locate our version number
def read_version_py(file_name):
try:
version_string_line = open(file_name, "rt").read()
except EnvironmentError:
return None
else:
version_regex = r"^version_str = ['\"]([^'\"]*)['\"]"
mo = re.search(version_regex, version_string_line, re.M)
if mo:
return mo.group(1)
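# read_version_py expects the version module to define the version as a plain
# string assignment, e.g. (illustrative value): version_str = "0.1.0"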
VERSION_PY_FILENAME = 'kalliope/_version.py'
version = read_version_py(VERSION_PY_FILENAME)
py2_prefix = ''
if sys.version_info[0] < 3:
py2_prefix = 'python2-'
setup(
name='kalliope',
version=version,
description='Kalliope is a modular always-on voice controlled personal assistant designed for home automation.',
long_description=long_description,
url='https://github.com/kalliope-project/kalliope',
author='The dream team of Kalliope-project',
author_email='kalliope-project@googlegroups.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Home Automation',
'Topic :: Multimedia :: Sound/Audio :: Speech',
'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords='assistant bot TTS STT jarvis',
# included packages
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# required libs
install_requires=[
'pyyaml>=3.12',
'six>=1.10.0',
'SpeechRecognition>=3.7.1',
'markupsafe>=1.0',
'pyaudio>=0.2.11',
'pyasn1>=0.2.3',
'ansible>=2.3,<2.4',
py2_prefix + 'pythondialog>=3.4.0',
'jinja2>=2.8,<=2.9.6',
'cffi>=1.9.1',
'ipaddress>=1.0.17',
'flask>=0.12',
'Flask-Restful>=0.3.5',
'flask_cors==3.0.2',
'requests>=2.13',
'httpretty>=0.8.14',
'mock>=2.0.0',
'Flask-Testing>=0.6.2',
'apscheduler>=3.3.1',
'GitPython>=2.1.3',
'packaging>=16.8',
'transitions>=0.4.3',
'sounddevice>=0.3.7',
'SoundFile>=0.9.0',
'pyalsaaudio>=0.8.4',
'sox>=1.3.0',
'paho-mqtt>=1.3.0',
'voicerss_tts>=1.0.3'
],
# additional files
package_data={
'kalliope': [
'brain.yml',
'settings.yml',
'trigger/snowboy/armv7l/python27/_snowboydetect.so',
'trigger/snowboy/x86_64/python27/_snowboydetect.so',
'trigger/snowboy/x86_64/python34/_snowboydetect.so',
'trigger/snowboy/x86_64/python35/_snowboydetect.so',
'trigger/snowboy/x86_64/python36/_snowboydetect.so',
'trigger/snowboy/resources/*',
'sounds/*'
],
},
# entry point script
entry_points={
'console_scripts': [
'kalliope=kalliope:main',
],
},
)
|
import { Matrix } from 'ml-matrix'
export const fixPrec = (n) => Number.parseFloat(Number(n).toFixed(15))
export const fixArrPrec = (arr) => arr.map((el) => fixPrec(el))
export const fixMatrPrec = (matr) => matr.map((arr) => fixArrPrec(arr))
export const eye = (n) => {
return Matrix.eye(n, n).to2DArray();
}
export const swapRows = (m, r1, r2) => {
[m[r1], m[r2]] = [m[r2], m[r1]];
return m;
}
export const mmul = (m1, m2) => {
const matr1 = new Matrix(m1)
const matr2 = new Matrix(m2)
const matr3 = matr1.mmul(matr2)
return fixMatrPrec(matr3.to2DArray());
}
export const vsubt = (v1, v2) => {
if (v1.length != v2.length) {
throw new Error("Vectors must have equal length when subtracting!");
}
let res = new Array(v1.length);
for (let i = 0; i < v1.length; i++) {
res[i] = fixPrec(v1[i] - v2[i]);
}
return res;
}
export const mvmul = (m, v) => {
const matr = new Matrix(m)
const vec = Matrix.columnVector(v)
const res = matr.mmul(vec)
return fixArrPrec(res.to2DArray());
}
export const firstMNorm = (m) => {
const matrM = new Matrix(m);
let max = -Infinity;
for (let col = 0; col < matrM.columns; col++) {
// 1-norm of a matrix: maximum absolute column sum
const sum = matrM.getColumn(col).reduce((acc, curr) => acc + Math.abs(curr), 0);
if (sum > max) {
max = sum;
}
}
return max;
}
export const continuousVNorm = (vec) => {
return vec.reduce((max, val) => Math.abs(val) > max ? Math.abs(val) : max, -Infinity)
}
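// Usage sketch (illustrative values) for the helpers above:
//   const A = [[1, 2], [3, 4]]
//   mmul(A, eye(2))       // -> [[1, 2], [3, 4]]
//   mvmul(A, [1, 1])      // -> [3, 7]
//   vsubt([3, 7], [1, 1]) // -> [2, 6]
//   firstMNorm(A)         // -> 6 (largest absolute column sum: |2| + |4|)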
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@文件 :utils_helper.py
@说明 :
@时间 :2020/07/17 10:10:02
@作者 :Riven
@版本 :1.0.0
'''
def is_empty(value):
return (not value) and (value != 0) and (value is not False)
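# Illustrative behaviour of the expression above:
#   is_empty(None)  -> True
#   is_empty("")    -> True
#   is_empty([])    -> True
#   is_empty(0)     -> False  (zero is kept as a real value)
#   is_empty(False) -> False  (False == 0, so it is kept as well)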
|
#!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
wflow_upscale -- resample a working wflow model to a lower resolution version
Usage::
-C CaseName
-N NewCaseName
-r resample factor
-I skip input mapstacks if specified
-f force overwrite an existing model
-M maxcpu
maximum number of cpu's/cores to use (default = 4)
The script uses the pcraster resample program to reduce the maps. The original
river network is used to force the river network in the reduced version of the
model. Nevertheless it may be needed to manually adjust the locations of
the gauges in the gauges.col file.
A more sophisticated method for resampling is implemented in the wflow_prepare
scripts. In general, if you need a high and a low resolution model it is best to
run the wflow_prepare scripts twice to create the different models.
"""
import getopt
import glob
import os.path
import shlex
from wflow.wflow_lib import *
def usage(*args):
sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
def runCommands(commands, maxCpu):
"""
Runs a list of processes dividing
over maxCpu number of cores.
"""
def removeFinishedProcesses(processes):
""" given a list of (commandString, process),
remove those that have completed and return the result
"""
newProcs = []
for pollCmd, pollProc in processes:
retCode = pollProc.poll()
if retCode == None:
# still running
newProcs.append((pollCmd, pollProc))
elif retCode != 0:
# failed
raise Exception("Command %s failed" % pollCmd)
else:
print("Command %s completed successfully" % pollCmd)
return newProcs
processes = []
for command in commands:
command = command.replace(
"\\", "/"
) # otherwise shlex.split removes all path separators
proc = subprocess.Popen(shlex.split(command))
procTuple = (command, proc)
processes.append(procTuple)
while len(processes) >= maxCpu:
time.sleep(0.2)
processes = removeFinishedProcesses(processes)
# wait for all processes
while len(processes) > 0:
time.sleep(0.5)
processes = removeFinishedProcesses(processes)
print("All processes in que (" + str(len(commands)) + ") completed.")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "fhC:N:Ir:M:")
except getopt.error as msg:
usage(msg)
factor = 1
Verbose = 1
inmaps = True
force = False
caseName = "rhineNew"
caseNameNew = "rhineNew_resamp"
maxcpu = 4
for o, a in opts:
if o == "-C":
caseName = a
if o == "-N":
caseNameNew = a
if o == "-r":
factor = int(a)
if o == "-I":
inmaps = False
if o == "-h":
usage()
if o == "-f":
force = True
if o == "-M":
maxcpu = int(a)
dirs = ["/intbl/", "/inmaps/", "/staticmaps/", "/intss/", "/instate/", "/outstate/"]
ext_to_copy = ["*.tss", "*.tbl", "*.col", "*.xml"]
if os.path.isdir(caseNameNew) and not force:
print("Refusing to write into an existing directory:" + caseNameNew)
sys.exit()
if not os.path.isdir(caseNameNew):
for ddir in dirs:
os.makedirs(caseNameNew + ddir)
for inifile in glob.glob(caseName + "/*.ini"):
shutil.copy(inifile, inifile.replace(caseName, caseNameNew))
for ddir in dirs:
allcmd = []
for mfile in glob.glob(caseName + ddir + "/*.map"):
if "_ldd.map" not in mfile:
mstr = (
"resample -r "
+ str(factor)
+ " "
+ mfile
+ " "
+ mfile.replace(caseName, caseNameNew)
)
# print mstr
allcmd.append(mstr)
# os.system(mstr)
runCommands(allcmd, maxcpu)
if inmaps:
allcmd = []
for mfile in glob.glob(caseName + ddir + "/*.[0-9][0-9][0-9]"):
mstr = (
"resample -r "
+ str(factor)
+ " "
+ mfile
+ " "
+ mfile.replace(caseName, caseNameNew)
)
if not os.path.exists(mfile.replace(caseName, caseNameNew)):
# print mstr
allcmd.append(mstr)
# os.system(mstr)
else:
print("skipping " + mfile.replace(caseName, caseNameNew))
runCommands(allcmd, maxcpu)
for ext in ext_to_copy:
for mfile in glob.glob(caseName + ddir + ext):
shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
# Because the ldd cannot be resampled this way we have to recreate
# in including the subcatchments that are derived from it
print("recreating static maps ...")
# Create new ldd using old river network
dem = pcr.readmap(caseNameNew + "/staticmaps/wflow_dem.map")
# orig low res river
riverburn = pcr.readmap(caseNameNew + "/staticmaps/wflow_river.map")
# save it
pcr.report(riverburn, caseNameNew + "/staticmaps/wflow_riverburnin.map")
demburn = pcr.cover(pcr.ifthen(pcr.boolean(riverburn), dem - 600), dem)
print("Creating ldd...")
ldd = lddcreate_save(
caseNameNew + "/staticmaps/wflow_ldd.map", demburn, True, 10.0e35
)
## Find catchment (overall)
outlet = find_outlet(ldd)
sub = subcatch(ldd, outlet)
pcr.report(sub, caseNameNew + "/staticmaps/wflow_catchment.map")
pcr.report(outlet, caseNameNew + "/staticmaps/wflow_outlet.map")
# os.system("col2map --clone " + caseNameNew + "/staticmaps/wflow_subcatch.map " + caseNameNew + "/staticmaps/gauges.col " + caseNameNew + "/staticmaps/wflow_gauges.map")
gmap = pcr.readmap(caseNameNew + "/staticmaps/wflow_gauges.map")
scatch = subcatch(ldd, gmap)
pcr.report(scatch, caseNameNew + "/staticmaps/wflow_subcatch.map")
if __name__ == "__main__":
main()
|
CAAT.Module({
/**
* @name BoxLayout
* @memberOf CAAT.Foundation.UI.Layout
* @extends CAAT.Foundation.UI.Layout.LayoutManager
* @constructor
*/
defines:"CAAT.Foundation.UI.Layout.BoxLayout",
aliases:["CAAT.UI.BoxLayout"],
depends:[
"CAAT.Foundation.UI.Layout.LayoutManager",
"CAAT.Math.Dimension"
],
extendsClass:"CAAT.Foundation.UI.Layout.LayoutManager",
extendsWith:function () {
return {
/**
* @lends CAAT.Foundation.UI.Layout.BoxLayout.prototype
*/
/**
* Stack elements in this axis.
* @type {CAAT.Foundation.UI.Layout.LayoutManager}
*/
axis:CAAT.Foundation.UI.Layout.LayoutManager.AXIS.Y,
/**
* Vertical alignment.
* @type {CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT}
*/
valign:CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.CENTER,
/**
* Horizontal alignment.
* @type {CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT}
*/
halign:CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.CENTER,
setAxis:function (axis) {
this.axis = axis;
this.invalidateLayout();
return this;
},
setHorizontalAlignment:function (align) {
this.halign = align;
this.invalidateLayout();
return this;
},
setVerticalAlignment:function (align) {
this.valign = align;
this.invalidateLayout();
return this;
},
doLayout:function (container) {
if (this.axis === CAAT.Foundation.UI.Layout.LayoutManager.AXIS.Y) {
this.doLayoutVertical(container);
} else {
this.doLayoutHorizontal(container);
}
CAAT.Foundation.UI.Layout.BoxLayout.superclass.doLayout.call(this, container);
},
doLayoutHorizontal:function (container) {
var computedW = 0, computedH = 0;
var yoffset = 0, xoffset;
var i, l, actor;
// compute the width and height of the elements.
for (i = 0, l = container.getNumChildren(); i < l; i += 1) {
actor = container.getChildAt(i);
if (!actor.preventLayout && actor.isVisible() && actor.isInAnimationFrame(CAAT.getCurrentSceneTime())) {
if (computedH < actor.height) {
computedH = actor.height;
}
computedW += actor.width;
if (i > 0) {
computedW += this.hgap;
}
}
}
switch (this.halign) {
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.LEFT:
xoffset = this.padding.left;
break;
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.RIGHT:
xoffset = container.width - computedW - this.padding.right;
break;
default:
xoffset = (container.width - computedW) / 2;
}
for (i = 0, l = container.getNumChildren(); i < l; i += 1) {
actor = container.getChildAt(i);
if (!actor.preventLayout && actor.isVisible() && actor.isInAnimationFrame(CAAT.getCurrentSceneTime())) {
switch (this.valign) {
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.TOP:
yoffset = this.padding.top;
break;
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.BOTTOM:
yoffset = container.height - this.padding.bottom - actor.height;
break;
default:
yoffset = (container.height - actor.height) / 2;
}
this.__setActorPosition(actor, xoffset, yoffset);
xoffset += actor.width + this.hgap;
}
}
},
__setActorPosition:function (actor, xoffset, yoffset) {
if (this.animated) {
if (this.newChildren.indexOf(actor) !== -1) {
actor.setPosition(xoffset, yoffset);
actor.setScale(0, 0);
actor.scaleTo(1, 1, 500, 0, .5, .5, this.newElementInterpolator);
} else {
actor.moveTo(xoffset, yoffset, 500, 0, this.moveElementInterpolator);
}
} else {
actor.setPosition(xoffset, yoffset);
}
},
doLayoutVertical:function (container) {
var computedW = 0, computedH = 0;
var yoffset, xoffset;
var i, l, actor;
// compute the width and height of the elements.
for (i = 0, l = container.getNumChildren(); i < l; i += 1) {
actor = container.getChildAt(i);
if (!actor.preventLayout && actor.isVisible() && actor.isInAnimationFrame(CAAT.getCurrentSceneTime())) {
if (computedW < actor.width) {
computedW = actor.width;
}
computedH += actor.height;
if (i > 0) {
computedH += this.vgap;
}
}
}
switch (this.valign) {
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.TOP:
yoffset = this.padding.top;
break;
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.BOTTOM:
yoffset = container.height - computedH - this.padding.bottom;
break;
default:
yoffset = (container.height - computedH) / 2;
}
for (i = 0, l = container.getNumChildren(); i < l; i += 1) {
actor = container.getChildAt(i);
if (!actor.preventLayout && actor.isVisible() && actor.isInAnimationFrame(CAAT.getCurrentSceneTime())) {
switch (this.halign) {
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.LEFT:
xoffset = this.padding.left;
break;
case CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.RIGHT:
xoffset = container.width - this.padding.right - actor.width;
break;
default:
xoffset = (container.width - actor.width) / 2;
}
this.__setActorPosition(actor, xoffset, yoffset);
yoffset += actor.height + this.vgap;
}
}
},
getPreferredLayoutSize:function (container) {
var dim = new CAAT.Math.Dimension();
var computedW = 0, computedH = 0;
var i, l;
// compute the width and height of the elements.
for (i = 0, l = container.getNumChildren(); i < l; i += 1) {
var actor = container.getChildAt(i);
if (!actor.preventLayout && actor.isVisible() && actor.isInAnimationFrame(CAAT.getCurrentSceneTime())) {
var ps = actor.getPreferredSize();
if (computedH < ps.height) {
computedH = ps.height;
}
computedW += ps.width;
}
}
dim.width = computedW;
dim.height = computedH;
return dim;
},
getMinimumLayoutSize:function (container) {
var dim = new CAAT.Math.Dimension();
var computedW = 0, computedH = 0;
var i, l;
// compute the width and height of the elements.
for (i = 0, l = container.getNumChildren(); i < l; i += 1) {
var actor = container.getChildAt(i);
if (!actor.preventLayout && actor.isVisible() && actor.isInAnimationFrame(CAAT.getCurrentSceneTime())) {
var ps = actor.getMinimumSize();
if (computedH < ps.height) {
computedH = ps.height;
}
computedW += ps.width;
}
}
dim.width = computedW;
dim.height = computedH;
return dim;
}
}
}
});
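// Usage sketch (assumes a CAAT container actor that supports setLayout; values illustrative):
//   var layout = new CAAT.Foundation.UI.Layout.BoxLayout();
//   layout.setAxis(CAAT.Foundation.UI.Layout.LayoutManager.AXIS.X)
//         .setHorizontalAlignment(CAAT.Foundation.UI.Layout.LayoutManager.ALIGNMENT.LEFT);
//   container.setLayout(layout);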
|
#ifndef DRIVER_H
#define DRIVER_H
#include <iostream>
#include <string>
#include "Product.h"
#include "vehicle.h"
class Driver
{
public:
Driver();
Vehicle van;
unsigned int distance(Vehicle &van,std::string &address);
void doo();
Product take();
void deliver (Product &p);
};
#endif // DRIVER_H
|
// Copyright (c) 2015-2018 The ReBitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
/**
* Functionality for communicating with Tor.
*/
#ifndef REBITCOIN_TORCONTROL_H
#define REBITCOIN_TORCONTROL_H
#include <string>
extern const std::string DEFAULT_TOR_CONTROL;
static const bool DEFAULT_LISTEN_ONION = true;
void StartTorControl();
void InterruptTorControl();
void StopTorControl();
#endif /* REBITCOIN_TORCONTROL_H */
|
// MESSAGE BATTERY2 PACKING
#define MAVLINK_MSG_ID_BATTERY2 181
typedef struct __mavlink_battery2_t
{
uint16_t voltage; /*< voltage in millivolts*/
int16_t current_battery; /*< Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current*/
} mavlink_battery2_t;
#define MAVLINK_MSG_ID_BATTERY2_LEN 4
#define MAVLINK_MSG_ID_181_LEN 4
#define MAVLINK_MSG_ID_BATTERY2_CRC 174
#define MAVLINK_MSG_ID_181_CRC 174
#define MAVLINK_MESSAGE_INFO_BATTERY2 { \
"BATTERY2", \
2, \
{ { "voltage", NULL, MAVLINK_TYPE_UINT16_T, 0, 0, offsetof(mavlink_battery2_t, voltage) }, \
{ "current_battery", NULL, MAVLINK_TYPE_INT16_T, 0, 2, offsetof(mavlink_battery2_t, current_battery) }, \
} \
}
/**
* @brief Pack a battery2 message
* @param system_id ID of this system
* @param component_id ID of this component (e.g. 200 for IMU)
* @param msg The MAVLink message to compress the data into
*
* @param voltage voltage in millivolts
* @param current_battery Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current
* @return length of the message in bytes (excluding serial stream start sign)
*/
static inline uint16_t mavlink_msg_battery2_pack(uint8_t system_id, uint8_t component_id, mavlink_message_t* msg,
uint16_t voltage, int16_t current_battery)
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
char buf[MAVLINK_MSG_ID_BATTERY2_LEN];
_mav_put_uint16_t(buf, 0, voltage);
_mav_put_int16_t(buf, 2, current_battery);
memcpy(_MAV_PAYLOAD_NON_CONST(msg), buf, MAVLINK_MSG_ID_BATTERY2_LEN);
#else
mavlink_battery2_t packet;
packet.voltage = voltage;
packet.current_battery = current_battery;
memcpy(_MAV_PAYLOAD_NON_CONST(msg), &packet, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
msg->msgid = MAVLINK_MSG_ID_BATTERY2;
#if MAVLINK_CRC_EXTRA
return mavlink_finalize_message(msg, system_id, component_id, MAVLINK_MSG_ID_BATTERY2_LEN, MAVLINK_MSG_ID_BATTERY2_CRC);
#else
return mavlink_finalize_message(msg, system_id, component_id, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
}
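/*
 * Usage sketch (illustrative values, not part of the generated API):
 *
 *   mavlink_message_t msg;
 *   uint16_t len = mavlink_msg_battery2_pack(1, 200, &msg, 12600, -1);
 *   // 12600 mV battery, current not measured (-1). The packed message can then
 *   // be serialized with mavlink_msg_to_send_buffer() and written to the link.
 */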
/**
* @brief Pack a battery2 message on a channel
* @param system_id ID of this system
* @param component_id ID of this component (e.g. 200 for IMU)
* @param chan The MAVLink channel this message will be sent over
* @param msg The MAVLink message to compress the data into
* @param voltage voltage in millivolts
* @param current_battery Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current
* @return length of the message in bytes (excluding serial stream start sign)
*/
static inline uint16_t mavlink_msg_battery2_pack_chan(uint8_t system_id, uint8_t component_id, uint8_t chan,
mavlink_message_t* msg,
uint16_t voltage,int16_t current_battery)
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
char buf[MAVLINK_MSG_ID_BATTERY2_LEN];
_mav_put_uint16_t(buf, 0, voltage);
_mav_put_int16_t(buf, 2, current_battery);
memcpy(_MAV_PAYLOAD_NON_CONST(msg), buf, MAVLINK_MSG_ID_BATTERY2_LEN);
#else
mavlink_battery2_t packet;
packet.voltage = voltage;
packet.current_battery = current_battery;
memcpy(_MAV_PAYLOAD_NON_CONST(msg), &packet, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
msg->msgid = MAVLINK_MSG_ID_BATTERY2;
#if MAVLINK_CRC_EXTRA
return mavlink_finalize_message_chan(msg, system_id, component_id, chan, MAVLINK_MSG_ID_BATTERY2_LEN, MAVLINK_MSG_ID_BATTERY2_CRC);
#else
return mavlink_finalize_message_chan(msg, system_id, component_id, chan, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
}
/**
* @brief Encode a battery2 struct
*
* @param system_id ID of this system
* @param component_id ID of this component (e.g. 200 for IMU)
* @param msg The MAVLink message to compress the data into
* @param battery2 C-struct to read the message contents from
*/
static inline uint16_t mavlink_msg_battery2_encode(uint8_t system_id, uint8_t component_id, mavlink_message_t* msg, const mavlink_battery2_t* battery2)
{
return mavlink_msg_battery2_pack(system_id, component_id, msg, battery2->voltage, battery2->current_battery);
}
/**
* @brief Encode a battery2 struct on a channel
*
* @param system_id ID of this system
* @param component_id ID of this component (e.g. 200 for IMU)
* @param chan The MAVLink channel this message will be sent over
* @param msg The MAVLink message to compress the data into
* @param battery2 C-struct to read the message contents from
*/
static inline uint16_t mavlink_msg_battery2_encode_chan(uint8_t system_id, uint8_t component_id, uint8_t chan, mavlink_message_t* msg, const mavlink_battery2_t* battery2)
{
return mavlink_msg_battery2_pack_chan(system_id, component_id, chan, msg, battery2->voltage, battery2->current_battery);
}
/**
* @brief Send a battery2 message
* @param chan MAVLink channel to send the message
*
* @param voltage voltage in millivolts
* @param current_battery Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current
*/
#ifdef MAVLINK_USE_CONVENIENCE_FUNCTIONS
static inline void mavlink_msg_battery2_send(mavlink_channel_t chan, uint16_t voltage, int16_t current_battery)
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
char buf[MAVLINK_MSG_ID_BATTERY2_LEN];
_mav_put_uint16_t(buf, 0, voltage);
_mav_put_int16_t(buf, 2, current_battery);
#if MAVLINK_CRC_EXTRA
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, buf, MAVLINK_MSG_ID_BATTERY2_LEN, MAVLINK_MSG_ID_BATTERY2_CRC);
#else
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, buf, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
#else
mavlink_battery2_t packet;
packet.voltage = voltage;
packet.current_battery = current_battery;
#if MAVLINK_CRC_EXTRA
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, (const char *)&packet, MAVLINK_MSG_ID_BATTERY2_LEN, MAVLINK_MSG_ID_BATTERY2_CRC);
#else
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, (const char *)&packet, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
#endif
}
#if MAVLINK_MSG_ID_BATTERY2_LEN <= MAVLINK_MAX_PAYLOAD_LEN
/*
 This variant of _send() can be used to save stack space by re-using
memory from the receive buffer. The caller provides a
mavlink_message_t which is the size of a full mavlink message. This
is usually the receive buffer for the channel, and allows a reply to an
incoming message with minimum stack space usage.
*/
static inline void mavlink_msg_battery2_send_buf(mavlink_message_t *msgbuf, mavlink_channel_t chan, uint16_t voltage, int16_t current_battery)
{
#if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS
char *buf = (char *)msgbuf;
_mav_put_uint16_t(buf, 0, voltage);
_mav_put_int16_t(buf, 2, current_battery);
#if MAVLINK_CRC_EXTRA
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, buf, MAVLINK_MSG_ID_BATTERY2_LEN, MAVLINK_MSG_ID_BATTERY2_CRC);
#else
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, buf, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
#else
mavlink_battery2_t *packet = (mavlink_battery2_t *)msgbuf;
packet->voltage = voltage;
packet->current_battery = current_battery;
#if MAVLINK_CRC_EXTRA
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, (const char *)packet, MAVLINK_MSG_ID_BATTERY2_LEN, MAVLINK_MSG_ID_BATTERY2_CRC);
#else
_mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_BATTERY2, (const char *)packet, MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
#endif
}
#endif
#endif
// MESSAGE BATTERY2 UNPACKING
/**
* @brief Get field voltage from battery2 message
*
* @return voltage in millivolts
*/
static inline uint16_t mavlink_msg_battery2_get_voltage(const mavlink_message_t* msg)
{
return _MAV_RETURN_uint16_t(msg, 0);
}
/**
* @brief Get field current_battery from battery2 message
*
* @return Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current
*/
static inline int16_t mavlink_msg_battery2_get_current_battery(const mavlink_message_t* msg)
{
return _MAV_RETURN_int16_t(msg, 2);
}
/**
* @brief Decode a battery2 message into a struct
*
* @param msg The message to decode
* @param battery2 C-struct to decode the message contents into
*/
static inline void mavlink_msg_battery2_decode(const mavlink_message_t* msg, mavlink_battery2_t* battery2)
{
#if MAVLINK_NEED_BYTE_SWAP
battery2->voltage = mavlink_msg_battery2_get_voltage(msg);
battery2->current_battery = mavlink_msg_battery2_get_current_battery(msg);
#else
memcpy(battery2, _MAV_PAYLOAD(msg), MAVLINK_MSG_ID_BATTERY2_LEN);
#endif
}
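/* Illustrative round-trip (not part of the generated header; assumes the
   MAVLink helper headers defining mavlink_message_t are already included):

       mavlink_message_t msg;
       mavlink_battery2_t in = { 12600, 150 };   // 12.6 V, 1.5 A
       mavlink_battery2_t out;
       mavlink_msg_battery2_encode(1, 200, &msg, &in);
       mavlink_msg_battery2_decode(&msg, &out);
       // out.voltage == 12600 (mV), out.current_battery == 150 (10 mA units)
*/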
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
import React from 'react';
import {shallowWithIntl} from 'test/intl-test-helper';
import Preferences from '@mm-redux/constants/preferences';
import EditChannel from './edit_channel';
jest.mock('@utils/theme', () => {
const original = jest.requireActual('../../../utils/theme');
return {
...original,
changeOpacity: jest.fn(),
};
});
describe('ChannelInfo -> EditChannel', () => {
const baseProps = {
canEdit: true,
isLandscape: false,
theme: Preferences.THEMES.default,
};
test('should match snapshot for Edit Channel', () => {
const wrapper = shallowWithIntl(
<EditChannel
{...baseProps}
/>,
);
expect(wrapper.getElement()).toMatchSnapshot();
});
    test('should not render EditChannel when canEdit is false', () => {
const wrapper = shallowWithIntl(
<EditChannel
{...baseProps}
canEdit={false}
/>,
);
expect(wrapper.getElement()).toBeNull();
});
});
|
#!/usr/bin/env python
"""
_WorkflowSummary_
List the task name and number of jobs running for a given site and subscription
type.
"""
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.JobStateMachine.Transitions import Transitions
from future.utils import listvalues
class WorkflowSummary(DBFormatter):
sql = """SELECT MAX(wmbs_workflow.id) AS id, wmbs_workflow.name AS wmspec,
COUNT(wmbs_job.id) AS num_job,
SUM(wmbs_job.outcome) AS success, wmbs_job_state.name AS state
FROM wmbs_workflow
INNER JOIN wmbs_subscription ON
wmbs_workflow.id = wmbs_subscription.workflow
INNER JOIN wmbs_jobgroup ON
wmbs_subscription.id = wmbs_jobgroup.subscription
INNER JOIN wmbs_job ON
wmbs_jobgroup.id = wmbs_job.jobgroup
INNER JOIN wmbs_job_state ON
wmbs_job.state = wmbs_job_state.id
GROUP BY wmbs_workflow.name, wmbs_job_state.name
ORDER BY id DESC"""
def failCount(self, result):
if result["state"] == 'success' or result["state"] == 'cleanout' \
or result["state"] == 'exhausted':
return (result["num_job"] - int(result["success"]))
return 0
def pendingCount(self, result):
if result["state"] == 'none' or result["state"] == 'new':
return (result["num_job"] - int(result["success"]))
return 0
def processingCount(self, result):
if result["state"] != 'success' and result["state"] != 'cleanout' \
and result["state"] != 'exhausted' and result['state'] != 'none' \
and result["state"] != 'new':
return result["num_job"]
else:
return 0
def formatWorkflow(self, results):
workflow = {}
tran = Transitions()
for result in results:
if result["wmspec"] not in workflow:
workflow[result["wmspec"]] = {}
for state in tran.states():
workflow[result["wmspec"]][state] = 0
workflow[result["wmspec"]][result["state"]] = result["num_job"]
workflow[result["wmspec"]]['total_jobs'] = result["num_job"]
workflow[result["wmspec"]]["real_success"] = int(result["success"])
workflow[result["wmspec"]]["id"] = result["id"]
workflow[result["wmspec"]]["wmspec"] = result["wmspec"]
workflow[result["wmspec"]]["pending"] = self.pendingCount(result)
workflow[result["wmspec"]]["real_fail"] = self.failCount(result)
workflow[result["wmspec"]]['processing'] = self.processingCount(result)
else:
workflow[result["wmspec"]][result["state"]] = result["num_job"]
workflow[result["wmspec"]]['total_jobs'] += result["num_job"]
workflow[result["wmspec"]]["real_success"] += int(result["success"])
workflow[result["wmspec"]]["pending"] += self.pendingCount(result)
workflow[result["wmspec"]]["real_fail"] += self.failCount(result)
workflow[result["wmspec"]]['processing'] += self.processingCount(result)
# need to order by id (client side)
return listvalues(workflow)
def execute(self, conn = None, transaction = False):
results = self.dbi.processData(self.sql,
conn = conn, transaction = transaction)
return self.formatWorkflow(self.formatDict(results))
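# Minimal illustrative sketch (not part of WMCore): how formatWorkflow folds
# per-state rows into one summary per workflow. The row dicts below mimic the
# output of self.formatDict(results); all values are invented for the example.
if __name__ == "__main__":
    ws = object.__new__(WorkflowSummary)  # bypass __init__; no DB connection needed
    rows = [{"id": 7, "wmspec": "wf1", "num_job": 10, "success": 8, "state": "success"},
            {"id": 7, "wmspec": "wf1", "num_job": 3, "success": 0, "state": "executing"}]
    for summary in ws.formatWorkflow(rows):
        print(summary["wmspec"], summary["total_jobs"], summary["real_fail"], summary["processing"])
    # -> wf1 13 2 3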
|
import re
import os
import sys
import xml.etree.ElementTree as ET
from rdkit import Chem
from rdkit.Chem import rdChemReactions as Reactions
from rdkit.Chem.Scaffolds.MurckoScaffold import *
srcPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(1, srcPath)
from SyntOn_Classifier import BBClassifier
from UsefulFunctions import *
def mainSynthonsGenerator(initSmi, keepPG=False, Classes=None, returnDict=False, returnBoolAndDict=False):
solventsToIgnore = ["OC(=O)C(=O)O", "CC(=O)O", "OS(=O)(=O)O", "[O-]Cl(=O)(=O)=O", "OP(=O)(O)O", "OC(=O)C(F)(F)F",
"OS(=O)(=O)C(F)(F)F", "OC(=O)O", "[O-]S(=O)(=O)C(F)(F)F", "OC=O", "OC(=O)/C=C\C(=O)O", "[O-]C(=O)C(F)(F)F",
"OC(=O)/C=C/C(=O)O"]
canonicalSmilesOfSolventsToIgnore = set([Chem.MolToSmiles(Chem.MolFromSmiles(x), canonical=True) for x in solventsToIgnore])
initMol = readMol(initSmi)
query = Chem.MolFromSmarts(
"[#6]-[#6]-[#8]-[#6].[#6]-[#8]-[#6](-[#6])=O.[#6]-[#8]-[#6](-[#6])=O.[#6]-[#8]-[#6](-[#6])=O")
    if initMol is None or initMol.HasSubstructMatch(query):
finalSynthon = {}
azoles = False
elif len(initSmi.split(".")) > 1: # case of input mixtures
finalSynthon = {}
azoles = False
for smi in initSmi.split("."):
mol = readMol(smi)
if Chem.MolToSmiles(mol, canonical=True) not in canonicalSmilesOfSolventsToIgnore:
nAzoles, nFinalSynthon = mainSynthonsGenerator(smi, keepPG, returnBoolAndDict=True)
if nAzoles:
azoles = True
if nFinalSynthon:
for newSynth in nFinalSynthon:
if newSynth not in finalSynthon:
finalSynthon[newSynth] = nFinalSynthon[newSynth].copy()
else:
        if Classes is None:
AllClasses = BBClassifier(mol=initMol)
Classes = [clas for clas in AllClasses if "MedChemHighlights" not in clas and "DEL" not in clas]
BBmarks = os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], "config" , "BB_Marks.xml")
tree = ET.parse(BBmarks)
BB_Marks = tree.getroot()
MarksSetup = __getReactionSMARTS(BB_Marks)
polyfunc = False
polyfuncName = []
keepSynthonsWithPG = keepPG
molsToWorkWith = {initSmi: set()}
ind = 0
finalSynthon = {}
polyfuncInd = []
synthonsAfterMonofuncClasses = {}
for Cls in Classes:
if "Bifunctional" in Cls or "Trifunctional" in Cls:
polyfunc = True
polyfuncName.append(Cls)
if not ("Nboc" in Cls or "Ncbz" in Cls or "Nfmoc" in Cls or "Ester" in Cls or "TFAc" in Cls):
keepSynthonsWithPG = True
break
while ind < len(Classes):
if "Bifunctional" in Classes[ind] or "Trifunctional" in Classes[ind]:
polyfuncInd.append(ind)
ind += 1
continue
else:
if polyfunc and "Bifunctional" not in Classes[ind]:
ignoreThisClass = False
for subName in [y for x in polyfuncName for y in x.split("_")]:
if subName in Classes[ind] and "Bifunctional_NbnDi_Amines" not in polyfuncName:
ignoreThisClass = True
ind += 1
break
if ignoreThisClass:
continue
for mol in molsToWorkWith:
synthons = __synthonsAssignement(Classes[ind], molsToWorkWith[mol], mol, MarksSetup, keepSynthonsWithPG)
if synthons:
for synth in synthons:
if synth not in synthonsAfterMonofuncClasses:
synthonsAfterMonofuncClasses[synth] = synthons[synth].copy()
else:
synthonsAfterMonofuncClasses[synth].update(synthons[synth])
for synth in synthonsAfterMonofuncClasses:
if synth not in molsToWorkWith:
molsToWorkWith[synth] = synthonsAfterMonofuncClasses[synth].copy()
else:
molsToWorkWith[synth].update(synthonsAfterMonofuncClasses[synth])
ind += 1
if keepSynthonsWithPG or not polyfunc:
if synthonsAfterMonofuncClasses:
for synth in synthonsAfterMonofuncClasses:
if synth not in finalSynthon:
finalSynthon[synth] = synthonsAfterMonofuncClasses[synth].copy()
else:
finalSynthon[synth].update(synthonsAfterMonofuncClasses[synth])
if polyfunc:
for i, ind in enumerate(polyfuncInd):
if i < len(polyfuncInd) - 1 :
extraMols = {}
for mol in molsToWorkWith:
synthons = __synthonsAssignement(Classes[ind], molsToWorkWith[mol], mol, MarksSetup, keepSynthonsWithPG)
if synthons:
for synth in synthons:
if synth not in finalSynthon:
finalSynthon[synth] = synthons[synth].copy()
else:
finalSynthon[synth].update(synthons[synth])
if i < len(polyfuncInd) - 1 and synth not in molsToWorkWith:
if synth not in extraMols:
extraMols[synth] = synthons[synth].copy()
else:
extraMols[synth].update(synthons[synth])
if i < len(polyfuncInd) - 1 and extraMols:
for synth in extraMols:
if synth not in molsToWorkWith:
molsToWorkWith[synth] = extraMols[synth].copy()
"""for mol in extraMols:
for ind in polyfuncInd:
synthons = __synthonsAssignement(Classes[ind], extraMols[mol], mol, MarksSetup, keepSynthonsWithPG)
if synthons:
for synth in synthons:
if synth not in finalSynthon:
finalSynthon[synth] = synthons[synth].copy()"""
for clas in Classes:
if "Trifunctional" in clas and "Ester" in clas and "Acid" in clas:
additionalSynthons = __generateBiacideSynthonForTrifunctional(finalSynthon, clas)
if additionalSynthons:
for synth in additionalSynthons:
if synth not in finalSynthon:
finalSynthon[synth] = additionalSynthons[synth].copy()
else:
finalSynthon[synth].update(additionalSynthons[synth])
additionalSynthons = __azolesSynthonPostGeneration(finalSynthon)
if additionalSynthons:
azoles = True
for synth in additionalSynthons:
if synth not in finalSynthon:
finalSynthon[synth] = additionalSynthons[synth].copy()
else:
finalSynthon[synth].update(additionalSynthons[synth])
else:
azoles = False
if not finalSynthon and "Esters_Esters" in Classes:
ReactionLIST = "[C;$(C(=O)[#6]):1][O:2]>>*[C;+0:1]|[O;!R;$(O(C(=O)[#6])[CX4,c]):1][C;$(C(=O)):2]>>*[O;+0:1]"
LabelList = "*C->C:10,*[13C]->13C:10,*[13CH]->13C:10|*O->O:20"
finalSynthon = __NormalSynthonsGenerator(LabelList, ReactionLIST,
set(), "Esters_Esters", initMol,
func=1)
if "Ketones_Ketones" in Classes:
newSynthToAdd = {}
for synthon in finalSynthon:
if "Ketones_Ketones" in finalSynthon[synthon]:
synthMol = readMol(synthon)
newClasses = BBClassifier(mol=synthMol)
for cls in newClasses:
if "Alcohols" in cls:
nAzoles, nFinalSynthon = mainSynthonsGenerator( synthon, keepPG, [cls], returnBoolAndDict=True)
for newSynth in nFinalSynthon:
if newSynth not in finalSynthon and newSynth not in newSynthToAdd:
newSynthToAdd[newSynth] = set()
newSynthToAdd[newSynth].update(finalSynthon[synthon])
newSynthToAdd[newSynth].update(nFinalSynthon[newSynth])
if newSynthToAdd:
for newSynth in newSynthToAdd:
finalSynthon[newSynth] = newSynthToAdd[newSynth]
if returnDict:
return finalSynthon
elif returnBoolAndDict:
return azoles, finalSynthon
else:
print("\n\n\n___________________________________________________________________________________________________")
print("All generated synthons (" + str(len(finalSynthon)) +"): " + ".".join([x for x in finalSynthon]))
print("Col1-Synton Col2-RespectiveBBsClass")
for synth in finalSynthon:
print(synth + " " + "+".join(finalSynthon[synth]))
def __synthonsAssignement(CurrentClass, PreviousClasses, molSmi, MarksSetup, keepSynthonsWithPG=True):
additionalBifuncClasses = ["Aminoacids_N-AliphaticAmino_Acid", "Aminoacids_N-AromaticAmino_Acid", "Reagents_DiAmines"]
PGBifunctional = ["Bifunctional_Acid_Ester","Bifunctional_Acid_Nitro", "Bifunctional_Aldehyde_Ester", "Bifunctional_Amine_Ester",
"Bifunctional_Ester_Isocyanates", "Bifunctional_Ester_SO2X", "Bifunctional_Aldehyde_Nitro",
"Bifunctional_NbocAmino_Acid", "Bifunctional_NcbzAmino_Acid", "Bifunctional_Isothiocyanates_Acid", "Bifunctional_NfmocAmino_Acid",
"Bifunctional_Aldehyde_Nboc", "Bifunctional_NTFAcAmino_Acid",
"Bifunctional_Boronics_Ncbz", "Bifunctional_Boronics_Nfmoc",
"Bifunctional_NbnDi_Amines", "Bifunctional_NbocDi_Amines", 'Bifunctional_NcbzDi_Amines', "Bifunctional_NfmocDi_Amines",
"Bifunctional_NTFAcDi_Amines", 'Bifunctional_Di_Amines_NotherCarbamates',
'Trifunctional_Acid_Aldehyde_Nitro', "Trifunctional_Acid_ArylHalide_Ester",
"Trifunctional_Acid_ArylHalide_Nitro", 'Trifunctional_Amines_ArylHalide_Nitro',
"Trifunctional_NbocAmino_Acid_AlkyneCH", "Trifunctional_NbocAmino_Acid_ArylHalide",
"Trifunctional_NfmocAmino_Acid_AlkyneCH", "Trifunctional_NfmocAmino_Acid_ArylHalide"]
__FirstReactionAsPreparation = ["Bifunctional_Acid_Aldehyde", "Bifunctional_Aldehyde_ArylHalide",
"Bifunctional_Aldehyde_SO2X", "Bifunctional_Boronics_Acid",
"Bifunctional_Boronics_Aldehyde", "Bifunctional_Hydroxy_Aldehyde",
'Trifunctional_Acid_Aldehyde_ArylHalide', "Trifunctional_Acid_Aldehyde_Acetylenes",
'Trifunctional_Acid_Aldehyde_Nitro', 'Trifunctional_Amines_ArylHalide_Nitro',
"Trifunctional_NbocAmino_Acid_AlkyneCH", "Trifunctional_NfmocAmino_Acid_AlkyneCH",
"Trifunctional_Di_Esters_Amino"]
PolymerReagents = ["Reagents_PoliOxiranes", "Esters_PoliEsters", "Reagents_PoliIsocyanates", "SulfonylHalides_Poli_Sulfonylhalides"]
trifuncClassesWithTwoPGs = ['Trifunctional_Acid_Ester_Nitro', "Trifunctional_NbocAmino_Acid_Ester",
"Trifunctional_NbocAmino_Acid_Nitro", "Trifunctional_Amines_Nboc_Ester",
"Trifunctional_Nboc_NCbz_Amino_Acid", "Trifunctional_Nboc_Nfmoc_Amino_Acid",
"Trifunctional_NfmocAmino_Acid_Ester", "Trifunctional_NfmocAmino_Acid_Nitro",
"Trifunctional_Di_Esters_Amino"]
if CurrentClass in trifuncClassesWithTwoPGs or "Trifunctional" in CurrentClass:
func = 3
elif "Bifunctional" in CurrentClass or CurrentClass in additionalBifuncClasses:
func = 2
else:
func = 1
if CurrentClass in __FirstReactionAsPreparation:
firstReactionAsPrep = True
else:
firstReactionAsPrep=False
if CurrentClass in trifuncClassesWithTwoPGs:
twoPGs=True
else:
twoPGs=False
labledSynthons = {}
mol = readMol(molSmi)
if CurrentClass in PolymerReagents:
MolsToWorkWith = {Chem.MolToSmiles(mol, canonical=True): PreviousClasses}
for i in range(len(MarksSetup[CurrentClass]['Labels'].split("|"))):
synthons = __SynthonsGeneratorsForPolymerReagents(MarksSetup[CurrentClass]['Labels'].split("|")[i],
MarksSetup[CurrentClass]['SMARTS'].split("|")[i], CurrentClass, MolsToWorkWith)
if synthons:
for synth in synthons:
if synth not in labledSynthons:
labledSynthons[synth] = synthons[synth].copy()
else:
labledSynthons[synth].update(synthons[synth])
return labledSynthons
elif CurrentClass in PGBifunctional or twoPGs:
synthons = __ProtectiveGroupRemoval(MarksSetup[CurrentClass]['Labels'], MarksSetup[CurrentClass]['SMARTS'],
mol, keepSynthonsWithPG,
firstReactionAsPrep, func, PreviousClasses, CurrentClass, twoPGs)
elif firstReactionAsPrep:
synthons = __FirstReactionAsPrep(MarksSetup[CurrentClass]['Labels'], MarksSetup[CurrentClass]['SMARTS'],
PreviousClasses, CurrentClass, mol, func)
else:
synthons = __NormalSynthonsGenerator(MarksSetup[CurrentClass]['Labels'], MarksSetup[CurrentClass]['SMARTS'],
PreviousClasses, CurrentClass, mol,
func=func)
if synthons:
for synth in synthons:
if synth not in labledSynthons:
labledSynthons[synth] = synthons[synth].copy()
else:
labledSynthons[synth].update(synthons[synth])
return labledSynthons
def __azolesSynthonPostGeneration(labledSynthons):
    pat = re.compile(r"\[\w*:\w*\]")
Class = "nHAzoles_nHAzoles"
additionalSynthons = {}
maxMark = 0
for molSmiles in labledSynthons:
marksPrevious = [molSmiles[m.start():m.start() + 2] + molSmiles[m.end() - 4:m.end()] for m in
re.finditer(pat, molSmiles)]
if len(marksPrevious)>maxMark:
maxMark=len(marksPrevious)
for molSmiles in labledSynthons:
query = Chem.MolFromSmarts("[nHr5;!$(nc=O)]")
mol = readMol(molSmiles)
marksPrevious = [molSmiles[m.start():m.start() + 2] + molSmiles[m.end() - 4:m.end()] for m in re.finditer(pat, molSmiles)]
if len(marksPrevious)==maxMark and mol.HasSubstructMatch(query):
cuttingRule = Reactions.ReactionFromSmarts("[nH;r5:1]>>*[n:1]")
label = "*n->n:20"
products = cuttingRule.RunReactants((mol,))
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, label)
if marksPrevious:
for synth in labledSynthon:
marksNew = [synth[m.start():m.start() + 2] + synth[m.end() - 4:m.end()] for m in
re.finditer(pat, synth)]
if len(marksNew)>len(marksPrevious):
if synth not in additionalSynthons:
additionalSynthons[synth] = labledSynthons[molSmiles].copy()
else:
additionalSynthons[synth].update(labledSynthons[molSmiles])
additionalSynthons[synth].add(Class)
else:
for synth in labledSynthon:
if synth not in additionalSynthons:
additionalSynthons[synth] = labledSynthons[molSmiles].copy()
else:
additionalSynthons[synth].update(labledSynthons[molSmiles])
additionalSynthons[synth].add(Class)
return additionalSynthons
def __generateBiacideSynthonForTrifunctional(labledSynthons, Class):
additionalSynthons = {}
for molSmiles in labledSynthons:
query = Chem.MolFromSmarts(
"[O;$(O=C([#6])[OD1])].[O;$(O([CH3])C([#6])=O),$(O([CH2][CH3])C([#6])=O),$(O([CH2]c1[cH][cH][cH][cH][cH]1)C([#6])=O),$(O(C([CH3])([CH3])[CH3])C([#6])=O),$(O([CH2][CH]=[CH2])C([#6])=O)]")
mol = readMol(molSmiles)
        pat = re.compile(r"\[\w*:\w*\]")
marksPrevious = [molSmiles[m.start():m.start() + 2] + molSmiles[m.end() - 4:m.end()] for m in re.finditer(pat, molSmiles)]
if mol.HasSubstructMatch(query):
cuttingRule = Reactions.ReactionFromSmarts(
"[O;$(O(C)C([#6])=O):1][C;$([CH3]),$([CH2][CH3]),$([CH2]c1[cH][cH][cH][cH][cH]1),$(C([CH3])([CH3])[CH3]),$([CH2][CH]=[CH2]):2]>>[OH:1]")
label = "No"
products = cuttingRule.RunReactants((mol,))
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, label)
if marksPrevious:
for synth in labledSynthon:
marksNew = [synth[m.start():m.start() + 2] + synth[m.end() - 4:m.end()] for m in
re.finditer(pat, synth)]
if len(marksNew)>len(marksPrevious):
if synth not in additionalSynthons:
additionalSynthons[synth] = labledSynthons[molSmiles].copy()
else:
additionalSynthons[synth].update(labledSynthons[molSmiles])
additionalSynthons[synth].add(Class)
else:
for synth in labledSynthon:
if synth not in additionalSynthons:
additionalSynthons[synth] = labledSynthons[molSmiles].copy()
else:
additionalSynthons[synth].update(labledSynthons[molSmiles])
additionalSynthons[synth].add(Class)
return additionalSynthons
def __SynthonsGeneratorsForPolymerReagents(Label, rule, Class, MolsToWorkWith, finalSynthons=None, firstLaunch=True, Deprotection=False):
    if finalSynthons is None:
finalSynthons = {}
newMolsToWorkWith = {}
cuttingRule = Reactions.ReactionFromSmarts(rule)
for molSmiles in MolsToWorkWith:
        pat = re.compile(r"\[\w*:\w*\]")
marksPrevious = [molSmiles[m.start():m.start() + 2] + molSmiles[m.end() - 4:m.end()] for m in re.finditer(pat, molSmiles)]
products = cuttingRule.RunReactants((Chem.MolFromSmiles(molSmiles),))
if not products and not firstLaunch:
if molSmiles not in finalSynthons:
finalSynthons[molSmiles] = MolsToWorkWith[molSmiles].copy()
else:
finalSynthons[molSmiles].update(MolsToWorkWith[molSmiles])
continue
for productSet in products:
for product in productSet:
labledSynthons = __getBBLabledSmiles(product, Label)
if marksPrevious and not Deprotection:
for synth in labledSynthons:
marksNew = [synth[m.start():m.start() + 2] + synth[m.end() - 4:m.end()] for m in
re.finditer(pat, synth)]
if len(marksNew) > len(marksPrevious):
if synth not in newMolsToWorkWith:
newMolsToWorkWith[synth] = MolsToWorkWith[molSmiles].copy()
else:
newMolsToWorkWith[synth].update(MolsToWorkWith[molSmiles])
newMolsToWorkWith[synth].add(Class)
else:
for synth in labledSynthons:
if synth not in newMolsToWorkWith:
newMolsToWorkWith[synth] = MolsToWorkWith[molSmiles].copy()
else:
newMolsToWorkWith[synth].update(MolsToWorkWith[molSmiles])
newMolsToWorkWith[synth].add(Class)
if newMolsToWorkWith:
__SynthonsGeneratorsForPolymerReagents(Label, rule, Class, newMolsToWorkWith, finalSynthons, firstLaunch=False,
Deprotection=Deprotection)
if firstLaunch:
return finalSynthons
def __ProtectiveGroupRemoval(LabelsLIST, ReactionLIST, mol, keepSynthonsWithPG, firstReactionAsPrep, func, PreviousClasses,
CurrentClass, twoPGs=False):
LabelsLISTBeforePGRemoval = LabelsLIST.split("|No|")[0]
ReactionLISTBeforePGRemoval = ReactionLIST.split("|")[:len(LabelsLISTBeforePGRemoval.split("|"))]
firstStopInd = len(LabelsLISTBeforePGRemoval.split("|"))
finalSynthons = {}
if firstReactionAsPrep:
synthonsBeforeFirstPGremoval = __FirstReactionAsPrep(LabelsLISTBeforePGRemoval, "|".join(ReactionLISTBeforePGRemoval),
PreviousClasses, CurrentClass, mol, func)
else:
if "Ester" in CurrentClass and "Acid" in CurrentClass and func==3 and not twoPGs:
synthonsBeforeFirstPGremoval = __NormalSynthonsGenerator(LabelsLISTBeforePGRemoval,
"|".join(ReactionLISTBeforePGRemoval),
PreviousClasses, CurrentClass, mol, func=3)
elif (func==3 and twoPGs) or func==2:
synthonsBeforeFirstPGremoval = __NormalSynthonsGenerator(LabelsLISTBeforePGRemoval, "|".join(ReactionLISTBeforePGRemoval),
PreviousClasses, CurrentClass, mol, func=1)
elif func==3:
synthonsBeforeFirstPGremoval = __NormalSynthonsGenerator(LabelsLISTBeforePGRemoval,
"|".join(ReactionLISTBeforePGRemoval),
PreviousClasses, CurrentClass, mol, func=2)
if CurrentClass == "Trifunctional_Di_Esters_Amino":
SynthonsWithoutPG = __SynthonsGeneratorsForPolymerReagents(LabelsLIST.split("|")[2],
ReactionLIST.split("|")[2], CurrentClass,
synthonsBeforeFirstPGremoval, Deprotection=True)
if SynthonsWithoutPG:
for synth in SynthonsWithoutPG:
if synth not in finalSynthons:
finalSynthons[synth] = SynthonsWithoutPG[synth].copy()
else:
finalSynthons[synth].update(SynthonsWithoutPG[synth])
lastSynthons = __SynthonsGeneratorsForPolymerReagents(LabelsLIST.split("|")[3],
ReactionLIST.split("|")[3],
CurrentClass, SynthonsWithoutPG)
if lastSynthons:
for synth in lastSynthons:
if synth not in finalSynthons:
finalSynthons[synth] = lastSynthons[synth].copy()
else:
finalSynthons[synth].update(lastSynthons[synth])
return finalSynthons
PGremovalRule = ReactionLIST.split("|")[firstStopInd]
if keepSynthonsWithPG or PGremovalRule =="[N;+0,+1;$([N+](=O)([#6])[O-]),$(N(=O)([#6])=O):1](=[O:2])=,-[O;+0,-1:3]>>[NH2,+0:1]":
for synth in synthonsBeforeFirstPGremoval:
if synth not in finalSynthons:
finalSynthons[synth] = synthonsBeforeFirstPGremoval[synth].copy()
else:
finalSynthons[synth].update(synthonsBeforeFirstPGremoval[synth])
cuttingRule = Reactions.ReactionFromSmarts(PGremovalRule)
PGlable = LabelsLIST.split("|")[firstStopInd]
SynthonsWithoutPG = {}
for smi in synthonsBeforeFirstPGremoval:
        if func==3 and not twoPGs and smi.count(":")<2 and "Ester" not in CurrentClass and "AlkyneCH" not in CurrentClass: # forces the algorithm to use, for the PG removal step, only synthons where all unprotected functional groups have been transformed into synthons
continue
products = cuttingRule.RunReactants((Chem.MolFromSmiles(smi),))
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, PGlable)
for synth in labledSynthon:
if synth not in SynthonsWithoutPG:
SynthonsWithoutPG[synth] = set()
SynthonsWithoutPG[synth].update(PreviousClasses)
SynthonsWithoutPG[synth].add(CurrentClass)
LabelsLISTBetweenPGRemoval = LabelsLIST.split("|No|")[1]
ReactionLISTBetweenPGRemoval = ReactionLIST.split("|")[len(LabelsLISTBeforePGRemoval.split("|")) + 1:len(
LabelsLISTBeforePGRemoval.split("|")) + len(LabelsLISTBetweenPGRemoval.split("|")) + 1]
synthonsBetweenPGremoval = SynthonsWithoutPG.copy()
for newSynthon in SynthonsWithoutPG:
newSynthonsBetweenPGremoval = __NormalSynthonsGenerator(LabelsLISTBetweenPGRemoval,
"|".join(ReactionLISTBetweenPGRemoval),
PreviousClasses, CurrentClass, Chem.MolFromSmiles(newSynthon), func=1)
if newSynthonsBetweenPGremoval:
for synth in newSynthonsBetweenPGremoval:
if synth not in synthonsBetweenPGremoval:
synthonsBetweenPGremoval[synth] = newSynthonsBetweenPGremoval[synth].copy()
else:
synthonsBetweenPGremoval[synth].update(newSynthonsBetweenPGremoval[synth])
if len(LabelsLIST.split("|No|"))==3:
secondStopInd = len(LabelsLISTBeforePGRemoval.split("|")) + len(LabelsLISTBetweenPGRemoval.split("|")) + 1
PGremovalRule = ReactionLIST.split("|")[secondStopInd]
if len(LabelsLIST.split("|No|"))==2 or keepSynthonsWithPG or \
PGremovalRule =="[N;+0,+1;$([N+](=O)([#6])[O-]),$(N(=O)([#6])=O):1](=[O:2])=,-[O;+0,-1:3]>>[NH2,+0:1]":
for synth in SynthonsWithoutPG:
if synth not in finalSynthons:
finalSynthons[synth] = SynthonsWithoutPG[synth].copy()
else:
finalSynthons[synth].update(SynthonsWithoutPG[synth])
for synth in synthonsBetweenPGremoval:
if synth not in finalSynthons:
finalSynthons[synth] = synthonsBetweenPGremoval[synth].copy()
else:
finalSynthons[synth].update(synthonsBetweenPGremoval[synth])
if len(LabelsLIST.split("|No|"))==3:
cuttingRule = Reactions.ReactionFromSmarts(PGremovalRule)
PGlable = LabelsLIST.split("|")[secondStopInd]
SynthonsWithout2PG = {}
for mol in synthonsBetweenPGremoval:
products = cuttingRule.RunReactants((Chem.MolFromSmiles(mol),))
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, PGlable)
if labledSynthon:
for synth in labledSynthon:
if synth not in SynthonsWithout2PG:
SynthonsWithout2PG[synth] = set()
SynthonsWithout2PG[synth].update(PreviousClasses)
SynthonsWithout2PG[synth].add(CurrentClass)
for synth in SynthonsWithout2PG:
if synth not in finalSynthons:
finalSynthons[synth] = SynthonsWithout2PG[synth].copy()
else:
finalSynthons[synth].update(SynthonsWithout2PG[synth])
LabelsLast = LabelsLIST.split("|No|")[2]
ReactionLast = ReactionLIST.split("|")[len(LabelsLISTBeforePGRemoval.split("|")) + 1 + len(LabelsLISTBetweenPGRemoval.split("|"))+1:]
for newSynthon in SynthonsWithout2PG:
lastSynthons = __NormalSynthonsGenerator(LabelsLast,"|".join(ReactionLast),
PreviousClasses, CurrentClass, Chem.MolFromSmiles(newSynthon), func=1)
#lastSynthons.extend(__FirstReactionAsPrep(LabelsLast, "|".join(ReactionLast),Chem.MolFromSmiles(newSynthon), func=1, Class=Class)
if lastSynthons:
for synth in lastSynthons:
if synth not in finalSynthons:
finalSynthons[synth] = lastSynthons[synth].copy()
else:
finalSynthons[synth].update(lastSynthons[synth])
return finalSynthons
def __NormalSynthonsGenerator(LabelsLIST, ReactionLIST, PreviousClasses, CurrentClass, mol, func=1, usedInds = None):
    if usedInds is None:
usedInds = []
labledSynthons = {}
pat = re.compile("\[\w*:\w*\]")
molSmiles = Chem.MolToSmiles(mol, canonical=True)
marksPrevious = [molSmiles[m.start():m.start() + 2] + molSmiles[m.end() - 4:m.end()] for m in re.finditer(pat, molSmiles)]
for ind, rule in enumerate(ReactionLIST.split("|")):
if ind not in usedInds:
try:
cuttingRule = Reactions.ReactionFromSmarts(rule)
except:
print("########################")
print(Chem.MolToSmiles(mol, canonical=True))
print(rule)
exit()
products = cuttingRule.RunReactants((mol,))
Label = LabelsLIST.split("|")[ind]
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, Label)
                    if labledSynthon is None:
print(Chem.MolToSmiles(mol, canonical=True))
exit()
if marksPrevious:
for synth in labledSynthon:
marksNew = [synth[m.start():m.start() + 2] + synth[m.end() - 4:m.end()] for m in
re.finditer(pat, synth)]
if len(marksNew)>len(marksPrevious):
if synth not in labledSynthons:
labledSynthons[synth] = set()
labledSynthons[synth].update(PreviousClasses)
labledSynthons[synth].add(CurrentClass)
else:
for synth in labledSynthon:
if synth not in labledSynthons:
labledSynthons[synth] = set()
labledSynthons[synth].update(PreviousClasses)
labledSynthons[synth].add(CurrentClass)
newSynthons = None
if func == 2:
newMol = Chem.MolFromSmiles(labledSynthon[0])
usedInds.append(ind)
newSynthons = __NormalSynthonsGenerator(LabelsLIST, ReactionLIST, PreviousClasses, CurrentClass, newMol, func=1,
usedInds=usedInds)
elif func == 3:
newMol = Chem.MolFromSmiles(labledSynthon[0])
usedInds.append(ind)
newSynthons = __NormalSynthonsGenerator(LabelsLIST, ReactionLIST, PreviousClasses, CurrentClass, newMol, func=2,
usedInds=usedInds)
if newSynthons:
for synth in newSynthons:
if synth not in labledSynthons:
labledSynthons[synth] = newSynthons[synth].copy()
else:
labledSynthons[synth].update(newSynthons[synth])
return labledSynthons
def __FirstReactionAsPrep(LabelsLIST, ReactionLIST, PreviousClasses, CurrentClass, mol, func):
rule = ReactionLIST.split("|")[0]
cuttingRule = Reactions.ReactionFromSmarts(rule)
products = cuttingRule.RunReactants((mol,))
if not products:
if len(LabelsLIST.split("|"))>1:
return __FirstReactionAsPrep("|".join(LabelsLIST.split("|")[1:]), "|".join(ReactionLIST.split("|")[1:]),
PreviousClasses, CurrentClass, mol, func)
else:
return {Chem.MolToSmiles(mol, canonical=True): PreviousClasses}
else:
Label = LabelsLIST.split("|")[0]
synthonsAsInpForTheNextStep = {}
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, Label)
if labledSynthon:
for synth in labledSynthon:
if synth not in synthonsAsInpForTheNextStep:
synthonsAsInpForTheNextStep[synth] = set()
synthonsAsInpForTheNextStep[synth].update(PreviousClasses)
synthonsAsInpForTheNextStep[synth].add(CurrentClass)
if len(ReactionLIST.split("|"))==1:
return synthonsAsInpForTheNextStep
if not synthonsAsInpForTheNextStep and "Boronics" in CurrentClass:
rule = ReactionLIST.split("|")[1]
cuttingRule = Reactions.ReactionFromSmarts(rule)
products = cuttingRule.RunReactants((mol,))
Label = LabelsLIST.split("|")[1]
for productSet in products:
for product in productSet:
labledSynthon = __getBBLabledSmiles(product, Label)
if labledSynthon:
for synth in labledSynthon:
if synth not in synthonsAsInpForTheNextStep:
synthonsAsInpForTheNextStep[synth] = set()
synthonsAsInpForTheNextStep[synth].update(PreviousClasses)
synthonsAsInpForTheNextStep[synth].add(CurrentClass)
for synth in synthonsAsInpForTheNextStep:
labledSynthons = __NormalSynthonsGenerator("|".join(LabelsLIST.split("|")[1:]), "|".join(ReactionLIST.split("|")[1:]),
PreviousClasses, CurrentClass, Chem.MolFromSmiles(synth), func=func - 1)
if labledSynthons:
for synth in labledSynthons:
if synth not in synthonsAsInpForTheNextStep:
synthonsAsInpForTheNextStep[synth] = labledSynthons[synth].copy()
else:
synthonsAsInpForTheNextStep[synth].update(labledSynthons[synth])
return synthonsAsInpForTheNextStep
lastSynthons = {}
for synth in synthonsAsInpForTheNextStep:
labledSynthons = __NormalSynthonsGenerator("|".join(LabelsLIST.split("|")[1:]), "|".join(ReactionLIST.split("|")[1:]),
PreviousClasses, CurrentClass, Chem.MolFromSmiles(synth), func=func-1)
if labledSynthons:
for synth in labledSynthons:
if synth not in lastSynthons:
lastSynthons[synth] = labledSynthons[synth].copy()
else:
lastSynthons[synth].update(labledSynthons[synth])
for synth in lastSynthons:
if synth not in synthonsAsInpForTheNextStep:
synthonsAsInpForTheNextStep[synth] = lastSynthons[synth]
else:
synthonsAsInpForTheNextStep[synth].update(lastSynthons[synth])
return synthonsAsInpForTheNextStep
def __getBBLabledSmiles(productMolecule: Chem.rdchem.Mol, Label:str):
productSmiles = Chem.MolToSmiles(productMolecule, canonical=True)
labeledSmilesList = []
if Label != "No":
for sublabel in Label.split(","):
if productSmiles.find(sublabel.split("->")[0]) != -1:
labeledSmiles = checkLable(productSmiles, sublabel)
                if labeledSmiles is None:
return None
if "*" in labeledSmiles:
productSmiles = labeledSmiles
continue
elif labeledSmiles:
labeledSmilesList.append(labeledSmiles)
if labeledSmilesList:
return list(set(labeledSmilesList))
#print("WARNING! No lable was assigned to the smiles: " + productSmiles)
return [productSmiles]
def __getReactionSMARTS(BB_Marks: ET.Element):
MarksSetup = {}
for child in BB_Marks:
for subCh in child:
if subCh.get('SMARTS'):
MarksSetup[child.tag + "_" + subCh.tag] = {}
MarksSetup[child.tag + "_" + subCh.tag]["SMARTS"] = subCh.get('SMARTS')
MarksSetup[child.tag + "_" + subCh.tag]["Labels"] = subCh.get('Labels')
return MarksSetup
def generateScaffoldForBB(smiles, returnObjects=False):
scaffold = None
mol = None
PGdict = {"NCbz": "[N:1][C;$(C(=O)O[CH2]c1[cH][cH][cH][cH][cH]1):2]>>[N:1]",
"NFmoc": "[N:1][C;$(C(=O)O[CH2][CH]1c2[cH][cH][cH][cH]c2-c3[cH][cH][cH][cH]c13):2]>>[N:1]",
"NBnz": "[N;+0;$(N[CH2]c1[cH][cH][cH][cH][cH]1);!$(N[C,S,P]=[O,S,N]):1][C;$([CH2]c1[cH][cH][cH][cH][cH]1):2]>>[N:1]",
"COOBnz": "[O;$(O(C)C([#6])=O):1][C;$([CH2]c1[cH][cH][cH][cH][cH]1):2]>>[OH:1]",
"Boronics": "[B;$(B(O@C)O@C):1][#6:2]>>[#6:2]",
"Oxiranes": "[C:1]1[O:2][C:3]1>>[C:1]([OH:2])[C;+0:3]"}
mol = readMol(smiles)
if mol:
for pg in PGdict:
mol = __removePGforScaffolds(PGdict[pg], mol)
scaffold = MurckoScaffoldSmiles(mol=mol)
if returnObjects:
return scaffold,mol
else:
return scaffold
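# Illustrative example (invented SMILES, not from the source): for an
# N-Cbz-protected amine such as "O=C(O)c1ccc(CNC(=O)OCc2ccccc2)cc1", the NCbz
# rule above strips the carbamate first, so the returned Murcko scaffold is
# the bare benzene ring rather than a scaffold that still carries the
# protective group's benzyl ring.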
def __removePGforScaffolds(reactionRule, mol):
q = Chem.MolFromSmarts(reactionRule.split(">>")[0])
cuttingRule = Reactions.ReactionFromSmarts(reactionRule)
while mol.HasSubstructMatch(q):
products = cuttingRule.RunReactants((mol,))
mol = products[0][0]
mol.UpdatePropertyCache()
Chem.GetSymmSSSR(mol)
mol.GetRingInfo().NumRings()
return mol
"""def __checkLable(productSmiles:str, Label:str):
goodValenceSmiles = None
if Label.split("->")[0][1] == "S":
hCount = 1
out = productSmiles.replace(Label.split("->")[0],
"[" + Label.split("->")[1].split(":")[0] + "H" + str(hCount) + ":" +
Label.split("->")[1].split(":")[1] + "]")
goodValenceSmiles = out
else:
for hCount in range(1, 5):
if "+" in Label:
out = productSmiles.replace(Label.split("->")[0],
"[" + Label.split("->")[1].split(":")[0] + "H" + str(hCount) + "+:" +
Label.split("->")[1].split(":")[1] + "]")
else:
out = productSmiles.replace(Label.split("->")[0],
"[" + Label.split("->")[1].split(":")[0] + "H" + str(hCount) + ":" +
Label.split("->")[1].split(":")[1] + "]")
newMol = Chem.MolFromSmiles(out)
if not newMol:
if "[nH1:" in out:
modifiedSmiles = out.replace("[nH1:", "[n:", 1)
check2 = CheckMolStructure(modifiedSmiles, Label)
if check2:
break
else:
goodValenceSmiles = modifiedSmiles
else:
break
else:
goodValenceSmiles = out
check = CheckMolStructure(goodValenceSmiles, Label)
if check:
break
if not goodValenceSmiles:
print("Problem with structure check: " + productSmiles + " " + out)
return None
return generateMajorTautFromSynthonSmiles(goodValenceSmiles)"""
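# Minimal usage sketch (assumes RDKit and the package's config/BB_Marks.xml
# are available; the SMILES is an arbitrary illustration, not from the source):
#
#     synthons = mainSynthonsGenerator("OC(=O)c1ccccc1N", returnDict=True)
#     for smi, classes in synthons.items():
#         print(smi, "+".join(classes))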
|
from .network import ValueNet, PolicyNet, SoftQNet
|
var w = 600,
h = 400,
w_full = w,
h_full = h;
if (w > $( window ).width()) {
w = $( window ).width();
}
// set the dimensions and margins of the graph
var margin = {top: 10, right: 10, bottom: 50, left: 40},
w = w - margin.left - margin.right,
h = h - margin.top - margin.bottom;
var urlNuevos = "https://raw.githubusercontent.com/mexicovid19/Mexico-datos/master/datos_abiertos/formato_especial/comparativo_muertes_nuevas.csv";
var widthBar = 6;
var tipH = d3.select("#barplot_comparativo_muertes").append("div")
.attr("class", "tipH")
.style("opacity", 0);
// append the svg object to the body of the page
var svgBarC = d3.select("#barplot_comparativo_muertes")
.append("svg")
.attr("width", w_full)
.attr("height", h_full)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// X axis
var today = new Date();
//var two_weeks_ago = new Date()
var dd = String(today.getDate()).padStart(2, '0');
var mm = String(today.getMonth() + 1).padStart(2, '0'); //January is 0!
var yyyy = today.getFullYear();
var mindate = new Date(2020,2,18);
var two_weeks_ago = new Date(today.getFullYear(), today.getMonth(), today.getDate() - 14);
var x = d3.scaleTime()
.domain([mindate, today])
.range([0, w]);
var xAxis = svgBarC.append("g")
.attr("transform", "translate(0," + h + ")")
.attr("class","graph_date")
.call(d3.axisBottom(x))
.selectAll("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", ".15em")
.attr("transform", "rotate(-65)");
// Initialize the Y axis
var y = d3.scaleLinear()
.range([ h, 0]);
var yAxis = svgBarC.append("g")
.attr("class", "myYaxis")
// A function that creates / updates the plot for a given variable:
function update(selectedVar) {
// Parse the Data
d3.csv(urlNuevos, function(data) {
data.forEach(function(d) {
d.Fecha = new Date(d.Fecha);
});
// Add Y axis
y.domain([0, d3.max(data, function(d) { return +d.Nuevas_JH }) ]);
yAxis.transition().duration(1000).call(d3.axisLeft(y));
// variable u: map data to existing bars
var u = svgBarC.selectAll("rect")
.data(data)
// update bars
u
.enter()
.append("rect")
.merge(u)
//.transition()
//.duration(400)
//.delay(function(d,i){ return(i*100)})
.attr("x", function(d) { return x(d.Fecha)-widthBar/2; })
.attr("y", function(d) { return y(d[selectedVar]); })
.attr("width", widthBar)
.attr("height", function(d) { return h - y(d[selectedVar]); })
.attr("fill", function(d){ if (selectedVar == "Nuevas_JH") { return "mediumorchid"} else {return "darkorange"}})
.on("mouseover", function(d) {
tipH.transition()
.duration(200)
.style("opacity", .9);
tipH.html("<h6>" + formatDay(d.Fecha) + "/" + formatMonth(d.Fecha) + "</h6>"+ " <p class='text-primary'>" + (+d[selectedVar]).toLocaleString() + "</p>")
.style("left", (d3.event.pageX) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function(d) {
tipH.transition()
.duration(500)
.style("opacity", 0);
})
.attr("opacity", function(d){if (d.Fecha > two_weeks_ago && selectedVar != "Nuevas_JH"){ return 0.5 } else { return 1. }})
})
};
// Phase lines
// Phase 3
var fase3=new Date(2020,3,20);
var fase = svgBarC.append("line")
.attr("x1", x(fase3))
.attr("y1", y(y.domain()[0]))
.attr("x2", x(fase3))
.attr("y2", y(y.domain()[1])+17)
.attr("stroke", "#000000")
.style("stroke-width", 1)
.style("fill", "none")
.style("stroke-dasharray", "5,5");
svgBarC.append("text")
.attr("y", y(y.domain()[1]))
.attr("x", x(fase3) - 50)
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-size","10px")
.text("Comienza la fase 3")
.attr("stroke", "#000000")
.attr("font-family", "sans-serif");
// Phase 2
var fase12 = new Date(2020, 2, 23);
var fase = svgBarC.append("line")
.attr("x1", x(fase12))
.attr("y1", y(y.domain()[0]))
.attr("x2", x(fase12))
.attr("y2", y(y.domain()[1])+57)
.attr("stroke", "#000000")
.style("stroke-width", 1)
.style("fill", "none")
.style("stroke-dasharray", "5,5");
svgBarC.append("text")
.attr("y", y(y.domain()[1])+40)
.attr("x", x(fase12)+35)
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-size","10px")
.text("Comienza la fase 2")
.attr("stroke", "#000000")
.attr("font-family", "sans-serif");
// Health emergency declared
var faseExt = new Date(2020, 2, 30);
var fase = svgBarC.append("line")
.attr("x1", x(faseExt))
.attr("y1", y(y.domain()[0]))
.attr("x2", x(faseExt))
.attr("y2", y(y.domain()[1])+37)
.attr("stroke", "#000000")
.style("stroke-width", 1)
.style("fill", "none")
.style("stroke-dasharray", "5,5");
svgBarC.append("text")
.attr("y", y(y.domain()[1])+20)
.attr("x", x(faseExt)+30)
.attr("dy", "1em")
.style("text-anchor", "middle")
.style("font-size","10px")
.text("Emergencia sanitaria")
.attr("stroke", "#000000")
.attr("font-family", "sans-serif");
// Initialize plot
update('Nuevas_JH')
|
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CC_ANIMATION_TRANSFORM_OPERATIONS_H_
#define CC_ANIMATION_TRANSFORM_OPERATIONS_H_
#include <vector>
#include "base/memory/scoped_ptr.h"
#include "cc/animation/transform_operation.h"
#include "cc/base/cc_export.h"
#include "ui/gfx/transform.h"
namespace gfx {
class BoxF;
struct DecomposedTransform;
}
namespace cc {
// Transform operations are a decomposed transformation matrix. It can be
// applied to obtain a gfx::Transform at any time, and can be blended
// intelligently with other transform operations, so long as they represent the
// same decomposition. For example, if we have a transform that is made up of
// a rotation followed by skew, it can be blended intelligently with another
// transform made up of a rotation followed by a skew. Blending is possible if
// we have two dissimilar sets of transform operations, but the effect may not
// be what was intended. For more information, see the comments for the blend
// function below.
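// Illustrative sketch (not part of the original header):
//   TransformOperations from_ops, to_ops;
//   from_ops.AppendRotate(0, 0, 1, 0);   to_ops.AppendRotate(0, 0, 1, 90);
//   from_ops.AppendSkew(0, 0);           to_ops.AppendSkew(10, 0);
//   // The lists match type-for-type, so each operation pair blends separately:
//   gfx::Transform halfway = to_ops.Blend(from_ops, 0.5);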
class CC_EXPORT TransformOperations {
public:
TransformOperations();
TransformOperations(const TransformOperations& other);
~TransformOperations();
// Returns a transformation matrix representing these transform operations.
gfx::Transform Apply() const;
// Given another set of transform operations and a progress in the range
// [0, 1], returns a transformation matrix representing the intermediate
// value. If this->MatchesTypes(from), then each of the operations are
// blended separately and then combined. Otherwise, the two sets of
// transforms are baked to matrices (using apply), and the matrices are
// then decomposed and interpolated. For more information, see
// http://www.w3.org/TR/2011/WD-css3-2d-transforms-20111215/#matrix-decomposition.
gfx::Transform Blend(const TransformOperations& from,
SkMScalar progress) const;
// Sets |bounds| to be the bounding box for the region within which |box| will
// exist when it is transformed by the result of calling Blend on |from| and
// with progress in the range [min_progress, max_progress]. If this region
// cannot be computed, returns false.
bool BlendedBoundsForBox(const gfx::BoxF& box,
const TransformOperations& from,
SkMScalar min_progress,
SkMScalar max_progress,
gfx::BoxF* bounds) const;
// Returns true if these operations affect scale.
bool AffectsScale() const;
// Returns true if these operations are only translations.
bool IsTranslation() const;
// Returns false if the operations affect 2d axis alignment.
bool PreservesAxisAlignment() const;
// Returns true if this operation and its descendants have the same types
// as other and its descendants.
bool MatchesTypes(const TransformOperations& other) const;
// Returns true if these operations can be blended. It will only return
// false if we must resort to matrix interpolation, and matrix interpolation
// fails (this can happen if either matrix cannot be decomposed).
bool CanBlendWith(const TransformOperations& other) const;
// If these operations have no more than one scale operation, and if the only
// other operations are translations, sets |scale| to the scale component
// of these operations. Otherwise, returns false.
bool ScaleComponent(gfx::Vector3dF* scale) const;
void AppendTranslate(SkMScalar x, SkMScalar y, SkMScalar z);
void AppendRotate(SkMScalar x, SkMScalar y, SkMScalar z, SkMScalar degrees);
void AppendScale(SkMScalar x, SkMScalar y, SkMScalar z);
void AppendSkew(SkMScalar x, SkMScalar y);
void AppendPerspective(SkMScalar depth);
void AppendMatrix(const gfx::Transform& matrix);
void AppendIdentity();
bool IsIdentity() const;
private:
bool BlendInternal(const TransformOperations& from,
SkMScalar progress,
gfx::Transform* result) const;
std::vector<TransformOperation> operations_;
bool ComputeDecomposedTransform() const;
// For efficiency, we cache the decomposed transform.
mutable scoped_ptr<gfx::DecomposedTransform> decomposed_transform_;
mutable bool decomposed_transform_dirty_;
DISALLOW_ASSIGN(TransformOperations);
};
} // namespace cc
#endif // CC_ANIMATION_TRANSFORM_OPERATIONS_H_
|
import builtins
import functools
import inspect
import os
import typing
def parseType(name, typeTable):
if hasattr(builtins, name):
return getattr(builtins, name)
if name in typeTable:
return typeTable[name]
if '.' in name:
seek = typeTable
for part in name.split('.'):
try:
                if part not in seek:
break
seek = seek[part]
except TypeError:
if not hasattr(seek, part):
break
seek = getattr(seek, part)
else:
return seek
raise ValueError("Unrecognized type: %s" % name)
def describeTypeOf(obj):
if obj is None:
return 'None'
if isinstance(obj, tuple):
return "tuple of %s" % '/'.join(sorted(set(describeTypeOf(e) for e in obj))) if obj else "tuple"
if isinstance(obj, list):
return "list of %s" % '/'.join(sorted(set(describeTypeOf(e) for e in obj))) if obj else "list"
if isinstance(obj, dict):
return "map from %s to %s" % ('/'.join(sorted(set(describeTypeOf(k) for k in obj))), '/'.join(sorted(set(describeTypeOf(v) for v in obj.values())))) if obj else "map"
if isinstance(obj, set):
return "set of %s" % '/'.join(sorted(set(describeTypeOf(e) for e in obj))) if obj else "set"
return type(obj).__name__
def describeTypestring(typestring, typeTable):
typestring = typestring.strip()
if typestring == '':
        # This isn't illegal at the top level, but verify() wouldn't have called us in that case.
        # If it happens here, it means it's a fragment of a larger expression and can't be empty
raise ValueError("Empty substring in type string")
if typestring == 'None':
return "None"
if typestring[-1] == '?':
return "(optional) %s" % describeTypestring(typestring[:-1], typeTable)
if typestring[-1] == '^':
return "(implicit) %s" % describeTypestring(typestring[:-1], typeTable)
ends, rest = typestring[0] + typestring[-1], typestring[1:-1]
if ends == '()':
subtypes = rest.split(',')
if len(subtypes) == 1:
return "tuple of %s" % describeTypestring(rest, typeTable)
elif len(subtypes) == 2:
return "%s or %s" % tuple(describeTypestring(e, typeTable) for e in subtypes)
else:
return "%sor %s" % (''.join("%s, " % describeTypestring(e, typeTable) for e in subtypes[:-1]), describeTypestring(subtypes[-1], typeTable))
if ends == '[]':
return "list of %s" % describeTypestring(rest, typeTable)
if ends == '{}':
if ':' in rest:
return "map from %s to %s" % tuple(describeTypestring(e, typeTable) for e in rest.split(':', 1))
else:
return "set of %s" % describeTypestring(rest, typeTable)
try:
return parseType(typestring, typeTable).__name__
except ValueError as e:
raise ValueError("Invalid typestring `%s': %s" % (typestring, e))
def verify(typestring, typeTable):
"""
Check that 'typestring' is a valid typestring. Throws ValueError if not
"""
if typestring == inspect.Parameter.empty:
return True
if isinstance(typestring, str):
pass
elif isinstance(typestring, tuple) and len(typestring) == 2 and isinstance(typestring[0], str) and callable(typestring[1]):
typestring = typestring[0]
else:
raise ValueError("Invalid typestring `%s': not a string or string/predicate")
if typestring.strip() == '':
return True
describeTypestring(typestring, typeTable) # Will throw ValueError if bad
return True
def typecheck(typestring, value, typeTable, setter = None):
"""
Check that 'value' satisfies 'typestring'.
If 'setter' is non-None, it may be called to replace the function parameter with a converted instance
"""
if typestring == inspect.Parameter.empty:
return True
typestring = typestring.strip()
if typestring == '':
return True
# None type
if typestring == 'None':
return value is None
# Optional type, e.g. 'int?'
if typestring[-1] == '?':
return (value is None) or typecheck(typestring[:-1], value, typeTable, setter)
# Convertable type, e.g. 'int^'
if typestring[-1] == '^':
substr = typestring[:-1]
try:
ty = parseType(substr, typeTable)
except ValueError as e:
raise ValueError("Can't implicitly convert to unrecognized type: %s" % substr)
# Already the proper type; no conversion necessary
if isinstance(value, ty):
return True
# Attempt the conversion
try:
ctor = getattr(ty, '__init__')
# If the constructor is wrapped in a typechecker, get the real constructor
while hasattr(ctor, 'tcWrappedFn'):
ctor = ctor.tcWrappedFn
spec = inspect.getfullargspec(ctor)
if len(spec.args) != 2: # self + the one parameter
raise TypeError("constructor is not unary")
argName = spec.args[1]
if argName not in spec.annotations:
raise TypeError("constructor parameter has no type annotation")
if not typecheck(spec.annotations[argName], value, typeTable):
raise TypeError("got type [%s]; constructor takes [%s]" % (describeTypeOf(value), describeTypestring(spec.annotations[argName], typeTable)))
setter(ty(value))
return True
except Exception as e:
raise TypeError("Unable to implicitly convert to %s: %s" % (ty.__name__, e))
ends, rest = typestring[0] + typestring[-1], typestring[1:-1]
if ends == '()':
# Union type, e.g. '(int, str)'
if ',' in rest:
return any(typecheck(substr, value, typeTable, setter) for substr in rest.split(','))
# Tuple type, e.g. '(int)'
else:
if not isinstance(value, tuple):
return False
return all(typecheck(rest, e, typeTable) for e in value)
# List type, e.g. '[int]'
if ends == '[]':
if not isinstance(value, list):
return False
return all(typecheck(rest, e, typeTable, lambda new: value.__setitem__(i, new)) for i, e in enumerate(value))
if ends == '{}':
# Dict type, e.g. '{int: int}'
if ':' in rest:
if not isinstance(value, dict):
return False
keystr, valstr = map(lambda s: s.strip(), rest.split(':', 1))
def renameKey(old, new):
value[new] = value[old]
del value[old]
return all(typecheck(keystr, k, typeTable, lambda new: renameKey(k, new)) and typecheck(valstr, v, typeTable, lambda new: value.__setitem__(k, new)) for k, v in value.items())
# Set type, e.g. '{int}'
else:
if not isinstance(value, set):
return False
def replaceEntry(old, new):
value.remove(old)
value.add(new)
return all(typecheck(rest, e, typeTable, lambda new: replaceEntry(e, new)) for e in value)
# Bare type, e.g. 'int'
return isinstance(value, parseType(typestring, typeTable))
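# Illustrative checks (empty type table; values invented for the example):
#   typecheck('int?', None, {})            -> True
#   typecheck('(int, str)', 'x', {})       -> True   (union member matches)
#   typecheck('{str: int}', {'a': 1}, {})  -> True
#   typecheck('[int]', [1, 'x'], {})       -> False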
@typing.no_type_check_decorator
def tc(f, preVerifyAnnotations = True, nextOverload = None):
signature = inspect.signature(f)
# Need to look up types in the scope that 'f' was declared in
    # Scan for the first frame that's within the function's source file and line range
fFile = inspect.getsourcefile(f)
lines, fStartLine = inspect.getsourcelines(f)
fEndLine = fStartLine + len(lines) - 1
for frame, filename, lineno, _, _, _ in inspect.stack():
if filename == fFile and fStartLine <= lineno <= fEndLine:
typeTable = frame.f_globals
break
else:
raise RuntimeError("Unable to find scope containing the declaration of %s" % f)
# Make sure the annotations are valid
if preVerifyAnnotations:
for param in signature.parameters.values():
verify(param.annotation, typeTable)
verify(signature.return_annotation, typeTable)
@functools.wraps(f)
def wrap(*args, **kw):
try:
binding = signature.bind_partial(*args, **kw)
for name, param in signature.parameters.items():
typestring = param.annotation
# Already did the checking in verify(); at this point typestring is either just the string or a tuple with the string and predicate
predicate = None
if isinstance(typestring, tuple):
typestring, predicate = typestring
if name in binding.arguments: # If not, using the default value. We could typecheck the default as well, but choosing not to
value = binding.arguments[name]
if not typecheck(typestring, value, typeTable, lambda new: binding.arguments.__setitem__(name, new)):
raise TypeError("Invalid argument `%s' of type [%s]; expected [%s]" % (name, describeTypeOf(binding.arguments[name]), describeTypestring(typestring, typeTable)))
if predicate is not None:
result = predicate(value)
if result is not True:
result = ("predicate unsatisfied: %s" % result) if result else 'predicate unsatisfied'
raise TypeError("Invalid argument `%s': %s" % (name, result))
rtn = {'rtn': f(*binding.args, **binding.kwargs)}
typestring, predicate = signature.return_annotation, None
if isinstance(typestring, tuple):
typestring, predicate = typestring
if not typecheck(typestring, rtn['rtn'], typeTable, lambda new: rtn.__setitem__('rtn', new)):
raise TypeError("Invalid return value of type [%s]; expected [%s]" % (describeTypeOf(rtn['rtn']), describeTypestring(typestring, typeTable)))
if predicate is not None and not predicate(rtn['rtn']):
raise TypeError("Invalid return value: predicate unsatisfied")
except TypeError:
# If there are overloads, move on to those
if nextOverload is not None:
try:
return nextOverload(*args, **kw)
except TypeError:
# The overload didn't match; keep the exception chain going
raise
except Exception as e:
# If an overload matched (i.e. didn't raise a TypeError) but then the method threw some other exception, it shouldn't chain into previous TypeErrors raised by overload resolution failures
raise e from None
#TODO Right now all the overload type errors come out in a chain because of PEP3134; it'd be nice to generate a single exception that lists all the signatures, but an elegant way to do that escapes me at the moment
raise
return rtn['rtn']
wrap.tcWrappedFn = f
return wrap
# 'tc' is designed to be used as '@tc', not '@tc()', so it can't take arguments. This version takes arguments and forwards them to tc
def tc_opts(*, verify = True, overload = None):
def wrap(f):
return tc(f, verify, overload)
return wrap
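# Illustrative usage sketch (an assumption, not part of the original module; it
# exercises only the typestring forms visible above -- bare types and
# (typestring, predicate) tuples):
#
#   @tc
#   def scale(x: 'int', factor: 'int') -> 'int':
#       return x * factor
#
#   @tc_opts(verify=False)
#   def greet(name: ('str', lambda s: len(s) > 0)) -> 'str':
#       return 'hello ' + name
#
#   scale(2, 3)       # passes both argument checks and the return check
#   scale(2, 'oops')  # raises TypeError from the wrapper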
|
/**
@file
@author Stefan Frings
*/
#ifndef HTTPSESSIONSTORE_H
#define HTTPSESSIONSTORE_H
#include <QObject>
#include <QMap>
#include <QTimer>
#include <QMutex>
#include "httpglobal.h"
#include "httpsession.h"
#include "httpresponse.h"
#include "httprequest.h"
namespace stefanfrings {
/**
Stores HTTP sessions and deletes them when they have expired.
The following configuration settings are required in the config file:
<code><pre>
expirationTime=3600000
cookieName=sessionid
</pre></code>
  The following additional configuration settings are optional:
<code><pre>
cookiePath=/
cookieComment=Session ID
;cookieDomain=stefanfrings.de
</pre></code>
*/
class DECLSPEC HttpSessionStore : public QObject {
Q_OBJECT
Q_DISABLE_COPY(HttpSessionStore)
public:
/**
Constructor.
@param settings Configuration settings, usually stored in an INI file. Must not be 0.
Settings are read from the current group, so the caller must have called settings->beginGroup().
Because the group must not change during runtime, it is recommended to provide a
separate QSettings instance that is not used by other parts of the program.
The HttpSessionStore does not take over ownership of the QSettings instance, so the
caller should destroy it during shutdown.
@param parent Parent object
*/
HttpSessionStore(const QSettings* settings, QObject* parent=nullptr);
/** Destructor */
virtual ~HttpSessionStore();
/**
Get the ID of the current HTTP session, if it is valid.
This method is thread safe.
@warning Sessions may expire at any time, so subsequent calls of
getSession() might return a new session with a different ID.
@param request Used to get the session cookie
@param response Used to get and set the new session cookie
@return Empty string, if there is no valid session.
*/
QByteArray getSessionId(HttpRequest& request, HttpResponse& response);
/**
      Get the session of an HTTP request, creating a new one if necessary.
This method is thread safe. New sessions can only be created before
the first byte has been written to the HTTP response.
@param request Used to get the session cookie
@param response Used to get and set the new session cookie
@param allowCreate can be set to false, to disable the automatic creation of a new session.
      @return If allowCreate is disabled, the function returns a null session if there is no session.
@see HttpSession::isNull()
*/
HttpSession getSession(HttpRequest& request, HttpResponse& response, const bool allowCreate=true);
/**
      Get an HTTP session by its ID number.
This method is thread safe.
@return If there is no such session, the function returns a null session.
@param id ID number of the session
@see HttpSession::isNull()
*/
HttpSession getSession(const QByteArray id);
/** Delete a session */
void removeSession(const HttpSession session);
protected:
/** Storage for the sessions */
QMap<QByteArray,HttpSession> sessions;
private:
/** Configuration settings */
const QSettings* settings;
/** Timer to remove expired sessions */
QTimer cleanupTimer;
/** Name of the session cookie */
QByteArray cookieName;
/** Time when sessions expire (in ms)*/
int expirationTime;
/** Used to synchronize threads */
QMutex mutex;
private slots:
/** Called every minute to cleanup expired sessions. */
void sessionTimerEvent();
signals:
/**
Emitted when the session is deleted.
@param sessionId The ID number of the session.
*/
void sessionDeleted(const QByteArray& sessionId);
};
} // end of namespace
#endif // HTTPSESSIONSTORE_H
|
#!/usr/bin/env python
import sys
import os
import shutil
def cmake(platform_dir, src_dir, arg):
if not(os.path.exists(platform_dir)):
os.mkdir(platform_dir)
os.chdir(platform_dir) # go to platform directory
cmd = "cmake " + arg + " ../" + src_dir
print("cmd")
os.system(cmd) # run cmake
def clean(platform_dir):
if os.path.exists(platform_dir):
shutil.rmtree(platform_dir)
if __name__ == '__main__':
src_dir = '../src/' # source code directory
bin_dir = '../build/' # binary directory
if not(os.path.exists(bin_dir)): # binary directory is created if not found
os.mkdir(bin_dir)
platform = sys.platform
cmd = ""
if (platform == 'darwin'): # platform directory is created if not found
platform = 'mac'
cmd = '-G Xcode'
platform_dir = bin_dir + platform
for arg in sys.argv:
if (arg == '--cmake'):
cmake(platform_dir, src_dir, cmd)
elif (arg == '--clean'):
clean(platform_dir)
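# Example invocations (illustrative; substitute the actual name this script is
# saved under in the repository):
#   python generate_build.py --cmake   # configure the build under ../build/<platform>
#   python generate_build.py --clean   # delete the platform build directory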
|
"""Tests for the typing.NamedTuple overlay."""
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class NamedTupleTest(test_base.TargetPython3BasicTest):
"""Tests for the typing.NamedTuple overlay."""
def test_make(self):
self.CheckWithErrors("""
import typing
A = typing.NamedTuple("A", [("b", str), ("c", str)])
a = A._make(["hello", "world"])
b = A._make(["hello", "world"], len=len)
c = A._make([1, 2]) # wrong-arg-types
d = A._make(A) # wrong-arg-types
def f(e: A) -> None: pass
f(a)
""")
def test_subclass(self):
self.CheckWithErrors("""
import typing
A = typing.NamedTuple("A", [("b", str), ("c", int)])
class B(A):
def __new__(cls, b: str, c: int=1):
return super(B, cls).__new__(cls, b, c)
x = B("hello", 2)
y = B("world")
def take_a(a: A) -> None: pass
def take_b(b: B) -> None: pass
take_a(x)
take_b(x)
take_b(y)
take_b(A("", 0)) # wrong-arg-types
B() # missing-parameter
# _make and _replace should return instances of the subclass.
take_b(B._make(["hello", 0]))
take_b(y._replace(b="world"))
""")
def test_callable_attribute(self):
ty = self.Infer("""
from typing import Callable, NamedTuple
X = NamedTuple("X", [("f", Callable)])
def foo(x: X):
return x.f
""")
self.assertMultiLineEqual(pytd_utils.Print(ty.Lookup("foo")),
"def foo(x: X) -> Callable: ...")
def test_bare_union_attribute(self):
ty, errors = self.InferWithErrors("""
from typing import NamedTuple, Union
X = NamedTuple("X", [("x", Union)]) # invalid-annotation[e]
def foo(x: X):
return x.x
""")
self.assertMultiLineEqual(pytd_utils.Print(ty.Lookup("foo")),
"def foo(x: X) -> Any: ...")
self.assertErrorRegexes(errors, {"e": r"Union.*x"})
class NamedTupleTestPy3(test_base.TargetPython3FeatureTest):
"""Tests for the typing.NamedTuple overlay in Python 3."""
def test_basic_namedtuple(self):
ty = self.Infer("""
import typing
X = typing.NamedTuple("X", [("a", int), ("b", str)])
x = X(1, "hello")
a = x.a
b = x.b
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import Callable, Iterable, Sized, Tuple, Type, TypeVar, Union
typing = ... # type: module
x = ... # type: X
a = ... # type: int
b = ... # type: str
_TX = TypeVar('_TX', bound=X)
class X(tuple):
__slots__ = ["a", "b"]
__dict__ = ... # type: collections.OrderedDict[str, Union[int, str]]
_field_defaults = ... # type: collections.OrderedDict[str, Union[int,
str]]
_field_types = ... # type: collections.OrderedDict[str, type]
_fields = ... # type: Tuple[str, str]
a = ... # type: int
b = ... # type: str
def __getnewargs__(self) -> Tuple[int, str]: ...
def __getstate__(self) -> None: ...
def __init__(self, *args, **kwargs) -> None: ...
def __new__(cls: Type[_TX], a: int, b: str) -> _TX: ...
def _asdict(self) -> collections.OrderedDict[str,
Union[int, str]]: ...
@classmethod
def _make(cls: Type[_TX], iterable: Iterable[Union[int, str]],
new = ..., len: Callable[[Sized], int] = ...) -> _TX: ...
def _replace(self: _TX, **kwds: Union[int, str]) -> _TX: ...
""")
def test_union_attribute(self):
ty = self.Infer("""
from typing import NamedTuple, Union
X = NamedTuple("X", [("x", Union[bytes, str])])
def foo(x: X):
return x.x
""")
self.assertMultiLineEqual(pytd_utils.Print(ty.Lookup("foo")),
"def foo(x: X) -> Union[bytes, str]: ...")
def test_bad_call(self):
_, errorlog = self.InferWithErrors("""
from typing import NamedTuple
E2 = NamedTuple('Employee2', [('name', str), ('id', int)],
birth=str, gender=bool) # invalid-namedtuple-arg[e1] # wrong-keyword-args[e2]
""")
self.assertErrorRegexes(errorlog, {
"e1": r"Either list of fields or keywords.*",
"e2": r".*(birth, gender).*NamedTuple"})
def test_bad_attribute(self):
_, errorlog = self.InferWithErrors("""
from typing import NamedTuple
class SubCls(NamedTuple): # not-writable[e]
def __init__(self):
pass
""")
self.assertErrorRegexes(errorlog, {"e": r".*'__init__'.*[SubCls]"})
def test_bad_arg_count(self):
_, errorlog = self.InferWithErrors("""
from typing import NamedTuple
class SubCls(NamedTuple):
a: int
b: int
cls1 = SubCls(5) # missing-parameter[e]
""")
self.assertErrorRegexes(errorlog, {"e": r"Missing.*'b'.*__new__"})
def test_bad_arg_name(self):
self.InferWithErrors("""
from typing import NamedTuple
class SubCls(NamedTuple): # invalid-namedtuple-arg
_a: int
b: int
cls1 = SubCls(5)
""")
def test_namedtuple_class(self):
self.Check("""
from typing import NamedTuple
class SubNamedTuple(NamedTuple):
a: int
b: str ="123"
c: int = 123
def __repr__(self) -> str:
return "__repr__"
def func():
pass
tuple1 = SubNamedTuple(5)
tuple2 = SubNamedTuple(5, "123")
tuple3 = SubNamedTuple(5, "123", 123)
E1 = NamedTuple('Employee1', name=str, id=int)
E2 = NamedTuple('Employee2', [('name', str), ('id', int)])
""")
def test_baseclass(self):
ty = self.Infer("""
from typing import NamedTuple
class baseClass(object):
x=5
y=6
class SubNamedTuple(baseClass, NamedTuple):
a: int
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import Callable, Iterable, Sized, Tuple, Type, TypeVar
_TSubNamedTuple = TypeVar('_TSubNamedTuple', bound=SubNamedTuple)
class SubNamedTuple(tuple):
__slots__ = ["a"]
__dict__ = ... # type: collections.OrderedDict[str, int]
_field_defaults = ... # type: collections.OrderedDict[str, int]
_field_types = ... # type: collections.OrderedDict[str, type]
_fields = ... # type: Tuple[str]
a = ... # type: int
def __getnewargs__(self) -> Tuple[int]: ...
def __getstate__(self) -> None: ...
def __init__(self, *args, **kwargs) -> None: ...
def __new__(cls: Type[_TSubNamedTuple], a: int) -> _TSubNamedTuple:
...
def _asdict(self) -> collections.OrderedDict[str, int]: ...
@classmethod
def _make(cls: Type[_TSubNamedTuple],
iterable: Iterable[int], new = ...,
len: Callable[[Sized], int] = ...) -> _TSubNamedTuple: ...
def _replace(self: _TSubNamedTuple,
**kwds: int) -> _TSubNamedTuple: ...
class baseClass(object):
x = ... # type: int
y = ... # type: int
""")
def test_namedtuple_class_pyi(self):
ty = self.Infer("""
from typing import NamedTuple
class SubNamedTuple(NamedTuple):
a: int
b: str ="123"
c: int = 123
def __repr__(self) -> str:
return "__repr__"
def func():
pass
X = SubNamedTuple(1, "aaa", 222)
a = X.a
b = X.b
c = X.c
f = X.func
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import Callable, Iterable, Sized, Tuple, Type, TypeVar, Union
X: SubNamedTuple
a: int
b: str
c: int
_TSubNamedTuple = TypeVar('_TSubNamedTuple', bound=SubNamedTuple)
class SubNamedTuple(tuple):
__slots__ = ["a", "b", "c"]
__dict__: collections.OrderedDict[str, Union[int, str]]
_field_defaults: collections.OrderedDict[str, Union[int, str]]
_field_types: collections.OrderedDict[str, type]
_fields: Tuple[str, str, str]
a: int
b: str
c: int
def __getnewargs__(self) -> Tuple[int, str, int]: ...
def __getstate__(self) -> None: ...
def __init__(self, *args, **kwargs) -> None: ...
def __new__(cls: Type[_TSubNamedTuple], a: int, b: str = ...,
c: int = ...) -> _TSubNamedTuple: ...
def _asdict(self) -> collections.OrderedDict[str, Union[int, str]]:
...
@classmethod
def _make(cls: Type[_TSubNamedTuple],
iterable: Iterable[Union[int, str]], new = ...,
len: Callable[[Sized], int] = ...) -> _TSubNamedTuple: ...
def _replace(self: _TSubNamedTuple,
**kwds: Union[int, str]) -> _TSubNamedTuple: ...
def func() -> None: ...
def f() -> None: ...
""")
test_base.main(globals(), __name__ == "__main__")
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.enums',
marshal='google.ads.googleads.v7',
manifest={
'ClickTypeEnum',
},
)
class ClickTypeEnum(proto.Message):
r"""Container for enumeration of Google Ads click types.
"""
class ClickType(proto.Enum):
r"""Enumerates Google Ads click types."""
UNSPECIFIED = 0
UNKNOWN = 1
APP_DEEPLINK = 2
BREADCRUMBS = 3
BROADBAND_PLAN = 4
CALL_TRACKING = 5
CALLS = 6
CLICK_ON_ENGAGEMENT_AD = 7
GET_DIRECTIONS = 8
LOCATION_EXPANSION = 9
LOCATION_FORMAT_CALL = 10
LOCATION_FORMAT_DIRECTIONS = 11
LOCATION_FORMAT_IMAGE = 12
LOCATION_FORMAT_LANDING_PAGE = 13
LOCATION_FORMAT_MAP = 14
LOCATION_FORMAT_STORE_INFO = 15
LOCATION_FORMAT_TEXT = 16
MOBILE_CALL_TRACKING = 17
OFFER_PRINTS = 18
OTHER = 19
PRODUCT_EXTENSION_CLICKS = 20
PRODUCT_LISTING_AD_CLICKS = 21
SITELINKS = 22
STORE_LOCATOR = 23
URL_CLICKS = 25
VIDEO_APP_STORE_CLICKS = 26
VIDEO_CALL_TO_ACTION_CLICKS = 27
VIDEO_CARD_ACTION_HEADLINE_CLICKS = 28
VIDEO_END_CAP_CLICKS = 29
VIDEO_WEBSITE_CLICKS = 30
VISUAL_SITELINKS = 31
WIRELESS_PLAN = 32
PRODUCT_LISTING_AD_LOCAL = 33
PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL = 34
PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE = 35
PRODUCT_LISTING_ADS_COUPON = 36
PRODUCT_LISTING_AD_TRANSACTABLE = 37
PRODUCT_AD_APP_DEEPLINK = 38
SHOWCASE_AD_CATEGORY_LINK = 39
SHOWCASE_AD_LOCAL_STOREFRONT_LINK = 40
SHOWCASE_AD_ONLINE_PRODUCT_LINK = 42
SHOWCASE_AD_LOCAL_PRODUCT_LINK = 43
PROMOTION_EXTENSION = 44
SWIPEABLE_GALLERY_AD_HEADLINE = 45
SWIPEABLE_GALLERY_AD_SWIPES = 46
SWIPEABLE_GALLERY_AD_SEE_MORE = 47
SWIPEABLE_GALLERY_AD_SITELINK_ONE = 48
SWIPEABLE_GALLERY_AD_SITELINK_TWO = 49
SWIPEABLE_GALLERY_AD_SITELINK_THREE = 50
SWIPEABLE_GALLERY_AD_SITELINK_FOUR = 51
SWIPEABLE_GALLERY_AD_SITELINK_FIVE = 52
HOTEL_PRICE = 53
PRICE_EXTENSION = 54
HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION = 55
SHOPPING_COMPARISON_LISTING = 56
__all__ = tuple(sorted(__protobuf__.manifest))
|
from __future__ import annotations
from typing import List, Sequence
from models.exceptions import CurrencyNotFound
from parser.currency import Currency
class CurrencyLookup:
def __init__(self) -> None:
self.currency_list: List[Currency] = []
def add_currency(self, c: Currency) -> CurrencyLookup:
self.currency_list.append(c)
return self
def name_to_currency(self, name: str) -> Currency:
return next(i for i in self.currency_list if i.name == name)
def contract_address_to_currency(self, addr: str) -> Currency:
try:
return next(i for i in self.currency_list if i.contract_address == addr)
except StopIteration:
raise CurrencyNotFound(f"cannot find currency of {addr}")
@staticmethod
def from_currency_list(currency_list: Sequence[Currency]) -> CurrencyLookup:
currency_lookup = CurrencyLookup()
for c in currency_list:
currency_lookup.add_currency(c)
return currency_lookup
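# Illustrative usage (the Currency constructor signature is an assumption,
# inferred from the attributes accessed above):
#
#   eth = Currency(name="ETH", contract_address="0x...")
#   lookup = CurrencyLookup.from_currency_list([eth])
#   lookup.name_to_currency("ETH")                  # -> eth
#   lookup.contract_address_to_currency("0xdead")   # raises CurrencyNotFound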
|
typedef void __TARGET_TYPE__;
typedef long long int ssize_t;
typedef unsigned long long int size_t;
static int fffc_mutator_for_target_type(__TARGET_TYPE__ storage) {
return 0;
}
|
from decision import Buy, Nothing, Sell
import random
def act(current, history, balance):
maximum_can_buy = balance.money // current.price
if maximum_can_buy > 0:
return Buy(maximum_can_buy)
else:
if random.random() > 0.5:
return Sell(balance.stock)
else:
return Nothing()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Infers detections on a TFRecord of TFExamples given an inference graph.
Example usage:
./infer_detections \
--input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \
--output_tfrecord_path=/path/to/output/detections.tfrecord \
--inference_graph=/path/to/frozen_weights_inference_graph.pb
The output is a TFRecord of TFExamples. Each TFExample from the input is first
augmented with detections from the inference graph and then copied to the
output.
The input and output nodes of the inference graph are expected to have the same
types, shapes, and semantics, as the input and output nodes of graphs produced
by export_inference_graph.py, when run with --input_type=image_tensor.
The script can also discard the image pixels in the output. This greatly
reduces the output size and can potentially accelerate reading data in
subsequent processing steps that don't require the images (e.g. computing
metrics).
"""
import itertools
import tensorflow as tf
from object_detection.inference import detection_inference
tf.flags.DEFINE_string('input_tfrecord_paths', None,
'A comma separated list of paths to input TFRecords.')
tf.flags.DEFINE_string('output_tfrecord_path', None,
'Path to the output TFRecord.')
tf.flags.DEFINE_string('inference_graph', None,
'Path to the inference graph with embedded weights.')
tf.flags.DEFINE_boolean('discard_image_pixels', False,
'Discards the images in the output TFExamples. This'
' significantly reduces the output size and is useful'
' if the subsequent tools don\'t need access to the'
' images (e.g. when computing evaluation measures).')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = ['input_tfrecord_paths', 'output_tfrecord_path',
'inference_graph']
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
with tf.Session() as sess:
input_tfrecord_paths = [
v for v in FLAGS.input_tfrecord_paths.split(',') if v]
tf.logging.info('Reading input from %d files', len(input_tfrecord_paths))
serialized_example_tensor, image_tensor = detection_inference.build_input(
input_tfrecord_paths)
tf.logging.info('Reading graph and building model...')
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, FLAGS.inference_graph)
tf.logging.info('Running inference and writing output to {}'.format(
FLAGS.output_tfrecord_path))
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
with tf.python_io.TFRecordWriter(
FLAGS.output_tfrecord_path) as tf_record_writer:
try:
for counter in itertools.count():
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10,
counter)
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor,
FLAGS.discard_image_pixels)
tf_record_writer.write(tf_example.SerializeToString())
except tf.errors.OutOfRangeError:
tf.logging.info('Finished processing records')
if __name__ == '__main__':
tf.app.run()
|
const assert = require('assert');
const { promises } = require('fs');
const fsReadFile = promises.readFile;
let currentReads = 0;
let maxReads = 0;
module.exports = {
description: 'maxParallelFileReads set to infinity',
options: {
maxParallelFileReads: 0
},
before() {
promises.readFile = async (path, options) => {
currentReads++;
maxReads = Math.max(maxReads, currentReads);
const content = await fsReadFile(path, options);
currentReads--;
return content;
};
},
after() {
promises.readFile = fsReadFile;
assert.strictEqual(maxReads, 5, 'Wrong number of parallel file reads: ' + maxReads);
}
};
|
import postqlite
import tk2
import os
from time import time
import traceback
#from . import ops
#from . import stats
from .geometry import geometry
from .raster import raster
class SQLiteApp(tk2.Tk):
def __init__(self, *args, **kwargs):
tk2.basics.Tk.__init__(self, *args, **kwargs)
self.top = tk2.Frame(self)
self.top.pack(side='top', fill='x', expand=1)
self.top.place(relx=0, relwidth=1, rely=0, relheight=0.6)
self.query = QueryEditor(self.top)
self.query.pack(side='left', fill='both', expand=1)
self.query.app = self
self.info = DataInfo(self.top)
self.info.pack(side='right', fill='both', expand=1)
self.info.app = self
self.bottom = tk2.Frame(self)
#self.bottom.pack(side='bottom', fill='x', expand=1)
self.bottom.place(relx=0, relwidth=1, rely=0.6, relheight=0.4)
self.browser = TableBrowser(self.bottom)
#self.browser.pack(side='left', fill="both", expand=1)
self.browser.place(relx=0, relwidth=0.6, rely=0, relheight=1)
self.browser.app = self
self.graphics = GraphicsViewer(self.bottom, width=100)
#self.graphics.pack(side='right', fill='both', expand=1)
self.graphics.place(relx=0.6, relwidth=0.4, rely=0, relheight=1)
self.graphics.app = self
self.connect(':memory:')
# bindings
def dndfunc(event):
filepath = list(event.data)[0]
self.connect(filepath)
self.winfo_toplevel().bind_dnddrop(dndfunc, "Files", event='<Drop>')
self.winfo_toplevel().bind('<Control-Return>', lambda e: self.query.run_query())
self.state('zoomed')
def connect(self, db):
if isinstance(db, basestring):
path = db
if path == ':memory:':
# in-memory
name = '[in-memory]'
root = '[in-memory]'
else:
# file path
root,name = os.path.split(path)
# load
self.db = postqlite.connect(path)
else:
# already loaded db
self.db = db
path = '[unknown]'
root = '[unknown]'
name = '[unknown]'
self.info.data.set( '{}'.format(name) )
self.info.path.set( '{}'.format(root) )
# get info
tableinfo = []
for table in self._tablenames():
columns = self._column_info(table)
tableinfo.append((table, columns))
# populate
self.info.populate(tableinfo)
# log
msg = 'Connected to database: {}'.format(path)
self.query.log.log_new(msg, 'normal')
def _tablenames(self):
names = [row[0] for row in self.db.cursor().execute("SELECT name FROM sqlite_master WHERE type='table'")]
return names
def _column_info(self, table):
# cid,name,typ,notnull,default,pk
if '.' in table:
schema,name = table.split('.')
query = 'PRAGMA {}.table_info({})'.format(schema, table)
else:
query = 'PRAGMA table_info({})'.format(table)
columns = [(name,typ) for _,name,typ,_,_,_ in self.db.cursor().execute(query)]
return columns
def run_sql(self, sql):
msg = 'Running query: \n' + sql.strip()
self.query.log.log_new(msg, 'normal')
try:
# execute
t = time()
req = self.db.cursor().execute(sql)
# reset
tree = self.browser.table.tree
tree.delete(*tree.get_children())
# set fields
fields = [item[0] for item in req.description]
tree["columns"]=tuple(fields)
for fl in fields:
tree.heading(fl, text=fl)
# populate table rows
maxrows = 1000
rows = self.browser.table.rows = []
for i,row in enumerate(req):
tid = tree.insert("", "end", i+1, text=i+1, values=row)
rows.append(row)
if i+1 >= maxrows:
break
# finish rest of query
itot = i+1
for row in req:
itot += 1
elaps = time() - t
msg = '\n' + 'Query completed in {} seconds'.format(round(elaps,6))
msg += '\n' + 'Resulted in {} rows of data'.format(itot)
if itot > (i+1):
msg += '\n' + 'Showing only first {}'.format(i+1)
self.query.log.log_append(msg, 'normal')
except:
# log error
err = traceback.format_exc()
self.query.log.log_append(err, 'error')
# reset table
fields = []
rows = [[]]
self.browser.table.populate(fields, rows)
class DataInfo(tk2.basics.Label):
def __init__(self, master, *args, **kwargs):
tk2.basics.Label.__init__(self, master, *args, **kwargs)
self.data = tk2.Entry(self, label='Data:', default='None', width=300)
self.data.pack(side='top', fill="x", expand=1)
self.path = tk2.Entry(self, label='Path:', default='None', width=300)
self.path.pack(side='top', fill="x", expand=1)
self.content = tk2.Treeview(self)
self.content.pack(side='top', fill="both", expand=1)
tree = self.content.tree
tree["columns"]=("column","type")
tree.column("#0", width=100, minwidth=100, stretch='no')
tree.column("column", width=200, minwidth=100, stretch='no')
tree.column("type", width=100, minwidth=100, stretch='no')
tree.heading("#0", text="Table",anchor='w')
tree.heading("column", text="Column",anchor='w')
tree.heading("type", text="Type",anchor='w')
self.actions = tk2.Label(self)
self.actions.pack(side='top', fill="x", expand=1)
def tableview():
item = tree.selection()[0]
prn = tree.parent(item)
if prn:
table = tree.item(prn)['text']
else:
table = tree.item(item)['text']
self.preview_table(table)
tablebut = tk2.Button(self.actions, text='Preview Table', command=tableview)
tablebut.pack(side='left')
def populate(self, tables):
tree = self.content.tree
tree.delete(*tree.get_children())
for table,columns in tables:
tid = self.content.insert("", "end", text=table, values=("", "", ""), open=True)
i = 1
for col,typ in columns:
self.content.insert(tid, "end", text=i, values=(col, typ))
i += 1
def preview_table(self, table):
sql = '''
select *
from {}
limit 100
'''.format(table)
self.app.run_sql(sql)
class TableBrowser(tk2.basics.Label):
def __init__(self, master, *args, **kwargs):
tk2.basics.Label.__init__(self, master, *args, **kwargs)
self.menu = tk2.Label(self, text='Results:')
self.menu.pack(side='top', fill="x", expand=1)
self.table = tk2.scrollwidgets.Table(self)
self.table.pack(side='bottom', fill="both", expand=1)
self.table.tree.bind('<Button-1>', self.click)
def click(self, event):
x,y = event.x, event.y
#print x,y
row = self.table.tree.identify_row(y)
column = self.table.tree.identify_column(x)
#print row,column
if not column or column == '#0':
return
ci = int(float(column[1:])) - 1 # zero-index
ri = int(float(row)) - 1 # zero-index
#print ci,ri
val = self.table.rows[ri][ci]
#print ci,ri,val
if val and isinstance(val, geometry.Geometry):
self.show_geom(val)
elif val and isinstance(val, raster.Raster):
self.show_rast(val)
def show_geom(self, geom):
from PIL import Image
xmin,ymin,xmax,ymax = geom.bbox()
xw,yh = xmax-xmin, ymax-ymin
aspect = xw / float(yh)
h = int(300)
w = int(h * aspect)
rast = geom.as_raster(w, h, 'u1', 255, 0)
img = Image.fromarray(rast.data(1))
self.app.graphics.show(img)
def show_rast(self, rast):
from PIL import Image
rast = rast.resize(300, 300)
arr = rast.data(1) # just 1st band for now
arr = (arr / float(arr.max())) * 255 # normalize
img = Image.fromarray(arr)
self.app.graphics.show(img)
class QueryEditor(tk2.basics.Label):
def __init__(self, master, *args, **kwargs):
tk2.basics.Label.__init__(self, master, *args, **kwargs)
self.title = tk2.Label(self, text='Query Editor:')
self.title.pack(fill="x", expand=1)
self.text = tk2.Text(self, height=12)
self.text.pack(fill="both", expand=1)
self.buttons = tk2.Label(self)
self.buttons.pack(fill="x", expand=1)
runbut = tk2.Button(self.buttons, text='Run', command=self.run_query)
runbut.pack(side='left')
self.log = LogViewer(self)
self.log.pack(fill="both", expand=1)
def run_query(self):
sql = self.text.get('1.0', 'end')
self.app.run_sql(sql)
class LogViewer(tk2.basics.Label):
def __init__(self, master, *args, **kwargs):
tk2.basics.Label.__init__(self, master, *args, **kwargs)
self.title = tk2.Label(self, text='Run Log:')
self.title.pack(fill="x", expand=1)
self.text = tk2.Text(self, height=5)
self.text.config(state='disabled')
self.text.tag_config('normal')
self.text.tag_config('error', foreground='red')
self.text.pack(fill="both", expand=1)
def log_new(self, text, *tags):
self.text.config(state='normal')
#prev = self.text.get('1.0', 'end')
self.text.delete('1.0', 'end')
self.text.insert('1.0', text, tags)
self.text.yview_moveto(1)
self.text.config(state='disabled')
def log_append(self, text, *tags):
self.text.config(state='normal')
#prev = self.text.get('1.0', 'end')
self.text.insert('end', text, tags)
self.text.yview_moveto(1)
self.text.config(state='disabled')
class GraphicsViewer(tk2.basics.Label):
def __init__(self, master, *args, **kwargs):
tk2.basics.Label.__init__(self, master, *args, **kwargs)
self.title = tk2.Label(self, text='Graphics:')
self.title.pack(fill="x", expand=0)
self.output = tk2.Label(self, background='white')
self.output['anchor'] = 'center'
self.output.pack(fill="both", expand=1)
self.actions = tk2.Label(self)
self.actions.pack(fill="x", expand=0)
def show(self, img):
from PIL import Image,ImageTk,ImageOps
w,h = self.output.winfo_width(), self.output.winfo_height()
wratio = img.size[0] / float(w)
hratio = img.size[1] / float(h)
ratio = max(wratio, hratio)
w,h = int(img.size[0]/ratio), int(img.size[1]/ratio)
img = img.resize((w,h), Image.ANTIALIAS)
self.tkim = ImageTk.PhotoImage(img)
self.output['image'] = self.tkim
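# Illustrative launch sketch (an assumption, not part of the original module;
# needs a Python 2 environment with tk2 and postqlite importable, since
# connect() tests against `basestring`):
#
#   app = SQLiteApp()   # starts connected to an in-memory database
#   app.mainloop()      # drag-and-drop a database file onto the window to open it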
|
from __future__ import absolute_import
from __future__ import division
# Copyright (c) 2010-2017 openpyxl
import math
#constants
DEFAULT_ROW_HEIGHT = 15. # Default row height measured in point size.
BASE_COL_WIDTH = 13 # in characters
DEFAULT_COLUMN_WIDTH = 51.85 # in points, should be characters
DEFAULT_LEFT_MARGIN = 0.7 # in inches, = right margin
DEFAULT_TOP_MARGIN = 0.7874 # in inches = bottom margin
DEFAULT_HEADER = 0.3 # in inches
# Conversion functions
"""
From the ECMA Spec (4th Edition part 1)
Page setup: "Left Page Margin in inches" p. 1647
Docs from
http://startbigthinksmall.wordpress.com/2010/01/04/points-inches-and-emus-measuring-units-in-office-open-xml/
See also http://msdn.microsoft.com/en-us/library/dd560821(v=office.12).aspx
dxa: The main unit in OOXML is a twentieth of a point. Also called twips.
pt: point. In Excel there are 72 points to an inch
hp: half-points are used to specify font sizes. A font-size of 12pt equals 24 half points
pct: fiftieths of a percent, so 100% is written as 5000; used for widths expressed as a percentage
EMU: English Metric Unit, EMUs are used for coordinates in vector-based
drawings and embedded pictures. One inch equates to 914400 EMUs and a
centimeter is 360000. For bitmaps the default resolution is 96 dpi (known as
PixelsPerInch in Excel). Spec p. 1122
For radial geometry Excel uses integer units of 1/60000th of a degree.
"""
def inch_to_dxa(value):
"""1 inch = 72 * 20 dxa"""
return int(value * 20 * 72)
def dxa_to_inch(value):
return value / 72 / 20
def dxa_to_cm(value):
return 2.54 * dxa_to_inch(value)
def cm_to_dxa(value):
emu = cm_to_EMU(value)
inch = EMU_to_inch(emu)
return inch_to_dxa(inch)
def pixels_to_EMU(value):
"""1 pixel = 9525 EMUs"""
return int(value * 9525)
def EMU_to_pixels(value):
return round(value / 9525)
def cm_to_EMU(value):
"""1 cm = 360000 EMUs"""
return int(value * 360000)
def EMU_to_cm(value):
return round(value / 360000, 4)
def inch_to_EMU(value):
"""1 inch = 914400 EMUs"""
return int(value * 914400)
def EMU_to_inch(value):
return round(value / 914400, 4)
def pixels_to_points(value, dpi=96):
"""96 dpi, 72i"""
return value * 72 / dpi
def points_to_pixels(value, dpi=96):
return int(math.ceil(value * dpi / 72))
def degrees_to_angle(value):
"""1 degree = 60000 angles"""
return int(round(value * 60000))
def angle_to_degrees(value):
return round(value / 60000, 2)
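# A few worked conversions as a sanity check of the constants above
# (values follow directly from the definitions in the docstring):
#   inch_to_dxa(1)       == 1440       # 72 pt * 20 twips per point
#   pixels_to_EMU(96)    == 914400     # 96 px at 96 dpi is one inch
#   points_to_pixels(72) == 96         # one inch at the default 96 dpi
#   degrees_to_angle(90) == 5400000    # 90 * 60000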
def short_color(color):
""" format a color to its short size """
if len(color) > 6:
return color[2:]
return color
|
# ccm node
from __future__ import with_statement
import os
import re
import shutil
import signal
import stat
import subprocess
import time
import yaml
from six import print_,iteritems
from ccmlib import common
from ccmlib.node import Node, NodeError, NodetoolError
class DseNode(Node):
"""
Provides interactions to a DSE node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None):
super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface, byteman_port, environment_variables=environment_variables)
self.get_cassandra_version()
self._dse_config_options = {}
if self.cluster.hasOpscenter():
self._copy_agent()
def get_install_cassandra_root(self):
return os.path.join(self.get_install_dir(), 'resources', 'cassandra')
def get_node_cassandra_root(self):
return os.path.join(self.get_path(), 'resources', 'cassandra')
def get_conf_dir(self):
"""
Returns the path to the directory where Cassandra config are located
"""
return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
def get_tool(self, toolname):
return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname)
def get_tool_args(self, toolname):
return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname]
def get_env(self):
(node_ip, _) = self.network_interfaces['binary']
return common.make_dse_env(self.get_install_dir(), self.get_path(), node_ip)
def get_cassandra_version(self):
return common.get_dse_cassandra_version(self.get_install_dir())
def set_workloads(self, workloads):
self.workloads = workloads
self._update_config()
if 'solr' in self.workloads:
self.__generate_server_xml()
if 'graph' in self.workloads:
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
graph_options = data['graph']
graph_options['gremlin_server']['host'] = node_ip
self.set_dse_configuration_options({'graph' : graph_options})
self.__update_gremlin_config_yaml()
if 'dsefs' in self.workloads:
dsefs_options = {'dsefs_options' : {'enabled': 'true',
'work_dir': os.path.join(self.get_path(), 'dsefs'),
'data_directories': [{'dir': os.path.join(self.get_path(), 'dsefs', 'data')}]}}
self.set_dse_configuration_options(dsefs_options)
if 'spark' in self.workloads:
self._update_spark_env()
def set_dse_configuration_options(self, values=None):
if values is not None:
for k, v in iteritems(values):
self._dse_config_options[k] = v
self.import_dse_config_files()
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'):
"""
Watch the log of this node until it detects that the provided other
nodes are marked UP. This method works similarly to watch_log_for_death.
We want to provide a higher default timeout when this is called on DSE.
"""
super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
def start(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=False,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False,
quiet_start=False,
allow_root=False):
"""
Start the node. Options includes:
- join_ring: if false, start the node with -Dcassandra.join_ring=False
- no_wait: by default, this method returns when the node is started and listening to clients.
If no_wait=True, the method returns sooner.
- wait_other_notice: if True, this method returns only when all other live node of the cluster
have marked this node UP.
- replace_token: start the node with the -Dcassandra.replace_token option.
- replace_address: start the node with the -Dcassandra.replace_address option.
"""
if jvm_args is None:
jvm_args = []
if self.is_running():
raise NodeError("%s is already running" % self.name)
for itf in list(self.network_interfaces.values()):
if itf is not None and replace_address is None:
common.check_socket_available(itf)
if wait_other_notice:
marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_running()]
self.mark = self.mark_log()
cdir = self.get_install_dir()
launch_bin = common.join_bin(cdir, 'bin', 'dse')
# Copy back the dse scripts since profiling may have modified it the previous time
shutil.copy(launch_bin, self.get_bin_dir())
launch_bin = common.join_bin(self.get_path(), 'bin', 'dse')
# If Windows, change entries in .bat file to split conf from binaries
if common.is_win():
self.__clean_bat()
if profile_options is not None:
config = common.get_config()
if 'yourkit_agent' not in config:
raise NodeError("Cannot enable profile. You need to set 'yourkit_agent' to the path of your agent in a {0}/config".format(common.get_default_path_display_name()))
cmd = '-agentpath:%s' % config['yourkit_agent']
if 'options' in profile_options:
cmd = cmd + '=' + profile_options['options']
print_(cmd)
# Yes, it's fragile as shit
pattern = r'cassandra_parms="-Dlog4j.configuration=log4j-server.properties -Dlog4j.defaultInitOverride=true'
common.replace_in_file(launch_bin, pattern, ' ' + pattern + ' ' + cmd + '"')
os.chmod(launch_bin, os.stat(launch_bin).st_mode | stat.S_IEXEC)
env = self.get_env()
if common.is_win():
self._clean_win_jmx()
pidfile = os.path.join(self.get_path(), 'cassandra.pid')
args = [launch_bin, 'cassandra']
for workload in self.workloads:
if 'hadoop' in workload:
args.append('-t')
if 'solr' in workload:
args.append('-s')
if 'spark' in workload:
args.append('-k')
if 'cfs' in workload:
args.append('-c')
if 'graph' in workload:
args.append('-g')
args += ['-p', pidfile, '-Dcassandra.join_ring=%s' % str(join_ring)]
args += ['-Dcassandra.logdir=%s' % os.path.join(self.get_path(), 'logs')]
if replace_token is not None:
args.append('-Dcassandra.replace_token=%s' % str(replace_token))
if replace_address is not None:
args.append('-Dcassandra.replace_address=%s' % str(replace_address))
if use_jna is False:
args.append('-Dcassandra.boot_without_jna=true')
if allow_root:
args.append('-R')
args = args + jvm_args
self._delete_old_pid()
process = None
FNULL = open(os.devnull, 'w')
stdout_sink = subprocess.PIPE if verbose else FNULL
if common.is_win():
# clean up any old dirty_pid files from prior runs
if (os.path.isfile(self.get_path() + "/dirty_pid.tmp")):
os.remove(self.get_path() + "/dirty_pid.tmp")
process = subprocess.Popen(args, cwd=self.get_bin_dir(), env=env, stdout=FNULL, stderr=subprocess.PIPE)
else:
process = subprocess.Popen(args, env=env, stdout=stdout_sink, stderr=subprocess.PIPE)
if verbose:
stdout, stderr = process.communicate()
print_(stdout)
print_(stderr)
# Our modified batch file writes a dirty output with more than just the pid - clean it to get in parity
# with *nix operation here.
if common.is_win():
self.__clean_win_pid()
self._update_pid(process)
elif update_pid:
self._update_pid(process)
if not self.is_running():
raise NodeError("Error starting node %s" % self.name, process)
if wait_other_notice:
for node, mark in marks:
node.watch_log_for_alive(self, from_mark=mark)
if wait_for_binary_proto:
self.wait_for_binary_interface(from_mark=self.mark)
if self.cluster.hasOpscenter():
self._start_agent()
return process
def stop(self, wait=True, wait_other_notice=False, gently=True):
stopped = super(DseNode, self).stop(wait, wait_other_notice, gently)
if self.cluster.hasOpscenter():
self._stop_agent()
return stopped
def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True):
"""
Setting wait=False makes it impossible to detect errors,
if capture_output is also False. wait=False allows us to return
while nodetool is still running.
"""
if capture_output and not wait:
raise common.ArgumentError("Cannot set capture_output while wait is False.")
env = self.get_env()
nodetool = common.join_bin(self.get_install_dir(), 'bin', 'nodetool')
args = [nodetool, '-h', 'localhost', '-p', str(self.jmx_port)]
if username is not None:
args += [ '-u', username]
if password is not None:
args += [ '-pw', password]
args += cmd.split()
if capture_output:
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
else:
p = subprocess.Popen(args, env=env)
stdout, stderr = None, None
if wait:
exit_status = p.wait()
if exit_status != 0:
raise NodetoolError(" ".join(args), exit_status, stdout, stderr)
return stdout, stderr
def dsetool(self, cmd):
env = self.get_env()
dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool')
args = [dsetool, '-h', 'localhost', '-j', str(self.jmx_port)]
args += cmd.split()
p = subprocess.Popen(args, env=env)
p.wait()
def dse(self, dse_options=None):
if dse_options is None:
dse_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse]
args += dse_options
p = subprocess.Popen(args, env=env)
p.wait()
def hadoop(self, hadoop_options=None):
if hadoop_options is None:
hadoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hadoop']
args += hadoop_options
p = subprocess.Popen(args, env=env)
p.wait()
def hive(self, hive_options=None):
if hive_options is None:
hive_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hive']
args += hive_options
p = subprocess.Popen(args, env=env)
p.wait()
def pig(self, pig_options=None):
if pig_options is None:
pig_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'pig']
args += pig_options
p = subprocess.Popen(args, env=env)
p.wait()
def sqoop(self, sqoop_options=None):
if sqoop_options is None:
sqoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'sqoop']
args += sqoop_options
p = subprocess.Popen(args, env=env)
p.wait()
def spark(self, spark_options=None):
if spark_options is None:
spark_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'spark']
args += spark_options
p = subprocess.Popen(args, env=env)
p.wait()
def import_dse_config_files(self):
self._update_config()
if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')):
os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
self.__update_yaml()
def copy_config_files(self):
for product in ['dse', 'cassandra', 'hadoop', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr', 'graph']:
src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf')
dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf')
if not os.path.isdir(src_conf):
continue
if os.path.isdir(dst_conf):
common.rmdirs(dst_conf)
shutil.copytree(src_conf, dst_conf)
if product == 'solr':
src_web = os.path.join(self.get_install_dir(), 'resources', product, 'web')
dst_web = os.path.join(self.get_path(), 'resources', product, 'web')
if os.path.isdir(dst_web):
common.rmdirs(dst_web)
shutil.copytree(src_web, dst_web)
if product == 'tomcat':
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'lib')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'lib')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
shutil.copytree(src_lib, dst_lib)
src_webapps = os.path.join(self.get_install_dir(), 'resources', product, 'webapps')
dst_webapps = os.path.join(self.get_path(), 'resources', product, 'webapps')
if os.path.isdir(dst_webapps):
common.rmdirs(dst_webapps)
shutil.copytree(src_webapps, dst_webapps)
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'gremlin-console', 'conf')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'gremlin-console', 'conf')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
def import_bin_files(self):
os.makedirs(os.path.join(self.get_path(), 'resources', 'cassandra', 'bin'))
common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir())
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), os.path.join(self.get_path(), 'resources', 'cassandra', 'bin'))
def _update_log4j(self):
super(DseNode, self)._update_log4j()
conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
append_pattern = 'log4j.appender.V.File='
log_file = os.path.join(self.get_path(), 'logs', 'solrvalidation.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.A.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.B.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit', 'dropped-events.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
def __update_yaml(self):
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['system_key_directory'] = os.path.join(self.get_path(), 'keys')
# Get a map of combined cluster and node configuration with the node
# configuration taking precedence.
full_options = common.merge_configuration(
self.cluster._dse_config_options,
self._dse_config_options, delete_empty=False)
# Merge options with original yaml data.
data = common.merge_configuration(data, full_options)
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def __generate_server_xml(self):
server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml')
if os.path.isfile(server_xml):
os.remove(server_xml)
with open(server_xml, 'w+') as f:
f.write('<Server port="8005" shutdown="SHUTDOWN">\n')
f.write(' <Service name="Solr">\n')
f.write(' <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0])
f.write(' <Engine name="Solr" defaultHost="localhost">\n')
f.write(' <Host name="localhost" appBase="../solr/web"\n')
f.write(' unpackWARs="true" autoDeploy="true"\n')
f.write(' xmlValidation="false" xmlNamespaceAware="false">\n')
f.write(' </Host>\n')
f.write(' </Engine>\n')
f.write(' </Service>\n')
f.write('</Server>\n')
f.close()
def __update_gremlin_config_yaml(self):
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'graph', 'gremlin-console', 'conf', 'remote.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['hosts'] = [node_ip]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def _get_directories(self):
dirs = []
for i in ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]:
dirs.append(os.path.join(self.get_path(), i))
return dirs
def _copy_agent(self):
agent_source = os.path.join(self.get_install_dir(), 'datastax-agent')
agent_target = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_source) and not os.path.exists(agent_target):
shutil.copytree(agent_source, agent_target)
def _start_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
self._write_agent_address_yaml(agent_dir)
self._write_agent_log4j_properties(agent_dir)
args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def _stop_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
pidfile = os.path.join(agent_dir, 'datastax-agent.pid')
if os.path.exists(pidfile):
with open(pidfile, 'r') as f:
pid = int(f.readline().strip())
f.close()
if pid is not None:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
os.remove(pidfile)
def _write_agent_address_yaml(self, agent_dir):
address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml')
if not os.path.exists(address_yaml):
with open(address_yaml, 'w+') as f:
(ip, port) = self.network_interfaces['thrift']
jmx = self.jmx_port
f.write('stomp_interface: 127.0.0.1\n')
f.write('local_interface: %s\n' % ip)
f.write('agent_rpc_interface: %s\n' % ip)
f.write('agent_rpc_broadcast_address: %s\n' % ip)
f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml'))
f.write('cassandra_install: %s\n' % self.get_path())
f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs'))
f.write('thrift_port: %s\n' % port)
f.write('jmx_port: %s\n' % jmx)
f.close()
def _write_agent_log4j_properties(self, agent_dir):
log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties')
with open(log4j_properties, 'w+') as f:
f.write('log4j.rootLogger=INFO,R\n')
f.write('log4j.logger.org.apache.http=OFF\n')
f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n')
f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n')
f.write('log4j.appender.R.maxFileSize=20MB\n')
f.write('log4j.appender.R.maxBackupIndex=5\n')
f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n')
f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n')
f.write('log4j.appender.R.File=./log/agent.log\n')
f.close()
def _update_spark_env(self):
conf_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf',
'spark-env.sh')
env = self.get_env()
content = []
with open(conf_file, 'r') as f:
for line in f.readlines():
for spark_var in env.keys():
if line.startswith('export %s=' % spark_var):
line = 'export %s=%s' % (spark_var, env[spark_var])
break
content.append(line)
with open(conf_file, 'w') as f:
f.writelines(content)
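# Illustrative usage sketch (the surrounding ccm cluster setup is assumed and
# not part of this module):
#
#   node = cluster.nodelist()[0]              # a DseNode from an existing DseCluster
#   node.set_workloads(['solr', 'spark'])     # rewrites dse.yaml / server.xml before start
#   node.start(wait_for_binary_proto=True)
#   out, err = node.nodetool('status')
#   node.stop()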
|
# %%
import os,sys
from numpy.lib.npyio import save
import vtk,vtktools
import u2r
import numpy as np
import matplotlib.pyplot as plt
import random
from nirom_dd_tools import *
import copy
### MY version of nirom_dd_orig.py for FLOW_PAST_IMPLICIT_BLOCK_84.vtu
# some functions of tools.io copied in
def write_sing_values(s_values):
f= open('singular_values.dat',"w+")
f.write('# index, s_values, normalised s_values, cumulative energy \n' )
for k in range(len(s_values)):
#f.write('# field: %s\n' % field[k])
        # use a local name for this field's values so the outer list is not
        # clobbered on later iterations of k
        vals = s_values[k]
        total = 0.0
        for i in range(len(vals)):
            total = total + vals[i]*vals[i]
        running_total = 0.0
        for i in range(len(vals)):
            running_total = running_total + vals[i]*vals[i]
            f.write('%d %g %g %18.10g \n' % (i, vals[i], vals[i]/vals[0], running_total/total))
f.close()
return
def get_clean_vtk_file(filename):
"Removes fields and arrays from a vtk file, leaving the coordinates/connectivity information."
vtu_data = vtktools.vtu(filename)
clean_vtu = vtktools.vtu()
clean_vtu.ugrid.DeepCopy(vtu_data.ugrid)
fieldNames = clean_vtu.GetFieldNames()
# remove all fields and arrays from this vtu
for field in fieldNames:
clean_vtu.RemoveField(field)
fieldNames = clean_vtu.GetFieldNames()
vtkdata=clean_vtu.ugrid.GetCellData()
arrayNames = [vtkdata.GetArrayName(i) for i in range(vtkdata.GetNumberOfArrays())]
for array in arrayNames:
vtkdata.RemoveArray(array)
return clean_vtu
#(nNodes, reconstruction_on_mesh[iTime*nScalar:(iTime+1)*nScalar,:], template_vtu, original_data[0][iTime*nDim:(iTime+1)*nDim], iTime)
def create_vtu_file(path, nNodes, value_mesh_twice_interp, filename, orig_vel, iTime):
velocity_field = np.zeros((nNodes,3))
velocity_field[:,0:nDim] = np.transpose(value_mesh_twice_interp[0:nDim,:]) # streamwise component only
difference = np.zeros((nNodes,3))
difference[:,0:nDim] = np.transpose(value_mesh_twice_interp[0:nDim,:]) - orig_vel # streamwise component only
difference = difference / np.max(velocity_field)
clean_vtk = get_clean_vtk_file(filename)
new_vtu = vtktools.vtu()
new_vtu.ugrid.DeepCopy(clean_vtk.ugrid)
new_vtu.filename = path + 'recon_' + str(iTime) + '.vtu'
new_vtu.AddField('Velocity',velocity_field)
new_vtu.AddField('Original',orig_vel)
new_vtu.AddField('Velocity_diff',difference)
new_vtu.Write()
return
def create_vtu_file_timelevel(nNodes, value_mesh_twice_interp, template_vtu, iTime):
velocity_field = np.zeros((nNodes,3))
velocity_field[:,0:nDim] = np.transpose(value_mesh_twice_interp[0:nDim,:]) # streamwise component only
# difference = np.zeros((nNodes,3))
# difference[:,0:nDim] = np.transpose(value_mesh_twice_interp[0:nDim,:]) - orig_vel # streamwise component only
clean_vtk = get_clean_vtk_file(template_vtu)
new_vtu = vtktools.vtu()
new_vtu.ugrid.DeepCopy(clean_vtk.ugrid)
new_vtu.filename = 'reconstructed_' + str(iTime) + '.vtu'
new_vtu.AddField('Velocity',velocity_field)
#new_vtu.AddField('Velocity_diff',difference)
new_vtu.Write()
return
#code for full domain case
# def get_grid_end_points(grid_origin,grid_width,iGrid ):
# return np.array(( grid_origin[0]+iGrid*grid_width[0], grid_origin[1] +iGrid*grid_width[1]))#
def get_grid_end_points(grid_origin,grid_width):
return np.array((grid_origin[0]+grid_width[0], grid_origin[1] +grid_width[1]))
def plot_grid(grid_origin, grid_width, nx, ny):
# include plot of entire coordinates with grid
# plt.figure(figsize=(9,9))
plt.plot(coordinates[:,0], coordinates[:,1], 'g.', ms = 0.3, label = 'angle = {}˚'.format(random_angle)) # corrdinates
# code for just the edges
# plt.plot([grid_origin[0], grid_origin[0]+grid_width[0]], [grid_origin[1], grid_origin[1]], 'ko-') #1
# plt.plot([grid_origin[0], grid_origin[0]], [grid_origin[1], grid_origin[1]+grid_width[1]], 'ko-') #2
# plt.plot([grid_origin[0], grid_origin[0]+grid_width[0]], [grid_origin[1]+grid_width[1], grid_origin[1]+grid_width[1]], 'ko-') #3
# plt.plot([grid_origin[0]+grid_width[0], grid_origin[0]+grid_width[0]], [grid_origin[1], grid_origin[1]+grid_width[1]], 'ko-') #4
for d in range(ny + 1):
if d%4 == 0:
plt.plot([grid_origin[0], grid_origin[0]+grid_width[0]], [grid_origin[1]+d*ddx[1], grid_origin[1]+d*ddx[1]], 'k-', lw = 1.2) #horizontal
if ny == nx:
plt.plot([grid_origin[0]+d*ddx[1], grid_origin[0]+d*ddx[1]], [grid_origin[1], grid_origin[1]+grid_width[1]], 'k-', lw = 1.2) #vertical
if ny != nx:
for d in range (nx + 1): #vertical
if d%4 == 0:
plt.plot([grid_origin[0]+d*ddx[0], grid_origin[0]+d*ddx[0]], [grid_origin[1], grid_origin[1]+grid_width[1]], 'k-', lw = 1.2) #vertical
# plt.grid(':')
# plt.tight_layout()
# plt.show()
def rotate_mesh(angle):
theta = np.radians(angle)
#shift coordinates so that they are centred at (0,0)
# for i in range(coordinates.shape[0]):
# coordinates[i][0] -= 1.5
# coordinates[i][1] -= 1.5
new_mesh = np.zeros(coordinates.shape)
for i in range(coordinates.shape[0]):
new_mesh[i][0] = (coordinates[i][0]-1.5)*np.cos(theta) - (coordinates[i][1]-1.5)*np.sin(theta)
new_mesh[i][1] = (coordinates[i][0]-1.5)*np.sin(theta) + (coordinates[i][1]-1.5)*np.cos(theta)
#rotate the velocity field as well
return new_mesh
def rotate_vel(angle):
theta = np.radians(angle)
new_mesh = np.zeros(velocity_field.shape)
for i in range(coordinates.shape[0]):
new_mesh[i][0] = (velocity_field[i][0])*np.cos(theta) - (velocity_field[i][1])*np.sin(theta)
new_mesh[i][1] = (velocity_field[i][0])*np.sin(theta) + (velocity_field[i][1])*np.cos(theta)
return new_mesh
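# rotate_mesh and rotate_vel above both apply the standard 2D rotation
#   x' = x*cos(theta) - y*sin(theta)
#   y' = x*sin(theta) + y*cos(theta)
# rotate_mesh shifts the coordinates first so the rotation is about the domain
# centre (1.5, 1.5) (the result stays centred on the origin), while rotate_vel
# rotates the velocity vectors themselves, as a rigid rotation of the flow
# field requires.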
def select_gridpoint():
min_x = min(coordinates[:,0])
max_x = max(coordinates[:,0])
min_y = min(coordinates[:,1])
max_y = max(coordinates[:,1])
# plt.plot(min_x+0.5,min_y+0.5, 'ro' )
# plt.plot(min_x+0.5,max_y-1.0, 'ro' )
# plt.plot(max_x-1.0,min_y+0.5, 'ro' )
# plt.plot(max_x-1.0,max_y-1.0, 'ro' )
grid_origin = [3,3]
while np.sqrt(grid_origin[0]**2+grid_origin[1]**2) >= 1.3:
# print("finding point - ", np.sqrt(grid_origin[0]**2+grid_origin[1]**2))
grid_origin = [random.uniform(min_x+0.5, max_x-1.2), random.uniform(min_y+0.5, max_y-1.2)]
return grid_origin
def sample_starshape(mesh, grid_origin):
"""
Returns a snapshot matrix of shape (5,nx*ny) and
snapshot_ae of shape (5,nx,ny) with given
mesh and central grid origin for the starshape grid formation
"""
grid_point_0 = [grid_origin[0], grid_origin[1]+grid_width[1]]
grid_point_1 = [grid_origin[0]-grid_width[0], grid_origin[1]]
grid_point_2 = [grid_origin[0]+grid_width[0], grid_origin[1]]
grid_point_3 = [grid_origin[0], grid_origin[1]-grid_width[1]]
#grid_point_4 = grid_origin
grid_list = [grid_point_0,grid_point_1, grid_point_2, grid_point_3, grid_origin]
s_matrix = np.zeros((nx*ny, 5))
s_ae = np.zeros((5,nx,ny))
for iloc in range(5):
value_grid = u2r.simple_interpolate_from_mesh_to_grid(mesh, x_all, x_ndgln, ddx, grid_list[iloc], nx, ny, nz, zeros_beyond_mesh, nEl, nloc, nNodes, nScalar, nDim,1)
s_matrix[:,iloc] = value_grid.reshape(-1)
s_ae[iloc,:,:] = value_grid.reshape((nx,ny))
return s_matrix, s_ae
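# Star layout produced by sample_starshape (slice/column index in brackets):
#
#             [0]                 grid_origin shifted +grid_width in y
#        [1]  [4]  [2]            left, centre and right grids
#             [3]                 grid_origin shifted -grid_width in y
#
# Illustrative call only (assumed usage, mirroring the starshape loop further below):
#   s_matrix, s_ae = sample_starshape(u_mesh, grid_origin)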
def plot_starshape(nSGrids):
plt.figure(figsize=(8,8))
plt.subplot(3,3,2)
plt.imshow(np.rot90(Ssnapshot_ae[5*nSGrids,:,:,2]))
plt.subplot(3,3,4)
plt.imshow(np.rot90(Ssnapshot_ae[5*nSGrids+1,:,:,2]))
plt.subplot(3,3,5)
plt.imshow(np.rot90(Ssnapshot_ae[5*nSGrids+4,:,:,2]))
plt.subplot(3,3,6)
plt.imshow(np.rot90(Ssnapshot_ae[5*nSGrids+2,:,:,2]))
plt.subplot(3,3,8)
plt.imshow(np.rot90(Ssnapshot_ae[5*nSGrids+3,:,:,2]))
plt.tight_layout()
# plt.show()
## settings
snapshot_data_location = '/Users/gohannaago/Desktop/IC_MSc_ACSE/9_PROJECT/ddpod/'
snapshot_file_base = 'Flow_past_implicit_block_'
nTime = 1
field_names = ['Velocity', 'VelocityAbsorption']
nFields = len(field_names)
xlength = 3.0
ylength = 3.0
grid_width = [0.48,0.48]
# spacing inside small grid
nx = int(grid_width[0]*100)
ny = nx
nz = 1
ddx = np.array((0.01,0.01))
# set number of grids - samples/snapshots to take
nGrids = 3000
# Turn on/off snapshots matrix
save_snapshots = False
save_stargrid = True
# Turn on/off save first 20 images
save_imgs = False
# get a vtu file (any will do as the mesh is not adapted)
filename = snapshot_data_location + snapshot_file_base + '84.vtu'
representative_vtu = vtktools.vtu(filename)
coordinates_org = representative_vtu.GetLocations() #coordinates of the nodes
coordinates = coordinates_org
nNodes = coordinates.shape[0] # vtu_data.ugrid.GetNumberOfPoints()
nEl = representative_vtu.ugrid.GetNumberOfCells()
# print('nEl', nEl, type(nEl), 'nNodes', nNodes)
#nNodes = 375627
#nEl = 125209
nloc = 3 # number of local nodes, ie three nodes per element (in 2D)
# nScalar = 2 # dimension of fields , 2 = u and v
nScalar = 1 #because I calculate u and v separately
nDim = 2 # dimension of problem (no need to interpolate in the third dimension)
# x_all = np.transpose(coordinates[:,0:nDim]) ### coords n,3 x_all 2,n
# get global node numbers
x_ndgln = np.zeros((nEl*nloc), dtype=int)
for iEl in range(nEl):
n = representative_vtu.GetCellPoints(iEl) + 1
x_ndgln[iEl*nloc:(iEl+1)*nloc] = n
# %%
# -------------------------------------------------------------------------------------------------
#SNAPSHOT MATRIX
#snapshot matrix configuration outside loop
snapshots_matrix = np.zeros((nx*ny*3, nGrids))
snapshot_ae = np.zeros((nGrids,nx,ny,3))
#saving coordinates of grid_origins of each grid (for later starshape grid)
origin_save = np.zeros((nGrids,2))
#saving rotation angle
rangle_save = np.zeros(nGrids)
# full run - later to iterate
for iGrid in range(nGrids):
# plt.subplot(nGrids//4,(nGrids//4+nGrids%4),iGrid+1)
if iGrid <= 20 and save_imgs:
plt.figure(figsize=(9,9))
if iGrid%10 == 0:
print("Currently Generating Grid ", iGrid+1, " out of ", nGrids, " Grids")
# rotate mesh and select grid
coordinates = coordinates_org
random_angle = random.randint(0,360)
# print("Rotation angle of mesh = ", random_angle)
rangle_save[iGrid] = random_angle
#rotate coordinates
coordinates = rotate_mesh(random_angle)
# velocity_field = representative_vtu.GetField(field_names[0])[:,:nDim] #field name 0 is velocity field
# #rotate velocity
# # print('velocity_field[0]', velocity_field[0])
# velocity_field = rotate_vel(random_angle)
# # print('velocity_field[0]', velocity_field[0])
# #call velocity absorption field
# va_field = representative_vtu.GetField(field_names[1])[:,0] #field name 0 is velocity field
# # print('va_field', va_field[:20])
# x_all = np.transpose(coordinates[:,0:nDim])
# plot the orientation
# plt.rcParams.update({'font.size': 18})
grid_origin = select_gridpoint()
# print(grid_origin)
origin_save[iGrid] = grid_origin
# if iGrid <= 20:
# plot_grid(grid_origin, grid_width, nx, ny)
# plt.tight_layout()
# plt.legend(loc = 'best')
"""
#variables for interpolation
# block_x_start = get_grid_end_points(grid_origin, grid_width)
zeros_beyond_mesh = 0
#interpolate & append stream velocity
u_mesh = np.zeros((1,nNodes,1))
u_mesh[:,:,0] = np.transpose(velocity_field[:,0])
uvalue_grid = u2r.simple_interpolate_from_mesh_to_grid(u_mesh, x_all, x_ndgln, ddx, grid_origin, nx, ny, nz, zeros_beyond_mesh, nEl, nloc, nNodes, nScalar, nDim,1)
# print('uvalue_grid.shape', uvalue_grid.shape)
snapshot_ae[iGrid,:,:,0] = uvalue_grid.reshape((nx,ny))
snapshots_matrix[:nx*ny*1,iGrid] = uvalue_grid.reshape(-1)
#interpolate & append v velocity
v_mesh = np.zeros((1,nNodes,1))
v_mesh[:,:,0] = np.transpose(velocity_field[:,1])
vvalue_grid = u2r.simple_interpolate_from_mesh_to_grid(v_mesh, x_all, x_ndgln, ddx, grid_origin, nx, ny, nz, zeros_beyond_mesh, nEl, nloc, nNodes, nScalar, nDim,1)
snapshot_ae[iGrid,:,:,1] = vvalue_grid.reshape((nx,ny))
snapshots_matrix[nx*ny:nx*ny*2,iGrid] = vvalue_grid.reshape(-1)
    #interpolate & append velocity absorption
va_mesh = np.zeros((1,nNodes,1))
va_mesh[:,:,0] = np.transpose(va_field)
vavalue_grid = u2r.simple_interpolate_from_mesh_to_grid(va_mesh, x_all, x_ndgln, ddx, grid_origin, nx, ny, nz, zeros_beyond_mesh, nEl, nloc, nNodes, nScalar, nDim,1)
# print(vavalue_grid.reshape(-1)[:30])
snapshot_ae[iGrid,:,:,2] = vavalue_grid.reshape((nx,ny))
snapshots_matrix[nx*ny*2:nx*ny*3,iGrid] = vavalue_grid.reshape(-1)
if iGrid <= 20 and save_imgs:
plt.savefig('plots/grid_selection_{}'.format(iGrid))
plt.close()
# print('snapshots_matrix.shape: ', snapshots_matrix.shape)
# print('saved grid origins for later - shape:', origin_save.shape)
# Scale snapshot matrix
min_vel = np.amin(snapshots_matrix[:nx*ny*2,:]) #minimum among u and v velocity
max_vel = np.amax(snapshots_matrix[:nx*ny*2,:])
vel_scaling = 1/(max_vel-min_vel)
va_scaling = 1e-5 #maximum of buildings information is 100000
snapshots_matrix[:nx*ny*2,:] = vel_scaling*(snapshots_matrix[:nx*ny*2,:]-min_vel)
snapshots_matrix[nx*ny*2:nx*ny*3,:] = va_scaling*snapshots_matrix[nx*ny*2:nx*ny*3,:]
#scale snapshots for ae
snapshot_ae[:,:,:,:2] = vel_scaling*(snapshot_ae[:,:,:,:2]-min_vel)
snapshot_ae[:,:,:,2] = va_scaling*snapshot_ae[:,:,:,2]
# Export snapshots for training ae
if save_snapshots:
np.save("ae_data.npy", snapshot_ae)
np.save("snapshots_pod.npy", snapshots_matrix)
"""
#%%
np.save("grid_origins.npy", origin_save)
np.save("rotation_angles.npy", rangle_save)
# %%
# -------------------------------------------------------------------------------------------------
# POD
# some POD truncation settings
cumulative_tol = 0.90
# nPOD = [nTime] # len(nPOD) = nFields
# nPOD = [-2]
nPOD = [-1]
# nPOD = [10] # 100 50 10
bases = []
singular_values = []
#snapshots matrix done!
nrows, ncols = snapshots_matrix.shape
if nrows > ncols:
SSmatrix = np.dot(snapshots_matrix.T, snapshots_matrix)
else:
SSmatrix = np.dot(snapshots_matrix, snapshots_matrix.T)
print('WARNING - CHECK HOW THE BASIS FUNCTIONS ARE CALCULATED WITH THIS METHOD')
# print('SSmatrix', SSmatrix.shape)
eigvalues, v = np.linalg.eigh(SSmatrix)
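# Method of snapshots: the eigenvectors of the (smaller) Gram matrix are computed here,
# and each POD basis vector is recovered below as snapshots_matrix @ v[:,j] normalised
# to unit length, avoiding an SVD of the full snapshots matrix.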
# print('eigenvalues', eigvalues)
# print('normalized eigenvector ([:j]) v = ', v)
eigvalues = eigvalues[::-1]
# get rid of small negative eigenvalues (there shouldn't be any, as the eigenvalues of the
# Gram matrix S^T S (or S S^T) are non-negative, but tiny negative values can appear from round-off)
eigvalues[eigvalues<0] = 0
s_values = np.sqrt(eigvalues)
# print('s values', s_values[0:20])
singular_values.append(s_values)
cumulative_info = np.zeros(len(eigvalues))
for j in range(len(eigvalues)):
if j==0:
cumulative_info[j] = eigvalues[j]
else:
cumulative_info[j] = cumulative_info[j-1] + eigvalues[j]
cumulative_info = cumulative_info / cumulative_info[-1]
nAll = len(eigvalues)
# Apply POD
#if nPOD = -1, use cumulative tolerance
#if nPOD = -2 use all coefficients (or set nPOD = nTime)
#if nPOD > 0 use nPOD coefficients as defined by the user
if nPOD[0] == -1:
# SVD truncation - percentage of information captured or number
# cumulative_tol = nirom_options.compression.cumulative_tol[iField]
nPOD_iField = sum(cumulative_info <= cumulative_tol) #tolerance
nPOD[0] = nPOD_iField
elif nPOD[0] == -2:
nPOD_iField = nAll
nPOD[0] = nPOD_iField
else:
nPOD_iField = nPOD[0]
print("retaining", nPOD_iField, "basis functions of a possible", len(eigvalues))
basis_functions = np.zeros((nx*ny*nz*(nDim+1),nPOD_iField)) # nDim should be nScalar?
for j in reversed(range(nAll-nPOD_iField,nAll)):
Av = np.dot(snapshots_matrix,v[:,j])
basis_functions[:,nAll-j-1] = Av/np.linalg.norm(Av)
# bases.append(basis_functions)
# write_sing_values(singular_values)
print('basis_functions.shape: ', basis_functions.shape)
np.save("basis_functions.npy", basis_functions)
# %%
# -------------------------------------------------------------------------------------------------
# Starshape information using basis functions
# basis_functions is the matrix R - to find reduced variables of snapshots
print("Generating starshape grids")
zeros_beyond_mesh = 0
#set number of starshape grids to use
nSGrids = 3000
assert nSGrids <= nGrids, "Cannot make more starshape grids than number of central grids we have"
Ssnapshots_matrix = np.zeros((nx*ny*3, nSGrids*5))
Ssnapshot_ae = np.zeros((nSGrids*5,nx,ny,3))
for iGrid in range(nSGrids):
print("Sampling starshape grid ", iGrid+1, " out of ", nSGrids)
# call saved values for grid origin and random angle
grid_origin = origin_save[iGrid]
rangle = rangle_save[iGrid]
# rotate mesh and velocity
coordinates = coordinates_org
coordinates = rotate_mesh(rangle)
x_all = np.transpose(coordinates[:,0:nDim])
velocity_field = representative_vtu.GetField(field_names[0])[:,:nDim] #field name 0 is velocity field
velocity_field = rotate_vel(rangle)
va_field = representative_vtu.GetField(field_names[1])[:,0]
#starshape for u_vel
u_mesh = np.zeros((1,nNodes,1))
u_mesh[:,:,0] = np.transpose(velocity_field[:,0])
u_smatrix, u_sae = sample_starshape(u_mesh, grid_origin)
Ssnapshots_matrix[:nx*ny,5*iGrid:5*iGrid+5] = u_smatrix
Ssnapshot_ae[5*iGrid:5*iGrid+5,:,:,0] = u_sae
#starshape for v_vel
v_mesh = np.zeros((1,nNodes,1))
v_mesh[:,:,0] = np.transpose(velocity_field[:,1])
v_smatrix, v_sae = sample_starshape(v_mesh, grid_origin)
Ssnapshots_matrix[nx*ny:nx*ny*2,5*iGrid:5*iGrid+5] = v_smatrix
Ssnapshot_ae[5*iGrid:5*iGrid+5,:,:,1] = v_sae
#starshape for va
va_mesh = np.zeros((1,nNodes,1))
va_mesh[:,:,0] = np.transpose(va_field)
va_smatrix, va_sae = sample_starshape(va_mesh, grid_origin)
Ssnapshots_matrix[nx*ny*2:nx*ny*3,5*iGrid:5*iGrid+5] = va_smatrix
Ssnapshot_ae[5*iGrid:5*iGrid+5,:,:,2] = va_sae
#Scale values
min_vel = np.amin(Ssnapshots_matrix[:nx*ny*2,:]) #minimum among u and v velocity
max_vel = np.amax(Ssnapshots_matrix[:nx*ny*2,:])
vel_scaling = 1/(max_vel-min_vel)
min_va = np.amin(Ssnapshots_matrix[nx*ny*2:nx*ny*3,:]) #minimum of the velocity absorption field
max_va = np.amax(Ssnapshots_matrix[nx*ny*2:nx*ny*3,:])
va_scaling = 1/(max_va-min_va) #scale by the range of the absorption field
Ssnapshots_matrix[:nx*ny*2,:] = vel_scaling*(Ssnapshots_matrix[:nx*ny*2,:]-min_vel)
Ssnapshots_matrix[nx*ny*2:nx*ny*3,:] = va_scaling*Ssnapshots_matrix[nx*ny*2:nx*ny*3,:]
#scale snapshots for ae
Ssnapshot_ae[:,:,:,:2] = vel_scaling*(Ssnapshot_ae[:,:,:,:2]-min_vel)
Ssnapshot_ae[:,:,:,2] = va_scaling*Ssnapshot_ae[:,:,:,2]
if save_stargrid:
np.save("starshape_ae.npy",Ssnapshot_ae)
# np.save("starshape_pod.npy", Ssnapshots_matrix)
for i in range(10):
plot_starshape(i)
plt.savefig('plots/starshape_{}'.format(i))
plt.close()
# # -------------------------------------------------------------------------------------------------
#find POD coefficients for starshape grids
#in POD_imports.py
# # -------------------------------------------------------------------------------------------------
# find node duplications when superposing results (don't need this part for my project)
# my_field = representative_vtu.GetField(field_names[0])[:,0] #u(x) velocity only
# my_field = 1
# nScalar_test = 1
# # for one timestep
# # for one field
# value_mesh = np.zeros((nScalar_test,nNodes,1)) # nTime=1
# value_mesh[:,:,0] = np.transpose(my_field)
# superposed_grids = np.zeros((nNodes))
# for iGrid in range(nGrids):
# block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid)
# zeros_on_mesh = 0
# value_grid = u2r.simple_interpolate_from_mesh_to_grid(value_mesh,x_all,x_ndgln,ddx,block_x_start,nx,ny,nz,zeros_on_mesh, nEl,nloc,nNodes,nScalar_test,nDim,1)
# zeros_on_grid = 1
# value_back_on_mesh = u2r.interpolate_from_grid_to_mesh(value_grid, block_x_start, ddx, x_all, zeros_on_grid, nScalar_test,nx,ny,nz,nNodes,nDim, 1)
# superposed_grids = superposed_grids + np.rint(np.squeeze(value_back_on_mesh))
# superposed_grids = np.array(superposed_grids, dtype='int')
# duplicated_nodal_values = []
# for iNode in range(nNodes):
# if superposed_grids[iNode] == 0:
# # this is bad news - the node hasn't appeared in any grid
# print ('zero:', iNode)
# elif superposed_grids[iNode] == 2:
# print ('two:', iNode)
# # the node appears in two grids - deal with this later
# duplicated_nodal_values.append(iNode)
# elif superposed_grids[iNode] != 1:
# # most of the nodes will appear in one grid
# print ('unknown:', iNode, superposed_grids[iNode])
# -------------------------------------------------------------------------------------------------
# build up the snapshots matrix from solutions on each of the grids
# offset = 500 # for time level - at which time level to start taking the snapshots
# snapshots_data = []
# for iField in range(nFields):
# #nDoF = nNodes # could be different value per field
# snapshots_data.append(np.zeros((nx*ny*nz*nDim, nGrids*nTime))) #create snapshot for each field we have
# #value_mesh = np.zeros((nScalar,nNodes)) # no need to initialise - overwritten
# for iTime in range(nTime):
# #print('')
# #print('time level', iTime)
# filename = snapshot_data_location + snapshot_file_base + str(offset+iTime) + '.vtu'
# vtu_data = vtktools.vtu(filename)
# for iField in range(nFields):
# my_field = vtu_data.GetField(field_names[iField])[:,0:nDim]
# for iGrid in range(nGrids):
# block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid) #randomly locate
# if iTime==0:
# print('block_x_start', block_x_start)
# #value_mesh = np.zeros((nScalar,nNodes,nTime)) # nTime - this must need initialising here
# #value_mesh[:,:,iTime] = np.transpose(my_field)
# value_mesh = np.transpose(my_field) # size nScalar,nNodes
# # interpolate field onto structured mesh
# # feed in one result at t time (no need to store in value_mesh in this case)
# zeros_beyond_mesh = 0 # 0 extrapolate solution (for the cylinder in fpc); 1 gives zeros for nodes outside mesh
# #value_grid = u2r.simple_interpolate_from_mesh_to_grid(value_mesh[:,:,iTime],x_all,x_ndgln,ddx,block_x_start,nx,ny,nz,zeros_beyond_mesh,nEl,nloc,nNodes,nScalar,nDim,1)
# value_grid = u2r.simple_interpolate_from_mesh_to_grid(value_mesh, x_all, x_ndgln, ddx, block_x_start, nx, ny, nz, zeros_beyond_mesh, nEl, nloc, nNodes, nScalar, nDim,1)
# snapshots_data[iField][:,iTime*nGrids+iGrid] = value_grid.reshape(-1)
# # ---------------------------------------------------------------------------------------
# apply POD to the snapshots
# some POD truncation settings
# cumulative_tol = 0.99
# nPOD = [nTime] # len(nPOD) = nFields
# nPOD = [-2]
# nPOD = [10] # 100 50 10
# bases = []
# singular_values = []
# for iField in range(nFields):
# snapshots_matrix = snapshots_data[iField]
# nrows, ncols = snapshots_matrix.shape
# if nrows > ncols:
# SSmatrix = np.dot(snapshots_matrix.T, snapshots_matrix)
# else:
# SSmatrix = np.dot(snapshots_matrix, snapshots_matrix.T)
# print('WARNING - CHECK HOW THE BASIS FUNCTIONS ARE CALCULATED WITH THIS METHOD')
# print('SSmatrix', SSmatrix.shape)
# eigvalues, v = np.linalg.eigh(SSmatrix)
# eigvalues = eigvalues[::-1]
# # get rid of small negative eigenvalues (there shouldn't be any as the eigenvalues of a real, symmetric
# # matrix are non-negative, but sometimes very small negative values do appear)
# eigvalues[eigvalues<0] = 0
# s_values = np.sqrt(eigvalues)
# #print('s values', s_values[0:20])
# singular_values.append(s_values)
# cumulative_info = np.zeros(len(eigvalues))
# for j in range(len(eigvalues)):
# if j==0:
# cumulative_info[j] = eigvalues[j]
# else:
# cumulative_info[j] = cumulative_info[j-1] + eigvalues[j]
# cumulative_info = cumulative_info / cumulative_info[-1]
# nAll = len(eigvalues)
# #if nPOD = -1, use cumulative tolerance
# #if nPOD = -2 use all coefficients (or set nPOD = nTime)
# #if nPOD > 0 use nPOD coefficients as defined by the user
# if nPOD[iField] == -1:
# # SVD truncation - percentage of information captured or number
# cumulative_tol = nirom_options.compression.cumulative_tol[iField]
# nPOD_iField = sum(cumulative_info <= cumulative_tol) #tolerance
# nPOD[iField] = nPOD_iField
# elif nPOD[iField] == -2:
# nPOD_iField = nAll
# nPOD[iField] = nPOD_iField
# else:
# nPOD_iField = nPOD[iField]
# print("retaining", nPOD_iField, "basis functions of a possible", len(eigvalues))
# basis_functions = np.zeros((nx*ny*nz*nDim,nPOD_iField)) # nDim should be nScalar?
# for j in reversed(range(nAll-nPOD_iField,nAll)):
# Av = np.dot(snapshots_matrix,v[:,j])
# basis_functions[:,nAll-j-1] = Av/np.linalg.norm(Av)
# bases.append(basis_functions)
# write_sing_values(singular_values)
# # get reconstructed snapshots
# reconstruction_data = []
# for iField in range(nFields):
# basis = bases[iField]
# snapshots_matrix = snapshots_data[iField]
# print('snapshots_matrix', snapshots_matrix.shape)
# reconstruction_on_mesh = np.zeros((nScalar*nTime,nNodes))
# #reconstruction_on_mesh_from_one_grid = np.zeros((nScalar,nNodes))
# for iGrid in range(nGrids):
# #:,iTime*nGrids+iGrid
# # want solutions in time for a particular grid
# snapshots_per_grid = np.zeros((nx*ny*nz*nDim,nTime))
# for iTime in range(nTime):
# #print('taking snapshots from', iTime*nGrids+iGrid )
# snapshots_per_grid[:,iTime] = snapshots_matrix[:,iTime*nGrids+iGrid]
# reconstruction = np.dot( basis, np.dot( basis.T, snapshots_per_grid ) )
# #print('reconstruction', reconstruction.shape)
# #reconstruction_data.append(reconstruction)
# #print ('recon shape',reconstruction.shape)
# reconstruction_grid = reconstruction.reshape(nScalar,nx,ny,nTime)
# #print ('recon shape just before interpolating back onto mesh',reconstruction.reshape(nScalar,nx,ny,nTime).shape)
# # plot solution on each grid at 4 time steps
# #fig, axs = plt.subplots(2, 2, figsize=(15,15))
# #if iGrid==0:
# # levels = np.linspace(0, 4, 5)
# #elif iGrid==1:
# # levels = np.linspace(5, 9, 5)
# #icount = 0
# #for col in range(2):
# # for row in range(2):
# # ax = axs[row, col]
# # ax.set_title('time '+str(icount))
# # pcm = ax.contourf(reconstruction_grid[0,:,:,icount],levels=levels)
# # fig.colorbar(pcm,ax=ax)
# # icount += 1
# #plt.show()
# block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid)
# if iTime==0:
# print('block_x_start', block_x_start)
# for iTime in range(nTime):
# zeros_beyond_grid = 1 # 0 extrapolate solution; 1 gives zeros for nodes outside grid
# reconstruction_on_mesh_from_one_grid = u2r.interpolate_from_grid_to_mesh(reconstruction_grid[:,:,:,iTime], block_x_start, ddx, x_all, zeros_beyond_grid, nScalar,nx,ny,nz,nNodes,nDim, 1)
# #print('reconstruction_on_mesh_from_one_grid - about to add solutions',reconstruction_on_mesh_from_one_grid.shape)
# reconstruction_on_mesh[nScalar*iTime:nScalar*(iTime+1),:] = reconstruction_on_mesh[nScalar*iTime:nScalar*(iTime+1),:] + np.squeeze(reconstruction_on_mesh_from_one_grid)
# reconstruction_on_mesh[:,duplicated_nodal_values] = 0.5*reconstruction_on_mesh[:,duplicated_nodal_values]
# reconstruction_data.append(reconstruction_on_mesh)
# original_data = []
# #for ifield in range(nFields):
# # nDoF = nNodes # could be different value per field
# # original_data.append(np.zeros((nNodes, nDim*nTime)))
# original = np.zeros((nNodes, nDim*nTime))
# for iTime in range(nTime):
# #print('')
# #print('time level', iTime)
# filename = snapshot_data_location + snapshot_file_base + str(offset+iTime) + '.vtu'
# vtu_data = vtktools.vtu(filename)
# #original = np.zeros((nNodes, nDim*nTime))
# for iField in range(nFields):
# #vtu_data = vtktools.vtu(filename)
# my_field = vtu_data.GetField(field_names[iField])[:,0:nDim]
# original[:,iTime*nDim:(iTime+1)*nDim] = my_field
# #print('original.shape',original.shape)
# #original_data.append(original)
# # make directory for results
# path_to_reconstructed_results = 'reconstructed_results/'
# if not os.path.isdir(path_to_reconstructed_results):
# os.mkdir(path_to_reconstructed_results)
# template_vtu = snapshot_data_location + snapshot_file_base + '0.vtu'
# for iTime in range(nTime):
# for iField in range(nFields):
# reconstruction_on_mesh = reconstruction_data[iField]
# #print ('recon shape',reconstruction.shape)
# # for more than one field, will this work?
# #create_vtu_file_timelevel(nNodes, reconstruction_on_mesh[iTime*nScalar:(iTime+1)*nScalar,:], template_vtu, iTime)
# create_vtu_file(path_to_reconstructed_results, nNodes, reconstruction_on_mesh[iTime*nScalar:(iTime+1)*nScalar,:], template_vtu, original[:,iTime*nDim:(iTime+1)*nDim], iTime)
# print('ddx',ddx.shape,ddx)
# print('grid lengths', (nx-1)*ddx[0], (ny-1)*ddx[1] ) #, (nz-1)*ddx[2])
# print('Finished.')
# %%
|
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, models, transaction
from django.utils.html import format_html
from .managers import OrderableManager
class Orderable(models.Model):
"""
An orderable object that keeps all the instances in an enforced order.
If there's a unique_together which includes the sort_order field then that
will be used when checking for collisions etc.
This works well for inlines, which can be manually reordered by entering
    numbers, and the save function will protect against collisions.
For main objects, you would want to also use "OrderableAdmin", which will
make a nice jquery admin interface.
"""
sort_order = models.IntegerField(blank=True, db_index=True)
objects = OrderableManager()
class Meta:
abstract = True
ordering = ['sort_order']
def get_unique_fields(self):
"""List field names that are unique_together with `sort_order`."""
for unique_together in self._meta.unique_together:
if 'sort_order' in unique_together:
unique_fields = list(unique_together)
unique_fields.remove('sort_order')
return ['%s_id' % f for f in unique_fields]
return []
def get_filtered_manager(self):
manager = self.__class__.objects
kwargs = {field: getattr(self, field) for field in self.get_unique_fields()}
return manager.filter(**kwargs)
def next(self):
if not self.sort_order:
return None
return self.get_filtered_manager().after(self)
def prev(self):
if not self.sort_order:
return None
return self.get_filtered_manager().before(self)
def validate_unique(self, exclude=None):
if self._is_sort_order_unique_together_with_something():
exclude = exclude or []
if 'sort_order' not in exclude:
exclude.append('sort_order')
return super(Orderable, self).validate_unique(exclude=exclude)
def _is_sort_order_unique_together_with_something(self):
"""
Is the sort_order field unique_together with something
"""
unique_together = self._meta.unique_together
for fields in unique_together:
if 'sort_order' in fields and len(fields) > 1:
return True
return False
@staticmethod
def _update(qs):
"""
Increment the sort_order in a queryset.
Handle IntegrityErrors caused by unique constraints.
"""
try:
with transaction.atomic():
qs.update(sort_order=models.F('sort_order') + 1)
except IntegrityError:
for obj in qs.order_by('-sort_order'):
qs.filter(pk=obj.pk).update(sort_order=models.F('sort_order') + 1)
def _save(self, objects, old_pos, new_pos):
"""WARNING: Intensive giggery-pokery zone."""
to_shift = objects.exclude(pk=self.pk) if self.pk else objects
# If not set, insert at end.
if self.sort_order is None:
self._move_to_end(objects)
# New insert.
elif not self.pk and not old_pos:
# Increment `sort_order` on objects with:
# sort_order > new_pos.
to_shift = to_shift.filter(sort_order__gte=self.sort_order)
self._update(to_shift)
self.sort_order = new_pos
# self.sort_order decreased.
elif old_pos and new_pos < old_pos:
self._move_to_end(objects)
super(Orderable, self).save()
# Increment `sort_order` on objects with:
# sort_order >= new_pos and sort_order < old_pos
to_shift = to_shift.filter(sort_order__gte=new_pos, sort_order__lt=old_pos)
self._update(to_shift)
self.sort_order = new_pos
# self.sort_order increased.
elif old_pos and new_pos > old_pos:
self._move_to_end(objects)
super(Orderable, self).save()
# Decrement sort_order on objects with:
# sort_order <= new_pos and sort_order > old_pos.
to_shift = to_shift.filter(sort_order__lte=new_pos, sort_order__gt=old_pos)
to_shift.update(sort_order=models.F('sort_order') - 1)
self.sort_order = new_pos
def _move_to_end(self, objects):
"""Temporarily save `self.sort_order` elsewhere (max_obj)."""
max_obj = objects.all().aggregate(models.Max('sort_order'))['sort_order__max']
self.sort_order = max_obj + 1 if max_obj else 1
def _unique_togethers_changed(self):
for field in self.get_unique_fields():
if getattr(self, '_original_%s' % field, False):
return True
return False
def save(self, *args, **kwargs):
"""Keep the unique order in sync."""
objects = self.get_filtered_manager()
old_pos = getattr(self, '_original_sort_order', None)
new_pos = self.sort_order
if old_pos is None and self._unique_togethers_changed():
self.sort_order = None
new_pos = None
try:
with transaction.atomic():
self._save(objects, old_pos, new_pos)
except IntegrityError:
with transaction.atomic():
old_pos = objects.filter(pk=self.pk).values_list(
'sort_order', flat=True)[0]
self._save(objects, old_pos, new_pos)
# Call the "real" save() method.
super(Orderable, self).save(*args, **kwargs)
def sort_order_display(self):
return format_html(
'<span id="neworder_{}" class="sorthandle">{}</span>',
self.id, self.sort_order,
)
sort_order_display.allow_tags = True
sort_order_display.short_description = 'Order'
sort_order_display.admin_order_field = 'sort_order'
def __setattr__(self, attr, value):
"""
Cache original value of `sort_order` when a change is made to it.
Also cache values of other unique together fields.
Greatly inspired by http://code.google.com/p/django-audit/
"""
if attr == 'sort_order' or attr in self.get_unique_fields():
try:
current = self.__dict__[attr]
except (AttributeError, KeyError, ObjectDoesNotExist):
pass
else:
previously_set = getattr(self, '_original_%s' % attr, False)
if current != value and not previously_set:
setattr(self, '_original_%s' % attr, current)
super(Orderable, self).__setattr__(attr, value)
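# ---------------------------------------------------------------------------
# Illustrative sketch only (hypothetical model, not part of this module): a
# subclass whose ordering is scoped per parent by including `sort_order` in
# unique_together, so saving only reshuffles siblings of the same gallery.
#
# class GalleryImage(Orderable):
#     gallery = models.ForeignKey('Gallery', on_delete=models.CASCADE)
#     image = models.ImageField(upload_to='gallery/')
#
#     class Meta(Orderable.Meta):
#         unique_together = (('gallery', 'sort_order'),)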
|
from solvers.neighborhoods.Neighborhood import Neighborhood
from solvers.LocalSearch.SimpleLocalSearch import SimpleLocalSearch
class RVNS(object):
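    """
    Reduced Variable Neighbourhood Search: repeatedly draws a random neighbour from the
    current neighbourhood; on improvement (evaluator.compare) the search restarts from
    the first neighbourhood, otherwise it moves on to the next one, until the evaluator's
    stopping criterion is reached.
    """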
def __init__(self, neighborhoods, evaluator, searchStrategy):
self.neighborhoods = neighborhoods
self.evaluator = evaluator
self.searchStrategy = searchStrategy
def optimize(self, x):
self.setStrategy(x)
while not self.evaluator.criteriaReached(x):
l = 0
while l<len(self.neighborhoods):
x_prim = self.neighborhoods[l].chooseNext(x)
                if x_prim is None:
#neighborhood exhausted!
l+=1
continue
if self.evaluator.compare(x_prim, x):
x = x_prim
for neighb in self.neighborhoods:
neighb.reset(x)
l = 0
return x
def setStrategy(self,x):
for neighb in self.neighborhoods:
neighb.reset(x,Neighborhood.RANDOM)
|
# -*- coding: utf-8 -*-
#
# libwally-core documentation build configuration file
SCANNING, DOCS, FUNC = 1, 2, 3
def get_doc_lines(l):
if l.startswith('.. '):
return [' ' + l, '']
return [' ' + l]
def output_func(docs, func):
is_normal_ret = 'WALLY_CORE_API int' in func
func = func[:-1].replace('WALLY_CORE_API','').strip()
func = func.replace(',',', ').replace(' ', ' ')
ret = ['.. c:function:: ' + func, '']
seen_param = False
for l in docs:
ret.extend(get_doc_lines(l))
if is_normal_ret:
ret.append(' :return: WALLY_OK or an error code.') # FIXME: Link
ret.append(' :rtype: int')
ret.append('')
ret.append('')
return ret
def extract_docs(infile, outfile):
lines = [l.strip() for l in open(infile).readlines()]
title = infile.split('_')[1][:-2].capitalize() + ' Functions'
title_markup = '=' * len(title)
output, current, func, state = [title, title_markup, ''], [], '', SCANNING
for l in lines:
if state == SCANNING:
if l.startswith('/**') and '*/' not in l:
current, func, state = [l[3:]], '', DOCS
elif state == DOCS:
if l == '*/':
state = FUNC
else:
assert l.startswith('*'), l
if l.startswith('*|'):
current[-1] += ' ' + l[2:].strip()
else:
current.append(l[1:].strip())
else: # FUNC
func += l
if ');' in func:
output.extend(output_func(current, func))
state = SCANNING
with open(outfile, 'w') as f:
f.write('\n'.join(output))
# Generate the documentation source files
for m in [
'core', 'crypto', 'address', 'bip32', 'bip38', 'bip39', 'script', 'psbt', 'transaction',
'elements'
]:
extract_docs('../../include/wally_%s.h' % m, '%s.rst' % m)
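# Illustrative example of the transformation performed above (assumed input shape,
# not taken from a specific header). A commented declaration such as
#
#   /**
#    * Description of the call.
#    */
#   WALLY_CORE_API int wally_example(int arg);
#
# is emitted into the generated .rst roughly as
#
#   .. c:function:: int wally_example(int arg)
#
#       Description of the call.
#       :return: WALLY_OK or an error code.
#       :rtype: int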
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
default_role = 'any'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libwally-core'
copyright = u'2019, Jon Griffiths'
author = u'Jon Griffiths'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.7.7'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%s.tex' % project, u'%s Documentation' % project,
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, u'%s Documentation' % project,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'%s Documentation' % project,
author, project, 'The libwally Bitcoin library.',
'Miscellaneous'),
]
|
// This file is part of Better Enums, released under the BSD 2-clause license.
// See doc/LICENSE for details, or visit http://github.com/aantron/better-enums.
#pragma once
#ifndef BETTER_ENUMS_ENUM_H
#define BETTER_ENUMS_ENUM_H
#include <cstddef>
#include <cstring>
#include <iosfwd>
#include <stdexcept>
// Feature detection.
#ifdef __GNUC__
# ifdef __clang__
# if __has_feature(cxx_constexpr)
# define BETTER_ENUMS_HAVE_CONSTEXPR
# endif
# if !defined(__EXCEPTIONS) || !__has_feature(cxx_exceptions)
# define BETTER_ENUMS_NO_EXCEPTIONS
# endif
# else
# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
# if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6))
# define BETTER_ENUMS_HAVE_CONSTEXPR
# endif
# endif
# ifndef __EXCEPTIONS
# define BETTER_ENUMS_NO_EXCEPTIONS
# endif
# endif
#endif
#ifdef _MSC_VER
# if _MSC_VER >= 1911
# define BETTER_ENUMS_HAVE_CONSTEXPR
# endif
# ifdef __clang__
# if __has_feature(cxx_constexpr)
# define BETTER_ENUMS_HAVE_CONSTEXPR
# endif
# endif
# ifndef _CPPUNWIND
# define BETTER_ENUMS_NO_EXCEPTIONS
# endif
# if _MSC_VER < 1600
# define BETTER_ENUMS_VC2008_WORKAROUNDS
# endif
#endif
#ifdef BETTER_ENUMS_CONSTEXPR
# define BETTER_ENUMS_HAVE_CONSTEXPR
#endif
#ifdef BETTER_ENUMS_NO_CONSTEXPR
# ifdef BETTER_ENUMS_HAVE_CONSTEXPR
# undef BETTER_ENUMS_HAVE_CONSTEXPR
# endif
#endif
// GCC (and maybe clang) can be made to warn about using 0 or NULL when nullptr
// is available, so Better Enums tries to use nullptr. This passage uses
// availability of constexpr as a proxy for availability of nullptr, i.e. it
// assumes that nullptr is available when compiling on the right versions of gcc
// and clang with the right -std flag. This is actually slightly wrong, because
// nullptr is also available in Visual C++, but constexpr isn't. This
// imprecision doesn't matter, however, because VC++ doesn't have the warnings
// that make using nullptr necessary.
#ifdef BETTER_ENUMS_HAVE_CONSTEXPR
# define BETTER_ENUMS_CONSTEXPR_ constexpr
# define BETTER_ENUMS_NULLPTR nullptr
#else
# define BETTER_ENUMS_CONSTEXPR_
# define BETTER_ENUMS_NULLPTR NULL
#endif
#ifndef BETTER_ENUMS_NO_EXCEPTIONS
# define BETTER_ENUMS_IF_EXCEPTIONS(x) x
#else
# define BETTER_ENUMS_IF_EXCEPTIONS(x)
#endif
#ifdef __GNUC__
# define BETTER_ENUMS_UNUSED __attribute__((__unused__))
#else
# define BETTER_ENUMS_UNUSED
#endif
// Higher-order preprocessor macros.
#ifdef BETTER_ENUMS_MACRO_FILE
# include BETTER_ENUMS_MACRO_FILE
#else
#define BETTER_ENUMS_PP_MAP(macro, data, ...) \
BETTER_ENUMS_ID( \
BETTER_ENUMS_APPLY( \
BETTER_ENUMS_PP_MAP_VAR_COUNT, \
BETTER_ENUMS_PP_COUNT(__VA_ARGS__)) \
(macro, data, __VA_ARGS__))
#define BETTER_ENUMS_PP_MAP_VAR_COUNT(count) BETTER_ENUMS_M ## count
#define BETTER_ENUMS_APPLY(macro, ...) BETTER_ENUMS_ID(macro(__VA_ARGS__))
#define BETTER_ENUMS_ID(x) x
#define BETTER_ENUMS_M1(m, d, x) m(d,0,x)
#define BETTER_ENUMS_M2(m,d,x,...) m(d,1,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M1(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M3(m,d,x,...) m(d,2,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M2(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M4(m,d,x,...) m(d,3,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M3(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M5(m,d,x,...) m(d,4,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M4(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M6(m,d,x,...) m(d,5,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M5(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M7(m,d,x,...) m(d,6,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M6(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M8(m,d,x,...) m(d,7,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M7(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M9(m,d,x,...) m(d,8,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M8(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M10(m,d,x,...) m(d,9,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M9(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M11(m,d,x,...) m(d,10,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M10(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M12(m,d,x,...) m(d,11,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M11(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M13(m,d,x,...) m(d,12,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M12(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M14(m,d,x,...) m(d,13,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M13(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M15(m,d,x,...) m(d,14,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M14(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M16(m,d,x,...) m(d,15,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M15(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M17(m,d,x,...) m(d,16,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M16(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M18(m,d,x,...) m(d,17,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M17(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M19(m,d,x,...) m(d,18,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M18(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M20(m,d,x,...) m(d,19,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M19(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M21(m,d,x,...) m(d,20,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M20(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M22(m,d,x,...) m(d,21,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M21(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M23(m,d,x,...) m(d,22,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M22(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M24(m,d,x,...) m(d,23,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M23(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M25(m,d,x,...) m(d,24,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M24(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M26(m,d,x,...) m(d,25,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M25(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M27(m,d,x,...) m(d,26,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M26(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M28(m,d,x,...) m(d,27,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M27(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M29(m,d,x,...) m(d,28,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M28(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M30(m,d,x,...) m(d,29,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M29(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M31(m,d,x,...) m(d,30,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M30(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M32(m,d,x,...) m(d,31,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M31(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M33(m,d,x,...) m(d,32,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M32(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M34(m,d,x,...) m(d,33,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M33(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M35(m,d,x,...) m(d,34,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M34(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M36(m,d,x,...) m(d,35,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M35(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M37(m,d,x,...) m(d,36,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M36(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M38(m,d,x,...) m(d,37,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M37(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M39(m,d,x,...) m(d,38,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M38(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M40(m,d,x,...) m(d,39,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M39(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M41(m,d,x,...) m(d,40,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M40(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M42(m,d,x,...) m(d,41,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M41(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M43(m,d,x,...) m(d,42,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M42(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M44(m,d,x,...) m(d,43,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M43(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M45(m,d,x,...) m(d,44,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M44(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M46(m,d,x,...) m(d,45,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M45(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M47(m,d,x,...) m(d,46,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M46(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M48(m,d,x,...) m(d,47,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M47(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M49(m,d,x,...) m(d,48,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M48(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M50(m,d,x,...) m(d,49,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M49(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M51(m,d,x,...) m(d,50,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M50(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M52(m,d,x,...) m(d,51,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M51(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M53(m,d,x,...) m(d,52,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M52(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M54(m,d,x,...) m(d,53,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M53(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M55(m,d,x,...) m(d,54,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M54(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M56(m,d,x,...) m(d,55,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M55(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M57(m,d,x,...) m(d,56,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M56(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M58(m,d,x,...) m(d,57,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M57(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M59(m,d,x,...) m(d,58,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M58(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M60(m,d,x,...) m(d,59,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M59(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M61(m,d,x,...) m(d,60,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M60(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M62(m,d,x,...) m(d,61,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M61(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M63(m,d,x,...) m(d,62,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M62(m,d,__VA_ARGS__))
#define BETTER_ENUMS_M64(m,d,x,...) m(d,63,x) \
BETTER_ENUMS_ID(BETTER_ENUMS_M63(m,d,__VA_ARGS__))
#define BETTER_ENUMS_PP_COUNT_IMPL(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, \
_11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, \
_26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, \
_41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, \
_56, _57, _58, _59, _60, _61, _62, _63, _64, count, ...) count
#define BETTER_ENUMS_PP_COUNT(...) \
BETTER_ENUMS_ID(BETTER_ENUMS_PP_COUNT_IMPL(__VA_ARGS__, 64, 63, 62, 61, 60,\
59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42,\
41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24,\
23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, \
4, 3, 2, 1))
#define BETTER_ENUMS_ITERATE(X, f, l) X(f, l, 0) X(f, l, 1) X(f, l, 2) \
X(f, l, 3) X(f, l, 4) X(f, l, 5) X(f, l, 6) X(f, l, 7) X(f, l, 8) \
X(f, l, 9) X(f, l, 10) X(f, l, 11) X(f, l, 12) X(f, l, 13) X(f, l, 14) \
X(f, l, 15) X(f, l, 16) X(f, l, 17) X(f, l, 18) X(f, l, 19) X(f, l, 20) \
X(f, l, 21) X(f, l, 22) X(f, l, 23)
#endif // #ifdef BETTER_ENUMS_MACRO_FILE else case
namespace better_enums {
// Optional type.
template <typename T>
BETTER_ENUMS_CONSTEXPR_ inline T _default()
{
return static_cast<typename T::_enumerated>(0);
}
template <>
BETTER_ENUMS_CONSTEXPR_ inline const char* _default<const char*>()
{
return BETTER_ENUMS_NULLPTR;
}
template <>
BETTER_ENUMS_CONSTEXPR_ inline std::size_t _default<std::size_t>()
{
return 0;
}
template <typename T>
struct optional {
BETTER_ENUMS_CONSTEXPR_ optional() :
_valid(false), _value(_default<T>()) { }
BETTER_ENUMS_CONSTEXPR_ optional(T v) : _valid(true), _value(v) { }
BETTER_ENUMS_CONSTEXPR_ const T& operator *() const { return _value; }
BETTER_ENUMS_CONSTEXPR_ const T* operator ->() const { return &_value; }
BETTER_ENUMS_CONSTEXPR_ operator bool() const { return _valid; }
BETTER_ENUMS_CONSTEXPR_ const T& value() const { return _value; }
private:
bool _valid;
T _value;
};
template <typename CastTo, typename Element>
BETTER_ENUMS_CONSTEXPR_ static optional<CastTo>
_map_index(const Element *array, optional<std::size_t> index)
{
return index ? static_cast<CastTo>(array[*index]) : optional<CastTo>();
}
#ifdef BETTER_ENUMS_VC2008_WORKAROUNDS
#define BETTER_ENUMS_OR_THROW \
if (!maybe) \
throw std::runtime_error(message); \
\
return *maybe;
#else
#define BETTER_ENUMS_OR_THROW \
return maybe ? *maybe : throw std::runtime_error(message);
#endif
BETTER_ENUMS_IF_EXCEPTIONS(
template <typename T>
BETTER_ENUMS_CONSTEXPR_ static T _or_throw(optional<T> maybe,
const char *message)
{
BETTER_ENUMS_OR_THROW
}
)
template <typename T>
BETTER_ENUMS_CONSTEXPR_ static T* _or_null(optional<T*> maybe)
{
return maybe ? *maybe : BETTER_ENUMS_NULLPTR;
}
template <typename T>
BETTER_ENUMS_CONSTEXPR_ static T _or_zero(optional<T> maybe)
{
return maybe ? *maybe : T::_from_integral_unchecked(0);
}
// Functional sequencing. This is essentially a comma operator wrapped in a
// constexpr function. g++ 4.7 doesn't "accept" integral constants in the second
// position for the comma operator, and emits an external symbol, which then
// causes a linking error.
template <typename T, typename U>
BETTER_ENUMS_CONSTEXPR_ U
continue_with(T, U value) { return value; }
// Values array declaration helper.
template <typename EnumType>
struct _eat_assign {
explicit BETTER_ENUMS_CONSTEXPR_ _eat_assign(EnumType value) : _value(value)
{ }
template <typename Any>
BETTER_ENUMS_CONSTEXPR_ const _eat_assign&
operator =(Any) const { return *this; }
BETTER_ENUMS_CONSTEXPR_ operator EnumType () const { return _value; }
private:
EnumType _value;
};
// Iterables.
template <typename Element>
struct _iterable {
typedef const Element* iterator;
BETTER_ENUMS_CONSTEXPR_ iterator begin() const { return iterator(_array); }
BETTER_ENUMS_CONSTEXPR_ iterator end() const
{ return iterator(_array + _size); }
BETTER_ENUMS_CONSTEXPR_ std::size_t size() const { return _size; }
BETTER_ENUMS_CONSTEXPR_ const Element& operator [](std::size_t index) const
{ return _array[index]; }
BETTER_ENUMS_CONSTEXPR_ _iterable(const Element *array, std::size_t s) :
_array(array), _size(s) { }
private:
const Element * const _array;
const std::size_t _size;
};
// String routines.
BETTER_ENUMS_CONSTEXPR_ static const char *_name_enders = "= \t\n";
BETTER_ENUMS_CONSTEXPR_ inline bool _ends_name(char c, std::size_t index = 0)
{
return
c == _name_enders[index] ? true :
_name_enders[index] == '\0' ? false :
_ends_name(c, index + 1);
}
BETTER_ENUMS_CONSTEXPR_ inline bool _has_initializer(const char *s,
std::size_t index = 0)
{
return
s[index] == '\0' ? false :
s[index] == '=' ? true :
_has_initializer(s, index + 1);
}
BETTER_ENUMS_CONSTEXPR_ inline std::size_t
_constant_length(const char *s, std::size_t index = 0)
{
return _ends_name(s[index]) ? index : _constant_length(s, index + 1);
}
BETTER_ENUMS_CONSTEXPR_ inline char
_select(const char *from, std::size_t from_length, std::size_t index)
{
return index >= from_length ? '\0' : from[index];
}
BETTER_ENUMS_CONSTEXPR_ inline char _to_lower_ascii(char c)
{
return c >= 0x41 && c <= 0x5A ? static_cast<char>(c + 0x20) : c;
}
BETTER_ENUMS_CONSTEXPR_ inline bool _names_match(const char *stringizedName,
const char *referenceName,
std::size_t index = 0)
{
return
_ends_name(stringizedName[index]) ? referenceName[index] == '\0' :
referenceName[index] == '\0' ? false :
stringizedName[index] != referenceName[index] ? false :
_names_match(stringizedName, referenceName, index + 1);
}
BETTER_ENUMS_CONSTEXPR_ inline bool
_names_match_nocase(const char *stringizedName, const char *referenceName,
std::size_t index = 0)
{
return
_ends_name(stringizedName[index]) ? referenceName[index] == '\0' :
referenceName[index] == '\0' ? false :
_to_lower_ascii(stringizedName[index]) !=
_to_lower_ascii(referenceName[index]) ? false :
_names_match_nocase(stringizedName, referenceName, index + 1);
}
inline void _trim_names(const char * const *raw_names,
const char **trimmed_names,
char *storage, std::size_t count)
{
std::size_t offset = 0;
for (std::size_t index = 0; index < count; ++index) {
trimmed_names[index] = storage + offset;
std::size_t trimmed_length =
std::strcspn(raw_names[index], _name_enders);
storage[offset + trimmed_length] = '\0';
std::size_t raw_length = std::strlen(raw_names[index]);
offset += raw_length + 1;
}
}
// Eager initialization.
template <typename Enum>
struct _initialize_at_program_start {
_initialize_at_program_start() { Enum::initialize(); }
};
} // namespace better_enums
// Array generation macros.
#define BETTER_ENUMS_EAT_ASSIGN_SINGLE(EnumType, index, expression) \
(EnumType)((::better_enums::_eat_assign<EnumType>)EnumType::expression),
#define BETTER_ENUMS_EAT_ASSIGN(EnumType, ...) \
BETTER_ENUMS_ID( \
BETTER_ENUMS_PP_MAP( \
BETTER_ENUMS_EAT_ASSIGN_SINGLE, EnumType, __VA_ARGS__))
#ifdef BETTER_ENUMS_HAVE_CONSTEXPR
#define BETTER_ENUMS_SELECT_SINGLE_CHARACTER(from, from_length, index) \
::better_enums::_select(from, from_length, index),
#define BETTER_ENUMS_SELECT_CHARACTERS(from, from_length) \
BETTER_ENUMS_ITERATE( \
BETTER_ENUMS_SELECT_SINGLE_CHARACTER, from, from_length)
#define BETTER_ENUMS_TRIM_SINGLE_STRING(ignored, index, expression) \
constexpr std::size_t _length_ ## index = \
::better_enums::_constant_length(#expression); \
constexpr const char _trimmed_ ## index [] = \
{ BETTER_ENUMS_SELECT_CHARACTERS(#expression, _length_ ## index) }; \
constexpr const char *_final_ ## index = \
::better_enums::_has_initializer(#expression) ? \
_trimmed_ ## index : #expression;
#define BETTER_ENUMS_TRIM_STRINGS(...) \
BETTER_ENUMS_ID( \
BETTER_ENUMS_PP_MAP( \
BETTER_ENUMS_TRIM_SINGLE_STRING, ignored, __VA_ARGS__))
#define BETTER_ENUMS_REFER_TO_SINGLE_STRING(ignored, index, expression) \
_final_ ## index,
#define BETTER_ENUMS_REFER_TO_STRINGS(...) \
BETTER_ENUMS_ID( \
BETTER_ENUMS_PP_MAP( \
BETTER_ENUMS_REFER_TO_SINGLE_STRING, ignored, __VA_ARGS__))
#endif // #ifdef BETTER_ENUMS_HAVE_CONSTEXPR
#define BETTER_ENUMS_STRINGIZE_SINGLE(ignored, index, expression) #expression,
#define BETTER_ENUMS_STRINGIZE(...) \
BETTER_ENUMS_ID( \
BETTER_ENUMS_PP_MAP( \
BETTER_ENUMS_STRINGIZE_SINGLE, ignored, __VA_ARGS__))
#define BETTER_ENUMS_RESERVE_STORAGE_SINGLE(ignored, index, expression) \
#expression ","
#define BETTER_ENUMS_RESERVE_STORAGE(...) \
BETTER_ENUMS_ID( \
BETTER_ENUMS_PP_MAP( \
BETTER_ENUMS_RESERVE_STORAGE_SINGLE, ignored, __VA_ARGS__))
// The enums proper.
#define BETTER_ENUMS_NS(EnumType) better_enums_data_ ## EnumType
#ifdef BETTER_ENUMS_VC2008_WORKAROUNDS
#define BETTER_ENUMS_COPY_CONSTRUCTOR(Enum) \
BETTER_ENUMS_CONSTEXPR_ Enum(const Enum &other) : \
_value(other._value) { }
#else
#define BETTER_ENUMS_COPY_CONSTRUCTOR(Enum)
#endif
#define BETTER_ENUMS_TYPE(SetUnderlyingType, SwitchType, GenerateSwitchType, \
GenerateStrings, ToStringConstexpr, \
DeclareInitialize, DefineInitialize, CallInitialize, \
Enum, Underlying, ...) \
\
namespace better_enums_data_ ## Enum { \
\
BETTER_ENUMS_ID(GenerateSwitchType(Underlying, __VA_ARGS__)) \
\
} \
\
class Enum { \
private: \
typedef ::better_enums::optional<Enum> _optional; \
typedef ::better_enums::optional<std::size_t> _optional_index; \
\
public: \
typedef Underlying _integral; \
\
enum _enumerated SetUnderlyingType(Underlying) { __VA_ARGS__ }; \
\
BETTER_ENUMS_CONSTEXPR_ Enum(_enumerated value) : _value(value) { } \
\
BETTER_ENUMS_COPY_CONSTRUCTOR(Enum) \
\
BETTER_ENUMS_CONSTEXPR_ operator SwitchType(Enum)() const \
{ \
return SwitchType(Enum)(_value); \
} \
\
BETTER_ENUMS_CONSTEXPR_ _integral _to_integral() const; \
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ static Enum _from_integral(_integral value); \
) \
BETTER_ENUMS_CONSTEXPR_ static Enum \
_from_integral_unchecked(_integral value); \
BETTER_ENUMS_CONSTEXPR_ static _optional \
_from_integral_nothrow(_integral value); \
\
BETTER_ENUMS_CONSTEXPR_ std::size_t _to_index() const; \
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ static Enum _from_index(std::size_t value); \
) \
BETTER_ENUMS_CONSTEXPR_ static Enum \
_from_index_unchecked(std::size_t value); \
BETTER_ENUMS_CONSTEXPR_ static _optional \
_from_index_nothrow(std::size_t value); \
\
ToStringConstexpr const char* _to_string() const; \
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ static Enum _from_string(const char *name); \
) \
BETTER_ENUMS_CONSTEXPR_ static _optional \
_from_string_nothrow(const char *name); \
\
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ static Enum _from_string_nocase(const char *name); \
) \
BETTER_ENUMS_CONSTEXPR_ static _optional \
_from_string_nocase_nothrow(const char *name); \
\
BETTER_ENUMS_CONSTEXPR_ static bool _is_valid(_integral value); \
BETTER_ENUMS_CONSTEXPR_ static bool _is_valid(const char *name); \
BETTER_ENUMS_CONSTEXPR_ static bool _is_valid_nocase(const char *name); \
\
typedef ::better_enums::_iterable<Enum> _value_iterable; \
typedef ::better_enums::_iterable<const char*> _name_iterable; \
\
typedef _value_iterable::iterator _value_iterator; \
typedef _name_iterable::iterator _name_iterator; \
\
BETTER_ENUMS_CONSTEXPR_ static const std::size_t _size_constant = \
BETTER_ENUMS_ID(BETTER_ENUMS_PP_COUNT(__VA_ARGS__)); \
BETTER_ENUMS_CONSTEXPR_ static std::size_t _size() \
{ return _size_constant; } \
\
BETTER_ENUMS_CONSTEXPR_ static const char* _name(); \
BETTER_ENUMS_CONSTEXPR_ static _value_iterable _values(); \
ToStringConstexpr static _name_iterable _names(); \
\
_integral _value; \
\
BETTER_ENUMS_DEFAULT_CONSTRUCTOR(Enum) \
\
private: \
explicit BETTER_ENUMS_CONSTEXPR_ Enum(const _integral &value) : \
_value(value) { } \
\
DeclareInitialize \
\
BETTER_ENUMS_CONSTEXPR_ static _optional_index \
_from_value_loop(_integral value, std::size_t index = 0); \
BETTER_ENUMS_CONSTEXPR_ static _optional_index \
_from_string_loop(const char *name, std::size_t index = 0); \
BETTER_ENUMS_CONSTEXPR_ static _optional_index \
_from_string_nocase_loop(const char *name, std::size_t index = 0); \
\
friend struct ::better_enums::_initialize_at_program_start<Enum>; \
}; \
\
namespace better_enums_data_ ## Enum { \
\
static ::better_enums::_initialize_at_program_start<Enum> \
_force_initialization; \
\
enum _putNamesInThisScopeAlso { __VA_ARGS__ }; \
\
BETTER_ENUMS_CONSTEXPR_ const Enum _value_array[] = \
{ BETTER_ENUMS_ID(BETTER_ENUMS_EAT_ASSIGN(Enum, __VA_ARGS__)) }; \
\
BETTER_ENUMS_ID(GenerateStrings(Enum, __VA_ARGS__)) \
\
} \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline const Enum \
operator +(Enum::_enumerated enumerated) \
{ \
return static_cast<Enum>(enumerated); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional_index \
Enum::_from_value_loop(Enum::_integral value, std::size_t index) \
{ \
return \
index == _size() ? \
_optional_index() : \
BETTER_ENUMS_NS(Enum)::_value_array[index]._value == value ? \
_optional_index(index) : \
_from_value_loop(value, index + 1); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional_index \
Enum::_from_string_loop(const char *name, std::size_t index) \
{ \
return \
index == _size() ? _optional_index() : \
::better_enums::_names_match( \
BETTER_ENUMS_NS(Enum)::_raw_names()[index], name) ? \
_optional_index(index) : \
_from_string_loop(name, index + 1); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional_index \
Enum::_from_string_nocase_loop(const char *name, std::size_t index) \
{ \
return \
index == _size() ? _optional_index() : \
::better_enums::_names_match_nocase( \
BETTER_ENUMS_NS(Enum)::_raw_names()[index], name) ? \
_optional_index(index) : \
_from_string_nocase_loop(name, index + 1); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_integral Enum::_to_integral() const \
{ \
return _integral(_value); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline std::size_t Enum::_to_index() const \
{ \
return *_from_value_loop(_value); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum \
Enum::_from_index_unchecked(std::size_t index) \
{ \
return \
::better_enums::_or_zero(_from_index_nothrow(index)); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional \
Enum::_from_index_nothrow(std::size_t index) \
{ \
return \
index >= _size() ? \
_optional() : \
_optional(BETTER_ENUMS_NS(Enum)::_value_array[index]); \
} \
\
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ inline Enum Enum::_from_index(std::size_t index) \
{ \
return \
::better_enums::_or_throw(_from_index_nothrow(index), \
#Enum "::_from_index: invalid argument"); \
} \
) \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum \
Enum::_from_integral_unchecked(_integral value) \
{ \
return static_cast<_enumerated>(value); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional \
Enum::_from_integral_nothrow(_integral value) \
{ \
return \
::better_enums::_map_index<Enum>(BETTER_ENUMS_NS(Enum)::_value_array, \
_from_value_loop(value)); \
} \
\
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ inline Enum Enum::_from_integral(_integral value) \
{ \
return \
::better_enums::_or_throw(_from_integral_nothrow(value), \
#Enum "::_from_integral: invalid argument"); \
} \
) \
\
ToStringConstexpr inline const char* Enum::_to_string() const \
{ \
return \
::better_enums::_or_null( \
::better_enums::_map_index<const char*>( \
BETTER_ENUMS_NS(Enum)::_name_array(), \
_from_value_loop(CallInitialize(_value)))); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional \
Enum::_from_string_nothrow(const char *name) \
{ \
return \
::better_enums::_map_index<Enum>( \
BETTER_ENUMS_NS(Enum)::_value_array, _from_string_loop(name)); \
} \
\
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ inline Enum Enum::_from_string(const char *name) \
{ \
return \
::better_enums::_or_throw(_from_string_nothrow(name), \
#Enum "::_from_string: invalid argument"); \
} \
) \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_optional \
Enum::_from_string_nocase_nothrow(const char *name) \
{ \
return \
::better_enums::_map_index<Enum>(BETTER_ENUMS_NS(Enum)::_value_array, \
_from_string_nocase_loop(name)); \
} \
\
BETTER_ENUMS_IF_EXCEPTIONS( \
BETTER_ENUMS_CONSTEXPR_ inline Enum Enum::_from_string_nocase(const char *name)\
{ \
return \
::better_enums::_or_throw( \
_from_string_nocase_nothrow(name), \
#Enum "::_from_string_nocase: invalid argument"); \
} \
) \
\
BETTER_ENUMS_CONSTEXPR_ inline bool Enum::_is_valid(_integral value) \
{ \
return _from_value_loop(value); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline bool Enum::_is_valid(const char *name) \
{ \
return _from_string_loop(name); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline bool Enum::_is_valid_nocase(const char *name) \
{ \
return _from_string_nocase_loop(name); \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline const char* Enum::_name() \
{ \
return #Enum; \
} \
\
BETTER_ENUMS_CONSTEXPR_ inline Enum::_value_iterable Enum::_values() \
{ \
return _value_iterable(BETTER_ENUMS_NS(Enum)::_value_array, _size()); \
} \
\
ToStringConstexpr inline Enum::_name_iterable Enum::_names() \
{ \
return \
_name_iterable(BETTER_ENUMS_NS(Enum)::_name_array(), \
CallInitialize(_size())); \
} \
\
DefineInitialize(Enum) \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline bool operator ==(const Enum &a, const Enum &b) \
{ return a._to_integral() == b._to_integral(); } \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline bool operator !=(const Enum &a, const Enum &b) \
{ return a._to_integral() != b._to_integral(); } \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline bool operator <(const Enum &a, const Enum &b) \
{ return a._to_integral() < b._to_integral(); } \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline bool operator <=(const Enum &a, const Enum &b) \
{ return a._to_integral() <= b._to_integral(); } \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline bool operator >(const Enum &a, const Enum &b) \
{ return a._to_integral() > b._to_integral(); } \
\
BETTER_ENUMS_UNUSED BETTER_ENUMS_CONSTEXPR_ \
inline bool operator >=(const Enum &a, const Enum &b) \
{ return a._to_integral() >= b._to_integral(); } \
\
\
template <typename Char, typename Traits> \
std::basic_ostream<Char, Traits>& \
operator <<(std::basic_ostream<Char, Traits>& stream, const Enum &value) \
{ \
return stream << value._to_string(); \
} \
\
template <typename Char, typename Traits> \
std::basic_istream<Char, Traits>& \
operator >>(std::basic_istream<Char, Traits>& stream, Enum &value) \
{ \
std::basic_string<Char, Traits> buffer; \
\
stream >> buffer; \
::better_enums::optional<Enum> converted = \
Enum::_from_string_nothrow(buffer.c_str()); \
\
if (converted) \
value = *converted; \
else \
stream.setstate(std::basic_istream<Char, Traits>::failbit); \
\
return stream; \
}
// Enum feature options.
// C++98, C++11
#define BETTER_ENUMS_CXX98_UNDERLYING_TYPE(Underlying)
// C++11
#define BETTER_ENUMS_CXX11_UNDERLYING_TYPE(Underlying) \
: Underlying
#if defined(_MSC_VER) && _MSC_VER >= 1700
// VS 2012 and above fully support strongly typed enums and will warn about
// incorrect usage.
# define BETTER_ENUMS_LEGACY_UNDERLYING_TYPE(Underlying) \
BETTER_ENUMS_CXX11_UNDERLYING_TYPE(Underlying)
#else
# define BETTER_ENUMS_LEGACY_UNDERLYING_TYPE(Underlying) \
BETTER_ENUMS_CXX98_UNDERLYING_TYPE(Underlying)
#endif
// C++98, C++11
#define BETTER_ENUMS_REGULAR_ENUM_SWITCH_TYPE(Type) \
_enumerated
// C++11
#define BETTER_ENUMS_ENUM_CLASS_SWITCH_TYPE(Type) \
BETTER_ENUMS_NS(Type)::_enumClassForSwitchStatements
// C++98, C++11
#define BETTER_ENUMS_REGULAR_ENUM_SWITCH_TYPE_GENERATE(Underlying, ...)
// C++11
#define BETTER_ENUMS_ENUM_CLASS_SWITCH_TYPE_GENERATE(Underlying, ...) \
enum class _enumClassForSwitchStatements : Underlying { __VA_ARGS__ };
// C++98
#define BETTER_ENUMS_CXX98_TRIM_STRINGS_ARRAYS(Enum, ...) \
inline const char** _raw_names() \
{ \
static const char *value[] = \
{ BETTER_ENUMS_ID(BETTER_ENUMS_STRINGIZE(__VA_ARGS__)) }; \
return value; \
} \
\
inline char* _name_storage() \
{ \
static char storage[] = \
BETTER_ENUMS_ID(BETTER_ENUMS_RESERVE_STORAGE(__VA_ARGS__)); \
return storage; \
} \
\
inline const char** _name_array() \
{ \
static const char *value[Enum::_size_constant]; \
return value; \
} \
\
inline bool& _initialized() \
{ \
static bool value = false; \
return value; \
}
// C++11 fast version
#define BETTER_ENUMS_CXX11_PARTIAL_CONSTEXPR_TRIM_STRINGS_ARRAYS(Enum, ...) \
constexpr const char *_the_raw_names[] = \
{ BETTER_ENUMS_ID(BETTER_ENUMS_STRINGIZE(__VA_ARGS__)) }; \
\
constexpr const char * const * _raw_names() \
{ \
return _the_raw_names; \
} \
\
inline char* _name_storage() \
{ \
static char storage[] = \
BETTER_ENUMS_ID(BETTER_ENUMS_RESERVE_STORAGE(__VA_ARGS__)); \
return storage; \
} \
\
inline const char** _name_array() \
{ \
static const char *value[Enum::_size_constant]; \
return value; \
} \
\
inline bool& _initialized() \
{ \
static bool value = false; \
return value; \
}
// C++11 slow all-constexpr version
#define BETTER_ENUMS_CXX11_FULL_CONSTEXPR_TRIM_STRINGS_ARRAYS(Enum, ...) \
BETTER_ENUMS_ID(BETTER_ENUMS_TRIM_STRINGS(__VA_ARGS__)) \
\
constexpr const char * const _the_name_array[] = \
{ BETTER_ENUMS_ID(BETTER_ENUMS_REFER_TO_STRINGS(__VA_ARGS__)) }; \
\
constexpr const char * const * _name_array() \
{ \
return _the_name_array; \
} \
\
constexpr const char * const * _raw_names() \
{ \
return _the_name_array; \
}
// C++98, C++11 fast version
#define BETTER_ENUMS_NO_CONSTEXPR_TO_STRING_KEYWORD
// C++11 slow all-constexpr version
#define BETTER_ENUMS_CONSTEXPR_TO_STRING_KEYWORD \
constexpr
// C++98, C++11 fast version
#define BETTER_ENUMS_DO_DECLARE_INITIALIZE \
static int initialize();
// C++11 slow all-constexpr version
#define BETTER_ENUMS_DECLARE_EMPTY_INITIALIZE \
static int initialize() { return 0; }
// C++98, C++11 fast version
#define BETTER_ENUMS_DO_DEFINE_INITIALIZE(Enum) \
inline int Enum::initialize() \
{ \
if (BETTER_ENUMS_NS(Enum)::_initialized()) \
return 0; \
\
::better_enums::_trim_names(BETTER_ENUMS_NS(Enum)::_raw_names(), \
BETTER_ENUMS_NS(Enum)::_name_array(), \
BETTER_ENUMS_NS(Enum)::_name_storage(), \
_size()); \
\
BETTER_ENUMS_NS(Enum)::_initialized() = true; \
\
return 0; \
}
// C++11 slow all-constexpr version
#define BETTER_ENUMS_DO_NOT_DEFINE_INITIALIZE(Enum)
// C++98, C++11 fast version
#define BETTER_ENUMS_DO_CALL_INITIALIZE(value) \
::better_enums::continue_with(initialize(), value)
// C++11 slow all-constexpr version
#define BETTER_ENUMS_DO_NOT_CALL_INITIALIZE(value) \
value
// User feature selection.
#ifdef BETTER_ENUMS_STRICT_CONVERSION
# define BETTER_ENUMS_DEFAULT_SWITCH_TYPE \
BETTER_ENUMS_ENUM_CLASS_SWITCH_TYPE
# define BETTER_ENUMS_DEFAULT_SWITCH_TYPE_GENERATE \
BETTER_ENUMS_ENUM_CLASS_SWITCH_TYPE_GENERATE
#else
# define BETTER_ENUMS_DEFAULT_SWITCH_TYPE \
BETTER_ENUMS_REGULAR_ENUM_SWITCH_TYPE
# define BETTER_ENUMS_DEFAULT_SWITCH_TYPE_GENERATE \
BETTER_ENUMS_REGULAR_ENUM_SWITCH_TYPE_GENERATE
#endif
#ifndef BETTER_ENUMS_DEFAULT_CONSTRUCTOR
# define BETTER_ENUMS_DEFAULT_CONSTRUCTOR(Enum) \
private: \
Enum() : _value(0) { }
#endif
#ifdef BETTER_ENUMS_HAVE_CONSTEXPR
#ifdef BETTER_ENUMS_CONSTEXPR_TO_STRING
# define BETTER_ENUMS_DEFAULT_TRIM_STRINGS_ARRAYS \
BETTER_ENUMS_CXX11_FULL_CONSTEXPR_TRIM_STRINGS_ARRAYS
# define BETTER_ENUMS_DEFAULT_TO_STRING_KEYWORD \
BETTER_ENUMS_CONSTEXPR_TO_STRING_KEYWORD
# define BETTER_ENUMS_DEFAULT_DECLARE_INITIALIZE \
BETTER_ENUMS_DECLARE_EMPTY_INITIALIZE
# define BETTER_ENUMS_DEFAULT_DEFINE_INITIALIZE \
BETTER_ENUMS_DO_NOT_DEFINE_INITIALIZE
# define BETTER_ENUMS_DEFAULT_CALL_INITIALIZE \
BETTER_ENUMS_DO_NOT_CALL_INITIALIZE
#else
# define BETTER_ENUMS_DEFAULT_TRIM_STRINGS_ARRAYS \
BETTER_ENUMS_CXX11_PARTIAL_CONSTEXPR_TRIM_STRINGS_ARRAYS
# define BETTER_ENUMS_DEFAULT_TO_STRING_KEYWORD \
BETTER_ENUMS_NO_CONSTEXPR_TO_STRING_KEYWORD
# define BETTER_ENUMS_DEFAULT_DECLARE_INITIALIZE \
BETTER_ENUMS_DO_DECLARE_INITIALIZE
# define BETTER_ENUMS_DEFAULT_DEFINE_INITIALIZE \
BETTER_ENUMS_DO_DEFINE_INITIALIZE
# define BETTER_ENUMS_DEFAULT_CALL_INITIALIZE \
BETTER_ENUMS_DO_CALL_INITIALIZE
#endif
// Top-level macros.
#define BETTER_ENUM(Enum, Underlying, ...) \
BETTER_ENUMS_ID(BETTER_ENUMS_TYPE( \
BETTER_ENUMS_CXX11_UNDERLYING_TYPE, \
BETTER_ENUMS_DEFAULT_SWITCH_TYPE, \
BETTER_ENUMS_DEFAULT_SWITCH_TYPE_GENERATE, \
BETTER_ENUMS_DEFAULT_TRIM_STRINGS_ARRAYS, \
BETTER_ENUMS_DEFAULT_TO_STRING_KEYWORD, \
BETTER_ENUMS_DEFAULT_DECLARE_INITIALIZE, \
BETTER_ENUMS_DEFAULT_DEFINE_INITIALIZE, \
BETTER_ENUMS_DEFAULT_CALL_INITIALIZE, \
Enum, Underlying, __VA_ARGS__))
#define SLOW_ENUM(Enum, Underlying, ...) \
BETTER_ENUMS_ID(BETTER_ENUMS_TYPE( \
BETTER_ENUMS_CXX11_UNDERLYING_TYPE, \
BETTER_ENUMS_DEFAULT_SWITCH_TYPE, \
BETTER_ENUMS_DEFAULT_SWITCH_TYPE_GENERATE, \
BETTER_ENUMS_CXX11_FULL_CONSTEXPR_TRIM_STRINGS_ARRAYS, \
BETTER_ENUMS_CONSTEXPR_TO_STRING_KEYWORD, \
BETTER_ENUMS_DECLARE_EMPTY_INITIALIZE, \
BETTER_ENUMS_DO_NOT_DEFINE_INITIALIZE, \
BETTER_ENUMS_DO_NOT_CALL_INITIALIZE, \
Enum, Underlying, __VA_ARGS__))
#else
#define BETTER_ENUM(Enum, Underlying, ...) \
BETTER_ENUMS_ID(BETTER_ENUMS_TYPE( \
BETTER_ENUMS_LEGACY_UNDERLYING_TYPE, \
BETTER_ENUMS_DEFAULT_SWITCH_TYPE, \
BETTER_ENUMS_DEFAULT_SWITCH_TYPE_GENERATE, \
BETTER_ENUMS_CXX98_TRIM_STRINGS_ARRAYS, \
BETTER_ENUMS_NO_CONSTEXPR_TO_STRING_KEYWORD, \
BETTER_ENUMS_DO_DECLARE_INITIALIZE, \
BETTER_ENUMS_DO_DEFINE_INITIALIZE, \
BETTER_ENUMS_DO_CALL_INITIALIZE, \
Enum, Underlying, __VA_ARGS__))
#endif
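// Illustrative usage of BETTER_ENUM above -- a minimal sketch, shown as a
// comment so it does not inject symbols into every translation unit; the
// enum name and constants are hypothetical:
//
//     BETTER_ENUM(Channel, int, Red = 1, Green, Blue)
//
//     Channel     c = Channel::Green;
//     const char *s = c._to_string();            // "Green"
//     int         n = c._to_integral();          // 2
//     Channel     r = Channel::_from_string("Red");
//     for (Channel v : Channel::_values()) { /* iterate all constants */ }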
namespace better_enums {
// Maps.
template <typename T>
struct map_compare {
BETTER_ENUMS_CONSTEXPR_ static bool less(const T& a, const T& b)
{ return a < b; }
};
template <>
struct map_compare<const char*> {
BETTER_ENUMS_CONSTEXPR_ static bool less(const char *a, const char *b)
{ return less_loop(a, b); }
private:
BETTER_ENUMS_CONSTEXPR_ static bool
less_loop(const char *a, const char *b, size_t index = 0)
{
return
a[index] != b[index] ? a[index] < b[index] :
a[index] == '\0' ? false :
less_loop(a, b, index + 1);
}
};
template <>
struct map_compare<const wchar_t*> {
BETTER_ENUMS_CONSTEXPR_ static bool less(const wchar_t *a, const wchar_t *b)
{ return less_loop(a, b); }
private:
BETTER_ENUMS_CONSTEXPR_ static bool
less_loop(const wchar_t *a, const wchar_t *b, size_t index = 0)
{
return
a[index] != b[index] ? a[index] < b[index] :
a[index] == L'\0' ? false :
less_loop(a, b, index + 1);
}
};
template <typename Enum, typename T, typename Compare = map_compare<T> >
struct map {
typedef T (*function)(Enum);
BETTER_ENUMS_CONSTEXPR_ explicit map(function f) : _f(f) { }
BETTER_ENUMS_CONSTEXPR_ T from_enum(Enum value) const { return _f(value); }
BETTER_ENUMS_CONSTEXPR_ T operator [](Enum value) const
{ return _f(value); }
BETTER_ENUMS_CONSTEXPR_ Enum to_enum(T value) const
{
return
_or_throw(to_enum_nothrow(value), "map::to_enum: invalid argument");
}
BETTER_ENUMS_CONSTEXPR_ optional<Enum>
to_enum_nothrow(T value, size_t index = 0) const
{
return
index >= Enum::_size() ? optional<Enum>() :
Compare::less(_f(Enum::_values()[index]), value) ||
Compare::less(value, _f(Enum::_values()[index])) ?
to_enum_nothrow(value, index + 1) :
Enum::_values()[index];
}
private:
const function _f;
};
template <typename Enum, typename T>
BETTER_ENUMS_CONSTEXPR_ map<Enum, T> make_map(T (*f)(Enum))
{
return map<Enum, T>(f);
}
}
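// Illustrative use of better_enums::map / make_map above -- a sketch with
// hypothetical names, kept as a comment:
//
//     BETTER_ENUM(Channel, int, Red, Green, Blue)
//
//     const char* describe(Channel c) { return c._to_string(); }
//
//     const auto channel_names = better_enums::make_map(describe);
//     // channel_names[Channel::Red]    == "Red"
//     // channel_names.to_enum("Green") == Channel::Green
//     //   (to_enum throws on no match when exceptions are enabled)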
#endif // #ifndef BETTER_ENUMS_ENUM_H
|
# Generated by Django 2.2.6 on 2019-11-29 20:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='assets',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""nt2 tree
"""
__author__ = "Lapreste Jean-thierry (lapreste@univ-bpclermont.fr)"
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2010 $"
__copyright__ = "Copyleft (c) 2010 Jean-thierry Lapreste"
__license__ = "Python"
import os
import sys
import re
from mylogging import Mylogging
from nt2_env import nt2_dir
from archi import Sse, Vmx
class Nt2 :
""" This class contains info about the nt2
directory tree"""
Std_paths_from_nt2 = {
'toolbox' : "nt2/toolbox/",
'core' : "nt2/core/",
'sdk' : "nt2/sdk/"
}
## Std_arbo = [
## ["include",
## "src",
## {"doc" : ["source"] },
## {"unit": ["scalar","simd"] },
## {"bench": ["scalar","simd"] },
## {"function" :
## ["scalar",
## {"simd" :
## ["common",
## {"vmx" : Vmx.Variants.keys() },
## {"sse" : Sse.Variants.keys() }
## ]
## }
## ]
## }
## ]
## ]
## Std_actions = {
## "bench": ["bench", 'std', 'bench' ],
## "doc" : ["doc", 'std', 'doc/source/' ],
## "include" : ["include", 'std', 'include/' ],
## "function" : ["define", 'std', 'function/' ],
## "scalar" : ["scalar", 'std', 'function/scalar/' ],
## "common" : ["common", 'std', 'function/simd/common/' ],
## "vmx" : ["hierarchy", 'hie', 'function/simd/vmx/', Vmx ],
## "sse" : ["hierarchy", 'hie', 'function/simd/sse/', Sse ]
## }
def __init__(self,
mode='toolbox',
arbo=None,
actions = None) :
self.logger = Mylogging("nt2.nt2_base.Nt2")
self.__mode = mode
self.__pathfnt2 = self.path_from_nt2(self.__mode)
# self.__actions = (Nt2.Std_actions if actions is None else actions)
# self.__arbo = (Nt2.Std_arbo if arbo is None else arbo)
self.__path2nt2=nt2_dir()
def get_path_from_nt2(self) : return self.__pathfnt2
def get_path_to_nt2 (self) : return self.__path2nt2
def get_pathfnt2(self) : return self.__pathfnt2
def get_path2nt2(self) : return self.__path2nt2
def path_from_nt2(self,mode) :
return (Nt2.Std_paths_from_nt2[mode]
if (mode in Nt2.Std_paths_from_nt2)
else 'nt2/'+mode+'/')
def absolute_path_to_mode (self,mode = None) :
if mode is None : mode = self.get_mode()
return os.path.join(self.__path2nt2,
self.path_from_nt2(mode))
def get_mode(self) : return self.__mode
    # def get_arbo(self) : return self.__arbo  # disabled: __arbo is never assigned in __init__
# def get_actions(self) : return self.__actions
if __name__ == "__main__":
Mylogging.set_level('INFO')
nt2 = Nt2()
print "nt2.get_path_from_nt2 %s" %nt2.get_path_from_nt2()
print "nt2.get_path_to_nt2 %s" %nt2.get_path_to_nt2()
print "nt2.absolute_path_to_mode %s" %nt2.absolute_path_to_mode()
print "nt2.absolute_path_to_mode('zut') %s" %nt2.absolute_path_to_mode('zut')
print "nt2.path_from_nt2('zut') %s" %nt2.path_from_nt2('zut')
|
import { create } from '@storybook/theming/create';
export default create({
base: 'light',
colorPrimary: 'hotpink',
colorSecondary: 'rgb(30, 167, 253)',
// UI
appBg: 'white',
appContentBg: 'white',
appBorderColor: 'gainsboro',
appBorderRadius: 4,
// Typography
fontBase: '"Open Sans", sans-serif',
fontCode: 'monospace',
// Text colors
textColor: 'black',
textInverseColor: 'rgba(255,255,255,0.9)',
// Toolbar default and active colors
barTextColor: 'silver',
barSelectedColor: 'rgb(30, 167, 253)',
barBg: 'white',
// Form colors
inputBg: 'white',
inputBorder: 'silver',
inputTextColor: 'black',
inputBorderRadius: 4,
brandTitle: 'Radial Design System',
brandUrl: 'https://nypr-design-system.nypr.digital/',
brandImage: './images/radial_logo.svg',
});
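// Illustrative wiring of this theme -- a sketch assuming a Storybook 6 style
// .storybook/manager.js entry (the './theme' path is hypothetical):
//
//     import { addons } from '@storybook/addons';
//     import theme from './theme';
//
//     addons.setConfig({ theme });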
|
'use strict';
/**
* Requirements
* @ignore
*/
const Filter = require('./Filter.js').Filter;
const uppercaseFirst = require('../../utils/string.js').uppercaseFirst;
const lorem = require('lorem-ipsum');
/**
* Generates random lorem ipsum text.
*
* @memberOf nunjucks.filter
*/
class LipsumFilter extends Filter
{
/**
* @inheritDoc
*/
constructor()
{
super();
this._name = 'lipsum';
}
/**
* @inheritDoc
*/
static get className()
{
return 'nunjucks.filter/LipsumFilter';
}
/**
* @inheritDoc
*/
filter(value)
{
const scope = this;
return function (value, unit, minCount, maxCount)
{
// Prepare
const options =
{
units: 'words',
count: 1
};
// Unit
if (unit == 's')
{
options.units = 'sentences';
}
if (unit == 'p')
{
options.units = 'paragraphs';
}
            // Count: pick a whole number of units between min and max
            const min = minCount || 1;
            const max = maxCount || 10;
            options.count = Math.round(min + ((max - min) * Math.random()));
// Generate
return scope.applyCallbacks(uppercaseFirst(lorem(options)), arguments, options);
};
}
}
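// Illustrative template usage -- a sketch; how the filter is registered with
// a nunjucks environment depends on the host application:
//
//     {{ ''|lipsum() }}           one to ten random words
//     {{ ''|lipsum('s', 2, 4) }}  two to four sentences
//     {{ ''|lipsum('p', 1, 2) }}  one or two paragraphs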
/**
* Exports
* @ignore
*/
module.exports.LipsumFilter = LipsumFilter;
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for android buildbot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
_DELETIONS_ONLY_FILES = (
'build/android/findbugs_filter/findbugs_known_bugs.txt',
)
def _CheckDeletionsOnlyFiles(input_api, output_api):
"""Check that a certain listed files only have deletions.
"""
warnings = []
for f in input_api.AffectedFiles():
if f.LocalPath() in _DELETIONS_ONLY_FILES:
if f.ChangedContents():
warnings.append(f.LocalPath())
results = []
if warnings:
results.append(output_api.PresubmitPromptWarning(
        'The following files should only contain deletions.', warnings))
return results
def CommonChecks(input_api, output_api):
output = []
def J(*dirs):
"""Returns a path relative to presubmit directory."""
return input_api.os_path.join(input_api.PresubmitLocalPath(), *dirs)
output.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=[r'pylib/symbols/.*\.py$', r'gyp/.*\.py$', r'gn/.*\.py'],
extra_paths_list=[
J(), J('..', '..', 'third_party', 'android_testrunner'),
J('buildbot')]))
output.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
white_list=[r'gyp/.*\.py$', r'gn/.*\.py'],
extra_paths_list=[J('gyp'), J('gn')]))
# Disabled due to http://crbug.com/410936
#output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
#input_api, output_api, J('buildbot', 'tests')))
pylib_test_env = dict(input_api.environ)
pylib_test_env.update({
'PYTHONPATH': input_api.PresubmitLocalPath(),
'PYTHONDONTWRITEBYTECODE': '1',
})
output.extend(input_api.canned_checks.RunUnitTests(
input_api,
output_api,
unit_tests=[
J('pylib', 'device', 'device_utils_test.py'),
J('pylib', 'gtest', 'test_package_test.py'),
J('pylib', 'instrumentation', 'test_runner_test.py'),
],
env=pylib_test_env))
output.extend(_CheckDeletionsOnlyFiles(input_api, output_api))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
|
import React from 'react';
import demoState from './demo-state';
class Loading extends React.Component {
render() {
if (!this.props.active) return null;
return (
<div
style={{
position: 'absolute',
top: 0,
bottom: 0,
left: 0,
right: 0,
padding: '1em',
background: 'rgba(255, 255, 255, 0.7)',
fontWeight: 'bold'
}}
>
Waiting ...
</div>
);
}
}
function selectProps(state) {
return {
active: state.loading
};
}
export default demoState.connect(selectProps)(Loading);
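// Illustrative usage -- a sketch: render <Loading /> inside a relatively
// positioned container; the overlay appears whenever `loading` is true in
// the shared demo state, e.g.
//
//     <div style={{ position: 'relative' }}>
//       <ResultsTable />   {/* hypothetical sibling */}
//       <Loading />
//     </div>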
|
# -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '../../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'essentials_cookbook_versions'
# General information about the project.
project = u'Cookbook Versions'
copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# highlight_language = 'ruby'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# A string of reStructuredText that will be included at the beginning of every source file that is read.
rst_prolog = """
.. include:: ../../swaps/swap_desc_a.txt
.. include:: ../../swaps/swap_desc_b.txt
.. include:: ../../swaps/swap_desc_c.txt
.. include:: ../../swaps/swap_desc_d.txt
.. include:: ../../swaps/swap_desc_e.txt
.. include:: ../../swaps/swap_desc_f.txt
.. include:: ../../swaps/swap_desc_g.txt
.. include:: ../../swaps/swap_desc_h.txt
.. include:: ../../swaps/swap_desc_i.txt
.. include:: ../../swaps/swap_desc_j.txt
.. include:: ../../swaps/swap_desc_k.txt
.. include:: ../../swaps/swap_desc_l.txt
.. include:: ../../swaps/swap_desc_m.txt
.. include:: ../../swaps/swap_desc_n.txt
.. include:: ../../swaps/swap_desc_o.txt
.. include:: ../../swaps/swap_desc_p.txt
.. include:: ../../swaps/swap_desc_q.txt
.. include:: ../../swaps/swap_desc_r.txt
.. include:: ../../swaps/swap_desc_s.txt
.. include:: ../../swaps/swap_desc_t.txt
.. include:: ../../swaps/swap_desc_u.txt
.. include:: ../../swaps/swap_desc_v.txt
.. include:: ../../swaps/swap_desc_w.txt
.. include:: ../../swaps/swap_desc_x.txt
.. include:: ../../swaps/swap_desc_y.txt
.. include:: ../../swaps/swap_desc_z.txt
.. include:: ../../swaps/swap_http.txt
.. include:: ../../swaps/swap_names.txt
.. include:: ../../swaps/swap_notes.txt
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'opscode'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../_themes/']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Cookbook Versions"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../images/opscode_chef_html_logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "opscode.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'relations.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'search': 'opscode_search.html',
}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cookbook Versions'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Cookbook Versions.tex', u'Cookbook Versions',
u'Opscode, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "../../images/opscode_color_text.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Cookbook Versions', u'Cookbook Versions',
[u'Opscode, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Cookbook Versions', u'Cookbook Versions',
u'Opscode, Inc.', 'Cookbook Versions', 'Cookbook Versions',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Cookbook Versions'
epub_author = u'Opscode, Inc.'
epub_publisher = u'Opscode, Inc.'
epub_copyright = u'2012, Opscode, Inc.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
#ifndef _SYSTEMCALLS_H
#define _SYSTEMCALLS_H
#include "types.h"
#include "systemcall_handler.h"
#include "filesystem.h"
#include "paging.h"
#include "rtc.h"
#include "x86_desc.h"
#include "exception_handler.h"
#define SPACE 32 /* Ascii value for space (' ') */
#define ELF_LENGTH 3 /* 3 bytes long, one per character in "ELF" */
#define ELF_OFFSET 1 /* first character is always 'del' before "ELF" */
#define PAGE_DIR_MASK 0xFFC00000 /* Mask to get just the highest 10 bits (page dir offset) of the address*/
#define _8KB_ 0x00002000 /* 8KB = 8192 bytes */
#define PID_SIZE 8 /* Number of possible PIDs in our OS */
#define EXCEPTION_OCCURRED 256 /* Signifies exception occurred */
#define MAX_PROC 6 /* Maximum number of processes that can run at once */
/* dummy function returns -1 for terminal_open */
int32_t bad_call_open(const uint8_t* filename);
/* dummy function returns -1 for terminal_close */
int32_t bad_call_close(int32_t fd);
/* exits the current process running */
int32_t halt (uint8_t status);
/* executes programs */
int32_t execute(const uint8_t* command);
/* [helper function] parses the command into separate filename and argument buffers */
void execute_parse_args(uint8_t* filename_buf, uint8_t* args_buf, const uint8_t* command);
/* [helper function] checks elf file to see if executable program*/
int32_t execute_executable_check(uint8_t* filename);
/* [helper function] finds next available PID for new PCB */
int8_t execute_find_pid();
/* [helper function] sets up correct paging for shell / user function */
int32_t execute_program_paging(int8_t new_pid);
/* [helper function] maps the current program from virtual to physical memory */
void execute_user_level_program_loader();
/* [helper function] creates a new pcb for a new process */
int32_t execute_create_pcb(dentry_t* dentry, uint8_t* filename, uint8_t* args, int8_t new_pid);
/* [helper function] context switch (fool IRET) to run other process*/
int32_t execute_context_switch(uint8_t* filename);
/* Finds the file in the file system and assigns it an unassigned file descriptor */
int32_t open (const uint8_t* filename);
/* syscall function reads file based on the fd */
int32_t read (int32_t fd, void* buf, int32_t nbytes);
/* write system call, calls file specific write */
int32_t write (int32_t fd, const void* buf, int32_t nbytes);
/* Close the file descriptor passed in and set it to be available */
int32_t close (int32_t fd);
/* returns arguments passed to executable */
int32_t getargs (uint8_t* buf, int32_t nbytes);
/* sets a pointer to video memory */
int32_t vidmap (uint8_t** screen_start);
/* EXTRA CREDIT */
int32_t set_handler (int32_t signum, void* handler_address);
/* EXTRA CREDIT */
int32_t sigreturn (void);
#endif /* _SYSTEMCALLS_H */
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class TaskResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, data=None, status=None, date_add=None):
"""
TaskResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'data': 'str',
'status': 'str',
'date_add': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'data': 'data',
'status': 'status',
'date_add': 'date_add'
}
self._id = id
self._name = name
self._data = data
self._status = status
self._date_add = date_add
@property
def id(self):
"""
Gets the id of this TaskResponse.
:return: The id of this TaskResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this TaskResponse.
:param id: The id of this TaskResponse.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this TaskResponse.
:return: The name of this TaskResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TaskResponse.
:param name: The name of this TaskResponse.
:type: str
"""
self._name = name
@property
def data(self):
"""
Gets the data of this TaskResponse.
:return: The data of this TaskResponse.
:rtype: str
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this TaskResponse.
:param data: The data of this TaskResponse.
:type: str
"""
self._data = data
@property
def status(self):
"""
Gets the status of this TaskResponse.
:return: The status of this TaskResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this TaskResponse.
:param status: The status of this TaskResponse.
:type: str
"""
self._status = status
@property
def date_add(self):
"""
Gets the date_add of this TaskResponse.
:return: The date_add of this TaskResponse.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this TaskResponse.
:param date_add: The date_add of this TaskResponse.
:type: str
"""
self._date_add = date_add
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
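# Illustrative construction -- a sketch with hypothetical field values:
#
#     task = TaskResponse(id=1, name="encode", status="pending")
#     task.to_dict()
#     # -> {'id': 1, 'name': 'encode', 'data': None,
#     #     'status': 'pending', 'date_add': None}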
|
from django.urls import path
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'domain', views.DomainViewSet)
router.register(r'subdomain', views.SubdomainViewSet)
router.register(r'indicator', views.IndicatorViewSet)
router.register(r'data-viz', views.DataVizViewSet)
router.register(r'time-axis', views.TimeAxisViewSet)
router.register(r'variable', views.VariableViewSet)
urlpatterns = router.urls + [
path('map_layer/<slug:geog_type_id>:<int:data_viz_id>:<int:variable_id>.geojson', views.GeoJSONWithDataView.as_view()),
# path('tiles/<slug:geog_type_id>/<int:data_viz_id>/<int:variable_id>/<int:zoom>/<int:x>/<int:y>.mvt',
# views.mvt_tiles),
]
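# Illustrative request path matched by the GeoJSONWithDataView route above
# (a sketch; the geography type and ids are hypothetical):
#
#     /map_layer/tract:12:34.geojson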
|
# -*- coding: utf-8 -*-
"""
tests.http
~~~~~~~~~~
HTTP parsing utilities.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
from datetime import datetime
from tests import strict_eq
from werkzeug._compat import itervalues, wsgi_encoding_dance
from werkzeug import http, datastructures
from werkzeug.test import create_environ
class TestHTTPUtility(object):
def test_accept(self):
a = http.parse_accept_header('en-us,ru;q=0.5')
assert list(itervalues(a)) == ['en-us', 'ru']
assert a.best == 'en-us'
assert a.find('ru') == 1
pytest.raises(ValueError, a.index, 'de')
assert a.to_header() == 'en-us,ru;q=0.5'
def test_mime_accept(self):
a = http.parse_accept_header('text/xml,application/xml,'
'application/xhtml+xml,'
'application/foo;quiet=no; bar=baz;q=0.6,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png,*/*;q=0.5',
datastructures.MIMEAccept)
pytest.raises(ValueError, lambda: a['missing'])
assert a['image/png'] == 1
assert a['text/plain'] == 0.8
assert a['foo/bar'] == 0.5
assert a['application/foo;quiet=no; bar=baz'] == 0.6
assert a[a.find('foo/bar')] == ('*/*', 0.5)
def test_accept_matches(self):
a = http.parse_accept_header('text/xml,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png', datastructures.MIMEAccept)
assert a.best_match(['text/html', 'application/xhtml+xml']) == \
'application/xhtml+xml'
assert a.best_match(['text/html']) == 'text/html'
assert a.best_match(['foo/bar']) is None
assert a.best_match(['foo/bar', 'bar/foo'], default='foo/bar') == 'foo/bar'
assert a.best_match(['application/xml', 'text/xml']) == 'application/xml'
def test_charset_accept(self):
a = http.parse_accept_header('ISO-8859-1,utf-8;q=0.7,*;q=0.7',
datastructures.CharsetAccept)
assert a['iso-8859-1'] == a['iso8859-1']
assert a['iso-8859-1'] == 1
assert a['UTF8'] == 0.7
assert a['ebcdic'] == 0.7
def test_language_accept(self):
a = http.parse_accept_header('de-AT,de;q=0.8,en;q=0.5',
datastructures.LanguageAccept)
assert a.best == 'de-AT'
assert 'de_AT' in a
assert 'en' in a
assert a['de-at'] == 1
assert a['en'] == 0.5
def test_set_header(self):
hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe')
assert 'blah baz' in hs
assert 'foobar' not in hs
assert 'foo' in hs
assert list(hs) == ['foo', 'Bar', 'Blah baz', 'Hehe']
hs.add('Foo')
assert hs.to_header() == 'foo, Bar, "Blah baz", Hehe'
def test_list_header(self):
hl = http.parse_list_header('foo baz, blah')
assert hl == ['foo baz', 'blah']
def test_dict_header(self):
d = http.parse_dict_header('foo="bar baz", blah=42')
assert d == {'foo': 'bar baz', 'blah': '42'}
def test_cache_control_header(self):
cc = http.parse_cache_control_header('max-age=0, no-cache')
assert cc.max_age == 0
assert cc.no_cache
cc = http.parse_cache_control_header('private, community="UCI"', None,
datastructures.ResponseCacheControl)
assert cc.private
assert cc['community'] == 'UCI'
c = datastructures.ResponseCacheControl()
assert c.no_cache is None
assert c.private is None
c.no_cache = True
assert c.no_cache == '*'
c.private = True
assert c.private == '*'
del c.private
assert c.private is None
assert c.to_header() == 'no-cache'
def test_authorization_header(self):
a = http.parse_authorization_header('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
assert a.type == 'basic'
assert a.username == 'Aladdin'
assert a.password == 'open sesame'
a = http.parse_authorization_header('''Digest username="Mufasa",
realm="testrealm@host.invalid",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
qop=auth,
nc=00000001,
cnonce="0a4f113b",
response="6629fae49393a05397450978507c4ef1",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert a.type == 'digest'
assert a.username == 'Mufasa'
assert a.realm == 'testrealm@host.invalid'
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert a.uri == '/dir/index.html'
assert 'auth' in a.qop
assert a.nc == '00000001'
assert a.cnonce == '0a4f113b'
assert a.response == '6629fae49393a05397450978507c4ef1'
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
a = http.parse_authorization_header('''Digest username="Mufasa",
realm="testrealm@host.invalid",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
response="e257afa1414a3340d93d30955171dd0e",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert a.type == 'digest'
assert a.username == 'Mufasa'
assert a.realm == 'testrealm@host.invalid'
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert a.uri == '/dir/index.html'
assert a.response == 'e257afa1414a3340d93d30955171dd0e'
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
assert http.parse_authorization_header('') is None
assert http.parse_authorization_header(None) is None
assert http.parse_authorization_header('foo') is None
def test_www_authenticate_header(self):
wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"')
assert wa.type == 'basic'
assert wa.realm == 'WallyWorld'
wa.realm = 'Foo Bar'
assert wa.to_header() == 'Basic realm="Foo Bar"'
wa = http.parse_www_authenticate_header('''Digest
realm="testrealm@host.com",
qop="auth,auth-int",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert wa.type == 'digest'
assert wa.realm == 'testrealm@host.com'
assert 'auth' in wa.qop
assert 'auth-int' in wa.qop
assert wa.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert wa.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
wa = http.parse_www_authenticate_header('broken')
assert wa.type == 'broken'
assert not http.parse_www_authenticate_header('').type
assert not http.parse_www_authenticate_header('')
def test_etags(self):
assert http.quote_etag('foo') == '"foo"'
assert http.quote_etag('foo', True) == 'W/"foo"'
assert http.unquote_etag('"foo"') == ('foo', False)
assert http.unquote_etag('W/"foo"') == ('foo', True)
es = http.parse_etags('"foo", "bar", W/"baz", blar')
assert sorted(es) == ['bar', 'blar', 'foo']
assert 'foo' in es
assert 'baz' not in es
assert es.contains_weak('baz')
assert 'blar' in es
assert es.contains_raw('W/"baz"')
assert es.contains_raw('"foo"')
assert sorted(es.to_header().split(', ')) == ['"bar"', '"blar"', '"foo"', 'W/"baz"']
def test_etags_nonzero(self):
etags = http.parse_etags('W/"foo"')
assert bool(etags)
assert etags.contains_raw('W/"foo"')
def test_parse_date(self):
assert http.parse_date('Sun, 06 Nov 1994 08:49:37 GMT ') == datetime(
1994, 11, 6, 8, 49, 37)
assert http.parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date(' Sun Nov 6 08:49:37 1994') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date('foo') is None
def test_parse_date_overflows(self):
assert http.parse_date(' Sun 02 Feb 1343 08:49:37 GMT') == datetime(1343, 2, 2, 8, 49, 37)
assert http.parse_date('Thu, 01 Jan 1970 00:00:00 GMT') == datetime(1970, 1, 1, 0, 0)
assert http.parse_date('Thu, 33 Jan 1970 00:00:00 GMT') is None
def test_remove_entity_headers(self):
now = http.http_date()
headers1 = [('Date', now), ('Content-Type', 'text/html'), ('Content-Length', '0')]
headers2 = datastructures.Headers(headers1)
http.remove_entity_headers(headers1)
assert headers1 == [('Date', now)]
http.remove_entity_headers(headers2)
assert headers2 == datastructures.Headers([(u'Date', now)])
def test_remove_hop_by_hop_headers(self):
headers1 = [('Connection', 'closed'), ('Foo', 'bar'),
('Keep-Alive', 'wtf')]
headers2 = datastructures.Headers(headers1)
http.remove_hop_by_hop_headers(headers1)
assert headers1 == [('Foo', 'bar')]
http.remove_hop_by_hop_headers(headers2)
assert headers2 == datastructures.Headers([('Foo', 'bar')])
def test_parse_options_header(self):
assert http.parse_options_header(None) == \
('', {})
assert http.parse_options_header("") == \
('', {})
assert http.parse_options_header(r'something; foo="other\"thing"') == \
('something', {'foo': 'other"thing'})
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == \
('something', {'foo': 'other"thing', 'meh': '42'})
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42; bleh') == \
('something', {'foo': 'other"thing', 'meh': '42', 'bleh': None})
assert http.parse_options_header('something; foo="other;thing"; meh=42; bleh') == \
('something', {'foo': 'other;thing', 'meh': '42', 'bleh': None})
assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == \
('something', {'foo': 'otherthing', 'meh': None, 'bleh': None})
# Issue #404
assert http.parse_options_header('multipart/form-data; name="foo bar"; '
'filename="bar foo"') == \
('multipart/form-data', {'name': 'foo bar', 'filename': 'bar foo'})
# Examples from RFC
assert http.parse_options_header('audio/*; q=0.2, audio/basic') == \
('audio/*', {'q': '0.2'})
assert http.parse_options_header('audio/*; q=0.2, audio/basic', multiple=True) == \
('audio/*', {'q': '0.2'}, "audio/basic", {})
assert http.parse_options_header(
'text/plain; q=0.5, text/html\n '
'text/x-dvi; q=0.8, text/x-c',
multiple=True) == \
('text/plain', {'q': '0.5'}, "text/html", {},
"text/x-dvi", {'q': '0.8'}, "text/x-c", {})
assert http.parse_options_header('text/plain; q=0.5, text/html\n'
' '
'text/x-dvi; q=0.8, text/x-c') == \
('text/plain', {'q': '0.5'})
def test_dump_options_header(self):
assert http.dump_options_header('foo', {'bar': 42}) == \
'foo; bar=42'
assert http.dump_options_header('foo', {'bar': 42, 'fizz': None}) in \
('foo; bar=42; fizz', 'foo; fizz; bar=42')
def test_dump_header(self):
assert http.dump_header([1, 2, 3]) == '1, 2, 3'
assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"'
assert http.dump_header({'foo': 'bar'}, allow_token=False) == 'foo="bar"'
assert http.dump_header({'foo': 'bar'}) == 'foo=bar'
def test_is_resource_modified(self):
env = create_environ()
# ignore POST
env['REQUEST_METHOD'] = 'POST'
assert not http.is_resource_modified(env, etag='testing')
env['REQUEST_METHOD'] = 'GET'
# etagify from data
pytest.raises(TypeError, http.is_resource_modified, env,
data='42', etag='23')
env['HTTP_IF_NONE_MATCH'] = http.generate_etag(b'awesome')
assert not http.is_resource_modified(env, data=b'awesome')
env['HTTP_IF_MODIFIED_SINCE'] = http.http_date(datetime(2008, 1, 1, 12, 30))
assert not http.is_resource_modified(env,
last_modified=datetime(2008, 1, 1, 12, 00))
assert http.is_resource_modified(env,
last_modified=datetime(2008, 1, 1, 13, 00))
def test_date_formatting(self):
assert http.cookie_date(0) == 'Thu, 01-Jan-1970 00:00:00 GMT'
assert http.cookie_date(datetime(1970, 1, 1)) == 'Thu, 01-Jan-1970 00:00:00 GMT'
assert http.http_date(0) == 'Thu, 01 Jan 1970 00:00:00 GMT'
assert http.http_date(datetime(1970, 1, 1)) == 'Thu, 01 Jan 1970 00:00:00 GMT'
def test_cookies(self):
strict_eq(
dict(http.parse_cookie('dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cd'
'c762809248d4beed; a=42; b="\\\";"')),
{
'CP': u'null*',
'PHPSESSID': u'0a539d42abc001cdc762809248d4beed',
'a': u'42',
'dismiss-top': u'6',
'b': u'\";'
}
)
rv = http.dump_cookie('foo', 'bar baz blub', 360, httponly=True,
sync_expires=False)
assert type(rv) is str
assert set(rv.split('; ')) == set(['HttpOnly', 'Max-Age=360',
'Path=/', 'foo="bar baz blub"'])
strict_eq(dict(http.parse_cookie('fo234{=bar; blub=Blah')),
{'fo234{': u'bar', 'blub': u'Blah'})
def test_cookie_quoting(self):
val = http.dump_cookie("foo", "?foo")
strict_eq(val, 'foo="?foo"; Path=/')
strict_eq(dict(http.parse_cookie(val)), {'foo': u'?foo'})
strict_eq(dict(http.parse_cookie(r'foo="foo\054bar"')),
{'foo': u'foo,bar'})
def test_cookie_domain_resolving(self):
val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
strict_eq(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
def test_cookie_unicode_dumping(self):
val = http.dump_cookie('foo', u'\N{SNOWMAN}')
h = datastructures.Headers()
h.add('Set-Cookie', val)
assert h['Set-Cookie'] == 'foo="\\342\\230\\203"; Path=/'
cookies = http.parse_cookie(h['Set-Cookie'])
assert cookies['foo'] == u'\N{SNOWMAN}'
def test_cookie_unicode_keys(self):
# Yes, this is technically against the spec but happens
val = http.dump_cookie(u'fö', u'fö')
assert val == wsgi_encoding_dance(u'fö="f\\303\\266"; Path=/', 'utf-8')
cookies = http.parse_cookie(val)
assert cookies[u'fö'] == u'fö'
def test_cookie_unicode_parsing(self):
# This is actually a correct test. This is what is being submitted
        # by firefox if you set a unicode cookie and we get the cookie sent
# in on Python 3 under PEP 3333.
cookies = http.parse_cookie(u'fö=fö')
assert cookies[u'fö'] == u'fö'
def test_cookie_domain_encoding(self):
val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
strict_eq(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
val = http.dump_cookie('foo', 'bar', domain=u'.\N{SNOWMAN}.com')
strict_eq(val, 'foo=bar; Domain=.xn--n3h.com; Path=/')
val = http.dump_cookie('foo', 'bar', domain=u'.foo.com')
strict_eq(val, 'foo=bar; Domain=.foo.com; Path=/')
class TestRange(object):
def test_if_range_parsing(self):
rv = http.parse_if_range_header('"Test"')
assert rv.etag == 'Test'
assert rv.date is None
assert rv.to_header() == '"Test"'
# weak information is dropped
rv = http.parse_if_range_header('W/"Test"')
assert rv.etag == 'Test'
assert rv.date is None
assert rv.to_header() == '"Test"'
# broken etags are supported too
rv = http.parse_if_range_header('bullshit')
assert rv.etag == 'bullshit'
assert rv.date is None
assert rv.to_header() == '"bullshit"'
rv = http.parse_if_range_header('Thu, 01 Jan 1970 00:00:00 GMT')
assert rv.etag is None
assert rv.date == datetime(1970, 1, 1)
assert rv.to_header() == 'Thu, 01 Jan 1970 00:00:00 GMT'
for x in '', None:
rv = http.parse_if_range_header(x)
assert rv.etag is None
assert rv.date is None
assert rv.to_header() == ''
def test_range_parsing(self):
rv = http.parse_range_header('bytes=52')
assert rv is None
rv = http.parse_range_header('bytes=52-')
assert rv.units == 'bytes'
assert rv.ranges == [(52, None)]
assert rv.to_header() == 'bytes=52-'
rv = http.parse_range_header('bytes=52-99')
assert rv.units == 'bytes'
assert rv.ranges == [(52, 100)]
assert rv.to_header() == 'bytes=52-99'
rv = http.parse_range_header('bytes=52-99,-1000')
assert rv.units == 'bytes'
assert rv.ranges == [(52, 100), (-1000, None)]
assert rv.to_header() == 'bytes=52-99,-1000'
rv = http.parse_range_header('bytes = 1 - 100')
assert rv.units == 'bytes'
assert rv.ranges == [(1, 101)]
assert rv.to_header() == 'bytes=1-100'
rv = http.parse_range_header('AWesomes=0-999')
assert rv.units == 'awesomes'
assert rv.ranges == [(0, 1000)]
assert rv.to_header() == 'awesomes=0-999'
def test_content_range_parsing(self):
rv = http.parse_content_range_header('bytes 0-98/*')
assert rv.units == 'bytes'
assert rv.start == 0
assert rv.stop == 99
assert rv.length is None
assert rv.to_header() == 'bytes 0-98/*'
rv = http.parse_content_range_header('bytes 0-98/*asdfsa')
assert rv is None
rv = http.parse_content_range_header('bytes 0-99/100')
assert rv.to_header() == 'bytes 0-99/100'
rv.start = None
rv.stop = None
assert rv.units == 'bytes'
assert rv.to_header() == 'bytes */100'
rv = http.parse_content_range_header('bytes */100')
assert rv.start is None
assert rv.stop is None
assert rv.length == 100
assert rv.units == 'bytes'
class TestRegression(object):
def test_best_match_works(self):
# was a bug in 0.6
rv = http.parse_accept_header('foo=,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png,*/*;q=0.5',
datastructures.MIMEAccept).best_match(['foo/bar'])
assert rv == 'foo/bar'
|
/*
Translation from original CKEDITOR language files:
Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("base64image","is",{
"alt":"Baklægur texti",
"lockRatio":"Festa stærðarhlutfall",
"vSpace":"Hægri bil",
"hSpace":"Vinstri bil",
"border":"Rammi"
});
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Fuller unit tests for Python MLlib.
"""
import os
import sys
import tempfile
import array as pyarray
from time import time, sleep
from shutil import rmtree
from numpy import (
array, array_equal, zeros, inf, random, exp, dot, all, mean, abs, arange, tile, ones)
from numpy import sum as array_sum
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version > '3':
basestring = str
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark import SparkContext
from pyspark.mllib.common import _to_java_object_rdd
from pyspark.mllib.clustering import StreamingKMeans, StreamingKMeansModel
from pyspark.mllib.linalg import Vector, SparseVector, DenseVector, VectorUDT, _convert_to_vector,\
DenseMatrix, SparseMatrix, Vectors, Matrices, MatrixUDT
from pyspark.mllib.classification import StreamingLogisticRegressionWithSGD
from pyspark.mllib.regression import LabeledPoint, StreamingLinearRegressionWithSGD
from pyspark.mllib.random import RandomRDDs
from pyspark.mllib.stat import Statistics
from pyspark.mllib.feature import Word2Vec
from pyspark.mllib.feature import IDF
from pyspark.mllib.feature import StandardScaler, ElementwiseProduct
from pyspark.mllib.util import LinearDataGenerator
from pyspark.mllib.util import MLUtils
from pyspark.serializers import PickleSerializer
from pyspark.streaming import StreamingContext
from pyspark.sql import SQLContext
_have_scipy = False
try:
import scipy.sparse
_have_scipy = True
except ImportError:
# No SciPy, but that's okay, we'll skip those tests
pass
ser = PickleSerializer()
sc = SparkContext('local[4]', "MLlib tests")
class MLlibTestCase(unittest.TestCase):
def setUp(self):
self.sc = sc
class MLLibStreamingTestCase(unittest.TestCase):
def setUp(self):
self.sc = sc
self.ssc = StreamingContext(self.sc, 1.0)
def tearDown(self):
self.ssc.stop(False)
@staticmethod
def _eventually(condition, timeout=30.0, catch_assertions=False):
"""
Wait a given amount of time for a condition to pass, else fail with an error.
This is a helper utility for streaming ML tests.
:param condition: Function that checks for termination conditions.
condition() can return:
- True: Conditions met. Return without error.
- other value: Conditions not met yet. Continue. Upon timeout,
include last such value in error message.
Note that this method may be called at any time during
streaming execution (e.g., even before any results
have been created).
:param timeout: Number of seconds to wait. Default 30 seconds.
:param catch_assertions: If False (default), do not catch AssertionErrors.
If True, catch AssertionErrors; continue, but save
error to throw upon timeout.
"""
start_time = time()
lastValue = None
while time() - start_time < timeout:
if catch_assertions:
try:
lastValue = condition()
except AssertionError as e:
lastValue = e
else:
lastValue = condition()
if lastValue is True:
return
sleep(0.01)
if isinstance(lastValue, AssertionError):
raise lastValue
else:
raise AssertionError(
"Test failed due to timeout after %g sec, with last condition returning: %s"
% (timeout, lastValue))
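    # A minimal usage sketch for _eventually (hypothetical names, shown only for
    # illustration -- the streaming tests below follow this same pattern):
    #
    #     def condition():
    #         self.assertEqual(len(collected_batches), expected_batches)
    #         return True
    #     self._eventually(condition, timeout=60.0, catch_assertions=True)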
def _squared_distance(a, b):
if isinstance(a, Vector):
return a.squared_distance(b)
else:
return b.squared_distance(a)
class VectorTests(MLlibTestCase):
def _test_serialize(self, v):
self.assertEqual(v, ser.loads(ser.dumps(v)))
jvec = self.sc._jvm.SerDe.loads(bytearray(ser.dumps(v)))
nv = ser.loads(bytes(self.sc._jvm.SerDe.dumps(jvec)))
self.assertEqual(v, nv)
vs = [v] * 100
jvecs = self.sc._jvm.SerDe.loads(bytearray(ser.dumps(vs)))
nvs = ser.loads(bytes(self.sc._jvm.SerDe.dumps(jvecs)))
self.assertEqual(vs, nvs)
def test_serialize(self):
self._test_serialize(DenseVector(range(10)))
self._test_serialize(DenseVector(array([1., 2., 3., 4.])))
self._test_serialize(DenseVector(pyarray.array('d', range(10))))
self._test_serialize(SparseVector(4, {1: 1, 3: 2}))
self._test_serialize(SparseVector(3, {}))
self._test_serialize(DenseMatrix(2, 3, range(6)))
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self._test_serialize(sm1)
def test_dot(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([1, 2, 3, 4])
mat = array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
arr = pyarray.array('d', [0, 1, 2, 3])
self.assertEqual(10.0, sv.dot(dv))
self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
self.assertEqual(30.0, dv.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
self.assertEqual(30.0, lst.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
self.assertEqual(7.0, sv.dot(arr))
def test_squared_distance(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([4, 3, 2, 1])
lst1 = [4, 3, 2, 1]
arr = pyarray.array('d', [0, 2, 1, 3])
narr = array([0, 2, 1, 3])
self.assertEqual(15.0, _squared_distance(sv, dv))
self.assertEqual(25.0, _squared_distance(sv, lst))
self.assertEqual(20.0, _squared_distance(dv, lst))
self.assertEqual(15.0, _squared_distance(dv, sv))
self.assertEqual(25.0, _squared_distance(lst, sv))
self.assertEqual(20.0, _squared_distance(lst, dv))
self.assertEqual(0.0, _squared_distance(sv, sv))
self.assertEqual(0.0, _squared_distance(dv, dv))
self.assertEqual(0.0, _squared_distance(lst, lst))
self.assertEqual(25.0, _squared_distance(sv, lst1))
self.assertEqual(3.0, _squared_distance(sv, arr))
self.assertEqual(3.0, _squared_distance(sv, narr))
def test_hash(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(hash(v1), hash(v2))
self.assertEqual(hash(v1), hash(v3))
self.assertEqual(hash(v2), hash(v3))
self.assertFalse(hash(v1) == hash(v4))
self.assertFalse(hash(v2) == hash(v4))
def test_eq(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v2 == v4)
self.assertFalse(v1 == v5)
self.assertFalse(v1 == v6)
def test_equals(self):
indices = [1, 2, 4]
values = [1., 3., 2.]
self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.]))
def test_conversion(self):
# numpy arrays should be automatically upcast to float64
# tests for fix of [SPARK-5089]
v = array([1, 2, 3, 4], dtype='float64')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
v = array([1, 2, 3, 4], dtype='float32')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
def test_sparse_vector_indexing(self):
sv = SparseVector(5, {1: 1, 3: 2})
self.assertEqual(sv[0], 0.)
self.assertEqual(sv[3], 2.)
self.assertEqual(sv[1], 1.)
self.assertEqual(sv[2], 0.)
self.assertEqual(sv[4], 0.)
self.assertEqual(sv[-1], 0.)
self.assertEqual(sv[-2], 2.)
self.assertEqual(sv[-3], 0.)
self.assertEqual(sv[-5], 0.)
for ind in [5, -6]:
self.assertRaises(ValueError, sv.__getitem__, ind)
for ind in [7.8, '1']:
self.assertRaises(TypeError, sv.__getitem__, ind)
zeros = SparseVector(4, {})
self.assertEqual(zeros[0], 0.0)
self.assertEqual(zeros[3], 0.0)
for ind in [4, -5]:
self.assertRaises(ValueError, zeros.__getitem__, ind)
empty = SparseVector(0, {})
for ind in [-1, 0, 1]:
self.assertRaises(ValueError, empty.__getitem__, ind)
def test_matrix_indexing(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
expected = [[0, 6], [1, 8], [4, 10]]
for i in range(3):
for j in range(2):
self.assertEqual(mat[i, j], expected[i][j])
def test_repr_dense_matrix(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True)
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(6, 3, zeros(18))
self.assertTrue(
repr(mat),
'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., \
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)')
def test_repr_sparse_matrix(self):
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertTrue(
repr(sm1t),
'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], True)')
indices = tile(arange(6), 3)
values = ones(18)
sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values)
self.assertTrue(
repr(sm), "SparseMatrix(6, 3, [0, 6, 12, 18], \
[0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], \
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., \
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)")
self.assertTrue(
str(sm),
"6 X 3 CSCMatrix\n\
(0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n\
(0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n\
(0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..")
sm = SparseMatrix(1, 18, zeros(19), [], [])
self.assertTrue(
repr(sm),
'SparseMatrix(1, 18, \
[0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)')
def test_sparse_matrix(self):
# Test sparse matrix creation.
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self.assertEqual(sm1.numRows, 3)
self.assertEqual(sm1.numCols, 4)
self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
self.assertTrue(
repr(sm1),
'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)')
# Test indexing
expected = [
[0, 0, 0, 0],
[1, 0, 4, 0],
[2, 0, 5, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1[i, j])
self.assertTrue(array_equal(sm1.toArray(), expected))
# Test conversion to dense and sparse.
smnew = sm1.toDense().toSparse()
self.assertEqual(sm1.numRows, smnew.numRows)
self.assertEqual(sm1.numCols, smnew.numCols)
self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
self.assertTrue(array_equal(sm1.values, smnew.values))
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertEqual(sm1t.numRows, 3)
self.assertEqual(sm1t.numCols, 4)
self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
expected = [
[3, 2, 0, 0],
[0, 0, 4, 0],
[9, 0, 8, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1t[i, j])
self.assertTrue(array_equal(sm1t.toArray(), expected))
def test_dense_matrix_is_transposed(self):
mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
self.assertEqual(mat1, mat)
expected = [[0, 4], [1, 6], [3, 9]]
for i in range(3):
for j in range(2):
self.assertEqual(mat1[i, j], expected[i][j])
self.assertTrue(array_equal(mat1.toArray(), expected))
sm = mat1.toSparse()
self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2]))
self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5]))
self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
    def test_parse_vector(self):
        a = DenseVector([3, 4, 6, 7])
        self.assertEqual(str(a), '[3.0,4.0,6.0,7.0]')
        self.assertEqual(Vectors.parse(str(a)), a)
        a = SparseVector(4, [0, 2], [3, 4])
        self.assertEqual(str(a), '(4,[0,2],[3.0,4.0])')
        self.assertEqual(Vectors.parse(str(a)), a)
        a = SparseVector(10, [0, 1], [4, 5])
        self.assertEqual(SparseVector.parse(' (10, [0,1 ],[ 4.0,5.0] )'), a)
    def test_norms(self):
        a = DenseVector([0, 2, 3, -1])
        self.assertAlmostEqual(a.norm(2), 3.742, 3)
        self.assertEqual(a.norm(1), 6)
        self.assertEqual(a.norm(inf), 3)
        a = SparseVector(4, [0, 2], [3, -4])
        self.assertAlmostEqual(a.norm(2), 5)
        self.assertEqual(a.norm(1), 7)
        self.assertEqual(a.norm(inf), 4)
tmp = SparseVector(4, [0, 2], [3, 0])
self.assertEqual(tmp.numNonzeros(), 1)
class ListTests(MLlibTestCase):
"""
Test MLlib algorithms on plain lists, to make sure they're passed through
as NumPy arrays.
"""
def test_kmeans(self):
from pyspark.mllib.clustering import KMeans
data = [
[0, 1.1],
[0, 1.2],
[1.1, 0],
[1.2, 0],
]
clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||",
initializationSteps=7, epsilon=1e-4)
self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1]))
self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3]))
def test_kmeans_deterministic(self):
from pyspark.mllib.clustering import KMeans
X = range(0, 100, 10)
Y = range(0, 100, 10)
data = [[x, y] for x, y in zip(X, Y)]
clusters1 = KMeans.train(self.sc.parallelize(data),
3, initializationMode="k-means||",
seed=42, initializationSteps=7, epsilon=1e-4)
clusters2 = KMeans.train(self.sc.parallelize(data),
3, initializationMode="k-means||",
seed=42, initializationSteps=7, epsilon=1e-4)
centers1 = clusters1.centers
centers2 = clusters2.centers
for c1, c2 in zip(centers1, centers2):
# TODO: Allow small numeric difference.
self.assertTrue(array_equal(c1, c2))
def test_gmm(self):
from pyspark.mllib.clustering import GaussianMixture
data = self.sc.parallelize([
[1, 2],
[8, 9],
[-4, -3],
[-6, -7],
])
clusters = GaussianMixture.train(data, 2, convergenceTol=0.001,
maxIterations=10, seed=56)
labels = clusters.predict(data).collect()
self.assertEqual(labels[0], labels[1])
self.assertEqual(labels[2], labels[3])
def test_gmm_deterministic(self):
from pyspark.mllib.clustering import GaussianMixture
x = range(0, 100, 10)
y = range(0, 100, 10)
data = self.sc.parallelize([[a, b] for a, b in zip(x, y)])
clusters1 = GaussianMixture.train(data, 5, convergenceTol=0.001,
maxIterations=10, seed=63)
clusters2 = GaussianMixture.train(data, 5, convergenceTol=0.001,
maxIterations=10, seed=63)
for c1, c2 in zip(clusters1.weights, clusters2.weights):
self.assertEqual(round(c1, 7), round(c2, 7))
def test_classification(self):
from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel, RandomForest,\
RandomForestModel, GradientBoostedTrees, GradientBoostedTreesModel
data = [
LabeledPoint(0.0, [1, 0, 0]),
LabeledPoint(1.0, [0, 1, 1]),
LabeledPoint(0.0, [2, 0, 0]),
LabeledPoint(1.0, [0, 2, 1])
]
rdd = self.sc.parallelize(data)
features = [p.features.tolist() for p in data]
temp_dir = tempfile.mkdtemp()
lr_model = LogisticRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
svm_model = SVMWithSGD.train(rdd, iterations=10)
self.assertTrue(svm_model.predict(features[0]) <= 0)
self.assertTrue(svm_model.predict(features[1]) > 0)
self.assertTrue(svm_model.predict(features[2]) <= 0)
self.assertTrue(svm_model.predict(features[3]) > 0)
nb_model = NaiveBayes.train(rdd)
self.assertTrue(nb_model.predict(features[0]) <= 0)
self.assertTrue(nb_model.predict(features[1]) > 0)
self.assertTrue(nb_model.predict(features[2]) <= 0)
self.assertTrue(nb_model.predict(features[3]) > 0)
categoricalFeaturesInfo = {0: 3} # feature 0 has 3 categories
dt_model = DecisionTree.trainClassifier(
rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
dt_model_dir = os.path.join(temp_dir, "dt")
dt_model.save(self.sc, dt_model_dir)
same_dt_model = DecisionTreeModel.load(self.sc, dt_model_dir)
self.assertEqual(same_dt_model.toDebugString(), dt_model.toDebugString())
rf_model = RandomForest.trainClassifier(
rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10,
maxBins=4, seed=1)
self.assertTrue(rf_model.predict(features[0]) <= 0)
self.assertTrue(rf_model.predict(features[1]) > 0)
self.assertTrue(rf_model.predict(features[2]) <= 0)
self.assertTrue(rf_model.predict(features[3]) > 0)
rf_model_dir = os.path.join(temp_dir, "rf")
rf_model.save(self.sc, rf_model_dir)
same_rf_model = RandomForestModel.load(self.sc, rf_model_dir)
self.assertEqual(same_rf_model.toDebugString(), rf_model.toDebugString())
gbt_model = GradientBoostedTrees.trainClassifier(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
self.assertTrue(gbt_model.predict(features[0]) <= 0)
self.assertTrue(gbt_model.predict(features[1]) > 0)
self.assertTrue(gbt_model.predict(features[2]) <= 0)
self.assertTrue(gbt_model.predict(features[3]) > 0)
gbt_model_dir = os.path.join(temp_dir, "gbt")
gbt_model.save(self.sc, gbt_model_dir)
same_gbt_model = GradientBoostedTreesModel.load(self.sc, gbt_model_dir)
self.assertEqual(same_gbt_model.toDebugString(), gbt_model.toDebugString())
try:
rmtree(temp_dir)
except OSError:
pass
def test_regression(self):
from pyspark.mllib.regression import LinearRegressionWithSGD, LassoWithSGD, \
RidgeRegressionWithSGD
from pyspark.mllib.tree import DecisionTree, RandomForest, GradientBoostedTrees
data = [
LabeledPoint(-1.0, [0, -1]),
LabeledPoint(1.0, [0, 1]),
LabeledPoint(-1.0, [0, -2]),
LabeledPoint(1.0, [0, 2])
]
rdd = self.sc.parallelize(data)
features = [p.features.tolist() for p in data]
lr_model = LinearRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
lasso_model = LassoWithSGD.train(rdd, iterations=10)
self.assertTrue(lasso_model.predict(features[0]) <= 0)
self.assertTrue(lasso_model.predict(features[1]) > 0)
self.assertTrue(lasso_model.predict(features[2]) <= 0)
self.assertTrue(lasso_model.predict(features[3]) > 0)
rr_model = RidgeRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(rr_model.predict(features[0]) <= 0)
self.assertTrue(rr_model.predict(features[1]) > 0)
self.assertTrue(rr_model.predict(features[2]) <= 0)
self.assertTrue(rr_model.predict(features[3]) > 0)
categoricalFeaturesInfo = {0: 2} # feature 0 has 2 categories
dt_model = DecisionTree.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
rf_model = RandomForest.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10, maxBins=4, seed=1)
self.assertTrue(rf_model.predict(features[0]) <= 0)
self.assertTrue(rf_model.predict(features[1]) > 0)
self.assertTrue(rf_model.predict(features[2]) <= 0)
self.assertTrue(rf_model.predict(features[3]) > 0)
gbt_model = GradientBoostedTrees.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
self.assertTrue(gbt_model.predict(features[0]) <= 0)
self.assertTrue(gbt_model.predict(features[1]) > 0)
self.assertTrue(gbt_model.predict(features[2]) <= 0)
self.assertTrue(gbt_model.predict(features[3]) > 0)
try:
LinearRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
LassoWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
RidgeRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
except ValueError:
self.fail()
# Verify that maxBins is being passed through
GradientBoostedTrees.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4, maxBins=32)
with self.assertRaises(Exception) as cm:
GradientBoostedTrees.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4, maxBins=1)
class StatTests(MLlibTestCase):
# SPARK-4023
def test_col_with_different_rdds(self):
# numpy
data = RandomRDDs.normalVectorRDD(self.sc, 1000, 10, 10)
summary = Statistics.colStats(data)
self.assertEqual(1000, summary.count())
        # plain Python list
data = self.sc.parallelize([range(10)] * 10)
summary = Statistics.colStats(data)
self.assertEqual(10, summary.count())
# array
data = self.sc.parallelize([pyarray.array("d", range(10))] * 10)
summary = Statistics.colStats(data)
self.assertEqual(10, summary.count())
def test_col_norms(self):
data = RandomRDDs.normalVectorRDD(self.sc, 1000, 10, 10)
summary = Statistics.colStats(data)
self.assertEqual(10, len(summary.normL1()))
self.assertEqual(10, len(summary.normL2()))
data2 = self.sc.parallelize(range(10)).map(lambda x: Vectors.dense(x))
summary2 = Statistics.colStats(data2)
self.assertEqual(array([45.0]), summary2.normL1())
import math
expectedNormL2 = math.sqrt(sum(map(lambda x: x*x, range(10))))
self.assertTrue(math.fabs(summary2.normL2()[0] - expectedNormL2) < 1e-14)
class VectorUDTTests(MLlibTestCase):
dv0 = DenseVector([])
dv1 = DenseVector([1.0, 2.0])
sv0 = SparseVector(2, [], [])
sv1 = SparseVector(2, [1], [2.0])
udt = VectorUDT()
def test_json_schema(self):
self.assertEqual(VectorUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for v in [self.dv0, self.dv1, self.sv0, self.sv1]:
self.assertEqual(v, self.udt.deserialize(self.udt.serialize(v)))
def test_infer_schema(self):
sqlCtx = SQLContext(self.sc)
rdd = self.sc.parallelize([LabeledPoint(1.0, self.dv1), LabeledPoint(0.0, self.sv1)])
df = rdd.toDF()
schema = df.schema
field = [f for f in schema.fields if f.name == "features"][0]
self.assertEqual(field.dataType, self.udt)
vectors = df.map(lambda p: p.features).collect()
self.assertEqual(len(vectors), 2)
for v in vectors:
if isinstance(v, SparseVector):
self.assertEqual(v, self.sv1)
elif isinstance(v, DenseVector):
self.assertEqual(v, self.dv1)
else:
raise TypeError("expecting a vector but got %r of type %r" % (v, type(v)))
class MatrixUDTTests(MLlibTestCase):
dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10])
dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True)
sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0])
sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True)
udt = MatrixUDT()
def test_json_schema(self):
self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for m in [self.dm1, self.dm2, self.sm1, self.sm2]:
self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m)))
def test_infer_schema(self):
sqlCtx = SQLContext(self.sc)
rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)])
df = rdd.toDF()
schema = df.schema
        self.assertEqual(schema.fields[1].dataType, self.udt)
matrices = df.map(lambda x: x._2).collect()
self.assertEqual(len(matrices), 2)
for m in matrices:
if isinstance(m, DenseMatrix):
self.assertTrue(m, self.dm1)
elif isinstance(m, SparseMatrix):
self.assertTrue(m, self.sm1)
else:
raise ValueError("Expected a matrix but got type %r" % type(m))
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(MLlibTestCase):
"""
Test both vector operations and MLlib algorithms with SciPy sparse matrices,
if SciPy is available.
"""
def test_serialize(self):
from scipy.sparse import lil_matrix
lil = lil_matrix((4, 1))
lil[1, 0] = 1
lil[3, 0] = 2
sv = SparseVector(4, {1: 1, 3: 2})
self.assertEqual(sv, _convert_to_vector(lil))
self.assertEqual(sv, _convert_to_vector(lil.tocsc()))
self.assertEqual(sv, _convert_to_vector(lil.tocoo()))
self.assertEqual(sv, _convert_to_vector(lil.tocsr()))
self.assertEqual(sv, _convert_to_vector(lil.todok()))
def serialize(l):
return ser.loads(ser.dumps(_convert_to_vector(l)))
self.assertEqual(sv, serialize(lil))
self.assertEqual(sv, serialize(lil.tocsc()))
self.assertEqual(sv, serialize(lil.tocsr()))
self.assertEqual(sv, serialize(lil.todok()))
def test_dot(self):
from scipy.sparse import lil_matrix
lil = lil_matrix((4, 1))
lil[1, 0] = 1
lil[3, 0] = 2
dv = DenseVector(array([1., 2., 3., 4.]))
self.assertEqual(10.0, dv.dot(lil))
def test_squared_distance(self):
from scipy.sparse import lil_matrix
lil = lil_matrix((4, 1))
lil[1, 0] = 3
lil[3, 0] = 2
dv = DenseVector(array([1., 2., 3., 4.]))
sv = SparseVector(4, {0: 1, 1: 2, 2: 3, 3: 4})
self.assertEqual(15.0, dv.squared_distance(lil))
self.assertEqual(15.0, sv.squared_distance(lil))
def scipy_matrix(self, size, values):
"""Create a column SciPy matrix from a dictionary of values"""
from scipy.sparse import lil_matrix
lil = lil_matrix((size, 1))
for key, value in values.items():
lil[key, 0] = value
return lil
def test_clustering(self):
from pyspark.mllib.clustering import KMeans
data = [
self.scipy_matrix(3, {1: 1.0}),
self.scipy_matrix(3, {1: 1.1}),
self.scipy_matrix(3, {2: 1.0}),
self.scipy_matrix(3, {2: 1.1})
]
clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||")
self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1]))
self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3]))
def test_classification(self):
from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
from pyspark.mllib.tree import DecisionTree
data = [
LabeledPoint(0.0, self.scipy_matrix(2, {0: 1.0})),
LabeledPoint(1.0, self.scipy_matrix(2, {1: 1.0})),
LabeledPoint(0.0, self.scipy_matrix(2, {0: 2.0})),
LabeledPoint(1.0, self.scipy_matrix(2, {1: 2.0}))
]
rdd = self.sc.parallelize(data)
features = [p.features for p in data]
lr_model = LogisticRegressionWithSGD.train(rdd)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
svm_model = SVMWithSGD.train(rdd)
self.assertTrue(svm_model.predict(features[0]) <= 0)
self.assertTrue(svm_model.predict(features[1]) > 0)
self.assertTrue(svm_model.predict(features[2]) <= 0)
self.assertTrue(svm_model.predict(features[3]) > 0)
nb_model = NaiveBayes.train(rdd)
self.assertTrue(nb_model.predict(features[0]) <= 0)
self.assertTrue(nb_model.predict(features[1]) > 0)
self.assertTrue(nb_model.predict(features[2]) <= 0)
self.assertTrue(nb_model.predict(features[3]) > 0)
categoricalFeaturesInfo = {0: 3} # feature 0 has 3 categories
dt_model = DecisionTree.trainClassifier(rdd, numClasses=2,
categoricalFeaturesInfo=categoricalFeaturesInfo)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
def test_regression(self):
from pyspark.mllib.regression import LinearRegressionWithSGD, LassoWithSGD, \
RidgeRegressionWithSGD
from pyspark.mllib.tree import DecisionTree
data = [
LabeledPoint(-1.0, self.scipy_matrix(2, {1: -1.0})),
LabeledPoint(1.0, self.scipy_matrix(2, {1: 1.0})),
LabeledPoint(-1.0, self.scipy_matrix(2, {1: -2.0})),
LabeledPoint(1.0, self.scipy_matrix(2, {1: 2.0}))
]
rdd = self.sc.parallelize(data)
features = [p.features for p in data]
lr_model = LinearRegressionWithSGD.train(rdd)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
lasso_model = LassoWithSGD.train(rdd)
self.assertTrue(lasso_model.predict(features[0]) <= 0)
self.assertTrue(lasso_model.predict(features[1]) > 0)
self.assertTrue(lasso_model.predict(features[2]) <= 0)
self.assertTrue(lasso_model.predict(features[3]) > 0)
rr_model = RidgeRegressionWithSGD.train(rdd)
self.assertTrue(rr_model.predict(features[0]) <= 0)
self.assertTrue(rr_model.predict(features[1]) > 0)
self.assertTrue(rr_model.predict(features[2]) <= 0)
self.assertTrue(rr_model.predict(features[3]) > 0)
categoricalFeaturesInfo = {0: 2} # feature 0 has 2 categories
dt_model = DecisionTree.trainRegressor(rdd, categoricalFeaturesInfo=categoricalFeaturesInfo)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
class ChiSqTestTests(MLlibTestCase):
def test_goodness_of_fit(self):
from numpy import inf
observed = Vectors.dense([4, 6, 5])
pearson = Statistics.chiSqTest(observed)
# Validated against the R command `chisq.test(c(4, 6, 5), p=c(1/3, 1/3, 1/3))`
self.assertEqual(pearson.statistic, 0.4)
self.assertEqual(pearson.degreesOfFreedom, 2)
self.assertAlmostEqual(pearson.pValue, 0.8187, 4)
# Different expected and observed sum
observed1 = Vectors.dense([21, 38, 43, 80])
expected1 = Vectors.dense([3, 5, 7, 20])
pearson1 = Statistics.chiSqTest(observed1, expected1)
# Results validated against the R command
# `chisq.test(c(21, 38, 43, 80), p=c(3/35, 1/7, 1/5, 4/7))`
self.assertAlmostEqual(pearson1.statistic, 14.1429, 4)
self.assertEqual(pearson1.degreesOfFreedom, 3)
self.assertAlmostEqual(pearson1.pValue, 0.002717, 4)
# Vectors with different sizes
observed3 = Vectors.dense([1.0, 2.0, 3.0])
expected3 = Vectors.dense([1.0, 2.0, 3.0, 4.0])
self.assertRaises(ValueError, Statistics.chiSqTest, observed3, expected3)
# Negative counts in observed
neg_obs = Vectors.dense([1.0, 2.0, 3.0, -4.0])
self.assertRaises(Py4JJavaError, Statistics.chiSqTest, neg_obs, expected1)
# Count = 0.0 in expected but not observed
zero_expected = Vectors.dense([1.0, 0.0, 3.0])
pearson_inf = Statistics.chiSqTest(observed, zero_expected)
self.assertEqual(pearson_inf.statistic, inf)
self.assertEqual(pearson_inf.degreesOfFreedom, 2)
self.assertEqual(pearson_inf.pValue, 0.0)
# 0.0 in expected and observed simultaneously
zero_observed = Vectors.dense([2.0, 0.0, 1.0])
self.assertRaises(Py4JJavaError, Statistics.chiSqTest, zero_observed, zero_expected)
def test_matrix_independence(self):
data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
# Results validated against R command
# `chisq.test(rbind(c(40, 56, 31, 30),c(24, 32, 10, 15), c(29, 42, 0, 12)))`
self.assertAlmostEqual(chi.statistic, 21.9958, 4)
self.assertEqual(chi.degreesOfFreedom, 6)
self.assertAlmostEqual(chi.pValue, 0.001213, 4)
# Negative counts
neg_counts = Matrices.dense(2, 2, [4.0, 5.0, 3.0, -3.0])
self.assertRaises(Py4JJavaError, Statistics.chiSqTest, neg_counts)
# Row sum = 0.0
row_zero = Matrices.dense(2, 2, [0.0, 1.0, 0.0, 2.0])
self.assertRaises(Py4JJavaError, Statistics.chiSqTest, row_zero)
# Column sum = 0.0
col_zero = Matrices.dense(2, 2, [0.0, 0.0, 2.0, 2.0])
self.assertRaises(Py4JJavaError, Statistics.chiSqTest, col_zero)
def test_chi_sq_pearson(self):
data = [
LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
LabeledPoint(1.0, Vectors.dense([3.5, 40.0]))
]
for numParts in [2, 4, 6, 8]:
chi = Statistics.chiSqTest(self.sc.parallelize(data, numParts))
feature1 = chi[0]
self.assertEqual(feature1.statistic, 0.75)
self.assertEqual(feature1.degreesOfFreedom, 2)
self.assertAlmostEqual(feature1.pValue, 0.6873, 4)
feature2 = chi[1]
self.assertEqual(feature2.statistic, 1.5)
self.assertEqual(feature2.degreesOfFreedom, 3)
self.assertAlmostEqual(feature2.pValue, 0.6823, 4)
def test_right_number_of_results(self):
num_cols = 1001
sparse_data = [
LabeledPoint(0.0, Vectors.sparse(num_cols, [(100, 2.0)])),
LabeledPoint(0.1, Vectors.sparse(num_cols, [(200, 1.0)]))
]
chi = Statistics.chiSqTest(self.sc.parallelize(sparse_data))
self.assertEqual(len(chi), num_cols)
self.assertIsNotNone(chi[1000])
class KolmogorovSmirnovTest(MLlibTestCase):
def test_R_implementation_equivalence(self):
data = self.sc.parallelize([
1.1626852897838, -0.585924465893051, 1.78546500331661, -1.33259371048501,
-0.446566766553219, 0.569606122374976, -2.88971761441412, -0.869018343326555,
-0.461702683149641, -0.555540910137444, -0.0201353678515895, -0.150382224136063,
-0.628126755843964, 1.32322085193283, -1.52135057001199, -0.437427868856691,
0.970577579543399, 0.0282226444247749, -0.0857821886527593, 0.389214404984942
])
model = Statistics.kolmogorovSmirnovTest(data, "norm")
self.assertAlmostEqual(model.statistic, 0.189, 3)
self.assertAlmostEqual(model.pValue, 0.422, 3)
model = Statistics.kolmogorovSmirnovTest(data, "norm", 0, 1)
self.assertAlmostEqual(model.statistic, 0.189, 3)
self.assertAlmostEqual(model.pValue, 0.422, 3)
class SerDeTest(MLlibTestCase):
def test_to_java_object_rdd(self): # SPARK-6660
data = RandomRDDs.uniformRDD(self.sc, 10, 5, seed=0)
self.assertEqual(_to_java_object_rdd(data).count(), 10)
class FeatureTest(MLlibTestCase):
def test_idf_model(self):
data = [
Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),
Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),
Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9])
]
model = IDF().fit(self.sc.parallelize(data, 2))
idf = model.idf()
self.assertEqual(len(idf), 11)
class Word2VecTests(MLlibTestCase):
def test_word2vec_setters(self):
model = Word2Vec() \
.setVectorSize(2) \
.setLearningRate(0.01) \
.setNumPartitions(2) \
.setNumIterations(10) \
.setSeed(1024) \
.setMinCount(3)
self.assertEqual(model.vectorSize, 2)
self.assertTrue(model.learningRate < 0.02)
self.assertEqual(model.numPartitions, 2)
self.assertEqual(model.numIterations, 10)
self.assertEqual(model.seed, 1024)
self.assertEqual(model.minCount, 3)
def test_word2vec_get_vectors(self):
data = [
["a", "b", "c", "d", "e", "f", "g"],
["a", "b", "c", "d", "e", "f"],
["a", "b", "c", "d", "e"],
["a", "b", "c", "d"],
["a", "b", "c"],
["a", "b"],
["a"]
]
model = Word2Vec().fit(self.sc.parallelize(data))
self.assertEqual(len(model.getVectors()), 3)
class StandardScalerTests(MLlibTestCase):
def test_model_setters(self):
data = [
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[3.0, 4.0, 5.0]
]
model = StandardScaler().fit(self.sc.parallelize(data))
self.assertIsNotNone(model.setWithMean(True))
self.assertIsNotNone(model.setWithStd(True))
self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([-1.0, -1.0, -1.0]))
def test_model_transform(self):
data = [
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[3.0, 4.0, 5.0]
]
model = StandardScaler().fit(self.sc.parallelize(data))
self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([1.0, 2.0, 3.0]))
class ElementwiseProductTests(MLlibTestCase):
def test_model_transform(self):
weight = Vectors.dense([3, 2, 1])
densevec = Vectors.dense([4, 5, 6])
sparsevec = Vectors.sparse(3, [0], [1])
eprod = ElementwiseProduct(weight)
self.assertEqual(eprod.transform(densevec), DenseVector([12, 10, 6]))
self.assertEqual(
eprod.transform(sparsevec), SparseVector(3, [0], [3]))
class StreamingKMeansTest(MLLibStreamingTestCase):
def test_model_params(self):
"""Test that the model params are set correctly"""
stkm = StreamingKMeans()
stkm.setK(5).setDecayFactor(0.0)
self.assertEqual(stkm._k, 5)
self.assertEqual(stkm._decayFactor, 0.0)
# Model not set yet.
self.assertIsNone(stkm.latestModel())
self.assertRaises(ValueError, stkm.trainOn, [0.0, 1.0])
stkm.setInitialCenters(
centers=[[0.0, 0.0], [1.0, 1.0]], weights=[1.0, 1.0])
self.assertEqual(
stkm.latestModel().centers, [[0.0, 0.0], [1.0, 1.0]])
self.assertEqual(stkm.latestModel().clusterWeights, [1.0, 1.0])
def test_accuracy_for_single_center(self):
"""Test that parameters obtained are correct for a single center."""
centers, batches = self.streamingKMeansDataGenerator(
batches=5, numPoints=5, k=1, d=5, r=0.1, seed=0)
stkm = StreamingKMeans(1)
stkm.setInitialCenters([[0., 0., 0., 0., 0.]], [0.])
input_stream = self.ssc.queueStream(
[self.sc.parallelize(batch, 1) for batch in batches])
stkm.trainOn(input_stream)
self.ssc.start()
def condition():
self.assertEqual(stkm.latestModel().clusterWeights, [25.0])
return True
self._eventually(condition, catch_assertions=True)
realCenters = array_sum(array(centers), axis=0)
for i in range(5):
modelCenters = stkm.latestModel().centers[0][i]
self.assertAlmostEqual(centers[0][i], modelCenters, 1)
self.assertAlmostEqual(realCenters[i], modelCenters, 1)
    def streamingKMeansDataGenerator(self, batches, numPoints,
                                     k, d, r, seed, centers=None):
        """Return (centers, batches): `k` random centers in `d` dimensions and
        `batches` lists of `numPoints` points, each point being one of the
        centers plus Gaussian noise scaled by `r`."""
        rng = random.RandomState(seed)
# Generate centers.
centers = [rng.randn(d) for i in range(k)]
return centers, [[Vectors.dense(centers[j % k] + r * rng.randn(d))
for j in range(numPoints)]
for i in range(batches)]
def test_trainOn_model(self):
"""Test the model on toy data with four clusters."""
stkm = StreamingKMeans()
initCenters = [[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]]
stkm.setInitialCenters(
centers=initCenters, weights=[1.0, 1.0, 1.0, 1.0])
# Create a toy dataset by setting a tiny offset for each point.
offsets = [[0, 0.1], [0, -0.1], [0.1, 0], [-0.1, 0]]
batches = []
for offset in offsets:
batches.append([[offset[0] + center[0], offset[1] + center[1]]
for center in initCenters])
batches = [self.sc.parallelize(batch, 1) for batch in batches]
input_stream = self.ssc.queueStream(batches)
stkm.trainOn(input_stream)
self.ssc.start()
# Give enough time to train the model.
def condition():
finalModel = stkm.latestModel()
self.assertTrue(all(finalModel.centers == array(initCenters)))
self.assertEqual(finalModel.clusterWeights, [5.0, 5.0, 5.0, 5.0])
return True
self._eventually(condition, catch_assertions=True)
def test_predictOn_model(self):
"""Test that the model predicts correctly on toy data."""
stkm = StreamingKMeans()
stkm._model = StreamingKMeansModel(
clusterCenters=[[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]],
clusterWeights=[1.0, 1.0, 1.0, 1.0])
predict_data = [[[1.5, 1.5]], [[-1.5, 1.5]], [[-1.5, -1.5]], [[1.5, -1.5]]]
predict_data = [sc.parallelize(batch, 1) for batch in predict_data]
predict_stream = self.ssc.queueStream(predict_data)
predict_val = stkm.predictOn(predict_stream)
result = []
def update(rdd):
rdd_collect = rdd.collect()
if rdd_collect:
result.append(rdd_collect)
predict_val.foreachRDD(update)
self.ssc.start()
def condition():
self.assertEqual(result, [[0], [1], [2], [3]])
return True
self._eventually(condition, catch_assertions=True)
def test_trainOn_predictOn(self):
"""Test that prediction happens on the updated model."""
stkm = StreamingKMeans(decayFactor=0.0, k=2)
stkm.setInitialCenters([[0.0], [1.0]], [1.0, 1.0])
        # Since the decay factor is set to zero, once the first batch
        # has been processed the clusterCenters are updated to [-0.5, 0.7],
        # which causes 0.2 and 0.3 to be classified as 1, even though a
        # classification based on the initial model would have been 0,
        # proving that the model is updated.
batches = [[[-0.5], [0.6], [0.8]], [[0.2], [-0.1], [0.3]]]
batches = [sc.parallelize(batch) for batch in batches]
input_stream = self.ssc.queueStream(batches)
predict_results = []
def collect(rdd):
rdd_collect = rdd.collect()
if rdd_collect:
predict_results.append(rdd_collect)
stkm.trainOn(input_stream)
predict_stream = stkm.predictOn(input_stream)
predict_stream.foreachRDD(collect)
self.ssc.start()
def condition():
self.assertEqual(predict_results, [[0, 1, 1], [1, 0, 1]])
return True
self._eventually(condition, catch_assertions=True)
class LinearDataGeneratorTests(MLlibTestCase):
def test_dim(self):
linear_data = LinearDataGenerator.generateLinearInput(
intercept=0.0, weights=[0.0, 0.0, 0.0],
xMean=[0.0, 0.0, 0.0], xVariance=[0.33, 0.33, 0.33],
nPoints=4, seed=0, eps=0.1)
self.assertEqual(len(linear_data), 4)
for point in linear_data:
self.assertEqual(len(point.features), 3)
linear_data = LinearDataGenerator.generateLinearRDD(
sc=sc, nexamples=6, nfeatures=2, eps=0.1,
nParts=2, intercept=0.0).collect()
self.assertEqual(len(linear_data), 6)
for point in linear_data:
self.assertEqual(len(point.features), 2)
class StreamingLogisticRegressionWithSGDTests(MLLibStreamingTestCase):
@staticmethod
def generateLogisticInput(offset, scale, nPoints, seed):
"""
Generate 1 / (1 + exp(-x * scale + offset))
where,
x is randomnly distributed and the threshold
and labels for each sample in x is obtained from a random uniform
distribution.
"""
rng = random.RandomState(seed)
x = rng.randn(nPoints)
sigmoid = 1. / (1 + exp(-(dot(x, scale) + offset)))
y_p = rng.rand(nPoints)
cut_off = y_p <= sigmoid
y_p[cut_off] = 1.0
y_p[~cut_off] = 0.0
return [
LabeledPoint(y_p[i], Vectors.dense([x[i]]))
for i in range(nPoints)]
def test_parameter_accuracy(self):
"""
Test that the final value of weights is close to the desired value.
"""
input_batches = [
self.sc.parallelize(self.generateLogisticInput(0, 1.5, 100, 42 + i))
for i in range(20)]
input_stream = self.ssc.queueStream(input_batches)
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
slr.trainOn(input_stream)
self.ssc.start()
def condition():
rel = (1.5 - slr.latestModel().weights.array[0]) / 1.5
self.assertAlmostEqual(rel, 0.1, 1)
return True
self._eventually(condition, catch_assertions=True)
def test_convergence(self):
"""
Test that weights converge to the required value on toy data.
"""
input_batches = [
self.sc.parallelize(self.generateLogisticInput(0, 1.5, 100, 42 + i))
for i in range(20)]
input_stream = self.ssc.queueStream(input_batches)
models = []
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
slr.trainOn(input_stream)
input_stream.foreachRDD(
lambda x: models.append(slr.latestModel().weights[0]))
self.ssc.start()
def condition():
self.assertEqual(len(models), len(input_batches))
return True
# We want all batches to finish for this test.
self._eventually(condition, 60.0, catch_assertions=True)
t_models = array(models)
diff = t_models[1:] - t_models[:-1]
# Test that weights improve with a small tolerance
self.assertTrue(all(diff >= -0.1))
self.assertTrue(array_sum(diff > 0) > 1)
@staticmethod
def calculate_accuracy_error(true, predicted):
return sum(abs(array(true) - array(predicted))) / len(true)
def test_predictions(self):
"""Test predicted values on a toy model."""
input_batches = []
for i in range(20):
batch = self.sc.parallelize(
self.generateLogisticInput(0, 1.5, 100, 42 + i))
input_batches.append(batch.map(lambda x: (x.label, x.features)))
input_stream = self.ssc.queueStream(input_batches)
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.2, numIterations=25)
slr.setInitialWeights([1.5])
predict_stream = slr.predictOnValues(input_stream)
true_predicted = []
predict_stream.foreachRDD(lambda x: true_predicted.append(x.collect()))
self.ssc.start()
def condition():
self.assertEqual(len(true_predicted), len(input_batches))
return True
self._eventually(condition, catch_assertions=True)
# Test that the accuracy error is no more than 0.4 on each batch.
for batch in true_predicted:
true, predicted = zip(*batch)
self.assertTrue(
self.calculate_accuracy_error(true, predicted) < 0.4)
def test_training_and_prediction(self):
"""Test that the model improves on toy data with no. of batches"""
input_batches = [
self.sc.parallelize(self.generateLogisticInput(0, 1.5, 100, 42 + i))
for i in range(20)]
predict_batches = [
b.map(lambda lp: (lp.label, lp.features)) for b in input_batches]
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.01, numIterations=25)
slr.setInitialWeights([-0.1])
errors = []
def collect_errors(rdd):
true, predicted = zip(*rdd.collect())
errors.append(self.calculate_accuracy_error(true, predicted))
true_predicted = []
input_stream = self.ssc.queueStream(input_batches)
predict_stream = self.ssc.queueStream(predict_batches)
slr.trainOn(input_stream)
ps = slr.predictOnValues(predict_stream)
ps.foreachRDD(lambda x: collect_errors(x))
self.ssc.start()
def condition():
# Test that the improvement in error is > 0.3
if len(errors) == len(predict_batches):
self.assertGreater(errors[1] - errors[-1], 0.3)
if len(errors) >= 3 and errors[1] - errors[-1] > 0.3:
return True
return "Latest errors: " + ", ".join(map(lambda x: str(x), errors))
self._eventually(condition)
class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
def assertArrayAlmostEqual(self, array1, array2, dec):
        for i, j in zip(array1, array2):
self.assertAlmostEqual(i, j, dec)
def test_parameter_accuracy(self):
"""Test that coefs are predicted accurately by fitting on toy data."""
# Test that fitting (10*X1 + 10*X2), (X1, X2) gives coefficients
# (10, 10)
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0, 0.0])
xMean = [0.0, 0.0]
xVariance = [1.0 / 3.0, 1.0 / 3.0]
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0, 10.0], xMean, xVariance, 100, 42 + i, 0.1)
batches.append(sc.parallelize(batch))
input_stream = self.ssc.queueStream(batches)
slr.trainOn(input_stream)
self.ssc.start()
def condition():
self.assertArrayAlmostEqual(
slr.latestModel().weights.array, [10., 10.], 1)
self.assertAlmostEqual(slr.latestModel().intercept, 0.0, 1)
return True
self._eventually(condition, catch_assertions=True)
def test_parameter_convergence(self):
"""Test that the model parameters improve with streaming data."""
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0], [0.0], [1.0 / 3.0], 100, 42 + i, 0.1)
batches.append(sc.parallelize(batch))
model_weights = []
input_stream = self.ssc.queueStream(batches)
input_stream.foreachRDD(
lambda x: model_weights.append(slr.latestModel().weights[0]))
slr.trainOn(input_stream)
self.ssc.start()
def condition():
self.assertEqual(len(model_weights), len(batches))
return True
# We want all batches to finish for this test.
self._eventually(condition, catch_assertions=True)
w = array(model_weights)
diff = w[1:] - w[:-1]
self.assertTrue(all(diff >= -0.1))
def test_prediction(self):
"""Test prediction on a model with weights already set."""
# Create a model with initial Weights equal to coefs
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([10.0, 10.0])
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0, 10.0], [0.0, 0.0], [1.0 / 3.0, 1.0 / 3.0],
100, 42 + i, 0.1)
batches.append(
sc.parallelize(batch).map(lambda lp: (lp.label, lp.features)))
input_stream = self.ssc.queueStream(batches)
output_stream = slr.predictOnValues(input_stream)
samples = []
output_stream.foreachRDD(lambda x: samples.append(x.collect()))
self.ssc.start()
def condition():
self.assertEqual(len(samples), len(batches))
return True
# We want all batches to finish for this test.
self._eventually(condition, catch_assertions=True)
# Test that mean absolute error on each batch is less than 0.1
for batch in samples:
true, predicted = zip(*batch)
self.assertTrue(mean(abs(array(true) - array(predicted))) < 0.1)
def test_train_prediction(self):
"""Test that error on test data improves as model is trained."""
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0], [0.0], [1.0 / 3.0], 100, 42 + i, 0.1)
batches.append(sc.parallelize(batch))
predict_batches = [
b.map(lambda lp: (lp.label, lp.features)) for b in batches]
errors = []
def func(rdd):
true, predicted = zip(*rdd.collect())
errors.append(mean(abs(true) - abs(predicted)))
input_stream = self.ssc.queueStream(batches)
output_stream = self.ssc.queueStream(predict_batches)
slr.trainOn(input_stream)
output_stream = slr.predictOnValues(output_stream)
output_stream.foreachRDD(func)
self.ssc.start()
def condition():
if len(errors) == len(predict_batches):
self.assertGreater(errors[1] - errors[-1], 2)
if len(errors) >= 3 and errors[1] - errors[-1] > 2:
return True
return "Latest errors: " + ", ".join(map(lambda x: str(x), errors))
self._eventually(condition)
class MLUtilsTests(MLlibTestCase):
def test_append_bias(self):
data = [2.0, 2.0, 2.0]
ret = MLUtils.appendBias(data)
self.assertEqual(ret[3], 1.0)
self.assertEqual(type(ret), DenseVector)
def test_append_bias_with_vector(self):
data = Vectors.dense([2.0, 2.0, 2.0])
ret = MLUtils.appendBias(data)
self.assertEqual(ret[3], 1.0)
self.assertEqual(type(ret), DenseVector)
def test_append_bias_with_sp_vector(self):
data = Vectors.sparse(3, {0: 2.0, 2: 2.0})
expected = Vectors.sparse(4, {0: 2.0, 2: 2.0, 3: 1.0})
# Returned value must be SparseVector
ret = MLUtils.appendBias(data)
self.assertEqual(ret, expected)
self.assertEqual(type(ret), SparseVector)
def test_load_vectors(self):
import shutil
data = [
[1.0, 2.0, 3.0],
[1.0, 2.0, 3.0]
]
temp_dir = tempfile.mkdtemp()
load_vectors_path = os.path.join(temp_dir, "test_load_vectors")
try:
self.sc.parallelize(data).saveAsTextFile(load_vectors_path)
ret_rdd = MLUtils.loadVectors(self.sc, load_vectors_path)
ret = ret_rdd.collect()
self.assertEqual(len(ret), 2)
self.assertEqual(ret[0], DenseVector([1.0, 2.0, 3.0]))
self.assertEqual(ret[1], DenseVector([1.0, 2.0, 3.0]))
except:
self.fail()
finally:
shutil.rmtree(load_vectors_path)
if __name__ == "__main__":
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
sc.stop()
|
import datetime
import json
import os
import requests
from airflow import models
from airflow.hooks.http_hook import HttpHook
from airflow.models.connection import Connection
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.http_operator import SimpleHttpOperator
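# Mapping of state / union-territory codes to their districts; the DAG at the
# bottom of this file fans out one epi task and one tev task per (state, district) pair.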
state_districts = {
'AN' : ['Andaman And Nicobar Islands'],
    'AP' : ['Anantapur', 'Chittoor', 'East Godavari', 'Guntur', 'Krishna', 'Kurnool', 'Prakasam', 'S.P.S. Nellore', 'Srikakulam', 'Visakhapatnam', 'Vizianagaram', 'West Godavari', 'Y.S.R. Kadapa'],
    'AR' : ['Anjaw', 'Changlang', 'East Kameng', 'East Siang', 'Kamle', 'Kra Daadi', 'Kurung Kumey', 'Lepa Rada', 'Lohit', 'Longding', 'Lower Dibang Valley', 'Lower Siang', 'Lower Subansiri', 'Namsai', 'Pakke Kessang', 'Papum Pare', 'Shi Yomi', 'Siang', 'Tawang', 'Tirap', 'Upper Dibang Valley', 'Upper Siang', 'Upper Subansiri', 'West Kameng', 'West Siang'],
'AS' : ['Baksa', 'Barpeta', 'Biswanath', 'Bongaigaon', 'Cachar', 'Charaideo', 'Chirang', 'Darrang', 'Dhemaji', 'Dhubri', 'Dibrugarh', 'Dima Hasao', 'Goalpara', 'Golaghat', 'Hailakandi', 'Hojai', 'Jorhat', 'Kamrup', 'Kamrup Metropolitan', 'Karbi Anglong', 'Karimganj', 'Kokrajhar', 'Lakhimpur', 'Majuli', 'Morigaon', 'Nagaon', 'Nalbari', 'Sivasagar', 'Sonitpur', 'South Salmara Mankachar', 'Tinsukia', 'Udalguri', 'West Karbi Anglong'],
'BR' : ['Araria', 'Arwal', 'Aurangabad', 'Banka', 'Begusarai', 'Bhagalpur', 'Bhojpur', 'Buxar', 'Darbhanga', 'East Champaran', 'Gaya', 'Gopalganj', 'Jamui', 'Jehanabad', 'Kaimur', 'Katihar', 'Khagaria', 'Kishanganj', 'Lakhisarai', 'Madhepura', 'Madhubani', 'Munger', 'Muzaffarpur', 'Nalanda', 'Nawada', 'Patna', 'Purnia', 'Rohtas', 'Saharsa', 'Samastipur', 'Saran', 'Sheikhpura', 'Sheohar', 'Sitamarhi', 'Siwan', 'Supaul', 'Vaishali', 'West Champaran'],
'CH' : ['Chandigarh'],
'CT' : ['Balod', 'Baloda Bazar', 'Balrampur', 'Bametara', 'Bastar', 'Bijapur', 'Bilaspur', 'Dakshin Bastar Dantewada', 'Dhamtari', 'Durg', 'Gariaband', 'Gaurela Pendra Marwahi', 'Janjgir Champa', 'Jashpur', 'Kabeerdham', 'Kondagaon', 'Korba', 'Koriya', 'Mahasamund', 'Mungeli', 'Narayanpur', 'Raigarh', 'Raipur', 'Rajnandgaon', 'Sukma', 'Surajpur', 'Surguja', 'Uttar Bastar Kanker'],
'DNDD': ['Dadra And Nagar Haveli And Daman And Diu'],
'DL' : ['Delhi'],
'GA' : ['North Goa', 'South Goa'],
'GJ' : ['Ahmedabad', 'Amreli', 'Anand', 'Aravalli', 'Banaskantha', 'Bharuch', 'Bhavnagar', 'Botad', 'Chhota Udaipur', 'Dahod', 'Dang', 'Devbhumi Dwarka', 'Gandhinagar', 'Gir Somnath', 'Jamnagar', 'Junagadh', 'Kheda', 'Kutch', 'Mahisagar', 'Mehsana', 'Morbi', 'Narmada', 'Navsari', 'Panchmahal', 'Patan', 'Porbandar', 'Rajkot', 'Sabarkantha', 'Surat', 'Surendranagar', 'Tapi', 'Vadodara', 'Valsad'],
'HR' : ['Ambala', 'Bhiwani', 'Charkhi Dadri', 'Faridabad', 'Fatehabad', 'Gurugram', 'Hisar', 'Jhajjar', 'Jind', 'Kaithal', 'Karnal', 'Kurukshetra', 'Mahendragarh', 'Nuh', 'Palwal', 'Panchkula', 'Panipat', 'Rewari', 'Rohtak', 'Sirsa', 'Sonipat', 'Yamunanagar'],
'HP' : ['Bilaspur', 'Chamba', 'Hamirpur', 'Kangra', 'Kinnaur', 'Kullu', 'Lahaul And Spiti', 'Mandi', 'Shimla', 'Sirmaur', 'Solan', 'Una'],
'JK' : ['Anantnag', 'Bandipora', 'Baramulla', 'Budgam', 'Doda', 'Ganderbal', 'Jammu', 'Kathua', 'Kishtwar', 'Kulgam', 'Kupwara', 'Pulwama', 'Punch', 'Rajouri', 'Ramban', 'Reasi', 'Samba', 'Shopiyan', 'Srinagar', 'Udhampur'],
'JH' : ['Bokaro', 'Chatra', 'Deoghar', 'Dhanbad', 'Dumka', 'East Singhbhum', 'Garhwa', 'Giridih', 'Godda', 'Gumla', 'Hazaribagh', 'Jamtara', 'Khunti', 'Koderma', 'Latehar', 'Lohardaga', 'Pakur', 'Palamu', 'Ramgarh', 'Ranchi', 'Sahibganj', 'Saraikela-Kharsawan', 'Simdega', 'West Singhbhum'],
'KA' : ['Bagalkote', 'Ballari', 'Belagavi', 'Bengaluru Rural', 'Bengaluru Urban', 'Bidar', 'Chamarajanagara', 'Chikkaballapura', 'Chikkamagaluru', 'Chitradurga', 'Dakshina Kannada', 'Davanagere', 'Dharwad', 'Gadag', 'Hassan', 'Haveri', 'Kalaburagi', 'Kodagu', 'Kolar', 'Koppal', 'Mandya', 'Mysuru', 'Raichur', 'Ramanagara', 'Shivamogga', 'Tumakuru', 'Udupi', 'Uttara Kannada', 'Vijayapura', 'Yadgir'],
'KL' : ['Alappuzha', 'Ernakulam', 'Idukki', 'Kannur', 'Kasaragod', 'Kollam', 'Kottayam', 'Kozhikode', 'Malappuram', 'Palakkad', 'Pathanamthitta', 'Thiruvananthapuram', 'Thrissur', 'Wayanad'],
'LA' : ['Kargil', 'Leh'],
'MP' : ['Agar Malwa', 'Alirajpur', 'Anuppur', 'Ashoknagar', 'Balaghat', 'Barwani', 'Betul', 'Bhind', 'Bhopal', 'Burhanpur', 'Chhatarpur', 'Chhindwara', 'Damoh', 'Datia', 'Dewas', 'Dhar', 'Dindori', 'Guna', 'Gwalior', 'Harda', 'Hoshangabad', 'Indore', 'Jabalpur', 'Jhabua', 'Katni', 'Khandwa', 'Khargone', 'Mandla', 'Mandsaur', 'Morena', 'Narsinghpur', 'Neemuch', 'Niwari', 'Panna', 'Raisen', 'Rajgarh', 'Ratlam', 'Rewa', 'Sagar', 'Satna', 'Sehore', 'Seoni', 'Shahdol', 'Shajapur', 'Sheopur', 'Shivpuri', 'Sidhi', 'Singrauli', 'Tikamgarh', 'Ujjain', 'Umaria', 'Vidisha'],
'MH' : ['Ahmednagar', 'Akola', 'Amravati', 'Aurangabad', 'Beed', 'Bhandara', 'Buldhana', 'Chandrapur', 'Dhule', 'Gadchiroli', 'Gondia', 'Hingoli', 'Jalgaon', 'Jalna', 'Kolhapur', 'Latur', 'Mumbai', 'Nagpur', 'Nanded', 'Nandurbar', 'Nashik', 'Osmanabad', 'Palghar', 'Parbhani', 'Pune', 'Raigad', 'Ratnagiri', 'Sangli', 'Satara', 'Sindhudurg', 'Solapur', 'Thane', 'Wardha', 'Washim', 'Yavatmal'],
'MN' : ['Manipur'],
'ML' : ['East Garo Hills', 'East Jaintia Hills', 'East Khasi Hills', 'North Garo Hills', 'Ribhoi', 'South Garo Hills', 'South West Garo Hills', 'South West Khasi Hills', 'West Garo Hills', 'West Jaintia Hills', 'West Khasi Hills'],
'MZ' : ['Mizoram'],
'NL' : ['Dimapur', 'Kiphire', 'Kohima', 'Longleng', 'Mokokchung', 'Mon', 'Peren', 'Phek', 'Tuensang', 'Wokha', 'Zunheboto'],
'OR' : ['Angul', 'Balangir', 'Balasore', 'Bargarh', 'Bhadrak', 'Boudh', 'Cuttack', 'Deogarh', 'Dhenkanal', 'Gajapati', 'Ganjam', 'Jagatsinghpur', 'Jajpur', 'Jharsuguda', 'Kalahandi', 'Kandhamal', 'Kendrapara', 'Kendujhar', 'Khordha', 'Koraput', 'Malkangiri', 'Mayurbhanj', 'Nabarangapur', 'Nayagarh', 'Nuapada', 'Puri', 'Rayagada', 'Sambalpur', 'Subarnapur', 'Sundargarh'],
'PY' : ['Karaikal', 'Mahe', 'Puducherry', 'Yanam'],
'PB' : ['Amritsar', 'Barnala', 'Bathinda', 'Faridkot', 'Fatehgarh Sahib', 'Fazilka', 'Ferozepur', 'Gurdaspur', 'Hoshiarpur', 'Jalandhar', 'Kapurthala', 'Ludhiana', 'Mansa', 'Moga', 'Pathankot', 'Patiala', 'Rupnagar', 'S.A.S. Nagar', 'Sangrur', 'Shahid Bhagat Singh Nagar', 'Sri Muktsar Sahib', 'Tarn Taran'],
'RJ' : ['Ajmer', 'Alwar', 'Banswara', 'Baran', 'Barmer', 'Bharatpur', 'Bhilwara', 'Bikaner', 'Bundi', 'Chittorgarh', 'Churu', 'Dausa', 'Dholpur', 'Dungarpur', 'Ganganagar', 'Hanumangarh', 'Jaipur', 'Jaisalmer', 'Jalore', 'Jhalawar', 'Jhunjhunu', 'Jodhpur', 'Karauli', 'Kota', 'Nagaur', 'Pali', 'Pratapgarh', 'Rajsamand', 'Sawai Madhopur', 'Sikar', 'Sirohi', 'Tonk', 'Udaipur'],
'SK' : ['East Sikkim', 'North Sikkim', 'South Sikkim', 'West Sikkim'],
'TN' : ['Ariyalur', 'Chengalpattu', 'Chennai', 'Coimbatore', 'Cuddalore', 'Dharmapuri', 'Dindigul', 'Erode', 'Kallakurichi', 'Kancheepuram', 'Kanyakumari', 'Karur', 'Krishnagiri', 'Madurai', 'Nagapattinam', 'Namakkal', 'Nilgiris', 'Perambalur', 'Pudukkottai', 'Ramanathapuram', 'Ranipet', 'Salem', 'Sivaganga', 'Tenkasi', 'Thanjavur', 'Theni', 'Thiruvallur', 'Thiruvarur', 'Thoothukkudi', 'Tiruchirappalli', 'Tirunelveli', 'Tirupathur', 'Tiruppur', 'Tiruvannamalai', 'Vellore', 'Viluppuram', 'Virudhunagar'],
'TG' : ['Adilabad', 'Bhadradri Kothagudem', 'Hyderabad', 'Jagtial', 'Jangaon', 'Jayashankar Bhupalapally', 'Jogulamba Gadwal', 'Kamareddy', 'Karimnagar', 'Khammam', 'Komaram Bheem', 'Mahabubabad', 'Mahabubnagar', 'Mancherial', 'Medak', 'Medchal Malkajgiri', 'Mulugu', 'Nagarkurnool', 'Nalgonda', 'Narayanpet', 'Nirmal', 'Nizamabad', 'Peddapalli', 'Rajanna Sircilla', 'Ranga Reddy', 'Sangareddy', 'Siddipet', 'Suryapet', 'Vikarabad', 'Wanaparthy', 'Warangal Rural', 'Warangal Urban', 'Yadadri Bhuvanagiri'],
'TR' : ['Dhalai', 'Gomati', 'Khowai', 'North Tripura', 'Sipahijala', 'South Tripura', 'Unokoti', 'West Tripura'],
'UP' : ['Agra', 'Aligarh', 'Ambedkar Nagar', 'Amethi', 'Amroha', 'Auraiya', 'Ayodhya', 'Azamgarh', 'Baghpat', 'Bahraich', 'Ballia', 'Balrampur', 'Banda', 'Barabanki', 'Bareilly', 'Basti', 'Bhadohi', 'Bijnor', 'Budaun', 'Bulandshahr', 'Chandauli', 'Chitrakoot', 'Deoria', 'Etah', 'Etawah', 'Farrukhabad', 'Fatehpur', 'Firozabad', 'Gautam Buddha Nagar', 'Ghaziabad', 'Ghazipur', 'Gonda', 'Gorakhpur', 'Hamirpur', 'Hapur', 'Hardoi', 'Hathras', 'Jalaun', 'Jaunpur', 'Jhansi', 'Kannauj', 'Kanpur Dehat', 'Kanpur Nagar', 'Kasganj', 'Kaushambi', 'Kushinagar', 'Lakhimpur Kheri', 'Lalitpur', 'Lucknow', 'Maharajganj', 'Mahoba', 'Mainpuri', 'Mathura', 'Mau', 'Meerut', 'Mirzapur', 'Moradabad', 'Muzaffarnagar', 'Pilibhit', 'Pratapgarh', 'Prayagraj', 'Rae Bareli', 'Rampur', 'Saharanpur', 'Sambhal', 'Sant Kabir Nagar', 'Shahjahanpur', 'Shamli', 'Shrawasti', 'Siddharthnagar', 'Sitapur', 'Sonbhadra', 'Sultanpur', 'Unnao', 'Varanasi'],
'UT' : ['Almora', 'Bageshwar', 'Chamoli', 'Champawat', 'Dehradun', 'Haridwar', 'Nainital', 'Pauri Garhwal', 'Pithoragarh', 'Rudraprayag', 'Tehri Garhwal', 'Udham Singh Nagar', 'Uttarkashi'],
'WB' : ['Alipurduar', 'Bankura', 'Birbhum', 'Cooch Behar', 'Dakshin Dinajpur', 'Darjeeling', 'Hooghly', 'Howrah', 'Jalpaiguri', 'Jhargram', 'Kalimpong', 'Kolkata', 'Malda', 'Murshidabad', 'Nadia', 'North 24 Parganas', 'Paschim Bardhaman', 'Paschim Medinipur', 'Purba Bardhaman', 'Purba Medinipur', 'Purulia', 'South 24 Parganas', 'Uttar Dinajpur']
}
AUDIENCE_ROOT = os.environ["GCF_URL"]
METADATA_ROOT = os.environ["METADATA"]
START_DATE = datetime.datetime(2021, 6, 1)
VAX_ALLOC_URL = "vaccine-allocation-sipjq3uhla-uc.a.run.app"
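# CloudFunction extends SimpleHttpOperator with Google-signed authentication:
# execute() first fetches an identity token for the target audience from the
# metadata server (via the "Metadata-Flavor: Google" header) and then issues
# the request through HttpHook with that token as a Bearer Authorization header.
# CloudRun reuses the same flow but derives the token audience from a Cloud Run
# service URL instead of a Cloud Functions endpoint.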
class CloudFunction(SimpleHttpOperator):
    """Calls an authenticated Cloud Function, using an identity token from the GCE metadata server."""
    ui_color = "#2B6CE6"
    ui_fgcolor = "#FFFFFF"

    def get_metadata_url(self):
        # identity-token endpoint, with the target function's URL as the audience
        return f"{METADATA_ROOT}{AUDIENCE_ROOT}/{self.endpoint}"

    def execute(self, context):
        # fetch a short-lived identity token, then send the payload with it as a bearer token
        token = requests.get(self.get_metadata_url(), headers = {"Metadata-Flavor": "Google"}).text
        HttpHook(self.method, http_conn_id = self.http_conn_id)\
            .run(self.endpoint, self.data, {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}, self.extra_options)
class CloudRun(CloudFunction):
    """Variant of CloudFunction that targets a Cloud Run service instead of a Cloud Function."""
    ui_color = "#175AE1"

    def __init__(self, run_url, conn_id, *args, **kwargs):
        # NOTE: this Connection object is only constructed here; it still needs to be registered
        # with Airflow (metadata DB or an AIRFLOW_CONN_* environment variable) for the HttpHook
        # in execute() to resolve conn_id.
        conn = Connection(
            conn_id = conn_id,
            conn_type = "http",
            host = run_url,
            schema = "https"
        )
        kwargs["http_conn_id"] = conn_id
        super(CloudRun, self).__init__(*args, **kwargs)
        self.run_url = run_url

    def get_metadata_url(self):
        # for Cloud Run the audience is the service's own https URL
        return f"{METADATA_ROOT}https://{self.run_url}"
def epi(state_code, district):
return CloudRun(
task_id = f"epi_{state_code}_" + district.replace(" ", ""),
method = "POST",
endpoint = "/epi",
start_date = START_DATE,
run_url = VAX_ALLOC_URL,
conn_id = "vaccine-allocation-cloud-run",
data = json.dumps({"state_code": state_code, "district": district}),
execution_timeout = datetime.timedelta(minutes=30)
)
def tev(state_code, district):
return CloudRun(
task_id = f"tev_{state_code}_" + district.replace(" ", ""),
method = "POST",
endpoint = "/tev",
start_date = START_DATE,
run_url = VAX_ALLOC_URL,
conn_id = "vaccine-allocation-cloud-run",
data = json.dumps({"state_code": state_code, "district": district}),
execution_timeout = datetime.timedelta(minutes=30)
)
def agg(state_code = None):
return CloudRun(
task_id = f"agg_" + (state_code if state_code else "NATL"),
method = "POST",
endpoint = "/agg",
start_date = START_DATE,
run_url = VAX_ALLOC_URL,
conn_id = "vaccine-allocation-cloud-run",
data = json.dumps({"state_code": state_code} if state_code else {}),
execution_timeout = datetime.timedelta(minutes=30)
)
def viz(state_code = None, district = None):
return CloudRun(
task_id = f"viz_" + (state_code if state_code else "NATL") + (f"_{district.replace(' ', '')}" if district else ""),
method = "POST",
endpoint = "/viz",
start_date = START_DATE,
run_url = VAX_ALLOC_URL,
conn_id = "vaccine-allocation-cloud-run",
data = json.dumps({"state_code": state_code, "district": district}),
execution_timeout = datetime.timedelta(minutes=30)
)
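
# Task graph assembled below, per state XX and district D:
#   root -> XX_root -> epi_XX_D -> tev_XX_D -> { agg_XX, viz_XX_D }
#   agg_XX -> { viz_XX, agg_NATL };  agg_NATL -> viz_NATL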
with models.DAG("vaccine-allocation", schedule_interval = "0 0 1 1 1", catchup = False, concurrency = 50) as dag:
root = DummyOperator(task_id = "root", start_date = START_DATE)
natl_agg = agg()
natl_agg >> viz()
for (state_code, districts) in state_districts.items():
state_root = DummyOperator(task_id = state_code + "_root", start_date = START_DATE)
root >> state_root
state_agg = agg(state_code = state_code)
state_agg >> [viz(state_code = state_code), natl_agg]
for district in districts:
state_root >> epi(state_code, district) >> tev(state_code, district) >> [state_agg, viz(state_code = state_code, district = district)]
|
# coding: utf-8
from __future__ import unicode_literals
from ..utils import determine_ext, js_to_json, qualities
from .common import InfoExtractor
from .youtube import YoutubeIE
class Tele13IE(InfoExtractor):
_VALID_URL = r"^https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)"
_TESTS = [
{
"url": "http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda",
"md5": "4cb1fa38adcad8fea88487a078831755",
"info_dict": {
"id": "el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda",
"ext": "mp4",
"title": "El círculo de hierro de Michelle Bachelet en su regreso a La Moneda",
},
"params": {
# HTTP Error 404: Not Found
"skip_download": True,
},
},
{
"url": "http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok",
"md5": "867adf6a3b3fef932c68a71d70b70946",
"info_dict": {
"id": "rOoKv2OMpOw",
"ext": "mp4",
"title": "Shooting star seen on 7-Sep-2015",
"description": "md5:7292ff2a34b2f673da77da222ae77e1e",
"uploader": "Porjai Jaturongkhakun",
"upload_date": "20150906",
"uploader_id": "UCnLY_3ezwNcDSC_Wc6suZxw",
},
"add_ie": ["Youtube"],
},
]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
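        # the page embeds a JWPlayer setup object; title, thumbnail and the source list are read from it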
setup_js = self._search_regex(
r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)", webpage, "setup code"
)
sources = self._parse_json(
self._search_regex(r"sources\s*:\s*(\[[^\]]+\])", setup_js, "sources"),
display_id,
js_to_json,
)
preference = qualities(["Móvil", "SD", "HD"])
formats = []
urls = []
for f in sources:
format_url = f["file"]
if format_url and format_url not in urls:
ext = determine_ext(format_url)
if ext == "m3u8":
formats.extend(
self._extract_m3u8_formats(
format_url,
display_id,
"mp4",
"m3u8_native",
m3u8_id="hls",
fatal=False,
)
)
elif YoutubeIE.suitable(format_url):
return self.url_result(format_url, "Youtube")
else:
formats.append(
{
"url": format_url,
"format_id": f.get("label"),
"preference": preference(f.get("label")),
"ext": ext,
}
)
urls.append(format_url)
self._sort_formats(formats)
return {
"id": display_id,
"title": self._search_regex(r'title\s*:\s*"([^"]+)"', setup_js, "title"),
"description": self._html_search_meta(
"description", webpage, "description"
),
"thumbnail": self._search_regex(
r'image\s*:\s*"([^"]+)"', setup_js, "thumbnail", default=None
),
"formats": formats,
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
class Relu(Base):
@staticmethod
def export():
node = onnx.helper.make_node(
'Relu',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
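        # reference output: Relu(x) = max(x, 0)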
y = np.clip(x, 0, np.inf)
expect(node, inputs=[x], outputs=[y],
name='test_relu')
|
import React, { useState, useEffect, useMemo, useCallback } from 'react';
import { FormattedMessage } from 'react-intl';
import { subMinutes, startOfMinute } from 'date-fns';
import firstBy from 'thenby';
import Page from 'components/layout/Page';
import GridLayout, { GridRow, GridColumn } from 'components/layout/GridLayout';
import RealtimeChart from 'components/metrics/RealtimeChart';
import RealtimeLog from 'components/metrics/RealtimeLog';
import RealtimeHeader from 'components/metrics/RealtimeHeader';
import WorldMap from 'components/common/WorldMap';
import DataTable from 'components/metrics/DataTable';
import RealtimeViews from 'components/metrics/RealtimeViews';
import useFetch from 'hooks/useFetch';
import useLocale from 'hooks/useLocale';
import useCountryNames from 'hooks/useCountryNames';
import { percentFilter } from 'lib/filters';
import { TOKEN_HEADER, REALTIME_RANGE, REALTIME_INTERVAL } from 'lib/constants';
import styles from './RealtimeDashboard.module.css';
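
// Merge incoming realtime records into the current state: append records whose __id is new,
// then drop anything created before `time` (the start of the realtime window).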
function mergeData(state, data, time) {
const ids = state.map(({ __id }) => __id);
return state
.concat(data.filter(({ __id }) => !ids.includes(__id)))
.filter(({ created_at }) => new Date(created_at).getTime() >= time);
}
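
// Keep only records belonging to the selected website.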
function filterWebsite(data, id) {
return data.filter(({ website_id }) => website_id === id);
}
export default function RealtimeDashboard() {
const { locale } = useLocale();
const countryNames = useCountryNames(locale);
const [data, setData] = useState();
const [websiteId, setWebsiteId] = useState(0);
const { data: init, loading } = useFetch('/api/realtime/init', {
params: { page: 0, limit: 10 },
});
const { data: updates } = useFetch('/api/realtime/update', {
params: { start_at: data?.timestamp },
disabled: !init?.websites?.length || !data,
interval: REALTIME_INTERVAL,
headers: { [TOKEN_HEADER]: init?.token },
});
  const renderCountryName = useCallback(
    ({ x }) => <span className={locale}>{countryNames[x]}</span>,
    [countryNames, locale], // locale is read directly for the className, so include it in the deps
  );
const realtimeData = useMemo(() => {
if (data) {
const { pageviews, sessions, events } = data;
if (websiteId) {
return {
pageviews: filterWebsite(pageviews, websiteId),
sessions: filterWebsite(sessions, websiteId),
events: filterWebsite(events, websiteId),
};
}
}
return data;
}, [data, websiteId]);
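
  // Roll up sessions into per-country visitor counts ({ x: country, y: count }),
  // sorted descending and passed through percentFilter for the table and map.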
const countries = useMemo(() => {
if (realtimeData?.sessions) {
return percentFilter(
realtimeData.sessions
.reduce((arr, { country }) => {
if (country) {
const row = arr.find(({ x }) => x === country);
if (!row) {
arr.push({ x: country, y: 1 });
} else {
row.y += 1;
}
}
return arr;
}, [])
.sort(firstBy('y', -1)),
);
}
return [];
}, [realtimeData?.sessions]);
useEffect(() => {
if (init && !data) {
const { websites, data } = init;
setData({ websites, ...data });
}
}, [init]);
useEffect(() => {
if (updates) {
const { pageviews, sessions, events, timestamp } = updates;
const time = subMinutes(startOfMinute(new Date()), REALTIME_RANGE).getTime();
setData(state => ({
...state,
pageviews: mergeData(state.pageviews, pageviews, time),
sessions: mergeData(state.sessions, sessions, time),
events: mergeData(state.events, events, time),
timestamp,
}));
}
}, [updates]);
if (!init || !data || loading) {
return null;
}
const { websites } = data;
return (
<Page>
<RealtimeHeader
websites={websites}
websiteId={websiteId}
data={{ ...realtimeData, countries }}
onSelect={setWebsiteId}
/>
<div className={styles.chart}>
<RealtimeChart
websiteId={websiteId}
data={realtimeData}
unit="minute"
records={REALTIME_RANGE}
/>
</div>
<GridLayout>
<GridRow>
<GridColumn xs={12} lg={4}>
<RealtimeViews websiteId={websiteId} data={realtimeData} websites={websites} />
</GridColumn>
<GridColumn xs={12} lg={8}>
<RealtimeLog websiteId={websiteId} data={realtimeData} websites={websites} />
</GridColumn>
</GridRow>
<GridRow>
<GridColumn xs={12} lg={4}>
<DataTable
title={<FormattedMessage id="metrics.countries" defaultMessage="Countries" />}
metric={<FormattedMessage id="metrics.visitors" defaultMessage="Visitors" />}
data={countries}
renderLabel={renderCountryName}
height={500}
/>
</GridColumn>
<GridColumn xs={12} lg={8}>
<WorldMap data={countries} />
</GridColumn>
</GridRow>
</GridLayout>
</Page>
);
}
|
/*
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
*
* Copyright (c) 2016 Blizzard Entertainment
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE.txt file for details.
*/
#ifndef SRC_TOPIC_H_
#define SRC_TOPIC_H_
#include <napi.h>
#include <string>
#include "rdkafkacpp.h"
#include "src/config.h"
namespace NodeKafka {
class Topic {
public:
Baton toRDKafkaTopic(Connection *handle);
Topic(std::string, RdKafka::Conf *);
~Topic();
std::string name();
protected:
// TopicConfig * config_;
std::string errstr;
private:
std::string m_topic_name;
RdKafka::Conf * m_config;
};
} // namespace NodeKafka
#endif // SRC_TOPIC_H_
|