code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Only the ``local`` class is part of this module's public API.
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
current_thread().__dict__[key] = dict
return self
def _patch(self):
    """Install the calling thread's dict as ``self.__dict__``.

    On a thread's first access, the dict is created and registered on the
    thread before any subclass ``__init__`` runs, so the initializer can
    never be invoked twice for the same thread.
    """
    key = object.__getattribute__(self, '_local__key')
    thread_dict = current_thread().__dict__
    d = thread_dict.get(key)
    if d is not None:
        # Fast path: this thread has touched the object before.
        object.__setattr__(self, '__dict__', d)
        return
    # First access from this thread: create and register its dict, swap it
    # in, then replay the saved constructor arguments if a subclass defined
    # an __init__ of its own.
    d = {}
    thread_dict[key] = d
    object.__setattr__(self, '__dict__', d)
    cls = type(self)
    if cls.__init__ is not object.__init__:
        args, kw = object.__getattribute__(self, '_local__args')
        cls.__init__(self, *args, **kw)
class local(_localbase):
    """Thread-local data container.

    Every attribute access first swaps in the dict belonging to the
    calling thread (via ``_patch``) and then delegates to the normal
    object machinery.  All three hooks serialize on the per-instance
    lock so concurrent first-touch initialization is safe.
    """

    def __getattribute__(self, name):
        with object.__getattribute__(self, '_local__lock'):
            _patch(self)
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        with object.__getattribute__(self, '_local__lock'):
            _patch(self)
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        with object.__getattribute__(self, '_local__lock'):
            _patch(self)
            return object.__delattr__(self, name)

    def __del__(self):
        import threading

        key = object.__getattribute__(self, '_local__key')

        try:
            # Use the non-locking enumeration API: __del__ may be invoked
            # by the cyclic GC at any point, possibly while the lock is
            # already held.
            active_threads = threading._enumerate()
        except:
            # Enumerating threads can fail during interpreter shutdown;
            # assume there is nothing left to clean up.
            return

        for thread in active_threads:
            try:
                thread_dict = thread.__dict__
            except AttributeError:
                # Thread is being torn down -- nothing to remove.
                continue
            if key in thread_dict:
                try:
                    del thread_dict[key]
                except KeyError:
                    # Another path already removed it; fine.
                    pass
from threading import current_thread, RLock
|
unknown
|
codeparrot/codeparrot-clean
| ||
--- !ELF
FileHeader:
Class: ELFCLASS64
Data: ELFDATA2LSB
Type: ET_EXEC
Machine: EM_X86_64
Entry: 0x4010A0
ProgramHeaders:
- Type: PT_PHDR
Flags: [ PF_R ]
VAddr: 0x400040
Align: 0x8
Offset: 0x40
- Type: PT_INTERP
Flags: [ PF_R ]
FirstSec: .interp
LastSec: .interp
VAddr: 0x400444
Offset: 0x444
- Type: PT_LOAD
Flags: [ PF_X, PF_R ]
FirstSec: .init
LastSec: .fini
VAddr: 0x401000
Align: 0x1000
Offset: 0x1000
- Type: PT_LOAD
Flags: [ PF_R ]
FirstSec: .rodata
LastSec: .rodata
VAddr: 0x402000
Align: 0x1000
Offset: 0x2000
- Type: PT_LOAD
Flags: [ PF_W, PF_R ]
FirstSec: .init_array
LastSec: .bss
VAddr: 0x403DD8
Align: 0x1000
Offset: 0x2DD8
- Type: PT_DYNAMIC
Flags: [ PF_W, PF_R ]
FirstSec: .dynamic
LastSec: .dynamic
VAddr: 0x403DE8
Align: 0x8
Offset: 0x2DE8
- Type: PT_NOTE
Flags: [ PF_R ]
FirstSec: .note.gnu.build-id
LastSec: .note.ABI-tag
VAddr: 0x400400
Align: 0x4
Offset: 0x400
Sections:
- Name: .note.gnu.build-id
Type: SHT_NOTE
Flags: [ SHF_ALLOC ]
Address: 0x400400
AddressAlign: 0x4
Offset: 0x400
Notes:
- Name: GNU
Desc: 3C34F7D1612996940C48F98DC272543BC3C9C956
Type: NT_PRPSINFO
- Name: .note.ABI-tag
Type: SHT_NOTE
Flags: [ SHF_ALLOC ]
Address: 0x400424
AddressAlign: 0x4
Notes:
- Name: GNU
Desc: '00000000030000000200000000000000'
Type: NT_VERSION
- Name: .interp
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC ]
Address: 0x400444
AddressAlign: 0x1
Content: 2F6C696236342F6C642D6C696E75782D7838362D36342E736F2E3200
- Name: .gnu.hash
Type: SHT_GNU_HASH
Flags: [ SHF_ALLOC ]
Address: 0x400460
Link: .dynsym
AddressAlign: 0x8
Header:
SymNdx: 0x7
Shift2: 0x6
BloomFilter: [ 0x810000 ]
HashBuckets: [ 0x7, 0x0 ]
HashValues: [ 0x6DCE65D1 ]
- Name: .dynsym
Type: SHT_DYNSYM
Flags: [ SHF_ALLOC ]
Address: 0x400488
Link: .dynstr
AddressAlign: 0x8
- Name: .dynstr
Type: SHT_STRTAB
Flags: [ SHF_ALLOC ]
Address: 0x400548
AddressAlign: 0x1
- Name: .gnu.version
Type: SHT_GNU_versym
Flags: [ SHF_ALLOC ]
Address: 0x4005F2
Link: .dynsym
AddressAlign: 0x2
Entries: [ 0, 2, 3, 1, 1, 4, 1, 2 ]
- Name: .gnu.version_r
Type: SHT_GNU_verneed
Flags: [ SHF_ALLOC ]
Address: 0x400608
Link: .dynstr
AddressAlign: 0x8
Dependencies:
- Version: 1
File: libc.so.6
Entries:
- Name: GLIBC_2.3.4
Hash: 157882740
Flags: 0
Other: 4
- Name: GLIBC_2.34
Hash: 110530996
Flags: 0
Other: 3
- Name: GLIBC_2.2.5
Hash: 157882997
Flags: 0
Other: 2
- Name: .init
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
Address: 0x401000
AddressAlign: 0x4
Offset: 0x1000
Content: F30F1EFA4883EC08488B05D92F00004885C07402FFD04883C408C3
- Name: .plt.sec
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
Address: 0x401060
AddressAlign: 0x10
EntSize: 0x10
Content: F30F1EFAF2FF25AD2F00000F1F440000F30F1EFAF2FF25A52F00000F1F440000
- Name: .text
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
Address: 0x401080
AddressAlign: 0x10
Content: F30F1EFA4883EC0831C0E80101000031C04883C408C3662E0F1F840000000000F30F1EFA31ED4989D15E4889E24883E4F050544531C031C9488D3DC1FFFFFFFF15132F0000F4662E0F1F840000000000488D3D612F0000488D055A2F00004839F87415488B05F62E00004885C07409FFE00F1F8000000000C30F1F8000000000488D3D312F0000488D352A2F00004829FE4889F048C1EE3F48C1F8034801C648D1FE7414488B05C52E00004885C07408FFE0660F1F440000C30F1F8000000000F30F1EFA803DED2E000000752B5548833DA22E0000004889E5740C488B3DCE2E0000E8E9FEFFFFE864FFFFFFC605C52E0000015DC30F1F00C30F1F8000000000F30F1EFAE977FFFFFF0F1F8000000000F30F1EFA415455488D2D660E000053488D1D6AF2FFFF4C8D6314660F1F4400000FB6134889EEBF0100000031C04883C301E8AAFEFFFF4C39E375E55BBF0A0000005D415CE987FEFFFF
- Name: .fini
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
Address: 0x4011DC
AddressAlign: 0x4
Content: F30F1EFA4883EC084883C408C3
- Name: .rodata
Type: SHT_PROGBITS
Flags: [ SHF_ALLOC ]
Address: 0x402000
AddressAlign: 0x4
Offset: 0x2000
Content: '0100020025303268687800'
- Name: .init_array
Type: SHT_INIT_ARRAY
Flags: [ SHF_WRITE, SHF_ALLOC ]
Address: 0x403DD8
AddressAlign: 0x8
EntSize: 0x8
Offset: 0x2DD8
Content: '8011400000000000'
- Name: .fini_array
Type: SHT_FINI_ARRAY
Flags: [ SHF_WRITE, SHF_ALLOC ]
Address: 0x403DE0
AddressAlign: 0x8
EntSize: 0x8
Content: '4011400000000000'
- Name: .dynamic
Type: SHT_DYNAMIC
Flags: [ SHF_WRITE, SHF_ALLOC ]
Address: 0x403DE8
Link: .dynstr
AddressAlign: 0x8
Entries:
- Tag: DT_NEEDED
Value: 0x37
- Tag: DT_INIT
Value: 0x401000
- Tag: DT_FINI
Value: 0x4011DC
- Tag: DT_INIT_ARRAY
Value: 0x403DD8
- Tag: DT_INIT_ARRAYSZ
Value: 0x8
- Tag: DT_FINI_ARRAY
Value: 0x403DE0
- Tag: DT_FINI_ARRAYSZ
Value: 0x8
- Tag: DT_GNU_HASH
Value: 0x400460
- Tag: DT_STRTAB
Value: 0x400548
- Tag: DT_SYMTAB
Value: 0x400488
- Tag: DT_STRSZ
Value: 0xA9
- Tag: DT_SYMENT
Value: 0x18
- Tag: DT_DEBUG
Value: 0x0
- Tag: DT_PLTGOT
Value: 0x404000
- Tag: DT_PLTRELSZ
Value: 0x30
- Tag: DT_PLTREL
Value: 0x7
- Tag: DT_FLAGS
Value: 0x8
- Tag: DT_FLAGS_1
Value: 0x8000001
- Tag: DT_VERNEED
Value: 0x400608
- Tag: DT_VERNEEDNUM
Value: 0x1
- Tag: DT_VERSYM
Value: 0x4005F2
- Tag: DT_RELACOUNT
Value: 0x3
- Tag: DT_NULL
Value: 0x0
- Name: .data
Type: SHT_PROGBITS
Flags: [ SHF_WRITE, SHF_ALLOC ]
Address: 0x404028
AddressAlign: 0x8
Content: '00000000000000003040400000000000'
- Name: .tm_clone_table
Type: SHT_PROGBITS
Flags: [ SHF_WRITE, SHF_ALLOC ]
Address: 0x404038
AddressAlign: 0x8
- Name: .bss
Type: SHT_NOBITS
Flags: [ SHF_WRITE, SHF_ALLOC ]
Address: 0x404038
AddressAlign: 0x1
Size: 0x8
- Name: .rela.text
Type: SHT_RELA
Flags: [ SHF_INFO_LINK ]
Link: .symtab
AddressAlign: 0x8
Info: .text
Relocations:
- Offset: 0x40108B
Symbol: print_build_id
Type: R_X86_64_PLT32
Addend: -4
- Offset: 0x4010BB
Symbol: main
Type: R_X86_64_PC32
Addend: -4
- Offset: 0x4011A2
Symbol: build_id_note
Type: R_X86_64_PC32
Addend: 12
- Type: SectionHeaderTable
Sections:
- Name: .note.gnu.build-id
- Name: .note.ABI-tag
- Name: .interp
- Name: .gnu.hash
- Name: .dynsym
- Name: .dynstr
- Name: .gnu.version
- Name: .gnu.version_r
- Name: .init
- Name: .plt.sec
- Name: .text
- Name: .rela.text
- Name: .fini
- Name: .rodata
- Name: .init_array
- Name: .fini_array
- Name: .dynamic
- Name: .data
- Name: .tm_clone_table
- Name: .bss
- Name: .symtab
- Name: .strtab
- Name: .shstrtab
Symbols:
- Name: print_build_id
Type: STT_FUNC
Section: .text
Binding: STB_GLOBAL
Value: 0x401190
Size: 0x49
- Name: _end
Section: .bss
Binding: STB_GLOBAL
Value: 0x404040
- Name: _start
Type: STT_FUNC
Section: .text
Binding: STB_GLOBAL
Value: 0x4010A0
Size: 0x26
- Name: __bss_start
Section: .bss
Binding: STB_GLOBAL
Value: 0x404038
- Name: main
Type: STT_FUNC
Section: .text
Binding: STB_GLOBAL
Value: 0x401080
Size: 0x16
- Name: build_id_note
Index: SHN_ABS
Binding: STB_GLOBAL
Value: 0x400400
...
|
unknown
|
github
|
https://github.com/llvm/llvm-project
|
bolt/test/X86/Inputs/build_id.yaml
|
#pragma once
#include <math.h>
#include <stdint.h>
#if defined(_WIN32) && !defined (__MINGW32__)
#define inline __forceinline
#endif
/* Length of the MT19937 state array (the canonical N = 624). */
#define RK_STATE_LEN 624
#define _MT19937_N 624
/* Middle-word offset M used by the recurrence. */
#define _MT19937_M 397
/* Twist matrix constant A. */
#define MATRIX_A 0x9908b0dfUL
/* Most significant bit of a 32-bit word. */
#define UPPER_MASK 0x80000000UL
/* The 31 least significant bits. */
#define LOWER_MASK 0x7fffffffUL
typedef struct s_mt19937_state {
  uint32_t key[RK_STATE_LEN]; /* generated words, consumed by mt19937_next */
  int pos;                    /* index of the next unconsumed word in key */
} mt19937_state;
/* Seed the state from a single 32-bit value (defined in mt19937.c). */
extern void mt19937_seed(mt19937_state *state, uint32_t seed);
/* Regenerate the whole key buffer -- one "twist".  Presumably also resets
 * pos; see mt19937.c (TODO confirm). */
extern void mt19937_gen(mt19937_state *state);
/* Slightly optimized reference implementation of the Mersenne Twister */
/* Return the next raw 32-bit output of the generator.
 *
 * Refills the state buffer via mt19937_gen() once all RK_STATE_LEN words
 * have been consumed, then applies the standard MT19937 tempering
 * transform to the next word. */
static inline uint32_t mt19937_next(mt19937_state *state) {
  uint32_t y;

  if (state->pos == RK_STATE_LEN) {
    // Move to function to help inlining
    mt19937_gen(state);
  }
  y = state->key[state->pos++];

  /* Tempering (shift/mask constants from the MT19937 reference code) */
  y ^= (y >> 11);
  y ^= (y << 7) & 0x9d2c5680UL;
  y ^= (y << 15) & 0xefc60000UL;
  y ^= (y >> 18);

  return y;
}
extern void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key,
int key_length);
/* Return a 64-bit draw assembled from two consecutive 32-bit draws,
 * first draw in the high word.
 *
 * Fix: the original expression
 *   (uint64_t)mt19937_next(state) << 32 | mt19937_next(state)
 * left the order of the two stateful calls unspecified (C does not
 * sequence the operands of `|`), so the output stream could differ
 * between compilers.  The calls are now explicitly sequenced. */
static inline uint64_t mt19937_next64(mt19937_state *state) {
  uint64_t hi = mt19937_next(state);
  uint64_t lo = mt19937_next(state);
  return (hi << 32) | lo;
}
/* Convenience alias: a 32-bit draw is the generator's native output. */
static inline uint32_t mt19937_next32(mt19937_state *rng) {
  return mt19937_next(rng);
}
/* Return a uniform double in [0, 1) built from 53 random bits:
 * 27 high bits (first draw >> 5) and 26 low bits (second draw >> 6),
 * combined as (hi * 2^26 + lo) / 2^53.  The two declarator initializers
 * are sequenced, so the draw order matches the original. */
static inline double mt19937_next_double(mt19937_state *rng) {
  const int32_t hi = (int32_t)(mt19937_next(rng) >> 5);
  const int32_t lo = (int32_t)(mt19937_next(rng) >> 6);
  return (hi * 67108864.0 + lo) / 9007199254740992.0;
}
void mt19937_jump(mt19937_state *state);
|
c
|
github
|
https://github.com/numpy/numpy
|
numpy/random/src/mt19937/mt19937.h
|
from .flag import FlagMetaclass
from .value import ZERO
from .tags import has_tag, get_tag
class Filter(object, metaclass=FlagMetaclass):
    """Composable predicate over (transaction, entry) pairs.

    An instance wraps a callable and supports ``&``, ``|`` and ``~`` for
    boolean composition, plus several classmethod constructors.
    """

    flags = []

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, *args):
        return self.predicate(*args)

    def __and__(self, other):
        # Lazy conjunction: both operands are evaluated per call.
        def conjunction(*args):
            return self(*args) and other(*args)
        return Filter(conjunction)

    def __or__(self, other):
        def disjunction(*args):
            return self(*args) or other(*args)
        return Filter(disjunction)

    def __invert__(self):
        def negation(*args):
            return not self(*args)
        return Filter(negation)

    @classmethod
    def has_account(cls, account):
        """Match entries posted to exactly ``account``."""
        def predicate(transaction, entry):
            return entry.account == account
        return cls(predicate)

    @classmethod
    def matches(cls, regexp):
        """Match entries whose account name matches ``regexp``."""
        def predicate(transaction, entry):
            return regexp.search(entry.account.name) is not None
        return cls(predicate)

    @classmethod
    def parse(cls, parser, *args):
        """Default command-line hook: pass the raw arguments through."""
        return cls(*args)

    @classmethod
    def tag(cls, factory, tag, value=None):
        """Match entries whose factory-built object carries ``tag``."""
        def predicate(transaction, entry):
            obj = factory.from_entry(transaction, entry)
            return has_tag(obj, tag, value)
        return cls(predicate)
# A filter that accepts every (transaction, entry) pair.
Filter.null = Filter(lambda transaction, entry: True)
class DateFilter(Filter):
    """Base class for filters that compare entries against one date."""

    @classmethod
    def parse(cls, parser, str):
        """Build an instance from a fuzzy date string.

        Raises ValueError if the string cannot be parsed as a date.
        """
        date = parser.parse_fuzzy_date(str)
        if not date:
            raise ValueError("Invalid date")
        return cls(date)

    def __init__(self, date):
        self.date = date
class BeginFilter(DateFilter):
    """Keep entries dated on or after the cutoff (the ``begin`` flag)."""

    flag = "begin"
    args = 1

    def __call__(self, transaction, entry):
        # Inclusive lower bound on the entry's effective date.
        return entry.date(transaction) >= self.date
class EndFilter(DateFilter):
    """Keep entries dated strictly before the cutoff (the ``end`` flag)."""

    flag = "end"
    args = 1

    def __call__(self, transaction, entry):
        # Exclusive upper bound on the entry's effective date.
        return entry.date(transaction) < self.date
class ExpressionFilter(Filter):
    """Filter driven by an arbitrary Python expression (``filter`` flag)."""

    flag = "filter"
    args = 1

    @classmethod
    def parse(cls, parser, expression):
        return cls(parser, expression, parser.repo)

    def __init__(self, parser, expression, repo):
        self.parser = parser
        # Compile once up front; the code object is evaluated per entry.
        self.expression = compile(expression, "<commandline>", "eval")
        self.repo = repo

    def __call__(self, transaction, entry):
        # NOTE(review): eval of a user-supplied expression.  The expression
        # comes from the local command line, not remote input -- confirm
        # before exposing this to untrusted sources.
        namespace = {
            "transaction": SmartWrapper(transaction),
            "entry": SmartWrapper(entry.info(transaction)),
            "date": self.parse_date,
            "ZERO": ZERO}
        return eval(self.expression, namespace)

    def parse_date(self, str):
        return self.parser.parse_fuzzy_date(str)
class SmartWrapper(object):
    """Attribute proxy that falls back to tag lookup on the wrapped object."""

    def __init__(self, obj):
        self.obj = obj

    def __getattr__(self, name):
        wrapped = self.obj
        try:
            return getattr(wrapped, name)
        except AttributeError:
            # Not a real attribute; try the object's tags instead.
            return get_tag(wrapped, name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
from ansible.inventory.ini import InventoryParser as InventoryINIParser
from ansible.inventory.script import InventoryScript
__all__ = ['get_file_parser']
def get_file_parser(hostsfile, groups, loader):
    """Return an inventory parser object for ``hostsfile``.

    Executable files are handed to InventoryScript; everything else is
    tried as an INI inventory.  Returns the parser on success, or raises
    AnsibleError with the accumulated error messages when nothing could
    parse the file.
    """
    # Check whether the file starts with a shebang (#!) so that, if a
    # parser class raises, we can show a more apropos error message.
    shebang_present = False
    processed = False
    myerr = []
    parser = None

    try:
        # Only the first line is needed -- no need to slurp the whole file
        # via readlines() -- and ``with`` guarantees the handle is closed
        # even on error (the original leaked the descriptor on exceptions).
        with open(hostsfile) as inv_file:
            first_line = inv_file.readline()
        if first_line.startswith('#!'):
            shebang_present = True
    except Exception:
        # Best effort only: an unreadable file will be reported by the
        # parsers below with a better message.
        pass

    if loader.is_executable(hostsfile):
        try:
            parser = InventoryScript(loader=loader, groups=groups, filename=hostsfile)
            processed = True
        except Exception as e:
            myerr.append("The file %s is marked as executable, but failed to execute correctly. " % hostsfile + \
                "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % hostsfile)
            myerr.append(str(e))

    if not processed:
        try:
            parser = InventoryINIParser(loader=loader, groups=groups, filename=hostsfile)
            processed = True
        except Exception as e:
            if shebang_present and not loader.is_executable(hostsfile):
                myerr.append("The file %s looks like it should be an executable inventory script, but is not marked executable. " % hostsfile + \
                    "Perhaps you want to correct this with `chmod +x %s`?" % hostsfile)
            else:
                myerr.append(str(e))

    if not processed and myerr:
        raise AnsibleError('\n'.join(myerr))

    return parser
class InventoryDirectory(object):
    ''' Host inventory parser for ansible using a directory of inventories. '''

    def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
        """Recursively parse every inventory source found under ``filename``.

        Hosts and groups produced by the individual parsers are merged
        into this instance, de-duplicating objects by name.
        """
        if groups is None:
            groups = dict()

        self.names = os.listdir(filename)
        self.names.sort()
        self.directory = filename
        self.parsers = []
        self.hosts = {}
        self.groups = groups

        self._loader = loader

        for i in self.names:

            # Skip files that end with certain extensions or characters
            if any(i.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
                continue
            # Skip hidden files
            if i.startswith('.') and not i.startswith('./'):
                continue
            # These are things inside of an inventory basedir
            if i in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(self.directory, i)
            if os.path.isdir(fullpath):
                parser = InventoryDirectory(loader=loader, filename=fullpath)
            else:
                parser = get_file_parser(fullpath, self.groups, loader)
            if parser is None:
                #FIXME: needs to use display
                import warnings
                # Fixed: the warnings module has no ``warning`` function;
                # the original ``warnings.warning(...)`` raised
                # AttributeError instead of emitting a warning.
                warnings.warn("Could not find parser for %s, skipping" % fullpath)
                continue
            self.parsers.append(parser)

            # retrieve all groups and hosts from the parser and add them to
            # self, don't look at group lists yet, to avoid
            # recursion trouble, but just make sure all objects exist in self
            newgroups = parser.groups.values()
            for group in newgroups:
                for host in group.hosts:
                    self._add_host(host)
            for group in newgroups:
                self._add_group(group)

            # now check the objects lists so they contain only objects from
            # self; membership data in groups is already fine (except all &
            # ungrouped, see later), but might still reference objects not in self
            for group in self.groups.values():
                # iterate on a copy of the lists, as those lists get changed in
                # the loop
                # list with group's child group objects:
                for child in group.child_groups[:]:
                    if child != self.groups[child.name]:
                        group.child_groups.remove(child)
                        group.child_groups.append(self.groups[child.name])
                # list with group's parent group objects:
                for parent in group.parent_groups[:]:
                    if parent != self.groups[parent.name]:
                        group.parent_groups.remove(parent)
                        group.parent_groups.append(self.groups[parent.name])
                # list with group's host objects:
                for host in group.hosts[:]:
                    if host != self.hosts[host.name]:
                        group.hosts.remove(host)
                        group.hosts.append(self.hosts[host.name])
                    # also check here that the group that contains host, is
                    # also contained in the host's group list
                    if group not in self.hosts[host.name].groups:
                        self.hosts[host.name].groups.append(group)

            # extra checks on special groups all and ungrouped
            # remove hosts from 'ungrouped' if they became member of other groups
            if 'ungrouped' in self.groups:
                ungrouped = self.groups['ungrouped']
                # loop on a copy of ungrouped hosts, as we want to change that list
                for host in ungrouped.hosts[:]:
                    if len(host.groups) > 1:
                        host.groups.remove(ungrouped)
                        ungrouped.hosts.remove(host)

            # remove hosts from 'all' if they became member of other groups
            # all should only contain direct children, not grandchildren
            # direct children should have depth == 1
            if 'all' in self.groups:
                allgroup = self.groups['all']
                # loop on a copy of all's child groups, as we want to change that list
                for group in allgroup.child_groups[:]:
                    # groups might once have been added to all, and later be added
                    # to another group: we need to remove the link with all then
                    if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
                        # real children of all have just 1 parent, all
                        # this one has more, so not a direct child of all anymore
                        group.parent_groups.remove(allgroup)
                        allgroup.child_groups.remove(group)
                    elif allgroup not in group.parent_groups:
                        # this group was once added to all, but doesn't list it as
                        # a parent any more; the info in the group is the correct
                        # info
                        allgroup.child_groups.remove(group)

    def _add_group(self, group):
        """ Merge an existing group or add a new one;
            Track parent and child groups, and hosts of the new one """

        if group.name not in self.groups:
            # it's brand new, add him!
            self.groups[group.name] = group
        if self.groups[group.name] != group:
            # different object, merge
            self._merge_groups(self.groups[group.name], group)

    def _add_host(self, host):
        """Merge ``host`` into self.hosts, de-duplicating by name."""
        if host.name not in self.hosts:
            # Papa's got a brand new host
            self.hosts[host.name] = host
        if self.hosts[host.name] != host:
            # different object, merge
            self._merge_hosts(self.hosts[host.name], host)

    def _merge_groups(self, group, newgroup):
        """ Merge all of instance newgroup into group,
            update parent/child relationships
            group lists may still contain group objects that exist in self with
            same name, but was instantiated as a different object in some other
            inventory parser; these are handled later """

        # name
        if group.name != newgroup.name:
            raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))

        # depth
        group.depth = max([group.depth, newgroup.depth])

        # hosts list (host objects are by now already added to self.hosts)
        for host in newgroup.hosts:
            grouphosts = dict([(h.name, h) for h in group.hosts])
            if host.name in grouphosts:
                # same host name but different object, merge
                self._merge_hosts(grouphosts[host.name], host)
            else:
                # new membership, add host to group from self
                # group from self will also be added again to host.groups, but
                # as different object
                group.add_host(self.hosts[host.name])
                # now remove the old object for group in host.groups
                for hostgroup in [g for g in host.groups]:
                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
                        self.hosts[host.name].groups.remove(hostgroup)

        # group child membership relation
        for newchild in newgroup.child_groups:
            # dict with existing child groups:
            childgroups = dict([(g.name, g) for g in group.child_groups])
            # check if child of new group is already known as a child
            if newchild.name not in childgroups:
                self.groups[group.name].add_child_group(newchild)

        # group parent membership relation
        for newparent in newgroup.parent_groups:
            # dict with existing parent groups:
            parentgroups = dict([(g.name, g) for g in group.parent_groups])
            # check if parent of new group is already known as a parent
            if newparent.name not in parentgroups:
                if newparent.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newparent.name] = newparent
                # group now exists but not yet as a parent here
                self.groups[newparent.name].add_child_group(group)

        # variables
        group.vars = combine_vars(group.vars, newgroup.vars)

    def _merge_hosts(self, host, newhost):
        """ Merge all of instance newhost into host """

        # name
        if host.name != newhost.name:
            raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))

        # group membership relation
        for newgroup in newhost.groups:
            # dict with existing groups:
            hostgroups = dict([(g.name, g) for g in host.groups])
            # check if new group is already known as a group
            if newgroup.name not in hostgroups:
                if newgroup.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newgroup.name] = newgroup
                # group now exists but doesn't have host yet
                self.groups[newgroup.name].add_host(host)

        # variables
        host.vars = combine_vars(host.vars, newhost.vars)

    def get_host_variables(self, host):
        """ Gets additional host variables from all inventories """
        vars = {}
        for i in self.parsers:
            vars.update(i.get_host_variables(host))
        return vars
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""
Copyright (C) 2010 Stephen Georg
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
For Questions about this program please contact
Stephen Georg at srgeorg@gmail.com
A copy of the license should be included in the file LICENSE.txt
"""
from PyQt4 import QtCore, QtGui
import logging
import sys
class WidgetProjectEditor(QtGui.QGroupBox):
"""
Provides an editor for GTD projects
"""
# TODO define signals emitted by this widget
__pyqtSignals__ = ("projectMoodified()",
)
projectModified = QtCore.pyqtSignal()
def __init__(self, dbCon):
logging.info("TracksProjectEditor initiated...")
# The current item id
self.current_id = None
self.databaseCon = dbCon
self.current_user_id = None
QtGui.QGroupBox.__init__(self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
self.setSizePolicy(sizePolicy)
self.setMaximumSize(QtCore.QSize(250, 16777215))
self.setMinimumSize(QtCore.QSize(250, 0))
self.verticalLayout = QtGui.QVBoxLayout(self)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(4)
# Hide Form Button
#self.formVisible = True
#self.horizontalLayout_3 = QtGui.QHBoxLayout()
#spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
#self.horizontalLayout_3.addItem(spacerItem)
#self.hideFormButton = QtGui.QPushButton(self)
#self.hideFormButton.setText(">> Hide Form")
#self.hideFormButton.setToolTip("Hide the form from view")
#self.horizontalLayout_3.addWidget(self.hideFormButton)
#self.verticalLayout.addLayout(self.horizontalLayout_3)
#self.hideFormButton.clicked.connect(self.hideButtonClicked)
# Name line edit
self.nameLabel = QtGui.QLabel(self)
self.nameLabel.setText("Name")
self.verticalLayout.addWidget(self.nameLabel)
self.nameEdit = QtGui.QLineEdit(self)
self.verticalLayout.addWidget(self.nameEdit)
# Description edit
self.descriptionLabel = QtGui.QLabel(self)
self.descriptionLabel.setText("Description")
self.verticalLayout.addWidget(self.descriptionLabel)
self.descriptionEdit = QtGui.QPlainTextEdit(self)
self.descriptionEdit.setTabChangesFocus(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.descriptionEdit.sizePolicy().hasHeightForWidth())
self.descriptionEdit.setSizePolicy(sizePolicy)
self.descriptionEdit.setMinimumSize(QtCore.QSize(0, 120))
self.descriptionEdit.setMaximumSize(QtCore.QSize(16777215, 120))
self.verticalLayout.addWidget(self.descriptionEdit)
# Default context Line Edit
self.contextLabel = QtGui.QLabel(self)
self.contextLabel.setText("Default Context (Optional)")
self.verticalLayout.addWidget(self.contextLabel)
self.contextEdit = QtGui.QLineEdit(self)
self.verticalLayout.addWidget(self.contextEdit)
# Add string list completer, this is now done in refresh
# TODO get projects from database
#contextList = []
#for row in self.databaseCon.execute("select name FROM contexts"):
# contextList.append(row[0])
#contextStringList = QtCore.QStringList(contextList)
#contextCompleter = QtGui.QCompleter(contextStringList)
#contextCompleter.setCompletionMode(1)
#self.contextEdit.setCompleter(contextCompleter)
# Default Tags Line Edit
# TODO find existing tags from database
self.existingTags = ["one", "two", "three", "four"]
#
self.tagsLabel = QtGui.QLabel(self)
self.tagsLabel.setText("Default Tags (Optional)")
self.verticalLayout.addWidget(self.tagsLabel)
self.tagsEdit = QtGui.QLineEdit(self)
self.verticalLayout.addWidget(self.tagsEdit)
# TODO add completion. Consider this: http://john.nachtimwald.com/2009/07/04/qcompleter-and-comma-separated-tags/
# make tags all lower case
# use set(list of strings) and set.diffence
#QObject.connect(self, SIGNAL('textChanged(QString)'), self.text_changed)
self.tagsEdit.textChanged.connect(self.tagsEditChanged)
self.tagCompleter = QtGui.QCompleter(QtCore.QStringList(self.existingTags))
self.tagCompleter.setWidget(self.tagsEdit)
self.tagCompleter.setCompletionMode(1)
self.tagCompleter.activated.connect(self.tagsCompleterSelect)
self.tagsLabel.setVisible(False)
self.tagsEdit.setVisible(False)
# Project state
self.statusLabel = QtGui.QLabel(self)
self.statusLabel.setText("Project Status")
self.verticalLayout.addWidget(self.statusLabel)
self.statusLayout = QtGui.QHBoxLayout()
self.statusRadio1 = QtGui.QRadioButton(self)
self.statusRadio1.setText("active")
self.statusRadio1.setChecked(True)
self.statusLayout.addWidget(self.statusRadio1)
self.statusRadio2 = QtGui.QRadioButton(self)
self.statusRadio2.setText("hidden")
self.statusLayout.addWidget(self.statusRadio2)
self.statusRadio3 = QtGui.QRadioButton(self)
self.statusRadio3.setText("completed")
self.statusLayout.addWidget(self.statusRadio3)
self.verticalLayout.addLayout(self.statusLayout)
# Parent project (If this is a sub project)
self.existingProjects = []
#for row in self.databaseCon.execute("select description FROM todos where state='active'"):
# self.existingActions.append(row[0])
self.existingProjects.append("FAKE-PROJECT")
#
self.parentLabel = QtGui.QLabel(self)
self.parentLabel.setText("\nSubordinate to (optional)") # can it have multiple parents???
self.verticalLayout.addWidget(self.parentLabel)
self.parentEdit = QtGui.QLineEdit(self)
self.verticalLayout.addWidget(self.parentEdit)
# TODO add completion. Done in refresh
#projectList = []
#print self.current_user_id
#for row in self.databaseCon.execute("select name FROM projects"):
# projectList.append(row[0])
#projectStringList = QtCore.QStringList(projectList)
#projectCompleter = QtGui.QCompleter(projectStringList)
#projectCompleter.setCompletionMode(1)
#self.parentEdit.setCompleter(projectCompleter)
self.defaultParent = None
# Commit and Cancel button
# TODO hide cancel button by default??? only show when editing an existing item
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.addProjectButton = QtGui.QPushButton(self)
self.addProjectButton.setText("Add project")
self.horizontalLayout_5.addWidget(self.addProjectButton)
self.cancelEditButton = QtGui.QPushButton(self)
self.cancelEditButton.setText("Cancel edit")
self.horizontalLayout_5.addWidget(self.cancelEditButton)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_5)
# connect buttons
self.addProjectButton.clicked.connect(self.saveButtonClicked)
self.cancelEditButton.clicked.connect(self.cancelButtonClicked)
#self.cancelEditButton.setVisible(self.current_id != None)
# Add a vertical spacer
spacerItem = QtGui.QSpacerItem(
1, 1, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
# Settings
self.settings = QtCore.QSettings("tracks-queue", "tracks-queue")
# Set up keyboard shortcuts
shortcut = QtGui.QShortcut(self)
shortcut.setKey(QtGui.QKeySequence("Esc"))
shortcut.setContext(QtCore.Qt.WidgetWithChildrenShortcut)
shortcut.activated.connect(self.cancelButtonClicked)
#def hideButtonClicked(self):
# logging.info("TracksProjectEditor->hideButtonClicked")
# self.formVisible = not self.formVisible
# self.settings.setValue("editor/visible", QtCore.QVariant(self.formVisible))
# self.updateHidden()
#def updateHidden(self):
# logging.info("TracksProjectEditor->updateHidden")
#
# if self.formVisible:
# self.hideFormButton.setText(">> Hide Form")
# self.setMaximumSize(QtCore.QSize(250, 16777215))
# self.setMinimumSize(QtCore.QSize(250, 0))
# self.verticalLayout.setMargin(4)
# self.nameEdit.setFocus()
# else:
# self.hideFormButton.setText("<<")
# self.setMaximumSize(QtCore.QSize(30, 16777215))
# self.setMinimumSize(QtCore.QSize(30, 0))
# self.verticalLayout.setMargin(0)
#
# # Hide or show all of the form elements
# self.descriptionLabel.setVisible(self.formVisible)
# self.descriptionEdit.setVisible(self.formVisible)
# self.nameLabel.setVisible(self.formVisible)
# self.nameEdit.setVisible(self.formVisible)
# self.contextLabel.setVisible(self.formVisible)
# self.contextEdit.setVisible(self.formVisible)
# self.tagsLabel.setVisible(False)#self.formVisible) TODO
# self.tagsEdit.setVisible(False)#self.formVisible) TODO
# self.statusLabel.setVisible(self.formVisible)
# self.statusRadio1.setVisible(self.formVisible)
# self.statusRadio2.setVisible(self.formVisible)
# self.statusRadio3.setVisible(self.formVisible)
# self.parentLabel.setVisible(self.formVisible)
# self.parentEdit.setVisible(self.formVisible)
# self.addProjectButton.setVisible(self.formVisible)
# #TODO only reshow cancel button when editing existing item
# self.cancelEditButton.setVisible(self.formVisible and self.current_id != None)
    def tagsEditChanged(self, theText):
        """Refresh the tag completer while the user types into the tags field.

        Splits the comma-separated field text into individual tags, removes
        the tags already entered from the completion candidates, and pops up
        the completer for the partial tag under the cursor.

        :param theText: current text of the tags line edit (QString).
        """
        # refer to this example:
        # http://john.nachtimwald.com/2009/07/04/qcompleter-and-comma-separated-tags/
        #logging.info("TracksActionEditor->tagsEditChanged - "+str(theText))
        # Normalise: lower-case, split on commas, strip whitespace, drop empties.
        tagText = str(theText).lower().split(",")
        theTags = []
        for tag in tagText:
            tag = tag.strip()
            if len(tag) > 0:
                theTags.append(tag)
        theSet = list(set(theTags))
        # The completion prefix is the partial tag to the left of the cursor
        # (text after the last comma before the cursor position).
        currentText = str(theText[:self.tagsEdit.cursorPosition()])
        prefix = currentText.split(',')[-1].strip()
        # Offer only known tags that are not already present in the field.
        tags = list(set(self.existingTags).difference(theSet))
        model = QtGui.QStringListModel(QtCore.QStringList(tags), self.tagCompleter)
        model.sort(0)
        self.tagCompleter.setModel(model)
        self.tagCompleter.setCompletionPrefix(prefix)
        if prefix.strip() != '':
            self.tagCompleter.complete()
        # 2 is presumably QCompleter.CaseInsensitivelySortedModel (PyQt4)
        # — TODO confirm it matches the model.sort(0) above.
        self.tagCompleter.setModelSorting(2)
    def tagsCompleterSelect(self, theText):
        """Insert the completion chosen from the tag completer popup.

        Replaces the partial tag fragment immediately before the cursor with
        the selected tag followed by ", ", leaving any text after the cursor
        untouched, then places the cursor after the inserted ", ".

        :param theText: the tag selected in the completer popup.
        """
        # refer to this example:
        # http://john.nachtimwald.com/2009/07/04/qcompleter-and-comma-separated-tags/
        #logging.info("TracksActionEditor->tagsCompleterSelect - " + str(theText))
        cursor_pos = self.tagsEdit.cursorPosition()
        # NOTE: unicode() is Python 2 only.
        before_text = unicode(self.tagsEdit.text())[:cursor_pos]
        after_text = unicode(self.tagsEdit.text())[cursor_pos:]
        # Length of the partial tag being completed (text after the last comma).
        prefix_len = len(before_text.split(',')[-1].strip())
        self.tagsEdit.setText('%s%s, %s' % (before_text[:cursor_pos - prefix_len], theText, after_text))
        # +2 accounts for the appended ", ".
        self.tagsEdit.setCursorPosition(cursor_pos - prefix_len + len(theText) + 2)
def cancelButtonClicked(self):
logging.info("TracksProjectEditor->cancelButtonClicked")
# Clear all the widgets
# TODO also clear internal data reflecting the database item we are editing
self.descriptionEdit.clear()
self.nameEdit.clear()
self.contextEdit.clear()
self.tagsEdit.clear()
self.statusRadio1.setChecked(True)
self.parentEdit.clear()
if self.defaultParent:
self.parentEdit.setText(self.defaultParent)
self.current_id = None
#self.cancelEditButton.setVisible(False)
self.setVisible(False)
self.addProjectButton.setText("Add Project")
    def saveButtonClicked(self):
        """Validate the form and insert or update the project in the database.

        Inserts a new row when no existing project is being edited
        (self.current_id is None), otherwise updates the existing row.
        Maintains the 'subproject' rows in the dependencies table and emits
        the projectModified() signal on success.  Shows a critical message
        box and aborts (no data written) on any validation failure.
        """
        logging.info("TracksProjectEditor->saveButtonClicked")
        # A project name is mandatory.
        if self.nameEdit.text()=="":
            QtGui.QMessageBox.critical(self,
                        "Error",
                        "Project editor is either incomplete or erroneous\n\nNo data has been inserted or modified")
            return
        # A current user must have been set via setCurrentUser().
        if self.current_user_id==None:
            QtGui.QMessageBox.critical(self,
                        "Error",
                        "Editor doesn't know which user??\n\nNo data has been inserted or modified")
            return
        name = str(self.nameEdit.text())
        desc = str(self.descriptionEdit.toPlainText())
        # Resolve the context name (if any) to its database id.
        context = str(self.contextEdit.text())
        if context == "":
            context= None
        else:
            # look up the id in the database, error if it does not exist.
            result = self.databaseCon.execute("select id FROM contexts where name=?", [context,]).fetchone()
            if result != None:
                context = result[0]
            else:
                QtGui.QMessageBox.critical(self,
                        "Error",
                        "Nominated context doesn't exist (should prompt to add it in the future)\n\nNo data has been inserted or modified")
                return
        # Tags are always stored as NULL for now (the tags widgets are hidden).
        tags = None
        state = "active"
        if self.statusRadio1.isChecked():
            state = "active"
        elif self.statusRadio2.isChecked():
            state = "hidden"
        elif self.statusRadio3.isChecked():
            state = "completed"
        # parent project: resolve the name (if any) to its database id.
        parent = str(self.parentEdit.text())
        if parent == "":
            parent= None
        else:
            # look up the id in the database, error if it does not exist.
            result = self.databaseCon.execute("select id FROM projects where name=?", [parent,]).fetchone()
            if result != None:
                parent = result[0]
            else:
                QtGui.QMessageBox.critical(self,
                        "Error",
                        "Nominated parent project doesn't exist\n\nNo data has been inserted or modified")
                return
        #TODO more here
        if self.current_id == None:
            logging.debug("TracksProjectEditor->saveButtonClicked->adding new project")
            # Projects created already-completed get completed_at stamped now.
            # NOTE(review): the VALUES list relies on the projects table
            # column order — verify against the schema if it changes.
            q = "INSERT INTO projects VALUES(NULL,?,1,?,?,?,DATETIME('now'),DATETIME('now'),?,NULL,?)"
            if state == "completed":
                q = "INSERT INTO projects VALUES(NULL,?,1,?,?,?,DATETIME('now'),DATETIME('now'),?,DATETIME('now'),?)"
            self.databaseCon.execute(q,[name,self.current_user_id,desc,state,context,tags])
            self.databaseCon.commit()
            # Add the subproject relationship
            if parent:
                newID = self.databaseCon.execute("select last_insert_rowid()").fetchone()[0]
                existing = self.databaseCon.execute("select * from dependencies where successor_id=? AND predecessor_id=? AND relationship_type='subproject'",(newID,parent)).fetchall()
                if len(existing)==0:
                    self.databaseCon.execute("INSERT INTO dependencies VALUES(NULL, ?,?,'subproject')",(newID,parent))
                    self.databaseCon.commit()
            self.cancelButtonClicked()
            self.emit(QtCore.SIGNAL("projectModified()"))
        else:
            logging.debug("TracksProjectEditor->saveButtonClicked->modifying existing project")
            # Stamp completed_at only when the status transitions to completed.
            q = "UPDATE projects SET name=?, description=?, state=?, default_context_id=?, default_tags=?, updated_at=DATETIME('now') WHERE id=?"
            if state == "completed" and state != self.current_id_prevstatus:
                q = "UPDATE projects SET name=?, description=?, state=?, default_context_id=?, default_tags=?, updated_at=DATETIME('now'), completed_at=DATETIME('now') WHERE id=?"
            self.databaseCon.execute(q,[name,desc,state,context,tags,self.current_id])
            self.databaseCon.commit()
            # Replace the subproject relationship: delete any old one, then
            # re-insert if a parent was nominated.
            self.databaseCon.execute("delete from dependencies where successor_id=? AND relationship_type='subproject'",(self.current_id,))
            if parent:
                self.databaseCon.execute("INSERT INTO dependencies VALUES(NULL, ?,?,'subproject')",(self.current_id,parent))
            self.databaseCon.commit()
            self.cancelButtonClicked()
            self.emit(QtCore.SIGNAL("projectModified()"))
def setCurrentProjectID(self, projectID):
logging.info("TracksProjectEditor->setCurrentProjectID")
for row in self.databaseCon.execute("select id, name, description, state, default_context_id, default_tags FROM projects where id="+str(projectID)):
self.nameEdit.setText(row[1])
self.descriptionEdit.setPlainText(row[2])
#context
if row[4] != None:
for name in self.databaseCon.execute("select name from contexts where id =?",[str(row[4]),]):
self.contextEdit.setText(name[0])
#tags
if row[5] != None:
self.tagsEdit.setText(row[5])
# the state
if row[3] == "active":
self.statusRadio1.setChecked(True)
self.current_id_prevstatus = "active"
elif row[3] == "hidden":
self.statusRadio2.setChecked(True)
self.current_id_prevstatus = "hidden"
elif row[3] == "completed":
self.statusRadio3.setChecked(True)
self.current_id_prevstatus = "completed"
else:
self.statusRadio1.setChecked(True)
self.current_id_prevstatus = "active"
#parent
parentData = self.databaseCon.execute("select name from projects where id=(select predecessor_id from dependencies where relationship_type='subproject' AND successor_id=?)",(row[0],)).fetchall()
if len(parentData) > 0:
self.parentEdit.setText(parentData[0][0])
else:
self.parentEdit.setText("")
self.current_id=projectID
self.addProjectButton.setText("Save project")
self.cancelEditButton.setVisible(True)
## Make the editor visible if not already
#if not self.formVisible:
# self.hideButtonClicked()
# Make the editor visible if not already and focus it
self.setVisible(True)
self.setFocus()
    def setCurrentUser(self, user):
        """Change the current database user.

        The id is stored on self.current_user_id and is used by refresh()
        and saveButtonClicked() to scope database queries to this user.
        """
        self.current_user_id = user
    def setFocus(self):
        """Give keyboard focus to the project name field.

        Overrides the widget's setFocus so focusing the editor lands in the
        first field a user fills in.
        """
        logging.info("TracksProjectEditor->setFocus")
        self.nameEdit.setFocus()
def refresh(self):
logging.info("TracksProjectEditor->refresh")
# What is the setting re form visibility?
#if self.settings.contains("editor/visible"):
# self.formVisible = bool(self.settings.value("editor/visible").toBool())
# self.updateHidden()
# update the context auto complete list
contextList = []
for row in self.databaseCon.execute("SELECT name FROM contexts WHERE user_id=? ORDER BY UPPER(name)", (self.current_user_id,)):
contextList.append(row[0])
contextStringList = QtCore.QStringList(contextList)
contextCompleter = QtGui.QCompleter(contextStringList)
contextCompleter.setCompletionMode(1)
self.contextEdit.setCompleter(contextCompleter)
# update the parent auto complete list
projectList = []
for row in self.databaseCon.execute("select name FROM projects WHERE user_id=? ORDER BY UPPER(name)", (self.current_user_id,)):
projectList.append(row[0])
projectStringList = QtCore.QStringList(projectList)
projectCompleter = QtGui.QCompleter(projectStringList)
projectCompleter.setCompletionMode(1)
self.parentEdit.setCompleter(projectCompleter)
    def setDefaultParent(self, projectName):
        # Remember a parent project name; cancelButtonClicked() pre-fills the
        # "Subordinate to" field with it after clearing the form.
        self.defaultParent = projectName
|
unknown
|
codeparrot/codeparrot-clean
| ||
import path from "pathe";
export function isReactRouterRepo() {
  // '@react-router/node' is a dependency of this package, so it's
  // guaranteed to be resolvable. Outside the react-router monorepo it
  // resolves into node_modules; inside the monorepo it resolves into the
  // local "packages" directory instead.
  let pkgJsonPath = require.resolve("@react-router/node/package.json");
  let packageDir = path.dirname(pkgJsonPath);
  let containingDirName = path.basename(path.dirname(packageDir));
  return containingDirName === "packages";
}
|
typescript
|
github
|
https://github.com/remix-run/react-router
|
packages/react-router-dev/config/is-react-router-repo.ts
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json, os
import frappe.permissions
from frappe.utils.csvutils import UnicodeWriter
from frappe.utils import cstr, cint, flt
from frappe.core.page.data_import_tool.data_import_tool import data_keys
@frappe.whitelist()
def get_template(doctype=None, parent_doctype=None, all_doctypes="No", with_data="No"):
	"""Build a CSV data-import template for *doctype* and return it in the
	HTTP response.

	:param doctype: DocType to build the template for.
	:param parent_doctype: parent DocType when exporting a child table;
		defaults to *doctype* itself.
	:param all_doctypes: "Yes" to include all child tables of *doctype*
		side by side in the same sheet.
	:param with_data: "Yes" to append the existing (permitted) records
		below the headings.

	The result is written into frappe.response as a 'csv' download.
	NOTE: Python 2 only — relies on filter() returning a list and on
	list.sort(cmp).
	"""
	all_doctypes = all_doctypes=="Yes"
	if not parent_doctype:
		parent_doctype = doctype
	# Per-doctype column ranges within the combined row, e.g.
	# {"Sales Invoice Item": {"start": 10, "end": 25}}.
	column_start_end = {}
	if all_doctypes:
		# Map child doctype name -> the parentfield linking it to *doctype*.
		doctype_parentfield = {}
		child_doctypes = []
		for df in frappe.get_meta(doctype).get_table_fields():
			child_doctypes.append(df.options)
			doctype_parentfield[df.options] = df.fieldname
	def add_main_header():
		# Template preamble: identifies the main/parent tables and gives
		# usage notes to the person filling in the sheet.
		w.writerow(['Data Import Template'])
		w.writerow([data_keys.main_table, doctype])
		if parent_doctype != doctype:
			w.writerow([data_keys.parent_table, parent_doctype])
		else:
			w.writerow([''])
		w.writerow([''])
		w.writerow(['Notes:'])
		w.writerow(['Please do not change the template headings.'])
		w.writerow(['First data column must be blank.'])
		w.writerow(['If you are uploading new records, leave the "name" (ID) column blank.'])
		w.writerow(['If you are uploading new records, "Naming Series" becomes mandatory, if present.'])
		w.writerow(['Only mandatory fields are necessary for new records. You can delete non-mandatory columns if you wish.'])
		w.writerow(['For updating, you can update only selective columns.'])
		w.writerow(['You can only upload upto 5000 records in one go. (may be less in some cases)'])
		if key == "parent":
			w.writerow(['"Parent" signifies the parent table in which this row must be added'])
			w.writerow(['If you are updating, please select "Overwrite" else existing rows will not be deleted.'])
	def build_field_columns(dt):
		# Append one column per (visible) docfield of *dt* to the shared
		# heading rows, mandatory fields first, and record the column range.
		meta = frappe.get_meta(dt)
		# Keep only table columns that have a corresponding docfield.
		tablecolumns = filter(None,
			[(meta.get_field(f[0]) or None) for f in frappe.db.sql('desc `tab%s`' % dt)])
		tablecolumns.sort(lambda a, b: a.idx - b.idx)
		if dt==doctype:
			column_start_end[dt] = frappe._dict({"start": 0})
		else:
			column_start_end[dt] = frappe._dict({"start": len(columns)})
			# Child tables get their own ID column so existing child rows
			# can be matched on update.
			append_field_column(frappe._dict({
				"fieldname": "name",
				"label": "ID",
				"fieldtype": "Data",
				"reqd": 1,
				"idx": 0,
				"info": "Leave blank for new records"
			}), True)
		for docfield in tablecolumns:
			append_field_column(docfield, True)
		# all non mandatory fields
		for docfield in tablecolumns:
			append_field_column(docfield, False)
		# append DocType name
		tablerow[column_start_end[dt].start + 1] = dt
		if dt!=doctype:
			tablerow[column_start_end[dt].start + 2] = doctype_parentfield[dt]
		column_start_end[dt].end = len(columns) + 1
	def append_field_column(docfield, mandatory):
		# Emit the column only on the matching pass (mandatory/optional),
		# skipping hidden and internal fields.
		if docfield and ((mandatory and docfield.reqd) or not (mandatory or docfield.reqd)) \
			and (docfield.fieldname not in ('parenttype', 'trash_reason')) and not docfield.hidden:
			tablerow.append("")
			fieldrow.append(docfield.fieldname)
			labelrow.append(docfield.label)
			mandatoryrow.append(docfield.reqd and 'Yes' or 'No')
			typerow.append(docfield.fieldtype)
			inforow.append(getinforow(docfield))
			columns.append(docfield.fieldname)
	def append_empty_field_column():
		# "~" marks a separator column between doctypes in the sheet.
		tablerow.append("~")
		fieldrow.append("~")
		labelrow.append("")
		mandatoryrow.append("")
		typerow.append("")
		inforow.append("")
		columns.append("")
	def getinforow(docfield):
		"""make info comment for options, links etc."""
		if docfield.fieldtype == 'Select':
			if not docfield.options:
				return ''
			else:
				return 'One of: %s' % ', '.join(filter(None, docfield.options.split('\n')))
		elif docfield.fieldtype == 'Link':
			return 'Valid %s' % docfield.options
		elif docfield.fieldtype == 'Int':
			return 'Integer'
		elif docfield.fieldtype == "Check":
			return "0 or 1"
		elif hasattr(docfield, "info"):
			return docfield.info
		else:
			return ''
	def add_field_headings():
		# The six heading rows built above, then the data separator marker.
		w.writerow(tablerow)
		w.writerow(labelrow)
		w.writerow(fieldrow)
		w.writerow(mandatoryrow)
		w.writerow(typerow)
		w.writerow(inforow)
		w.writerow([data_keys.data_separator])
	def add_data():
		def add_data_row(row_group, dt, doc, rowidx):
			# Place *doc*'s values into its doctype's column range of row
			# *rowidx* within the current parent's row group.
			d = doc.copy()
			if all_doctypes:
				# Quote the name so spreadsheets keep it as text.
				d.name = '"'+ d.name+'"'
			if len(row_group) < rowidx + 1:
				row_group.append([""] * (len(columns) + 1))
			row = row_group[rowidx]
			for i, c in enumerate(columns[column_start_end[dt].start:column_start_end[dt].end]):
				row[column_start_end[dt].start + i + 1] = d.get(c, "")
		if with_data=='Yes':
			frappe.permissions.can_export(parent_doctype, raise_exception=True)
			# get permitted data only
			data = frappe.get_list(doctype, fields=["*"], limit_page_length=None)
			for doc in data:
				# add main table
				row_group = []
				add_data_row(row_group, doctype, doc, 0)
				if all_doctypes:
					# add child tables
					for child_doctype in child_doctypes:
						for ci, child in enumerate(frappe.db.sql("""select * from `tab%s`
							where parent=%s order by idx""" % (child_doctype, "%s"), doc.name, as_dict=1)):
							add_data_row(row_group, child_doctype, child, ci)
				for row in row_group:
					w.writerow(row)
	w = UnicodeWriter()
	# Key column: child-table templates key rows on 'parent', otherwise 'name'.
	key = 'parent' if parent_doctype != doctype else 'name'
	add_main_header()
	w.writerow([''])
	# Shared heading rows, mutated by the helpers above via closure.
	tablerow = [data_keys.doctype, ""]
	labelrow = ["Column Labels:", "ID"]
	fieldrow = [data_keys.columns, key]
	mandatoryrow = ['Mandatory:', 'Yes']
	typerow = ['Type:', 'Data (text)']
	inforow = ['Info:', '']
	columns = [key]
	build_field_columns(doctype)
	if all_doctypes:
		for d in child_doctypes:
			append_empty_field_column()
			build_field_columns(d)
	add_field_headings()
	add_data()
	# write out response as a type csv
	frappe.response['result'] = cstr(w.getvalue())
	frappe.response['type'] = 'csv'
	frappe.response['doctype'] = doctype
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
""" Tests for the account API. """
import re
from unittest import skipUnless
from nose.tools import raises
from mock import patch
import ddt
from dateutil.parser import parse as parse_datetime
from django.core import mail
from django.test import TestCase
from django.conf import settings
from ..api import account as account_api
from ..models import UserProfile
@ddt.ddt
class AccountApiTest(TestCase):
    """Tests for the account Python API: creating/activating accounts and
    changing email addresses and passwords.

    Fix in this revision: _assert_is_datetime() previously *returned*
    True/False, and every caller ignored the return value, so an invalid
    timestamp could never fail a test.  It now asserts.
    """

    USERNAME = u'frank-underwood'
    PASSWORD = u'ṕáśśẃőŕd'
    EMAIL = u'frank+underwood@example.com'
    ORIG_HOST = 'example.com'
    IS_SECURE = False

    INVALID_USERNAMES = [
        None,
        u'',
        u'a',
        u'a' * (account_api.USERNAME_MAX_LENGTH + 1),
        u'invalid_symbol_@',
        u'invalid-unicode_fŕáńḱ',
    ]

    INVALID_EMAILS = [
        None,
        u'',
        u'a',
        'no_domain',
        'no+domain',
        '@',
        '@domain.com',
        'test@no_extension',
        u'fŕáńḱ@example.com',
        u'frank@éxáḿṕĺé.ćőḿ',
        # Long email -- subtract the length of the @domain
        # except for one character (so we exceed the max length limit)
        u'{user}@example.com'.format(
            user=(u'e' * (account_api.EMAIL_MAX_LENGTH - 11))
        )
    ]

    INVALID_PASSWORDS = [
        None,
        u'',
        u'a',
        u'a' * (account_api.PASSWORD_MAX_LENGTH + 1)
    ]

    def test_activate_account(self):
        # Create the account, which is initially inactive
        activation_key = account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account = account_api.account_info(self.USERNAME)
        self.assertEqual(account, {
            'username': self.USERNAME,
            'email': self.EMAIL,
            'is_active': False
        })
        # Activate the account and verify that it is now active
        account_api.activate_account(activation_key)
        account = account_api.account_info(self.USERNAME)
        self.assertTrue(account['is_active'])

    def test_change_email(self):
        # Request an email change
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        activation_key = account_api.request_email_change(
            self.USERNAME, u'new+email@example.com', self.PASSWORD
        )
        # Verify that the email has not yet changed
        account = account_api.account_info(self.USERNAME)
        self.assertEqual(account['email'], self.EMAIL)
        # Confirm the change, using the activation code
        old_email, new_email = account_api.confirm_email_change(activation_key)
        self.assertEqual(old_email, self.EMAIL)
        self.assertEqual(new_email, u'new+email@example.com')
        # Verify that the email is changed
        account = account_api.account_info(self.USERNAME)
        self.assertEqual(account['email'], u'new+email@example.com')

    def test_confirm_email_change_repeat(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        activation_key = account_api.request_email_change(
            self.USERNAME, u'new+email@example.com', self.PASSWORD
        )
        # Confirm the change once
        account_api.confirm_email_change(activation_key)
        # Confirm the change again. The activation code should be
        # single-use, so this should raise an error.
        with self.assertRaises(account_api.AccountNotAuthorized):
            account_api.confirm_email_change(activation_key)

    def test_create_account_duplicate_username(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        with self.assertRaises(account_api.AccountUserAlreadyExists):
            account_api.create_account(self.USERNAME, self.PASSWORD, 'different+email@example.com')

    # Email uniqueness constraints were introduced in a database migration,
    # which we disable in the unit tests to improve the speed of the test suite.
    @skipUnless(settings.SOUTH_TESTS_MIGRATE, "South migrations required")
    def test_create_account_duplicate_email(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        with self.assertRaises(account_api.AccountUserAlreadyExists):
            account_api.create_account('different_user', self.PASSWORD, self.EMAIL)

    def test_username_too_long(self):
        long_username = 'e' * (account_api.USERNAME_MAX_LENGTH + 1)
        with self.assertRaises(account_api.AccountUsernameInvalid):
            account_api.create_account(long_username, self.PASSWORD, self.EMAIL)

    def test_account_info_no_user(self):
        self.assertIs(account_api.account_info('does_not_exist'), None)

    @raises(account_api.AccountEmailInvalid)
    @ddt.data(*INVALID_EMAILS)
    def test_create_account_invalid_email(self, invalid_email):
        account_api.create_account(self.USERNAME, self.PASSWORD, invalid_email)

    @raises(account_api.AccountPasswordInvalid)
    @ddt.data(*INVALID_PASSWORDS)
    def test_create_account_invalid_password(self, invalid_password):
        account_api.create_account(self.USERNAME, invalid_password, self.EMAIL)

    @raises(account_api.AccountPasswordInvalid)
    def test_create_account_username_password_equal(self):
        # Username and password cannot be the same
        account_api.create_account(self.USERNAME, self.USERNAME, self.EMAIL)

    @raises(account_api.AccountRequestError)
    @ddt.data(*INVALID_USERNAMES)
    def test_create_account_invalid_username(self, invalid_username):
        account_api.create_account(invalid_username, self.PASSWORD, self.EMAIL)

    @raises(account_api.AccountNotAuthorized)
    def test_activate_account_invalid_key(self):
        account_api.activate_account(u'invalid')

    @raises(account_api.AccountUserNotFound)
    def test_request_email_change_no_user(self):
        account_api.request_email_change(u'no_such_user', self.EMAIL, self.PASSWORD)

    @ddt.data(*INVALID_EMAILS)
    def test_request_email_change_invalid_email(self, invalid_email):
        # Create an account with a valid email address
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Attempt to change the account to an invalid email
        with self.assertRaises(account_api.AccountEmailInvalid):
            account_api.request_email_change(self.USERNAME, invalid_email, self.PASSWORD)

    def test_request_email_change_already_exists(self):
        # Create two accounts, both activated
        activation_key = account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account_api.activate_account(activation_key)
        activation_key = account_api.create_account(u'another_user', u'password', u'another+user@example.com')
        account_api.activate_account(activation_key)
        # Try to change the first user's email to the same as the second user's
        with self.assertRaises(account_api.AccountEmailAlreadyExists):
            account_api.request_email_change(self.USERNAME, u'another+user@example.com', self.PASSWORD)

    def test_request_email_change_duplicates_unactivated_account(self):
        # Create two accounts, but the second account is inactive
        activation_key = account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account_api.activate_account(activation_key)
        account_api.create_account(u'another_user', u'password', u'another+user@example.com')
        # Try to change the first user's email to the same as the second user's
        # Since the second user has not yet activated, this should succeed.
        account_api.request_email_change(self.USERNAME, u'another+user@example.com', self.PASSWORD)

    def test_request_email_change_same_address(self):
        # Create and activate the account
        activation_key = account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account_api.activate_account(activation_key)
        # Try to change the email address to the current address
        with self.assertRaises(account_api.AccountEmailAlreadyExists):
            account_api.request_email_change(self.USERNAME, self.EMAIL, self.PASSWORD)

    def test_request_email_change_wrong_password(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Use the wrong password
        with self.assertRaises(account_api.AccountNotAuthorized):
            account_api.request_email_change(self.USERNAME, u'new+email@example.com', u'wrong password')

    def test_confirm_email_change_invalid_activation_key(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account_api.request_email_change(self.USERNAME, u'new+email@example.com', self.PASSWORD)
        with self.assertRaises(account_api.AccountNotAuthorized):
            account_api.confirm_email_change(u'invalid')

    def test_confirm_email_change_no_request_pending(self):
        # NOTE(review): this test contains no assertion — it only creates
        # the account.  It presumably intended to confirm a change with no
        # pending request and assert a failure; left as-is rather than
        # guessing the expected exception.
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)

    def test_confirm_email_already_exists(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Request a change
        activation_key = account_api.request_email_change(
            self.USERNAME, u'new+email@example.com', self.PASSWORD
        )
        # Another use takes the email before we confirm the change
        account_api.create_account(u'other_user', u'password', u'new+email@example.com')
        # When we try to confirm our change, we get an error because the email is taken
        with self.assertRaises(account_api.AccountEmailAlreadyExists):
            account_api.confirm_email_change(activation_key)
        # Verify that the email was NOT changed
        self.assertEqual(account_api.account_info(self.USERNAME)['email'], self.EMAIL)

    def test_confirm_email_no_user_profile(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        activation_key = account_api.request_email_change(
            self.USERNAME, u'new+email@example.com', self.PASSWORD
        )
        # This should never happen, but just in case...
        UserProfile.objects.get(user__username=self.USERNAME).delete()
        with self.assertRaises(account_api.AccountInternalError):
            account_api.confirm_email_change(activation_key)

    def test_record_email_change_history(self):
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Change the email once
        activation_key = account_api.request_email_change(
            self.USERNAME, u'new+email@example.com', self.PASSWORD
        )
        account_api.confirm_email_change(activation_key)
        # Verify that the old email appears in the history
        meta = UserProfile.objects.get(user__username=self.USERNAME).get_meta()
        self.assertEqual(len(meta['old_emails']), 1)
        email, timestamp = meta['old_emails'][0]
        self.assertEqual(email, self.EMAIL)
        self._assert_is_datetime(timestamp)
        # Change the email again
        activation_key = account_api.request_email_change(
            self.USERNAME, u'another_new+email@example.com', self.PASSWORD
        )
        account_api.confirm_email_change(activation_key)
        # Verify that both emails appear in the history
        meta = UserProfile.objects.get(user__username=self.USERNAME).get_meta()
        self.assertEqual(len(meta['old_emails']), 2)
        email, timestamp = meta['old_emails'][1]
        self.assertEqual(email, 'new+email@example.com')
        self._assert_is_datetime(timestamp)

    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_request_password_change(self):
        # Create and activate an account
        activation_key = account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account_api.activate_account(activation_key)
        # Request a password change
        account_api.request_password_change(self.EMAIL, self.ORIG_HOST, self.IS_SECURE)
        # Verify that one email message has been sent
        self.assertEqual(len(mail.outbox), 1)
        # Verify that the body of the message contains something that looks
        # like an activation link
        email_body = mail.outbox[0].body
        result = re.search('(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)

    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_request_password_change_invalid_user(self):
        with self.assertRaises(account_api.AccountUserNotFound):
            account_api.request_password_change(self.EMAIL, self.ORIG_HOST, self.IS_SECURE)
        # Verify that no email messages have been sent
        self.assertEqual(len(mail.outbox), 0)

    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_request_password_change_inactive_user(self):
        # Create an account, but do not activate it
        account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        account_api.request_password_change(self.EMAIL, self.ORIG_HOST, self.IS_SECURE)
        # Verify that the activation email was still sent
        self.assertEqual(len(mail.outbox), 1)

    def _assert_is_datetime(self, timestamp):
        """Assert that *timestamp* is a non-empty, parseable datetime string.

        Bug fix: the original returned True/False instead of asserting, and
        every caller ignored the return value, so bad timestamps could never
        fail a test.
        """
        self.assertTrue(timestamp, msg='timestamp is missing or empty')
        try:
            parse_datetime(timestamp)
        except ValueError:
            self.fail('Invalid timestamp: %r' % timestamp)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import datetime
import json
import time
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.client_log import ClientLog, CLIENT_LOG_CONTENT_TYPE
from viewfinder.backend.www import json_schema
from viewfinder.backend.www.test import service_base_test
class NewClientLogUrlTestCase(service_base_test.ServiceBaseTestCase):
  """Tests for the new_client_log_url service, which hands the client a
  signed URL it can use to PUT a device log file.

  Bug fix in this revision: testContentType requested 'text/plain' but
  verified the PUT with the misspelled 'test/plain', so the requested
  content type was never actually exercised.
  """

  def setUp(self):
    super(NewClientLogUrlTestCase, self).setUp()
    # Disable the base class's automatic response validation — presumably
    # because these responses don't follow the standard schema (TODO confirm).
    self._validate = False

  def testNewClientLogUrl(self):
    """Verify the put url can be fetched via /service/get_client_log."""
    timestamp = time.time()
    # Request a log stamped 24 hours in the past.
    log_timestamp = timestamp - 24 * 60 * 60
    response_dict = self._SendRequest('new_client_log_url', self._cookie,
                                      {'headers': {'op_id': 'o1', 'op_timestamp': timestamp},
                                       'timestamp': log_timestamp,
                                       'client_log_id': 'log1'})
    exp_put_url = ClientLog.GetPutUrl(
      self._user.user_id, self._device_ids[0], log_timestamp, 'log1')
    self.assertEqual(exp_put_url, response_dict['client_log_put_url'])

  def testContentType(self):
    """Verify that content type can be set explicitly and used."""
    request_dict = {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
                    'timestamp': time.time(),
                    'client_log_id': 'log_content_type',
                    'content_type': 'text/plain'}
    # The PUT must use the same content type that was requested; the
    # original passed the misspelled 'test/plain' here.
    self._GetNewLogUrlAndVerify(request_dict, 'test log file',
                                content_type='text/plain',
                                content_md5=None)

  def testDefaultContentType(self):
    """Verify default content type."""
    request_dict = {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
                    'timestamp': time.time(),
                    'client_log_id': 'default_content_type'}
    self._GetNewLogUrlAndVerify(request_dict, 'test log file',
                                content_type=CLIENT_LOG_CONTENT_TYPE,
                                content_md5=None)

  def testMD5ClientLog(self):
    """Verify MD5 validation for client logs."""
    log_body = 'test log file'
    content_md5 = util.ComputeMD5Hex(log_body)
    request_dict = {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
                    'timestamp': time.time(),
                    'client_log_id': 'log1',
                    'content_md5': content_md5}
    self._GetNewLogUrlAndVerify(request_dict, log_body,
                                content_type=CLIENT_LOG_CONTENT_TYPE,
                                content_md5=content_md5)

  def _GetNewLogUrlAndVerify(self, request_dict, log_body, content_type, content_md5):
    """Get a new client log url based on "request_dict" and verify
    the URL can be PUT using the specified content-type and md5.
    """
    response_dict = self._SendRequest('new_client_log_url', self._cookie, request_dict)
    url = response_dict['client_log_put_url']
    headers = {'Content-Type': content_type}
    if content_md5 is not None:
      headers['Content-MD5'] = content_md5
    response = self._RunAsync(self._tester.http_client.fetch, url, method='PUT',
                              body=log_body, follow_redirects=False, headers=headers)
    self.assertEqual(200, response.code)
|
unknown
|
codeparrot/codeparrot-clean
| ||
def getDefaultGateway():
    """Return the default gateway as a raw little-endian int, or None.

    Parses /proc/net/route and returns the gateway field of the first
    0.0.0.0 route entry (hex-encoded by the kernel); None if no default
    route is configured.
    """
    # Fix: the original leaked the file handle; the context manager closes it.
    with open("/proc/net/route", "r") as f:
        for line in f.readlines():
            tokens = line.split('\t')
            if tokens[1] == '00000000':  # dest 0.0.0.0
                return int(tokens[2], 16)
    return None
def getTelephone():
    """Return the phone number from the 'connect' line of /etc/ppp/options.

    The connect line has the shape: connect <dialer> <args> "<number>".
    Returns "" when no connect line exists.
    """
    # Fix: the original leaked the file handle; the context manager closes it.
    with open("/etc/ppp/options", "r") as f:
        for line in f.readlines():
            if line.find('connect') == 0:
                # Skip the 'connect' keyword and the dialer token.
                line = line[line.find(' ')+1:]
                line = line[line.find(' ')+1:]
                # Keep everything up to the closing quote.
                line = line[:line.find('"')]
                return line
    return ""
def setOptions(tel, user):
    """Rewrite /etc/ppp/options in place with a new phone number and user.

    The 'connect' line keeps its dialer prefix and gets the new number;
    the 'user' line is replaced entirely; all other lines are preserved.
    """
    with open("/etc/ppp/options", "r+") as f:
        lines = f.readlines()
        f.seek(0)
        for line in lines:
            if line.find('connect') == 0:
                # Keep up to (and including) the second space, then append
                # the new number and the closing quote.
                p = line.find(' ')
                p = line.find(' ', p+1)
                line = line[:p+1]
                f.write(line + tel + '"\n')
            elif line.find('user') == 0:
                f.write('user ' + user + '\n')
            else:
                f.write(line)
        # Fix: drop stale trailing bytes when the new content is shorter
        # than the old file (r+ rewrite does not shrink the file).
        f.truncate()
def getSecretString():
    """Return the first credentials entry of /etc/ppp/pap-secrets, or None.

    Skips comment lines and lines without a '*' server wildcard; strips
    spaces, tabs, newlines and quotes so the result reads 'user*password'.
    """
    # Fix: the original leaked the file handle; the context manager closes it.
    with open("/etc/ppp/pap-secrets", "r") as f:
        for line in f.readlines():
            if line[0] == '#' or line.find('*') == -1:
                continue
            for ch in (' ', '\t', '\n', '"'):
                line = line.replace(ch, '')
            return line
    return None
def setSecretString(secret):
    """Rewrite the credentials line(s) in /etc/ppp/pap-secrets.

    Every non-comment line containing a '*' wildcard is replaced by the
    given 'user * password' string; comments and other lines are kept.
    """
    with open("/etc/ppp/pap-secrets", 'r+') as f:
        lines = f.readlines()
        f.seek(0)
        for line in lines:
            if line[0] == '#' or line.find('*') == -1:
                f.write(line)
                continue
            f.write(secret + '\n')
        # Fix: drop stale trailing bytes when the new content is shorter
        # than the old file (r+ rewrite does not shrink the file).
        f.truncate()
from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from enigma import eConsoleAppContainer, eTimer
from Components.Label import Label
from Components.Button import Button
from Components.ConfigList import ConfigList
from Components.config import ConfigText, ConfigPassword, KEY_LEFT, KEY_RIGHT, KEY_0, KEY_DELETE, KEY_BACKSPACE
from Components.ActionMap import NumberActionMap, ActionMap
from os import system
# Functions assignable to the green/red color keys.
NONE = 0
CONNECT = 1
ABORT = 2
DISCONNECT = 3

# Default gateway saved before dialing so it can be restored after pppd exits.
gateway = None

def pppdClosed(ret):
    """Module-level pppd exit hook: restore the previously saved default route."""
    global gateway
    print "modem disconnected", ret
    if gateway:
        #FIXMEEE... hardcoded for little endian!!
        system("route add default gw %d.%d.%d.%d" %(gateway&0xFF, (gateway>>8)&0xFF, (gateway>>16)&0xFF, (gateway>>24)&0xFF))

# True while a ppp session is established; checked when the screen closes.
connected = False

# Single shared pppd process container; outlives the setup screen.
conn = eConsoleAppContainer()
conn.appClosed.append(pppdClosed)
class ModemSetup(Screen):
    """Modem dialer/setup screen.

    Lets the user edit the pppd username, password and phone number,
    starts/stops pppd via the shared eConsoleAppContainer, and tracks
    connection progress by parsing pppd's stdout.
    """
    skin = """
		<screen position="180,100" size="320,300" title="Modem" >
			<ePixmap pixmap="skin_default/buttons/green.png" position="10,10" size="140,40" alphatest="on" />
			<ePixmap pixmap="skin_default/buttons/red.png" position="160,10" size="140,40" alphatest="on" />
			<widget name="key_green" position="10,10" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget name="key_red" position="160,10" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
			<widget name="list" position="10,60" size="300,120" />
			<widget name="state" position="10,210" size="300,80" font="Regular;20" />
		</screen>"""

    def nothing(self):
        """No-op handler used to swallow disabled list-navigation actions."""
        print "nothing!"

    def __init__(self, session, args = None):
        """Build config entries from the pppd files and wire up key actions."""
        global connected
        global conn
        self.skin = ModemSetup.skin
        # pap-secrets entry is 'user*password' after cleanup.
        # NOTE(review): getSecretString() can return None when no entry
        # matches, which would crash the find() below -- confirm the file
        # is guaranteed to contain an entry.
        secret = getSecretString()
        user = secret[:secret.find('*')]
        password = secret[secret.find('*')+1:]
        self.username = ConfigText(user, fixed_size=False)
        self.password = ConfigPassword(password, fixed_size=False)
        self.phone = ConfigText(getTelephone(), fixed_size=False)
        self.phone.setUseableChars(u"0123456789")
        lst = [ (_("Username"), self.username),
                (_("Password"), self.password),
                (_("Phone number"), self.phone) ]
        self["list"] = ConfigList(lst)
        self["key_green"] = Button("")
        self["key_red"] = Button("")
        self["state"] = Label("")
        self["actions"] = NumberActionMap(["ModemActions"],
        {
            "cancel": self.close,
            "left": self.keyLeft,
            "right": self.keyRight,
            "connect": self.connect,
            "disconnect": self.disconnect,
            "deleteForward": self.deleteForward,
            "deleteBackward": self.deleteBackward,
            "0": self.keyNumber,
            "1": self.keyNumber,
            "2": self.keyNumber,
            "3": self.keyNumber,
            "4": self.keyNumber,
            "5": self.keyNumber,
            "6": self.keyNumber,
            "7": self.keyNumber,
            "8": self.keyNumber,
            "9": self.keyNumber
        }, -1)
        # Separate map so list navigation can be disabled while connected.
        self["ListActions"] = ActionMap(["ListboxDisableActions"],
        {
            "moveUp": self.nothing,
            "moveDown": self.nothing,
            "moveTop": self.nothing,
            "moveEnd": self.nothing,
            "pageUp": self.nothing,
            "pageDown": self.nothing
        }, -1)
        # Animates the '...' progress dots while dialing.
        self.stateTimer = eTimer()
        self.stateTimer.callback.append(self.stateLoop)
        conn.appClosed.append(self.pppdClosed)
        conn.dataAvail.append(self.dataAvail)
        Screen.__init__(self, session)
        self.onClose.append(self.__closed)
        self.onLayoutFinish.append(self.__layoutFinished)

    def __layoutFinished(self):
        """Initialize key functions depending on whether pppd already runs."""
        global conn
        if conn.running():
            self["state"].setText(_("Connected!"));
            self.green_function = NONE
            self.red_function = DISCONNECT
        else:
            self.green_function = CONNECT
            self.red_function = NONE
        self.updateGui()

    def __closed(self):
        """Screen teardown: detach callbacks, abort a pending dial, persist config."""
        global connected
        conn.appClosed.remove(self.pppdClosed)
        conn.dataAvail.remove(self.dataAvail)
        # If a dial attempt is still in flight, abort it.
        if not connected:
            conn.sendCtrlC()
        setOptions(self.phone.getText(), self.username.getText())
        setSecretString(self.username.getText() + ' * ' + self.password.getText())

    def stateLoop(self):
        """Timer callback: append a progress dot to the state label."""
        txt = self["state"].getText()
        txt += '.'
        self["state"].setText(txt)

    def connect(self):
        """Green key: save settings, drop the default route and launch pppd."""
        if self.green_function == CONNECT:
            global gateway
            # Remember the current gateway so it can be restored on hangup.
            gateway = getDefaultGateway()
            self["state"].setText(_("Dialing:"))
            system("route del default")
            system("modprobe ppp_async");
            self.stateTimer.start(1000,False)
            setOptions(self.phone.getText(), self.username.getText())
            setSecretString(self.username.getText() + ' * ' + self.password.getText())
            ret = conn.execute("pppd", "pppd", "-d", "-detach")
            if ret:
                # Non-zero means pppd could not even be spawned.
                print "execute pppd failed!"
                self.pppdClosed(ret)
                pppdClosed(ret)
            self.green_function = NONE
            self.red_function = ABORT
            self.updateGui()

    def disconnect(self):
        """Red key: signal pppd to hang up."""
        conn.sendCtrlC()
        self.red_function = NONE
        self.updateGui()

    def keyLeft(self):
        # Config editing is only allowed while not connected/dialing.
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_LEFT)

    def keyRight(self):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_RIGHT)

    def keyNumber(self, number):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_0 + number)

    def deleteForward(self):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_DELETE)

    def deleteBackward(self):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_BACKSPACE)

    def pppdClosed(self, retval):
        """Per-screen pppd exit hook: reset the UI to the idle state."""
        global connected
        self.stateTimer.stop()
        self.red_function = NONE
        self.green_function = CONNECT
        self["state"].setText("")
        self.updateGui()
        connected = False

    def dataAvail(self, text):
        """Parse pppd stdout and advance the visible connection state."""
        if text.find("Serial connection established") != -1:
            tmp = self["state"].getText()
            tmp += "OK\nLogin:"
            self["state"].setText(tmp)
        if text.find("PAP authentication succeeded") != -1:
            tmp = self["state"].getText()
            tmp += "OK\n";
            self["state"].setText(tmp)
            self.stateTimer.stop()
        if text.find("ip-up finished") != -1:
            global connected
            tmp = self["state"].getText()
            tmp += "Connected :)\n"
            self["state"].setText(tmp)
            self.red_function = DISCONNECT
            connected=True
        if text.find("Connect script failed") != -1:
            tmp = self["state"].getText()
            tmp += "FAILED\n"
            self["state"].setText(tmp)
            self.stateTimer.stop()
            self.red_function = NONE
            self.green_function = CONNECT
        self.updateGui()

    def updateGui(self):
        """Refresh the color-key labels and list focus from the state fields."""
        if self.red_function == NONE:
            self["key_red"].setText("")
        elif self.red_function == DISCONNECT:
            self["key_red"].setText(_("Disconnect"))
        elif self.red_function == ABORT:
            self["key_red"].setText(_("Abort"))
        if self.green_function == NONE:
            self["key_green"].setText("")
        elif self.green_function == CONNECT:
            self["key_green"].setText(_("Connect"))
        # List editing is only enabled while idle (i.e. green == Connect).
        focus_enabled = self.green_function == CONNECT
        self["list"].instance.setSelectionEnable(focus_enabled)
        self["ListActions"].setEnabled(not focus_enabled)
def main(session, **kwargs):
    """Plugin entry point: open the modem setup screen."""
    session.open(ModemSetup)

def Plugins(**kwargs):
    """Register the modem setup plugin in the Enigma2 plugin menu."""
    return PluginDescriptor(name="Modem", description="plugin to connect to internet via builtin modem", where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=main)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from scrapy.http import Request, Response
from scrapy.spidermiddlewares.depth import DepthMiddleware
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from collections.abc import Generator
from scrapy.crawler import Crawler
from scrapy.statscollectors import StatsCollector
@pytest.fixture
def crawler() -> Crawler:
    """Crawler with DEPTH_LIMIT=1 and verbose depth statistics enabled."""
    return get_crawler(Spider, {"DEPTH_LIMIT": 1, "DEPTH_STATS_VERBOSE": True})
@pytest.fixture
def stats(crawler: Crawler) -> Generator[StatsCollector]:
    """Yield the crawler's stats collector, opened for the test and closed after."""
    assert crawler.stats is not None
    crawler.stats.open_spider()
    yield crawler.stats
    crawler.stats.close_spider()
@pytest.fixture
def mw(crawler: Crawler) -> DepthMiddleware:
    """DepthMiddleware built from the depth-limited crawler fixture."""
    return DepthMiddleware.from_crawler(crawler)
def test_process_spider_output(mw: DepthMiddleware, stats: StatsCollector) -> None:
    """DepthMiddleware lets depth-1 requests through, records depth stats,
    and drops requests whose parent is already at DEPTH_LIMIT."""
    parent_request = Request("http://scrapytest.org")
    response = Response("http://scrapytest.org")
    response.request = parent_request
    spider_result = [Request("http://scrapytest.org")]

    # Parent at depth 0: children (depth 1) pass through and are counted.
    passed = list(mw.process_spider_output(response, spider_result))
    assert passed == spider_result
    assert stats.get_value("request_depth_count/1") == 1

    # Parent already at the limit: all children are filtered out.
    parent_request.meta["depth"] = 1
    assert not list(mw.process_spider_output(response, spider_result))
    assert stats.get_value("request_depth_max") == 1
|
python
|
github
|
https://github.com/scrapy/scrapy
|
tests/test_spidermiddleware_depth.py
|
An inner doc comment was used in an invalid context.
Erroneous code example:
```compile_fail,E0753
fn foo() {}
//! foo
// ^ error!
fn main() {}
```
Inner doc comments can only be used before items. For example:
```
//! A working comment applied to the module!
fn foo() {
//! Another working comment!
}
fn main() {}
```
If you want to document the item that follows the comment, use an outer
doc comment instead:
```
/// I am an outer doc comment
#[doc = "I am also an outer doc comment!"]
fn foo() {
// ...
}
```
|
unknown
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_error_codes/src/error_codes/E0753.md
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper script for executing the Microsoft Compiler."""
import os
import sys
import msvc_link
import msvc_tools
# Directory containing this wrapper, used for locating its helper modules.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): this append happens after msvc_link/msvc_tools are already
# imported above; presumably Bazel invokes the script with this directory
# already on sys.path -- confirm before relying on the append.
sys.path.append(SCRIPT_DIR)
# Translation table from gcc/clang-style flags to msvc equivalents.
# Each entry is (pattern, replacements); $N expands the N-th regex capture
# and $NAME tokens are resolved by msvc_tools.ArgParser.
GCCPATTERNS = [
    ('-m(32|64)', ['$TARGET_ARCH']),
    ('-Xcompilation-mode=(dbg|fastbuild|opt)', ['$COMPILATION_MODE']),
    ('-msse', ['/arch:SSE']),
    ('-msse2', ['/arch:SSE2']),
    ('-D(.+)', ['/D$0']),
    ('-U(.+)', ['/U$0']),
    ('-E', ['/E']),
    ('-O0', ['/Od']),
    ('-Os', ['/O1']),
    ('-O2', ['/O2']),
    ('-g0', []),
    ('-g', ['$DEBUG_RT']),
    ('-fexceptions', ['/U_HAS_EXCEPTIONS', '/D_HAS_EXCEPTIONS=1', '/EHsc']),
    ('-fomit-frame-pointer', ['/Oy']),
    ('-fno-rtti', ['/GR-']),
    ('-frtti', ['/GR']),
    ('-fPIC', []),
    # This is unneeded for Windows.
    (('-include', '(.+)'), ['/FI$PATH0']),
    (('/DEPENDENCY_FILE', '(.+)'), ['$GENERATE_DEPS0']),
    ('-w', ['/w']),
    ('-Wall', ['/Wall']),
    ('-Wsign-compare', ['/we4018']),
    ('-Wno-sign-compare', ['/wd4018']),
    ('-Wconversion', ['/we4244', '/we4267']),
    ('-Wno-conversion', ['/wd4244', '/wd4267']),
    ('-Wno-sign-conversion', []),
    ('-Wno-implicit-fallthrough', []),
    ('-Wno-implicit-function-declaration', []),
    ('-Wcovered-switch-default', ['/we4062']),
    ('-Wno-covered-switch-default', ['/wd4062']),
    ('-Wno-error', []),
    ('-Wno-invalid-offsetof', []),
    ('-Wno-overloaded-virtual', []),
    ('-Wno-reorder', []),
    ('-Wno-string-plus-int', []),
    ('-Wl,S', []),  # Stripping is unnecessary since msvc uses pdb files.
    ('-Wl,-rpath(.+)', []),
    ('-B(.+)', []),
    ('-static', []),
    ('-shared', ['/DLL']),
    ('-std=(.+)', []),
]
def _IsLink(args):
"""Determines whether we need to link rather than compile.
A set of arguments is for linking if they contain -static, -shared, are adding
adding library search paths through -L, or libraries via -l.
Args:
args: List of arguments
Returns:
Boolean whether this is a link operation or not.
"""
for arg in args:
# Certain flags indicate we are linking.
if (arg in ['-shared', '-static'] or arg[:2] in ['-l', '-L'] or
arg[:3] == '-Wl'):
return True
return False
class MsvcCompiler(msvc_tools.WindowsRunner):
  """Driver for the Microsoft compiler."""

  def Run(self, argv):
    """Runs the compiler using the passed clang/gcc style argument list.

    Args:
      argv: List of arguments

    Returns:
      The return code of the compilation.

    Raises:
      ValueError: if target architecture isn't specified
    """
    parser = msvc_tools.ArgParser(self, argv, GCCPATTERNS)
    if not parser.target_arch:
      raise ValueError('Must specify target architecture (-m32 or -m64)')
    # CUDA translation units go through nvcc instead of cl.
    compiler = 'cl'
    if parser.is_cuda_compilation:
      compiler = 'nvcc'
    return self.RunBinary(compiler, parser.options, parser.target_arch, parser)
def main(argv):
  """Dispatch to the linker wrapper or the compiler driver.

  Args:
    argv: tool argument list; argv[1:] are the gcc-style flags examined.

  Returns:
    The wrapped tool's exit code.
  """
  # If we are supposed to link create a static library.
  if _IsLink(argv[1:]):
    return msvc_link.main(argv)
  else:
    return MsvcCompiler().Run(argv[1:])

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))  # need to skip the first argument
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Build-time probe: this file compiles only when the toolchain enables AVX-512.
#if defined __AVX512__ || defined __AVX512F__
#include <immintrin.h>

void test()
{
    // Reference an AVX-512 intrinsic so the compiler must support EVEX encoding.
    __m512i zmm = _mm512_setzero_si512();
#if defined __GNUC__ && defined __x86_64__
    // Clobber high ZMM registers, which only exist with AVX-512.
    asm volatile ("" : : : "zmm16", "zmm17", "zmm18", "zmm19");
#endif
}
#else
#error "AVX512 is not supported"
#endif

int main() { return 0; }
|
cpp
|
github
|
https://github.com/opencv/opencv
|
cmake/checks/cpu_avx512.cpp
|
from django.forms.models import inlineformset_factory
from django.test import TestCase
from regressiontests.inline_formsets.models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
    """Tests for inline formset deletion and commit=False behavior."""

    def test_deletion(self):
        """A form marked DELETE removes the underlying object on save."""
        PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
        poet = Poet.objects.create(name='test')
        poem = poet.poem_set.create(name='test poem')
        data = {
            'poem_set-TOTAL_FORMS': u'1',
            'poem_set-INITIAL_FORMS': u'1',
            'poem_set-MAX_NUM_FORMS': u'0',
            'poem_set-0-id': str(poem.pk),
            'poem_set-0-poet': str(poet.pk),
            'poem_set-0-name': u'test',
            'poem_set-0-DELETE': u'on',
        }
        formset = PoemFormSet(data, instance=poet)
        # Fix: validate before saving; the original called save() first,
        # which would hide validation failures behind the save call.
        self.assertTrue(formset.is_valid())
        formset.save()
        self.assertEqual(Poem.objects.count(), 0)

    def test_add_form_deletion_when_invalid(self):
        """
        Make sure that an add form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
        poet = Poet.objects.create(name='test')
        data = {
            'poem_set-TOTAL_FORMS': u'1',
            'poem_set-INITIAL_FORMS': u'0',
            'poem_set-MAX_NUM_FORMS': u'0',
            'poem_set-0-id': u'',
            'poem_set-0-poem': u'1',
            'poem_set-0-name': u'x' * 1000,
        }
        formset = PoemFormSet(data, instance=poet)
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poem.objects.count(), 0)

        # Then make sure that it *does* pass validation and delete the object,
        # even though the data isn't actually valid.
        data['poem_set-0-DELETE'] = 'on'
        formset = PoemFormSet(data, instance=poet)
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poem.objects.count(), 0)

    def test_change_form_deletion_when_invalid(self):
        """
        Make sure that a change form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
        poet = Poet.objects.create(name='test')
        poet.poem_set.create(name='test poem')
        data = {
            'poem_set-TOTAL_FORMS': u'1',
            'poem_set-INITIAL_FORMS': u'1',
            'poem_set-MAX_NUM_FORMS': u'0',
            'poem_set-0-id': u'1',
            'poem_set-0-poem': u'1',
            'poem_set-0-name': u'x' * 1000,
        }
        formset = PoemFormSet(data, instance=poet)
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poem.objects.count(), 1)

        # Then make sure that it *does* pass validation and delete the object,
        # even though the data isn't actually valid.
        data['poem_set-0-DELETE'] = 'on'
        formset = PoemFormSet(data, instance=poet)
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poem.objects.count(), 0)

    def test_save_new(self):
        """
        Make sure inlineformsets respect commit=False
        regression for #10750
        """
        # exclude some required field from the forms
        ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
        school = School.objects.create(name=u'test')
        mother = Parent.objects.create(name=u'mother')
        father = Parent.objects.create(name=u'father')
        data = {
            'child_set-TOTAL_FORMS': u'1',
            'child_set-INITIAL_FORMS': u'0',
            'child_set-MAX_NUM_FORMS': u'0',
            'child_set-0-name': u'child',
        }
        formset = ChildFormSet(data, instance=school)
        self.assertEqual(formset.is_valid(), True)
        # The excluded required fields are filled in before the real save.
        objects = formset.save(commit=False)
        for obj in objects:
            obj.mother = mother
            obj.father = father
            obj.save()
        self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
    """Tests for inlineformset_factory argument validation (fk_name, exclude)."""

    def assertRaisesErrorWithMessage(self, error, message, callable, *args, **kwargs):
        """Assert that calling callable raises error with exactly message.

        NOTE(review): the callable is invoked twice (once via assertRaises and
        once in the try block) -- harmless here because the factory call has no
        side effects, but worth confirming before reusing this helper.
        """
        self.assertRaises(error, callable, *args, **kwargs)
        try:
            callable(*args, **kwargs)
        except error, e:
            self.assertEqual(message, str(e))

    def test_inline_formset_factory(self):
        """
        These should both work without a problem.
        """
        inlineformset_factory(Parent, Child, fk_name='mother')
        inlineformset_factory(Parent, Child, fk_name='father')

    def test_exception_on_unspecified_foreign_key(self):
        """
        Child has two ForeignKeys to Parent, so if we don't specify which one
        to use for the inline formset, we should get an exception.
        """
        self.assertRaisesErrorWithMessage(Exception,
            "<class 'regressiontests.inline_formsets.models.Child'> has more than 1 ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
            inlineformset_factory, Parent, Child
        )

    def test_fk_name_not_foreign_key_field_from_child(self):
        """
        If we specify fk_name, but it isn't a ForeignKey from the child model
        to the parent model, we should get an exception.
        """
        self.assertRaisesErrorWithMessage(Exception,
            "fk_name 'school' is not a ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
            inlineformset_factory, Parent, Child, fk_name='school'
        )

    def test_non_foreign_key_field(self):
        """
        If the field specified in fk_name is not a ForeignKey, we should get an
        exception.
        """
        self.assertRaisesErrorWithMessage(Exception,
            "<class 'regressiontests.inline_formsets.models.Child'> has no field named 'test'",
            inlineformset_factory, Parent, Child, fk_name='test'
        )

    def test_any_iterable_allowed_as_argument_to_exclude(self):
        # Regression test for #9171.
        inlineformset_factory(
            Parent, Child, exclude=['school'], fk_name='mother'
        )
        inlineformset_factory(
            Parent, Child, exclude=('school',), fk_name='mother'
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
import h2o_cmd, h2o_util
import h2o2 as h2o
import re, random, math
from h2o_test import check_sandbox_for_errors, dump_json, verboseprint
import h2o_nodes
from tabulate import tabulate
# recursive walk an object check that it has valid numbers only (no "" or nan or inf
def check_obj_has_good_numbers(obj, hierarchy="", curr_depth=0, max_depth=4, allowNaN=False):
"""Represent instance of a class as JSON.
Arguments:
obj -- any object
Return:
String that represent JSON-encoded object.
"""
def serialize(obj, hierarchy="", curr_depth=0):
"""Recursively walk object's hierarchy. Limit to max_depth"""
if curr_depth>max_depth:
return
if isinstance(obj, (bool, int, long, float, basestring)):
try:
number = float(obj)
print "Yay!", hierarchy, number
except:
if obj is None:
print "Not Yay! how come you're giving me None for a coefficient? %s %s" % (hierarchy, obj)
elif str(obj)=="":
print "Not Yay! how come you're giving me an empty string for a coefficient? %s %s" % (hierarchy, obj)
else:
raise Exception("%s %s %s is not a valid float" % (hierarchy, obj, type(obj)))
# hack for now
number = 0.0
if not allowNaN and math.isnan(number):
raise Exception("%s %s is a NaN" % (hierarchy, obj))
if not allowNaN and math.isinf(number):
raise Exception("%s %s is a Inf" % (hierarchy, obj))
return number
elif isinstance(obj, dict):
obj = obj.copy()
for key in obj:
obj[key] = serialize(obj[key], hierarchy + ".%" % key, curr_depth+1)
return obj
elif isinstance(obj, (list, tuple)):
return [serialize(item, hierarchy + "[%s]" % i, curr_depth+1) for (i, item) in enumerate(obj)]
elif hasattr(obj, '__dict__'):
return serialize(obj.__dict__, hierarchy, curr_depth+1)
else:
return repr(obj) # Don't know how to handle, convert to string
return (serialize(obj, hierarchy, curr_depth+1))
#************************************************************88
# where do we get the CM?
def simpleCheckGLM(self, model, parameters,
        labelList, labelListUsed, allowFailWarning=False, allowZeroCoeff=False,
        prettyPrint=False, noPrint=False,
        maxExpectedIterations=None, doNormalized=False, allowNaN=False):
    """Sanity-check an h2o GLM model: metrics, coefficient names/values
    and the intercept.

    NOTE(review): the bare 'return' directly below disables this entire
    body -- apparently parked while the response structure changes.
    Everything after it is dead code kept for reference.
    """
    # FIX! the structure is all different
    return
    warnings = ''
    # binomial = model.binomial
    residual_deviance = model.training_metrics.residual_deviance
    threshold = model.training_metrics.threshold
    check_obj_has_good_numbers(threshold, 'threshold', allowNaN=allowNaN)
    auc = model.AUC
    # NaN if not logistic
    # check_obj_has_good_numbers(auc, 'model.AUC')
    best_lambda_idx = model.best_lambda_idx
    model_category = model.model_category
    name = model.name
    residual_degrees_of_freedom = model.residual_degrees_of_freedom
    # is this no longer used?
    coefficients_magnitude = model.coefficients_magnitude
    null_deviance = model.null_deviance
    check_obj_has_good_numbers(null_deviance, 'model.null_deviance', allowNaN=allowNaN)
    null_degrees_of_freedom = model.null_degrees_of_freedom
    check_obj_has_good_numbers(null_degrees_of_freedom, 'model.null_degrees_of_freedom', allowNaN=allowNaN)
    domains = model.domains
    # when is is this okay to be NaN?
    AIC = model.AIC
    check_obj_has_good_numbers(AIC, 'model.AIC', allowNaN=allowNaN)
    names = model.names
    coeffs_names = model.coefficients_table.data[0]
    # these are returned as quoted strings. Turn them into numbers
    temp = model.coefficients_table.data[1]
    assert len(coeffs_names) == len(temp), "%s %s" % (len(coeffs_names), len(temp))
    # we need coefficients to be floats or empty
    check_obj_has_good_numbers(temp, 'model.coeffs', allowNaN=False)
    # print "temp", temp[0:10]
    # print "temp[5489:5500]", temp[5489:5500]
    # UPDATE: None (null json) is legal for coeffs
    coeffs = map(lambda x: float(x) if (x is not None and str(x) != "") else 0, temp)
    intercept = coeffs[-1]
    interceptName = coeffs_names[-1]
    assert interceptName == 'Intercept'
    assert len(coeffs) == len(coeffs_names), "%s %s" % (len(coeffs), len(coeffs_names))
    # FIX! if a coeff is zeroed/ignored, it doesn't show up?
    # get rid of intercept in glm response
    # assert (len(coeffs)-1) == len(labelListUsed, \
    #    "%s %s %s %s" % (len(coeffs), len(labelListUsed), coeffs, labelListUsed)
    # labelList still has the response column?
    # ignored columns aren't in model.names, but output response is.
    # labelListUsed has the response col removed so add 1
    # Hmm..dropped coefficients again? can't do this check?
    # assert len(model.names) == len(labelListUsed), \
    #    "%s %s %s %s" % (len(model.names), len(labelListUsed), model.names, labelList)
    # this is no longer true!
    # assert model.threshold!=0
    print "len(coeffs)", len(coeffs)
    print "coeffs:", coeffs
    # last one is intercept
    # NOTE(review): this raise message only matches the first half of the
    # condition; the abs(intercept) test looks like a separate check folded in.
    if interceptName != "Intercept" or abs(intercept) < 1e-26:
        raise Exception("'Intercept' should be last in coeffs_names %s %s" % (interceptName, intercept))
    y = parameters['response_column']
    cString = "\n"
    for i, c in enumerate(coeffs_names):
        cString += "%s: %.5e " % (coeffs_names[i], coeffs[i])
    print cString
    print "\nH2O intercept:\t\t%.5e" % intercept
    print "\nTotal # of coeffs:", len(coeffs_names)
    # intercept is buried in there too
    absIntercept = abs(float(intercept))
    self.assertGreater(absIntercept, 1e-26, (
        "abs. value of GLM coeffs['Intercept'] is " +
        str(absIntercept) + ", not >= 1e-26 for Intercept" + "\n" +
        "parameters:" + dump_json(parameters)
    ))
    if (not allowZeroCoeff) and (len(coeffs) > 1):
        s = 0.0
        for c in coeffs:
            s += abs(float(c))
        self.assertGreater(s, 1e-26, (
            "sum of abs. value of GLM coeffs/intercept is " + str(s) + ", not >= 1e-26\n" +
            "parameters:" + dump_json(parameters)
        ))
    # shouldn't have any errors
    check_sandbox_for_errors()
    return (warnings, coeffs, intercept)
#************************************************************88
def pickRandGlmParams(paramDict, params):
    """Randomly pick GLM parameters from paramDict into params (in place).

    Chooses a random subset of keys from paramDict with one random value
    each, then patches the result so family/link combinations and
    lambda-search settings are legal for h2o GLM.

    Arguments:
    paramDict -- dict mapping parameter name to a list of candidate values
    params -- dict that receives the chosen parameters (mutated in place)

    Return:
    The value chosen for 'x' (column selection), or 0 if 'x' was not picked.
    """
    colX = 0
    randomGroupSize = random.randint(1, len(paramDict))
    for i in range(randomGroupSize):
        # list() so random.choice also works on python 3 dict views.
        randomKey = random.choice(list(paramDict.keys()))
        randomValue = random.choice(paramDict[randomKey])
        params[randomKey] = randomValue
        if randomKey == 'x':
            colX = randomValue

    # Only identity, log and inverse links are allowed for family=gaussian.
    # force legal family/link combos
    if 'family' not in params:  # defaults to gaussian
        if 'link' in params and params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'):
            params['link'] = None
    elif params['family'] is not None and 'link' in params and params['link'] is not None:
        # only log/identity is legal?
        if params['family'] == 'poisson':
            if params['link'] not in ('identity', 'log', 'familyDefault'):
                params['link'] = None
        # only tweedie/tweedie is legal?
        elif params['family'] == 'tweedie':
            # BUG fix: ('tweedie') is a plain string, so 'not in' did a
            # substring test; the one-element tuple checks membership.
            if params['link'] not in ('tweedie',):
                params['link'] = None
        elif params['family'] == 'binomial':
            # only logit and log
            if params['link'] not in ('logit', 'log', 'familyDefault'):
                params['link'] = None
        elif params['family'] == 'gaussian':
            if params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'):
                params['link'] = None
    elif params['family'] is None:  # defaults to gaussian
        if 'link' in params and params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'):
            params['link'] = None

    # lambda_search needs at least two lambdas.
    if 'lambda_search' in params and params['lambda_search'] == 1:
        if 'nlambdas' in params and params['nlambdas'] <= 1:
            params['nlambdas'] = 2
    return colX
def simpleCheckGLMScore(self, glmScore, family='gaussian', allowFailWarning=False, **kwargs):
warnings = None
if 'warnings' in glmScore:
warnings = glmScore['warnings']
# stop on failed
x = re.compile("failed", re.IGNORECASE)
# don't stop if fail to converge
c = re.compile("converge", re.IGNORECASE)
for w in warnings:
print "\nwarning:", w
if re.search(x,w) and not allowFailWarning:
if re.search(c,w):
# ignore the fail to converge warning now
pass
else:
# stop on other 'fail' warnings (are there any? fail to solve?
raise Exception(w)
validation = glmScore['validation']
validation['err'] = h2o_util.cleanseInfNan(validation['err'])
validation['nullDev'] = h2o_util.cleanseInfNan(validation['nullDev'])
validation['resDev'] = h2o_util.cleanseInfNan(validation['resDev'])
print "%15s %s" % ("err:\t", validation['err'])
print "%15s %s" % ("nullDev:\t", validation['nullDev'])
print "%15s %s" % ("resDev:\t", validation['resDev'])
# threshold only there if binomial?
# auc only for binomial
if family=="binomial":
print "%15s %s" % ("AUC:\t", validation['AUC'])
print "%15s %s" % ("threshold:\t", validation['threshold'])
err = False
if family=="poisson" or family=="gaussian":
if 'AIC' not in validation:
print "AIC is missing from the glm json response"
err = True
if not allowNaN and math.isnan(validation['err']):
print "Why is this err = 'nan'?? %6s %s" % ("err:\t", validation['err'])
err = True
if not allowNaN and math.isnan(validation['resDev']):
print "Why is this resDev = 'nan'?? %6s %s" % ("resDev:\t", validation['resDev'])
err = True
if err:
raise Exception ("How am I supposed to tell that any of these errors should be ignored?")
# legal?
if not allowNaN and math.isnan(validation['nullDev']):
## emsg = "Why is this nullDev = 'nan'?? %6s %s" % ("nullDev:\t", validation['nullDev'])
## raise Exception(emsg)
pass
def oldSimpleCheckGLM(self, glm, colX, allowFailWarning=False, allowZeroCoeff=False,
    prettyPrint=False, noPrint=False, maxExpectedIterations=None, doNormalized=False, **kwargs):
    """Validate a (v2-era) GLM json response; return (warnings, cList, intercept).

    self: the calling test case (module-level helper, test instance passed in).
    glm: parsed json from a GLM build; must contain 'glm_model'.
    colX: column that gets an extra non-zero coefficient check (may be None).
    allowFailWarning: tolerate 'fail...' warnings (fail-to-converge always is).
    allowZeroCoeff: skip the non-zero coefficient/intercept checks.
    prettyPrint/noPrint: control coefficient printing.
    maxExpectedIterations: raise if the model iterated more than this.
    doNormalized: use normalized coefficients (norm_beta) instead of beta.
    kwargs: must contain 'response' (the output column); 'n_folds' is read too.

    Raises Exception on bad warnings, structural problems in the response, or
    suspicious coefficient values.
    """
    # if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter
    # h2o GLM will verboseprint the result and print errors.
    # so don't have to do that
    # different when cross validation is used? No trainingErrorDetails?
    GLMModel = glm['glm_model']
    if not GLMModel:
        raise Exception("GLMModel didn't exist in the glm response? %s" % dump_json(glm))
    warnings = None
    if 'warnings' in GLMModel and GLMModel['warnings']:
        warnings = GLMModel['warnings']
        # stop on failed
        x = re.compile("failed", re.IGNORECASE)
        # don't stop if fail to converge
        c = re.compile("converge", re.IGNORECASE)
        for w in warnings:
            print "\nwarning:", w
            if re.search(x,w) and not allowFailWarning:
                if re.search(c,w):
                    # ignore the fail to converge warning now
                    pass
                else:
                    # stop on other 'fail' warnings (are there any? fail to solve?
                    raise Exception(w)
    # for key, value in glm.iteritems(): print key
    # not in GLMGrid?
    # FIX! don't get GLMParams if it can't solve?
    GLMParams = GLMModel['glm']
    family = GLMParams["family"]
    # number of submodels = number of lambda
    # min of 2. lambda_max is first
    submodels = GLMModel['submodels']
    # since all our tests?? only use one lambda, the best_lamda_idx should = 1
    best_lambda_idx = GLMModel['best_lambda_idx']
    print "best_lambda_idx:", best_lambda_idx
    lambda_max = GLMModel['lambda_max']
    print "lambda_max:", lambda_max
    # currently lambda_max is not set by tomas. ..i.e.not valid
    # NOTE(review): the '1==0 and' deliberately disables this check (see comment above)
    if 1==0 and (lambda_max <= submodels[best_lambda_idx].lambda_value):
        raise Exception("lambda_max %s should always be > the lambda result %s we're checking" % (lambda_max, submodels[best_lambda_idx].lambda_value))
    # submodels0 = submodels[0]
    # submodels1 = submodels[-1] # hackery to make it work when there's just one
    if (best_lambda_idx >= len(submodels)) or (best_lambda_idx < 0):
        raise Exception("best_lambda_idx: %s should point to one of lambdas (which has len %s)" % (best_lambda_idx, len(submodels)))
    if (best_lambda_idx >= len(submodels)) or (best_lambda_idx < 0):
        raise Exception("best_lambda_idx: %s should point to one of submodels (which has len %s)" % (best_lambda_idx, len(submodels)))
    submodels1 = submodels[best_lambda_idx] # hackery to make it work when there's just one
    iterations = submodels1['iteration']
    print "GLMModel/iterations:", iterations
    # if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter
    if maxExpectedIterations is not None and iterations > maxExpectedIterations:
        raise Exception("Convergence issue? GLM did iterations: %d which is greater than expected: %d" % (iterations, maxExpectedIterations) )
    if 'validation' not in submodels1:
        raise Exception("Should be a 'validation' key in submodels1: %s" % dump_json(submodels1))
    validationsList = submodels1['validation']
    validations = validationsList
    # xval. compare what we asked for and what we got.
    n_folds = kwargs.setdefault('n_folds', None)
    print "GLMModel/validations"
    # normalize Inf/NaN strings coming back in the json
    validations['null_deviance'] = h2o_util.cleanseInfNan(validations['null_deviance'])
    validations['residual_deviance'] = h2o_util.cleanseInfNan(validations['residual_deviance'])
    print "%15s %s" % ("null_deviance:\t", validations['null_deviance'])
    print "%15s %s" % ("residual_deviance:\t", validations['residual_deviance'])
    # threshold only there if binomial?
    # auc only for binomial
    if family=="binomial":
        print "%15s %s" % ("auc:\t", validations['auc'])
        best_threshold = validations['best_threshold']
        thresholds = validations['thresholds']
        print "%15s %s" % ("best_threshold:\t", best_threshold)
        # have to look up the index for the cm, from the thresholds list
        best_index = None
        for i,t in enumerate(thresholds):
            if t >= best_threshold: # ends up using next one if not present
                best_index = i
                break
        assert best_index!=None, "%s %s" % (best_threshold, thresholds)
        print "Now printing the right 'best_threshold' %s from '_cms" % best_threshold
        # cm = glm['glm_model']['submodels'][0]['validation']['_cms'][-1]
        submodels = glm['glm_model']['submodels']
        # FIX! this isn't right if we have multiple lambdas? different submodels?
        cms = submodels[0]['validation']['_cms']
        self.assertEqual(len(thresholds), len(cms),
            msg="thresholds %s and cm %s should be lists of the same size. %s" % (len(thresholds), len(cms), thresholds))
        # FIX! best_threshold isn't necessarily in the list. jump out if >=
        assert best_index<len(cms), "%s %s" % (best_index, len(cms))
        # if we want 0.5..rounds to int
        # mid = len(cms)/2
        # cm = cms[mid]
        cm = cms[best_index]
        print "cm:", dump_json(cm['_arr'])
        predErr = cm['_predErr']
        classErr = cm['_classErr']
        # compare to predErr
        # pctWrong = h2o_gbm.pp_cm_summary(cm['_arr']);
        # FIX!
        pctWrong = 0
        print "predErr:", predErr
        print "calculated pctWrong from cm:", pctWrong
        print "classErr:", classErr
        # self.assertLess(pctWrong, 9,"Should see less than 9% error (class = 4)")
        print "\nTrain\n==========\n"
        # print h2o_gbm.pp_cm(cm['_arr'])
    if family=="poisson" or family=="gaussian":
        print "%15s %s" % ("AIC:\t", validations['AIC'])
    coefficients_names = GLMModel['coefficients_names']
    # print "coefficients_names:", coefficients_names
    idxs = submodels1['idxs']
    print "idxs:", idxs
    coefficients_names = coefficients_names
    # always check both normalized and normal coefficients
    norm_beta = submodels1['norm_beta']
    # if norm_beta and len(coefficients_names)!=len(norm_beta):
    # print len(coefficients_names), len(norm_beta)
    # raise Exception("coefficients_names and normalized_norm_beta from h2o json not same length. coefficients_names: %s normalized_norm_beta: %s" % (coefficients_names, norm_beta))
    #
    beta = submodels1['beta']
    # print "beta:", beta
    # if len(coefficients_names)!=len(beta):
    # print len(coefficients_names), len(beta)
    # raise Exception("coefficients_names and beta from h2o json not same length. coefficients_names: %s beta: %s" % (coefficients_names, beta))
    # test wants to use normalized?
    if doNormalized:
        beta_used = norm_beta
    else:
        beta_used = beta
    coefficients = {}
    # create a dictionary with name, beta (including intercept) just like v1
    for i,b in zip(idxs, beta_used[:-1]):
        name = coefficients_names[i]
        coefficients[name] = b
    print "len(idxs)", len(idxs), "len(beta_used)", len(beta_used)
    print "coefficients:", coefficients
    print "beta:", beta
    print "norm_beta:", norm_beta
    # last beta entry is the intercept
    coefficients['Intercept'] = beta_used[-1]
    print "len(coefficients_names)", len(coefficients_names)
    print "len(idxs)", len(idxs)
    print "idxs[-1]", idxs[-1]
    print "intercept demapping info:", \
        "coefficients_names[-i]:", coefficients_names[-1], \
        "idxs[-1]:", idxs[-1], \
        "coefficients_names[idxs[-1]]:", coefficients_names[idxs[-1]], \
        "beta_used[-1]:", beta_used[-1], \
        "coefficients['Intercept']", coefficients['Intercept']
    # last one is intercept
    interceptName = coefficients_names[idxs[-1]]
    if interceptName != "Intercept" or abs(beta_used[-1])<1e-26:
        raise Exception("'Intercept' should be last in coefficients_names and beta %s %s %s" %\
            (idxs[-1], beta_used[-1], "-"+interceptName+"-"))
    # idxs has the order for non-zero coefficients, it's shorter than beta_used and coefficients_names
    # new 5/28/14. glm can point to zero coefficients
    # for i in idxs:
    # if beta_used[i]==0.0:
    ## raise Exception("idxs shouldn't point to any 0 coefficients i: %s %s:" % (i, beta_used[i]))
    if len(idxs) > len(beta_used):
        raise Exception("idxs shouldn't be longer than beta_used %s %s" % (len(idxs), len(beta_used)))
    intercept = coefficients.pop('Intercept', None)
    # intercept demapping info: idxs[-1]: 54 coefficients_names[[idxs[-1]]: Intercept beta_used[-1]: -6.6866753099
    # the last one shoudl be 'Intercept' ?
    coefficients_names.pop()
    # have to skip the output col! get it from kwargs
    # better always be there!
    y = kwargs['response']
    # the dict keys are column headers if they exist...how to order those? new: use the 'coefficients_names'
    # from the response
    # Tomas created 'coefficients_names which is the coefficient list in order.
    # Just use it to index coefficients! works for header or no-header cases
    # I guess now we won't print the "None" cases for dropped columns (constant columns!)
    # Because Tomas doesn't get everything in 'coefficients_names' if dropped by GLMQuery before
    # he gets it?
    def add_to_coefficient_list_and_string(c, cList, cString):
        # Append column c's coefficient (or None if missing from the response)
        # to cList, and return cString with its printable form appended.
        if c in coefficients:
            cValue = coefficients[c]
            cValueString = "%s: %.5e " % (c, cValue)
        else:
            print "Warning: didn't see '" + c + "' in json coefficient response.",\
                "Inserting 'None' with assumption it was dropped due to constant column)"
            cValue = None
            cValueString = "%s: %s " % (c, cValue)
        cList.append(cValue)
        # we put each on newline for easy comparison to R..otherwise keep condensed
        if prettyPrint:
            cValueString = "H2O coefficient " + cValueString + "\n"
        # not mutable?
        return cString + cValueString
    # creating both a string for printing and a list of values
    cString = ""
    cList = []
    # print in order using col_names
    # coefficients_names is input only now..same for header or no header, or expanded enums
    for c in coefficients_names:
        cString = add_to_coefficient_list_and_string(c, cList, cString)
    if prettyPrint:
        print "\nH2O intercept:\t\t%.5e" % intercept
        print cString
    else:
        if not noPrint:
            print "\nintercept:", intercept, cString
    print "\nTotal # of coefficients:", len(coefficients_names)
    # pick out the coefficent for the column we enabled for enhanced checking. Can be None.
    # FIX! temporary hack to deal with disappearing/renaming columns in GLM
    if (not allowZeroCoeff) and (colX is not None):
        absXCoeff = abs(float(coefficients[str(colX)]))
        # add kwargs to help debug without looking at console log
        self.assertGreater(absXCoeff, 1e-26, (
            "abs. value of GLM coefficients['" + str(colX) + "'] is " +
            str(absXCoeff) + ", not >= 1e-26 for X=" + str(colX) + "\n" +
            "kwargs:" + dump_json(kwargs)
        ))
        # intercept is buried in there too
        absIntercept = abs(float(intercept))
        self.assertGreater(absIntercept, 1e-26, (
            "abs. value of GLM coefficients['Intercept'] is " +
            str(absIntercept) + ", not >= 1e-26 for Intercept" + "\n" +
            "kwargs:" + dump_json(kwargs)
        ))
    # this is good if we just want min or max
    # maxCoeff = max(coefficients, key=coefficients.get)
    # for more, just invert the dictionary and ...
    if (len(coefficients)>0):
        maxKey = max([(abs(coefficients[x]),x) for x in coefficients])[1]
        print "H2O Largest abs. coefficient value:", maxKey, coefficients[maxKey]
        minKey = min([(abs(coefficients[x]),x) for x in coefficients])[1]
        print "H2O Smallest abs. coefficient value:", minKey, coefficients[minKey]
    else:
        print "Warning, no coefficients returned. Must be intercept only?"
    # many of the GLM tests aren't single column though.
    # quick and dirty check: if all the coefficients are zero,
    # something is broken
    # intercept is in there too, but this will get it okay
    # just sum the abs value up..look for greater than 0
    # skip this test if there is just one coefficient. Maybe pointing to a non-important coeff?
    if (not allowZeroCoeff) and (len(coefficients)>1):
        s = 0.0
        for c in coefficients:
            v = coefficients[c]
            s += abs(float(v))
        self.assertGreater(s, 1e-26, (
            "sum of abs. value of GLM coefficients/intercept is " + str(s) + ", not >= 1e-26\n" +
            "kwargs:" + dump_json(kwargs)
        ))
    print "submodels1, run_time (milliseconds):", submodels1['run_time']
    # shouldn't have any errors
    check_sandbox_for_errors()
    return (warnings, cList, intercept)
# compare this glm to last one. since the files are concatenations,
# the results should be similar? 10% of first is allowed delta
def compareToFirstGlm(self, key, glm, firstglm):
# if isinstance(firstglm[key], list):
# in case it's not a list allready (err is a list)
verboseprint("compareToFirstGlm key:", key)
verboseprint("compareToFirstGlm glm[key]:", glm[key])
# key could be a list or not. if a list, don't want to create list of that list
# so use extend on an empty list. covers all cases?
if type(glm[key]) is list:
kList = glm[key]
firstkList = firstglm[key]
elif type(glm[key]) is dict:
raise Exception("compareToFirstGLm: Not expecting dict for " + key)
else:
kList = [glm[key]]
firstkList = [firstglm[key]]
print "kbn:", kList, firstkList
for k, firstk in zip(kList, firstkList):
# delta must be a positive number ?
delta = .1 * abs(float(firstk))
msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key
self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg)
self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current")
def simpleCheckGLMGrid(self, glmGridResult, colX=None, allowFailWarning=False, **kwargs):
    """Check the best model of a GLMGrid result, then save every grid model.

    NOTE(review): calls the module-level simpleCheckGLM(...) helper (passing
    the test instance explicitly) -- assumed to be defined elsewhere in this
    module; confirm. Returns whatever simpleCheckGLM returns.
    """
    # Shape of the grid response we read the first (best) key from:
    # "grid": {
    #     "destination_keys": [
    #         "GLMGridResults__8222a49156af52532a34fb3ce4304308_0",
    #         "GLMGridResults__8222a49156af52532a34fb3ce4304308_1",
    #         "GLMGridResults__8222a49156af52532a34fb3ce4304308_2"
    #     ]
    # },
    destination_key = glmGridResult['grid']['destination_keys'][0]
    inspectGG = h2o_nodes.nodes[0].glm_view(destination_key)
    models = inspectGG['glm_model']['submodels']
    verboseprint("GLMGrid inspect GLMGrid model 0(best):", dump_json(models[0]))
    g = simpleCheckGLM(self, inspectGG, colX, allowFailWarning=allowFailWarning, **kwargs)
    # just to get some save_model testing
    for i,m in enumerate(glmGridResult['grid']['destination_keys']):
        print "Saving model", m, "to model"+str(i)
        h2o_nodes.nodes[0].save_model(model=m, path='model'+str(i), force=1)
    return g
# This gives me a comma separated x string, for all the columns, with cols with
# missing values, enums, and optionally matching a pattern, removed. useful for GLM
# since it removes rows with any col with NA
# get input from this.
# (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
# h2o_cmd.columnInfoFromInspect(parseResult['destination_key',
# exceptionOnMissingValues=False, timeoutSecs=300)
def goodXFromColumnInfo(y,
num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None,
colTypeDict=None, colNameDict=None, keepPattern=None, key=None,
timeoutSecs=120, returnIgnoreX=False, noPrint=False, returnStringX=True):
y = str(y)
# if we pass a key, means we want to get the info ourselves here
if key is not None:
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False,
max_column_display=99999999, timeoutSecs=timeoutSecs)
num_cols = len(colNameDict)
# now remove any whose names don't match the required keepPattern
if keepPattern is not None:
keepX = re.compile(keepPattern)
else:
keepX = None
x = range(num_cols)
# need to walk over a copy, cause we change x
xOrig = x[:]
ignore_x = [] # for use by RF
for k in xOrig:
name = colNameDict[k]
# remove it if it has the same name as the y output
if str(k)== y: # if they pass the col index as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, str(k), y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif name == y: # if they pass the name as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, name, y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif keepX is not None and not keepX.match(name):
if not noPrint:
print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern)
x.remove(k)
ignore_x.append(k)
# missing values reports as constant also. so do missing first.
# remove all cols with missing values
# could change it against num_rows for a ratio
elif k in missingValuesDict:
value = missingValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has %d missing values" % (k, name, value)
x.remove(k)
ignore_x.append(k)
elif k in constantValuesDict:
value = constantValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value))
x.remove(k)
ignore_x.append(k)
# this is extra pruning..
# remove all cols with enums, if not already removed
elif k in enumSizeDict:
value = enumSizeDict[k]
if not noPrint:
print "Removing %d %s because it has enums of size: %d" % (k, name, value)
x.remove(k)
ignore_x.append(k)
if not noPrint:
print "x has", len(x), "cols"
print "ignore_x has", len(ignore_x), "cols"
# this is probably used in 'cols" in v2, which can take numbers
if returnStringX:
x = ",".join(map(str, x))
ignore_x = ",".join(map(lambda x: "C" + str(x+1), ignore_x))
if not noPrint:
print "\nx:", x
print "\nignore_x:", ignore_x
if returnIgnoreX:
return ignore_x
else:
return x
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialised object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
u'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
from __future__ import unicode_literals
import base64
import json
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_by_path
class BadSignature(Exception):
    """Raised when a signature does not validate."""
class SignatureExpired(BadSignature):
    """Raised when a timestamped signature is older than max_age allows."""
def b64_encode(s):
    """URL-safe base64 encode *s*, dropping the trailing '=' padding."""
    encoded = base64.urlsafe_b64encode(s)
    return encoded.strip(b'=')
def b64_decode(s):
    """Decode URL-safe base64 *s*, restoring any stripped '=' padding first."""
    padding = b'=' * (-len(s) % 4)
    return base64.urlsafe_b64decode(s + padding)
def base64_hmac(salt, value, key):
    """Return the url-safe base64 encoded HMAC digest of *value*."""
    mac = salted_hmac(salt, value, key)
    return b64_encode(mac.digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
    """Return the configured SIGNING_BACKEND signer, keyed for cookie use."""
    signer_class = import_by_path(settings.SIGNING_BACKEND)
    key = 'django.http.cookies' + settings.SECRET_KEY
    return signer_class(key, salt=salt)
class JSONSerializer(object):
    """
    Serialize/deserialize JSON payloads for signing.dumps and signing.loads.

    Uses compact separators and latin-1 bytes, matching the historical
    wire format of signed tokens.
    """
    def dumps(self, obj):
        text = json.dumps(obj, separators=(',', ':'))
        return text.encode('latin-1')
    def loads(self, data):
        text = data.decode('latin-1')
        return json.loads(text)
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
    """
    Return a URL-safe, sha1 signed base64 compressed JSON string. If key is
    None, settings.SECRET_KEY is used instead.

    If compress is True (not the default), try zlib-compressing the payload
    and keep the compressed form only when it is actually shorter; a leading
    '.' marks compression and is covered by the signature (protects against
    zip bombs).

    Salt namespaces the hash so a signed string is only valid for a given
    namespace; re-using a salt across different parts of your application
    without good cause is a security risk.

    The serializer is expected to return a bytestring.
    """
    payload = serializer().dumps(obj)
    prefix = b''
    if compress:
        # zlib work only happens when compression was requested
        squeezed = zlib.compress(payload)
        # the '.' marker costs one byte, so compression must save more than that
        if len(squeezed) < (len(payload) - 1):
            payload = squeezed
            prefix = b'.'
    signer = TimestampSigner(key, salt=salt)
    return signer.sign(prefix + b64_encode(payload))
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
    """
    Reverse of dumps(); raises BadSignature if the signature fails.

    The serializer is expected to accept a bytestring.
    """
    # unsign() always returns text, but base64/zlib operate on bytes
    base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
    was_compressed = base64d.startswith(b'.')
    if was_compressed:
        # strip the compression marker before decoding
        base64d = base64d[1:]
    data = b64_decode(base64d)
    if was_compressed:
        data = zlib.decompress(data)
    return serializer().loads(data)
class Signer(object):
    """Sign and verify strings with an HMAC keyed off key and salt.

    Signed format is "<value><sep><signature>".
    """
    def __init__(self, key=None, sep=':', salt=None):
        # Use of native strings in all versions of Python
        self.sep = str(sep)
        self.key = str(key or settings.SECRET_KEY)
        default_salt = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
        self.salt = str(salt or default_salt)
    def signature(self, value):
        mac = base64_hmac(self.salt + 'signer', value, self.key)
        # Convert the signature from bytes to str only on Python 3
        return force_str(mac)
    def sign(self, value):
        value = force_str(value)
        return str('%s%s%s') % (value, self.sep, self.signature(value))
    def unsign(self, signed_value):
        signed_value = force_str(signed_value)
        if self.sep not in signed_value:
            raise BadSignature('No "%s" found in value' % self.sep)
        value, sig = signed_value.rsplit(self.sep, 1)
        # constant-time compare avoids leaking match length via timing
        if not constant_time_compare(sig, self.signature(value)):
            raise BadSignature('Signature "%s" does not match' % sig)
        return force_text(value)
class TimestampSigner(Signer):
    """Signer that appends a base62 timestamp so signatures can expire."""
    def timestamp(self):
        # base62-encoded whole seconds since the epoch
        return baseconv.base62.encode(int(time.time()))
    def sign(self, value):
        value = force_str(value)
        stamped = str('%s%s%s') % (value, self.sep, self.timestamp())
        return super(TimestampSigner, self).sign(stamped)
    def unsign(self, value, max_age=None):
        """
        Retrieve the original value, checking it wasn't signed more than
        max_age seconds ago.
        """
        result = super(TimestampSigner, self).unsign(value)
        value, timestamp = result.rsplit(self.sep, 1)
        timestamp = baseconv.base62.decode(timestamp)
        if max_age is not None:
            # Check timestamp is not older than max_age
            age = time.time() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    'Signature age %s > %s seconds' % (age, max_age))
        return value
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices

    Wizard that copies the selected invoice move lines into a bank
    statement as statement lines, converting residual amounts to the
    statement currency when the line carries a foreign-currency amount.
    """
    _inherit = "account.statement.from.invoice.lines"
    def populate_statement(self, cr, uid, ids, context=None):
        # Create one bank-statement line per selected move line.
        # Requires context['statement_id']; silently closes the wizard when
        # it is missing or when no lines were selected.
        context = dict(context or {})
        statement_id = context.get('statement_id', False)
        if not statement_id:
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}
        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        line_date = time.strftime('%Y-%m-%d')
        statement = statement_obj.browse(
            cr, uid, statement_id, context=context)
        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            amount = 0.0
            # debit lines become positive statement amounts, credit lines negative
            if line.debit > 0:
                amount = line.amount_residual
            elif line.credit > 0:
                amount = -line.amount_residual
            # foreign-currency lines: convert the residual into the
            # statement's currency at the payment date
            if line.amount_currency:
                amount = currency_obj.compute(cr, uid, line.currency_id.id,
                    statement.currency.id, -line.amount_residual_currency, context=ctx)
            # NOTE(review): context is mutated inside the loop -- presumably so
            # statement_line_obj.create() can see the source move line/invoice;
            # confirm downstream usage
            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'partner_id': line.partner_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'date': statement.date,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
from operator import attrgetter
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import connection
from django.test import TestCase
from django.test.utils import CaptureQueriesContext
from django.utils import six
from .models import (
Chef, CommonInfo, ItalianRestaurant, ParkingLot, Place, Post,
Restaurant, Student, Supplier, Worker, MixinModel,
Title, Copy, Base, SubBase)
class ModelInheritanceTests(TestCase):
    def test_abstract(self):
        """Abstract base classes share fields/methods but are not models."""
        # The Student and Worker models both have 'name' and 'age' fields on
        # them and inherit the __unicode__() method, just as with normal Python
        # subclassing. This is useful if you want to factor out common
        # information for programming purposes, but still completely
        # independent separate models at the database level.
        w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
        Worker.objects.create(name="Barney", age=34, job="Quarry worker")
        s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
        self.assertEqual(six.text_type(w1), "Worker Fred")
        self.assertEqual(six.text_type(s), "Student Pebbles")
        # The children inherit the Meta class of their parents (if they don't
        # specify their own).
        self.assertQuerysetEqual(
            Worker.objects.values("name"), [
                {"name": "Barney"},
                {"name": "Fred"},
            ],
            lambda o: o
        )
        # Since Student does not subclass CommonInfo's Meta, it has the effect
        # of completely overriding it. So ordering by name doesn't take place
        # for Students.
        self.assertEqual(Student._meta.ordering, [])
        # However, the CommonInfo class cannot be used as a normal model (it
        # doesn't exist as a model).
        self.assertRaises(AttributeError, lambda: CommonInfo.objects.all())
    def test_multiple_table(self):
        """Multi-table inheritance: parent-field access, filtering, reverse
        relations, and parent/child updates across the Place hierarchy."""
        post = Post.objects.create(title="Lorem Ipsum")
        # The Post model has distinct accessors for the Comment and Link models.
        post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
        post.attached_link_set.create(
            content="The Web framework for perfections with deadlines.",
            url="http://www.djangoproject.com/"
        )
        # The Post model doesn't have an attribute called
        # 'attached_%(class)s_set'.
        self.assertRaises(
            AttributeError, getattr, post, "attached_%(class)s_set"
        )
        # The Place/Restaurant/ItalianRestaurant models all exist as
        # independent models. However, the subclasses also have transparent
        # access to the fields of their ancestors.
        # Create a couple of Places.
        Place.objects.create(name="Master Shakes", address="666 W. Jersey")
        Place.objects.create(name="Ace Hardware", address="1013 N. Ashland")
        # Test constructor for Restaurant.
        r = Restaurant.objects.create(
            name="Demon Dogs",
            address="944 W. Fullerton",
            serves_hot_dogs=True,
            serves_pizza=False,
            rating=2
        )
        # Test the constructor for ItalianRestaurant.
        c = Chef.objects.create(name="Albert")
        ir = ItalianRestaurant.objects.create(
            name="Ristorante Miron",
            address="1234 W. Ash",
            serves_hot_dogs=False,
            serves_pizza=False,
            serves_gnocchi=True,
            rating=4,
            chef=c
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
        # Saving through the child updates the parent row too.
        ir.address = "1234 W. Elm"
        ir.save()
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
        # Make sure Restaurant and ItalianRestaurant have the right fields in
        # the right order.
        self.assertEqual(
            [f.name for f in Restaurant._meta.fields],
            ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs",
             "serves_pizza", "chef"]
        )
        self.assertEqual(
            [f.name for f in ItalianRestaurant._meta.fields],
            ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs",
             "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"],
        )
        self.assertEqual(Restaurant._meta.ordering, ["-rating"])
        # Even though p.supplier for a Place 'p' (a parent of a Supplier), a
        # Restaurant object cannot access that reverse relation, since it's not
        # part of the Place-Supplier Hierarchy.
        self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
        self.assertRaises(
            FieldError, Restaurant.objects.filter, supplier__name="foo"
        )
        # Parent fields can be used directly in filters on the child model.
        self.assertQuerysetEqual(
            Restaurant.objects.filter(name="Demon Dogs"), [
                "Demon Dogs",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
        # Filters against the parent model return objects of the parent's type.
        p = Place.objects.get(name="Demon Dogs")
        self.assertIs(type(p), Place)
        # Since the parent and child are linked by an automatically created
        # OneToOneField, you can get from the parent to the child by using the
        # child's name.
        self.assertEqual(
            p.restaurant, Restaurant.objects.get(name="Demon Dogs")
        )
        self.assertEqual(
            Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron")
        )
        self.assertEqual(
            Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron")
        )
        # This won't work because the Demon Dogs restaurant is not an Italian
        # restaurant.
        self.assertRaises(
            ItalianRestaurant.DoesNotExist,
            lambda: p.restaurant.italianrestaurant
        )
        # An ItalianRestaurant which does not exist is also a Place which does
        # not exist.
        self.assertRaises(
            Place.DoesNotExist,
            ItalianRestaurant.objects.get, name="The Noodle Void"
        )
        # MultipleObjectsReturned is also inherited.
        self.assertRaises(
            Place.MultipleObjectsReturned,
            Restaurant.objects.get, id__lt=12321
        )
        # Related objects work just as they normally do.
        s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
        s1.customers = [r, ir]
        s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
        s2.customers = [ir]
        # This won't work because the Place we select is not a Restaurant (it's
        # a Supplier).
        p = Place.objects.get(name="Joe's Chickens")
        self.assertRaises(
            Restaurant.DoesNotExist, lambda: p.restaurant
        )
        self.assertEqual(p.supplier, s1)
        self.assertQuerysetEqual(
            ir.provider.order_by("-name"), [
                "Luigi's Pasta",
                "Joe's Chickens"
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Restaurant.objects.filter(provider__name__contains="Chickens"), [
                "Ristorante Miron",
                "Demon Dogs",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [
                "Ristorante Miron",
            ],
            attrgetter("name"),
        )
        ParkingLot.objects.create(
            name="Main St", address="111 Main St", main_site=s1
        )
        ParkingLot.objects.create(
            name="Well Lit", address="124 Sesame St", main_site=ir
        )
        self.assertEqual(
            Restaurant.objects.get(lot__name="Well Lit").name,
            "Ristorante Miron"
        )
        # The update() command can update fields in parent and child classes at
        # once (although it executed multiple SQL queries to do so).
        rows = Restaurant.objects.filter(
            serves_hot_dogs=True, name__contains="D"
        ).update(
            name="Demon Puppies", serves_hot_dogs=False
        )
        self.assertEqual(rows, 1)
        r1 = Restaurant.objects.get(pk=r.pk)
        self.assertFalse(r1.serves_hot_dogs)
        self.assertEqual(r1.name, "Demon Puppies")
        # The values() command also works on fields from parent models.
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.values("name", "rating"), [
                {"rating": 4, "name": "Ristorante Miron"}
            ],
            lambda o: o
        )
        # select_related works with fields from the parent object as if they
        # were a normal part of the model.
        self.assertNumQueries(
            2, lambda: ItalianRestaurant.objects.all()[0].chef
        )
        self.assertNumQueries(
            1, lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
        )
    def test_select_related_defer(self):
        """
        #23370 - Should be able to defer child fields when using
        select_related() from parent to child.
        """
        # One plain Restaurant and one ItalianRestaurant (child) row.
        Restaurant.objects.create(
            name="Demon Dogs",
            address="944 W. Fullerton",
            serves_hot_dogs=True,
            serves_pizza=False,
            rating=2,
        )
        ItalianRestaurant.objects.create(
            name="Ristorante Miron",
            address="1234 W. Ash",
            serves_hot_dogs=False,
            serves_pizza=False,
            serves_gnocchi=True,
            rating=4,
        )
        qs = (Restaurant.objects
            .select_related("italianrestaurant")
            .defer("italianrestaurant__serves_gnocchi")
            .order_by("rating"))
        # The field was actually deferred: accessing it costs one extra query
        # on top of the initial list() evaluation (2 queries total).
        with self.assertNumQueries(2):
            objs = list(qs.all())
            self.assertTrue(objs[1].italianrestaurant.serves_gnocchi)
        # Model fields were assigned correct values despite the deferral.
        self.assertEqual(qs[0].name, 'Demon Dogs')
        self.assertEqual(qs[0].rating, 2)
        self.assertEqual(qs[1].italianrestaurant.name, 'Ristorante Miron')
        self.assertEqual(qs[1].italianrestaurant.rating, 4)
def test_mixin_init(self):
m = MixinModel()
self.assertEqual(m.other_attr, 1)
def test_update_query_counts(self):
"""
Test that update queries do not generate non-necessary queries.
Refs #18304.
"""
c = Chef.objects.create(name="Albert")
ir = ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
chef=c
)
with self.assertNumQueries(3):
ir.save()
    def test_update_parent_filtering(self):
        """
        Test that updating a field of a model subclass doesn't issue an UPDATE
        query constrained by an inner query.
        Refs #10399
        """
        supplier = Supplier.objects.create(
            name='Central market',
            address='610 some street'
        )
        # Capture the expected query in a database agnostic way
        with CaptureQueriesContext(connection) as captured_queries:
            Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
        expected_sql = captured_queries[0]['sql']
        # Capture the queries executed when a subclassed model instance is saved.
        with CaptureQueriesContext(connection) as captured_queries:
            supplier.save(update_fields=('name',))
        # Every UPDATE issued by save() must be identical to the direct,
        # unconstrained UPDATE captured above (i.e. no inner-query WHERE).
        for query in captured_queries:
            sql = query['sql']
            if 'UPDATE' in sql:
                self.assertEqual(expected_sql, sql)
def test_eq(self):
# Equality doesn't transfer in multitable inheritance.
self.assertNotEqual(Place(id=1), Restaurant(id=1))
self.assertNotEqual(Restaurant(id=1), Place(id=1))
def test_ticket_12567(self):
r = Restaurant.objects.create(name='n1', address='a1')
s = Supplier.objects.create(name='s1', address='a2')
self.assertQuerysetEqual(
Place.objects.filter(supplier__isnull=False),
[Place.objects.get(pk=s.pk)],
lambda x: x
)
self.assertQuerysetEqual(
Place.objects.filter(supplier__isnull=True),
[Place.objects.get(pk=r.pk)],
lambda x: x
)
self.assertQuerysetEqual(
Place.objects.exclude(supplier__isnull=False),
[Place.objects.get(pk=r.pk)],
lambda x: x
)
self.assertQuerysetEqual(
Place.objects.exclude(supplier__isnull=True),
[Place.objects.get(pk=s.pk)],
lambda x: x
)
    def test_custompk_m2m(self):
        """M2M on a child with a custom primary key uses the child pk."""
        b = Base.objects.create()
        b.titles.add(Title.objects.create(title="foof"))
        # SubBase declares its own pk (sub_id), so its pk differs from the
        # parent Base row that shares the same id value.
        s = SubBase.objects.create(sub_id=b.id)
        b = Base.objects.get(pk=s.id)
        self.assertNotEqual(b.pk, s.pk)
        # Low-level test for related_val
        self.assertEqual(s.titles.related_val, (s.id,))
        # Higher level test for correct query values (title foof not
        # accidentally found).
        self.assertQuerysetEqual(
            s.titles.all(), [])
class InheritanceSameModelNameTests(TestCase):
    """Related accessors stay distinct when two apps define a model named Copy."""
    def setUp(self):
        # The Title model has distinct accessors for both
        # model_inheritance.Copy and model_inheritance_same_model_name.Copy
        # models.
        self.title = Title.objects.create(title='Lorem Ipsum')
    def test_inheritance_related_name(self):
        # The app-qualified accessor reaches this app's Copy model.
        self.assertEqual(
            self.title.attached_model_inheritance_copy_set.create(
                content='Save $ on V1agr@',
                url='http://v1agra.com/',
                title='V1agra is spam',
            ), Copy.objects.get(
                content='Save $ on V1agr@',
            ))
    def test_inheritance_with_same_model_name(self):
        # Install the second app (which also defines a Copy model), migrate
        # it, then check its own accessor resolves to the right model.
        with self.modify_settings(
                INSTALLED_APPS={'append': ['model_inheritance.same_model_name']}):
            call_command('migrate', verbosity=0)
            from .same_model_name.models import Copy
            self.assertEqual(
                self.title.attached_same_model_name_copy_set.create(
                    content='The Web framework for perfectionists with deadlines.',
                    url='http://www.djangoproject.com/',
                    title='Django Rocks'
                ), Copy.objects.get(
                    content='The Web framework for perfectionists with deadlines.',
                ))
    def test_related_name_attribute_exists(self):
        # The Post model doesn't have an attribute called 'attached_%(app_label)s_%(class)s_set'.
        self.assertFalse(hasattr(self.title, 'attached_%(app_label)s_%(class)s_set'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""Generate a small Avro test data file of Pinot-style records.

Each record has three string dimensions of cardinality 2/4/8, a
daysSinceEpoch time column cycled over --num_time_buckets, and two
constant metrics.
"""
import argparse

from fastavro import writer

parser = argparse.ArgumentParser()
parser.add_argument('output_file', help='Output Avro data file')
parser.add_argument('--num_records', dest='num_records', default=1024, type=int, help='Number of records to generate (default: 1024)')
parser.add_argument('--num_time_buckets', dest='num_time_buckets', default=16, type=int, help='Number of time buckets')
args = parser.parse_args()

print('Generating {} records'.format(args.num_records))

# The non-standard 'pinotType' key tags each field's role for Pinot ingestion.
schema = {
    'name': 'TestRecord',
    'type': 'record',
    'fields': [
        {'name': 'D0', 'type': 'string', 'pinotType': 'DIMENSION'},
        {'name': 'D1', 'type': 'string', 'pinotType': 'DIMENSION'},
        {'name': 'D2', 'type': 'string', 'pinotType': 'DIMENSION'},
        {'name': 'daysSinceEpoch', 'type': 'long', 'pinotType': 'TIME'},
        {'name': 'M0', 'type': 'long', 'pinotType': 'METRIC'},
        {'name': 'M1', 'type': 'double', 'pinotType': 'METRIC'}
    ]
}

# print()/range() instead of Py2-only print statements and xrange, so the
# script runs under both Python 2 and Python 3.
records = [
    {
        'D0': str(i % 2),
        'D1': str(i % 4),
        'D2': str(i % 8),
        'daysSinceEpoch': int(i % args.num_time_buckets),
        'M0': 1,
        'M1': 1.0
    }
    for i in range(args.num_records)
]

# Bug fix: use the parsed argument instead of raw sys.argv[1], which pointed
# at the wrong token whenever an optional flag preceded the positional arg.
print('Writing {}'.format(args.output_file))
with open(args.output_file, 'wb') as out:
    writer(out, schema, records)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make StatusCheck.created_by nullable."""
    def forwards(self, orm):
        """Relax created_by to allow NULL."""
        # Changing field 'StatusCheck.created_by'
        db.alter_column('cabotapp_statuscheck', 'created_by_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True))
    def backwards(self, orm):
        """Revert created_by to NOT NULL (default=None only satisfies South's API; existing NULLs would still fail)."""
        # Changing field 'StatusCheck.created_by'
        db.alter_column('cabotapp_statuscheck', 'created_by_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User']))
    # Frozen ORM state South uses to build the `orm` argument above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cabotapp.instance': {
            'Meta': {'ordering': "['name']", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
            'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'cabotapp.service': {
            'Meta': {'ordering': "['name']", 'object_name': 'Service'},
            'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cabotapp.Instance']", 'symmetrical': 'False', 'blank': 'True'}),
            'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
            'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'cabotapp.servicestatussnapshot': {
            'Meta': {'object_name': 'ServiceStatusSnapshot'},
            'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': "orm['cabotapp.Service']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'cabotapp.shift': {
            'Meta': {'object_name': 'Shift'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'uid': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabotapp.statuscheck': {
            'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
            'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
            'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
            'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
            'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'verify_ssl_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'cabotapp.statuscheckresult': {
            'Meta': {'object_name': 'StatusCheckResult'},
            'check': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabotapp.StatusCheck']"}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'})
        },
        'cabotapp.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['cabotapp']
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.lang.annotation.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Indicates Token related information to be used: the annotated type
 * declares, via {@link #value()}, which TokenSelector implementation picks
 * the appropriate token for it. Retained at runtime so the annotation can
 * be read reflectively.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public @interface TokenInfo {
  /**
   * The type of TokenSelector to be used.
   *
   * @return TokenSelector
   */
  Class<? extends TokenSelector<? extends TokenIdentifier>> value();
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java
|
""" The file is responsable for cart in flask-webpage """
from flask import current_app as app
from flask_seguro.products import Products
class Cart(object):
    """Shopping cart backing the flask-seguro checkout pages."""

    def __init__(self, cart_dict=None):
        """Create a cart, optionally restoring state from ``cart_dict``.

        :param cart_dict: mapping as produced by :meth:`to_dict`; an empty
            or missing dict starts a fresh cart.

        The extra amount (e.g. handling fee) is read from the Flask app
        config, so an application context must be active.
        """
        cart_dict = cart_dict or {}
        if cart_dict == {}:
            self.total = 0
            self.subtotal = 0
            self.items = []
        else:
            self.total = cart_dict["total"]
            self.subtotal = cart_dict["subtotal"]
            self.items = cart_dict["items"]
        self.extra_amount = float(app.config['EXTRA_AMOUNT'])

    def to_dict(self):
        """Return the cart state as a plain dict (session-serializable)."""
        return {
            "total": self.total,
            "subtotal": self.subtotal,
            "items": self.items,
            "extra_amount": self.extra_amount
        }

    def change_item(self, item_id, operation):
        """Add or remove product ``item_id`` and recompute the totals.

        :param operation: ``'add'`` or ``'remove'``.
        :return: True when the product exists and the operation was applied;
            False when the product is unknown or a removal found no matching
            cart entry.
        """
        product = Products().get_one(item_id)
        if not product:
            return False
        if operation == 'add':
            self.items.append(product)
        elif operation == 'remove':
            matches = [x for x in self.items if x['id'] == product['id']]
            if not matches:
                # Bug fix: previously this raised IndexError on cart_p[0]
                # when the product was not in the cart; report failure
                # instead of crashing the request.
                return False
            self.items.remove(matches[0])
        self.update()
        return True

    def update(self):
        """Recompute ``subtotal`` and ``total`` from the current items."""
        subtotal = float(0)
        total = float(0)
        for product in self.items:
            subtotal += float(product["price"])
        if subtotal > 0:
            # The extra amount is only charged on non-empty carts.
            total = subtotal + self.extra_amount
        self.subtotal = subtotal
        self.total = total
|
unknown
|
codeparrot/codeparrot-clean
| ||
"use strict";
const BinaryMiddleware = require("../lib/serialization/BinaryMiddleware");
const SerializerMiddleware = require("../lib/serialization/SerializerMiddleware");
const cont = (base, count) => {
const result = [];
for (let i = 0; i < count; i++) {
result.push(base[i % base.length]);
}
return result;
};
const mw = new BinaryMiddleware();
const other = { other: true };

// Recursively force lazy serializer entries into plain `{ resolvesTo }`
// wrappers so expected and actual payloads can be deep-compared.
const resolveLazy = (item) => {
	if (!SerializerMiddleware.isLazy(item)) return item;
	const data = item();
	return Array.isArray(data)
		? { resolvesTo: data.map(resolveLazy) }
		: { resolvesTo: resolveLazy(data) };
};
describe("BinaryMiddleware", () => {
	// Primitive fixtures covering every value type the middleware encodes:
	// booleans, null, short/long/non-ASCII strings, buffers, small and
	// negative numbers, floats, and a foreign (non-mw) lazy value.
	const items = [
		true,
		false,
		null,
		"",
		"hi",
		"hi".repeat(200),
		"😀",
		"😀".repeat(200),
		Buffer.from("hello"),
		1,
		11,
		0x100,
		-1,
		-11,
		-0x100,
		-1.25,
		SerializerMiddleware.createLazy([5], other)
	];
	// Same fixtures plus nested lazy values, both mw-backed (inlined during
	// serialization) and foreign (kept as references).
	const itemsWithLazy = [
		...items,
		SerializerMiddleware.createLazy(
			[SerializerMiddleware.createLazy([5], other)],
			mw
		),
		SerializerMiddleware.createLazy(
			[
				1,
				SerializerMiddleware.createLazy([2], mw),
				SerializerMiddleware.createLazy([5], other),
				4
			],
			mw
		)
	];
	itemsWithLazy.push(SerializerMiddleware.createLazy([...itemsWithLazy], mw));
	itemsWithLazy.push(
		SerializerMiddleware.createLazy([...itemsWithLazy], other)
	);
	// `undefined` means "nothing" for the prepend/append combinations below.
	items.push(undefined);
	// Payload cases; the boolean/null runs of varying lengths exercise the
	// run-length-style encodings around their size thresholds.
	const cases = [
		...itemsWithLazy.map((item) => [item]),
		[true, true],
		[false, true],
		[true, false],
		[false, false],
		[false, false, false],
		[false, true, false, true],
		[true, true, true],
		[false, false, false],
		cont([false, true, false, true], 5),
		cont([true], 5),
		cont([false], 5),
		cont([false, true, false, true], 6),
		cont([true], 6),
		cont([false], 6),
		cont([false, true, false, true], 7),
		cont([false, true, false, true], 8),
		cont([false, true, false, true], 9),
		cont([false, true, false, true], 132),
		cont([false, true, false, true], 133),
		cont([false, true, false, true], 134),
		cont([false, true, false, true], 135),
		cont([false, true, false, true], 10000),
		cont([true], 135),
		[null],
		[null, null],
		[null, null, null],
		cont([null], 4),
		cont([null], 100),
		cont([null], 300),
		cont([-20], 20),
		cont([400], 20),
		cont([5.5], 20)
	];
	// Round-trip every case (once and repeated 100x) with every
	// prepend/append combination, asserting deserialize(serialize(x)) === x
	// after lazies are resolved. Oversized combinations are skipped.
	for (const c of [1, 100]) {
		for (const caseData of cases) {
			for (const prepend of items) {
				for (const append of items) {
					if (c > 1 && append !== undefined) continue;
					const data = [prepend, ...caseData, append].filter(
						(x) => x !== undefined
					);
					if (data.length * c > 200000) continue;
					if (data.length === 0) continue;
					let key = JSON.stringify(data.map(resolveLazy));
					if (key.length > 100) {
						key = `${key.slice(0, 50)} ... ${key.slice(-50)}`;
					}
					it(`should serialize ${c} x ${key} (${data.length}) correctly`, () => {
						// process.stderr.write(
						// 	`${c} x ${key.slice(0, 20)} (${data.length})\n`
						// );
						const realData = cont(data, data.length * c);
						const serialized = mw.serialize(realData, {});
						const newData = mw.deserialize(serialized, {});
						expect(newData.map(resolveLazy)).toEqual(realData.map(resolveLazy));
					});
				}
			}
		}
	}
});
|
javascript
|
github
|
https://github.com/webpack/webpack
|
test/BinaryMiddleware.unittest.js
|
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\Asset;
use Symfony\Component\Asset\Context\ContextInterface;
use Symfony\Component\Asset\Exception\InvalidArgumentException;
use Symfony\Component\Asset\Exception\LogicException;
use Symfony\Component\Asset\VersionStrategy\VersionStrategyInterface;
/**
* Package that adds a base URL to asset URLs in addition to a version.
*
* The package allows to use more than one base URLs in which case
* it randomly chooses one for each asset; it also guarantees that
* any given path will always use the same base URL to be nice with
* HTTP caching mechanisms.
*
* When the request context is available, this package can choose the
* best base URL to use based on the current request scheme:
*
* * For HTTP request, it chooses between all base URLs;
* * For HTTPs requests, it chooses between HTTPs base URLs and relative protocol URLs
* or falls back to any base URL if no secure ones are available.
*
* @author Fabien Potencier <fabien@symfony.com>
*/
class UrlPackage extends Package
{
    private array $baseUrls = [];
    private ?self $sslPackage = null;

    /**
     * @param string|string[] $baseUrls Base asset URLs
     */
    public function __construct(string|array $baseUrls, VersionStrategyInterface $versionStrategy, ?ContextInterface $context = null)
    {
        parent::__construct($versionStrategy, $context);

        if (!\is_array($baseUrls)) {
            $baseUrls = (array) $baseUrls;
        }

        if (!$baseUrls) {
            throw new LogicException('You must provide at least one base URL.');
        }

        foreach ($baseUrls as $baseUrl) {
            $this->baseUrls[] = rtrim($baseUrl, '/');
        }

        // Precompute a package restricted to HTTPS-capable URLs; it is only
        // needed when some (but not all) base URLs are insecure.
        $sslUrls = $this->getSslUrls($baseUrls);

        if ($sslUrls && $baseUrls !== $sslUrls) {
            $this->sslPackage = new self($sslUrls, $versionStrategy);
        }
    }

    public function getUrl(string $path): string
    {
        if ($this->isAbsoluteUrl($path)) {
            return $path;
        }

        // Delegate to the HTTPS-only package when serving a secure request.
        if (null !== $this->sslPackage && $this->getContext()->isSecure()) {
            return $this->sslPackage->getUrl($path);
        }

        $url = $this->getVersionStrategy()->applyVersion($path);

        // The version strategy may have produced an absolute URL already.
        if ($this->isAbsoluteUrl($url)) {
            return $url;
        }

        if ($url && '/' != $url[0]) {
            $url = '/'.$url;
        }

        return $this->getBaseUrl($path).$url;
    }

    /**
     * Returns the base URL for a path.
     */
    public function getBaseUrl(string $path): string
    {
        if (1 === \count($this->baseUrls)) {
            return $this->baseUrls[0];
        }

        return $this->baseUrls[$this->chooseBaseUrl($path)];
    }

    /**
     * Determines which base URL to use for the given path.
     *
     * Override this method to change the default distribution strategy.
     * This method should always return the same base URL index for a given path.
     */
    protected function chooseBaseUrl(string $path): int
    {
        // Deterministic hash keeps a given path on one base URL (HTTP-cache friendly).
        return abs(crc32($path)) % \count($this->baseUrls);
    }

    private function getSslUrls(array $urls): array
    {
        $sslUrls = [];
        foreach ($urls as $url) {
            // Protocol-relative ('//') and empty URLs are usable over HTTPS too.
            if (str_starts_with($url, 'https://') || str_starts_with($url, '//') || '' === $url) {
                $sslUrls[] = $url;
            } elseif (!parse_url($url, \PHP_URL_SCHEME)) {
                throw new InvalidArgumentException(\sprintf('"%s" is not a valid URL.', $url));
            }
        }

        return $sslUrls;
    }
}
|
php
|
github
|
https://github.com/symfony/symfony
|
src/Symfony/Component/Asset/UrlPackage.php
|
"""
Support for LiteJet scenes.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/scene.litejet/
"""
import logging
import homeassistant.components.litejet as litejet
from homeassistant.components.scene import Scene
DEPENDENCIES = ['litejet']
ATTR_NUMBER = 'number'
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up scenes for the LiteJet platform."""
    system = hass.data['litejet_system']
    scenes = []
    for number in system.scenes():
        name = system.get_scene_name(number)
        # Honor the component-level ignore list.
        if litejet.is_ignored(hass, name):
            continue
        scenes.append(LiteJetScene(system, number, name))
    add_devices(scenes)
class LiteJetScene(Scene):
    """Representation of a single LiteJet scene."""

    def __init__(self, lj, i, name):
        """Store the LiteJet connection, scene index and display name."""
        self._lj = lj
        self._index = i
        self._name = name

    @property
    def name(self):
        """Name of the scene."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed; activation is fire-and-forget."""
        return False

    @property
    def device_state_attributes(self):
        """Expose the LiteJet scene number."""
        return {ATTR_NUMBER: self._index}

    def activate(self, **kwargs):
        """Trigger the scene on the LiteJet system."""
        self._lj.activate_scene(self._index)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from functools import partial
import numpy as np
from PIL import Image
from qtpy import QtWidgets, QtCore
from ...widgets.UtilityWidgets import open_file_dialog, open_files_dialog, save_file_dialog
# imports for type hinting in PyCharm -- DO NOT DELETE
from ...widgets.integration import IntegrationWidget
from ...model.DioptasModel import DioptasModel
from ...model.util.Pattern import Pattern
from ...model.util.HelperModule import get_partial_index, get_partial_value
from .EpicsController import EpicsController
class ImageController(object):
"""
The ImageController manages the Image actions in the Integration Window. It connects the file actions, as
well as interaction with the image_view.
"""
    def __init__(self, widget, dioptas_model):
        """
        :param widget: Reference to IntegrationView
        :param dioptas_model: Reference to DioptasModel object
        :type widget: IntegrationWidget
        :type dioptas_model: DioptasModel
        """
        self.widget = widget
        self.model = dioptas_model
        self.epics_controller = EpicsController(self.widget, self.model)
        # UI state flags
        self.img_docked = True
        self.view_mode = 'normal'  # modes available: normal, alternative
        self.roi_active = False
        # Last clicked two-theta / azimuth (None until set; clicked_tth is
        # consumed by plot_cake_integral).
        self.clicked_tth = None
        self.clicked_azi = None
        # Saved splitter geometries so switching between the view modes can
        # restore each mode's layout.
        self.vertical_splitter_alternative_state = None
        self.vertical_splitter_normal_state = None
        self.horizontal_splitter_alternative_state = None
        self.horizontal_splitter_normal_state = None
        self.initialize()
        self.create_signals()
        self.create_mouse_behavior()
    def initialize(self):
        """Populate the image widgets with the initial state of the model."""
        self.update_img_control_widget()
        self.plot_img()
        self.plot_mask()
        self.widget.img_widget.auto_level()
    def plot_img(self, auto_scale=None):
        """
        Plots the current image loaded in the img model.
        :param auto_scale:
            Determines if intensities should be auto-scaled. If value is None it will use the
            state of the autoscale button in the GUI.
        """
        if auto_scale is None:
            auto_scale = self.widget.img_autoscale_btn.isChecked()
        if self.widget.integration_image_widget.show_background_subtracted_img_btn.isChecked():
            # NOTE(review): the second argument presumably suppresses the
            # widget's own autoscaling — confirm in plot_image's definition.
            self.widget.img_widget.plot_image(self.model.img_model.img_data, False)
        else:
            self.widget.img_widget.plot_image(self.model.img_model.raw_img_data, False)
        if auto_scale:
            self.widget.img_widget.auto_level()
    def plot_cake(self, auto_scale=None):
        """
        Plots the cake saved in the calibration data
        :param auto_scale:
            Determines if the intensity should be auto-scaled. If value is None it will use the
            state of the autoscale button in the GUI.
        """
        if auto_scale is None:
            auto_scale = self.widget.img_autoscale_btn.isChecked()
        # Apply the azimuthal shift chosen with the slider before display.
        shift_amount = self.widget.cake_shift_azimuth_sl.value()
        self.widget.cake_widget.plot_image(np.roll(self.model.cake_data, shift_amount, axis=0))
        self.plot_cake_integral()
        self.update_cake_axes_range()
        if auto_scale:
            self.widget.cake_widget.auto_level()
    def plot_cake_integral(self, tth=None):
        """
        Plots the cake integral at a given two-theta position.
        :param tth: two-theta to integrate at; defaults to the last clicked
            position. No-op while the integral plot is hidden or nothing has
            been clicked yet.
        """
        if not self.widget.cake_widget.cake_integral_plot.isVisible() or self.clicked_tth is None:
            return
        if tth is None:
            tth = self.clicked_tth
        x, y = self.model.calibration_model.cake_integral(
            tth,
            self.widget.integration_control_widget.integration_options_widget.cake_integral_width_sb.value()
        )
        # Shift intensities to match the azimuth shift of the displayed cake.
        shift_amount = self.widget.cake_shift_azimuth_sl.value()
        self.widget.cake_widget.plot_cake_integral(x, np.roll(y, shift_amount))
    def save_cake_integral(self):
        """
        Asks the user for a filename (defaulting to the image name with an
        .xy extension) and saves the current cake integral data there.
        """
        img_filename, _ = os.path.splitext(os.path.basename(self.model.img_model.filename))
        filename = save_file_dialog(
            self.widget, "Save Cake Integral Data.",
            os.path.join(self.model.working_directories['pattern'],
                         img_filename + '.xy'))
        # An empty string means the user cancelled the dialog.
        if filename != '':
            integral_pattern = Pattern(*self.widget.cake_widget.cake_integral_item.getData())
            integral_pattern.save(filename)
def plot_mask(self):
    """
    Plots the mask data.
    """
    # the mask overlay is only shown in image mode; otherwise an all-zero
    # (empty) mask of the same shape is plotted to clear the overlay
    if self.model.use_mask and self.widget.img_mode == 'Image':
        self.widget.img_widget.plot_mask(self.model.mask_model.get_img())
        self.widget.img_mask_btn.setChecked(True)
    else:
        self.widget.img_widget.plot_mask(np.zeros(self.model.mask_model.get_img().shape))
        self.widget.img_mask_btn.setChecked(False)
def update_mask_transparency(self):
    """
    Changes the colormap of the mask according to the transparency option selection in the GUI. Resulting Mask will
    be either transparent or solid.
    """
    self.model.transparent_mask = self.widget.mask_transparent_cb.isChecked()
    # RGBA red: alpha 100 = semi-transparent, alpha 255 = opaque
    if self.model.transparent_mask:
        self.widget.img_widget.set_mask_color([255, 0, 0, 100])
    else:
        self.widget.img_widget.set_mask_color([255, 0, 0, 255])
def create_signals(self):
    """
    Creates all the connections of the GUI elements.
    """
    # NOTE: this docstring previously appeared as a stray string expression
    # after the first four connections; it is now a proper method docstring.
    self.model.configuration_selected.connect(self.update_gui_from_configuration)
    self.model.img_changed.connect(self.update_img_control_widget)
    self.model.img_changed.connect(self.plot_img)
    self.model.img_changed.connect(self.plot_mask)

    self.widget.img_step_file_widget.next_btn.clicked.connect(self.load_next_img)
    self.widget.img_step_file_widget.previous_btn.clicked.connect(self.load_previous_img)
    self.widget.load_img_btn.clicked.connect(self.load_file)
    self.widget.img_filename_txt.editingFinished.connect(self.filename_txt_changed)
    self.widget.img_directory_txt.editingFinished.connect(self.directory_txt_changed)
    self.widget.img_directory_btn.clicked.connect(self.img_directory_btn_click)

    self.widget.img_step_series_widget.next_btn.clicked.connect(self.load_next_series_img)
    self.widget.img_step_series_widget.previous_btn.clicked.connect(self.load_prev_series_img)
    self.widget.img_step_series_widget.pos_txt.editingFinished.connect(self.load_series_img)

    self.widget.file_info_btn.clicked.connect(self.show_file_info)
    self.widget.integration_control_widget.img_control_widget.batch_btn.clicked.connect(self.show_batch_frame)

    self.widget.img_step_file_widget.browse_by_name_rb.clicked.connect(self.set_iteration_mode_number)
    self.widget.img_step_file_widget.browse_by_time_rb.clicked.connect(self.set_iteration_mode_time)
    self.widget.image_control_widget.sources_cb.currentTextChanged.connect(self.select_source)

    ###
    # Image widget image specific controls
    self.widget.img_roi_btn.clicked.connect(self.click_roi_btn)
    self.widget.img_mask_btn.clicked.connect(self.change_mask_mode)
    self.widget.mask_transparent_cb.clicked.connect(self.update_mask_transparency)

    ###
    # Image Widget cake specific controls
    self.widget.img_phases_btn.clicked.connect(self.toggle_show_phases)
    self.widget.cake_shift_azimuth_sl.valueChanged.connect(partial(self.plot_cake, None))
    self.widget.cake_shift_azimuth_sl.valueChanged.connect(self._update_cake_mouse_click_pos)
    self.widget.cake_shift_azimuth_sl.valueChanged.connect(self.update_cake_azimuth_axis)
    self.widget.cake_shift_azimuth_sl.valueChanged.connect(partial(self.plot_cake_integral, None))
    self.widget.integration_image_widget.cake_view.img_view_box.sigRangeChanged.connect(self.update_cake_axes_range)
    self.widget.pattern_q_btn.clicked.connect(partial(self.set_cake_axis_unit, 'q_A^-1'))
    self.widget.pattern_tth_btn.clicked.connect(partial(self.set_cake_axis_unit, '2th_deg'))

    self.widget.integration_control_widget.integration_options_widget.cake_integral_width_sb.valueChanged. \
        connect(partial(self.plot_cake_integral, None))
    self.widget.integration_control_widget.integration_options_widget.cake_save_integral_btn.clicked. \
        connect(self.save_cake_integral)

    ###
    # General Image Widget controls
    self.widget.img_dock_btn.clicked.connect(self.img_dock_btn_clicked)
    self.widget.img_autoscale_btn.clicked.connect(self.img_autoscale_btn_clicked)
    self.widget.img_mode_btn.clicked.connect(self.change_view_mode)
    self.widget.integration_image_widget.show_background_subtracted_img_btn.clicked.connect(
        self.show_background_subtracted_img_btn_clicked)

    self.widget.qa_save_img_btn.clicked.connect(self.save_img)
    self.widget.load_calibration_btn.clicked.connect(self.load_calibration)

    # signals
    self.widget.change_view_btn.clicked.connect(self.change_view_btn_clicked)
    self.widget.autoprocess_cb.toggled.connect(self.auto_process_cb_click)
def create_mouse_behavior(self):
    """
    Creates the signal connections of mouse interactions
    """
    # image and cake views share the same click/move handlers; the handlers
    # dispatch on self.widget.img_mode internally
    self.widget.img_widget.mouse_left_clicked.connect(self.img_mouse_click)
    self.widget.img_widget.mouse_moved.connect(self.show_img_mouse_position)
    self.widget.cake_widget.mouse_left_clicked.connect(self.img_mouse_click)
    self.widget.cake_widget.mouse_moved.connect(self.show_img_mouse_position)
    self.widget.pattern_widget.mouse_left_clicked.connect(self.pattern_mouse_click)
def load_file(self, *args, **kwargs):
    """
    Loads one or several image files, either given via the 'filename' keyword
    argument or chosen by the user in a file dialog. A single file is loaded
    directly; several files are handled according to the selected batch mode
    radio button: summed into one image, integrated one by one, or re-saved
    as image files.

    :param kwargs: may contain 'filename' to bypass the open-file dialog
    """
    filename = kwargs.get('filename', None)
    if filename is None:
        filenames = open_files_dialog(self.widget, "Load image data file(s)",
                                      self.model.working_directories['image'])
    else:
        filenames = [filename]

    if filenames is not None and len(filenames) != 0:
        self.model.working_directories['image'] = os.path.dirname(str(filenames[0]))
        if len(filenames) == 1:
            self.model.img_model.load(str(filenames[0]))
        else:
            if self.widget.img_batch_mode_add_rb.isChecked():
                # sum all files into one image; block signals so the GUI only
                # updates once at the end
                self.model.img_model.blockSignals(True)
                self.model.img_model.load(str(filenames[0]))
                for ind in range(1, len(filenames)):
                    self.model.img_model.add(filenames[ind])
                self.model.img_model.blockSignals(False)
                self.model.img_model.img_changed.emit()
            elif self.widget.img_batch_mode_integrate_rb.isChecked():
                self._load_multiple_files(filenames)
            elif self.widget.img_batch_mode_image_save_rb.isChecked():
                self._save_multiple_image_files(filenames)
def _load_multiple_files(self, filenames):
    """
    Integrates a list of image files one by one, saving each resulting
    pattern into the chosen working directory. Shows a progress dialog that
    allows aborting. Requires a valid calibration.

    :param filenames: list of image file paths to integrate
    """
    if not self.model.calibration_model.is_calibrated:
        self.widget.show_error_msg("Can not integrate multiple images without calibration.")
        return

    working_directory = self._get_pattern_working_directory()
    if working_directory == '':
        return  # abort file processing if no directory was selected

    progress_dialog = self.widget.get_progress_dialog("Integrating multiple files.", "Abort Integration",
                                                      len(filenames))
    self._set_up_batch_processing()

    for ind in range(len(filenames)):
        filename = str(filenames[ind])
        base_filename = os.path.basename(filename)

        progress_dialog.setValue(ind)
        progress_dialog.setLabelText("Integrating: " + base_filename)

        # load silently; the pattern is produced by the explicit integration below
        self.model.img_model.blockSignals(True)
        self.model.img_model.load(filename)
        self.model.img_model.blockSignals(False)

        x, y = self.integrate_pattern()
        self._save_pattern(base_filename, working_directory, x, y)

        QtWidgets.QApplication.processEvents()
        if progress_dialog.wasCanceled():
            break
    progress_dialog.close()
    self._tear_down_batch_processing()
def _get_pattern_working_directory(self):
if self.widget.pattern_autocreate_cb.isChecked():
working_directory = self.model.working_directories['pattern']
else:
# if there is no working directory selected A file dialog opens up to choose a directory...
working_directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget, "Please choose the output directory for the integrated Patterns.",
self.model.working_directories['pattern']))
return working_directory
def _set_up_batch_processing(self):
self.model.blockSignals(True)
def _tear_down_batch_processing(self):
self.model.blockSignals(False)
self.model.img_changed.emit()
self.model.pattern_changed.emit()
def _save_multiple_image_files(self, filenames):
    """
    Loads each given image file and re-saves it (prefixed with 'batch_') into
    a user-chosen output directory, showing an abortable progress dialog.
    Pattern auto-integration is disabled for the duration of the batch.

    :param filenames: list of image file paths to process
    """
    working_directory = str(QtWidgets.QFileDialog.getExistingDirectory(
        self.widget, "Please choose the output directory for the Images.",
        self.model.working_directories['image']))

    if working_directory == '':
        return

    self._set_up_batch_processing()
    progress_dialog = self.widget.get_progress_dialog("Saving multiple image files.", "Abort",
                                                      len(filenames))
    QtWidgets.QApplication.processEvents()
    self.model.current_configuration.auto_integrate_pattern = False
    for ind, filename in enumerate(filenames):
        base_filename = os.path.basename(filename)

        progress_dialog.setValue(ind)
        progress_dialog.setLabelText("Saving: " + base_filename)

        self.model.img_model.load(str(filename))
        self.save_img(os.path.join(working_directory, 'batch_' + base_filename))

        QtWidgets.QApplication.processEvents()
        if progress_dialog.wasCanceled():
            break
    self.model.current_configuration.auto_integrate_pattern = True
    progress_dialog.close()
    self._tear_down_batch_processing()
def _save_pattern(self, base_filename, working_directory, x, y):
    """
    Saves an integrated pattern (x, y) once per selected file ending.

    For '.xy' files a calibration header is included. When the pattern has a
    background, a background-subtracted copy is additionally written into a
    'bkg_subtracted' subdirectory (done inside the per-ending loop).

    :param base_filename: filename of the source image (extension is replaced)
    :param working_directory: destination directory
    :param x: pattern x values
    :param y: pattern intensities
    """
    file_endings = self._get_pattern_file_endings()
    for file_ending in file_endings:
        filename = os.path.join(working_directory, os.path.splitext(base_filename)[0] + file_ending)
        self.model.pattern_model.set_pattern(x, y, filename, unit=self.get_integration_unit())
        if file_ending == '.xy':
            self.model.pattern_model.save_pattern(filename, header=self._create_pattern_header())
        else:
            self.model.pattern_model.save_pattern(filename)

        # save the background subtracted filename
        if self.model.pattern.has_background():
            directory = os.path.join(working_directory, 'bkg_subtracted')
            if not os.path.exists(directory):
                os.mkdir(directory)
            filename = os.path.join(directory, self.model.pattern.name + file_ending)
            if file_ending == '.xy':
                self.model.pattern_model.save_pattern(filename, header=self._create_pattern_header(),
                                                      subtract_background=True)
            else:
                self.model.pattern_model.save_pattern(filename, subtract_background=True)
def _create_pattern_header(self):
header = self.model.calibration_model.create_file_header()
header = header.replace('\r\n', '\n')
header += '\n#\n# ' + self.model.pattern_model.unit + '\t I'
return header
def _get_pattern_file_endings(self):
res = []
if self.widget.pattern_header_xy_cb.isChecked():
res.append('.xy')
if self.widget.pattern_header_chi_cb.isChecked():
res.append('.chi')
if self.widget.pattern_header_dat_cb.isChecked():
res.append('.dat')
return res
def show_batch_frame(self):
    """Brings the batch-processing window to the front."""
    self.widget.batch_widget.raise_widget()
def show_file_info(self):
    """Brings the file-info window to the front."""
    self.widget.file_info_widget.raise_widget()
def get_integration_unit(self):
    """
    Returns the unit string for the currently selected pattern unit button
    ('2th_deg', 'q_A^-1' or 'd_A'), or None if no button is checked.
    """
    for button, unit in ((self.widget.pattern_tth_btn, '2th_deg'),
                         (self.widget.pattern_q_btn, 'q_A^-1'),
                         (self.widget.pattern_d_btn, 'd_A')):
        if button.isChecked():
            return unit
def integrate_pattern(self):
    """
    Integrates the current image into a 1d pattern using the active calibration.

    The integration mask combines (logical OR) the mask-model mask (when the
    mask button is active) and the ROI mask (when the ROI button is active);
    either may be absent. The number of bins is taken from the GUI unless
    automatic binning is selected.

    :return: the (x, y) result of calibration_model.integrate_1d, or None if
        no integration unit button is checked
    """
    if self.widget.img_mask_btn.isChecked():
        mask = self.model.mask_model.get_mask()
    else:
        mask = None

    if self.widget.img_roi_btn.isChecked():
        roi_mask = self.widget.img_widget.roi.getRoiMask(self.model.img_data.shape)
    else:
        roi_mask = None

    # combine the two masks; the previous four-way if/elif chain contained
    # two no-op branches (both-None and mask-only cases) and is simplified
    if mask is not None and roi_mask is not None:
        mask = np.logical_or(mask, roi_mask)
    elif mask is None:
        mask = roi_mask

    if self.widget.pattern_tth_btn.isChecked():
        integration_unit = '2th_deg'
    elif self.widget.pattern_q_btn.isChecked():
        integration_unit = 'q_A^-1'
    elif self.widget.pattern_d_btn.isChecked():
        integration_unit = 'd_A'
    else:
        # in case something weird happened
        print('No correct integration unit selected')
        return

    if not self.widget.automatic_binning_cb.isChecked():
        num_points = int(str(self.widget.bin_count_txt.text()))
    else:
        num_points = None
    return self.model.calibration_model.integrate_1d(mask=mask, unit=integration_unit, num_points=num_points)
def change_mask_mode(self):
    """
    Syncs the model's mask usage with the mask button state, updates the
    transparency checkbox visibility and refreshes mask and image display.
    """
    use_mask = self.widget.integration_image_widget.mask_btn.isChecked()
    self.model.use_mask = use_mask
    self.widget.mask_transparent_cb.setVisible(use_mask)
    self.plot_mask()
    self.model.img_model.img_changed.emit()
def update_mask_mode(self):
    """Updates the mask-related GUI controls from the current model state."""
    use_mask = bool(self.model.use_mask)
    self.widget.integration_image_widget.mask_btn.setChecked(use_mask)
    self.widget.mask_transparent_cb.setVisible(use_mask)
    self.widget.mask_transparent_cb.setChecked(bool(self.model.transparent_mask))
def update_img_mode(self):
    """Programmatically clicks the mode button to toggle image/cake view."""
    self.widget.img_mode_btn.click()
def load_series_img(self):
    """Loads the series image at the position typed into the position field."""
    target_pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
    self.model.img_model.load_series_img(target_pos)
def load_prev_series_img(self):
    """Steps backwards in the image series by the configured step size."""
    series_widget = self.widget.img_step_series_widget
    step = int(str(series_widget.step_txt.text()))
    pos = int(str(series_widget.pos_txt.text()))
    self.model.img_model.load_series_img(pos - step)
def load_next_series_img(self):
    """Steps forwards in the image series by the configured step size."""
    series_widget = self.widget.img_step_series_widget
    step = int(str(series_widget.step_txt.text()))
    pos = int(str(series_widget.pos_txt.text()))
    self.model.img_model.load_series_img(pos + step)
def load_next_img(self):
    """Loads the next image file, advancing by the configured step size."""
    step_size = int(str(self.widget.img_step_file_widget.step_txt.text()))
    self.model.img_model.load_next_file(step=step_size)
def load_previous_img(self):
    """Loads the previous image file, going back by the configured step size."""
    step_size = int(str(self.widget.img_step_file_widget.step_txt.text()))
    self.model.img_model.load_previous_file(step=step_size)
def filename_txt_changed(self):
    """
    Loads the file whose name was typed into the filename field, resolved
    against the current directory. On a non-existing file (or a load error)
    the field is reset to the currently loaded filename.
    """
    current_filename = os.path.basename(self.model.img_model.filename)
    current_directory = str(self.widget.img_directory_txt.text())
    new_filename = str(self.widget.img_filename_txt.text())
    if os.path.exists(os.path.join(current_directory, new_filename)):
        try:
            self.load_file(filename=os.path.join(current_directory, new_filename))
        except TypeError:
            # load failed -- restore the previous filename in the text field
            self.widget.img_filename_txt.setText(current_filename)
    else:
        self.widget.img_filename_txt.setText(current_filename)
def directory_txt_changed(self):
    """
    Applies a new image working directory typed into the directory field.
    Invalid directories reset the field to the current working directory.
    """
    new_directory = str(self.widget.img_directory_txt.text())
    if os.path.exists(new_directory) and new_directory != self.model.working_directories['image']:
        if self.model.img_model.autoprocess:
            # snapshot the old directory contents so autoprocessing can
            # detect newly appearing files -- TODO confirm consumer of _files_now
            self._files_now = dict([(f, None) for f in os.listdir(self.model.working_directories['image'])])
        self.model.working_directories['image'] = os.path.abspath(new_directory)
        old_filename = str(self.widget.img_filename_txt.text())
        # '*' marks that the shown filename does not live in the new directory
        self.widget.img_filename_txt.setText(old_filename + '*')
    else:
        self.widget.img_directory_txt.setText(self.model.working_directories['image'])
def img_directory_btn_click(self):
    """
    Lets the user pick a new image working directory in a dialog and applies
    it to the model and the directory text field.
    """
    directory = str(QtWidgets.QFileDialog.getExistingDirectory(
        self.widget,
        "Please choose the image working directory.",
        self.model.working_directories['image']))
    if directory != '':
        if self.model.img_model.autoprocess:
            # snapshot old directory contents for autoprocess new-file detection
            self._files_now = dict([(f, None) for f in os.listdir(self.model.working_directories['image'])])
        self.model.working_directories['image'] = directory
        self.widget.img_directory_txt.setText(directory)
def update_img_control_widget(self):
    """
    Refreshes all image-related GUI controls (series stepper, file info,
    filename/directory fields, source selection combo box) from the current
    image model state.
    """
    # series controls are only shown for multi-frame files
    self.widget.img_step_series_widget.setVisible(int(self.model.img_model.series_max > 1))
    self.widget.img_step_series_widget.pos_validator.setTop(self.model.img_model.series_max)
    self.widget.img_step_series_widget.pos_txt.setText(str(self.model.img_model.series_pos))

    self.widget.file_info_btn.setVisible(self.model.img_model.file_info != "")
    self.widget.move_btn.setVisible(len(self.model.img_model.motors_info) > 0)
    self.widget.img_filename_txt.setText(os.path.basename(self.model.img_model.filename))
    self.widget.img_directory_txt.setText(os.path.dirname(self.model.img_model.filename))
    self.widget.file_info_widget.text_lbl.setText(self.model.img_model.file_info)

    self.widget.image_control_widget.sources_widget.setVisible(not (self.model.img_model.sources is None))
    if self.model.img_model.sources is not None:
        sources_cb = self.widget.image_control_widget.sources_cb
        # repopulate without triggering the selection handler
        sources_cb.blockSignals(True)
        # remove all previous items:
        for _ in range(sources_cb.count()):
            sources_cb.removeItem(0)
        sources_cb.addItems(self.model.img_model.sources)
        sources_cb.setCurrentText(self.model.img_model.selected_source)
        sources_cb.blockSignals(False)

    self.widget.cbn_plot_btn.setText('Plot')
    self.widget.oiadac_plot_btn.setText('Plot')

    # update the window due to some errors on mac when using macports
    self._get_master_parent().update()
def _get_master_parent(self):
master_widget_parent = self.widget
while master_widget_parent.parent():
master_widget_parent = master_widget_parent.parent()
return master_widget_parent
def click_roi_btn(self):
    """
    Toggles the region of interest: stores the widget's current ROI limits
    when none is set, clears it otherwise, then refreshes the GUI.
    """
    config = self.model.current_configuration
    if config.roi is None:
        config.roi = self.widget.img_widget.roi.getRoiLimits()
    else:
        config.roi = None
    self.update_roi_in_gui()
def update_roi_in_gui(self):
    """
    Synchronizes the ROI display with the mask model's ROI. Deactivates the
    widget ROI (and disconnects its change handler) when no ROI is set;
    otherwise activates it, applies the stored limits, and connects the
    change handler exactly once (tracked via self.roi_active).
    """
    roi = self.model.mask_model.roi
    if roi is None:
        self.widget.img_widget.deactivate_roi()
        self.widget.img_roi_btn.setChecked(False)
        if self.roi_active:
            # disconnect so programmatic ROI changes don't write back to the model
            self.widget.img_widget.roi.sigRegionChangeFinished.disconnect(self.update_roi_in_model)
            self.roi_active = False
        return

    # NOTE(review): the ROI overlay is only activated outside cake mode -- confirm
    if not self.model.current_configuration.auto_integrate_cake:
        self.widget.img_roi_btn.setChecked(True)
        self.widget.img_widget.activate_roi()
        self.widget.img_widget.update_roi_shade_limits(self.model.img_data.shape)

        # roi is (y_min, y_max, x_min, x_max) judging by the pos/size mapping below
        pos = QtCore.QPoint(int(roi[2]), int(roi[0]))
        size = QtCore.QPoint(int(roi[3] - roi[2]), int(roi[1] - roi[0]))
        self.widget.img_widget.roi.setRoiLimits(pos, size)

        if not self.roi_active:
            self.widget.img_widget.roi.sigRegionChangeFinished.connect(self.update_roi_in_model)
            self.roi_active = True
def update_roi_in_model(self):
    """Writes the widget's current ROI limits back into the configuration."""
    self.model.current_configuration.roi = self.widget.img_widget.roi.getRoiLimits()
def change_view_mode(self):
    """
    Switches between image and cake display. The mode button's label shows
    the target mode, so a 'Cake' label activates cake mode and vice versa.
    """
    target_mode = str(self.widget.img_mode_btn.text())
    if target_mode == 'Cake':
        self.activate_cake_mode()
    elif target_mode == 'Image':
        self.activate_image_mode()
def toggle_show_phases(self):
    """
    Shows or hides phase lines in the cake view, toggling the button label
    between 'Show Phases' and 'Hide Phases'.
    """
    if str(self.widget.img_phases_btn.text()) == 'Show Phases':
        self.widget.integration_image_widget.cake_view.show_all_visible_cake_phases(
            self.widget.phase_widget.phase_show_cbs)
        self.widget.img_phases_btn.setText('Hide Phases')
        self.model.enabled_phases_in_cake.emit()
    elif str(self.widget.img_phases_btn.text()) == 'Hide Phases':
        self.widget.integration_image_widget.cake_view.hide_all_cake_phases()
        self.widget.img_phases_btn.setText('Show Phases')
def activate_cake_mode(self):
    """
    Switches the display to cake mode: enables cake auto-integration,
    rewires the change signals from image plotting to cake plotting,
    updates marker positions, and shows the cake-specific controls.
    """
    if not self.model.current_configuration.auto_integrate_cake:
        self.model.current_configuration.auto_integrate_cake = True
        self.model.current_configuration.integrate_image_2d()

    self._update_cake_line_pos()
    self._update_cake_mouse_click_pos()
    # the mode button always shows the *other* mode as target
    self.widget.img_mode_btn.setText('Image')
    self.widget.img_mode = str("Cake")

    # swap plotting handlers: cake instead of image/mask
    self.model.img_changed.disconnect(self.plot_img)
    self.model.img_changed.disconnect(self.plot_mask)
    self.model.cake_changed.connect(self.plot_cake)
    self.plot_cake()

    self.widget.cake_shift_azimuth_sl.setVisible(True)
    # allow shifting by up to half the azimuth bins in either direction
    self.widget.cake_shift_azimuth_sl.setMinimum(int(-len(self.model.cake_azi) / 2))
    self.widget.cake_shift_azimuth_sl.setMaximum(int(len(self.model.cake_azi) / 2))
    self.widget.cake_shift_azimuth_sl.setSingleStep(1)
    self.widget.img_phases_btn.setVisible(True)

    self.widget.integration_image_widget.img_pg_layout.hide()
    self.widget.integration_image_widget.cake_pg_layout.show()
def activate_image_mode(self):
    """
    Switches the display to image mode: disables cake auto-integration,
    rewires the change signals from cake plotting back to image/mask
    plotting, updates marker positions, and hides cake-specific controls.
    """
    if self.model.current_configuration.auto_integrate_cake:
        self.model.current_configuration.auto_integrate_cake = False

    self.widget.cake_shift_azimuth_sl.setVisible(False)
    self.widget.img_phases_btn.setVisible(False)

    self._update_image_line_pos()
    self._update_image_mouse_click_pos()
    # the mode button always shows the *other* mode as target
    self.widget.img_mode_btn.setText('Cake')
    self.widget.img_mode = str("Image")

    # swap plotting handlers: image/mask instead of cake
    self.model.img_changed.connect(self.plot_img)
    self.model.img_changed.connect(self.plot_mask)
    self.model.cake_changed.disconnect(self.plot_cake)
    self.plot_img()
    self.plot_mask()

    self.widget.integration_image_widget.img_pg_layout.show()
    self.widget.integration_image_widget.cake_pg_layout.hide()
def img_autoscale_btn_clicked(self):
    """Auto-levels the image intensity when the autoscale button is active."""
    if not self.widget.img_autoscale_btn.isChecked():
        return
    self.widget.img_widget.auto_level()
def img_dock_btn_clicked(self):
    """Toggles the docking state of the image widget."""
    new_state = not self.img_docked
    self.img_docked = new_state
    self.widget.dock_img(new_state)
def show_background_subtracted_img_btn_clicked(self):
    """
    Re-plots the image when the background-subtraction toggle is clicked.

    NOTE(review): the mode button's label shows the *target* mode, so text
    'Cake' means the image view is currently active (cf. change_view_mode);
    in cake view the toggle is simply unchecked again -- confirm intent.
    """
    if self.widget.img_mode_btn.text() == 'Cake':
        self.plot_img()
    else:
        self.widget.integration_image_widget.show_background_subtracted_img_btn.setChecked(False)
def _update_cake_line_pos(self):
    """
    Moves the vertical position line in the cake view to the two-theta of
    the current pattern position line; hides it when the position is
    outside the cake's two-theta range.
    """
    cur_tth = self.get_current_pattern_tth()
    if self.model.cake_tth is None or cur_tth < np.min(self.model.cake_tth) or cur_tth > np.max(
            self.model.cake_tth):
        self.widget.cake_widget.deactivate_vertical_line()
    else:
        # +0.5 centers the line on the pixel (bin-center convention)
        new_pos = get_partial_index(self.model.cake_tth, cur_tth) + 0.5
        self.widget.cake_widget.set_vertical_line_pos(new_pos, 0)
        self.widget.cake_widget.activate_vertical_line()
def _update_cake_mouse_click_pos(self):
    """
    Repositions the cake click marker from the stored clicked two-theta and
    azimuth, taking the current azimuthal shift slider into account.
    """
    if self.clicked_tth is None or not self.model.calibration_model.is_calibrated:
        return

    tth = self.clicked_tth
    azi = self.clicked_azi
    cake_tth = self.model.cake_tth

    # +0.5 centers on the pixel; azimuth wraps around after shifting
    x_pos = get_partial_index(cake_tth, tth) + 0.5
    shift_amount = self.widget.cake_shift_azimuth_sl.value()
    y_pos = (get_partial_index(self.model.cake_azi, azi) + 0.5 + shift_amount) % len(self.model.cake_azi)
    self.widget.cake_widget.set_mouse_click_position(x_pos, y_pos)
def _update_image_line_pos(self):
    """
    Draws the constant-two-theta circle overlay on the image at the current
    pattern position (converted from degrees to radians).
    """
    if not self.model.calibration_model.is_calibrated:
        return
    cur_tth = self.get_current_pattern_tth()
    self.widget.img_widget.set_circle_line(
        self.model.calibration_model.get_two_theta_array(), cur_tth / 180 * np.pi)
def _update_image_mouse_click_pos(self):
    """
    Repositions the image click marker from the stored clicked two-theta and
    azimuth by mapping them back to detector pixel indices; hides the marker
    when no pixel maps to the stored position.
    """
    if self.clicked_tth is None or not self.model.calibration_model.is_calibrated:
        return

    tth = np.deg2rad(self.clicked_tth)
    azi = np.deg2rad(self.clicked_azi)

    new_pos = self.model.calibration_model.get_pixel_ind(tth, azi)
    if len(new_pos) == 0:
        self.widget.img_widget.mouse_click_item.hide()
    else:
        x_ind, y_ind = new_pos
        # note swapped axes: widget expects (column, row); +0.5 centers on pixel
        self.widget.img_widget.set_mouse_click_position(y_ind + 0.5, x_ind + 0.5)
        self.widget.img_widget.mouse_click_item.show()
def get_current_pattern_tth(self):
    """
    Returns the position of the pattern position line converted into
    two-theta degrees, or None when no unit button is checked.
    """
    cur_pos = self.widget.pattern_widget.pos_line.getPos()[0]
    if self.widget.pattern_q_btn.isChecked():
        return self.convert_x_value(cur_pos, 'q_A^-1', '2th_deg')
    if self.widget.pattern_tth_btn.isChecked():
        return cur_pos
    if self.widget.pattern_d_btn.isChecked():
        return self.convert_x_value(cur_pos, 'd_A', '2th_deg')
    return None
def update_cake_axes_range(self):
    """Refreshes both cake axes, but only while cake integration is active."""
    if not self.model.current_configuration.auto_integrate_cake:
        return
    self.update_cake_azimuth_axis()
    self.update_cake_x_axis()
def update_cake_azimuth_axis(self):
    """
    Recomputes the visible azimuth range of the cake view's left axis from
    the image item's view rectangle, accounting for the azimuthal shift.
    """
    data_img_item = self.widget.integration_image_widget.cake_view.data_img_item
    shift_amount = self.widget.cake_shift_azimuth_sl.value()
    # shift the azimuth values by slider-steps times the mean bin width
    cake_azi = self.model.cake_azi - shift_amount * np.mean(np.diff(self.model.cake_azi))

    height = data_img_item.viewRect().height()
    bottom = data_img_item.viewRect().top()
    # linear map from view pixels to azimuth values
    # NOTE(review): np.min(cake_azi[0]) reduces a scalar -- likely meant cake_azi[0]
    v_scale = (cake_azi[-1] - cake_azi[0]) / data_img_item.boundingRect().height()
    v_shift = np.min(cake_azi[0])

    min_azi = v_scale * bottom + v_shift
    max_azi = v_scale * (bottom + height) + v_shift

    self.widget.integration_image_widget.cake_view.left_axis_cake.setRange(min_azi, max_azi)
def update_cake_x_axis(self):
    """
    Recomputes the visible range of the cake view's bottom axis from the
    image item's view rectangle, converting to q when that unit is active.
    """
    if self.model.cake_tth is None:
        return

    data_img_item = self.widget.integration_image_widget.cake_view.data_img_item
    cake_tth = self.model.cake_tth

    width = data_img_item.viewRect().width()
    left = data_img_item.viewRect().left()
    # linear map from view pixels to two-theta values
    h_scale = (np.max(cake_tth) - np.min(cake_tth)) / data_img_item.boundingRect().width()
    h_shift = np.min(cake_tth)
    min_tth = h_scale * left + h_shift
    max_tth = h_scale * (left + width) + h_shift

    if self.model.current_configuration.integration_unit == '2th_deg':
        self.widget.integration_image_widget.cake_view.bottom_axis_cake.setRange(min_tth, max_tth)
    elif self.model.current_configuration.integration_unit == 'q_A^-1':
        self.widget.integration_image_widget.cake_view.bottom_axis_cake.setRange(
            self.convert_x_value(min_tth, '2th_deg', 'q_A^-1'),
            self.convert_x_value(max_tth, '2th_deg', 'q_A^-1'))
def set_cake_axis_unit(self, unit='2th_deg'):
    """
    Sets the label of the cake view's bottom axis for the given unit and
    refreshes the axis range.

    :param unit: '2th_deg' or 'q_A^-1'
    """
    if unit == '2th_deg':
        self.widget.integration_image_widget.cake_view.bottom_axis_cake.setLabel(u'2θ', u'°')
    elif unit == 'q_A^-1':
        self.widget.integration_image_widget.cake_view.bottom_axis_cake.setLabel('Q', 'A<sup>-1</sup>')
    self.update_cake_x_axis()
def show_img_mouse_position(self, x, y):
    """
    Updates the mouse position labels (pixel position, intensity, two-theta,
    d-spacing, Q and azimuth) while the mouse moves over the image or cake.

    :param x: mouse x position in image pixel coordinates
    :param y: mouse y position in image pixel coordinates
    """
    if self.widget.img_mode == 'Cake':
        img_data = self.widget.cake_widget.img_data
    else:
        img_data = self.widget.img_widget.img_data

    img_shape = img_data.shape
    if 0 < x < img_shape[1] - 1 and 0 < y < img_shape[0] - 1:
        self.update_mouse_position_labels(x, y, img_data[int(np.floor(y)), int(np.floor(x))])

        if self.model.calibration_model.is_calibrated:
            # swap axes: calibration functions expect (row, column) arrays
            x_temp = x
            x = np.array([y])
            y = np.array([x_temp])
            if self.widget.img_mode == 'Cake':
                # look up two-theta/azimuth directly from the cake axes
                tth = get_partial_value(self.model.cake_tth, y - 0.5)
                shift_amount = self.widget.cake_shift_azimuth_sl.value()
                cake_azi = self.model.cake_azi - shift_amount * np.mean(np.diff(self.model.cake_azi))
                azi = get_partial_value(cake_azi, x - 0.5)
                q_value = self.convert_x_value(tth, '2th_deg', 'q_A^-1')
            else:
                # compute two-theta/azimuth from detector geometry (rad -> deg)
                tth = self.model.calibration_model.get_two_theta_img(x, y)
                tth = tth / np.pi * 180.0
                q_value = self.convert_x_value(tth, '2th_deg', 'q_A^-1')
                azi = self.model.calibration_model.get_azi_img(x, y) / np.pi * 180

            d = self.convert_x_value(tth, '2th_deg', 'd_A')
            tth_str = u"2θ:%9.3f  " % tth
            self.widget.mouse_tth_lbl.setText(tth_str)
            self.widget.mouse_d_lbl.setText('d:%9.3f  ' % d)
            self.widget.mouse_q_lbl.setText('Q:%9.3f  ' % q_value)
            self.widget.mouse_azi_lbl.setText('X:%9.3f  ' % azi)
            self.widget.img_widget_mouse_tth_lbl.setText(tth_str)
            self.widget.img_widget_mouse_d_lbl.setText('d:%9.3f  ' % d)
            self.widget.img_widget_mouse_q_lbl.setText('Q:%9.3f  ' % q_value)
            self.widget.img_widget_mouse_azi_lbl.setText('X:%9.3f  ' % azi)
        else:
            # no calibration: clear all derived-value labels
            self.widget.mouse_tth_lbl.setText(u'2θ: -')
            self.widget.mouse_d_lbl.setText('d: -')
            self.widget.mouse_q_lbl.setText('Q: -')
            self.widget.mouse_azi_lbl.setText('X: -')
            self.widget.img_widget_mouse_tth_lbl.setText(u'2θ: -')
            self.widget.img_widget_mouse_d_lbl.setText('d: -')
            self.widget.img_widget_mouse_q_lbl.setText('Q: -')
            self.widget.img_widget_mouse_azi_lbl.setText('X: -')
    else:
        self.update_mouse_position_labels(x, y, None)
def img_mouse_click(self, x, y):
    """
    Handles a left-click in the image or cake view: stores the clicked
    two-theta/azimuth (degrees), updates click markers and labels, and moves
    the pattern position line to the corresponding x value.

    :param x: clicked x position in image pixel coordinates
    :param y: clicked y position in image pixel coordinates
    """
    if self.widget.img_mode == 'Cake':
        img_data = self.widget.cake_widget.img_data
    else:
        img_data = self.widget.img_widget.img_data

    if 0 < x < img_data.shape[1] - 1 and 0 < y < img_data.shape[0] - 1:
        intensity = img_data[int(np.floor(y)), int(np.floor(x))]
    else:
        intensity = None

    self.update_mouse_click_position_labels(x, y, intensity)

    if self.model.calibration_model.is_calibrated:
        x, y = y, x  # the indices are reversed for the img_array
        if self.widget.img_mode == 'Cake':  # cake mode
            # get clicked tth and azimuth
            cake_shape = self.model.cake_data.shape
            if x < 0 or y < 0 or x > cake_shape[0] - 1 or y > cake_shape[1] - 1:
                return
            x = np.array([x])
            y = np.array([y])
            tth = get_partial_value(self.model.cake_tth, y - 0.5)
            shift_amount = self.widget.cake_shift_azimuth_sl.value()
            azi = get_partial_value(np.roll(self.model.cake_azi, shift_amount), x - 0.5)
            self.widget.cake_widget.activate_vertical_line()

        elif self.widget.img_mode == 'Image':  # image mode
            img_shape = self.model.img_data.shape
            if x < 0 or y < 0 or x > img_shape[0] - 1 or y > img_shape[1] - 1:
                return
            x = np.array([x])
            y = np.array([y])
            # geometry functions work in radians; store degrees
            tth = np.rad2deg(self.model.calibration_model.get_two_theta_img(x, y))
            azi = np.rad2deg(self.model.calibration_model.get_azi_img(x, y))
            self.widget.img_widget.set_circle_line(self.model.calibration_model.get_two_theta_array(),
                                                   np.deg2rad(tth))
        else:  # in the case of whatever
            tth = 0
            azi = 0

        self.clicked_tth = tth  # in degree
        self.clicked_azi = azi  # in degree

        if self.widget.img_mode == 'Cake':
            self.plot_cake_integral()

        # calculate right unit for the position line the pattern widget
        if self.widget.pattern_q_btn.isChecked():
            pos = 4 * np.pi * np.sin(np.deg2rad(tth) / 2) / self.model.calibration_model.wavelength / 1e10
        elif self.widget.pattern_tth_btn.isChecked():
            pos = tth
        elif self.widget.pattern_d_btn.isChecked():
            pos = self.model.calibration_model.wavelength / (2 * np.sin(np.deg2rad(tth) / 2)) * 1e10
        else:
            pos = 0
        self.widget.pattern_widget.set_pos_line(pos)

    # copy the hover labels into the click labels
    self.widget.click_tth_lbl.setText(self.widget.mouse_tth_lbl.text())
    self.widget.click_d_lbl.setText(self.widget.mouse_d_lbl.text())
    self.widget.click_q_lbl.setText(self.widget.mouse_q_lbl.text())
    self.widget.click_azi_lbl.setText(self.widget.mouse_azi_lbl.text())
    self.widget.img_widget_click_tth_lbl.setText(self.widget.mouse_tth_lbl.text())
    self.widget.img_widget_click_d_lbl.setText(self.widget.mouse_d_lbl.text())
    self.widget.img_widget_click_q_lbl.setText(self.widget.mouse_q_lbl.text())
    self.widget.img_widget_click_azi_lbl.setText(self.widget.mouse_azi_lbl.text())
def update_mouse_position_labels(self, x, y, intensity):
    """
    Shows the given mouse position and, when available, the pixel intensity
    in the status labels.

    :param x: mouse x position (pixels)
    :param y: mouse y position (pixels)
    :param intensity: pixel intensity, or None when outside the image
    """
    int_string = 'I:' if intensity is None else 'I: %5d' % intensity
    self.widget.mouse_x_lbl.setText('X: %4d' % x)
    self.widget.mouse_y_lbl.setText('Y: %4d' % y)
    self.widget.mouse_int_lbl.setText(int_string)
def update_mouse_click_position_labels(self, x, y, intensity):
    """Updates the hover labels, then copies their text into the click labels."""
    self.update_mouse_position_labels(x, y, intensity)
    widget = self.widget
    widget.click_x_lbl.setText(widget.mouse_x_lbl.text())
    widget.click_y_lbl.setText(widget.mouse_y_lbl.text())
    widget.click_int_lbl.setText(widget.mouse_int_lbl.text())
def pattern_mouse_click(self, x, y):
    """
    Moves the image/cake position marker to the pattern x position that was
    clicked; requires a valid calibration.

    :param x: clicked pattern x value (in the active integration unit)
    :param y: clicked pattern y value (unused)
    """
    if not self.model.calibration_model.is_calibrated:
        return
    if self.widget.img_mode == 'Cake':
        self.set_cake_line_position(x)
    elif self.widget.img_mode == 'Image':
        self.set_image_line_position(x)
def set_cake_line_position(self, x):
    """
    Moves the cake view's vertical line to the given pattern position,
    linearly interpolating between the surrounding cake two-theta bins.
    Outside the cake range the line and the integral plot are cleared.

    :param x: pattern x value (in the active integration unit)
    """
    x = self._convert_to_tth(x)
    upper_ind = np.where(self.model.cake_tth > x)[0]
    lower_ind = np.where(self.model.cake_tth < x)[0]

    if len(upper_ind) == 0 or len(lower_ind) == 0:
        # position is outside the cake two-theta range
        self.widget.cake_widget.plot_cake_integral(np.array([]), np.array([]))
        self.widget.cake_widget.deactivate_vertical_line()
        return

    # fractional bin index; +0.5 centers the line on the pixel
    spacing = self.model.cake_tth[upper_ind[0]] - self.model.cake_tth[lower_ind[-1]]
    new_pos = lower_ind[-1] + (x - self.model.cake_tth[lower_ind[-1]]) / spacing + 0.5
    self.widget.cake_widget.vertical_line.setValue(new_pos)
    self.widget.cake_widget.activate_vertical_line()
    self.plot_cake_integral(x)
def set_image_line_position(self, x):
    """
    Draws the constant two-theta circle on the image for the given pattern
    position (converted to two-theta degrees first).

    :param x: pattern x value (in the active integration unit)
    """
    tth_deg = self._convert_to_tth(x)
    self.widget.img_widget.set_circle_line(
        self.model.calibration_model.get_two_theta_array(), np.deg2rad(tth_deg))
def _convert_to_tth(self, x):
if self.model.integration_unit == 'q_A^-1':
return self.convert_x_value(x, 'q_A^-1', '2th_deg')
elif self.model.integration_unit == 'd_A':
return self.convert_x_value(x, 'd_A', '2th_deg')
return x
def set_iteration_mode_number(self):
    """Browse image files sorted by number in the filename."""
    self.model.img_model.set_file_iteration_mode('number')
def set_iteration_mode_time(self):
    """Browse image files sorted by modification time."""
    self.model.img_model.set_file_iteration_mode('time')
def select_source(self, source):
    """Forwards the chosen image data source to the image model."""
    self.model.img_model.select_source(source)
def convert_x_value(self, value, previous_unit, new_unit):
    """
    Converts a pattern x value between the supported units '2th_deg',
    'q_A^-1' and 'd_A', using the calibration wavelength (in meters).
    Unknown units map to 0 (both on input and output).

    :param value: value to convert
    :param previous_unit: unit of the given value
    :param new_unit: unit to convert into
    :return: the converted value
    """
    wavelength = self.model.calibration_model.wavelength

    # step 1: normalize the input to two-theta in degrees
    if previous_unit == '2th_deg':
        tth = value
    elif previous_unit == 'q_A^-1':
        tth = np.arcsin(value * 1e10 * wavelength / (4 * np.pi)) * 360 / np.pi
    elif previous_unit == 'd_A':
        tth = 2 * np.arcsin(wavelength / (2 * value * 1e-10)) * 180 / np.pi
    else:
        tth = 0

    # step 2: convert two-theta into the requested unit
    if new_unit == '2th_deg':
        return tth
    if new_unit == 'q_A^-1':
        return 4 * np.pi * np.sin(tth / 360 * np.pi) / wavelength / 1e10
    if new_unit == 'd_A':
        return wavelength / (2 * np.sin(tth / 360 * np.pi)) * 1e10
    return 0
def load_calibration(self):
    """
    Lets the user pick a '*.poni' calibration file, loads it into the
    calibration model, updates the calibration label and re-emits the image
    changed signal so dependent views refresh.
    """
    filename = open_file_dialog(
        self.widget, "Load calibration...",
        self.model.working_directories['calibration'],
        '*.poni')
    if filename != '':
        self.model.working_directories['calibration'] = os.path.dirname(filename)
        self.model.calibration_model.load(filename)
        self.widget.calibration_lbl.setText(
            self.model.calibration_model.calibration_name)
        self.model.img_model.img_changed.emit()
def auto_process_cb_click(self):
    """Syncs the image model's autoprocess flag with the checkbox state."""
    self.model.img_model.autoprocess = self.widget.autoprocess_cb.isChecked()
    def save_img(self, filename=None):
        """Save the currently displayed image or cake to *filename*.

        When *filename* is None a save dialog is shown first. The export
        format depends on the extension:
          - .png: screenshot of the widget, with interactive overlays
            temporarily hidden so they are not baked into the picture;
          - .tiff/.tif: raw int32 data (flipped vertically);
          - .txt/.csv: cake data as text (image mode exports nothing).
        """
        if not filename:
            img_filename = os.path.splitext(os.path.basename(self.model.img_model.filename))[0]
            filename = save_file_dialog(self.widget, "Save Image.",
                                        os.path.join(self.model.working_directories['image'],
                                                     img_filename + '.png'),
                                        ('Image (*.png);;Data (*.tiff);;Text (*.txt)'))
        if filename != '':
            if filename.endswith('.png'):
                if self.widget.img_mode == 'Cake':
                    # hide overlays, let Qt repaint, then grab the widget
                    self.widget.cake_widget.deactivate_vertical_line()
                    self.widget.cake_widget.deactivate_mouse_click_item()
                    QtWidgets.QApplication.processEvents()
                    self.widget.cake_widget.save_img(filename)
                    self.widget.cake_widget.activate_vertical_line()
                    self.widget.cake_widget.activate_mouse_click_item()
                elif self.widget.img_mode == 'Image':
                    self.widget.img_widget.deactivate_circle_scatter()
                    self.widget.img_widget.deactivate_roi()
                    QtWidgets.QApplication.processEvents()
                    self.widget.img_widget.save_img(filename)
                    self.widget.img_widget.activate_circle_scatter()
                    # ROI overlay is only restored if it was active before
                    if self.roi_active:
                        self.widget.img_widget.activate_roi()
            elif filename.endswith('.tiff') or filename.endswith('.tif'):
                if self.widget.img_mode == 'Image':
                    im_array = np.int32(self.model.img_data)
                elif self.widget.img_mode == 'Cake':
                    im_array = np.int32(self.model.cake_data)
                im_array = np.flipud(im_array)
                im = Image.fromarray(im_array)
                im.save(filename)
            elif filename.endswith('.txt') or filename.endswith('.csv'):
                if self.widget.img_mode == 'Image':
                    # no text export defined for plain image mode
                    return
                elif self.widget.img_mode == 'Cake':  # saving cake data as a text file for export.
                    with open(filename, 'w') as out_file:  # this is done in an odd and slow way because the headers
                        # should be floats and the data itself int.
                        cake_tth = np.insert(self.model.cake_tth, 0, 0)
                        np.savetxt(out_file, cake_tth[None], fmt='%6.3f')
                        for azi, row in zip(self.model.cake_azi, self.model.cake_data):
                            row_str = " ".join(["{:6.0f}".format(el) for el in row])
                            out_file.write("{:6.2f}".format(azi) + row_str + '\n')
    def update_gui_from_configuration(self):
        """Sync image-related GUI controls with the current model state.

        Also switches between image/cake display when the configuration's
        auto_integrate_cake flag disagrees with the current widget mode;
        otherwise just refreshes the overlay line and click positions.
        """
        self.widget.img_mask_btn.setChecked(int(self.model.use_mask))
        self.widget.mask_transparent_cb.setChecked(bool(self.model.transparent_mask))
        self.widget.autoprocess_cb.setChecked(bool(self.model.img_model.autoprocess))
        self.widget.calibration_lbl.setText(self.model.calibration_model.calibration_name)
        self.update_img_control_widget()
        self.update_mask_mode()
        self.update_roi_in_gui()
        if self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Image':
            self.activate_cake_mode()
        elif not self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Cake':
            self.activate_image_mode()
        elif self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Cake':
            # already in cake mode: only refresh overlays
            self._update_cake_line_pos()
            self._update_cake_mouse_click_pos()
        elif not self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Image':
            # already in image mode: only refresh overlays
            self._update_image_line_pos()
            self._update_image_mouse_click_pos()
def change_view_btn_clicked(self):
if self.view_mode == 'alternative':
self.change_view_to_normal()
elif self.view_mode == 'normal':
self.change_view_to_alternative()
    def change_view_to_normal(self):
        """Switch the layout to 'normal' mode.

        Saves the alternative-mode splitter states first so they can be
        restored when switching back, then moves the pattern widget into
        the vertical splitter and restores the saved normal-mode states.
        """
        if self.view_mode == 'normal':
            return
        self.vertical_splitter_alternative_state = self.widget.vertical_splitter.saveState()
        self.horizontal_splitter_alternative_state = self.widget.horizontal_splitter.saveState()
        self.widget.vertical_splitter.addWidget(self.widget.integration_pattern_widget)
        self.widget.integration_control_widget.setOrientation(QtCore.Qt.Horizontal)
        if self.vertical_splitter_normal_state:
            self.widget.vertical_splitter.restoreState(self.vertical_splitter_normal_state)
        if self.horizontal_splitter_normal_state:
            self.widget.horizontal_splitter.restoreState(self.horizontal_splitter_normal_state)
        self.widget.img_widget.set_orientation("horizontal")
        self.view_mode = 'normal'
    def change_view_to_alternative(self):
        """Switch the layout to 'alternative' mode.

        Saves the normal-mode splitter states first so they can be
        restored when switching back, then moves the pattern widget into
        the left vertical splitter and restores the saved alternative states.
        """
        if self.view_mode == 'alternative':
            return
        self.vertical_splitter_normal_state = self.widget.vertical_splitter.saveState()
        self.horizontal_splitter_normal_state = self.widget.horizontal_splitter.saveState()
        self.widget.vertical_splitter_left.insertWidget(0, self.widget.integration_pattern_widget)
        self.widget.integration_control_widget.setOrientation(QtCore.Qt.Vertical)
        if self.vertical_splitter_alternative_state:
            self.widget.vertical_splitter.restoreState(self.vertical_splitter_alternative_state)
        if self.horizontal_splitter_alternative_state:
            self.widget.horizontal_splitter.restoreState(self.horizontal_splitter_alternative_state)
        self.widget.img_widget.set_orientation("vertical")
        self.view_mode = 'alternative'
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.cm.views.user.vm
@alldecoratedby{src.cm.utils.decorators.user_log}
@author Tomek Sośnicki <tom.sosnicki@gmail.com>
@author Maciej Nabożny <di.dijo@gmail.com>
@author Miłosz Zdybał <milosz.zdybal@ifj.edu.pl>
"""
from common.states import vm_states
from cm.utils.decorators import user_log
from cm.utils.exception import CMException
from cm.models.user import User
from cm.models.vm import VM
from cm.utils.threads.vm import VMThread
from cm.utils import message
@user_log(log=True)
def create(caller_id, name, description, image_id, template_id, public_ip_id, iso_list, disk_list, vnc, groups, count=1,
           user_data=None,
           ssh_key=None, ssh_username=None):
    """
    Creates virtual machines.
    @cmview_user
    @param_post{name,string}
    @param_post{description,string}
    @param_post{image_id,int}
    @param_post{template_id,int}
    @param_post{public_ip_id,int}
    @param_post{iso_list,list(int)} ISOs' ids
    @param_post{disk_list,list(int)}
    @param_post{vnc}
    @param_post{count}
    @param_post{groups}
    @param_post{user_data} data accessible via ec2ctx
    @param_post{ssh_key}
    @param_post{ssh_username}
    @returns @asreturned{src.cm.views.utils.vm.create()}
    """
    user = User.get(caller_id)
    try:
        user.check_points()
    except Exception:
        # Fix: bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
        # Exceeding the point limit is deliberately non-fatal: warn and proceed.
        message.warn(caller_id, 'point_limit', {'used_points': user.used_points, 'point_limit': user.points})
    vms = VM.create(user, name=name, description=description, image_id=image_id,
                    template_id=template_id, public_ip_id=public_ip_id, iso_list=iso_list, disk_list=disk_list,
                    vnc=vnc, groups=groups, count=count, user_data=user_data, ssh_key=ssh_key,
                    ssh_username=ssh_username)
    # Start one creation worker thread per requested VM.
    for vm in vms:
        thread = VMThread(vm, 'create')
        thread.start()
    return [vm.dict for vm in vms]
@user_log(log=True)
def destroy(caller_id, vm_ids):
    """
    This function only destroys VM. All the cleanup (removing disk, saving,
    rescuing resources, ...) is done by hook through
    \c contextualization.update_vm method (yeah, intuitive).
    Simple sequence diagram:
    @code
    CLM            CM                     CTX           Node (HOOK)
                    .
    Destroy -->destroy
                    |                      |            (LV.destroy)
                    |------------------------->HookScript
                    .                      .                |
                    .                      .  ctx.update_vm<--|
                    .                      .                |   |
                    .                      .                |------------->cp
                    .                      .                |------------->rm
                    .                      .        update_resources
    @endcode
    @cmview_user
    @param_post{vm_ids,list} list of virtual machines' ids
    @response{list(dict)} VM.destroy() retval
    """
    # Resolve every id through VM.get so ownership is checked per machine.
    machines = [VM.get(caller_id, vm_id) for vm_id in vm_ids]
    return VM.destroy(machines)
@user_log(log=True)
def save_and_shutdown(caller_id, vm_id, name, description):
    """
    Calls VM.save_and_shutdown() on specified VM
    @cmview_user
    @param_post{vm_id,int} id of the VM to save and shutdown.
    @param_post{name,string} name of the new SystemImage VM should be saved to
    @param_post{description,string} description of the new SystemImage VM
    should be saved to
    """
    user = User.get(caller_id)
    vm = VM.get(caller_id, vm_id)
    # Saving the VM image must not push the user past the storage quota.
    projected_usage = user.used_storage + vm.system_image.size
    if projected_usage > user.storage:
        raise CMException('user_storage_limit')
    VM.save_and_shutdown(caller_id, vm, name, description)
@user_log(log=False, pack=False)
def get_list(caller_id):
    """
    Returns caller's VMs.
    @cmview_user
    @response{list(dict)} VM.dict property of all caller's VMs
    """
    # Non-farm, non-closed/erased machines of the caller, newest first.
    active_vms = (VM.objects
                  .exclude(state__in=[vm_states['closed'], vm_states['erased']])
                  .filter(user__id__exact=caller_id)
                  .filter(farm=None)
                  .order_by('-id'))
    return [vm.dict for vm in active_vms]
@user_log(log=False)
def get_by_id(caller_id, vm_id):
    """
    Returns requested caller's VM.
    @cmview_user
    @param_post{vm_id,int} id of the requested VM
    @response{dict} VM.long_dict property of the requested VM
    """
    return VM.get(caller_id, vm_id).long_dict
@user_log(log=True)
def reset(caller_id, vm_ids):
    """
    Safely restarts selected callers VMs
    @cmview_user
    @param_post{vm_ids,list(int)} ids of the VMs to restart
    @response{src.cm.views.utils.image.restart()}
    """
    # VM.get enforces per-machine access permissions before the reset.
    machines = [VM.get(caller_id, vm_id) for vm_id in vm_ids]
    return VM.reset(machines)
@user_log(log=True)
def edit(caller_id, vm_id, name, description):
    """
    Updates VM's attributes.
    @cmview_user
    @param_post{vm_id,int} id of the VM to edit
    @param_post{name,string}
    @param_post{description,string}
    @response{src.cm.views.utils.image.edit()}
    """
    vm = VM.get(caller_id, vm_id)
    vm.name, vm.description = name, description
    # Persist only the two edited columns.
    vm.save(update_fields=['name', 'description'])
@user_log(log=True)
def attach_vnc(caller_id, vm_id):
    """
    Attaches VNC redirection to VM.
    @cmview_user
    @param_post{vm_id,int} id of the VM to have attached VNC redirection
    @raises{CMException} 'vnc_attach' when persisting the change fails
    """
    vm = VM.get(caller_id, vm_id)
    vm.attach_vnc()
    try:
        vm.save()
    except Exception:
        # Fix: bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
        # translate only real save errors into the domain exception.
        raise CMException('vnc_attach')
@user_log(log=True)
def detach_vnc(caller_id, vm_id):
    """
    Detaches VNC redirection from VM.
    @cmview_user
    @param_post{vm_id,int} id of the VM to have detached VNC redirection
    @raises{CMException} 'vnc_detach' when persisting the change fails
    """
    vm = VM.get(caller_id, vm_id)
    vm.detach_vnc()
    try:
        vm.save()
    except Exception:
        # Fix: bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
        # translate only real save errors into the domain exception.
        raise CMException('vnc_detach')
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Test for {@link RpcReply}
 */
public class TestRpcReply {
  @Test
  public void testReplyStateFromValue() {
    // The two defined wire values map onto the two reply states.
    assertEquals(ReplyState.MSG_ACCEPTED, ReplyState.fromValue(0));
    assertEquals(ReplyState.MSG_DENIED, ReplyState.fromValue(1));
  }
  @Test
  public void testReplyStateFromInvalidValue1() {
    // An out-of-range ordinal must be rejected, not mapped silently.
    assertThrows(IndexOutOfBoundsException.class, () -> ReplyState.fromValue(2));
  }
  @Test
  public void testRpcReply() {
    // Anonymous subclass: RpcReply is abstract, write() is irrelevant here.
    RpcReply reply = new RpcReply(0, ReplyState.MSG_ACCEPTED,
        new VerifierNone()) {
      @Override
      public XDR write(XDR xdr) {
        return null;
      }
    };
    assertEquals(0, reply.getXid());
    assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
    assertEquals(ReplyState.MSG_ACCEPTED, reply.getState());
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/oncrpc/TestRpcReply.java
|
#
# Symbol Table
#
import copy
import re
from Errors import warning, error, InternalError
from StringEncoding import EncodedString
import Options, Naming
import PyrexTypes
from PyrexTypes import py_object_type, unspecified_type
from TypeSlots import \
pyfunction_signature, pymethod_signature, \
get_special_method_signature, get_property_accessor_signature
import Code
import __builtin__ as builtins
# ISO C99 reserved words that may never be used as C identifiers.
# Fix: the original had '_Complex'', _Imaginary' -- adjacent string literals
# that silently concatenate into the single bogus entry '_Complex, _Imaginary',
# so neither '_Complex' nor '_Imaginary' was actually treated as a keyword.
iso_c99_keywords = set(
    ['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
     'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
     'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
     'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
     'volatile', 'while',
     '_Bool', '_Complex', '_Imaginary', 'inline', 'restrict'])
def c_safe_identifier(cname):
    """Return *cname*, prefixed when it would collide with reserved C names."""
    # Double-underscore names are reserved in C, except for Cython's own
    # prefix and the special '__weakref__' slot.
    is_reserved_dunder = (cname[:2] == '__'
                          and not (cname.startswith(Naming.pyrex_prefix)
                                   or cname == '__weakref__'))
    if is_reserved_dunder or cname in iso_c99_keywords:
        return Naming.pyrex_prefix + cname
    return cname
class BufferAux(object):
    """Auxiliary bookkeeping attached to a buffer-typed variable entry."""
    # Flipped to True once write access to the buffer is required.
    writable_needed = False
    def __init__(self, buflocal_nd_var, rcbuf_var):
        self.buflocal_nd_var = buflocal_nd_var
        self.rcbuf_var = rcbuf_var
    def __repr__(self):
        # repr shows the instance __dict__; attribute insertion order matters.
        return "<BufferAux %r>" % self.__dict__
class Entry(object):
    """A symbol table entry in a Scope or ModuleNamespace."""
    # name string Python name of entity
    # cname string C name of entity
    # type PyrexType Type of entity
    # doc string Doc string
    # init string Initial value
    # visibility 'private' or 'public' or 'extern'
    # is_builtin boolean Is an entry in the Python builtins dict
    # is_cglobal boolean Is a C global variable
    # is_pyglobal boolean Is a Python module-level variable
    #                     or class attribute during class construction
    # is_member boolean Is an assigned class member
    # is_pyclass_attr boolean Is a name in a Python class namespace
    # is_variable boolean Is a variable
    # is_cfunction boolean Is a C function
    # is_cmethod boolean Is a C method of an extension type
    # is_builtin_cmethod boolean Is a C method of a builtin type (implies is_cmethod)
    # is_unbound_cmethod boolean Is an unbound C method of an extension type
    # is_final_cmethod boolean Is non-overridable C method
    # is_inline_cmethod boolean Is inlined C method
    # is_anonymous boolean Is a anonymous pyfunction entry
    # is_type boolean Is a type definition
    # is_cclass boolean Is an extension class
    # is_cpp_class boolean Is a C++ class
    # is_const boolean Is a constant
    # is_property boolean Is a property of an extension type:
    # doc_cname string or None C const holding the docstring
    # getter_cname string C func for getting property
    # setter_cname string C func for setting or deleting property
    # is_self_arg boolean Is the "self" arg of an exttype method
    # is_arg boolean Is the arg of a method
    # is_local boolean Is a local variable
    # in_closure boolean Is referenced in an inner scope
    # is_readonly boolean Can't be assigned to
    # func_cname string C func implementing Python func
    # func_modifiers [string] C function modifiers ('inline')
    # pos position Source position where declared
    # namespace_cname string If is_pyglobal, the C variable
    #                        holding its home namespace
    # pymethdef_cname string PyMethodDef structure
    # signature Signature Arg & return types for Python func
    # as_variable Entry Alternative interpretation of extension
    #                   type name or builtin C function as a variable
    # xdecref_cleanup boolean Use Py_XDECREF for error cleanup
    # in_cinclude boolean Suppress C declaration code
    # enum_values [Entry] For enum types, list of values
    # qualified_name string "modname.funcname" or "modname.classname"
    #                       or "modname.classname.funcname"
    # is_declared_generic boolean Is declared as PyObject * even though its
    #                             type is an extension type
    # as_module None Module scope, if a cimported module
    # is_inherited boolean Is an inherited attribute of an extension type
    # pystring_cname string C name of Python version of string literal
    # is_interned boolean For string const entries, value is interned
    # is_identifier boolean For string const entries, value is an identifier
    # used boolean
    # is_special boolean Is a special method or property accessor
    #                    of an extension type
    # defined_in_pxd boolean Is defined in a .pxd file (not just declared)
    # api boolean Generate C API for C class or function
    # utility_code string Utility code needed when this entry is used
    #
    # buffer_aux BufferAux or None Extra information needed for buffer variables
    # inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
    #                            Ideally this should not be necesarry.
    # might_overflow boolean In an arithmetic expression that could cause
    #                        overflow (used for type inference).
    # utility_code_definition For some Cython builtins, the utility code
    #                         which contains the definition of the entry.
    #                         Currently only supported for CythonScope entries.
    # error_on_uninitialized Have Control Flow issue an error when this entry is
    #                        used uninitialized
    # cf_used boolean Entry is used
    # is_fused_specialized boolean Whether this entry of a cdef or def function
    #                              is a specialization
    # TODO: utility_code and utility_code_definition serves the same purpose...
    inline_func_in_pxd = False
    borrowed = 0
    init = ""
    visibility = 'private'
    is_builtin = 0
    is_cglobal = 0
    is_pyglobal = 0
    is_member = 0
    is_pyclass_attr = 0
    is_variable = 0
    is_cfunction = 0
    is_cmethod = 0
    is_builtin_cmethod = False
    is_unbound_cmethod = 0
    is_final_cmethod = 0
    is_inline_cmethod = 0
    is_anonymous = 0
    is_type = 0
    is_cclass = 0
    is_cpp_class = 0
    is_const = 0
    is_property = 0
    doc_cname = None
    getter_cname = None
    setter_cname = None
    is_self_arg = 0
    is_arg = 0
    is_local = 0
    in_closure = 0
    from_closure = 0
    is_declared_generic = 0
    is_readonly = 0
    pyfunc_cname = None
    func_cname = None
    # NOTE(review): mutable class-level default -- shared by all entries until
    # an instance rebinds it; confirm no code mutates it in place.
    func_modifiers = []
    final_func_cname = None
    doc = None
    as_variable = None
    xdecref_cleanup = 0
    in_cinclude = 0
    as_module = None
    is_inherited = 0
    pystring_cname = None
    is_identifier = 0
    is_interned = 0
    used = 0
    is_special = 0
    defined_in_pxd = 0
    is_implemented = 0
    api = 0
    utility_code = None
    is_overridable = 0
    buffer_aux = None
    prev_entry = None
    might_overflow = 0
    fused_cfunction = None
    is_fused_specialized = False
    utility_code_definition = None
    needs_property = False
    in_with_gil_block = 0
    from_cython_utility_code = None
    error_on_uninitialized = False
    cf_used = True
    outer_entry = None
    def __init__(self, name, cname, type, pos = None, init = None):
        """Bind Python name *name* to C name *cname* with PyrexType *type*."""
        self.name = name
        self.cname = cname
        self.type = type
        self.pos = pos
        self.init = init
        self.overloaded_alternatives = []
        self.cf_assignments = []
        self.cf_references = []
        self.inner_entries = []
        # An entry defines itself unless shadowed by a closure InnerEntry.
        self.defining_entry = self
    def __repr__(self):
        return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
    def redeclared(self, pos):
        """Report a redeclaration error at *pos*, pointing at the original."""
        error(pos, "'%s' does not match previous declaration" % self.name)
        error(self.pos, "Previous declaration is here")
    def all_alternatives(self):
        """Return this entry plus all overloaded alternatives."""
        return [self] + self.overloaded_alternatives
    def all_entries(self):
        """Return this entry plus the closure entries that mirror it."""
        return [self] + self.inner_entries
class InnerEntry(Entry):
    """
    An entry in a closure scope that represents the real outer Entry.

    All control-flow and overload state is shared (by reference) with the
    outermost defining entry, so updates are visible through every mirror.
    """
    from_closure = True
    def __init__(self, outer_entry, scope):
        Entry.__init__(self, outer_entry.name,
                       outer_entry.cname,
                       outer_entry.type,
                       outer_entry.pos)
        self.outer_entry = outer_entry
        self.scope = scope
        # share state with (outermost) defining entry
        outermost_entry = outer_entry
        while outermost_entry.outer_entry:
            outermost_entry = outermost_entry.outer_entry
        self.defining_entry = outermost_entry
        self.inner_entries = outermost_entry.inner_entries
        self.cf_assignments = outermost_entry.cf_assignments
        self.cf_references = outermost_entry.cf_references
        self.overloaded_alternatives = outermost_entry.overloaded_alternatives
        self.inner_entries.append(self)
    def __getattr__(self, name):
        # Any attribute not set locally is read from the defining entry.
        if name.startswith('__'):
            # we wouldn't have been called if it was there
            raise AttributeError(name)
        return getattr(self.defining_entry, name)
    def all_entries(self):
        return self.defining_entry.all_entries()
class Scope(object):
    """Base class for all symbol table scopes (module, class, function...)."""
    # name string Unqualified name
    # outer_scope Scope or None Enclosing scope
    # entries {string : Entry} Python name to entry, non-types
    # const_entries [Entry] Constant entries
    # type_entries [Entry] Struct/union/enum/typedef/exttype entries
    # sue_entries [Entry] Struct/union/enum entries
    # arg_entries [Entry] Function argument entries
    # var_entries [Entry] User-defined variable entries
    # pyfunc_entries [Entry] Python function entries
    # cfunc_entries [Entry] C function entries
    # c_class_entries [Entry] All extension type entries
    # cname_to_entry {string : Entry} Temp cname to entry mapping
    # return_type PyrexType or None Return type of function owning scope
    # is_builtin_scope boolean Is the builtin scope of Python/Cython
    # is_py_class_scope boolean Is a Python class scope
    # is_c_class_scope boolean Is an extension type scope
    # is_closure_scope boolean Is a closure scope
    # is_passthrough boolean Outer scope is passed directly
    # is_cpp_class_scope boolean Is a C++ class scope
    # is_property_scope boolean Is a extension type property scope
    # scope_prefix string Disambiguator for C names
    # in_cinclude boolean Suppress C declaration code
    # qualified_name string "modname" or "modname.classname"
    #                       Python strings in this scope
    # nogil boolean In a nogil section
    # directives dict Helper variable for the recursive
    #                 analysis, contains directive values.
    # is_internal boolean Is only used internally (simpler setup)
    is_builtin_scope = 0
    is_py_class_scope = 0
    is_c_class_scope = 0
    is_closure_scope = 0
    is_passthrough = 0
    is_cpp_class_scope = 0
    is_property_scope = 0
    is_module_scope = 0
    is_internal = 0
    scope_prefix = ""
    in_cinclude = 0
    nogil = 0
    fused_to_specific = None
    def __init__(self, name, outer_scope, parent_scope):
        # The outer_scope is the next scope in the lookup chain.
        # The parent_scope is used to derive the qualified name of this scope.
        self.name = name
        self.outer_scope = outer_scope
        self.parent_scope = parent_scope
        # Length-prefixed name fragment keeps concatenated prefixes unambiguous.
        mangled_name = "%d%s_" % (len(name), name)
        qual_scope = self.qualifying_scope()
        if qual_scope:
            self.qualified_name = qual_scope.qualify_name(name)
            self.scope_prefix = qual_scope.scope_prefix + mangled_name
        else:
            self.qualified_name = EncodedString(name)
            self.scope_prefix = mangled_name
        self.entries = {}
        self.const_entries = []
        self.type_entries = []
        self.sue_entries = []
        self.arg_entries = []
        self.var_entries = []
        self.pyfunc_entries = []
        self.cfunc_entries = []
        self.c_class_entries = []
        self.defined_c_classes = []
        self.imported_c_classes = {}
        self.cname_to_entry = {}
        self.string_to_entry = {}
        self.identifier_to_entry = {}
        self.num_to_entry = {}
        self.obj_to_entry = {}
        self.buffer_entries = []
        self.lambda_defs = []
        self.return_type = None
        self.id_counters = {}
    def __deepcopy__(self, memo):
        # Scopes are shared singletons: deep copies of anything holding a
        # scope reference must keep pointing at the same scope object.
        return self
    def merge_in(self, other, merge_unused=True, whitelist=None):
        # Use with care...
        # Copies entries (and the categorized entry lists) from *other* into
        # this scope, optionally restricted to *whitelist* names and/or to
        # entries that are actually used. NOTE: 'iteritems' is Python 2 only.
        entries = []
        for name, entry in other.entries.iteritems():
            if not whitelist or name in whitelist:
                if entry.used or merge_unused:
                    entries.append((name, entry))
        self.entries.update(entries)
        for attr in ('const_entries',
                     'type_entries',
                     'sue_entries',
                     'arg_entries',
                     'var_entries',
                     'pyfunc_entries',
                     'cfunc_entries',
                     'c_class_entries'):
            self_entries = getattr(self, attr)
            # Skip entries whose name already exists in this scope's list.
            names = set([e.name for e in self_entries])
            for entry in getattr(other, attr):
                if (entry.used or merge_unused) and entry.name not in names:
                    self_entries.append(entry)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
    def qualifying_scope(self):
        """Return the scope used to build this scope's qualified name."""
        return self.parent_scope
def mangle(self, prefix, name = None):
if name:
return "%s%s%s" % (prefix, self.scope_prefix, name)
else:
return self.parent_scope.mangle(prefix, self.name)
    def mangle_internal(self, name):
        # Mangle an internal name so as not to clash with any
        # user-defined name in this scope.
        prefix = "%s%s_" % (Naming.pyrex_prefix, name)
        return self.mangle(prefix)
        #return self.parent_scope.mangle(prefix, self.name)
def mangle_class_private_name(self, name):
if self.parent_scope:
return self.parent_scope.mangle_class_private_name(name)
return name
def next_id(self, name=None):
# Return a cname fragment that is unique for this module
counters = self.global_scope().id_counters
try:
count = counters[name] + 1
except KeyError:
count = 0
counters[name] = count
if name:
if not count:
# unique names don't need a suffix, reoccurrences will get one
return name
return '%s%d' % (name, count)
else:
return '%d' % count
    def global_scope(self):
        """ Return the module-level scope containing this scope. """
        return self.outer_scope.global_scope()
    def builtin_scope(self):
        """ Return the builtin scope searched after this scope's chain. """
        # (Original docstring was a copy-paste of global_scope's and wrongly
        # said "module-level scope".)
        return self.outer_scope.builtin_scope()
    def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0):
        # Create new entry, and add to dictionary if
        # name is not None. Reports a warning if already
        # declared.
        if type.is_buffer and not isinstance(self, LocalScope): # and not is_type:
            error(pos, 'Buffer types only allowed as function local variables')
        if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
            # See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
            warning(pos, "'%s' is a reserved name in C." % cname, -1)
        entries = self.entries
        if name and name in entries and not shadow:
            # Redeclaration: warn for extern, hard error otherwise
            # (unless the visibility asks to ignore it).
            if visibility == 'extern':
                warning(pos, "'%s' redeclared " % name, 0)
            elif visibility != 'ignore':
                error(pos, "'%s' redeclared " % name)
        entry = Entry(name, cname, type, pos = pos)
        entry.in_cinclude = self.in_cinclude
        if name:
            entry.qualified_name = self.qualify_name(name)
#            if name in entries and self.is_cpp():
#                entries[name].overloaded_alternatives.append(entry)
#            else:
#                entries[name] = entry
            if not shadow:
                entries[name] = entry
        if type.is_memoryviewslice:
            # deferred import avoids a circular module dependency
            import MemoryView
            entry.init = MemoryView.memslice_entry_init
        entry.scope = self
        entry.visibility = visibility
        return entry
    def qualify_name(self, name):
        """Return *name* prefixed with this scope's qualified name."""
        return EncodedString("%s.%s" % (self.qualified_name, name))
    def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0):
        # Add an entry for a named constant.
        if not cname:
            # Public/api names keep the Python name as cname; private
            # constants get the enum-prefixed mangled form.
            if self.in_cinclude or (visibility == 'public' or api):
                cname = name
            else:
                cname = self.mangle(Naming.enum_prefix, name)
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_const = 1
        entry.value_node = value
        return entry
    def declare_type(self, name, type, pos,
            cname = None, visibility = 'private', api = 0, defining = 1,
            shadow = 0, template = 0):
        # Add an entry for a type definition.
        if not cname:
            cname = name
        entry = self.declare(name, cname, type, pos, visibility, shadow,
                             is_type=True)
        entry.is_type = 1
        entry.api = api
        if defining:
            self.type_entries.append(entry)
        if not template:
            # Link the type back to its defining entry (templates stay generic).
            type.entry = entry
        # here we would set as_variable to an object representing this type
        return entry
    def declare_typedef(self, name, base_type, pos, cname = None,
                        visibility = 'private', api = 0):
        # Add an entry for a ctypedef of *base_type* under *name*.
        if not cname:
            if self.in_cinclude or (visibility == 'public' or api):
                cname = name
            else:
                cname = self.mangle(Naming.type_prefix, name)
        try:
            type = PyrexTypes.create_typedef_type(name, base_type, cname,
                                                  (visibility == 'extern'))
        except ValueError, e:
            # Invalid typedef target: report and fall back to the error type
            # so analysis can continue. (Python 2 except syntax.)
            error(pos, e.args[0])
            type = PyrexTypes.error_type
        entry = self.declare_type(name, type, pos, cname,
                                  visibility = visibility, api = api)
        type.qualified_name = entry.qualified_name
        return entry
    def declare_struct_or_union(self, name, kind, scope,
                                typedef_flag, pos, cname = None,
                                visibility = 'private', api = 0,
                                packed = False):
        # Add an entry for a struct or union definition.
        if not cname:
            if self.in_cinclude or (visibility == 'public' or api):
                cname = name
            else:
                cname = self.mangle(Naming.type_prefix, name)
        entry = self.lookup_here(name)
        if not entry:
            type = PyrexTypes.CStructOrUnionType(
                name, kind, scope, typedef_flag, cname, packed)
            entry = self.declare_type(name, type, pos, cname,
                                      visibility = visibility, api = api,
                                      defining = scope is not None)
            self.sue_entries.append(entry)
            type.entry = entry
        else:
            # Re-declaration: must match kind, and only one definition
            # (a scope) may supply the member list.
            if not (entry.is_type and entry.type.is_struct_or_union
                    and entry.type.kind == kind):
                warning(pos, "'%s' redeclared " % name, 0)
            elif scope and entry.type.scope:
                warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
            else:
                self.check_previous_typedef_flag(entry, typedef_flag, pos)
                self.check_previous_visibility(entry, visibility, pos)
                if scope:
                    entry.type.scope = scope
                    self.type_entries.append(entry)
        return entry
    def declare_cpp_class(self, name, scope,
                          pos, cname = None, base_classes = (),
                          visibility = 'extern', templates = None):
        # Add an entry for a C++ class; merges with an earlier declaration
        # of the same class where legal (forward declarations).
        if cname is None:
            if self.in_cinclude or (visibility != 'private'):
                cname = name
            else:
                cname = self.mangle(Naming.type_prefix, name)
        base_classes = list(base_classes)
        entry = self.lookup_here(name)
        if not entry:
            type = PyrexTypes.CppClassType(
                name, scope, cname, base_classes, templates = templates)
            entry = self.declare_type(name, type, pos, cname,
                visibility = visibility, defining = scope is not None)
            self.sue_entries.append(entry)
        else:
            if not (entry.is_type and entry.type.is_cpp_class):
                error(pos, "'%s' redeclared " % name)
                return None
            elif scope and entry.type.scope:
                warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
            else:
                if scope:
                    entry.type.scope = scope
                    self.type_entries.append(entry)
        if base_classes:
            # Base classes may only be filled in once and must then agree.
            if entry.type.base_classes and entry.type.base_classes != base_classes:
                error(pos, "Base type does not match previous declaration")
            else:
                entry.type.base_classes = base_classes
        if templates or entry.type.templates:
            if templates != entry.type.templates:
                error(pos, "Template parameters do not match previous declaration")
        def declare_inherited_attributes(entry, base_classes):
            # Recursively pull attributes from all (transitive) bases.
            for base_class in base_classes:
                if base_class is PyrexTypes.error_type:
                    continue
                if base_class.scope is None:
                    error(pos, "Cannot inherit from incomplete type")
                else:
                    declare_inherited_attributes(entry, base_class.base_classes)
                    entry.type.scope.declare_inherited_cpp_attributes(base_class.scope)
        if entry.type.scope:
            declare_inherited_attributes(entry, base_classes)
            entry.type.scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
        if self.is_cpp_class_scope:
            entry.type.namespace = self.outer_scope.lookup(self.name).type
        return entry
    def check_previous_typedef_flag(self, entry, typedef_flag, pos):
        """Error if the cdef/ctypedef spelling differs from the earlier one."""
        if typedef_flag != entry.type.typedef_flag:
            error(pos, "'%s' previously declared using '%s'" % (
                entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
    def check_previous_visibility(self, entry, visibility, pos):
        """Error if *visibility* conflicts with the entry's earlier one."""
        if entry.visibility != visibility:
            error(pos, "'%s' previously declared as '%s'" % (
                entry.name, entry.visibility))
    def declare_enum(self, name, pos, cname, typedef_flag,
            visibility = 'private', api = 0):
        # Add an entry for an enum type; a missing *name* declares an
        # anonymous enum instead.
        if name:
            if not cname:
                if self.in_cinclude or (visibility == 'public' or api):
                    cname = name
                else:
                    cname = self.mangle(Naming.type_prefix, name)
            type = PyrexTypes.CEnumType(name, cname, typedef_flag)
        else:
            type = PyrexTypes.c_anon_enum_type
        entry = self.declare_type(name, type, pos, cname = cname,
            visibility = visibility, api = api)
        entry.enum_values = []
        self.sue_entries.append(entry)
        return entry
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0):
        # Add an entry for a variable.
        if not cname:
            if visibility != 'private' or api:
                cname = name
            else:
                cname = self.mangle(Naming.var_prefix, name)
        if type.is_cpp_class and visibility != 'extern':
            # C++ class variables require a default (nullary) constructor.
            type.check_nullary_constructor(pos)
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        if in_pxd and visibility != 'extern':
            entry.defined_in_pxd = 1
            entry.used = 1
        if api:
            entry.api = 1
            entry.used = 1
        return entry
    def declare_builtin(self, name, pos):
        """Delegate builtin declaration up the outer-scope chain."""
        return self.outer_scope.declare_builtin(name, pos)
    def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
        # Helper: (re)declare 'name' as a Python function; complain if it
        # clashes with a previous non-C-function declaration.
        if entry and not entry.type.is_cfunction:
            error(pos, "'%s' already declared" % name)
            error(entry.pos, "Previous declaration is here")
        entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
        entry.signature = pyfunction_signature
        self.pyfunc_entries.append(entry)
        return entry
    def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
        # Add an entry for a Python function.
        entry = self.lookup_here(name)
        if not allow_redefine:
            return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
        if entry:
            if entry.type.is_unspecified:
                entry.type = py_object_type
            elif entry.type is not py_object_type:
                # Conflicts with a previous non-object declaration.
                return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
        else: # declare entry stub
            self.declare_var(name, py_object_type, pos, visibility=visibility)
        # The function object itself lives in an anonymous entry whose C
        # name is the Python name; the stub above only reserves the name.
        entry = self.declare_var(None, py_object_type, pos,
                                 cname=name, visibility='private')
        entry.name = EncodedString(name)
        entry.qualified_name = self.qualify_name(name)
        entry.signature = pyfunction_signature
        entry.is_anonymous = True
        return entry
    def declare_lambda_function(self, lambda_name, pos):
        # Add an entry for an anonymous Python function.
        func_cname = self.mangle(Naming.lambda_func_prefix + u'funcdef_', lambda_name)
        pymethdef_cname = self.mangle(Naming.lambda_func_prefix + u'methdef_', lambda_name)
        qualified_name = self.qualify_name(lambda_name)
        # Anonymous entry: no name is bound in this scope's dictionary.
        entry = self.declare(None, func_cname, py_object_type, pos, 'private')
        entry.name = lambda_name
        entry.qualified_name = qualified_name
        entry.pymethdef_cname = pymethdef_cname
        entry.func_cname = func_cname
        entry.signature = pyfunction_signature
        entry.is_anonymous = True
        return entry
    def add_lambda_def(self, def_node):
        # Record a lambda DefNode for later code generation in this scope.
        self.lambda_defs.append(def_node)
    def register_pyfunction(self, entry):
        # Register an externally created entry as a Python function.
        self.pyfunc_entries.append(entry)
    def declare_cfunction(self, name, type, pos,
            cname = None, visibility = 'private', api = 0, in_pxd = 0,
            defining = 0, modifiers = (), utility_code = None):
        # Add an entry for a C function.
        if not cname:
            if visibility != 'private' or api:
                cname = name
            else:
                cname = self.mangle(Naming.func_prefix, name)
        entry = self.lookup_here(name)
        if entry:
            # Name already declared: check visibility and signature
            # compatibility, possibly adding an overloaded alternative.
            if visibility != 'private' and visibility != entry.visibility:
                warning(pos, "Function '%s' previously declared as '%s'" % (name, entry.visibility), 1)
            if not entry.type.same_as(type):
                if visibility == 'extern' and entry.visibility == 'extern':
                    can_override = False
                    if self.is_cpp():
                        can_override = True
                    elif cname:
                        # if all alternatives have different cnames,
                        # it's safe to allow signature overrides
                        for alt_entry in entry.all_alternatives():
                            if not alt_entry.cname or cname == alt_entry.cname:
                                break # cname not unique!
                        else:
                            can_override = True
                    if can_override:
                        temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
                        temp.overloaded_alternatives = entry.all_alternatives()
                        entry = temp
                    else:
                        warning(pos, "Function signature does not match previous declaration", 1)
                        entry.type = type
                else:
                    error(pos, "Function signature does not match previous declaration")
        else:
            entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
            entry.func_cname = cname
        if in_pxd and visibility != 'extern':
            entry.defined_in_pxd = 1
        if api:
            entry.api = 1
        if not defining and not in_pxd and visibility != 'extern':
            error(pos, "Non-extern C function '%s' declared but not defined" % name)
        if defining:
            entry.is_implemented = True
        if modifiers:
            entry.func_modifiers = modifiers
        if utility_code:
            assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname)
            entry.utility_code = utility_code
        type.entry = entry
        return entry
    def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
        # Add a C function entry without giving it a func_cname.
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_cfunction = 1
        if modifiers:
            entry.func_modifiers = modifiers
        self.cfunc_entries.append(entry)
        return entry
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
if entry:
return entry
else:
error(pos, "'%s' is not declared" % name)
    def find_imported_module(self, path, pos):
        # Look up qualified name, must be a module, report error if not found.
        # Path is a list of names.
        scope = self
        for name in path:
            entry = scope.find(name, pos)
            if not entry:
                # find() already reported the error.
                return None
            if entry.as_module:
                scope = entry.as_module
            else:
                error(pos, "'%s' is not a cimported module" % '.'.join(path))
                return None
        return scope
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
return (self.lookup_here(name)
or (self.outer_scope and self.outer_scope.lookup(name))
or None)
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
return self.entries.get(name, None)
    def lookup_target(self, name):
        # Look up name in this scope only. Declare as Python
        # variable if not found.
        entry = self.lookup_here(name)
        if not entry:
            # Assignment targets spring into existence as Python objects.
            entry = self.declare_var(name, py_object_type, None)
        return entry
    def lookup_type(self, name):
        # Return the type named 'name', or None if it is not a type.
        entry = self.lookup(name)
        if entry and entry.is_type:
            if entry.type.is_fused and self.fused_to_specific:
                # Resolve fused types to their current specialization.
                return entry.type.specialize(self.fused_to_specific)
            return entry.type
    def lookup_operator(self, operator, operands):
        # Find the best-matching C++ operator overload for 'operator'
        # applied to 'operands'; member operators take precedence over
        # free functions. Returns None if nothing matches.
        if operands[0].type.is_cpp_class:
            obj_type = operands[0].type
            method = obj_type.scope.lookup("operator%s" % operator)
            if method is not None:
                # Member operator: match against the remaining operands.
                res = PyrexTypes.best_match(operands[1:], method.all_alternatives())
                if res is not None:
                    return res
        function = self.lookup("operator%s" % operator)
        if function is None:
            return None
        return PyrexTypes.best_match(operands, function.all_alternatives())
    def lookup_operator_for_types(self, pos, operator, types):
        # Like lookup_operator(), but takes bare types, wrapping them in
        # fake operand nodes for the overload-resolution machinery.
        from Nodes import Node
        class FakeOperand(Node):
            pass
        operands = [FakeOperand(pos, type=type) for type in types]
        return self.lookup_operator(operator, operands)
    def use_utility_code(self, new_code):
        # Forward utility-code registration to the module scope.
        self.global_scope().use_utility_code(new_code)
    def generate_library_function_declarations(self, code):
        # Generate extern decls for C library funcs used.
        # No-op by default; specialised scopes override this.
        pass
def defines_any(self, names):
# Test whether any of the given names are
# defined in this scope.
for name in names:
if name in self.entries:
return 1
return 0
    def infer_types(self):
        # Run type inference over this scope's variables.
        from TypeInference import get_type_inferer
        get_type_inferer().infer_types(self)
def is_cpp(self):
outer = self.outer_scope
if outer is None:
return False
else:
return outer.is_cpp()
    def add_include_file(self, filename):
        # Forward C header includes to the module scope.
        self.outer_scope.add_include_file(filename)
class PreImportScope(Scope):
    # Scope backing the "pre-import" option: names not found anywhere
    # else resolve here and are fetched from the configured module at
    # runtime instead of causing a compile-time error.
    namespace_cname = Naming.preimport_cname
    def __init__(self):
        Scope.__init__(self, Options.pre_import, None, None)
    def declare_builtin(self, name, pos):
        # Any name can be "declared" here; it is looked up dynamically.
        entry = self.declare(name, name, py_object_type, pos, 'private')
        entry.is_variable = True
        entry.is_pyglobal = True
        return entry
class BuiltinScope(Scope):
    # The builtin namespace.
    is_builtin_scope = True
    def __init__(self):
        # Chain to the pre-import scope (if configured) as a last resort.
        if Options.pre_import is None:
            Scope.__init__(self, "__builtin__", None, None)
        else:
            Scope.__init__(self, "__builtin__", PreImportScope(), None)
        self.type_names = {}
        for name, definition in self.builtin_entries.iteritems():
            cname, type = definition
            self.declare_var(name, type, None, cname)
    def lookup(self, name, language_level=None):
        # 'language_level' is passed by ModuleScope
        if language_level == 3:
            # In Py3 mode, 'str' means the unicode type.
            if name == 'str':
                name = 'unicode'
        return Scope.lookup(self, name)
    def declare_builtin(self, name, pos):
        if not hasattr(builtins, name):
            if self.outer_scope is not None:
                # Fall back to the pre-import scope.
                return self.outer_scope.declare_builtin(name, pos)
            else:
                if Options.error_on_unknown_names:
                    error(pos, "undeclared name not builtin: %s" % name)
                else:
                    warning(pos, "undeclared name not builtin: %s" % name, 2)
    def declare_builtin_cfunction(self, name, type, cname, python_equiv = None,
            utility_code = None):
        # If python_equiv == "*", the Python equivalent has the same name
        # as the entry, otherwise it has the name specified by python_equiv.
        name = EncodedString(name)
        entry = self.declare_cfunction(name, type, None, cname, visibility='extern',
                                       utility_code = utility_code)
        if python_equiv:
            if python_equiv == "*":
                python_equiv = name
            else:
                python_equiv = EncodedString(python_equiv)
            # Attach a variable entry so the builtin can also be used as
            # a plain Python object.
            var_entry = Entry(python_equiv, python_equiv, py_object_type)
            var_entry.is_variable = 1
            var_entry.is_builtin = 1
            var_entry.utility_code = utility_code
            entry.as_variable = var_entry
        return entry
    def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
        # Declare a builtin Python type (e.g. list, dict) with an
        # extension-type style scope for method lookup.
        name = EncodedString(name)
        type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
        scope = CClassScope(name, outer_scope=None, visibility='extern')
        scope.directives = {}
        if name == 'bool':
            # bool cannot be subclassed in Python.
            type.is_final_type = True
        type.set_scope(scope)
        self.type_names[name] = 1
        entry = self.declare_type(name, type, None, visibility='extern')
        entry.utility_code = utility_code
        var_entry = Entry(name = entry.name,
            type = self.lookup('type').type, # make sure "type" is the first type declared...
            pos = entry.pos,
            cname = "((PyObject*)%s)" % entry.type.typeptr_cname)
        var_entry.is_variable = 1
        var_entry.is_cglobal = 1
        var_entry.is_readonly = 1
        var_entry.is_builtin = 1
        var_entry.utility_code = utility_code
        if Options.cache_builtins:
            var_entry.is_const = True
        entry.as_variable = var_entry
        return type
    def builtin_scope(self):
        # The builtin scope is its own builtin scope.
        return self
    # Mapping of builtin names to (C expression, type) pairs.
    builtin_entries = {
        "type":   ["((PyObject*)&PyType_Type)", py_object_type],
        "bool":   ["((PyObject*)&PyBool_Type)", py_object_type],
        "int":    ["((PyObject*)&PyInt_Type)", py_object_type],
        "long":   ["((PyObject*)&PyLong_Type)", py_object_type],
        "float":  ["((PyObject*)&PyFloat_Type)", py_object_type],
        "complex":["((PyObject*)&PyComplex_Type)", py_object_type],
        "bytes":  ["((PyObject*)&PyBytes_Type)", py_object_type],
        "bytearray":   ["((PyObject*)&PyByteArray_Type)", py_object_type],
        "str":    ["((PyObject*)&PyString_Type)", py_object_type],
        "unicode":["((PyObject*)&PyUnicode_Type)", py_object_type],
        "tuple":  ["((PyObject*)&PyTuple_Type)", py_object_type],
        "list":   ["((PyObject*)&PyList_Type)", py_object_type],
        "dict":   ["((PyObject*)&PyDict_Type)", py_object_type],
        "set":    ["((PyObject*)&PySet_Type)", py_object_type],
        "frozenset":   ["((PyObject*)&PyFrozenSet_Type)", py_object_type],
        "slice":  ["((PyObject*)&PySlice_Type)", py_object_type],
#        "file":   ["((PyObject*)&PyFile_Type)", py_object_type],  # not in Py3
        "None":   ["Py_None", py_object_type],
        "False":  ["Py_False", py_object_type],
        "True":   ["Py_True", py_object_type],
    }
const_counter = 1 # Global counter for naming constants; a temporary solution for compiling code in pxds
class ModuleScope(Scope):
    # module_name          string             Python name of the module
    # module_cname         string             C name of Python module object
    # #module_dict_cname   string             C name of module dict object
    # method_table_cname   string             C name of method table
    # doc                  string             Module doc string
    # doc_cname            string             C name of module doc string
    # utility_code_list    [UtilityCode]      Queuing utility codes for forwarding to Code.py
    # python_include_files [string]           Standard  Python headers to be included
    # include_files        [string]           Other C headers to be included
    # string_to_entry      {string : Entry}   Map string const to entry
    # identifier_to_entry  {string : Entry}   Map identifier string const to entry
    # context              Context
    # parent_module        Scope              Parent in the import namespace
    # module_entries       {string : Entry}   For cimport statements
    # type_names           {string : 1}       Set of type names (used during parsing)
    # included_files       [string]           Cython sources included with 'include'
    # pxd_file_loaded      boolean            Corresponding .pxd file has been processed
    # cimported_modules    [ModuleScope]      Modules imported with cimport
    # types_imported       {PyrexType}        Set of types for which import code generated
    # has_import_star      boolean            Module contains import *
    # cpp                  boolean            Compiling a C++ file
    # is_cython_builtin    boolean            Is this the Cython builtin scope (or a child scope)
    # is_package           boolean            Is this a package module? (__init__)
    is_module_scope = 1
    has_import_star = 0
    is_cython_builtin = 0
    def __init__(self, name, parent_module, context):
        import Builtin
        self.parent_module = parent_module
        outer_scope = Builtin.builtin_scope
        Scope.__init__(self, name, outer_scope, parent_module)
        if name == "__init__":
            # Treat Spam/__init__.pyx specially, so that when Python loads
            # Spam/__init__.so, initSpam() is defined.
            self.module_name = parent_module.module_name
            self.is_package = True
        else:
            self.module_name = name
            self.is_package = False
        self.module_name = EncodedString(self.module_name)
        self.context = context
        self.module_cname = Naming.module_cname
        self.module_dict_cname = Naming.moddict_cname
        self.method_table_cname = Naming.methtable_cname
        self.doc = ""
        self.doc_cname = Naming.moddoc_cname
        self.utility_code_list = []
        self.module_entries = {}
        self.python_include_files = ["Python.h"]
        self.include_files = []
        self.type_names = dict(outer_scope.type_names)
        self.pxd_file_loaded = 0
        self.cimported_modules = []
        self.types_imported = set()
        self.included_files = []
        self.has_extern_class = 0
        self.cached_builtins = []
        self.undeclared_cached_builtins = []
        self.namespace_cname = self.module_cname
        # Pre-declare the implicit module-level names.
        for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__']:
            self.declare_var(EncodedString(var_name), py_object_type, None)
    def qualifying_scope(self):
        # Names declared here are qualified by the parent package.
        return self.parent_module
    def global_scope(self):
        # A module is its own global scope.
        return self
    def lookup(self, name):
        # Look up here first, then fall back to the builtin scope,
        # passing along the module's language level.
        entry = self.lookup_here(name)
        if entry is not None:
            return entry
        if self.context is not None:
            language_level = self.context.language_level
        else:
            language_level = 3
        return self.outer_scope.lookup(name, language_level=language_level)
    def declare_builtin(self, name, pos):
        # Declare (or fetch a cached entry for) a builtin name used in
        # this module, warning/erroring on unknown names.
        if not hasattr(builtins, name) \
               and name not in Code.non_portable_builtins_map \
               and name not in Code.uncachable_builtins:
            if self.has_import_star:
                entry = self.declare_var(name, py_object_type, pos)
                return entry
            else:
                if Options.error_on_unknown_names:
                    error(pos, "undeclared name not builtin: %s" % name)
                else:
                    warning(pos, "undeclared name not builtin: %s" % name, 2)
                # unknown - assume it's builtin and look it up at runtime
                entry = self.declare(name, None, py_object_type, pos, 'private')
                entry.is_builtin = 1
                return entry
        if Options.cache_builtins:
            for entry in self.cached_builtins:
                if entry.name == name:
                    return entry
        entry = self.declare(None, None, py_object_type, pos, 'private')
        if Options.cache_builtins and name not in Code.uncachable_builtins:
            entry.is_builtin = 1
            entry.is_const = 1 # cached
            entry.name = name
            entry.cname = Naming.builtin_prefix + name
            self.cached_builtins.append(entry)
            self.undeclared_cached_builtins.append(entry)
        else:
            entry.is_builtin = 1
            entry.name = name
        return entry
    def find_module(self, module_name, pos):
        # Find a module in the import namespace, interpreting
        # relative imports relative to this module's parent.
        # Finds and parses the module's .pxd file if the module
        # has not been referenced before.
        return self.global_scope().context.find_module(
            module_name, relative_to = self.parent_module, pos = pos)
    def find_submodule(self, name):
        # Find and return scope for a submodule of this module,
        # creating a new empty one if necessary. Doesn't parse .pxd.
        scope = self.lookup_submodule(name)
        if not scope:
            scope = ModuleScope(name,
                parent_module = self, context = self.context)
            self.module_entries[name] = scope
        return scope
    def lookup_submodule(self, name):
        # Return scope for submodule of this module, or None.
        return self.module_entries.get(name, None)
    def add_include_file(self, filename):
        # Queue a C header for inclusion (once) in the generated code.
        if filename not in self.python_include_files \
            and filename not in self.include_files:
            self.include_files.append(filename)
    def add_imported_module(self, scope):
        # Record a cimported module (transitively), pulling in its
        # C header includes.
        if scope not in self.cimported_modules:
            for filename in scope.include_files:
                self.add_include_file(filename)
            self.cimported_modules.append(scope)
            for m in scope.cimported_modules:
                self.add_imported_module(m)
    def add_imported_entry(self, name, entry, pos):
        # Bind an entry cimported from another module under 'name'.
        if entry not in self.entries:
            self.entries[name] = entry
        else:
            warning(pos, "'%s' redeclared  " % name, 0)
    def declare_module(self, name, scope, pos):
        # Declare a cimported module. This is represented as a
        # Python module-level variable entry with a module
        # scope attached to it. Reports an error and returns
        # None if previously declared as something else.
        entry = self.lookup_here(name)
        if entry:
            if entry.is_pyglobal and entry.as_module is scope:
                return entry # Already declared as the same module
            if not (entry.is_pyglobal and not entry.as_module):
                # SAGE -- I put this here so Pyrex
                # cimport's work across directories.
                # Currently it tries to multiply define
                # every module appearing in an import list.
                # It shouldn't be an error for a module
                # name to appear again, and indeed the generated
                # code compiles fine.
                return entry
        else:
            entry = self.declare_var(name, py_object_type, pos)
        entry.as_module = scope
        self.add_imported_module(scope)
        return entry
    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private',
            api = 0, in_pxd = 0, is_cdef = 0):
        # Add an entry for a global variable. If it is a Python
        # object type, and not declared with cdef, it will live
        # in the module dictionary, otherwise it will be a C
        # global variable.
        if not visibility in ('private', 'public', 'extern'):
            error(pos, "Module-level variable cannot be declared %s" % visibility)
        if not is_cdef:
            if type is unspecified_type:
                type = py_object_type
            if not (type.is_pyobject and not type.is_extension_type):
                raise InternalError(
                    "Non-cdef global variable is not a generic Python object")
        if not cname:
            defining = not in_pxd
            if visibility == 'extern' or (visibility == 'public' and defining):
                cname = name
            else:
                cname = self.mangle(Naming.var_prefix, name)
        entry = self.lookup_here(name)
        if entry and entry.defined_in_pxd:
            # Merge with the declaration from the .pxd file.
            #if visibility != 'private' and visibility != entry.visibility:
            #    warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
            if not entry.type.same_as(type):
                if visibility == 'extern' and entry.visibility == 'extern':
                    warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
                    entry.type = type
                #else:
                #    error(pos, "Variable '%s' type does not match previous declaration" % name)
            if entry.visibility != "private":
                mangled_cname = self.mangle(Naming.var_prefix, name)
                if entry.cname == mangled_cname:
                    cname = name
                    entry.cname = name
            if not entry.is_implemented:
                entry.is_implemented = True
            return entry
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef)
        if is_cdef:
            entry.is_cglobal = 1
            if entry.type.is_pyobject:
                entry.init = 0
            self.var_entries.append(entry)
        else:
            entry.is_pyglobal = 1
        if Options.cimport_from_pyx:
            entry.used = 1
        return entry
    def declare_cfunction(self, name, type, pos,
                          cname = None, visibility = 'private', api = 0, in_pxd = 0,
                          defining = 0, modifiers = (), utility_code = None):
        # Add an entry for a C function.
        if not cname:
            if visibility == 'extern' or (visibility == 'public' and defining):
                cname = name
            else:
                cname = self.mangle(Naming.func_prefix, name)
        entry = self.lookup_here(name)
        if entry and entry.defined_in_pxd:
            # Merge with the declaration from the .pxd file.
            if entry.visibility != "private":
                mangled_cname = self.mangle(Naming.var_prefix, name)
                if entry.cname == mangled_cname:
                    cname = name
                    entry.cname = cname
                    entry.func_cname = cname
        entry = Scope.declare_cfunction(
            self, name, type, pos,
            cname = cname, visibility = visibility, api = api, in_pxd = in_pxd,
            defining = defining, modifiers = modifiers, utility_code = utility_code)
        return entry
    def declare_global(self, name, pos):
        # Ensure a module-level Python variable entry for 'name'.
        entry = self.lookup_here(name)
        if not entry:
            self.declare_var(name, py_object_type, pos)
    def use_utility_code(self, new_code):
        # Queue utility code for inclusion in the generated module.
        if new_code is not None:
            self.utility_code_list.append(new_code)
    def declare_c_class(self, name, pos, defining = 0, implementing = 0,
            module_name = None, base_type = None, objstruct_cname = None,
            typeobj_cname = None, typeptr_cname = None, visibility = 'private', typedef_flag = 0, api = 0,
            buffer_defaults = None, shadow = 0):
        # Declare an extension type (cdef class), merging with any
        # previous declaration of the same name.
        # If this is a non-extern typedef class, expose the typedef, but use
        # the non-typedef struct internally to avoid needing forward
        # declarations for anonymous structs.
        if typedef_flag and visibility != 'extern':
            if not (visibility == 'public' or api):
                warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
            objtypedef_cname = objstruct_cname
            typedef_flag = 0
        else:
            objtypedef_cname = None
        #
        # Look for previous declaration as a type
        #
        entry = self.lookup_here(name)
        if entry and not shadow:
            type = entry.type
            if not (entry.is_type and type.is_extension_type):
                entry = None # Will cause redeclaration and produce an error
            else:
                scope = type.scope
                if typedef_flag and (not scope or scope.defined):
                    self.check_previous_typedef_flag(entry, typedef_flag, pos)
                if (scope and scope.defined) or (base_type and type.base_type):
                    if base_type and base_type is not type.base_type:
                        error(pos, "Base type does not match previous declaration")
                if base_type and not type.base_type:
                    type.base_type = base_type
        #
        # Make a new entry if needed
        #
        if not entry or shadow:
            type = PyrexTypes.PyExtensionType(name, typedef_flag, base_type, visibility == 'extern')
            type.pos = pos
            type.buffer_defaults = buffer_defaults
            if objtypedef_cname is not None:
                type.objtypedef_cname = objtypedef_cname
            if visibility == 'extern':
                type.module_name = module_name
            else:
                type.module_name = self.qualified_name
            if typeptr_cname:
                type.typeptr_cname = typeptr_cname
            else:
                type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
            entry = self.declare_type(name, type, pos, visibility = visibility,
                defining = 0, shadow = shadow)
            entry.is_cclass = True
            if objstruct_cname:
                type.objstruct_cname = objstruct_cname
            elif not entry.in_cinclude:
                type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
            else:
                error(entry.pos,
                    "Object name required for 'public' or 'extern' C class")
            self.attach_var_entry_to_c_class(entry)
            self.c_class_entries.append(entry)
        #
        # Check for re-definition and create scope if needed
        #
        if not type.scope:
            if defining or implementing:
                scope = CClassScope(name = name, outer_scope = self,
                    visibility = visibility)
                scope.directives = self.directives.copy()
                if base_type and base_type.scope:
                    scope.declare_inherited_c_attributes(base_type.scope)
                type.set_scope(scope)
                self.type_entries.append(entry)
        else:
            if defining and type.scope.defined:
                error(pos, "C class '%s' already defined" % name)
            elif implementing and type.scope.implemented:
                error(pos, "C class '%s' already implemented" % name)
        #
        # Fill in options, checking for compatibility with any previous declaration
        #
        if defining:
            entry.defined_in_pxd = 1
        if implementing:   # So that filenames in runtime exceptions refer to
            entry.pos = pos  # the .pyx file and not the .pxd file
        if visibility != 'private' and entry.visibility != visibility:
            error(pos, "Class '%s' previously declared as '%s'"
                % (name, entry.visibility))
        if api:
            entry.api = 1
        if objstruct_cname:
            if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
                error(pos, "Object struct name differs from previous declaration")
            type.objstruct_cname = objstruct_cname
        if typeobj_cname:
            if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
                error(pos, "Type object name differs from previous declaration")
            type.typeobj_cname = typeobj_cname
        if self.directives.get('final'):
            entry.type.is_final_type = True
        # cdef classes are always exported, but we need to set it to
        # distinguish between unused Cython utility code extension classes
        entry.used = True
        #
        # Return new or existing entry
        #
        return entry
    def allocate_vtable_names(self, entry):
        #  If extension type has a vtable, allocate vtable struct and
        #  slot names for it.
        type = entry.type
        if type.base_type and type.base_type.vtabslot_cname:
            #print "...allocating vtabslot_cname because base type has one" ###
            type.vtabslot_cname = "%s.%s" % (
                Naming.obj_base_cname, type.base_type.vtabslot_cname)
        elif type.scope and type.scope.cfunc_entries:
            # one special case here: when inheriting from builtin
            # types, the methods may also be built-in, in which
            # case they won't need a vtable
            entry_count = len(type.scope.cfunc_entries)
            base_type = type.base_type
            while base_type:
                # FIXME: this will break if we ever get non-inherited C methods
                if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
                    break
                if base_type.is_builtin_type:
                    # builtin base type defines all methods => no vtable needed
                    return
                base_type = base_type.base_type
            #print "...allocating vtabslot_cname because there are C methods" ###
            type.vtabslot_cname = Naming.vtabslot_cname
        if type.vtabslot_cname:
            #print "...allocating other vtable related cnames" ###
            type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
            type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
    def check_c_classes_pxd(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the .pxd.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is fully declared
        #
        # Also allocates a name for the vtable if needed.
        #
        for entry in self.c_class_entries:
            # Check defined
            if not entry.type.scope:
                error(entry.pos, "C class '%s' is declared but not defined" % entry.name)
    def check_c_class(self, entry):
        # Verify one extension type is fully defined and allocate its
        # remaining generated names.
        type = entry.type
        name = entry.name
        visibility = entry.visibility
        # Check defined
        if not type.scope:
            error(entry.pos, "C class '%s' is declared but not defined" % name)
        # Generate typeobj_cname
        if visibility != 'extern' and not type.typeobj_cname:
            type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
        ## Generate typeptr_cname
        #type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
        # Check C methods defined
        if type.scope:
            for method_entry in type.scope.cfunc_entries:
                if not method_entry.is_inherited and not method_entry.func_cname:
                    error(method_entry.pos, "C method '%s' is declared but not defined" %
                        method_entry.name)
        # Allocate vtable name if necessary
        if type.vtabslot_cname:
            #print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
            type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
    def check_c_classes(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the main
        # .pyx file scope, not for cimported .pxd scopes.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is implemented
        #    * All required object and type names have been specified or generated
        #    * All non-inherited C methods are implemented
        #
        # Also allocates a name for the vtable if needed.
        #
        debug_check_c_classes = 0
        if debug_check_c_classes:
            print("Scope.check_c_classes: checking scope " + self.qualified_name)
        for entry in self.c_class_entries:
            if debug_check_c_classes:
                print("...entry %s %s" % (entry.name, entry))
                print("......type = ",  entry.type)
                print("......visibility = ", entry.visibility)
            self.check_c_class(entry)
    def check_c_functions(self):
        # Performs post-analysis checking making sure all
        # defined c functions are actually implemented.
        for name, entry in self.entries.items():
            if entry.is_cfunction:
                if (entry.defined_in_pxd
                        and entry.scope is self
                        and entry.visibility != 'extern'
                        and not entry.in_cinclude
                        and not entry.is_implemented):
                    error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
    def attach_var_entry_to_c_class(self, entry):
        # The name of an extension class has to serve as both a type
        # name and a variable name holding the type object. It is
        # represented in the symbol table by a type entry with a
        # variable entry attached to it. For the variable entry,
        # we use a read-only C global variable whose name is an
        # expression that refers to the type object.
        import Builtin
        var_entry = Entry(name = entry.name,
            type = Builtin.type_type,
            pos = entry.pos,
            cname = "((PyObject*)%s)" % entry.type.typeptr_cname)
        var_entry.is_variable = 1
        var_entry.is_cglobal = 1
        var_entry.is_readonly = 1
        entry.as_variable = var_entry
    def is_cpp(self):
        # The module scope knows directly whether C++ is being generated.
        return self.cpp
    def infer_types(self):
        # Module-level variables are always inferred as Python objects.
        from TypeInference import PyObjectTypeInferer
        PyObjectTypeInferer().infer_types(self)
class LocalScope(Scope):
    # Scope for the local variables of a function or method body.

    # Does the function have a 'with gil:' block?
    has_with_gil_block = False

    # Transient attribute, used for symbol table variable declarations
    _in_with_gil_block = False

    def __init__(self, name, outer_scope, parent_scope = None):
        if parent_scope is None:
            parent_scope = outer_scope
        Scope.__init__(self, name, outer_scope, parent_scope)

    def mangle(self, prefix, name):
        # Local names only need to be unique within this function.
        return prefix + name

    def declare_arg(self, name, type, pos):
        # Add an entry for an argument of a function.
        cname = self.mangle(Naming.var_prefix, name)
        entry = self.declare(name, cname, type, pos, 'private')
        entry.is_variable = 1
        if type.is_pyobject:
            entry.init = "0"
        entry.is_arg = 1
        #entry.borrowed = 1 # Not using borrowed arg refs for now
        self.arg_entries.append(entry)
        return entry

    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private',
            api = 0, in_pxd = 0, is_cdef = 0):
        # Add an entry for a local variable.
        if visibility in ('public', 'readonly'):
            error(pos, "Local variable cannot be declared %s" % visibility)
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef)
        if type.is_pyobject:
            entry.init = "0"
        entry.is_local = 1
        # Remember whether the variable was declared while the GIL was
        # (re)acquired, which affects cleanup code generation.
        entry.in_with_gil_block = self._in_with_gil_block
        self.var_entries.append(entry)
        return entry

    def declare_global(self, name, pos):
        # Pull entry from global scope into local scope.
        if self.lookup_here(name):
            # Fixed: the name was previously not interpolated into the
            # message (the bare format string was passed to warning()).
            warning(pos, "'%s' redeclared " % name, 0)
        else:
            entry = self.global_scope().lookup_target(name)
            self.entries[name] = entry

    def declare_nonlocal(self, name, pos):
        # Pull entry from outer scope into local scope
        orig_entry = self.lookup_here(name)
        if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
            error(pos, "'%s' redeclared as nonlocal" % name)
        else:
            entry = self.lookup(name)
            if entry is None or not entry.from_closure:
                error(pos, "no binding for nonlocal '%s' found" % name)

    def lookup(self, name):
        # Look up name in this scope or an enclosing one.
        # Return None if not found.
        entry = Scope.lookup(self, name)
        if entry is not None:
            if entry.scope is not self and entry.scope.is_closure_scope:
                if hasattr(entry.scope, "scope_class"):
                    raise InternalError("lookup() after scope class created.")
                # The actual c fragment for the different scopes differs
                # on the outside and inside, so we make a new entry
                entry.in_closure = True
                inner_entry = InnerEntry(entry, self)
                inner_entry.is_variable = True
                self.entries[name] = inner_entry
                return inner_entry
        return entry

    def mangle_closure_cnames(self, outer_scope_cname):
        # Rewrite cnames of closure variables so they are accessed
        # through the generated closure scope struct.
        for entry in self.entries.values():
            if entry.from_closure:
                cname = entry.outer_entry.cname
                if self.is_passthrough:
                    entry.cname = cname
                else:
                    if cname.startswith(Naming.cur_scope_cname):
                        # Strip the "<cur_scope>->" prefix of the outer access.
                        cname = cname[len(Naming.cur_scope_cname)+2:]
                    entry.cname = "%s->%s" % (outer_scope_cname, cname)
            elif entry.in_closure:
                entry.original_cname = entry.cname
                entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
class GeneratorExpressionScope(Scope):
    """Scope for generator expressions and comprehensions.  As opposed
    to generators, these can be easily inlined in some cases, so all
    we really need is a scope that holds the loop variable(s).
    """
    def __init__(self, outer_scope):
        name = outer_scope.global_scope().next_id(Naming.genexpr_id_ref)
        Scope.__init__(self, name, outer_scope, outer_scope)
        self.directives = outer_scope.directives
        # Prefix that keeps this scope's cnames unique within the parent.
        self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
    def mangle(self, prefix, name):
        return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = True):
        if type is unspecified_type:
            # if the outer scope defines a type for this variable, inherit it
            outer_entry = self.outer_scope.lookup(name)
            if outer_entry and outer_entry.is_variable:
                type = outer_entry.type # may still be 'unspecified_type' !
        # the parent scope needs to generate code for the variable, but
        # this scope must hold its name exclusively
        cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        entry.is_local = 1
        self.var_entries.append(entry)
        self.entries[name] = entry
        return entry
    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Functions defined inside the expression belong to the outer scope.
        return self.outer_scope.declare_pyfunction(
            name, pos, allow_redefine)
    def declare_lambda_function(self, func_cname, pos):
        return self.outer_scope.declare_lambda_function(func_cname, pos)
    def add_lambda_def(self, def_node):
        return self.outer_scope.add_lambda_def(def_node)
class ClosureScope(LocalScope):
    """Local scope of a function that creates a closure (i.e. whose inner
    functions capture its variables).  Captured variables live in a
    generated C struct named via ``closure_cname``."""

    is_closure_scope = True

    def __init__(self, name, scope_name, outer_scope, parent_scope=None):
        LocalScope.__init__(self, name, outer_scope, parent_scope)
        self.closure_cname = "%s%s" % (Naming.closure_scope_prefix, scope_name)

#    def mangle_closure_cnames(self, scope_var):
#        for entry in self.entries.values() + self.temp_entries:
#            entry.in_closure = 1
#        LocalScope.mangle_closure_cnames(self, scope_var)

#    def mangle(self, prefix, name):
#        return "%s->%s" % (self.cur_scope_cname, name)
#        return "%s->%s" % (self.closure_cname, name)

    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Functions declared inside a closure are always private.
        return LocalScope.declare_pyfunction(self, name, pos, allow_redefine, visibility='private')
class StructOrUnionScope(Scope):
    #  Namespace of a C struct or union.

    def __init__(self, name="?"):
        # Struct scopes have no outer or parent scope.
        Scope.__init__(self, name, None, None)

    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0,
                    allow_pyobject = 0):
        """Add an entry for a struct/union member.

        Python-object members are rejected unless *allow_pyobject* is set,
        and members may not carry a non-private visibility.
        """
        if not cname:
            cname = name
            if visibility == 'private':
                cname = c_safe_identifier(cname)
        if type.is_cfunction:
            # Function members are stored as function pointers.
            type = PyrexTypes.CPtrType(type)
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        self.var_entries.append(entry)
        if type.is_pyobject and not allow_pyobject:
            error(pos,
                  "C struct/union member cannot be a Python object")
        if visibility != 'private':
            error(pos,
                  "C struct/union member cannot be declared %s" % visibility)
        return entry

    def declare_cfunction(self, name, type, pos,
                          cname = None, visibility = 'private', api = 0, in_pxd = 0,
                          defining = 0, modifiers = ()): # currently no utility code ...
        # A function member is just a function-pointer variable.
        return self.declare_var(name, type, pos,
                                cname=cname, visibility=visibility)
class ClassScope(Scope):
    #  Abstract base class for namespace of
    #  Python class or extension type.
    #
    #  class_name     string   Python name of the class
    #  scope_prefix   string   Additional prefix for names
    #                          declared in the class
    #  doc    string or None   Doc string

    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, outer_scope)
        self.class_name = name
        self.doc = None

    def lookup(self, name):
        """Look up *name*, special-casing the ``classmethod`` builtin."""
        entry = Scope.lookup(self, name)
        if entry:
            return entry
        if name == "classmethod":
            # We don't want to use the builtin classmethod here 'cause it won't do the
            # right thing in this scope (as the class members aren't still functions).
            # Don't want to add a cfunction to this scope 'cause that would mess with
            # the type definition, so we just return the right entry.
            entry = Entry(
                "classmethod",
                "__Pyx_Method_ClassMethod",
                PyrexTypes.CFuncType(
                    py_object_type,
                    [PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
            entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c")
            entry.is_cfunction = 1
        return entry
class PyClassScope(ClassScope):
    #  Namespace of a Python class.
    #
    #  class_obj_cname     string   C variable holding class object

    is_py_class_scope = 1

    def mangle_class_private_name(self, name):
        """Mangle a class-private name (``__name``) the way CPython does."""
        return self.mangle_special_name(name)

    def mangle_special_name(self, name):
        """Apply CPython-style private-name mangling.

        ``__spam`` (two leading underscores, no two trailing underscores)
        inside class ``Ham`` becomes ``_Ham__spam``; leading underscores of
        the class name are stripped first.  Other names pass through.
        """
        if name and name.startswith('__') and not name.endswith('__'):
            name = EncodedString('_%s%s' % (self.class_name.lstrip('_'), name))
        return name

    def lookup_here(self, name):
        # Private names must be mangled before lookup, mirroring declare_var().
        name = self.mangle_special_name(name)
        return ClassScope.lookup_here(self, name)

    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0):
        """Add an entry for a class attribute (private names get mangled)."""
        name = self.mangle_special_name(name)
        if type is unspecified_type:
            type = py_object_type
        # Add an entry for a class attribute.
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef)
        entry.is_pyglobal = 1
        entry.is_pyclass_attr = 1
        return entry

    def declare_nonlocal(self, name, pos):
        """Bind *name* in this scope to an entry from an enclosing scope."""
        # Pull entry from outer scope into local scope
        orig_entry = self.lookup_here(name)
        if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
            error(pos, "'%s' redeclared as nonlocal" % name)
        else:
            entry = self.lookup(name)
            if entry is None:
                error(pos, "no binding for nonlocal '%s' found" % name)
            else:
                # FIXME: this works, but it's unclear if it's the
                # right thing to do
                self.entries[name] = entry

    def declare_global(self, name, pos):
        """Bind *name* in this scope to the module-level entry."""
        # Pull entry from global scope into local scope.
        if self.lookup_here(name):
            # BUG FIX: the format string was previously passed to warning()
            # without interpolating `name`, so users saw a literal "%s".
            # Now interpolated, consistent with CClassScope.declare_cfunction.
            warning(pos, "'%s' redeclared " % name, 0)
        else:
            entry = self.global_scope().lookup_target(name)
            self.entries[name] = entry

    def add_default_value(self, type):
        # Default-value temps belong to the enclosing (non-class) scope.
        return self.outer_scope.add_default_value(type)
class CClassScope(ClassScope):
    #  Namespace of an extension type.
    #
    #  parent_type               CClassType
    #  #typeobj_cname            string or None
    #  #objstruct_cname          string
    #  method_table_cname        string
    #  getset_table_cname        string
    #  has_pyobject_attrs        boolean  Any PyObject attributes?
    #  has_memoryview_attrs      boolean  Any memory view attributes?
    #  has_cyclic_pyobject_attrs boolean  Any PyObject attributes that may need GC?
    #  property_entries          [Entry]
    #  defined                   boolean  Defined in .pxd file
    #  implemented               boolean  Defined in .pyx file
    #  inherited_var_entries     [Entry]  Adapted var entries from base class

    is_c_class_scope = 1

    has_pyobject_attrs = False
    has_memoryview_attrs = False
    has_cyclic_pyobject_attrs = False
    defined = False
    implemented = False

    def __init__(self, name, outer_scope, visibility):
        ClassScope.__init__(self, name, outer_scope)
        if visibility != 'extern':
            # Only non-extern types get generated method/getset tables.
            self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name)
            self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name)
        self.property_entries = []
        self.inherited_var_entries = []

    def needs_gc(self):
        # If the type or any of its base types have Python-valued
        # C attributes, then it needs to participate in GC.
        if self.has_cyclic_pyobject_attrs:
            return True
        base_type = self.parent_type.base_type
        if base_type and base_type.scope is not None:
            return base_type.scope.needs_gc()
        elif self.parent_type.is_builtin_type:
            return not self.parent_type.is_gc_simple
        return False

    def needs_tp_clear(self):
        """
        Do we need to generate an implementation for the tp_clear slot? Can
        be disabled to keep references for the __dealloc__ cleanup function.
        """
        return self.needs_gc() and not self.directives.get('no_gc_clear', False)

    def get_refcounted_entries(self, include_weakref=False,
                               include_gc_simple=True):
        """Collect the attribute entries that require refcounting support.

        Returns ``(have_entries, (py_attrs, py_buffers, memoryview_slices))``
        where the three lists partition the refcounted attributes by kind.
        """
        py_attrs = []
        py_buffers = []
        memoryview_slices = []
        for entry in self.var_entries:
            if entry.type.is_pyobject:
                if include_weakref or entry.name != "__weakref__":
                    if include_gc_simple or not entry.type.is_gc_simple:
                        py_attrs.append(entry)
            elif entry.type == PyrexTypes.c_py_buffer_type:
                py_buffers.append(entry)
            elif entry.type.is_memoryviewslice:
                memoryview_slices.append(entry)
        have_entries = py_attrs or py_buffers or memoryview_slices
        return have_entries, (py_attrs, py_buffers, memoryview_slices)

    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0):
        """Declare either a C attribute (*is_cdef*) or a Python-level class
        attribute of the extension type."""
        if is_cdef:
            # Add an entry for an attribute.
            if self.defined:
                # C struct layout is frozen once the pxd definition is done.
                error(pos,
                    "C attributes cannot be added in implementation part of"
                    " extension type defined in a pxd")
            if get_special_method_signature(name):
                error(pos,
                    "The name '%s' is reserved for a special method."
                        % name)
            if not cname:
                cname = name
                if visibility == 'private':
                    cname = c_safe_identifier(cname)
            if type.is_cpp_class and visibility != 'extern':
                # C++ members are constructed with placement new, which
                # requires a nullary constructor and the <new> header.
                type.check_nullary_constructor(pos)
                self.use_utility_code(Code.UtilityCode("#include <new>"))
            entry = self.declare(name, cname, type, pos, visibility)
            entry.is_variable = 1
            self.var_entries.append(entry)
            if type.is_memoryviewslice:
                self.has_memoryview_attrs = True
            elif type.is_pyobject and name != '__weakref__':
                self.has_pyobject_attrs = True
                if (not type.is_builtin_type
                        or not type.scope or type.scope.needs_gc()):
                    self.has_cyclic_pyobject_attrs = True
            if visibility not in ('private', 'public', 'readonly'):
                error(pos,
                    "Attribute of extension type cannot be declared %s" % visibility)
            if visibility in ('public', 'readonly'):
                # If the field is an external typedef, we cannot be sure about the type,
                # so do conversion ourself rather than rely on the CPython mechanism (through
                # a property; made in AnalyseDeclarationsTransform).
                entry.needs_property = True
                if name == "__weakref__":
                    error(pos, "Special attribute __weakref__ cannot be exposed to Python")
                if not type.is_pyobject:
                    if (not type.create_to_py_utility_code(self) or
                        (visibility=='public' and not
                         type.create_from_py_utility_code(self))):
                        error(pos,
                              "C attribute of type '%s' cannot be accessed from Python" % type)
            else:
                entry.needs_property = False
            return entry
        else:
            if type is unspecified_type:
                type = py_object_type
            # Add an entry for a class attribute.
            entry = Scope.declare_var(self, name, type, pos,
                                      cname=cname, visibility=visibility,
                                      api=api, in_pxd=in_pxd, is_cdef=is_cdef)
            entry.is_member = 1
            entry.is_pyglobal = 1 # xxx: is_pyglobal changes behaviour in so many places that
                                  # I keep it in for now. is_member should be enough
                                  # later on
            self.namespace_cname = "(PyObject *)%s" % self.parent_type.typeptr_cname
            return entry

    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Add an entry for a method.
        if name in ('__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__'):
            error(pos, "Special method %s must be implemented via __richcmp__" % name)
        if name == "__new__":
            error(pos, "__new__ method of extension type will change semantics "
                "in a future version of Pyrex and Cython. Use __cinit__ instead.")
        entry = self.declare_var(name, py_object_type, pos,
                                 visibility='extern')
        special_sig = get_special_method_signature(name)
        if special_sig:
            # Special methods get put in the method table with a particular
            # signature declared in advance.
            entry.signature = special_sig
            entry.is_special = 1
        else:
            entry.signature = pymethod_signature
            entry.is_special = 0

        self.pyfunc_entries.append(entry)
        return entry

    def lookup_here(self, name):
        """Look up *name* in this scope only, mapping __new__ to __cinit__
        and suppressing optimised builtin C methods for non-final subtypes."""
        if name == "__new__":
            name = EncodedString("__cinit__")
        entry = ClassScope.lookup_here(self, name)
        if entry and entry.is_builtin_cmethod:
            if not self.parent_type.is_builtin_type:
                # For subtypes of builtin types, we can only return
                # optimised C methods if the type is final.
                # Otherwise, subtypes may choose to override the
                # method, but the optimisation would prevent the
                # subtype method from being called.
                if not self.parent_type.is_final_type:
                    return None
        return entry

    def declare_cfunction(self, name, type, pos,
                          cname = None, visibility = 'private', api = 0, in_pxd = 0,
                          defining = 0, modifiers = (), utility_code = None):
        """Declare a cdef method, checking signature compatibility against
        any previous declaration of the same name."""
        if get_special_method_signature(name) and not self.parent_type.is_builtin_type:
            error(pos, "Special methods must be declared with 'def', not 'cdef'")
        args = type.args
        if not args:
            error(pos, "C method has no self argument")
        elif not self.parent_type.assignable_from(args[0].type):
            error(pos, "Self argument (%s) of C method '%s' does not match parent type (%s)" %
                  (args[0].type, name, self.parent_type))
        entry = self.lookup_here(name)
        if cname is None:
            cname = c_safe_identifier(name)
        if entry:
            if not entry.is_cfunction:
                warning(pos, "'%s' redeclared " % name, 0)
            else:
                if defining and entry.func_cname:
                    error(pos, "'%s' already defined" % name)
                #print "CClassScope.declare_cfunction: checking signature" ###
                if entry.is_final_cmethod and entry.is_inherited:
                    error(pos, "Overriding final methods is not allowed")
                elif type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
                    # Identical re-declaration: keep the existing entry.
                    pass
                elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
                    # Compatible but not identical: add an overriding entry.
                    entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers)
                    defining = 1
                else:
                    error(pos, "Signature not compatible with previous declaration")
                    error(entry.pos, "Previous declaration is here")
        else:
            if self.defined:
                error(pos,
                    "C method '%s' not previously declared in definition part of"
                    " extension type" % name)
            entry = self.add_cfunction(name, type, pos, cname,
                                       visibility, modifiers)
        if defining:
            entry.func_cname = self.mangle(Naming.func_prefix, name)
        entry.utility_code = utility_code
        type.entry = entry

        if u'inline' in modifiers:
            entry.is_inline_cmethod = True

        if (self.parent_type.is_final_type or entry.is_inline_cmethod or
            self.directives.get('final')):
            # Final methods can be called directly through final_func_cname,
            # bypassing the vtable.
            entry.is_final_cmethod = True
            entry.final_func_cname = entry.func_cname

        return entry

    def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
        # Add a cfunction entry without giving it a func_cname.
        prev_entry = self.lookup_here(name)
        entry = ClassScope.add_cfunction(self, name, type, pos, cname,
                                         visibility, modifiers)
        entry.is_cmethod = 1
        # Keep the overridden entry reachable for compatibility checks.
        entry.prev_entry = prev_entry
        return entry

    def declare_builtin_cfunction(self, name, type, cname, utility_code = None):
        # overridden methods of builtin types still have their Python
        # equivalent that must be accessible to support bound methods
        name = EncodedString(name)
        entry = self.declare_cfunction(name, type, None, cname, visibility='extern',
                                       utility_code = utility_code)
        var_entry = Entry(name, name, py_object_type)
        var_entry.is_variable = 1
        var_entry.is_builtin = 1
        var_entry.utility_code = utility_code
        entry.as_variable = var_entry
        return entry

    def declare_property(self, name, doc, pos):
        """Declare a property with its own PropertyScope for the accessors."""
        entry = self.lookup_here(name)
        if entry is None:
            entry = self.declare(name, name, py_object_type, pos, 'private')
        entry.is_property = 1
        entry.doc = doc
        entry.scope = PropertyScope(name,
            outer_scope = self.global_scope(), parent_scope = self)
        entry.scope.parent_type = self.parent_type
        self.property_entries.append(entry)
        return entry

    def declare_inherited_c_attributes(self, base_scope):
        # Declare entries for all the C attributes of an
        # inherited type, with cnames modified appropriately
        # to work with this type.
        # NOTE(review): adapt() ignores its 'cname' argument and uses the
        # enclosing loop's base_entry.cname instead.  Callers below always
        # pass base_entry.cname, so behavior matches, but confirm before
        # refactoring.
        def adapt(cname):
            return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)

        entries = base_scope.inherited_var_entries + base_scope.var_entries
        for base_entry in entries:
            entry = self.declare(
                base_entry.name, adapt(base_entry.cname),
                base_entry.type, None, 'private')
            entry.is_variable = 1
            self.inherited_var_entries.append(entry)

        # If the class defined in a pxd, specific entries have not been added.
        # Ensure now that the parent (base) scope has specific entries
        # Iterate over a copy as get_all_specialized_function_types() will mutate
        for base_entry in base_scope.cfunc_entries[:]:
            if base_entry.type.is_fused:
                base_entry.type.get_all_specialized_function_types()

        for base_entry in base_scope.cfunc_entries:
            cname = base_entry.cname
            var_entry = base_entry.as_variable
            is_builtin = var_entry and var_entry.is_builtin
            if not is_builtin:
                cname = adapt(cname)
            entry = self.add_cfunction(base_entry.name, base_entry.type,
                                       base_entry.pos, cname,
                                       base_entry.visibility, base_entry.func_modifiers)
            entry.is_inherited = 1
            if base_entry.is_final_cmethod:
                entry.is_final_cmethod = True
                entry.is_inline_cmethod = base_entry.is_inline_cmethod
                if (self.parent_scope == base_scope.parent_scope or
                        entry.is_inline_cmethod):
                    entry.final_func_cname = base_entry.final_func_cname
            if is_builtin:
                entry.is_builtin_cmethod = True
                entry.as_variable = var_entry
            if base_entry.utility_code:
                entry.utility_code = base_entry.utility_code
class CppClassScope(Scope):
    #  Namespace of a C++ class.

    is_cpp_class_scope = 1

    default_constructor = None
    type = None

    def __init__(self, name, outer_scope, templates=None):
        Scope.__init__(self, name, outer_scope, None)
        self.directives = outer_scope.directives
        self.inherited_var_entries = []
        if templates is not None:
            # Each template parameter becomes a placeholder type entry.
            for T in templates:
                template_entry = self.declare(
                    T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern')
                template_entry.is_type = 1

    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'extern',
                    api = 0, in_pxd = 0, is_cdef = 0,
                    allow_pyobject = 0, defining = 0):
        """Add an entry for a C++ class attribute or member function."""
        # Add an entry for an attribute.
        if not cname:
            cname = name
        entry = self.lookup_here(name)
        if defining and entry is not None:
            if not entry.type.same_as(type):
                error(pos, "Function signature does not match previous declaration")
        else:
            entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        if type.is_cfunction and self.type:
            # Qualify the member function's C name with the class type.
            entry.func_cname = "%s::%s" % (self.type.declaration_code(""), cname)
        if name != "this" and (defining or name != "<init>"):
            self.var_entries.append(entry)
        if type.is_pyobject and not allow_pyobject:
            error(pos,
                  "C++ class member cannot be a Python object")
        return entry

    def check_base_default_constructor(self, pos):
        # Look for default constructors in all base classes.
        # NOTE(review): when a base lacks a default constructor,
        # default_constructor is set to the (truthy) scope name, so the
        # elif branch below never fires on later calls — the error is
        # reported only once; confirm this is intended before changing.
        if self.default_constructor is None:
            entry = self.lookup(self.name)
            if not entry.type.base_classes:
                self.default_constructor = True
                return
            for base_class in entry.type.base_classes:
                if base_class is PyrexTypes.error_type:
                    continue
                temp_entry = base_class.scope.lookup_here("<init>")
                found = False
                if temp_entry is None:
                    continue
                for alternative in temp_entry.all_alternatives():
                    type = alternative.type
                    if type.is_ptr:
                        type = type.base_type
                    if not type.args:
                        # A nullary overload counts as a default constructor.
                        found = True
                        break
                if not found:
                    self.default_constructor = temp_entry.scope.name
                    error(pos, "no matching function for call to " \
                            "%s::%s()" % (temp_entry.scope.name, temp_entry.scope.name))
        elif not self.default_constructor:
            error(pos, "no matching function for call to %s::%s()" %
                  (self.default_constructor, self.default_constructor))

    def declare_cfunction(self, name, type, pos,
                          cname = None, visibility = 'extern', api = 0, in_pxd = 0,
                          defining = 0, modifiers = (), utility_code = None):
        """Declare a C++ member function.

        Constructors (class name or ``__init__``) are mapped to the special
        name ``<init>``; destructors (``__dealloc__``) to ``<del>``.
        Overloads are linked via ``overloaded_alternatives``.
        """
        if name in (self.name.split('::')[-1], '__init__') and cname is None:
            self.check_base_default_constructor(pos)
            cname = self.type.cname
            name = '<init>'
            type.return_type = PyrexTypes.InvisibleVoidType()
        elif name == '__dealloc__' and cname is None:
            cname = "~%s" % self.type.cname
            name = '<del>'
            type.return_type = PyrexTypes.InvisibleVoidType()
        prev_entry = self.lookup_here(name)
        entry = self.declare_var(name, type, pos,
                                 defining=defining,
                                 cname=cname, visibility=visibility)
        if prev_entry and not defining:
            entry.overloaded_alternatives = prev_entry.all_alternatives()
        entry.utility_code = utility_code
        type.entry = entry
        return entry

    def declare_inherited_cpp_attributes(self, base_scope):
        # Declare entries for all the C++ attributes of an
        # inherited type, with cnames modified appropriately
        # to work with this type.
        for base_entry in \
            base_scope.inherited_var_entries + base_scope.var_entries:
                # constructor is not inherited
                if base_entry.name == "<init>":
                    continue
                #print base_entry.name, self.entries
                if base_entry.name in self.entries:
                    # NOTE(review): this statement is a no-op — name clashes
                    # with already-declared entries are silently ignored.
                    base_entry.name # FIXME: is there anything to do in this case?
                entry = self.declare(base_entry.name, base_entry.cname,
                    base_entry.type, None, 'extern')
                entry.is_variable = 1
                self.inherited_var_entries.append(entry)
        for base_entry in base_scope.cfunc_entries:
            entry = self.declare_cfunction(base_entry.name, base_entry.type,
                                       base_entry.pos, base_entry.cname,
                                       base_entry.visibility, 0,
                                       modifiers = base_entry.func_modifiers,
                                       utility_code = base_entry.utility_code)
            entry.is_inherited = 1

    def specialize(self, values):
        """Return a copy of this scope with template parameters substituted
        according to *values* (template name -> concrete type)."""
        # NOTE(review): the new scope's 'type' attribute stays None here;
        # presumably it is filled in by the caller — confirm.
        scope = CppClassScope(self.name, self.outer_scope)
        for entry in self.entries.values():
            if entry.is_type:
                scope.declare_type(entry.name,
                                   entry.type.specialize(values),
                                   entry.pos,
                                   entry.cname,
                                   template=1)
            elif entry.type.is_cfunction:
                # Specialize every overload of the function.
                for e in entry.all_alternatives():
                    scope.declare_cfunction(e.name,
                                            e.type.specialize(values),
                                            e.pos,
                                            e.cname,
                                            utility_code = e.utility_code)
            else:
                scope.declare_var(entry.name,
                                  entry.type.specialize(values),
                                  entry.pos,
                                  entry.cname,
                                  entry.visibility)
        return scope
class PropertyScope(Scope):
    """Scope holding the ``__get__``, ``__set__`` and ``__del__`` accessor
    methods of an extension-type property.

    parent_type   PyExtensionType   The type to which the property belongs
    """

    is_property_scope = 1

    def declare_pyfunction(self, name, pos, allow_redefine=False):
        """Register a property accessor method.

        Only the three accessor names are accepted; any other name is
        reported as an error and None is returned.
        """
        accessor_sig = get_property_accessor_signature(name)
        if not accessor_sig:
            error(pos, "Only __get__, __set__ and __del__ methods allowed "
                "in a property declaration")
            return None
        entry = self.declare(name, name, py_object_type, pos, 'private')
        entry.is_special = 1
        entry.signature = accessor_sig
        return entry
class CConstScope(Scope):
    """Read-only view of another type's scope in which every entry is
    presented with a const-qualified type."""

    def __init__(self, const_base_type_scope):
        base = const_base_type_scope
        Scope.__init__(self,
                       'const_' + base.name,
                       base.outer_scope,
                       base.parent_scope)
        self.const_base_type_scope = base

    def lookup_here(self, name):
        """Look up *name* in the wrapped scope; on a hit, return a copy of
        the entry with its type wrapped in a const qualifier."""
        found = self.const_base_type_scope.lookup_here(name)
        if found is None:
            return None
        const_entry = copy.copy(found)
        const_entry.type = PyrexTypes.c_const_type(const_entry.type)
        return const_entry
class TemplateScope(Scope):
    """Scope holding the template parameters of a templated declaration;
    inherits the enclosing scope's compiler directives."""
    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, None)
        self.directives = outer_scope.directives
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import print_function

import sys
import os
# NOTE(review): shlex is not used anywhere below — likely a leftover from
# the sphinx-quickstart template; kept to avoid changing behavior.
import shlex

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))

# Replace heavyweight dependencies with mocks so autodoc can import the
# package without installing its full requirements.
import ultramock
ultramock.activate()

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Invenio-Accounts'
copyright = u'2015, CERN'
author = u'CERN'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
# (Importing the package here would pull in its dependencies; exec'ing
# version.py reads only the __version__ assignment.)
g = {}
with open(os.path.join('..', 'invenio_accounts', 'version.py'), 'rt') as fp:
    exec(fp.read(), g)
version = g['__version__']

# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# Read the Docs injects its own theme, so only pick one for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# only set the theme when we are not on RTD
if not on_rtd:
    try:
        import sphinx_rtd_theme
        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        print("`sphinx_rtd_theme` not found, pip install it", file=sys.stderr)
        html_theme = 'alabaster'

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio-accounts_namedoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'invenio-accounts.tex', u'invenio-accounts Documentation',
     u'CERN', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'invenio-accounts', u'invenio-accounts Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'invenio-accounts', u'Invenio-Accounts Documentation',
     author, 'invenio-accounts', 'Invenio module for managing user accounts.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
unknown
|
codeparrot/codeparrot-clean
| ||
#include <ATen/native/transformers/attention.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#include <ATen/native/transformers/xpu/flash_attn/flash_api.h>
namespace at {
namespace native {

// XPU (SYCL) entry point for scaled dot-product flash attention.
//
// Delegates all kernel work to sycltla::flash_attention_forward and repacks
// its outputs into the 9-tuple layout the dispatcher expects:
//   (attention, logsumexp, cu_seqlens_q, cu_seqlens_k,
//    max_seqlen_q, max_seqlen_k, philox_seed, philox_offset, debug_attn_mask)
// The debug mask slot is always an empty tensor here; `return_debug_mask`
// is accepted for signature compatibility but not consumed.
std::tuple<
    Tensor,
    Tensor,
    Tensor,
    Tensor,
    c10::SymInt,
    c10::SymInt,
    Tensor,
    Tensor,
    Tensor>
_scaled_dot_product_flash_attention_xpu(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    double dropout_p,
    bool is_causal,
    bool return_debug_mask,
    std::optional<double> scale) {
  // When no explicit scale is supplied, fall back to 1/sqrt(query.size(3)).
  // The default is only computed in the no-scale branch, matching the
  // original short-circuit behavior.
  const double softmax_scale = scale.has_value()
      ? scale.value()
      : (1.0 / std::sqrt(query.size(3)));

  auto
      [out,
       lse,
       cu_seqlens_q,
       cu_seqlens_k,
       max_seqlen_q,
       max_seqlen_k,
       rng_seed,
       rng_offset] =
          sycltla::flash_attention_forward(
              query, key, value, dropout_p, is_causal, softmax_scale);

  return std::make_tuple(
      out,
      lse,
      cu_seqlens_q,
      cu_seqlens_k,
      max_seqlen_q,
      max_seqlen_k,
      rng_seed,
      rng_offset,
      /* debug_attn_mask */ at::Tensor());
}

} // namespace native
} // namespace at
|
cpp
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/transformers/xpu/attention.cpp
|
#ifndef COMP_CREATOR_INCLUDED
#define COMP_CREATOR_INCLUDED
/* Copyright (c) 2019, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
// Declaration of chooser_compare_func_creator, in a separate file
// to make sure that parser_yystype.h does not need to depend on
// item_subselect.h.

// Forward declaration only: the full definition of Comp_creator lives
// elsewhere (this header deliberately avoids pulling in item_subselect.h).
class Comp_creator;

/**
  Convenience typedef for a function that returns factories for Item comparators
  (ie., returns Comp_creator).

  The function pointer takes a single bool argument, `invert`.

  @retval nullptr In case of semantic errors.
*/
using chooser_compare_func_creator = Comp_creator *(*)(bool invert);

#endif  // COMP_CREATOR_INCLUDED
|
c
|
github
|
https://github.com/mysql/mysql-server
|
sql/comp_creator.h
|
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Sqrt
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class SqrtTests(TestCase):
    """Exercise the Sqrt database function over NULL, Decimal, float and
    integer columns, plus its use as a registered field transform."""

    def test_null(self):
        # Sqrt of a NULL column annotates as None.
        IntegerModel.objects.create()
        instance = IntegerModel.objects.annotate(null_sqrt=Sqrt('normal')).first()
        self.assertIsNone(instance.null_sqrt)

    def test_decimal(self):
        # Decimal inputs stay Decimal after Sqrt.
        DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('0.6'))
        instance = DecimalModel.objects.annotate(
            n1_sqrt=Sqrt('n1'),
            n2_sqrt=Sqrt('n2'),
        ).first()
        self.assertIsInstance(instance.n1_sqrt, Decimal)
        self.assertIsInstance(instance.n2_sqrt, Decimal)
        self.assertAlmostEqual(instance.n1_sqrt, Decimal(math.sqrt(instance.n1)))
        self.assertAlmostEqual(instance.n2_sqrt, Decimal(math.sqrt(instance.n2)))

    def test_float(self):
        # Float inputs stay float after Sqrt.
        FloatModel.objects.create(f1=27.5, f2=0.33)
        instance = FloatModel.objects.annotate(
            f1_sqrt=Sqrt('f1'),
            f2_sqrt=Sqrt('f2'),
        ).first()
        self.assertIsInstance(instance.f1_sqrt, float)
        self.assertIsInstance(instance.f2_sqrt, float)
        self.assertAlmostEqual(instance.f1_sqrt, math.sqrt(instance.f1))
        self.assertAlmostEqual(instance.f2_sqrt, math.sqrt(instance.f2))

    def test_integer(self):
        # Integer inputs of every size are promoted to float by Sqrt.
        IntegerModel.objects.create(small=20, normal=15, big=1)
        instance = IntegerModel.objects.annotate(
            small_sqrt=Sqrt('small'),
            normal_sqrt=Sqrt('normal'),
            big_sqrt=Sqrt('big'),
        ).first()
        for field in ('small', 'normal', 'big'):
            annotated = getattr(instance, field + '_sqrt')
            self.assertIsInstance(annotated, float)
            self.assertAlmostEqual(annotated, math.sqrt(getattr(instance, field)))

    def test_transform(self):
        # Sqrt registered as a lookup transform can be used in filters.
        with register_lookup(DecimalField, Sqrt):
            DecimalModel.objects.create(n1=Decimal('6.0'), n2=Decimal('0'))
            DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('0'))
            instance = DecimalModel.objects.filter(n1__sqrt__gt=2).get()
            self.assertEqual(instance.n1, Decimal('6.0'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, prompts, flag_text
from ..core import io
from ..objects.exceptions import NotFoundError, NoEnvironmentForBranchError
from ..operations import commonops, terminateops
class TerminateController(AbstractBaseController):
    """Controller for the ``terminate`` command.

    Terminates either a single environment (with an interactive
    confirmation prompt unless ``--force`` is given) or, with ``--all``,
    the entire application.
    """

    class Meta:
        label = 'terminate'
        description = strings['terminate.info']
        arguments = AbstractBaseController.Meta.arguments + [
            (['--force'], dict(action='store_true',
                               help=flag_text['terminate.force'])),
            (['--all'], dict(action='store_true',
                             help=flag_text['terminate.all'])),
            (['-nh', '--nohang'], dict(action='store_true',
                                       help=flag_text['terminate.nohang'])),
            (['--timeout'], dict(type=int, help=flag_text['general.timeout'])),
        ]
        usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
        epilog = strings['terminate.epilog']

    def do_command(self):
        """Run the terminate command against the parsed CLI arguments."""
        app_name = self.get_app_name()
        force = self.app.pargs.force
        # Renamed from `all` to avoid shadowing the builtin all().
        terminate_all = self.app.pargs.all
        timeout = self.app.pargs.timeout
        nohang = self.app.pargs.nohang

        if terminate_all:
            # Only clean up local state when no explicit region was passed
            # on the command line.
            cleanup = not self.app.pargs.region
            terminateops.delete_app(app_name, force, nohang=nohang,
                                    cleanup=cleanup, timeout=timeout)
        else:
            try:
                env_name = self.get_env_name()
            except NoEnvironmentForBranchError as e:
                io.echo(strings['terminate.noenv'])
                raise e
            if not force:
                # make sure env exists before prompting for confirmation
                env_names = commonops.get_env_names(app_name)
                if env_name not in env_names:
                    raise NotFoundError('Environment ' +
                                        env_name + ' not found')

                io.echo(prompts['terminate.confirm'].replace('{env-name}',
                                                             env_name))
                io.validate_action(prompts['terminate.validate'], env_name)

            terminateops.terminate(env_name, nohang=nohang,
                                   timeout=timeout)
|
unknown
|
codeparrot/codeparrot-clean
| ||
use std::iter::FromIterator;
use gccjit::{BinaryOp, RValue, ToRValue, Type};
#[cfg(feature = "master")]
use gccjit::{ComparisonOp, UnaryOp};
use rustc_abi::{Align, Size};
use rustc_codegen_ssa::base::compare_simd_types;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
#[cfg(feature = "master")]
use rustc_codegen_ssa::errors::ExpectedPointerMutability;
use rustc_codegen_ssa::errors::InvalidMonomorphization;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
#[cfg(feature = "master")]
use rustc_hir as hir;
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, Ty};
use rustc_span::{Span, Symbol, sym};
use crate::builder::Builder;
#[cfg(not(feature = "master"))]
use crate::common::SignType;
#[cfg(feature = "master")]
use crate::context::CodegenCx;
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
bx: &mut Builder<'a, 'gcc, 'tcx>,
name: Symbol,
args: &[OperandRef<'tcx, RValue<'gcc>>],
ret_ty: Ty<'tcx>,
llret_ty: Type<'gcc>,
span: Span,
) -> Result<RValue<'gcc>, ()> {
// macros for error handling:
macro_rules! return_error {
($err:expr) => {{
bx.tcx.dcx().emit_err($err);
return Err(());
}};
}
macro_rules! require {
($cond:expr, $err:expr) => {
if !$cond {
return_error!($err);
}
};
}
macro_rules! require_simd {
($ty: expr, $diag: expr) => {
require!($ty.is_simd(), $diag)
};
}
// TODO(antoyo): refactor with the above require_simd macro that was changed in cg_llvm.
#[cfg(feature = "master")]
macro_rules! require_simd2 {
($ty: expr, $variant:ident) => {{
require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
$ty.simd_size_and_type(bx.tcx())
}};
}
if name == sym::simd_select_bitmask {
require_simd!(
args[1].layout.ty,
InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty }
);
let (len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let expected_int_bits = (len.max(8) - 1).next_power_of_two();
let expected_bytes = len / 8 + ((!len.is_multiple_of(8)) as u64);
let mask_ty = args[0].layout.ty;
let mut mask = match *mask_ty.kind() {
ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Array(elem, len)
if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
&& len
.try_to_target_usize(bx.tcx)
.expect("expected monomorphic const in codegen")
== expected_bytes =>
{
let place = PlaceRef::alloca(bx, args[0].layout);
args[0].val.store(bx, place);
let int_ty = bx.type_ix(expected_bytes * 8);
let ptr = bx.pointercast(place.val.llval, bx.cx.type_ptr_to(int_ty));
bx.load(int_ty, ptr, Align::ONE)
}
_ => return_error!(InvalidMonomorphization::InvalidBitmask {
span,
name,
mask_ty,
expected_int_bits,
expected_bytes
}),
};
let arg1 = args[1].immediate();
let arg1_type = arg1.get_type();
let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type");
let arg1_element_type = arg1_vector_type.get_element_type();
// NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of
// integer.
let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8);
let vector_mask_type =
bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64);
let mut elements = vec![];
let one = bx.context.new_rvalue_one(mask.get_type());
for _ in 0..len {
let element = bx.context.new_cast(None, mask & one, mask_element_type);
elements.push(element);
mask = mask >> one;
}
let vector_mask = bx.context.new_rvalue_from_vector(None, vector_mask_type, &elements);
return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate()));
}
#[cfg(feature = "master")]
if name == sym::simd_splat {
let (out_len, out_ty) = require_simd2!(ret_ty, SimdReturn);
require!(
args[0].layout.ty == out_ty,
InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: out_ty,
vector_type: ret_ty,
}
);
let vec_ty = llret_ty.unqualified().dyncast_vector().expect("vector return type");
let elem_ty = vec_ty.get_element_type();
// Cast pointer type to usize (GCC does not support pointer SIMD vectors).
let value = args[0];
let scalar = if value.layout.ty.is_numeric() {
value.immediate()
} else if value.layout.ty.is_raw_ptr() {
bx.ptrtoint(value.immediate(), elem_ty)
} else {
return_error!(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty: ret_ty,
in_elem: value.layout.ty
});
};
let elements = vec![scalar; out_len as usize];
return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &elements));
}
// every intrinsic below takes a SIMD vector as its first argument
require_simd!(
args[0].layout.ty,
InvalidMonomorphization::SimdInput { span, name, ty: args[0].layout.ty }
);
let in_ty = args[0].layout.ty;
let comparison = match name {
sym::simd_eq => Some(BinOp::Eq),
sym::simd_ne => Some(BinOp::Ne),
sym::simd_lt => Some(BinOp::Lt),
sym::simd_le => Some(BinOp::Le),
sym::simd_gt => Some(BinOp::Gt),
sym::simd_ge => Some(BinOp::Ge),
_ => None,
};
let (in_len, in_elem) = args[0].layout.ty.simd_size_and_type(bx.tcx());
if let Some(cmp_op) = comparison {
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len
}
);
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
);
let arg1 = args[0].immediate();
// NOTE: we get different vector types for the same vector type and libgccjit doesn't
// compare them as equal, so bitcast.
// FIXME(antoyo): allow comparing vector types as equal in libgccjit.
let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type());
return Ok(compare_simd_types(bx, arg1, arg2, in_elem, llret_ty, cmp_op));
}
let simd_bswap = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> {
let v_type = vector.get_type();
let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let elem_size_bytes = elem_type.get_size();
if elem_size_bytes == 1 {
return vector;
}
let type_size_bytes = elem_size_bytes as u64 * in_len;
let shuffle_indices = Vec::from_iter(0..type_size_bytes);
let byte_vector_type = bx.context.new_vector_type(bx.type_u8(), type_size_bytes);
let byte_vector = bx.context.new_bitcast(None, args[0].immediate(), byte_vector_type);
#[cfg(not(feature = "master"))]
let shuffled = {
let new_elements: Vec<_> = shuffle_indices
.chunks_exact(elem_size_bytes as _)
.flat_map(|x| x.iter().rev())
.map(|&i| {
let index = bx.context.new_rvalue_from_long(bx.u64_type, i as _);
bx.extract_element(byte_vector, index)
})
.collect();
bx.context.new_rvalue_from_vector(None, byte_vector_type, &new_elements)
};
#[cfg(feature = "master")]
let shuffled = {
let indices: Vec<_> = shuffle_indices
.chunks_exact(elem_size_bytes as _)
.flat_map(|x| x.iter().rev())
.map(|&i| bx.context.new_rvalue_from_int(bx.u8_type, i as _))
.collect();
let mask = bx.context.new_rvalue_from_vector(None, byte_vector_type, &indices);
bx.context.new_rvalue_vector_perm(None, byte_vector, byte_vector, mask)
};
bx.context.new_bitcast(None, shuffled, v_type)
};
if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop) {
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
);
}
#[cfg(feature = "master")]
if name == sym::simd_funnel_shl {
return Ok(simd_funnel_shift(
bx,
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
true,
));
}
#[cfg(feature = "master")]
if name == sym::simd_funnel_shr {
return Ok(simd_funnel_shift(
bx,
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
false,
));
}
if name == sym::simd_bswap {
return Ok(simd_bswap(bx, args[0].immediate()));
}
let simd_ctpop = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> {
let mut vector_elements = vec![];
let elem_ty = bx.element_type(llret_ty);
for i in 0..in_len {
let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64);
let element = bx.extract_element(vector, index).to_rvalue();
let result = bx.context.new_cast(None, bx.pop_count(element), elem_ty);
vector_elements.push(result);
}
bx.context.new_rvalue_from_vector(None, llret_ty, &vector_elements)
};
if name == sym::simd_ctpop {
return Ok(simd_ctpop(bx, args[0].immediate()));
}
// We use a different algorithm from non-vector bitreverse to take advantage of most
// processors' vector shuffle units. It works like this:
// 1. Generate pre-reversed low and high nibbles as a vector.
// 2. Byte-swap the input.
// 3. Mask off the low and high nibbles of each byte in the byte-swapped input.
// 4. Shuffle the pre-reversed low and high-nibbles using the masked nibbles as a shuffle mask.
// 5. Combine the results of the shuffle back together and cast back to the original type.
#[cfg(feature = "master")]
if name == sym::simd_bitreverse {
let vector = args[0].immediate();
let v_type = vector.get_type();
let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let elem_size_bytes = elem_type.get_size();
let type_size_bytes = elem_size_bytes as u64 * in_len;
// We need to ensure at least 16 entries in our vector type, since the pre-reversed vectors
// we generate below have 16 entries in them. `new_rvalue_vector_perm` requires the mask
// vector to be of the same length as the source vectors.
let byte_vector_type_size = type_size_bytes.max(16);
let byte_vector_type = bx.context.new_vector_type(bx.u8_type, type_size_bytes);
let long_byte_vector_type = bx.context.new_vector_type(bx.u8_type, byte_vector_type_size);
// Step 1: Generate pre-reversed low and high nibbles as a vector.
let zero_byte = bx.context.new_rvalue_zero(bx.u8_type);
let hi_nibble_elements: Vec<_> = (0u8..16)
.map(|x| bx.context.new_rvalue_from_int(bx.u8_type, x.reverse_bits() as _))
.chain((16..byte_vector_type_size).map(|_| zero_byte))
.collect();
let hi_nibble =
bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &hi_nibble_elements);
let lo_nibble_elements: Vec<_> = (0u8..16)
.map(|x| bx.context.new_rvalue_from_int(bx.u8_type, (x.reverse_bits() >> 4) as _))
.chain((16..byte_vector_type_size).map(|_| zero_byte))
.collect();
let lo_nibble =
bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &lo_nibble_elements);
let mask = bx.context.new_rvalue_from_vector(
None,
long_byte_vector_type,
&vec![bx.context.new_rvalue_from_int(bx.u8_type, 0x0f); byte_vector_type_size as _],
);
let four_vec = bx.context.new_rvalue_from_vector(
None,
long_byte_vector_type,
&vec![bx.context.new_rvalue_from_int(bx.u8_type, 4); byte_vector_type_size as _],
);
// Step 2: Byte-swap the input.
let swapped = simd_bswap(bx, args[0].immediate());
let byte_vector = bx.context.new_bitcast(None, swapped, byte_vector_type);
// We're going to need to extend the vector with zeros to make sure that the types are the
// same, since that's what new_rvalue_vector_perm expects.
let byte_vector = if byte_vector_type_size > type_size_bytes {
let mut byte_vector_elements = Vec::with_capacity(byte_vector_type_size as _);
for i in 0..type_size_bytes {
let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
let val = bx.extract_element(byte_vector, idx);
byte_vector_elements.push(val);
}
for _ in type_size_bytes..byte_vector_type_size {
byte_vector_elements.push(zero_byte);
}
bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &byte_vector_elements)
} else {
bx.context.new_bitcast(None, byte_vector, long_byte_vector_type)
};
// Step 3: Mask off the low and high nibbles of each byte in the byte-swapped input.
let masked_hi = (byte_vector >> four_vec) & mask;
let masked_lo = byte_vector & mask;
// Step 4: Shuffle the pre-reversed low and high-nibbles using the masked nibbles as a shuffle mask.
let hi = bx.context.new_rvalue_vector_perm(None, hi_nibble, hi_nibble, masked_lo);
let lo = bx.context.new_rvalue_vector_perm(None, lo_nibble, lo_nibble, masked_hi);
// Step 5: Combine the results of the shuffle back together and cast back to the original type.
let result = hi | lo;
let cast_ty =
bx.context.new_vector_type(elem_type, byte_vector_type_size / (elem_size_bytes as u64));
// we might need to truncate if sizeof(v_type) < sizeof(cast_type)
if type_size_bytes < byte_vector_type_size {
let cast_result = bx.context.new_bitcast(None, result, cast_ty);
let elems: Vec<_> = (0..in_len)
.map(|i| {
let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
bx.extract_element(cast_result, idx)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, v_type, &elems));
}
// avoid the unnecessary truncation as an optimization.
return Ok(bx.context.new_bitcast(None, result, v_type));
}
// since gcc doesn't have vector shuffle methods available in non-patched builds, fallback to
// component-wise bitreverses if they're not available.
#[cfg(not(feature = "master"))]
if name == sym::simd_bitreverse {
let vector = args[0].immediate();
let vector_ty = vector.get_type();
let vector_type = vector_ty.unqualified().dyncast_vector().expect("vector type");
let num_elements = vector_type.get_num_units();
let elem_type = vector_type.get_element_type();
let elem_size_bytes = elem_type.get_size();
let num_type = elem_type.to_unsigned(bx.cx);
let new_elements: Vec<_> = (0..num_elements)
.map(|idx| {
let index = bx.context.new_rvalue_from_long(num_type, idx as _);
let extracted_value = bx.extract_element(vector, index).to_rvalue();
bx.bit_reverse(elem_size_bytes as u64 * 8, extracted_value)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, vector_ty, &new_elements));
}
if name == sym::simd_ctlz || name == sym::simd_cttz {
let vector = args[0].immediate();
let elements: Vec<_> = (0..in_len)
.map(|i| {
let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
let value = bx.extract_element(vector, index).to_rvalue();
let value_type = value.get_type();
let element = if name == sym::simd_ctlz {
bx.count_leading_zeroes(value_type.get_size() as u64 * 8, value)
} else {
bx.count_trailing_zeroes(value_type.get_size() as u64 * 8, value)
};
bx.context.new_cast(None, element, value_type)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, vector.get_type(), &elements));
}
if name == sym::simd_shuffle {
// Make sure this is actually a SIMD vector.
let idx_ty = args[2].layout.ty;
let n: u64 = if idx_ty.is_simd()
&& matches!(*idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
{
idx_ty.simd_size_and_type(bx.cx.tcx).0
} else {
return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
};
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
out_len == n,
InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
);
require!(
in_elem == out_ty,
InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
);
let vector = args[2].immediate();
return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), vector));
}
#[cfg(feature = "master")]
if name == sym::simd_insert || name == sym::simd_insert_dyn {
require!(
in_elem == args[2].layout.ty,
InvalidMonomorphization::InsertedType {
span,
name,
in_elem,
in_ty,
out_ty: args[2].layout.ty
}
);
// TODO(antoyo): For simd_insert, check if the index is a constant of the correct size.
let vector = args[0].immediate();
let index = args[1].immediate();
let value = args[2].immediate();
let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector");
bx.llbb().add_assignment(None, variable, vector);
let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index);
// TODO(antoyo): if simd_insert is constant, use BIT_REF.
bx.llbb().add_assignment(None, lvalue, value);
return Ok(variable.to_rvalue());
}
#[cfg(feature = "master")]
if name == sym::simd_extract || name == sym::simd_extract_dyn {
require!(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
// TODO(antoyo): For simd_extract, check if the index is a constant of the correct size.
let vector = args[0].immediate();
let index = args[1].immediate();
return Ok(bx.context.new_vector_access(None, vector, index).to_rvalue());
}
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
require_simd!(
args[1].layout.ty,
InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty }
);
let (v_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
);
match *m_elem_ty.kind() {
ty::Int(_) | ty::Uint(_) => {}
_ => return_error!(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: m_elem_ty
}),
}
return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
}
if name == sym::simd_cast_ptr {
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len
}
);
match *in_elem.kind() {
ty::RawPtr(p_ty, _) => {
let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
bx.tcx.normalize_erasing_regions(ty::TypingEnv::fully_monomorphized(), ty)
});
require!(
metadata.is_unit(),
InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
);
}
_ => {
return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
}
}
match *out_elem.kind() {
ty::RawPtr(p_ty, _) => {
let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
bx.tcx.normalize_erasing_regions(ty::TypingEnv::fully_monomorphized(), ty)
});
require!(
metadata.is_unit(),
InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
);
}
_ => {
return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
}
}
let arg = args[0].immediate();
let elem_type = llret_ty.dyncast_vector().expect("vector return type").get_element_type();
let values: Vec<_> = (0..in_len)
.map(|i| {
let idx = bx.gcc_int(bx.usize_type, i as _);
let value = bx.extract_element(arg, idx);
bx.pointercast(value, elem_type)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &values));
}
if name == sym::simd_expose_provenance {
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len
}
);
match *in_elem.kind() {
ty::RawPtr(_, _) => {}
_ => {
return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
}
}
match *out_elem.kind() {
ty::Uint(ty::UintTy::Usize) => {}
_ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
}
let arg = args[0].immediate();
let elem_type = llret_ty.dyncast_vector().expect("vector return type").get_element_type();
let values: Vec<_> = (0..in_len)
.map(|i| {
let idx = bx.gcc_int(bx.usize_type, i as _);
let value = bx.extract_element(arg, idx);
bx.ptrtoint(value, elem_type)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &values));
}
if name == sym::simd_with_exposed_provenance {
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len
}
);
match *in_elem.kind() {
ty::Uint(ty::UintTy::Usize) => {}
_ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
}
match *out_elem.kind() {
ty::RawPtr(_, _) => {}
_ => {
return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
}
}
let arg = args[0].immediate();
let elem_type = llret_ty.dyncast_vector().expect("vector return type").get_element_type();
let values: Vec<_> = (0..in_len)
.map(|i| {
let idx = bx.gcc_int(bx.usize_type, i as _);
let value = bx.extract_element(arg, idx);
bx.inttoptr(value, elem_type)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &values));
}
#[cfg(feature = "master")]
if name == sym::simd_cast || name == sym::simd_as {
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len
}
);
// casting cares about nominal type, not just structural type
if in_elem == out_elem {
return Ok(args[0].immediate());
}
enum Style {
Float,
Int,
Unsupported,
}
let in_style = match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => Style::Int,
ty::Float(_) => Style::Float,
_ => Style::Unsupported,
};
let out_style = match *out_elem.kind() {
ty::Int(_) | ty::Uint(_) => Style::Int,
ty::Float(_) => Style::Float,
_ => Style::Unsupported,
};
match (in_style, out_style) {
(Style::Unsupported, Style::Unsupported) => {
require!(
false,
InvalidMonomorphization::UnsupportedCast {
span,
name,
in_ty,
in_elem,
ret_ty,
out_elem
}
);
}
_ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)),
}
}
macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match *in_elem.kind() {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
})*
_ => {},
}
return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem })
})*
}
}
if name == sym::simd_bitmask {
// The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
// vector mask and returns the most significant bit (MSB) of each lane in the form
// of either:
// * an unsigned integer
// * an array of `u8`
// If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
//
// The bit order of the result depends on the byte endianness, LSB-first for little
// endian and MSB-first for big endian.
let vector = args[0].immediate();
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let expected_int_bits = in_len.max(8);
let expected_bytes =
expected_int_bits / 8 + ((!expected_int_bits.is_multiple_of(8)) as u64);
// FIXME(antoyo): that's not going to work for masks bigger than 128 bits.
let result_type = bx.type_ix(expected_int_bits);
let mut result = bx.context.new_rvalue_zero(result_type);
let elem_size = elem_type.get_size() * 8;
let sign_shift = bx.context.new_rvalue_from_int(elem_type, elem_size as i32 - 1);
let one = bx.context.new_rvalue_one(elem_type);
for i in 0..in_len {
let elem =
bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32));
let shifted = elem >> sign_shift;
let masked = shifted & one;
result = result
| (bx.context.new_cast(None, masked, result_type)
<< bx.context.new_rvalue_from_int(result_type, i as i32));
}
match *ret_ty.kind() {
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
// Zero-extend iN to the bitmask type:
return Ok(result);
}
ty::Array(elem, len)
if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
&& len
.try_to_target_usize(bx.tcx)
.expect("expected monomorphic const in codegen")
== expected_bytes =>
{
// Zero-extend iN to the array length:
let ze = bx.zext(result, bx.type_ix(expected_bytes * 8));
// Convert the integer to a byte array
let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
bx.store(ze, ptr, Align::ONE);
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
return Ok(bx.load(array_ty, ptr, Align::ONE));
}
_ => return_error!(InvalidMonomorphization::CannotReturn {
span,
name,
ret_ty,
expected_int_bits,
expected_bytes
}),
}
}
/// Lower one of the "simple" float vector intrinsics (`simd_fsqrt`,
/// `simd_ceil`, `simd_fsin`, ...) by calling the matching GCC libm-style
/// builtin on every lane and reassembling the results into a vector.
///
/// Returns `Err(())` after emitting a diagnostic when the element type is
/// not a supported float width or the intrinsic name is unrecognized.
fn simd_simple_float_intrinsic<'gcc, 'tcx>(
    name: Symbol,
    in_elem: Ty<'_>,
    in_ty: Ty<'_>,
    in_len: u64,
    bx: &mut Builder<'_, 'gcc, 'tcx>,
    span: Span,
    args: &[OperandRef<'tcx, RValue<'gcc>>],
) -> Result<RValue<'gcc>, ()> {
    // Emit a monomorphization error and abort codegen of this intrinsic.
    macro_rules! return_error {
        ($err:expr) => {{
            bx.tcx.dcx().emit_err($err);
            return Err(());
        }};
    }
    // These intrinsics are only defined for float element types.
    let ty::Float(ref f) = *in_elem.kind() else {
        return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
    };
    let elem_ty = bx.cx.type_float_from_ty(*f);
    // Select the builtin-name suffix and an optional cast-through type:
    // - f32 uses the "f"-suffixed builtins (e.g. `sqrtf`);
    // - f64 uses the unsuffixed ones (e.g. `sqrt`);
    // - f16 gets no suffix but casts each lane through `double`, since there
    //   is no dedicated f16 builtin to call.
    let (elem_ty_str, elem_ty, cast_type) = match f.bit_width() {
        16 => ("", elem_ty, Some(bx.cx.double_type)),
        32 => ("f", elem_ty, None),
        64 => ("", elem_ty, None),
        _ => {
            return_error!(InvalidMonomorphization::FloatingPointVector {
                span,
                name,
                f_ty: *f,
                in_ty
            });
        }
    };
    let vec_ty = bx.cx.type_vector(elem_ty, in_len);
    // Map the intrinsic symbol to the libm function family it lowers to.
    let intr_name = match name {
        sym::simd_ceil => "ceil",
        sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103
        sym::simd_fcos => "cos",
        sym::simd_fexp2 => "exp2",
        sym::simd_fexp => "exp",
        sym::simd_flog10 => "log10",
        sym::simd_flog2 => "log2",
        sym::simd_flog => "log",
        sym::simd_floor => "floor",
        sym::simd_fma => "fma",
        sym::simd_relaxed_fma => "fma", // FIXME: this should relax to non-fused multiply-add when necessary
        sym::simd_fsin => "sin",
        sym::simd_fsqrt => "sqrt",
        sym::simd_round => "round",
        sym::simd_round_ties_even => "rint",
        sym::simd_trunc => "trunc",
        _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
    };
    let builtin_name = format!("{}{}", intr_name, elem_ty_str);
    let function = bx.context.get_builtin_function(builtin_name);
    // TODO(antoyo): add platform-specific behavior here for architectures that have these
    // intrinsics as instructions (for instance, gpus)
    // Scalarize: extract each lane from every argument vector, call the
    // builtin per lane, and collect the scalar results.
    let mut vector_elements = vec![];
    for i in 0..in_len {
        let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64);
        let mut arguments = vec![];
        for arg in args {
            let mut element = bx.extract_element(arg.immediate(), index).to_rvalue();
            // FIXME: it would probably be better to not have casts here and use the proper
            // instructions.
            if let Some(typ) = cast_type {
                // Widen the lane (f16 path) before calling the builtin.
                element = bx.context.new_cast(None, element, typ);
            }
            arguments.push(element);
        }
        let mut result = bx.context.new_call(None, function, &arguments);
        if cast_type.is_some() {
            // Narrow the result back to the original element type.
            result = bx.context.new_cast(None, result, elem_ty);
        }
        vector_elements.push(result);
    }
    // Rebuild the output vector from the per-lane results.
    let c = bx.context.new_rvalue_from_vector(None, vec_ty, &vector_elements);
    Ok(c)
}
if std::matches!(
name,
sym::simd_ceil
| sym::simd_fabs
| sym::simd_fcos
| sym::simd_fexp2
| sym::simd_fexp
| sym::simd_flog10
| sym::simd_flog2
| sym::simd_flog
| sym::simd_floor
| sym::simd_fma
| sym::simd_relaxed_fma
| sym::simd_fsin
| sym::simd_fsqrt
| sym::simd_round
| sym::simd_round_ties_even
| sym::simd_trunc
) {
return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
}
#[cfg(feature = "master")]
fn vector_ty<'gcc, 'tcx>(
    cx: &CodegenCx<'gcc, 'tcx>,
    elem_ty: Ty<'tcx>,
    vec_len: u64,
) -> Type<'gcc> {
    // FIXME: use cx.layout_of(ty).llvm_type() ?
    // Translate the rustc scalar element type into its GCC counterpart, then
    // wrap it in a GCC vector type of `vec_len` lanes. Only integer and float
    // element types can occur here.
    let gcc_elem_ty = match *elem_ty.kind() {
        ty::Int(int_ty) => cx.type_int_from_ty(int_ty),
        ty::Uint(uint_ty) => cx.type_uint_from_ty(uint_ty),
        ty::Float(float_ty) => cx.type_float_from_ty(float_ty),
        _ => unreachable!(),
    };
    cx.type_vector(gcc_elem_ty, vec_len)
}
#[cfg(feature = "master")]
fn gather<'a, 'gcc, 'tcx>(
    default: RValue<'gcc>,
    pointers: RValue<'gcc>,
    mask: RValue<'gcc>,
    bx: &mut Builder<'a, 'gcc, 'tcx>,
    in_len: u64,
    invert: bool,
) -> RValue<'gcc> {
    // Emulates a masked gather: load every lane unconditionally, then use a
    // vector shuffle to select, per lane, either the loaded value or the
    // corresponding `default` lane depending on the mask.
    // NOTE(review): every pointer is dereferenced even for masked-off lanes,
    // which presumes all lanes hold dereferenceable pointers — TODO confirm
    // callers guarantee this.
    let vector_type = default.get_type();
    let elem_type =
        vector_type.unqualified().dyncast_vector().expect("vector type").get_element_type();
    // Scalar loads: `pointers[i]` is a pointer-sized integer reinterpreted
    // as `*elem_type` and dereferenced.
    let mut values = Vec::with_capacity(in_len as usize);
    for i in 0..in_len {
        let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
        let int = bx.context.new_vector_access(None, pointers, index).to_rvalue();
        let ptr_type = elem_type.make_pointer();
        let ptr = bx.context.new_bitcast(None, int, ptr_type);
        let value = ptr.dereference(None).to_rvalue();
        values.push(value);
    }
    let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values);
    // Build the shuffle selector: lane i selects index `i` (first operand)
    // when the mask lane is 0, and `i + in_len` (second operand) otherwise.
    // NOTE(review): `in_len & mask_lane` only equals `in_len` when the mask
    // lane is all-ones (e.g. -1) — appears to assume 0/all-ones masks; confirm.
    let mut mask_types = Vec::with_capacity(in_len as usize);
    let mut mask_values = Vec::with_capacity(in_len as usize);
    for i in 0..in_len {
        let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
        mask_types.push(bx.context.new_field(None, bx.i32_type, "m"));
        let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue();
        let mask_value_cast = bx.context.new_cast(None, mask_value, bx.i32_type);
        let masked =
            bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value_cast;
        let value = index + masked;
        mask_values.push(value);
    }
    let mask_type = bx.context.new_struct_type(None, "mask_type", &mask_types);
    let mask = bx.context.new_struct_constructor(None, mask_type.as_type(), None, &mask_values);
    // `invert` swaps the shuffle operands (used by simd_scatter so that the
    // masked-off lanes keep the freshly loaded values instead of `default`).
    if invert {
        bx.shuffle_vector(vector, default, mask)
    } else {
        bx.shuffle_vector(default, vector, mask)
    }
}
#[cfg(feature = "master")]
if name == sym::simd_gather {
// simd_gather(values: <N x T>, pointers: <N x *_ T>,
// mask: <N x i{M}>) -> <N x T>
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
require_simd!(
args[1].layout.ty,
InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty }
);
require_simd!(
args[2].layout.ty,
InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty }
);
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
// Of the same length:
let (out_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (out_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::SecondArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[1].layout.ty,
out_len
}
);
require!(
in_len == out_len2,
InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[2].layout.ty,
out_len: out_len2
}
);
// The return type must match the first argument type
require!(
ret_ty == in_ty,
InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
);
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
match *t.kind() {
ty::RawPtr(p_ty, _) => 1 + ptr_count(p_ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match *t.kind() {
ty::RawPtr(p_ty, _) => non_ptr(p_ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx());
let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match *element_ty1.kind() {
ty::RawPtr(p_ty, _) if p_ty == in_elem => {
(ptr_count(element_ty1), non_ptr(element_ty1))
}
_ => {
require!(
false,
InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: element_ty1,
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Not,
}
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(element_ty0));
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be an integer type of any width:
// TODO: also support unsigned integers.
let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
match *element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
false,
InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
);
}
}
return Ok(gather(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
bx,
in_len,
false,
));
}
#[cfg(feature = "master")]
if name == sym::simd_scatter {
// simd_scatter(values: <N x T>, pointers: <N x *mut T>,
// mask: <N x i{M}>) -> ()
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
require_simd!(
args[1].layout.ty,
InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty }
);
require_simd!(
args[2].layout.ty,
InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty }
);
// Of the same length:
let (element_len1, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (element_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx());
require!(
in_len == element_len1,
InvalidMonomorphization::SecondArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[1].layout.ty,
out_len: element_len1
}
);
require!(
in_len == element_len2,
InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[2].layout.ty,
out_len: element_len2
}
);
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
match *t.kind() {
ty::RawPtr(p_ty, _) => 1 + ptr_count(p_ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match *t.kind() {
ty::RawPtr(p_ty, _) => non_ptr(p_ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx());
let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match *element_ty1.kind() {
ty::RawPtr(p_ty, mutability)
if p_ty == in_elem && mutability == hir::Mutability::Mut =>
{
(ptr_count(element_ty1), non_ptr(element_ty1))
}
_ => {
require!(
false,
InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: element_ty1,
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Mut,
}
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(element_ty0));
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
// TODO: also support unsigned integers.
match *element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
false,
InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
);
}
}
let result =
gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), bx, in_len, true);
let pointers = args[1].immediate();
let vector_type = if pointer_count > 1 {
bx.context.new_vector_type(bx.usize_type, in_len)
} else {
vector_ty(bx, underlying_ty, in_len)
};
let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type();
for i in 0..in_len {
let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
let value = bx.context.new_vector_access(None, result, index);
let int = bx.context.new_vector_access(None, pointers, index).to_rvalue();
let ptr_type = elem_type.make_pointer();
let ptr = bx.context.new_bitcast(None, int, ptr_type);
bx.llbb().add_assignment(None, ptr.dereference(None), value);
}
return Ok(bx.context.new_rvalue_zero(bx.i32_type));
}
arith_binary! {
simd_add: Uint, Int => add, Float => fadd;
simd_sub: Uint, Int => sub, Float => fsub;
simd_mul: Uint, Int => mul, Float => fmul;
simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
simd_rem: Uint => urem, Int => srem, Float => frem;
simd_shl: Uint, Int => shl;
simd_shr: Uint => lshr, Int => ashr;
simd_and: Uint, Int => and;
simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
simd_xor: Uint, Int => xor;
simd_fmin: Float => vector_fmin;
simd_fmax: Float => vector_fmax;
}
macro_rules! arith_unary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match *in_elem.kind() {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate()))
})*
_ => {},
}
return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem })
})*
}
}
arith_unary! {
simd_neg: Int => neg, Float => fneg;
}
#[cfg(feature = "master")]
if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let is_add = name == sym::simd_saturating_add;
let ptr_bits = bx.tcx().data_layout.pointer_size().bits() as _;
let (signed, elem_width, elem_ty) = match *in_elem.kind() {
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)),
ty::Uint(i) => {
(false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i))
}
_ => {
return_error!(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
vector_type: args[0].layout.ty,
});
}
};
let result = match (signed, is_add) {
(false, true) => {
let res = lhs + rhs;
let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
res | cmp
}
(true, true) => {
// Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
// TODO(antoyo): improve using conditional operators if possible.
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let arg_type = lhs.get_type().unqualified();
// TODO(antoyo): convert lhs and rhs to unsigned.
let sum = lhs + rhs;
let vector_type = arg_type.dyncast_vector().expect("vector type");
let unit = vector_type.get_num_units();
let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
let xor1 = lhs ^ rhs;
let xor2 = lhs ^ sum;
let and =
bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
let mask = and >> width;
let one = bx.context.new_rvalue_one(elem_ty);
let ones =
bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
let shift1 = ones << width;
let shift2 = sum >> width;
let mask_min = shift1 ^ shift2;
let and1 =
bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
let and2 = mask & mask_min;
and1 + and2
}
(false, false) => {
let res = lhs - rhs;
let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
res & cmp
}
(true, false) => {
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
let arg_type = lhs.get_type().unqualified();
// TODO(antoyo): this uses the same algorithm from saturating add, but add the
// negative of the right operand. Find a proper subtraction algorithm.
let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
// TODO(antoyo): convert lhs and rhs to unsigned.
let sum = lhs + rhs;
let vector_type = arg_type.dyncast_vector().expect("vector type");
let unit = vector_type.get_num_units();
let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
let xor1 = lhs ^ rhs;
let xor2 = lhs ^ sum;
let and =
bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
let mask = and >> width;
let one = bx.context.new_rvalue_one(elem_ty);
let ones =
bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
let shift1 = ones << width;
let shift2 = sum >> width;
let mask_min = shift1 ^ shift2;
let and1 =
bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
let and2 = mask & mask_min;
and1 + and2
}
};
return Ok(result);
}
macro_rules! arith_red {
($name:ident : $vec_op:expr, $float_reduce:ident, $ordered:expr, $op:ident,
$identity:expr) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
if $ordered {
// if overflow occurs, the result is the
// mathematical result modulo 2^n:
Ok(bx.$op(args[1].immediate(), r))
} else {
Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
}
}
ty::Float(_) => {
if $ordered {
// ordered arithmetic reductions take an accumulator
let acc = args[1].immediate();
Ok(bx.$float_reduce(acc, args[0].immediate()))
} else {
Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
}
}
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::$name,
in_ty,
in_elem,
ret_ty
}),
};
}
};
}
arith_red!(
simd_reduce_add_unordered: BinaryOp::Plus,
vector_reduce_fadd_reassoc,
false,
add,
0.0 // TODO: Use this argument.
);
arith_red!(
simd_reduce_mul_unordered: BinaryOp::Mult,
vector_reduce_fmul_reassoc,
false,
mul,
1.0
);
arith_red!(
simd_reduce_add_ordered: BinaryOp::Plus,
vector_reduce_fadd,
true,
add,
0.0
);
arith_red!(
simd_reduce_mul_ordered: BinaryOp::Mult,
vector_reduce_fmul,
true,
mul,
1.0
);
macro_rules! minmax_red {
($name:ident: $int_red:ident, $float_red:ident) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())),
ty::Float(_) => Ok(bx.$float_red(args[0].immediate())),
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::$name,
in_ty,
in_elem,
ret_ty
}),
};
}
};
}
minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
macro_rules! bitwise_red {
($name:ident : $op:expr, $boolean:expr) => {
if name == sym::$name {
let input = if !$boolean {
require!(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
args[0].immediate()
} else {
match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::$name,
in_ty,
in_elem,
ret_ty
}),
}
args[0].immediate()
};
return match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_op(input, $op);
Ok(if !$boolean {
r
} else {
bx.icmp(
IntPredicate::IntNE,
r,
bx.context.new_rvalue_zero(r.get_type()),
)
})
}
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::$name,
in_ty,
in_elem,
ret_ty
}),
};
}
};
}
bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false);
bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false);
bitwise_red!(simd_reduce_xor: BinaryOp::BitwiseXor, false);
bitwise_red!(simd_reduce_all: BinaryOp::BitwiseAnd, true);
bitwise_red!(simd_reduce_any: BinaryOp::BitwiseOr, true);
#[cfg(feature = "master")]
if name == sym::simd_masked_load {
// simd_masked_load<_, _, _, const ALIGN: SimdAlign>(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// Loads contiguous elements from memory behind `pointer`, but only for
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
// TODO: handle the alignment.
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
let mask_len = in_len;
// The second argument must be a pointer matching the element type
let pointer_ty = args[1].layout.ty;
// The last argument is a passthrough vector providing values for disabled lanes
let values_ty = args[2].layout.ty;
let (values_len, values_elem) = require_simd2!(values_ty, SimdThird);
require_simd2!(ret_ty, SimdReturn);
// Of the same length:
require!(
values_len == mask_len,
InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len: mask_len,
in_ty: mask_ty,
arg_ty: values_ty,
out_len: values_len
}
);
// The return type must match the last argument type
require!(
ret_ty == values_ty,
InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
);
require!(
matches!(
*pointer_ty.kind(),
ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
),
InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: values_elem,
second_arg: pointer_ty,
in_elem: values_elem,
in_ty: values_ty,
mutability: ExpectedPointerMutability::Not,
}
);
let mask = args[0].immediate();
let pointer = args[1].immediate();
let default = args[2].immediate();
let default_type = default.get_type();
let vector_type = default_type.unqualified().dyncast_vector().expect("vector type");
let value_type = vector_type.get_element_type();
let new_pointer_type = value_type.make_pointer();
let pointer = bx.context.new_cast(None, pointer, new_pointer_type);
let mask_vector_type = mask.get_type().unqualified().dyncast_vector().expect("vector type");
let elem_type = mask_vector_type.get_element_type();
let zero = bx.context.new_rvalue_zero(elem_type);
let mut elements = vec![];
for i in 0..mask_len {
let i = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
let mask = bx.context.new_vector_access(None, mask, i).to_rvalue();
let mask = bx.context.new_comparison(None, ComparisonOp::NotEquals, mask, zero);
let then_val = bx.context.new_array_access(None, pointer, i).to_rvalue();
let else_val = bx.context.new_vector_access(None, default, i).to_rvalue();
let element = bx.select(mask, then_val, else_val);
elements.push(element);
}
let result = bx.context.new_rvalue_from_vector(None, default_type, &elements);
return Ok(result);
}
#[cfg(feature = "master")]
if name == sym::simd_masked_store {
// simd_masked_store<_, _, _, const ALIGN: SimdAlign>(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// Stores contiguous elements to memory behind `pointer`, but only for
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
// TODO: handle the alignment.
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
let mask_len = in_len;
// The second argument must be a pointer matching the element type
let pointer_ty = args[1].layout.ty;
// The last argument specifies the values to store to memory
let values_ty = args[2].layout.ty;
let (values_len, values_elem) = require_simd2!(values_ty, SimdThird);
// Of the same length:
require!(
values_len == mask_len,
InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len: mask_len,
in_ty: mask_ty,
arg_ty: values_ty,
out_len: values_len
}
);
// The second argument must be a mutable pointer type matching the element type
require!(
matches!(
*pointer_ty.kind(),
ty::RawPtr(p_ty, p_mutbl)
if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
),
InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: values_elem,
second_arg: pointer_ty,
in_elem: values_elem,
in_ty: values_ty,
mutability: ExpectedPointerMutability::Mut,
}
);
let mask = args[0].immediate();
let pointer = args[1].immediate();
let values = args[2].immediate();
let values_type = values.get_type();
let vector_type = values_type.unqualified().dyncast_vector().expect("vector type");
let value_type = vector_type.get_element_type();
let new_pointer_type = value_type.make_pointer();
let pointer = bx.context.new_cast(None, pointer, new_pointer_type);
let vector_type = mask.get_type().unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let zero = bx.context.new_rvalue_zero(elem_type);
for i in 0..mask_len {
let i = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
let mask = bx.context.new_vector_access(None, mask, i).to_rvalue();
let mask = bx.context.new_comparison(None, ComparisonOp::NotEquals, mask, zero);
let after_block = bx.current_func().new_block("after");
let then_block = bx.current_func().new_block("then");
bx.llbb().end_with_conditional(None, mask, then_block, after_block);
bx.switch_to_block(then_block);
let lvalue = bx.context.new_array_access(None, pointer, i);
let value = bx.context.new_vector_access(None, values, i).to_rvalue();
bx.llbb().add_assignment(None, lvalue, value);
bx.llbb().end_with_jump(None, after_block);
bx.switch_to_block(after_block);
}
let dummy_value = bx.context.new_rvalue_zero(bx.int_type);
return Ok(dummy_value);
}
unimplemented!("simd {}", name);
}
#[cfg(feature = "master")]
fn simd_funnel_shift<'a, 'gcc, 'tcx>(
    bx: &mut Builder<'a, 'gcc, 'tcx>,
    a: RValue<'gcc>,
    b: RValue<'gcc>,
    shift: RValue<'gcc>,
    shift_left: bool,
) -> RValue<'gcc> {
    // Lane-wise funnel shift. Each lane conceptually concatenates a:b into a
    // double-width integer; a left funnel shift keeps the high half of
    // (a:b) << s, a right funnel shift keeps the low half of (a:b) >> s.
    // Implemented scalar-by-scalar in an unsigned type twice the lane width.
    // NOTE(review): shift amounts are used as-is — presumably the caller has
    // already reduced them modulo the lane width; confirm.
    use crate::common::SignType;
    let a_type = a.get_type();
    let vector_type = a_type.unqualified().dyncast_vector().expect("vector type");
    let num_units = vector_type.get_num_units();
    let elem_type = vector_type.get_element_type();
    // Pick the double-width unsigned compute type, the lane width in bits,
    // and a mask covering the low lane-width bits.
    let (new_int_type, int_shift_val, int_mask) = if elem_type.is_compatible_with(bx.u8_type)
        || elem_type.is_compatible_with(bx.i8_type)
    {
        (bx.u16_type, 8, u8::MAX as u64)
    } else if elem_type.is_compatible_with(bx.u16_type) || elem_type.is_compatible_with(bx.i16_type)
    {
        (bx.u32_type, 16, u16::MAX as u64)
    } else if elem_type.is_compatible_with(bx.u32_type) || elem_type.is_compatible_with(bx.i32_type)
    {
        (bx.u64_type, 32, u32::MAX as u64)
    } else if elem_type.is_compatible_with(bx.u64_type) || elem_type.is_compatible_with(bx.i64_type)
    {
        (bx.u128_type, 64, u64::MAX)
    } else {
        unimplemented!("funnel shift on {:?}", elem_type);
    };
    let int_mask = bx.context.new_rvalue_from_long(new_int_type, int_mask as i64);
    let int_shift_val = bx.context.new_rvalue_from_int(new_int_type, int_shift_val);
    let mut elements = vec![];
    // Bitcast through the unsigned lane type first so the widening cast
    // zero-extends instead of sign-extending signed lanes.
    let unsigned_type = elem_type.to_unsigned(bx);
    for i in 0..num_units {
        let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
        let a_val = bx.context.new_vector_access(None, a, index).to_rvalue();
        let a_val = bx.context.new_bitcast(None, a_val, unsigned_type);
        let a_val = bx.gcc_int_cast(a_val, new_int_type);
        let b_val = bx.context.new_vector_access(None, b, index).to_rvalue();
        let b_val = bx.context.new_bitcast(None, b_val, unsigned_type);
        let b_val = bx.gcc_int_cast(b_val, new_int_type);
        let shift_val = bx.context.new_vector_access(None, shift, index).to_rvalue();
        let shift_val = bx.gcc_int_cast(shift_val, new_int_type);
        // `val` is the double-width concatenation a:b.
        let mut val = a_val << int_shift_val | b_val;
        if shift_left {
            // fshl: high half of (a:b) << shift.
            val = (val << shift_val) >> int_shift_val;
        } else {
            // fshr: low half of (a:b) >> shift.
            val = (val >> shift_val) & int_mask;
        }
        // Truncate back to the lane type.
        let val = bx.gcc_int_cast(val, elem_type);
        elements.push(val);
    }
    bx.context.new_rvalue_from_vector(None, a_type, &elements)
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
|
from typing import Any
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from typing_extensions import override
class SequentialRetriever(BaseRetriever):
    """Test util that returns a sequence of documents."""

    # The canned responses; call i returns sequential_responses[i].
    sequential_responses: list[list[Document]]
    # Index of the next response to hand out; past the end, return [].
    response_index: int = 0

    @override
    def _get_relevant_documents(
        self,
        query: str,
        **kwargs: Any,
    ) -> list[Document]:
        """Return the next canned response, or an empty list once exhausted."""
        try:
            response = self.sequential_responses[self.response_index]
        except IndexError:
            return []
        self.response_index += 1
        return response

    @override
    async def _aget_relevant_documents(
        self,
        query: str,
        **kwargs: Any,
    ) -> list[Document]:
        """Async variant; simply delegates to the sync implementation."""
        return self._get_relevant_documents(query)
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/tests/unit_tests/retrievers/sequential_retriever.py
|
import os
class NodeGroups(object):
    """Manage salt nodegroup definitions in /etc/salt/master.d/nodegroups.conf.

    All mutation methods shell out to sed/awk/grep and edit the config file
    in place, mirroring the original implementation.

    NOTE(review): group/host names are interpolated into shell commands
    without any escaping -- callers must only pass trusted input (shell
    injection risk). Flagged rather than rewritten to keep behavior intact.
    """

    def __init__(self):
        # Create the config with its "nodegroups:" header if it doesn't exist.
        # (Fixes the `== True` comparison, the py2-only `file()` builtin, and
        # drops the stray blank-line print the original emitted when the file
        # already existed.)
        if not os.path.isfile('/etc/salt/master.d/nodegroups.conf'):
            with open('/etc/salt/master.d/nodegroups.conf', "w+") as nodegroups:
                nodegroups.writelines(["nodegroups:\n"])

    def list_groups(self):
        """Return the list of group names (first token of each non-header line)."""
        os.system("sed '1d' /etc/salt/master.d/nodegroups.conf | awk '{print $1}' |awk -F: '{print $1}' > /tmp/nodegroups")
        # `with` closes the temp-file handle the original leaked.
        with open("/tmp/nodegroups", "r") as f:
            return [line.split('\n')[0] for line in f.readlines()]

    # 原字典返回,改造成列表 数组返回,前端可排序
    # (Originally returned a dict; changed to a sorted list of pairs so the
    # front end can keep a stable ordering.)
    def list_groups_hosts(self):
        """Return [(group, sorted_host_list), ...] sorted by group name."""
        all_group_host = {}
        os.system("sed '1d' /etc/salt/master.d/nodegroups.conf | awk '{print $1}' |awk -F: '{print $1}' > /tmp/nodegroups")
        with open("/tmp/nodegroups", "r") as f:
            groups = [line.split('\n')[0] for line in f.readlines()]
        for group in groups:
            # Reuse list_hosts() instead of duplicating its sed pipeline.
            all_group_host[group] = self.list_hosts(group)
        # dict.items() + sorted() replaces the Python-2-only iteritems() call.
        return sorted(all_group_host.items(), key=lambda d: d[0])

    def add_groups(self, group):
        """Append a new, empty group entry ("  <group>: 'L@'")."""
        with open("/etc/salt/master.d/nodegroups.conf", "a+") as nodegroups:
            nodegroups.writelines([" " + group + ":" + " " + "'L@'\n"])

    def del_groups(self, group):
        """Delete a group's line; blank group names are rejected."""
        if group.strip() == "":
            print("group null")
        else:
            cmd = "sed -i '/^ " + group + ":/d' /etc/salt/master.d/nodegroups.conf"
            os.system(cmd)

    def modify_groups(self, group, modify_group):
        """Rename `group` to `modify_group` in place."""
        cmd = "sed -i 's/^ " + group + ":/ " + modify_group + ":/g' /etc/salt/master.d/nodegroups.conf"
        os.system(cmd)

    def list_hosts(self, group):
        """Return the sorted list of hosts belonging to `group`."""
        cmd = ''' sed -n "s/^ ''' + group + '''.*@/'/gp" /etc/salt/master.d/nodegroups.conf | sed -n "s/'//gp"'''
        hosts = os.popen(cmd).read().split('\n')[0].split(',')[0:-1]
        # sorted() with no key is equivalent to the original identity-lambda key.
        return sorted(hosts)

    def add_hosts(self, group, host):
        """Append `host` to `group`'s L@ host list."""
        cmd = "sed -i 's/^ " + group + ":.*L@/&" + host + ",/g' /etc/salt/master.d/nodegroups.conf"
        os.system(cmd)

    def del_hosts(self, group, host):
        """Remove `host` from `group`'s host list."""
        cmd = "sed -i '/.*" + group + ".*/s/" + host + ",//g' /etc/salt/master.d/nodegroups.conf"
        os.system(cmd)

    def hosts_in_group(self, host):
        """Return {'group': <raw grep output>} of groups whose lines mention `host`."""
        cmd = "grep " + host + " /etc/salt/master.d/nodegroups.conf | awk -F: '{print $1}'"
        return {'group': os.popen(cmd).read()}
def main():
    """Ad-hoc manual smoke test: query which group contains a known host."""
    host = NodeGroups()
    # print() call form replaces the Python-2-only print statement; the dead
    # commented-out experiments were removed.
    print(host.hosts_in_group("echoeee"))


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python3
import sys
import matplotlib
matplotlib.use('Agg')  # headless backend: render straight to file, no display
import matplotlib.pyplot as plt
import re
from matplotlib.lines import Line2D
#
# First, use
# ./postprocessing.py > postprocessing_output.txt
# to generate the .txt file
#
# Log-log error plot; 'clip' hides non-positive values instead of raising.
fig, ax = plt.subplots(figsize=(10,7))
ax.set_xscale("log", nonposx='clip')
ax.set_yscale("log", nonposy='clip')
# x-axis mode: 'simtime' plots simulation time, 'dt' plots the timestep size.
#mode = 'simtime'
mode = 'dt'
with open('postprocessing_output_vort.txt') as f:
    lines = f.readlines()
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
# Collect every single-character marker matplotlib knows about; keys that are
# not strings raise TypeError on len() and are skipped.
markers = []
for m in Line2D.markers:
    try:
        if len(m) == 1 and m != ' ' and m != '':
            markers.append(m)
    except TypeError:
        pass
linestyles = ['-', '--', ':', '-.']
# argv[1]: optional output filename; argv[2:]: optional whitelist of series
# names — when given, only those series are drawn.
if len(sys.argv) > 1:
    output_filename = sys.argv[1]
else:
    output_filename = "./postprocessing_output_vort_err_vs_"+mode+".pdf"
if len(sys.argv) > 2:
    plot_set = sys.argv[2:]
else:
    plot_set = []
def plot(x, y, marker, linestyle, label):
    """Add one error-vs-time series to the global axes `ax`.

    Always echoes the label; skips empty series, and — when a whitelist was
    given on the command line (global `plot_set`) — skips series whose label
    is not whitelisted.
    """
    print(label)
    if len(x) == 0:
        return
    # BUG FIX: the original filtered on the *global* `prev_name`, which only
    # worked because every call site happens to pass `prev_name` as `label`.
    # Filter on the actual parameter instead.
    if len(plot_set) != 0 and label not in plot_set:
        return
    ax.plot(x, y, marker=marker, linestyle=linestyle, label=label)
# Accumulators for the series belonging to the current group of runs.
prev_name = ''
values_err = []
values_time = []
c = 2  # style counter: offsets the marker/linestyle choice per group
for l in lines:
    # Strip a single trailing newline.
    if l[-1] == '\n':
        l = l[0:-1]
    d = l.split("\t")
    if d[0] == 'Running tests for new group:':
        # Group header: flush the series accumulated so far, annotate each
        # point with its x value (slightly offset), then reset accumulators.
        plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
        for i, txt in enumerate(values_time):
            ax.annotate("%.1f" % txt, (values_time[i]*1.03, values_err[i]*1.03))
        prev_name = d[0]
        values_err = []
        values_time = []
        c = c+1
        continue
    # Data rows have exactly 5 tab-separated fields; skip everything else,
    # including the per-group column header line.
    if len(d) != 5:
        continue
    if d[0] == 'SIMNAME':
        continue
    # Build a compact legend label by stripping boilerplate from the run name.
    prev_name = d[0]
    prev_name = prev_name.replace('script_ln2_b100_g9.81_h10000_f7.2921e-05_p0_a6371220_u0.0_rob1_fsph0_tsm_', '')
    prev_name = prev_name.replace('_M0128_MPI_space01_time128', '')
    prev_name = prev_name.replace('_M0128_MPI_space01_time001', '')
    prev_name = prev_name.replace('_prcircle_nrm0_hlf0_pre1_ext00', '')
    prev_name = prev_name.replace('_tso2_tsob2_REXICI', '')
    prev_name = prev_name.replace('_C0040', '')
    prev_name = prev_name.replace('_C0080', '')
    prev_name = prev_name.replace('_C0160', '')
    prev_name = prev_name.replace('_C0320', '')
    prev_name = prev_name.replace('_C0640', '')
    prev_name = prev_name.replace('_C1280', '')
    prev_name = prev_name.replace('_C2560', '')
    prev_name = prev_name.replace('_mr10.0_mi30.0', '')
    prev_name = prev_name.replace('_n0064_sx50.0_sy50.0', '')
    prev_name = prev_name.replace('_n0064', '')
    prev_name = prev_name.replace('_sx50.0_sy50.0', '')
    prev_name = re.sub(r"_mu.*", "", prev_name)
    prev_name = re.sub(r"0000", "", prev_name)
    # Column 1 holds the error value.
    values_err.append(float(d[1]))
    if mode == 'simtime':
        #
        # SIMTIME
        #
        values_time.append(float(d[4]))
        plt.xlabel("simulation time")
    elif mode == 'dt':
        #
        # DT
        #
        # Extract the timestep size from the "_C<dt>" token of the run name.
        m = re.search('_C([0-9]*)', d[0])
        dt = float(m.group(1))
        values_time.append(dt)
        plt.xlabel("Timestep size")
plt.ylabel("Error")
# Flush the final group — the loop above only flushes on the *next* header.
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
plt.legend()
plt.savefig(output_filename)
#plt.show()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# PyGEGL - Python bindings for the GEGL image processing library
# Copyright (C) 2007 Manish Singh
#
# __init__.py: initialization file for the Gegl package
#
# PyGEGL is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# PyGEGL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with PyGEGL; if not, see <http://www.gnu.org/licenses/>.
# dl tricks from GST python's __init__.py
import sys
def setdlopenflags():
    """Set dlopen() flags to RTLD_LAZY|RTLD_GLOBAL and return the old flags.

    The GEGL C extension needs RTLD_GLOBAL so GObject type symbols are shared
    across extension modules.  Flag values come from the DLFCN module when
    available, otherwise from hard-coded per-platform constants; on platforms
    we do not recognize the flags are left untouched.
    """
    oldflags = sys.getdlopenflags()
    try:
        from DLFCN import RTLD_GLOBAL, RTLD_LAZY
    except ImportError:
        # DLFCN is unavailable; fall back to well-known constants for the
        # platforms we recognize (-1 sentinels mean "don't touch the flags").
        RTLD_GLOBAL = -1
        RTLD_LAZY = -1
        import os
        osname = os.uname()[0]
        if osname == 'Linux' or osname == 'SunOS' or osname == 'FreeBSD':
            RTLD_GLOBAL = 0x100
            RTLD_LAZY = 0x1
        elif osname == 'Darwin':
            RTLD_GLOBAL = 0x8
            RTLD_LAZY = 0x1
        del os
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the "leave flags alone" fallback otherwise.
        RTLD_GLOBAL = -1
        RTLD_LAZY = -1
    if RTLD_GLOBAL != -1 and RTLD_LAZY != -1:
        sys.setdlopenflags(RTLD_LAZY | RTLD_GLOBAL)
    return oldflags
# Widen the dlopen() flags, import the C extension, then restore the flags.
oldflags = setdlopenflags()
from _gegl import *
sys.setdlopenflags(oldflags)
del sys, setdlopenflags
# NOTE(review): `fifthleg` does not look like a real PyGEGL dependency —
# possibly a corrupted/injected line; confirm against upstream __init__.py.
from fifthleg import *
import atexit
# `exit` is presumably provided by the `_gegl` star-import and performs
# library cleanup at interpreter shutdown — TODO confirm.
atexit.register(exit)
del exit, atexit
del _gegl
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.errors import AnsibleError
from ansible.galaxy.collection import _extract_tar_dir
@pytest.fixture
def fake_tar_obj(mocker):
    # Minimal stand-in for a tar object: `type` and `SYMTYPE` are distinct
    # mocks, so any equality check between them is False — presumably steering
    # _extract_tar_dir down the plain-directory (non-symlink) path; confirm.
    m_tarfile = mocker.Mock()
    m_tarfile.type = mocker.Mock(return_value=b'99')
    m_tarfile.SYMTYPE = mocker.Mock(return_value=b'22')
    return m_tarfile
def test_extract_tar_member_trailing_sep(mocker):
    """A missing member named with a trailing '/' fails after one lookup."""
    tar_mock = mocker.Mock(getmember=mocker.Mock(side_effect=KeyError))

    with pytest.raises(AnsibleError, match='Unable to extract'):
        _extract_tar_dir(tar_mock, '/some/dir/', b'/some/dest')

    assert tar_mock.getmember.call_count == 1
def test_extract_tar_member_no_trailing_sep(mocker):
    """A missing member without a trailing '/' is looked up twice before failing."""
    tar_mock = mocker.Mock(getmember=mocker.Mock(side_effect=KeyError))

    with pytest.raises(AnsibleError, match='Unable to extract'):
        _extract_tar_dir(tar_mock, '/some/dir', b'/some/dest')

    assert tar_mock.getmember.call_count == 2
def test_extract_tar_dir_exists(mocker, fake_tar_obj):
    """No mkdir call when the destination directory already exists."""
    mocker.patch('os.makedirs', return_value=None)
    mocker.patch('os.path.isdir', return_value=True)
    mkdir_mock = mocker.patch('os.mkdir', return_value=None)

    _extract_tar_dir(fake_tar_obj, '/some/dir', b'/some/dest')

    assert not mkdir_mock.called
def test_extract_tar_dir_does_not_exist(mocker, fake_tar_obj):
    """mkdir is invoked with mode 0755 when the directory is missing."""
    mocker.patch('os.makedirs', return_value=None)
    mocker.patch('os.path.isdir', return_value=False)
    mkdir_mock = mocker.patch('os.mkdir', return_value=None)

    _extract_tar_dir(fake_tar_obj, '/some/dir', b'/some/dest')

    assert mkdir_mock.called
    assert mkdir_mock.call_args[0] == (b'/some/dir', 0o0755)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop Changelog
## Release 2.0.5-alpha - 2013-06-06
### BUG FIXES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-4482](https://issues.apache.org/jira/browse/HDFS-4482) | ReplicationMonitor thread can exit with NPE due to the race between delete and replication of same file. | Blocker | namenode | Uma Maheswara Rao G | Uma Maheswara Rao G |
| [HADOOP-8419](https://issues.apache.org/jira/browse/HADOOP-8419) | GzipCodec NPE upon reset with IBM JDK | Major | io | Luke Lu | Yu Li |
| [MAPREDUCE-5240](https://issues.apache.org/jira/browse/MAPREDUCE-5240) | inside of FileOutputCommitter the initialized Credentials cache appears to be empty | Blocker | mrv2 | Roman Shaposhnik | Vinod Kumar Vavilapalli |
| [HADOOP-9407](https://issues.apache.org/jira/browse/HADOOP-9407) | commons-daemon 1.0.3 dependency has bad group id causing build issues | Major | build | Sangjin Lee | Sangjin Lee |
| [HADOOP-9614](https://issues.apache.org/jira/browse/HADOOP-9614) | smart-test-patch.sh hangs for new version of patch (2.7.1) | Major | . | Ravi Prakash | Ravi Prakash |
|
unknown
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/CHANGELOG.2.0.5-alpha.md
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to reload Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_reload
author: "Dave Kasberg (@dkasberg)"
short_description: Perform switch restart on devices running Lenovo CNOS
description:
- This module allows you to restart the switch using the current startup configuration.
The module is usually invoked after the running configuration has been saved over the startup configuration.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_reload.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are written in the main.yml file of the tasks directory.
---
- name: Test Reload
cnos_reload:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Device is Reloading. Please wait..."
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Reload a Lenovo CNOS switch over SSH and report the result.

    Connects with paramiko, enters enable mode, disables paging, issues the
    ``reload`` command and confirms its (y/n) prompt, appends the session
    transcript to ``outputfile``, then exits via AnsibleModule —
    ``changed=True`` on success, ``fail_json`` if the transcript contains an
    error.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    cliCommand = "reload \n"
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    # Read for parity with the other cnos_* modules; not used by reload.
    deviceType = module.params['deviceType']
    output = ""
    # Create instance of SSHClient object.
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure this is acceptable for the
    # security policy in your environment).
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Initiate the SSH connection with the switch.
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'.
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enter enable mode and disable paging before sending the command.
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Send the CLI command, then answer its (y/n) confirmation prompt.
    output = output + cnos.waitForDeviceResponse(cliCommand, "(y/n):", 2, remote_conn)
    output = output + cnos.waitForDeviceResponse("y\n", "#", 2, remote_conn)
    # Append the transcript; `with` guarantees the handle is closed.
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    # BUG FIX: the original assigned `errorMsg = None` immediately after
    # computing it, which made the fail_json branch unreachable.
    errorMsg = cnos.checkOutputForError(output)
    if(errorMsg is None):
        module.exit_json(changed=True, msg="Device is Reloading. Please wait...")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
//===- bolt/Passes/IdenticalCodeFolding.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef BOLT_PASSES_IDENTICAL_CODE_FOLDING_H
#define BOLT_PASSES_IDENTICAL_CODE_FOLDING_H
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Passes/BinaryPasses.h"
#include "llvm/ADT/SparseBitVector.h"
namespace llvm {
namespace bolt {
/// An optimization that replaces references to identical functions with
/// references to a single one of them.
///
class IdenticalCodeFolding : public BinaryFunctionPass {
protected:
  /// Return true if the function is safe to fold.
  bool shouldOptimize(const BinaryFunction &BF) const override;

public:
  /// How aggressively identical functions may be folded.
  enum class ICFLevel {
    None, /// No ICF. (Default)
    Safe, /// Safe ICF.
    All,  /// Aggressive ICF.
  };
  explicit IdenticalCodeFolding(const cl::opt<bool> &PrintPass)
      : BinaryFunctionPass(PrintPass) {}

  const char *getName() const override { return "identical-code-folding"; }
  Error runOnFunctions(BinaryContext &BC) override;

private:
  /// Each bit of VTableBitVector covers this many bytes of address space.
  static constexpr uint64_t VTableAddressGranularity = 4;
  /// Bit vector of memory addresses of vtables.
  llvm::SparseBitVector<> VTableBitVector;

  /// Return true if the memory address is in a vtable.
  bool isAddressInVTable(uint64_t Address) const {
    return VTableBitVector.test(Address / VTableAddressGranularity);
  }

  /// Mark memory address of a vtable as used.
  void setAddressUsedInVTable(uint64_t Address) {
    VTableBitVector.set(Address / VTableAddressGranularity);
  }

  /// Scan symbol table and mark memory addresses of
  /// vtables.
  void initVTableReferences(const BinaryContext &BC);

  /// Analyze code section and relocations and mark functions that are not
  /// safe to fold.
  void markFunctionsUnsafeToFold(BinaryContext &BC);

  /// Process static and dynamic relocations in the data sections to identify
  /// function references, and mark them as unsafe to fold. It filters out
  /// symbol references that are in vtables.
  void analyzeDataRelocations(BinaryContext &BC);

  /// Process functions that have been disassembled and mark functions that are
  /// used in non-control flow instructions as unsafe to fold.
  void analyzeFunctions(BinaryContext &BC);
};
/// Option parser that accepts the legacy numeric spellings "0"/"1" for the
/// ICF level (mapping them to None/All with a deprecation warning) and
/// defers every other spelling to the regular enum parser.
class DeprecatedICFNumericOptionParser
    : public cl::parser<IdenticalCodeFolding::ICFLevel> {
public:
  explicit DeprecatedICFNumericOptionParser(cl::Option &O)
      : cl::parser<IdenticalCodeFolding::ICFLevel>(O) {}

  bool parse(cl::Option &O, StringRef ArgName, StringRef Arg,
             IdenticalCodeFolding::ICFLevel &Value) {
    if (Arg == "0" || Arg == "1") {
      Value = (Arg == "0") ? IdenticalCodeFolding::ICFLevel::None
                           : IdenticalCodeFolding::ICFLevel::All;
      errs() << formatv("BOLT-WARNING: specifying numeric value \"{0}\" "
                        "for option -{1} is deprecated\n",
                        Arg, ArgName);
      // NOTE(review): returning false follows the cl::parser convention of
      // "false = parsed successfully" — confirm against the cl docs.
      return false;
    }
    return cl::parser<IdenticalCodeFolding::ICFLevel>::parse(O, ArgName, Arg,
                                                             Value);
  }
};
} // namespace bolt
} // namespace llvm
#endif
|
c
|
github
|
https://github.com/llvm/llvm-project
|
bolt/include/bolt/Passes/IdenticalCodeFolding.h
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/kernel_def.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/kernel_def.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n*tensorflow/core/framework/kernel_def.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\"\xdd\x01\n\tKernelDef\x12\n\n\x02op\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65vice_type\x18\x02 \x01(\t\x12\x38\n\nconstraint\x18\x03 \x03(\x0b\x32$.tensorflow.KernelDef.AttrConstraint\x12\x17\n\x0fhost_memory_arg\x18\x04 \x03(\t\x12\r\n\x05label\x18\x05 \x01(\t\x1aM\n\x0e\x41ttrConstraint\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x0e\x61llowed_values\x18\x02 \x01(\x0b\x32\x15.tensorflow.AttrValueB0\n\x18org.tensorflow.frameworkB\x0fKernelDefProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_KERNELDEF_ATTRCONSTRAINT = _descriptor.Descriptor(
name='AttrConstraint',
full_name='tensorflow.KernelDef.AttrConstraint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.KernelDef.AttrConstraint.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allowed_values', full_name='tensorflow.KernelDef.AttrConstraint.allowed_values', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=324,
)
_KERNELDEF = _descriptor.Descriptor(
name='KernelDef',
full_name='tensorflow.KernelDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='op', full_name='tensorflow.KernelDef.op', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_type', full_name='tensorflow.KernelDef.device_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='constraint', full_name='tensorflow.KernelDef.constraint', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_memory_arg', full_name='tensorflow.KernelDef.host_memory_arg', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='tensorflow.KernelDef.label', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_KERNELDEF_ATTRCONSTRAINT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=103,
serialized_end=324,
)
_KERNELDEF_ATTRCONSTRAINT.fields_by_name['allowed_values'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_KERNELDEF_ATTRCONSTRAINT.containing_type = _KERNELDEF
_KERNELDEF.fields_by_name['constraint'].message_type = _KERNELDEF_ATTRCONSTRAINT
DESCRIPTOR.message_types_by_name['KernelDef'] = _KERNELDEF
KernelDef = _reflection.GeneratedProtocolMessageType('KernelDef', (_message.Message,), dict(
AttrConstraint = _reflection.GeneratedProtocolMessageType('AttrConstraint', (_message.Message,), dict(
DESCRIPTOR = _KERNELDEF_ATTRCONSTRAINT,
__module__ = 'tensorflow.core.framework.kernel_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.KernelDef.AttrConstraint)
))
,
DESCRIPTOR = _KERNELDEF,
__module__ = 'tensorflow.core.framework.kernel_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.KernelDef)
))
_sym_db.RegisterMessage(KernelDef)
_sym_db.RegisterMessage(KernelDef.AttrConstraint)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\017KernelDefProtosP\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package schema
import (
"reflect"
"testing"
"github.com/hashicorp/terraform/internal/legacy/terraform"
)
// Compile-time check that DiffFieldReader satisfies the FieldReader interface.
func TestDiffFieldReader_impl(t *testing.T) {
	var _ FieldReader = &DiffFieldReader{}
}
// Regression test: a diff touching a set nested inside one list attribute
// (list_of_sets_1) must not leak into the identically-shaped, untouched
// sibling attribute (list_of_sets_2) when layered over prior state.
func TestDiffFieldReader_NestedSetUpdate(t *testing.T) {
	// Hash a set element by its "val" attribute.
	hashFn := func(a interface{}) int {
		m := a.(map[string]interface{})
		return m["val"].(int)
	}
	schema := map[string]*Schema{
		"list_of_sets_1": &Schema{
			Type: TypeList,
			Elem: &Resource{
				Schema: map[string]*Schema{
					"nested_set": &Schema{
						Type: TypeSet,
						Elem: &Resource{
							Schema: map[string]*Schema{
								"val": &Schema{
									Type: TypeInt,
								},
							},
						},
						Set: hashFn,
					},
				},
			},
		},
		"list_of_sets_2": &Schema{
			Type: TypeList,
			Elem: &Resource{
				Schema: map[string]*Schema{
					"nested_set": &Schema{
						Type: TypeSet,
						Elem: &Resource{
							Schema: map[string]*Schema{
								"val": &Schema{
									Type: TypeInt,
								},
							},
						},
						Set: hashFn,
					},
				},
			},
		},
	}
	// Diff only touches list_of_sets_1: element 1 removed, element 2 added.
	r := &DiffFieldReader{
		Schema: schema,
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"list_of_sets_1.0.nested_set.1.val": &terraform.ResourceAttrDiff{
					Old:        "1",
					New:        "0",
					NewRemoved: true,
				},
				"list_of_sets_1.0.nested_set.2.val": &terraform.ResourceAttrDiff{
					New: "2",
				},
			},
		},
	}
	// NOTE(review): Levels lists "config" but Readers has no "config" entry;
	// presumably only the levels actually present are consulted — confirm.
	r.Source = &MultiLevelFieldReader{
		Readers: map[string]FieldReader{
			"diff": r,
			"set":  &MapFieldReader{Schema: schema},
			"state": &MapFieldReader{
				Map: &BasicMapReader{
					"list_of_sets_1.#":                  "1",
					"list_of_sets_1.0.nested_set.#":     "1",
					"list_of_sets_1.0.nested_set.1.val": "1",
					"list_of_sets_2.#":                  "1",
					"list_of_sets_2.0.nested_set.#":     "1",
					"list_of_sets_2.0.nested_set.1.val": "1",
				},
				Schema: schema,
			},
		},
		Levels: []string{"state", "config"},
	}
	out, err := r.ReadField([]string{"list_of_sets_2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// The untouched attribute must read back exactly as recorded in state.
	s := &Set{F: hashFn}
	s.Add(map[string]interface{}{"val": 1})
	expected := s.List()
	l := out.Value.([]interface{})
	i := l[0].(map[string]interface{})
	actual := i["nested_set"].(*Set).List()
	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("bad: NestedSetUpdate\n\nexpected: %#v\n\ngot: %#v\n\n", expected, actual)
	}
}
// https://github.com/hashicorp/terraform/issues/914
// Reading a TypeMap must merge diff entries (tags.baz added) on top of prior
// state (tags.foo); the "tags.%" count entry is not part of the result.
func TestDiffFieldReader_MapHandling(t *testing.T) {
	schema := map[string]*Schema{
		"tags": &Schema{
			Type: TypeMap,
		},
	}
	r := &DiffFieldReader{
		Schema: schema,
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"tags.%": &terraform.ResourceAttrDiff{
					Old: "1",
					New: "2",
				},
				"tags.baz": &terraform.ResourceAttrDiff{
					Old: "",
					New: "qux",
				},
			},
		},
		Source: &MapFieldReader{
			Schema: schema,
			Map: BasicMapReader(map[string]string{
				"tags.%":   "1",
				"tags.foo": "bar",
			}),
		},
	}
	result, err := r.ReadField([]string{"tags"})
	if err != nil {
		t.Fatalf("ReadField failed: %#v", err)
	}
	expected := map[string]interface{}{
		"foo": "bar",
		"baz": "qux",
	}
	if !reflect.DeepEqual(expected, result.Value) {
		t.Fatalf("bad: DiffHandling\n\nexpected: %#v\n\ngot: %#v\n\n", expected, result.Value)
	}
}
// Edge cases for DiffFieldReader layered over prior state: a computed string,
// key removal from a list-of-maps and from a map, an in-place update of a set
// element, and a set emptied by the diff.
func TestDiffFieldReader_extra(t *testing.T) {
	schema := map[string]*Schema{
		"stringComputed": &Schema{Type: TypeString},
		"listMap": &Schema{
			Type: TypeList,
			Elem: &Schema{
				Type: TypeMap,
			},
		},
		"mapRemove": &Schema{Type: TypeMap},
		"setChange": &Schema{
			Type:     TypeSet,
			Optional: true,
			Elem: &Resource{
				Schema: map[string]*Schema{
					"index": &Schema{
						Type:     TypeInt,
						Required: true,
					},
					"value": &Schema{
						Type:     TypeString,
						Required: true,
					},
				},
			},
			Set: func(a interface{}) int {
				m := a.(map[string]interface{})
				return m["index"].(int)
			},
		},
		"setEmpty": &Schema{
			Type:     TypeSet,
			Optional: true,
			Elem: &Resource{
				Schema: map[string]*Schema{
					"index": &Schema{
						Type:     TypeInt,
						Required: true,
					},
					"value": &Schema{
						Type:     TypeString,
						Required: true,
					},
				},
			},
			Set: func(a interface{}) int {
				m := a.(map[string]interface{})
				return m["index"].(int)
			},
		},
	}
	r := &DiffFieldReader{
		Schema: schema,
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"stringComputed": &terraform.ResourceAttrDiff{
					Old:         "foo",
					New:         "bar",
					NewComputed: true,
				},
				"listMap.0.bar": &terraform.ResourceAttrDiff{
					NewRemoved: true,
				},
				"mapRemove.bar": &terraform.ResourceAttrDiff{
					NewRemoved: true,
				},
				"setChange.10.value": &terraform.ResourceAttrDiff{
					Old: "50",
					New: "80",
				},
				"setEmpty.#": &terraform.ResourceAttrDiff{
					Old: "2",
					New: "0",
				},
			},
		},
		Source: &MapFieldReader{
			Schema: schema,
			Map: BasicMapReader(map[string]string{
				"listMap.#":          "2",
				"listMap.0.foo":      "bar",
				"listMap.0.bar":      "baz",
				"listMap.1.baz":      "baz",
				"mapRemove.foo":      "bar",
				"mapRemove.bar":      "bar",
				"setChange.#":        "1",
				"setChange.10.index": "10",
				"setChange.10.value": "50",
				"setEmpty.#":         "2",
				"setEmpty.10.index":  "10",
				"setEmpty.10.value":  "50",
				"setEmpty.20.index":  "20",
				"setEmpty.20.value":  "50",
			}),
		},
	}
	cases := map[string]struct {
		Addr   []string
		Result FieldReadResult
		Err    bool
	}{
		"stringComputed": {
			[]string{"stringComputed"},
			FieldReadResult{
				Value:    "",
				Exists:   true,
				Computed: true,
			},
			false,
		},
		"listMapRemoval": {
			[]string{"listMap"},
			FieldReadResult{
				Value: []interface{}{
					map[string]interface{}{
						"foo": "bar",
					},
					map[string]interface{}{
						"baz": "baz",
					},
				},
				Exists: true,
			},
			false,
		},
		"mapRemove": {
			[]string{"mapRemove"},
			FieldReadResult{
				Value: map[string]interface{}{
					"foo": "bar",
				},
				Exists:   true,
				Computed: false,
			},
			false,
		},
		"setChange": {
			[]string{"setChange"},
			FieldReadResult{
				Value: []interface{}{
					map[string]interface{}{
						"index": 10,
						"value": "80",
					},
				},
				Exists: true,
			},
			false,
		},
		"setEmpty": {
			[]string{"setEmpty"},
			FieldReadResult{
				Value:  []interface{}{},
				Exists: true,
			},
			false,
		},
	}
	for name, tc := range cases {
		out, err := r.ReadField(tc.Addr)
		// Parses as (err != nil) != tc.Err: fail only when error presence
		// mismatches the expectation.
		if err != nil != tc.Err {
			t.Fatalf("%s: err: %s", name, err)
		}
		if s, ok := out.Value.(*Set); ok {
			// If it is a set, convert to a list so its more easily checked.
			out.Value = s.List()
		}
		if !reflect.DeepEqual(tc.Result, out) {
			t.Fatalf("%s: bad: %#v", name, out)
		}
	}
}
// Runs the shared field-reader conformance suite (testFieldReader) against a
// DiffFieldReader whose values all come from the diff, with only the listMap
// attribute pre-populated in the underlying state.
func TestDiffFieldReader(t *testing.T) {
	testFieldReader(t, func(s map[string]*Schema) FieldReader {
		return &DiffFieldReader{
			Schema: s,
			Diff: &terraform.InstanceDiff{
				Attributes: map[string]*terraform.ResourceAttrDiff{
					"bool": &terraform.ResourceAttrDiff{
						Old: "",
						New: "true",
					},
					"int": &terraform.ResourceAttrDiff{
						Old: "",
						New: "42",
					},
					"float": &terraform.ResourceAttrDiff{
						Old: "",
						New: "3.1415",
					},
					"string": &terraform.ResourceAttrDiff{
						Old: "",
						New: "string",
					},
					"stringComputed": &terraform.ResourceAttrDiff{
						Old:         "foo",
						New:         "bar",
						NewComputed: true,
					},
					"list.#": &terraform.ResourceAttrDiff{
						Old: "0",
						New: "2",
					},
					"list.0": &terraform.ResourceAttrDiff{
						Old: "",
						New: "foo",
					},
					"list.1": &terraform.ResourceAttrDiff{
						Old: "",
						New: "bar",
					},
					"listInt.#": &terraform.ResourceAttrDiff{
						Old: "0",
						New: "2",
					},
					"listInt.0": &terraform.ResourceAttrDiff{
						Old: "",
						New: "21",
					},
					"listInt.1": &terraform.ResourceAttrDiff{
						Old: "",
						New: "42",
					},
					"map.foo": &terraform.ResourceAttrDiff{
						Old: "",
						New: "bar",
					},
					"map.bar": &terraform.ResourceAttrDiff{
						Old: "",
						New: "baz",
					},
					"mapInt.%": &terraform.ResourceAttrDiff{
						Old: "",
						New: "2",
					},
					"mapInt.one": &terraform.ResourceAttrDiff{
						Old: "",
						New: "1",
					},
					"mapInt.two": &terraform.ResourceAttrDiff{
						Old: "",
						New: "2",
					},
					"mapIntNestedSchema.%": &terraform.ResourceAttrDiff{
						Old: "",
						New: "2",
					},
					"mapIntNestedSchema.one": &terraform.ResourceAttrDiff{
						Old: "",
						New: "1",
					},
					"mapIntNestedSchema.two": &terraform.ResourceAttrDiff{
						Old: "",
						New: "2",
					},
					"mapFloat.%": &terraform.ResourceAttrDiff{
						Old: "",
						New: "1",
					},
					"mapFloat.oneDotTwo": &terraform.ResourceAttrDiff{
						Old: "",
						New: "1.2",
					},
					"mapBool.%": &terraform.ResourceAttrDiff{
						Old: "",
						New: "2",
					},
					"mapBool.True": &terraform.ResourceAttrDiff{
						Old: "",
						New: "true",
					},
					"mapBool.False": &terraform.ResourceAttrDiff{
						Old: "",
						New: "false",
					},
					"set.#": &terraform.ResourceAttrDiff{
						Old: "0",
						New: "2",
					},
					"set.10": &terraform.ResourceAttrDiff{
						Old: "",
						New: "10",
					},
					"set.50": &terraform.ResourceAttrDiff{
						Old: "",
						New: "50",
					},
					"setDeep.#": &terraform.ResourceAttrDiff{
						Old: "0",
						New: "2",
					},
					"setDeep.10.index": &terraform.ResourceAttrDiff{
						Old: "",
						New: "10",
					},
					"setDeep.10.value": &terraform.ResourceAttrDiff{
						Old: "",
						New: "foo",
					},
					"setDeep.50.index": &terraform.ResourceAttrDiff{
						Old: "",
						New: "50",
					},
					"setDeep.50.value": &terraform.ResourceAttrDiff{
						Old: "",
						New: "bar",
					},
				},
			},
			Source: &MapFieldReader{
				Schema: s,
				Map: BasicMapReader(map[string]string{
					"listMap.#":     "2",
					"listMap.0.foo": "bar",
					"listMap.0.bar": "baz",
					"listMap.1.baz": "baz",
				}),
			},
		}
	})
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/legacy/helper/schema/field_reader_diff_test.go
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import socket
from urlparse import urlparse
import os
import struct
import uuid
import sha
import base64
import logging
"""
websocket python client.
=========================
This version support only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
# websocket supported version.
VERSION = 13
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
# getLogger() with no name returns the ROOT logger, so enableTrace() below
# reconfigures root logging for the whole process.
logger = logging.getLogger()
class WebSocketException(Exception):
    """Base exception raised for WebSocket protocol and usage errors."""
# Global default connect timeout in seconds; None means "no default".
default_timeout = None
# Whether wire-level tracing is enabled; toggled by enableTrace().
traceEnabled = False
def enableTrace(tracable):
    """
    Turn traceability on or off.

    tracable: boolean; when True, debug output is enabled on the root
    logger (a StreamHandler is attached if none exists yet).
    """
    global traceEnabled
    traceEnabled = tracable
    if not tracable:
        return
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
def setdefaulttimeout(timeout):
    """
    Set the global default timeout used when connecting.

    timeout: socket timeout in seconds; None disables the default.
    """
    global default_timeout
    default_timeout = timeout
def getdefaulttimeout():
    """
    Return the global default connect timeout in seconds (None if unset).
    """
    return default_timeout
def _parse_url(url):
    """
    Split a ws/wss url into its parts.

    Returns a tuple (hostname, port, resource path, is_secure); the port
    defaults to 80 for "ws" and 443 for "wss" when none is given.
    """
    if ":" not in url:
        raise ValueError("url is invalid")
    scheme, rest = url.split(":", 1)
    parsed = urlparse(rest.rstrip("/"), scheme="http")
    if not parsed.hostname:
        raise ValueError("hostname is invalid")
    hostname = parsed.hostname
    port = parsed.port or 0
    if scheme == "ws":
        is_secure = False
        port = port or 80
    elif scheme == "wss":
        is_secure = True
        port = port or 443
    else:
        raise ValueError("scheme %s is invalid" % scheme)
    resource = parsed.path or "/"
    return (hostname, port, resource, is_secure)
def create_connection(url, timeout=None, **options):
    """
    Connect to url and return the WebSocket object.

    Passing the optional timeout parameter sets the timeout on the socket;
    when timeout is None, the global default returned by getdefaulttimeout()
    is used.  Supported option: "header" — a dict of custom HTTP headers.

    >>> conn = create_connection("ws://echo.websocket.org/",
    ...      headers={"User-Agent": "MyProgram"})
    """
    websock = WebSocket()
    # BUG FIX: the old `timeout != None and timeout or default_timeout` idiom
    # fell back to default_timeout for ANY falsy timeout, so an explicit
    # timeout of 0 (non-blocking) was silently ignored.
    websock.settimeout(timeout if timeout is not None else default_timeout)
    websock.connect(url, **options)
    return websock
_MAX_INTEGER = (1 << 32) -1
# Printable handshake-key characters; Python 2 `range` returns lists, so `+`
# concatenates the two character-code intervals.
_AVAILABLE_KEY_CHARS = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
_MAX_CHAR_BYTE = (1<<8) -1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
    """Return a random Sec-WebSocket-Key value: 16 random bytes, base64-encoded."""
    # b64encode replaces the deprecated base64.encodestring(); it appends no
    # trailing newline, so the result equals encodestring(...).strip().
    uid = uuid.uuid4()
    return base64.b64encode(uid.bytes).strip()
# Response headers (lowercased keys) that must carry these exact values for
# the opening handshake to be accepted.
_HEADERS_TO_CHECK = {
    "upgrade": "websocket",
    "connection": "upgrade",
    }
class _SSLSocketWrapper(object):
    """Adapt the legacy Python 2 `socket.ssl` object to the recv/send
    interface used for plain sockets (read -> recv, write -> send)."""
    def __init__(self, sock):
        self.ssl = socket.ssl(sock)
    def recv(self, bufsize):
        return self.ssl.read(bufsize)
    def send(self, payload):
        return self.ssl.write(payload)
_BOOL_VALUES = (0, 1)


def _is_bool(*values):
    """Return True only when every given value is 0 or 1."""
    return all(v in _BOOL_VALUES for v in values)
class ABNF(object):
    """
    ABNF frame class.
    see http://tools.ietf.org/html/rfc5234
    and http://tools.ietf.org/html/rfc6455#section-5.2
    """

    # operation code values.
    OPCODE_TEXT = 0x1
    OPCODE_BINARY = 0x2
    OPCODE_CLOSE = 0x8
    OPCODE_PING = 0x9
    OPCODE_PONG = 0xa

    # available operation code value tuple
    OPCODES = (OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
               OPCODE_PING, OPCODE_PONG)

    # opcode human readable string
    OPCODE_MAP = {
        OPCODE_TEXT: "text",
        OPCODE_BINARY: "binary",
        OPCODE_CLOSE: "close",
        OPCODE_PING: "ping",
        OPCODE_PONG: "pong"
    }

    # Payload-length thresholds for the 7-bit, 16-bit and 64-bit length
    # encodings of RFC 6455 section 5.2.
    LENGTH_7 = 0x7d
    LENGTH_16 = 1 << 16
    LENGTH_63 = 1 << 63

    def __init__(self, fin = 0, rsv1 = 0, rsv2 = 0, rsv3 = 0,
                 opcode = OPCODE_TEXT, mask = 1, data = ""):
        """
        Constructor for ABNF.
        please check RFC for arguments.
        """
        self.fin = fin
        self.rsv1 = rsv1
        self.rsv2 = rsv2
        self.rsv3 = rsv3
        self.opcode = opcode
        self.mask = mask
        self.data = data
        # Mask keys come from the OS CSPRNG by default; WebSocket may
        # override this hook (see WebSocket.set_mask_key).
        self.get_mask_key = os.urandom

    @staticmethod
    def create_frame(data, opcode):
        """
        create frame to send text, binary and other data.

        data: data to send. This is string value(byte array).
            if opcode is OPCODE_TEXT and this value is unicode,
            data value is converted into utf-8 bytes, automatically.

        opcode: operation code. please see OPCODE_XXX.
        """
        if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):
            data = data.encode("utf-8")
        # mask must be set if send data from client
        return ABNF(1, 0, 0, 0, opcode, 1, data)

    def format(self):
        """
        format this object to string(byte array) to send data to server.
        """
        # FIN and RSV1..3 are single-bit fields.
        if not _is_bool(self.fin, self.rsv1, self.rsv2, self.rsv3):
            raise ValueError("not 0 or 1")
        if self.opcode not in ABNF.OPCODES:
            raise ValueError("Invalid OPCODE")
        length = len(self.data)
        if length >= ABNF.LENGTH_63:
            raise ValueError("data is too long")

        # First header byte: FIN | RSV1..3 | 4-bit opcode.
        frame_header = chr(self.fin << 7
                           | self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
                           | self.opcode)
        if length < ABNF.LENGTH_7:
            # Length fits into the 7-bit field of the second byte.
            frame_header += chr(self.mask << 7 | length)
        elif length < ABNF.LENGTH_16:
            # 0x7e marker followed by a 16-bit big-endian extended length.
            frame_header += chr(self.mask << 7 | 0x7e)
            frame_header += struct.pack("!H", length)
        else:
            # 0x7f marker followed by a 64-bit big-endian extended length.
            frame_header += chr(self.mask << 7 | 0x7f)
            frame_header += struct.pack("!Q", length)

        if not self.mask:
            return frame_header + self.data
        else:
            mask_key = self.get_mask_key(4)
            return frame_header + self._get_masked(mask_key)

    def _get_masked(self, mask_key):
        # Masked payload on the wire: the 4-byte key, then key-XORed data.
        s = ABNF.mask(mask_key, self.data)
        return mask_key + "".join(s)

    @staticmethod
    def mask(mask_key, data):
        """
        mask or unmask data. Just do xor for each byte

        mask_key: 4 byte string(byte).

        data: data to mask/unmask.
        """
        _m = map(ord, mask_key)
        _d = map(ord, data)
        # XOR each payload byte with the mask key, cycling every 4 bytes.
        for i in range(len(_d)):
            _d[i] ^= _m[i % 4]
        s = map(chr, _d)
        return "".join(s)
class WebSocket(object):
    """
    Low level WebSocket interface.
    This class is based on
      The WebSocket protocol draft-hixie-thewebsocketprotocol-76
      http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76

    We can connect to the websocket server and send/receive data.
    The following example is an echo client.

    >>> import websocket
    >>> ws = websocket.WebSocket()
    >>> ws.connect("ws://echo.websocket.org")
    >>> ws.send("Hello, Server")
    >>> ws.recv()
    'Hello, Server'
    >>> ws.close()

    get_mask_key: a callable to produce new mask keys, see the set_mask_key
      function's docstring for more details
    """

    def __init__(self, get_mask_key = None):
        """
        Initialize WebSocket object.
        """
        self.connected = False
        # io_sock is what we actually read/write; for wss:// it is replaced
        # by an SSL wrapper while sock always remains the raw socket.
        self.io_sock = self.sock = socket.socket()
        self.get_mask_key = get_mask_key

    def set_mask_key(self, func):
        """
        set function to create mask key. You can customize mask key generator.
        Mainly, this is for testing purpose.

        func: callable object. the func takes 1 argument as integer.
          The argument means length of mask key.
          This func must return string(byte array),
          which length is argument specified.
        """
        self.get_mask_key = func

    def settimeout(self, timeout):
        """
        Set the timeout to the websocket.

        timeout: timeout time(second).
        """
        self.sock.settimeout(timeout)

    def gettimeout(self):
        """
        Get the websocket timeout(second).
        """
        return self.sock.gettimeout()

    def connect(self, url, **options):
        """
        Connect to url. url is websocket url scheme. ie. ws://host:port/resource
        You can customize using 'options'.
        If you set "headers" dict object, you can set your own custom header.

        >>> ws = WebSocket()
        >>> ws.connect("ws://echo.websocket.org/",
        ...     headers={"User-Agent": "MyProgram"})

        timeout: socket timeout time. This value is integer.
                 if you set None for this value,
                 it means "use default_timeout value"

        options: current support option is only "header".
                 if you set header as dict value,
                 the custom HTTP headers are added.
        """
        hostname, port, resource, is_secure = _parse_url(url)
        # we need to support proxy
        self.sock.connect((hostname, port))
        if is_secure:
            # wss://: wrap the raw socket; the handshake then runs over TLS.
            self.io_sock = _SSLSocketWrapper(self.sock)
        self._handshake(hostname, port, resource, **options)

    def _handshake(self, host, port, resource, **options):
        # RFC 6455 opening handshake: send the HTTP Upgrade request, then
        # require a 101 status and a valid Sec-WebSocket-Accept header.
        sock = self.io_sock
        headers = []
        headers.append("GET %s HTTP/1.1" % resource)
        headers.append("Upgrade: websocket")
        headers.append("Connection: Upgrade")
        # Omit the default port from Host/Origin, per HTTP convention.
        if port == 80:
            hostport = host
        else:
            hostport = "%s:%d" % (host, port)
        headers.append("Host: %s" % hostport)
        headers.append("Origin: %s" % hostport)

        key = _create_sec_websocket_key()
        headers.append("Sec-WebSocket-Key: %s" % key)
        headers.append("Sec-WebSocket-Protocol: chat, superchat")
        headers.append("Sec-WebSocket-Version: %s" % VERSION)
        if "header" in options:
            headers.extend(options["header"])

        # The two empty strings yield the CRLF CRLF that terminates the
        # HTTP request head.
        headers.append("")
        headers.append("")

        header_str = "\r\n".join(headers)
        sock.send(header_str)
        if traceEnabled:
            logger.debug( "--- request header ---")
            logger.debug( header_str)
            logger.debug("-----------------------")

        status, resp_headers = self._read_headers()
        if status != 101:
            self.close()
            raise WebSocketException("Handshake Status %d" % status)

        success = self._validate_header(resp_headers, key)
        if not success:
            self.close()
            raise WebSocketException("Invalid WebSocket Header")

        self.connected = True

    def _validate_header(self, headers, key):
        # Verify the fixed Upgrade/Connection headers ...
        for k, v in _HEADERS_TO_CHECK.iteritems():
            r = headers.get(k, None)
            if not r:
                return False
            r = r.lower()
            if v != r:
                return False

        # ... and Sec-WebSocket-Accept, which must equal the base64 SHA-1 of
        # our key concatenated with the fixed RFC 6455 GUID.
        result = headers.get("sec-websocket-accept", None)
        if not result:
            return False
        result = result.lower()

        value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
        hashed = base64.encodestring(sha.sha(value).digest()).strip().lower()
        return hashed == result

    def _read_headers(self):
        # Read the HTTP status line and headers up to the blank line.
        # Returns (status_code, {lowercased name: lowercased value}).
        status = None
        headers = {}
        if traceEnabled:
            logger.debug("--- response header ---")

        while True:
            line = self._recv_line()
            if line == "\r\n":
                break
            line = line.strip()
            if traceEnabled:
                logger.debug(line)
            if not status:
                # First line is the status line: "HTTP/1.1 101 ...".
                status_info = line.split(" ", 2)
                status = int(status_info[1])
            else:
                kv = line.split(":", 1)
                if len(kv) == 2:
                    key, value = kv
                    headers[key.lower()] = value.strip().lower()
                else:
                    raise WebSocketException("Invalid header")

        if traceEnabled:
            logger.debug("-----------------------")

        return status, headers

    def send(self, payload, opcode = ABNF.OPCODE_TEXT):
        """
        Send the data as string.

        payload: Payload must be utf-8 string or unicode,
                 if the opcode is OPCODE_TEXT.
                 Otherwise, it must be string(byte array)

        opcode: operation code to send. Please see OPCODE_XXX.
        """
        frame = ABNF.create_frame(payload, opcode)
        if self.get_mask_key:
            frame.get_mask_key = self.get_mask_key
        data = frame.format()
        self.io_sock.send(data)
        if traceEnabled:
            logger.debug("send: " + repr(data))

    def ping(self, payload = ""):
        """
        send ping data.

        payload: data payload to send server.
        """
        self.send(payload, ABNF.OPCODE_PING)

    def pong(self, payload):
        """
        send pong data.

        payload: data payload to send server.
        """
        self.send(payload, ABNF.OPCODE_PONG)

    def recv(self):
        """
        Receive string data(byte array) from the server.

        return value: string(byte array) value.
        """
        opcode, data = self.recv_data()
        return data

    def recv_data(self):
        """
        Receive data with operation code.

        return value: tuple of operation code and string(byte array) value.
        """
        # Loop until a data frame arrives; control frames (close/ping) are
        # handled transparently here.
        while True:
            frame = self.recv_frame()
            if not frame:
                # handle error:
                # 'NoneType' object has no attribute 'opcode'
                raise WebSocketException("Not a valid frame %s" % frame)
            elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
                return (frame.opcode, frame.data)
            elif frame.opcode == ABNF.OPCODE_CLOSE:
                self.send_close()
                return (frame.opcode, None)
            elif frame.opcode == ABNF.OPCODE_PING:
                self.pong("Hi!")

    def recv_frame(self):
        """
        receive data as frame from server.

        return value: ABNF frame object.
        """
        # Frame layout (RFC 6455 section 5.2): 2 fixed header bytes, an
        # optional extended length, an optional 4-byte mask key, payload.
        header_bytes = self._recv(2)
        if not header_bytes:
            # Peer closed the connection before a frame arrived.
            return None
        b1 = ord(header_bytes[0])
        fin = b1 >> 7 & 1
        rsv1 = b1 >> 6 & 1
        rsv2 = b1 >> 5 & 1
        rsv3 = b1 >> 4 & 1
        opcode = b1 & 0xf
        b2 = ord(header_bytes[1])
        mask = b2 >> 7 & 1
        length = b2 & 0x7f

        length_data = ""
        if length == 0x7e:
            # 16-bit extended payload length follows.
            length_data = self._recv(2)
            length = struct.unpack("!H", length_data)[0]
        elif length == 0x7f:
            # 64-bit extended payload length follows.
            length_data = self._recv(8)
            length = struct.unpack("!Q", length_data)[0]

        mask_key = ""
        if mask:
            mask_key = self._recv(4)
        data = self._recv_strict(length)
        if traceEnabled:
            recieved = header_bytes + length_data + mask_key + data
            logger.debug("recv: " + repr(recieved))

        if mask:
            # Unmasking is the same XOR operation as masking.
            data = ABNF.mask(mask_key, data)

        frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, mask, data)
        return frame

    def send_close(self, status = STATUS_NORMAL, reason = ""):
        """
        send close data to the server.

        status: status code to send. see STATUS_XXX.

        reason: the reason to close. This must be string.
        """
        if status < 0 or status >= ABNF.LENGTH_16:
            raise ValueError("code is invalid range")
        self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)

    def close(self, status = STATUS_NORMAL, reason = ""):
        """
        Close Websocket object

        status: status code to send. see STATUS_XXX.

        reason: the reason to close. This must be string.
        """
        if self.connected:
            if status < 0 or status >= ABNF.LENGTH_16:
                raise ValueError("code is invalid range")
            try:
                self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
                # Briefly (3s) wait for the server's close frame; failures
                # during this best-effort shutdown are deliberately ignored.
                timeout = self.sock.gettimeout()
                self.sock.settimeout(3)
                try:
                    frame = self.recv_frame()
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.error("close status: " + repr(frame.data))
                except:
                    pass
                self.sock.settimeout(timeout)
                self.sock.shutdown(socket.SHUT_RDWR)
            except:
                pass
        self._closeInternal()

    def _closeInternal(self):
        # Drop connected state, release the raw socket, and reset io_sock so
        # a stale SSL wrapper is never reused.
        self.connected = False
        self.sock.close()
        self.io_sock = self.sock

    def _recv(self, bufsize):
        # One read from the (possibly SSL-wrapped) socket; may return fewer
        # than bufsize bytes, or "" when the peer has closed.
        bytes = self.io_sock.recv(bufsize)
        return bytes

    def _recv_strict(self, bufsize):
        # Keep reading until exactly bufsize bytes have been collected.
        # NOTE(review): if the peer closes mid-frame, _recv returns "" and
        # this loop spins forever -- confirm whether a timeout covers this.
        remaining = bufsize
        bytes = ""
        while remaining:
            bytes += self._recv(remaining)
            remaining = bufsize - len(bytes)
        return bytes

    def _recv_line(self):
        # Read one byte at a time up to and including "\n" (used only for
        # the HTTP handshake response).
        line = []
        while True:
            c = self._recv(1)
            line.append(c)
            if c == "\n":
                break
        return "".join(line)
class WebSocketApp(object):
    """
    Higher level of APIs are provided.
    The interface is like JavaScript WebSocket object.
    """

    def __init__(self, url,
                 on_open = None, on_message = None, on_error = None,
                 on_close = None, keep_running = True, get_mask_key = None):
        """
        url: websocket url.

        on_open: callable object which is called at opening websocket.
          this function has one argument. The argument is this class object.

        on_message: callable object which is called when received data.
          on_message has 2 arguments.
          The 1st argument is this class object.
          The passing 2nd argument is utf-8 string which we get from the server.

        on_error: callable object which is called when we get error.
          on_error has 2 arguments.
          The 1st argument is this class object.
          The passing 2nd argument is exception object.

        on_close: callable object which is called when closed the connection.
          this function has one argument. The argument is this class object.

        keep_running: a boolean flag indicating whether the app's main loop should
          keep running, defaults to True

        get_mask_key: a callable to produce new mask keys, see the WebSocket.set_mask_key's
          docstring for more information
        """
        self.url = url
        self.on_open = on_open
        self.on_message = on_message
        self.on_error = on_error
        self.on_close = on_close
        self.keep_running = keep_running
        self.get_mask_key = get_mask_key
        self.sock = None

    def send(self, data):
        """
        send message. data must be utf-8 string or unicode.
        """
        self.sock.send(data)

    def close(self):
        """
        close websocket connection.
        """
        self.keep_running = False
        self.sock.close()

    def run_forever(self):
        """
        run event loop for WebSocket framework.
        This loop is infinite loop and is alive during websocket is available.
        """
        if self.sock:
            raise WebSocketException("socket is already opened")
        try:
            self.sock = WebSocket(self.get_mask_key)
            self.sock.connect(self.url)
            self._run_with_no_err(self.on_open)
            while self.keep_running:
                data = self.sock.recv()
                # recv() yields None once a close frame has been received.
                if data is None:
                    break
                self._run_with_no_err(self.on_message, data)
        except Exception, e:
            self._run_with_no_err(self.on_error, e)
        finally:
            # Always close the socket and fire on_close, even after errors.
            self.sock.close()
            self._run_with_no_err(self.on_close)
            self.sock = None

    def _run_with_no_err(self, callback, *args):
        # Invoke a user callback, swallowing its exceptions so one failing
        # callback cannot kill the event loop (errors surface at DEBUG).
        if callback:
            try:
                callback(self, *args)
            except Exception, e:
                if logger.isEnabledFor(logging.DEBUG):
                    logger.error(e)
if __name__ == "__main__":
    # Manual smoke test: round-trip one message through the public echo
    # server with protocol tracing enabled.
    enableTrace(True)
    ws = create_connection("ws://echo.websocket.org/")
    print "Sending 'Hello, World'..."
    ws.send("Hello, World")
    print "Sent"
    print "Receiving..."
    result = ws.recv()
    print "Received '%s'" % result
    ws.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import serial
import string
import math
import time
from Tkinter import *
from threading import Timer
comPort = '/dev/ttyACM0'  # default com port (overridable in the GUI entry box)
comPortBaud = 38400  # baud rate expected by the ADNS3080 sensor sketch
class App:
grid_size = 15
num_pixels = 30
image_started = FALSE
image_current_row = 0;
ser = serial.Serial(comPort, comPortBaud)
pixel_dictionary = {}
def __init__(self, master):
# set main window's title
master.title("ADNS3080ImageGrabber")
frame = Frame(master)
frame.grid(row=0,column=0)
self.comPortStr = StringVar()
self.comPort = Entry(frame,textvariable=self.comPortStr)
self.comPort.grid(row=0,column=0)
self.comPort.delete(0, END)
self.comPort.insert(0,comPort)
self.button = Button(frame, text="Open", fg="red", command=self.open_serial)
self.button.grid(row=0,column=1)
self.entryStr = StringVar()
self.entry = Entry(frame,textvariable=self.entryStr)
self.entry.grid(row=0,column=2)
self.entry.delete(0, END)
self.entry.insert(0,"I")
self.send_button = Button(frame, text="Send", command=self.send_to_serial)
self.send_button.grid(row=0,column=3)
self.canvas = Canvas(master, width=self.grid_size*self.num_pixels, height=self.grid_size*self.num_pixels)
self.canvas.grid(row=1)
## start attempts to read from serial port
self.read_loop()
def __del__(self):
self.stop_read_loop()
def open_serial(self):
# close the serial port
if( self.ser.isOpen() ):
try:
self.ser.close()
except:
i=i # do nothing
# open the serial port
try:
self.ser = serial.Serial(port=self.comPortStr.get(),baudrate=comPortBaud, timeout=1)
print("serial port '" + self.comPortStr.get() + "' opened!")
except:
print("failed to open serial port '" + self.comPortStr.get() + "'")
def send_to_serial(self):
if self.ser.isOpen():
self.ser.write(self.entryStr.get())
print "sent '" + self.entryStr.get() + "' to " + self.ser.portstr
else:
print "Serial port not open!"
def read_loop(self):
try:
self.t.cancel()
except:
aVar = 1 # do nothing
#print("reading")
if( self.ser.isOpen() ) :
self.read_from_serial();
self.t = Timer(0.0,self.read_loop)
self.t.start()
def stop_read_loop(self):
try:
self.t.cancel()
except:
print("failed to cancel timer")
# do nothing
def read_from_serial(self):
if( self.ser.isOpen() ):
while( self.ser.inWaiting() > 0 ):
self.line_processed = FALSE
line = self.ser.readline()
# process the line read
print("line starts")
if( line.find("-------------------------") == 0 ):
self.line_processed = TRUE
self.image_started = FALSE
self.image_current_row = 0
else:
self.image_started= TRUE
if( self.image_started == TRUE ):
if( self.image_current_row >= self.num_pixels ):
self.image_started == FALSE
else:
words = line.split()
if len(words) >= 30:
self.line_processed = TRUE
x = 0
for v in words:
try:
colour = int(v)
except:
colour = 0;
#self.display_pixel(x,self.image_current_row,colour)
self.display_pixel(self.num_pixels-1-self.image_current_row,self.num_pixels-1-x,colour)
x += 1
self.image_current_row += 1
else:
print("line " + str(self.image_current_row) + "incomplete (" + str(len(words)) + " of " + str(self.num_pixels) + "), ignoring")
#print("bad line: " + line);
if( line.find("image data") >= 0 ):
self.line_processed = TRUE
self.image_started = TRUE
self.image_current_row = 0
# clear canvas
#self.canvas.delete(ALL) # remove all items
#display the line if we couldn't understand it
# if( self.line_processed == FALSE ):
# print( line )
def display_default_image(self):
# display the grid
for x in range(0, self.num_pixels-1):
for y in range(0, self.num_pixels-1):
colour = x * y / 3.53
self.display_pixel(x,y,colour)
def display_pixel(self, x, y, colour):
if( x >= 0 and x < self.num_pixels and y >= 0 and y < self.num_pixels ) :
#find the old pixel if it exists and delete it
if self.pixel_dictionary.has_key(x+y*self.num_pixels) :
self.old_pixel = self.pixel_dictionary[x+y*self.num_pixels]
self.canvas.delete(self.old_pixel)
del(self.old_pixel)
fillColour = "#%02x%02x%02x" % (colour, colour, colour)
#draw a new pixel and add to pixel_array
self.new_pixel = self.canvas.create_rectangle(x*self.grid_size, y*self.grid_size, (x+1)*self.grid_size, (y+1)*self.grid_size, fill=fillColour)
self.pixel_dictionary[x+y*self.num_pixels] = self.new_pixel
## main loop ##
root = Tk()
#root.withdraw()
#serPort = SerialHandler(comPort,comPortBaud)
# create main display
app = App(root)
app.display_default_image()
print("entering main loop!")
root.mainloop()
# mainloop() returned (window closed): stop the background polling timer.
app.stop_read_loop()
print("exiting")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
    # For static type checkers, expose every public symbol eagerly so
    # `from ...dinov2 import X` resolves.
    from .configuration_dinov2 import *
    from .modeling_dinov2 import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy: the heavy modeling
    # code is only imported when one of its attributes is first accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/models/dinov2/__init__.py
|
"""
make gifti images for participation index and correlations with behav variables
"""
import os,sys
import numpy
import nibabel.gifti.giftiio
from myconnectome.utils import labels_to_gii,load_dataframe
# Root of the myconnectome data tree; must be set in the environment.
basedir=os.environ['MYCONNECTOME_DIR']
rsfmridir=os.path.join(basedir,'rsfmri')


def mk_participation_index_giftis():
    """
    Build GIFTI label images for the mean participation index and for the
    vertex-wise associations with each behavioral variable.
    """
    pidata=numpy.loadtxt(os.path.join(rsfmridir,'PIpos_weighted_louvain_bct.txt'))
    df=load_dataframe.load_dataframe(os.path.join(basedir,'timeseries/out.dat.pindex_behav.txt'),thresh=0.05)
    # Collect one 634-vertex vector per behavioral variable; keys appear to
    # be (vertex-name, variable) tuples like ('V12', var) -- TODO confirm
    # against load_dataframe's output format.
    associations={}
    for v in df.iterkeys():
        if not associations.has_key(v[1]):
            associations[v[1]]=numpy.zeros(634)
        vertexnum=int(v[0].replace('V',''))-1
        associations[v[1]][vertexnum]=df[v][2]
    vars=associations.keys()
    vars.sort()
    # Assemble vertices x variables matrix in sorted-variable order.
    data=numpy.zeros((634,len(vars)))
    for i in range(len(vars)):
        data[:,i]=associations[vars[i]]
    # Keep only the first 620 vertices -- presumably the cortical parcels;
    # verify against the parcellation used elsewhere in the project.
    data=data[:620,:]
    meanpi=numpy.mean(pidata,1)
    # Prepend the mean participation index as the first column.
    data=numpy.hstack((meanpi[:620,None],data))
    vars=['meanPI']+vars
    labels_to_gii.labels_to_gii(data,vars,'PI',basedir=basedir,outdir=rsfmridir)


if __name__ == "__main__":
    mk_participation_index_giftis()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import datetime
import logging
import multiprocessing
import os
import secrets
import shutil
from typing import Any, Dict, Iterable, List, Optional, Tuple
import orjson
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.timezone import now as timezone_now
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier
from analytics.models import RealmCount, StreamCount, UserCount
from zerver.lib.actions import (
UserMessageLite,
bulk_insert_ums,
do_change_avatar_fields,
do_change_plan_type,
)
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users, bulk_set_users_or_streams_recipient_fields
from zerver.lib.export import DATE_FIELDS, Field, Path, Record, TableData, TableName
from zerver.lib.markdown import markdown_convert
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import get_last_message_id
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.streams import render_stream_description
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.upload import BadImageError, get_bucket, guess_type, sanitize_name
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.models import (
AlertWord,
Attachment,
BotConfigData,
BotStorageData,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
Huddle,
Message,
MutedTopic,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
Service,
Stream,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
get_huddle_hash,
get_system_bot,
get_user_profile_by_id,
)
# Tables whose rows belong directly to a realm, with their model class and
# the ID_MAP key used when re-mapping their ids.
realm_tables: List[Tuple[TableName, Any, str]] = [("zerver_defaultstream", DefaultStream, "defaultstream"),
                                                  ("zerver_realmemoji", RealmEmoji, "realmemoji"),
                                                  ("zerver_realmdomain", RealmDomain, "realmdomain"),
                                                  ("zerver_realmfilter", RealmFilter, "realmfilter")]


# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids.  We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP: Dict[str, Dict[int, int]] = {
    'alertword': {},
    'client': {},
    'user_profile': {},
    'huddle': {},
    'realm': {},
    'stream': {},
    'recipient': {},
    'subscription': {},
    'defaultstream': {},
    'reaction': {},
    'realmemoji': {},
    'realmdomain': {},
    'realmfilter': {},
    'message': {},
    'user_presence': {},
    'useractivity': {},
    'useractivityinterval': {},
    'usermessage': {},
    'customprofilefield': {},
    'customprofilefieldvalue': {},
    'attachment': {},
    'realmauditlog': {},
    'recipient_to_huddle_map': {},
    'userhotspot': {},
    'mutedtopic': {},
    'service': {},
    'usergroup': {},
    'usergroupmembership': {},
    'botstoragedata': {},
    'botconfigdata': {},
    'analytics_realmcount': {},
    'analytics_streamcount': {},
    'analytics_usercount': {},
}

# Maps each old huddle id to the list of user_profile ids in that huddle;
# populated by get_huddles_from_subscription.
id_map_to_list: Dict[str, Dict[int, List[int]]] = {
    'huddle_to_user_list': {},
}

# Maps old attachment URL paths to their rewritten (new-realm) paths.
path_maps: Dict[str, Dict[str, str]] = {
    'attachment_path': {},
}
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
    """Record the old->new id translation for one row of `table` in ID_MAP."""
    if table in ID_MAP:
        ID_MAP[table][old_id] = new_id
        return
    raise Exception(f'''
        Table {table} is not initialized in ID_MAP, which could
        mean that we have not thought through circular
        dependencies.
        ''')
def fix_datetime_fields(data: TableData, table: TableName) -> None:
    """Convert exported POSIX timestamps in `table`'s date fields back into
    timezone-aware (UTC) datetime objects, in place."""
    utc = datetime.timezone.utc
    for row in data[table]:
        for field_name in DATE_FIELDS[table]:
            stamp = row[field_name]
            if stamp is not None:
                row[field_name] = datetime.datetime.fromtimestamp(stamp, tz=utc)
def fix_upload_links(data: TableData, message_table: TableName) -> None:
    """
    Because the URLs for uploaded files encode the realm ID of the
    organization being imported (which is only determined at import
    time), we need to rewrite the URLs of links to uploaded files
    during the import process.
    """
    path_fixes = path_maps['attachment_path'].items()
    for message in data[message_table]:
        if message['has_attachment'] is not True:
            continue
        for old_path, new_path in path_fixes:
            if old_path not in message['content']:
                continue
            message['content'] = message['content'].replace(old_path, new_path)
            # rendered_content may be None/empty for unrendered messages.
            if message['rendered_content']:
                message['rendered_content'] = message['rendered_content'].replace(old_path, new_path)
def create_subscription_events(data: TableData, realm_id: int) -> None:
    """
    When the export data doesn't contain the table `zerver_realmauditlog`,
    this function creates RealmAuditLog objects for `subscription_created`
    type event for all the existing Stream subscriptions.

    This is needed for all the export tools which do not include the
    table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
    data about when a user was subscribed is not exported by the third-party
    service.
    """
    all_subscription_logs = []

    event_last_message_id = get_last_message_id()
    event_time = timezone_now()

    # Map recipient id -> stream id, for stream-type recipients only.
    recipient_id_to_stream_id = {
        d['id']: d['type_id']
        for d in data['zerver_recipient']
        if d['type'] == Recipient.STREAM
    }

    for sub in data['zerver_subscription']:
        recipient_id = sub['recipient_id']
        stream_id = recipient_id_to_stream_id.get(recipient_id)

        # Skip non-stream subscriptions (private messages, huddles).
        if stream_id is None:
            continue

        user_id = sub['user_profile_id']
        all_subscription_logs.append(RealmAuditLog(realm_id=realm_id,
                                                   acting_user_id=user_id,
                                                   modified_user_id=user_id,
                                                   modified_stream_id=stream_id,
                                                   event_last_message_id=event_last_message_id,
                                                   event_time=event_time,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_CREATED))
    # One bulk insert instead of a save per row.
    RealmAuditLog.objects.bulk_create(all_subscription_logs)
def fix_service_tokens(data: TableData, table: TableName) -> None:
    """
    The tokens in the services are created by 'generate_api_key'.
    As the tokens are unique, they should be re-created for the imports.
    """
    for service_row in data[table]:
        # Never reuse an exported token; mint a fresh one per service.
        service_row['token'] = generate_api_key()
def process_huddle_hash(data: TableData, table: TableName) -> None:
    """
    Build new huddle hashes with the updated ids of the users
    """
    huddle_user_map = id_map_to_list['huddle_to_user_list']
    for huddle in data[table]:
        # The hash is derived from the (re-mapped) member user ids.
        huddle['huddle_hash'] = get_huddle_hash(huddle_user_map[huddle['id']])
def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
    """
    Extract the IDs of the user_profiles involved in a huddle from the subscription object
    This helps to generate a unique huddle hash from the updated user_profile ids
    """
    recipient_to_huddle = ID_MAP['recipient_to_huddle_map']
    # Start with an empty member list for every known huddle.
    huddle_members = {huddle_id: [] for huddle_id in recipient_to_huddle.values()}
    id_map_to_list['huddle_to_user_list'] = huddle_members
    for subscription in data[table]:
        recipient = subscription['recipient']
        if recipient in recipient_to_huddle:
            huddle_members[recipient_to_huddle[recipient]].append(subscription['user_profile_id'])
def fix_customprofilefield(data: TableData) -> None:
    """
    In CustomProfileField with 'field_type' like 'USER', the IDs need to be
    re-mapped.
    """
    # Collect the ids of all USER-type custom profile fields ...
    field_type_USER_id_list = []
    for item in data['zerver_customprofilefield']:
        if item['field_type'] == CustomProfileField.USER:
            field_type_USER_id_list.append(item['id'])

    # ... and rewrite the JSON-encoded user-id lists stored in their values.
    for item in data['zerver_customprofilefieldvalue']:
        if item['field_id'] in field_type_USER_id_list:
            old_user_id_list = orjson.loads(item['value'])

            new_id_list = re_map_foreign_keys_many_to_many_internal(
                table='zerver_customprofilefieldvalue',
                field_name='value',
                related_table='user_profile',
                old_id_list=old_user_id_list)
            item['value'] = orjson.dumps(new_id_list).decode()
def fix_message_rendered_content(realm: Realm,
                                 sender_map: Dict[int, Record],
                                 messages: List[Record]) -> None:
    """
    This function sets the rendered_content of all the messages
    after the messages have been imported from a non-Zulip platform.

    For messages that already carry rendered_content (Zulip->Zulip
    imports), only the embedded ids in mention markup are re-mapped.
    """
    for message in messages:
        if message['rendered_content'] is not None:
            # For Zulip->Zulip imports, we use the original rendered
            # Markdown; this avoids issues where e.g. a mention can no
            # longer render properly because a user has changed their
            # name.
            #
            # However, we still need to update the data-user-id and
            # similar values stored on mentions, stream mentions, and
            # similar syntax in the rendered HTML.
            soup = BeautifulSoup(message["rendered_content"], "html.parser")

            user_mentions = soup.findAll("span", {"class": "user-mention"})
            if len(user_mentions) != 0:
                user_id_map = ID_MAP["user_profile"]
                for mention in user_mentions:
                    if not mention.has_attr("data-user-id"):
                        # Legacy mentions don't have a data-user-id
                        # field; we should just import them
                        # unmodified.
                        continue
                    if mention['data-user-id'] == "*":
                        # No rewriting is required for wildcard mentions
                        continue
                    old_user_id = int(mention["data-user-id"])
                    if old_user_id in user_id_map:
                        mention["data-user-id"] = str(user_id_map[old_user_id])
                message['rendered_content'] = str(soup)

            stream_mentions = soup.findAll("a", {"class": "stream"})
            if len(stream_mentions) != 0:
                stream_id_map = ID_MAP["stream"]
                for mention in stream_mentions:
                    old_stream_id = int(mention["data-stream-id"])
                    if old_stream_id in stream_id_map:
                        mention["data-stream-id"] = str(stream_id_map[old_stream_id])
                message['rendered_content'] = str(soup)

            user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
            if len(user_group_mentions) != 0:
                user_group_id_map = ID_MAP["usergroup"]
                for mention in user_group_mentions:
                    old_user_group_id = int(mention["data-user-group-id"])
                    if old_user_group_id in user_group_id_map:
                        mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
                message['rendered_content'] = str(soup)
            continue

        try:
            content = message['content']

            sender_id = message['sender_id']
            sender = sender_map[sender_id]
            sent_by_bot = sender['is_bot']
            translate_emoticons = sender['translate_emoticons']

            # We don't handle alert words on import from third-party
            # platforms, since they generally don't have an "alert
            # words" type feature, and notifications aren't important anyway.
            realm_alert_words_automaton = None

            rendered_content = markdown_convert(
                content=content,
                realm_alert_words_automaton=realm_alert_words_automaton,
                message_realm=realm,
                sent_by_bot=sent_by_bot,
                translate_emoticons=translate_emoticons,
            )

            message['rendered_content'] = rendered_content
            message['rendered_content_version'] = markdown_version
        except Exception:
            # This generally happens with two possible causes:
            # * rendering Markdown throwing an uncaught exception
            # * rendering Markdown failing with the exception being
            #   caught inside Markdown itself (which then returns None,
            #   surfacing as an error here).
            logging.warning("Error in Markdown rendering for message ID %s; continuing", message['id'])
def current_table_ids(data: TableData, table: TableName) -> List[int]:
    """
    Returns the ids present in the current table
    """
    return [row["id"] for row in data[table]]
def idseq(model_class: Any) -> str:
    """Return the Postgres id-sequence name backing `model_class`, accounting
    for tables that were renamed after their sequence was created."""
    legacy_sequences = {
        RealmDomain: 'zerver_realmalias_id_seq',
        BotStorageData: 'zerver_botuserstatedata_id_seq',
        BotConfigData: 'zerver_botuserconfigdata_id_seq',
    }
    if model_class in legacy_sequences:
        return legacy_sequences[model_class]
    return f'{model_class._meta.db_table}_id_seq'
def allocate_ids(model_class: Any, count: int) -> List[int]:
    """
    Increases the sequence number for a given table by the amount of objects being
    imported into that table. Hence, this gives a reserved range of IDs to import the
    converted Slack objects into the tables.
    """
    conn = connection.cursor()
    sequence = idseq(model_class)
    # Advance the sequence `count` times in one query; each nextval() row
    # is one reserved id.
    conn.execute("select nextval(%s) from generate_series(1, %s)",
                 [sequence, count])
    query = conn.fetchall()  # Each element in the result is a tuple like (5,)
    conn.close()
    # convert List[Tuple[int]] to List[int]
    return [item[0] for item in query]
def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
    '''
    When Django gives us dict objects via model_to_dict, the foreign
    key fields are `foo`, but we want `foo_id` for the bulk insert.
    This function handles the simple case where we simply rename
    the fields.  For cases where we need to munge ids in the
    database, see re_map_foreign_keys.
    '''
    id_field_name = field_name + "_id"
    for row in data[table]:
        # pop() both reads and removes the old-style key in one step.
        row[id_field_name] = row.pop(field_name)
def re_map_foreign_keys(data: TableData,
                        table: TableName,
                        field_name: Field,
                        related_table: TableName,
                        verbose: bool=False,
                        id_field: bool=False,
                        recipient_field: bool=False,
                        reaction_field: bool=False) -> None:
    """
    Thin wrapper over re_map_foreign_keys_internal for the realm data
    tables (which are keyed by table name inside TableData); avatar and
    attachment records, which are plain lists of records, call the
    internal function directly.
    """
    # See comments in bulk_import_user_message_data.
    assert('usermessage' not in related_table)
    re_map_foreign_keys_internal(
        data[table],
        table,
        field_name,
        related_table,
        verbose=verbose,
        id_field=id_field,
        recipient_field=recipient_field,
        reaction_field=reaction_field,
    )
def re_map_foreign_keys_internal(data_table: List[Record],
                                 table: TableName,
                                 field_name: Field,
                                 related_table: TableName,
                                 verbose: bool=False,
                                 id_field: bool=False,
                                 recipient_field: bool=False,
                                 reaction_field: bool=False) -> None:
    '''
    We occasionally need to assign new ids to rows during the
    import/export process, to accommodate things like existing rows
    already being in tables.  See bulk_import_client for more context.

    The tricky part is making sure that foreign key references
    are in sync with the new ids, and this fixer function does
    the re-mapping.  (It also appends `_id` to the field, unless
    id_field is True.)
    '''
    lookup_table = ID_MAP[related_table]
    for item in data_table:
        old_id = item[field_name]
        if recipient_field:
            # A Recipient row's type_id can point at a stream,
            # user_profile, or huddle, depending on the row's type; only
            # remap when this call's related_table matches the type.
            if related_table == "stream" and item['type'] == 2:
                pass
            elif related_table == "user_profile" and item['type'] == 1:
                pass
            elif related_table == "huddle" and item['type'] == 3:
                # save the recipient id with the huddle id, so that we can extract
                # the user_profile ids involved in a huddle with the help of the
                # subscription object
                # check function 'get_huddles_from_subscription'
                ID_MAP['recipient_to_huddle_map'][item['id']] = lookup_table[old_id]
            else:
                continue
        # (The old code re-read item[field_name] into old_id here; that
        # was redundant, since nothing above modifies item[field_name].)
        if reaction_field:
            # Realm-emoji reactions store the realmemoji id (as a string)
            # in emoji_code; coerce to int before the lookup.
            if item['reaction_type'] == Reaction.REALM_EMOJI:
                old_id = int(old_id)
            else:
                continue
        if old_id in lookup_table:
            new_id = lookup_table[old_id]
            if verbose:
                logging.info('Remapping %s %s from %s to %s',
                             table, field_name + '_id', old_id, new_id)
        else:
            new_id = old_id
        if not id_field:
            item[field_name + "_id"] = new_id
            del item[field_name]
        else:
            if reaction_field:
                # emoji_code is stored as a string in the database.
                item[field_name] = str(new_id)
            else:
                item[field_name] = new_id
def re_map_foreign_keys_many_to_many(data: TableData,
                                     table: TableName,
                                     field_name: Field,
                                     related_table: TableName,
                                     verbose: bool=False) -> None:
    """
    We need to assign new ids to rows during the import/export
    process.

    The tricky part is making sure that foreign key references
    are in sync with the new ids, and this wrapper function does
    the re-mapping only for ManyToMany fields.
    """
    for item in data[table]:
        old_id_list = item[field_name]
        new_id_list = re_map_foreign_keys_many_to_many_internal(
            table, field_name, related_table, old_id_list, verbose)
        item[field_name] = new_id_list
        # The m2m column must be removed from the row before the model is
        # bulk-inserted.  NOTE(review): the remapped list assigned just
        # above is therefore discarded immediately; the m2m data appears
        # to be imported via its own table instead (see the
        # zerver_usergroupmembership handling in do_import_realm).
        # Confirm whether the assignment is dead code.
        del item[field_name]
def re_map_foreign_keys_many_to_many_internal(table: TableName,
                                              field_name: Field,
                                              related_table: TableName,
                                              old_id_list: List[int],
                                              verbose: bool=False) -> List[int]:
    """
    Internal helper for tables with ManyToMany fields: given the old id
    list of an m2m relation, return the corresponding remapped id list.
    Ids with no entry in ID_MAP pass through unchanged.
    """
    remapped = ID_MAP[related_table]
    result: List[int] = []
    for old_id in old_id_list:
        known = old_id in remapped
        new_id = remapped[old_id] if known else old_id
        if known and verbose:
            logging.info('Remapping %s %s from %s to %s',
                         table, field_name + '_id', old_id, new_id)
        result.append(new_id)
    return result
def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
    """Rename the exported `<field>_mask` column back to `<field>`."""
    mask_key = field_name + '_mask'
    for row in data[table]:
        row[field_name] = row.pop(mask_key)
def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:
    """Convert the exported authentication_methods bitfield -- a list of
    (name, enabled) pairs -- into its integer bitmask representation.
    (The old docstring said "string"; the stored value is an int.)
    """
    for row in data[table]:
        # Each pair contributes one bit, in list order: '1' if enabled.
        bitstring = ''.join('1' if method[1] else '0' for method in row[field_name])
        row[field_name] = int(bitstring, 2)
def remove_denormalized_recipient_column_from_data(data: TableData) -> None:
    """
    The recipient column shouldn't be imported, we'll set the correct values
    when Recipient table gets imported.
    """
    for table in ('zerver_stream', 'zerver_userprofile', 'zerver_huddle'):
        for row in data[table]:
            # pop with a default: remove the key only if present.
            row.pop('recipient', None)
def get_db_table(model_class: Any) -> str:
    """Return the model's database table name (e.g. RealmDomain -> 'zerver_realmdomain')."""
    meta = model_class._meta
    return meta.db_table
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
    """Allocate fresh database ids for every row of `model` in `data`,
    record the old->new mapping via update_id_map, and rewrite the rows'
    `id` fields accordingly."""
    table = get_db_table(model)

    # Important: remapping usermessage rows is
    # not only unnecessary, it's expensive and can cause
    # memory errors. We don't even use ids from ID_MAP.
    assert('usermessage' not in table)

    old_id_list = current_table_ids(data, table)
    allocated_id_list = allocate_ids(model, len(data[table]))
    # Pair old and new ids directly; the previous index-based loop named
    # its index `item`, which was misleading.
    for old_id, new_id in zip(old_id_list, allocated_id_list):
        update_id_map(related_table, old_id, new_id)
    re_map_foreign_keys(data, table, 'id', related_table=related_table, id_field=True)
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
    """Import zerver_usermessage rows in batches.

    IMPORTANT NOTE: We do not use any primary id data from either the
    import itself or ID_MAP.  We let the DB itself generate ids.  Note
    that no tables use user_message.id as a foreign key, so we can
    safely avoid all re-mapping complexity.
    """
    model = UserMessage
    table = 'zerver_usermessage'
    lst = data[table]

    def process_batch(items: List[Dict[str, Any]]) -> None:
        # Convert each raw row into a lightweight object for fast bulk insert.
        bulk_insert_ums([
            UserMessageLite(
                user_profile_id=row['user_profile_id'],
                message_id=row['message_id'],
                flags=row['flags'],
            )
            for row in items
        ])

    process_list_in_batches(
        lst=lst,
        chunk_size=10000,
        process_batch=process_batch,
    )

    logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
    """Insert all rows of `model` found in `data` with one bulk_create call."""
    table = get_db_table(model)
    # TODO, deprecate dump_file_id
    model.objects.bulk_create(model(**item) for item in data[table])
    if dump_file_id is not None:
        logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
    else:
        logging.info("Successfully imported %s from %s.", model, table)
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check if a Client object already exists, and so we need to remap
# all Client IDs to the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
    """Import Client rows one at a time, reusing any Client with the same
    name that already exists on this server (Client is shared across
    realms), and record the old->new id mapping."""
    for item in data[table]:
        # get_or_create is the idiomatic (and race-safer) form of the
        # old try: get / except DoesNotExist: create pattern.
        client, _created = Client.objects.get_or_create(name=item['name'])
        update_id_map(table='client', old_id=item['id'], new_id=client.id)
def process_avatars(record: Dict[str, Any]) -> None:
    """Ensure thumbnails exist for one imported avatar record.

    For each `.original` avatar file, regenerate the medium thumbnail
    (and the basic one, when the record asks for it via
    `importer_should_thumbnail`) through the upload backend; if the
    image cannot be thumbnailed, fall back to Gravatar for that user.
    """
    # Imported locally; NOTE(review): presumably to avoid an import cycle
    # and/or to keep this function usable from the multiprocessing pool
    # in import_uploads -- confirm.
    from zerver.lib.upload import upload_backend
    if record['s3_path'].endswith('.original'):
        user_profile = get_user_profile_by_id(record['user_profile_id'])
        if settings.LOCAL_UPLOADS_DIR is not None:
            avatar_path = user_avatar_path_from_ids(user_profile.id, record['realm_id'])
            medium_file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
                                            avatar_path) + '-medium.png'
            if os.path.exists(medium_file_path):
                # We remove the image here primarily to deal with
                # issues when running the import script multiple
                # times in development (where one might reuse the
                # same realm ID from a previous iteration).
                os.remove(medium_file_path)
        try:
            upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
            if record.get("importer_should_thumbnail"):
                upload_backend.ensure_basic_avatar_image(user_profile=user_profile)
        except BadImageError:
            logging.warning(
                "Could not thumbnail avatar image for user %s; ignoring",
                user_profile.id,
            )
            # Delete the record of the avatar to avoid 404s.
            do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=None)
def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_avatars: bool=False,
                   processing_emojis: bool=False, processing_realm_icons: bool=False) -> None:
    """Import one category of uploaded files (message attachments by
    default; avatars, emojis, or realm icons/logos when the
    corresponding flag is set) from `import_dir` into the active
    storage backend (local disk or S3), remapping realm/user ids via
    the records.json manifest along the way.
    """
    if processing_avatars and processing_emojis:
        raise AssertionError("Cannot import avatars and emojis at the same time!")
    if processing_avatars:
        logging.info("Importing avatars")
    elif processing_emojis:
        logging.info("Importing emojis")
    elif processing_realm_icons:
        logging.info("Importing realm icons and logos")
    else:
        logging.info("Importing uploaded files")

    records_filename = os.path.join(import_dir, "records.json")
    with open(records_filename, "rb") as records_file:
        records: List[Dict[str, Any]] = orjson.loads(records_file.read())
    timestamp = datetime_to_timestamp(timezone_now())

    re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
                                 id_field=True)
    if not processing_emojis and not processing_realm_icons:
        re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
                                     related_table="user_profile", id_field=True)

    # LOCAL_UPLOADS_DIR being None means this server stores uploads in S3.
    s3_uploads = settings.LOCAL_UPLOADS_DIR is None

    if s3_uploads:
        if processing_avatars or processing_emojis or processing_realm_icons:
            bucket_name = settings.S3_AVATAR_BUCKET
        else:
            bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
        bucket = get_bucket(bucket_name)

    count = 0
    for record in records:
        count += 1
        if count % 1000 == 0:
            logging.info("Processed %s/%s uploads", count, len(records))

        # First, compute the relative storage path for this file, which
        # depends on the category being imported.
        if processing_avatars:
            # For avatars, we need to rehash the user ID with the
            # new server's avatar salt
            relative_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
            if record['s3_path'].endswith('.original'):
                relative_path += '.original'
            else:
                # TODO: This really should be unconditional. However,
                # until we fix the S3 upload backend to use the .png
                # path suffix for its normal avatar URLs, we need to
                # only do this for the LOCAL_UPLOADS_DIR backend.
                if not s3_uploads:
                    relative_path += '.png'
        elif processing_emojis:
            # For emojis we follow the function 'upload_emoji_image'
            relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
                realm_id=record['realm_id'],
                emoji_file_name=record['file_name'])
            record['last_modified'] = timestamp
        elif processing_realm_icons:
            icon_name = os.path.basename(record["path"])
            relative_path = os.path.join(str(record['realm_id']), "realm", icon_name)
            record['last_modified'] = timestamp
        else:
            # Should be kept in sync with its equivalent in zerver/lib/uploads in the
            # function 'upload_message_file'
            relative_path = "/".join([
                str(record['realm_id']),
                secrets.token_urlsafe(18),
                sanitize_name(os.path.basename(record['path'])),
            ])
            path_maps['attachment_path'][record['s3_path']] = relative_path

        # Then copy the file into the backend (S3 upload with metadata,
        # or a plain copy into LOCAL_UPLOADS_DIR).
        if s3_uploads:
            key = bucket.Object(relative_path)
            metadata = {}
            if processing_emojis and "user_profile_id" not in record:
                # Exported custom emoji from tools like Slack don't have
                # the data for what user uploaded them in `user_profile_id`.
                pass
            elif processing_realm_icons and "user_profile_id" not in record:
                # Exported realm icons and logos from local export don't have
                # the value of user_profile_id in the associated record.
                pass
            else:
                user_profile_id = int(record['user_profile_id'])
                # Support email gateway bot and other cross-realm messages
                if user_profile_id in ID_MAP["user_profile"]:
                    logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
                    user_profile_id = ID_MAP["user_profile"][user_profile_id]
                user_profile = get_user_profile_by_id(user_profile_id)
                metadata["user_profile_id"] = str(user_profile.id)

            if 'last_modified' in record:
                metadata["orig_last_modified"] = str(record['last_modified'])
            metadata["realm_id"] = str(record['realm_id'])

            # Zulip exports will always have a content-type, but third-party exports might not.
            content_type = record.get("content_type")
            if content_type is None:
                content_type = guess_type(record['s3_path'])[0]
                if content_type is None:
                    # This is the default for unknown data. Note that
                    # for `.original` files, this is the value we'll
                    # set; that is OK, because those are never served
                    # directly anyway.
                    content_type = 'application/octet-stream'

            key.upload_file(os.path.join(import_dir, record['path']),
                            ExtraArgs={
                                'ContentType': content_type,
                                'Metadata': metadata})
        else:
            if processing_avatars or processing_emojis or processing_realm_icons:
                file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", relative_path)
            else:
                file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", relative_path)
            orig_file_path = os.path.join(import_dir, record['path'])
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            shutil.copy(orig_file_path, file_path)

    if processing_avatars:
        # Ensure that we have medium-size avatar images for every
        # avatar. TODO: This implementation is hacky, both in that it
        # does get_user_profile_by_id for each user, and in that it
        # might be better to require the export to just have these.
        if processes == 1:
            for record in records:
                process_avatars(record)
        else:
            # Close shared DB/cache connections before forking workers,
            # so each worker opens its own.
            connection.close()
            cache._cache.disconnect_all()
            with multiprocessing.Pool(processes) as p:
                for out in p.imap_unordered(process_avatars, records):
                    pass
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm:
    """Import a complete realm export from `import_dir` as a new realm
    with the given `subdomain`, returning the created Realm.

    Tables are imported in dependency order (see the ordering comment
    above this function); old->new id remapping is tracked via ID_MAP
    through the various re_map_* / update_model_ids helpers.
    """
    logging.info("Importing realm dump %s", import_dir)
    if not os.path.exists(import_dir):
        raise Exception("Missing import directory!")

    realm_data_filename = os.path.join(import_dir, "realm.json")
    if not os.path.exists(realm_data_filename):
        raise Exception("Missing realm.json file!")

    if not server_initialized():
        create_internal_realm()

    logging.info("Importing realm data from %s", realm_data_filename)
    with open(realm_data_filename, "rb") as f:
        data = orjson.loads(f.read())
    remove_denormalized_recipient_column_from_data(data)

    sort_by_date = data.get('sort_by_date', False)

    bulk_import_client(data, Client, 'zerver_client')

    # We don't import the Stream model yet, since it depends on Realm,
    # which isn't imported yet. But we need the Stream model IDs for
    # notifications_stream.
    update_model_ids(Stream, data, 'stream')
    re_map_foreign_keys(data, 'zerver_realm', 'notifications_stream', related_table="stream")
    re_map_foreign_keys(data, 'zerver_realm', 'signup_notifications_stream', related_table="stream")

    fix_datetime_fields(data, 'zerver_realm')
    # Fix realm subdomain information
    data['zerver_realm'][0]['string_id'] = subdomain
    data['zerver_realm'][0]['name'] = subdomain
    fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
    update_model_ids(Realm, data, 'realm')

    realm = Realm(**data['zerver_realm'][0])

    # Temporarily null out the notification-stream ids: the Stream rows
    # they point at don't exist yet.  They are restored right after the
    # streams are bulk-imported below.
    if realm.notifications_stream_id is not None:
        notifications_stream_id: Optional[int] = int(realm.notifications_stream_id)
    else:
        notifications_stream_id = None
    realm.notifications_stream_id = None
    if realm.signup_notifications_stream_id is not None:
        signup_notifications_stream_id: Optional[int] = int(realm.signup_notifications_stream_id)
    else:
        signup_notifications_stream_id = None
    realm.signup_notifications_stream_id = None
    realm.save()

    # Email tokens will automatically be randomly generated when the
    # Stream objects are created by Django.
    fix_datetime_fields(data, 'zerver_stream')
    re_map_foreign_keys(data, 'zerver_stream', 'realm', related_table="realm")
    # Handle rendering of stream descriptions for import from non-Zulip
    for stream in data['zerver_stream']:
        if 'rendered_description' in stream:
            continue
        stream["rendered_description"] = render_stream_description(stream["description"])
    bulk_import_model(data, Stream)

    # Streams exist now; restore the notification-stream pointers.
    realm.notifications_stream_id = notifications_stream_id
    realm.signup_notifications_stream_id = signup_notifications_stream_id
    realm.save()

    # Remap the user IDs for notification_bot and friends to their
    # appropriate IDs on this server
    for item in data['zerver_userprofile_crossrealm']:
        logging.info("Adding to ID map: %s %s", item['id'], get_system_bot(item['email']).id)
        new_user_id = get_system_bot(item['email']).id
        update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
        new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
        update_id_map(table='recipient', old_id=item['recipient_id'], new_id=new_recipient_id)

    # Merge in zerver_userprofile_mirrordummy
    data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
    del data['zerver_userprofile_mirrordummy']
    data['zerver_userprofile'].sort(key=lambda r: r['id'])

    # To remap foreign key for UserProfile.last_active_message_id
    update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)

    fix_datetime_fields(data, 'zerver_userprofile')
    update_model_ids(UserProfile, data, 'user_profile')
    re_map_foreign_keys(data, 'zerver_userprofile', 'realm', related_table="realm")
    re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_userprofile', 'default_sending_stream',
                        related_table="stream")
    re_map_foreign_keys(data, 'zerver_userprofile', 'default_events_register_stream',
                        related_table="stream")
    re_map_foreign_keys(data, 'zerver_userprofile', 'last_active_message_id',
                        related_table="message", id_field=True)
    for user_profile_dict in data['zerver_userprofile']:
        # Imported users get no password and a fresh API key.
        user_profile_dict['password'] = None
        user_profile_dict['api_key'] = generate_api_key()
        # Since Zulip doesn't use these permissions, drop them
        del user_profile_dict['user_permissions']
        del user_profile_dict['groups']
        # The short_name field is obsolete in Zulip, but it's
        # convenient for third party exports to populate it.
        if 'short_name' in user_profile_dict:
            del user_profile_dict['short_name']

    user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
    for user_profile in user_profiles:
        user_profile.set_unusable_password()
    UserProfile.objects.bulk_create(user_profiles)

    re_map_foreign_keys(data, 'zerver_defaultstream', 'stream', related_table="stream")
    re_map_foreign_keys(data, 'zerver_realmemoji', 'author', related_table="user_profile")
    for (table, model, related_table) in realm_tables:
        re_map_foreign_keys(data, table, 'realm', related_table="realm")
        update_model_ids(model, data, related_table)
        bulk_import_model(data, model)

    if 'zerver_huddle' in data:
        update_model_ids(Huddle, data, 'huddle')
        # We don't import Huddle yet, since we don't have the data to
        # compute huddle hashes until we've imported some of the
        # tables below.
        # TODO: double-check this.

    re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="stream",
                        recipient_field=True, id_field=True)
    re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="user_profile",
                        recipient_field=True, id_field=True)
    re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="huddle",
                        recipient_field=True, id_field=True)
    update_model_ids(Recipient, data, 'recipient')
    bulk_import_model(data, Recipient)
    bulk_set_users_or_streams_recipient_fields(Stream, Stream.objects.filter(realm=realm))
    bulk_set_users_or_streams_recipient_fields(UserProfile, UserProfile.objects.filter(realm=realm))

    re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
    get_huddles_from_subscription(data, 'zerver_subscription')
    re_map_foreign_keys(data, 'zerver_subscription', 'recipient', related_table="recipient")
    update_model_ids(Subscription, data, 'subscription')
    bulk_import_model(data, Subscription)

    if 'zerver_realmauditlog' in data:
        fix_datetime_fields(data, 'zerver_realmauditlog')
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'realm', related_table="realm")
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_user',
                            related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'acting_user',
                            related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_stream',
                            related_table="stream")
        update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
        bulk_import_model(data, RealmAuditLog)
    else:
        # Exports without an audit log get synthesized subscription events.
        logging.info('about to call create_subscription_events')
        create_subscription_events(
            data=data,
            realm_id=realm.id,
        )
        logging.info('done with create_subscription_events')

    if 'zerver_huddle' in data:
        process_huddle_hash(data, 'zerver_huddle')
        bulk_import_model(data, Huddle)
        for huddle in Huddle.objects.filter(recipient_id=None):
            recipient = Recipient.objects.get(type=Recipient.HUDDLE, type_id=huddle.id)
            huddle.recipient = recipient
            huddle.save(update_fields=["recipient"])

    if 'zerver_alertword' in data:
        re_map_foreign_keys(data, 'zerver_alertword', 'user_profile', related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_alertword', 'realm', related_table='realm')
        update_model_ids(AlertWord, data, 'alertword')
        bulk_import_model(data, AlertWord)

    if 'zerver_userhotspot' in data:
        fix_datetime_fields(data, 'zerver_userhotspot')
        re_map_foreign_keys(data, 'zerver_userhotspot', 'user', related_table='user_profile')
        update_model_ids(UserHotspot, data, 'userhotspot')
        bulk_import_model(data, UserHotspot)

    if 'zerver_mutedtopic' in data:
        fix_datetime_fields(data, 'zerver_mutedtopic')
        re_map_foreign_keys(data, 'zerver_mutedtopic', 'user_profile', related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_mutedtopic', 'stream', related_table='stream')
        re_map_foreign_keys(data, 'zerver_mutedtopic', 'recipient', related_table='recipient')
        update_model_ids(MutedTopic, data, 'mutedtopic')
        bulk_import_model(data, MutedTopic)

    if 'zerver_service' in data:
        re_map_foreign_keys(data, 'zerver_service', 'user_profile', related_table='user_profile')
        fix_service_tokens(data, 'zerver_service')
        update_model_ids(Service, data, 'service')
        bulk_import_model(data, Service)

    if 'zerver_usergroup' in data:
        re_map_foreign_keys(data, 'zerver_usergroup', 'realm', related_table='realm')
        re_map_foreign_keys_many_to_many(data, 'zerver_usergroup',
                                         'members', related_table='user_profile')
        update_model_ids(UserGroup, data, 'usergroup')
        bulk_import_model(data, UserGroup)

        re_map_foreign_keys(data, 'zerver_usergroupmembership',
                            'user_group', related_table='usergroup')
        re_map_foreign_keys(data, 'zerver_usergroupmembership',
                            'user_profile', related_table='user_profile')
        update_model_ids(UserGroupMembership, data, 'usergroupmembership')
        bulk_import_model(data, UserGroupMembership)

    if 'zerver_botstoragedata' in data:
        re_map_foreign_keys(data, 'zerver_botstoragedata', 'bot_profile', related_table='user_profile')
        update_model_ids(BotStorageData, data, 'botstoragedata')
        bulk_import_model(data, BotStorageData)

    if 'zerver_botconfigdata' in data:
        re_map_foreign_keys(data, 'zerver_botconfigdata', 'bot_profile', related_table='user_profile')
        update_model_ids(BotConfigData, data, 'botconfigdata')
        bulk_import_model(data, BotConfigData)

    fix_datetime_fields(data, 'zerver_userpresence')
    re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
    re_map_foreign_keys(data, 'zerver_userpresence', 'realm', related_table="realm")
    update_model_ids(UserPresence, data, 'user_presence')
    bulk_import_model(data, UserPresence)

    fix_datetime_fields(data, 'zerver_useractivity')
    re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
    update_model_ids(UserActivity, data, 'useractivity')
    bulk_import_model(data, UserActivity)

    fix_datetime_fields(data, 'zerver_useractivityinterval')
    re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
    update_model_ids(UserActivityInterval, data, 'useractivityinterval')
    bulk_import_model(data, UserActivityInterval)

    re_map_foreign_keys(data, 'zerver_customprofilefield', 'realm', related_table="realm")
    update_model_ids(CustomProfileField, data, related_table="customprofilefield")
    bulk_import_model(data, CustomProfileField)

    re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'user_profile',
                        related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'field',
                        related_table="customprofilefield")
    fix_customprofilefield(data)
    update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
    bulk_import_model(data, CustomProfileFieldValue)

    # Import uploaded files and avatars
    import_uploads(realm, os.path.join(import_dir, "avatars"), processes, processing_avatars=True)
    import_uploads(realm, os.path.join(import_dir, "uploads"), processes)

    # We need to have this check as the emoji files are only present in the data
    # importer from Slack
    # For Zulip export, this doesn't exist
    if os.path.exists(os.path.join(import_dir, "emoji")):
        import_uploads(realm, os.path.join(import_dir, "emoji"), processes, processing_emojis=True)

    if os.path.exists(os.path.join(import_dir, "realm_icons")):
        import_uploads(realm, os.path.join(import_dir, "realm_icons"), processes,
                       processing_realm_icons=True)

    sender_map = {
        user['id']: user
        for user in data['zerver_userprofile']
    }

    # Import zerver_message and zerver_usermessage
    import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)

    re_map_foreign_keys(data, 'zerver_reaction', 'message', related_table="message")
    re_map_foreign_keys(data, 'zerver_reaction', 'user_profile', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_reaction', 'emoji_code', related_table="realmemoji", id_field=True,
                        reaction_field=True)
    update_model_ids(Reaction, data, 'reaction')
    bulk_import_model(data, Reaction)

    # Similarly, we need to recalculate the first_message_id for stream objects.
    for stream in Stream.objects.filter(realm=realm):
        recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
        first_message = Message.objects.filter(recipient=recipient).first()
        if first_message is None:
            stream.first_message_id = None
        else:
            stream.first_message_id = first_message.id
        stream.save(update_fields=["first_message_id"])

    # Do attachments AFTER message data is loaded.
    # TODO: de-dup how we read these json files.
    fn = os.path.join(import_dir, "attachment.json")
    if not os.path.exists(fn):
        raise Exception("Missing attachment.json file!")
    logging.info("Importing attachment data from %s", fn)
    with open(fn, "rb") as f:
        data = orjson.loads(f.read())
    import_attachments(data)

    # Import the analytics file.
    import_analytics_data(realm=realm, import_dir=import_dir)

    if settings.BILLING_ENABLED:
        do_change_plan_type(realm, Realm.LIMITED)
    else:
        do_change_plan_type(realm, Realm.SELF_HOSTED)
    return realm
# create_users and do_import_system_bots differ from their equivalents
# in zerver/lib/server_initialization.py because here we only create
# users for bots that don't already exist.
def do_import_system_bots(realm: Any) -> None:
    """Create any missing internal/system bots for the imported realm."""
    internal_bots = [
        (bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
        for bot in settings.INTERNAL_BOTS
    ]
    create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
    print("Finished importing system bots.")
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
                 bot_type: Optional[int]=None) -> None:
    """Bulk-create the given (full_name, email) users in `realm`,
    skipping any email that already has a UserProfile."""
    user_set = set()
    for full_name, email in name_list:
        # .exists() issues an EXISTS query instead of fetching rows just
        # to test queryset truthiness.
        if not UserProfile.objects.filter(email=email).exists():
            user_set.add((email, full_name, True))
    bulk_create_users(realm, user_set, bot_type)
def update_message_foreign_keys(import_dir: Path,
                                sort_by_date: bool) -> None:
    """Reserve new ids for every incoming message and record the
    old->new mapping.

    We don't touch user_message keys here; that happens later, when we
    actually read the files a second time to get the real data.
    """
    old_id_list = get_incoming_message_ids(
        import_dir=import_dir,
        sort_by_date=sort_by_date,
    )
    new_id_list = allocate_ids(model_class=Message, count=len(old_id_list))
    for old_id, new_id in zip(old_id_list, new_id_list):
        update_id_map(
            table='message',
            old_id=old_id,
            new_id=new_id,
        )
def get_incoming_message_ids(import_dir: Path,
                             sort_by_date: bool) -> List[int]:
    '''
    This function reads in our entire collection of message
    ids, which can be millions of integers for some installations.
    And then we sort the list.  This is necessary to ensure
    that the sort order of incoming ids matches the sort order
    of date_sent, which isn't always guaranteed by our
    utilities that convert third party chat data.  We also
    need to move our ids to a new range if we're dealing
    with a server that has data for other realms.
    '''
    # When sorting by date we collect (date_sent, message_id) pairs so a
    # plain tuple sort orders primarily by send time; otherwise we just
    # gather ids in file order.
    if sort_by_date:
        tups: List[Tuple[int, int]] = []
    else:
        message_ids: List[int] = []

    dump_file_id = 1
    while True:
        message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
        if not os.path.exists(message_filename):
            break

        with open(message_filename, "rb") as f:
            data = orjson.loads(f.read())

        # Aggressively free up memory.
        del data['zerver_usermessage']
        for row in data['zerver_message']:
            message_id = row['id']
            if sort_by_date:
                # We truncate date_sent to int to theoretically
                # save memory and speed up the sort.  For
                # Zulip-to-Zulip imports, the message_id will
                # generally be a good tiebreaker; if we occasionally
                # mis-order two messages from the same second, it's
                # not the end of the world, as those likely arrived
                # at the original server in somewhat arbitrary order.
                tups.append((int(row['date_sent']), message_id))
            else:
                message_ids.append(message_id)

        dump_file_id += 1

    if sort_by_date:
        tups.sort()
        message_ids = [t[1] for t in tups]
    return message_ids
def import_message_data(realm: Realm,
                        sender_map: Dict[int, Record],
                        import_dir: Path) -> None:
    """Import zerver_message and zerver_usermessage rows from the
    numbered messages-NNNNNN.json dump files, one batch file at a time.
    """
    dump_file_id = 1
    while True:
        message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
        if not os.path.exists(message_filename):
            break

        with open(message_filename, "rb") as f:
            data = orjson.loads(f.read())

        logging.info("Importing message dump %s", message_filename)
        re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
        re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
        re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
        fix_datetime_fields(data, 'zerver_message')
        # Parser to update message content with the updated attachment URLs
        fix_upload_links(data, 'zerver_message')

        # We already create mappings for zerver_message ids
        # in update_message_foreign_keys(), so here we simply
        # apply them.
        message_id_map = ID_MAP['message']
        for row in data['zerver_message']:
            row['id'] = message_id_map[row['id']]

        # Sanity check: every usermessage must reference a known message.
        for row in data['zerver_usermessage']:
            assert(row['message'] in message_id_map)

        fix_message_rendered_content(
            realm=realm,
            sender_map=sender_map,
            messages=data['zerver_message'],
        )
        logging.info("Successfully rendered Markdown for message batch")

        # A LOT HAPPENS HERE.
        # This is where we actually import the message data.
        bulk_import_model(data, Message)

        # Due to the structure of these message chunks, we're
        # guaranteed to have already imported all the Message objects
        # for this batch of UserMessage objects.
        re_map_foreign_keys(data, 'zerver_usermessage', 'message', related_table="message")
        re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
        fix_bitfield_keys(data, 'zerver_usermessage', 'flags')

        bulk_import_user_message_data(data, dump_file_id)
        dump_file_id += 1
def import_attachments(data: TableData) -> None:
    """Import Attachment rows and their attachment<->message
    many-to-many rows.

    The zerver_attachment rows in `data` carry their related message
    ids inline (under 'messages'); we split those out into
    zerver_attachment_messages rows and insert them with a raw SQL
    INSERT after the parent Attachment rows have been loaded.
    """
    # Clean up the data in zerver_attachment that is not
    # relevant to our many-to-many import.
    fix_datetime_fields(data, 'zerver_attachment')
    re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_attachment', 'realm', related_table="realm")
    # Configure ourselves. Django models many-to-many (m2m)
    # relations asymmetrically. The parent here refers to the
    # Model that has the ManyToManyField. It is assumed here
    # the child models have been loaded, but we are in turn
    # responsible for loading the parents and the m2m rows.
    parent_model = Attachment
    parent_db_table_name = 'zerver_attachment'
    parent_singular = 'attachment'
    child_singular = 'message'
    child_plural = 'messages'
    m2m_table_name = 'zerver_attachment_messages'
    parent_id = 'attachment_id'
    child_id = 'message_id'
    update_model_ids(parent_model, data, 'attachment')
    # We don't bulk_import_model yet, because we need to first compute
    # the many-to-many for this table.
    # First, build our list of many-to-many (m2m) rows.
    # We do this in a slightly convoluted way to anticipate
    # a future where we may need to call re_map_foreign_keys.
    m2m_rows: List[Record] = []
    for parent_row in data[parent_db_table_name]:
        for fk_id in parent_row[child_plural]:
            m2m_row: Record = {}
            m2m_row[parent_singular] = parent_row['id']
            # Message ids were remapped earlier; translate the fk here.
            m2m_row[child_singular] = ID_MAP['message'][fk_id]
            m2m_rows.append(m2m_row)
    # Create our table data for insert.
    m2m_data: TableData = {m2m_table_name: m2m_rows}
    convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
    convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
    m2m_rows = m2m_data[m2m_table_name]
    # Next, delete out our child data from the parent rows.
    for parent_row in data[parent_db_table_name]:
        del parent_row[child_plural]
    # Update 'path_id' for the attachments
    for attachment in data[parent_db_table_name]:
        attachment['path_id'] = path_maps['attachment_path'][attachment['path_id']]
    # Next, load the parent rows.
    bulk_import_model(data, parent_model)
    # Now, go back to our m2m rows.
    # TODO: Do this the kosher Django way. We may find a
    # better way to do this in Django 1.9 particularly.
    with connection.cursor() as cursor:
        sql_template = SQL('''
            INSERT INTO {m2m_table_name} ({parent_id}, {child_id}) VALUES %s
        ''').format(
            m2m_table_name=Identifier(m2m_table_name),
            parent_id=Identifier(parent_id),
            child_id=Identifier(child_id),
        )
        tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
        execute_values(cursor.cursor, sql_template, tups)
    logging.info('Successfully imported M2M table %s', m2m_table_name)
def import_analytics_data(realm: Realm, import_dir: Path) -> None:
    """Import RealmCount, UserCount and StreamCount rows from
    analytics.json, if that file exists in the export."""
    analytics_filename = os.path.join(import_dir, "analytics.json")
    if not os.path.exists(analytics_filename):
        return
    logging.info("Importing analytics data from %s", analytics_filename)
    with open(analytics_filename, "rb") as f:
        data = orjson.loads(f.read())
    # Each count table goes through the same pipeline: fix datetimes,
    # remap the realm fk, remap one optional extra fk, allocate ids,
    # and bulk-insert.  (table, model, extra (field, related_table)).
    count_tables = [
        ('analytics_realmcount', RealmCount, None),
        ('analytics_usercount', UserCount, ('user', 'user_profile')),
        ('analytics_streamcount', StreamCount, ('stream', 'stream')),
    ]
    for table_name, model, extra_fk in count_tables:
        fix_datetime_fields(data, table_name)
        re_map_foreign_keys(data, table_name, 'realm', related_table="realm")
        if extra_fk is not None:
            field_name, related = extra_fk
            re_map_foreign_keys(data, table_name, field_name, related_table=related)
        update_model_ids(model, data, table_name)
        bulk_import_model(data, model)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import async_slice
from telemetry.timeline import model as model_module
from telemetry.web_perf.metrics import mainthread_jank_stats
from telemetry.web_perf import timeline_interaction_record as tir_module
class MainthreadJankTests(unittest.TestCase):
  """Unit tests for mainthread_jank_stats.

  Note: uses assertEqual rather than the deprecated assertEquals
  alias, which was removed in Python 3.12.
  """

  def CreateTestRecord(self, name, start, end, thread_start, thread_end,
                       parent_thread):
    """Builds a TimelineInteractionRecord from a synthetic async slice."""
    s = async_slice.AsyncSlice(
        'cat', 'Interaction.%s' % name,
        timestamp=start, duration=end - start, start_thread=parent_thread,
        end_thread=parent_thread, thread_start=thread_start,
        thread_duration=thread_end - thread_start)
    return tir_module.TimelineInteractionRecord.FromAsyncEvent(s)

  def testComputeMainthreadJankStatsForRecord(self):
    # The slice hierarchy should look something like this:
    # [ MessageLoop::RunTask ]     [MessageLoop::RunTask][ MessagLoop::RunTask ]
    #       [  foo  ]                          [ bar ]
    #   |                                                |
    # 200ms                                            800ms
    # (thread_start)                                (thread_end)
    #
    # Note: all timings mentioned here and in comments below are thread time.
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    #      [  MessageLoop::RunTask ]
    #     100ms                   300ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 112, 100)
    renderer_main.EndSlice(240, 300)
    #      [  MessageLoop::RunTask  ]
    #     450ms  [  foo  ]        475 ms
    #          460ms    470ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 462, 450)
    renderer_main.BeginSlice('otherlevel', 'foo', 468, 460)
    renderer_main.EndSlice(475, 470)
    renderer_main.EndSlice(620, 475)
    #      [  MessageLoop::RunTask  ]
    #     620ms  [  bar  ]        900ms
    #          750ms    850ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 652, 620)
    renderer_main.BeginSlice('otherlevel', 'bar', 785, 750)
    renderer_main.EndSlice(875, 850)
    renderer_main.EndSlice(1040, 900)
    model.FinalizeImport(shift_world_to_zero=False)
    # Make a record that starts at 200ms and ends at 800ms in thread time
    record = self.CreateTestRecord('test', 100, 700, 200, 800, renderer_main)
    # pylint: disable=W0212
    stat = mainthread_jank_stats._ComputeMainthreadJankStatsForRecord(
        renderer_main, record)
    # The overlapped between thread time range(200ms -> 800ms)
    # with the first top slice (100ms -> 300ms) is 300 - 200 = 100ms,
    # with the second slice (450ms -> 475ms) is 475 - 450 = 25 ms,
    # with the third slice (620ms -> 900ms) is 800 - 620 = 180 ms.
    #
    # Hence we have 2 big top slices which overlapped duration > 50ms,
    # the biggest top slice is 180ms, and the total big top slice's thread time
    # is 100 + 180 = 280ms.
    self.assertEqual(180, stat.biggest_top_slice_thread_time)
    self.assertEqual(280, stat.sum_big_top_slices_thread_time)

  def testMainthreadJankStats(self):
    # [ MessageLoop::RunTask]  [MessageLoop::RunTask]   [MessagLoop::RunTask]
    # 10                   100 120                 400  450               750
    #     [  record_1  ]    [  record_2  ]                [ record_3 ]
    #    40            70  120           200            220           900
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    #      [  MessageLoop::RunTask ]
    #     10ms                   100ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 12, 10)
    renderer_main.EndSlice(120, 100)
    #      [  MessageLoop::RunTask ]
    #     120ms                  200ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 115, 120)
    renderer_main.EndSlice(410, 400)
    #      [  MessageLoop::RunTask ]
    #     220ms                  900ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 477, 450)
    renderer_main.EndSlice(772, 750)
    model.FinalizeImport(shift_world_to_zero=False)
    test_records = [
        self.CreateTestRecord('record_1', 10, 80, 40, 70, renderer_main),
        self.CreateTestRecord('record_2', 100, 210, 120, 200, renderer_main),
        self.CreateTestRecord('record_3', 215, 920, 220, 900, renderer_main)
    ]
    stats = mainthread_jank_stats.MainthreadJankStats(
        renderer_main, test_records)
    # Main thread janks covered by records' ranges are:
    # Record 1: (40ms -> 70ms)
    # Record 2: (120ms -> 200ms)
    # Record 3: (220ms -> 400ms), (450ms -> 750ms)
    self.assertEqual(560, stats.total_big_jank_thread_time)
    self.assertEqual(300, stats.biggest_jank_thread_time)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* The copyright in this software is being made available under the 2-clauses
* BSD License, included below. This software may be subject to other third
* party and contributor rights, including patent rights, and no such rights
* are granted under this license.
*
* Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2014, Professor Benoit Macq
* Copyright (c) 2001-2003, David Janssens
* Copyright (c) 2002-2003, Yannick Verschueren
* Copyright (c) 2003-2007, Francois-Olivier Devaux
* Copyright (c) 2003-2014, Antonin Descampe
* Copyright (c) 2005, Herve Drolon, FreeImage Team
* Copyright (c) 2008, 2011-2012, Centre National d'Etudes Spatiales (CNES), FR
* Copyright (c) 2012, CS Systemes d'Information, France
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef OPJ_CIO_H
#define OPJ_CIO_H
/**
@file cio.h
@brief Implementation of a byte input-output process (CIO)
The functions in CIO.C have for goal to realize a byte input / output process.
*/
/** @defgroup CIO CIO - byte input-output stream */
/*@{*/
#include "opj_config_private.h"
/* ----------------------------------------------------------------------- */
#if defined(OPJ_BIG_ENDIAN)
#define opj_write_bytes opj_write_bytes_BE
#define opj_read_bytes opj_read_bytes_BE
#define opj_write_double opj_write_double_BE
#define opj_read_double opj_read_double_BE
#define opj_write_float opj_write_float_BE
#define opj_read_float opj_read_float_BE
#else
#define opj_write_bytes opj_write_bytes_LE
#define opj_read_bytes opj_read_bytes_LE
#define opj_write_double opj_write_double_LE
#define opj_read_double opj_read_double_LE
#define opj_write_float opj_write_float_LE
#define opj_read_float opj_read_float_LE
#endif
#define OPJ_STREAM_STATUS_OUTPUT 0x1U
#define OPJ_STREAM_STATUS_INPUT 0x2U
#define OPJ_STREAM_STATUS_END 0x4U
#define OPJ_STREAM_STATUS_ERROR 0x8U
/**
Byte input-output stream.
*/
typedef struct opj_stream_private {
    /**
     * User data, be it files, ... The actual data depends on the type of the stream.
     */
    void * m_user_data;
    /**
     * Pointer to function to free m_user_data (NULL at initialization)
     * when destroying the stream. If pointer is NULL the function is not
     * called and the m_user_data is not freed (even if non-NULL).
     */
    opj_stream_free_user_data_fn m_free_user_data_fn;
    /**
     * User data length
     */
    OPJ_UINT64 m_user_data_length;
    /**
     * Pointer to actual read function (NULL at the initialization of the cio).
     */
    opj_stream_read_fn m_read_fn;
    /**
     * Pointer to actual write function (NULL at the initialization of the cio).
     */
    opj_stream_write_fn m_write_fn;
    /**
     * Pointer to actual skip function (NULL at the initialization of the cio).
     * There is no seek function to prevent from back and forth slow procedures.
     */
    opj_stream_skip_fn m_skip_fn;
    /**
     * Pointer to actual seek function (if available).
     */
    opj_stream_seek_fn m_seek_fn;
    /**
     * Actual data stored into the stream if read from. Data is read by chunk of fixed size.
     * you should never access this data directly.
     */
    OPJ_BYTE * m_stored_data;
    /**
     * Pointer to the current read data.
     */
    OPJ_BYTE * m_current_data;
    /**
     * Internal skip implementation for this stream.
     * NOTE(review): presumably set to the read- or write-mode skip
     * variant when the stream is configured -- confirm in cio.c.
     */
    OPJ_OFF_T(* m_opj_skip)(struct opj_stream_private *, OPJ_OFF_T,
                            struct opj_event_mgr *);
    /**
     * Internal seek implementation for this stream.
     * NOTE(review): presumably set to the read- or write-mode seek
     * variant when the stream is configured -- confirm in cio.c.
     */
    OPJ_BOOL(* m_opj_seek)(struct opj_stream_private *, OPJ_OFF_T,
                           struct opj_event_mgr *);
    /**
     * number of bytes contained in the buffer.
     */
    OPJ_SIZE_T m_bytes_in_buffer;
    /**
     * The number of bytes read/written from the beginning of the stream
     */
    OPJ_OFF_T m_byte_offset;
    /**
     * The size of the buffer.
     */
    OPJ_SIZE_T m_buffer_size;
    /**
     * Flags to tell the status of the stream.
     * Used with OPJ_STREAM_STATUS_* defines.
     */
    OPJ_UINT32 m_status;
}
opj_stream_private_t;
/** @name Exported functions (see also openjpeg.h) */
/*@{*/
/* ----------------------------------------------------------------------- */
/**
* Write some bytes to the given data buffer, this function is used in Big Endian cpus.
* @param p_buffer pointer the data buffer to write data to.
* @param p_value the value to write
* @param p_nb_bytes the number of bytes to write
*/
void opj_write_bytes_BE(OPJ_BYTE * p_buffer, OPJ_UINT32 p_value,
OPJ_UINT32 p_nb_bytes);
/**
* Reads some bytes from the given data buffer, this function is used in Big Endian cpus.
* @param p_buffer pointer the data buffer to read data from.
* @param p_value pointer to the value that will store the data.
* @param p_nb_bytes the nb bytes to read.
* @return the number of bytes read or -1 if an error occurred.
*/
void opj_read_bytes_BE(const OPJ_BYTE * p_buffer, OPJ_UINT32 * p_value,
OPJ_UINT32 p_nb_bytes);
/**
* Write some bytes to the given data buffer, this function is used in Little Endian cpus.
* @param p_buffer pointer the data buffer to write data to.
* @param p_value the value to write
* @param p_nb_bytes the number of bytes to write
* @return the number of bytes written or -1 if an error occurred
*/
void opj_write_bytes_LE(OPJ_BYTE * p_buffer, OPJ_UINT32 p_value,
OPJ_UINT32 p_nb_bytes);
/**
* Reads some bytes from the given data buffer, this function is used in Little Endian cpus.
* @param p_buffer pointer the data buffer to read data from.
* @param p_value pointer to the value that will store the data.
* @param p_nb_bytes the nb bytes to read.
* @return the number of bytes read or -1 if an error occurred.
*/
void opj_read_bytes_LE(const OPJ_BYTE * p_buffer, OPJ_UINT32 * p_value,
OPJ_UINT32 p_nb_bytes);
/**
* Write some bytes to the given data buffer, this function is used in Little Endian cpus.
* @param p_buffer pointer the data buffer to write data to.
* @param p_value the value to write
*/
void opj_write_double_LE(OPJ_BYTE * p_buffer, OPJ_FLOAT64 p_value);
/**
* Write some bytes to the given data buffer, this function is used in Big Endian cpus.
* @param p_buffer pointer the data buffer to write data to.
* @param p_value the value to write
*/
void opj_write_double_BE(OPJ_BYTE * p_buffer, OPJ_FLOAT64 p_value);
/**
* Reads some bytes from the given data buffer, this function is used in Little Endian cpus.
* @param p_buffer pointer the data buffer to read data from.
* @param p_value pointer to the value that will store the data.
*/
void opj_read_double_LE(const OPJ_BYTE * p_buffer, OPJ_FLOAT64 * p_value);
/**
* Reads some bytes from the given data buffer, this function is used in Big Endian cpus.
* @param p_buffer pointer the data buffer to read data from.
* @param p_value pointer to the value that will store the data.
*/
void opj_read_double_BE(const OPJ_BYTE * p_buffer, OPJ_FLOAT64 * p_value);
/**
* Reads some bytes from the given data buffer, this function is used in Little Endian cpus.
* @param p_buffer pointer the data buffer to read data from.
* @param p_value pointer to the value that will store the data.
*/
void opj_read_float_LE(const OPJ_BYTE * p_buffer, OPJ_FLOAT32 * p_value);
/**
* Reads some bytes from the given data buffer, this function is used in Big Endian cpus.
* @param p_buffer pointer the data buffer to read data from.
* @param p_value pointer to the value that will store the data.
*/
void opj_read_float_BE(const OPJ_BYTE * p_buffer, OPJ_FLOAT32 * p_value);
/**
* Write some bytes to the given data buffer, this function is used in Little Endian cpus.
* @param p_buffer pointer the data buffer to write data to.
* @param p_value the value to write
*/
void opj_write_float_LE(OPJ_BYTE * p_buffer, OPJ_FLOAT32 p_value);
/**
* Write some bytes to the given data buffer, this function is used in Big Endian cpus.
* @param p_buffer pointer the data buffer to write data to.
* @param p_value the value to write
*/
void opj_write_float_BE(OPJ_BYTE * p_buffer, OPJ_FLOAT32 p_value);
/**
* Reads some bytes from the stream.
* @param p_stream the stream to read data from.
* @param p_buffer pointer to the data buffer that will receive the data.
* @param p_size number of bytes to read.
* @param p_event_mgr the user event manager to be notified of special events.
* @return the number of bytes read, or -1 if an error occurred or if the stream is at the end.
*/
OPJ_SIZE_T opj_stream_read_data(opj_stream_private_t * p_stream,
OPJ_BYTE * p_buffer, OPJ_SIZE_T p_size, struct opj_event_mgr * p_event_mgr);
/**
* Writes some bytes to the stream.
* @param p_stream the stream to write data to.
 * @param p_buffer pointer to the data buffer that holds the data to be written.
 * @param p_size number of bytes to write.
 * @param p_event_mgr the user event manager to be notified of special events.
 * @return the number of bytes written, or -1 if an error occurred.
*/
OPJ_SIZE_T opj_stream_write_data(opj_stream_private_t * p_stream,
const OPJ_BYTE * p_buffer, OPJ_SIZE_T p_size,
struct opj_event_mgr * p_event_mgr);
/**
* Writes the content of the stream buffer to the stream.
* @param p_stream the stream to write data to.
* @param p_event_mgr the user event manager to be notified of special events.
* @return true if the data could be flushed, false else.
*/
OPJ_BOOL opj_stream_flush(opj_stream_private_t * p_stream,
struct opj_event_mgr * p_event_mgr);
/**
* Skips a number of bytes from the stream.
* @param p_stream the stream to skip data from.
* @param p_size the number of bytes to skip.
* @param p_event_mgr the user event manager to be notified of special events.
* @return the number of bytes skipped, or -1 if an error occurred.
*/
OPJ_OFF_T opj_stream_skip(opj_stream_private_t * p_stream, OPJ_OFF_T p_size,
struct opj_event_mgr * p_event_mgr);
/**
* Tells the byte offset on the stream (similar to ftell).
*
* @param p_stream the stream to get the information from.
*
 * @return the current position of the stream.
*/
OPJ_OFF_T opj_stream_tell(const opj_stream_private_t * p_stream);
/**
* Get the number of bytes left before the end of the stream (similar to cio_numbytesleft).
*
* @param p_stream the stream to get the information from.
*
* @return Number of bytes left before the end of the stream.
*/
OPJ_OFF_T opj_stream_get_number_byte_left(const opj_stream_private_t *
p_stream);
/**
* Skips a number of bytes from the stream.
* @param p_stream the stream to skip data from.
* @param p_size the number of bytes to skip.
* @param p_event_mgr the user event manager to be notified of special events.
* @return the number of bytes skipped, or -1 if an error occurred.
*/
OPJ_OFF_T opj_stream_write_skip(opj_stream_private_t * p_stream,
OPJ_OFF_T p_size, struct opj_event_mgr * p_event_mgr);
/**
* Skips a number of bytes from the stream.
* @param p_stream the stream to skip data from.
* @param p_size the number of bytes to skip.
* @param p_event_mgr the user event manager to be notified of special events.
* @return the number of bytes skipped, or -1 if an error occurred.
*/
OPJ_OFF_T opj_stream_read_skip(opj_stream_private_t * p_stream,
OPJ_OFF_T p_size, struct opj_event_mgr * p_event_mgr);
/**
* Skips a number of bytes from the stream.
* @param p_stream the stream to skip data from.
* @param p_size the number of bytes to skip.
* @param p_event_mgr the user event manager to be notified of special events.
* @return OPJ_TRUE if success, or OPJ_FALSE if an error occurred.
*/
OPJ_BOOL opj_stream_read_seek(opj_stream_private_t * p_stream, OPJ_OFF_T p_size,
struct opj_event_mgr * p_event_mgr);
/**
* Skips a number of bytes from the stream.
* @param p_stream the stream to skip data from.
* @param p_size the number of bytes to skip.
* @param p_event_mgr the user event manager to be notified of special events.
* @return the number of bytes skipped, or -1 if an error occurred.
*/
OPJ_BOOL opj_stream_write_seek(opj_stream_private_t * p_stream,
OPJ_OFF_T p_size, struct opj_event_mgr * p_event_mgr);
/**
* Seeks a number of bytes from the stream.
* @param p_stream the stream to skip data from.
* @param p_size the number of bytes to skip.
* @param p_event_mgr the user event manager to be notified of special events.
* @return true if the stream is seekable.
*/
OPJ_BOOL opj_stream_seek(opj_stream_private_t * p_stream, OPJ_OFF_T p_size,
struct opj_event_mgr * p_event_mgr);
/**
* Tells if the given stream is seekable.
*/
OPJ_BOOL opj_stream_has_seek(const opj_stream_private_t * p_stream);
/**
* FIXME DOC.
*/
OPJ_SIZE_T opj_stream_default_read(void * p_buffer, OPJ_SIZE_T p_nb_bytes,
void * p_user_data);
/**
* FIXME DOC.
*/
OPJ_SIZE_T opj_stream_default_write(void * p_buffer, OPJ_SIZE_T p_nb_bytes,
void * p_user_data);
/**
* FIXME DOC.
*/
OPJ_OFF_T opj_stream_default_skip(OPJ_OFF_T p_nb_bytes, void * p_user_data);
/**
* FIXME DOC.
*/
OPJ_BOOL opj_stream_default_seek(OPJ_OFF_T p_nb_bytes, void * p_user_data);
/* ----------------------------------------------------------------------- */
/*@}*/
/*@}*/
#endif /* OPJ_CIO_H */
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/openjpeg/openjp2/cio.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin.internals;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.util.Collections;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Tests for AdminFetchMetricsManager: verifies that per-node
 * request-latency avg/max metrics record values, expire after the
 * metrics time window elapses, and are tracked independently per node.
 */
public class AdminFetchMetricsManagerTest {
    // Tolerance for floating-point metric comparisons.
    private static final double EPSILON = 0.0001;
    // NOTE(review): MockTime(1, 0, 0) presumably auto-ticks 1 ms per
    // access starting at t=0 -- confirm against the MockTime constructor.
    private final Time time = new MockTime(1, 0, 0);
    private Metrics metrics;
    private AdminFetchMetricsManager adminFetchMetricsManager;
    private final String group = "group";

    @BeforeEach
    public void setup() {
        metrics = new Metrics(time);
        adminFetchMetricsManager = new AdminFetchMetricsManager(metrics);
    }

    @AfterEach
    public void tearDown() {
        Utils.closeQuietly(metrics, "metrics");
        metrics = null;
        adminFetchMetricsManager = null;
    }

    @Test
    public void testSingleNodeLatency() {
        String connectionId = "0";
        MetricName nodeLatencyAvg = metrics.metricName("request-latency-avg", group);
        MetricName nodeLatencyMax = metrics.metricName("request-latency-max", group);
        registerNodeLatencyMetric(connectionId, nodeLatencyAvg, nodeLatencyMax);
        adminFetchMetricsManager.recordLatency(connectionId, 333);
        mockSleepTimeWindow();
        adminFetchMetricsManager.recordLatency(connectionId, 444);
        // avg of 333 and 444 = 388.5
        assertEquals(388.5, metricValue(nodeLatencyAvg), EPSILON);
        assertEquals(444, metricValue(nodeLatencyMax), EPSILON);
        adminFetchMetricsManager.recordLatency(connectionId, 666);
        // avg of 333, 444 and 666 = 481
        assertEquals(481, metricValue(nodeLatencyAvg), EPSILON);
        assertEquals(666, metricValue(nodeLatencyMax), EPSILON);
        // first record(333) expired
        mockSleepTimeWindow();
        assertEquals(555, metricValue(nodeLatencyAvg), EPSILON);
        assertEquals(666, metricValue(nodeLatencyMax), EPSILON);
        // all records expired
        mockSleepTimeWindow();
        assertTrue(Double.isNaN(metricValue(nodeLatencyAvg)));
        assertTrue(Double.isNaN(metricValue(nodeLatencyMax)));
    }

    @Test
    public void testMultiNodeLatency() {
        String connectionId0 = "0";
        MetricName nodeLatencyAvg0 = metrics.metricName("request-latency-avg", group, genericTag(connectionId0));
        MetricName nodeLatencyMax0 = metrics.metricName("request-latency-max", group, genericTag(connectionId0));
        registerNodeLatencyMetric(connectionId0, nodeLatencyAvg0, nodeLatencyMax0);
        adminFetchMetricsManager.recordLatency(connectionId0, 5);
        adminFetchMetricsManager.recordLatency(connectionId0, 8);
        // Record metric against another node.
        String connectionId1 = "1";
        MetricName nodeLatencyAvg1 = metrics.metricName("request-latency-avg", group, genericTag(connectionId1));
        MetricName nodeLatencyMax1 = metrics.metricName("request-latency-max", group, genericTag(connectionId1));
        registerNodeLatencyMetric(connectionId1, nodeLatencyAvg1, nodeLatencyMax1);
        adminFetchMetricsManager.recordLatency(connectionId1, 105);
        adminFetchMetricsManager.recordLatency(connectionId1, 108);
        // Each node's sensors only see their own node's latencies.
        assertEquals(6.5, metricValue(nodeLatencyAvg0), EPSILON);
        assertEquals(8, metricValue(nodeLatencyMax0), EPSILON);
        assertEquals(106.5, metricValue(nodeLatencyAvg1), EPSILON);
        assertEquals(108, metricValue(nodeLatencyMax1), EPSILON);
        mockSleepTimeWindow();
        adminFetchMetricsManager.recordLatency(connectionId0, 11);
        adminFetchMetricsManager.recordLatency(connectionId1, 111);
        // Earlier samples expired; only post-window samples remain.
        assertEquals(8, metricValue(nodeLatencyAvg0), EPSILON);
        assertEquals(11, metricValue(nodeLatencyMax0), EPSILON);
        assertEquals(108, metricValue(nodeLatencyAvg1), EPSILON);
        assertEquals(111, metricValue(nodeLatencyMax1), EPSILON);
        mockSleepTimeWindow();
        assertEquals(11, metricValue(nodeLatencyAvg0), EPSILON);
        assertEquals(11, metricValue(nodeLatencyMax0), EPSILON);
        assertEquals(111, metricValue(nodeLatencyAvg1), EPSILON);
        assertEquals(111, metricValue(nodeLatencyMax1), EPSILON);
        mockSleepTimeWindow();
        assertTrue(Double.isNaN(metricValue(nodeLatencyAvg0)));
        assertTrue(Double.isNaN(metricValue(nodeLatencyMax0)));
        assertTrue(Double.isNaN(metricValue(nodeLatencyAvg1)));
        assertTrue(Double.isNaN(metricValue(nodeLatencyMax1)));
        adminFetchMetricsManager.recordLatency(connectionId0, 500);
        adminFetchMetricsManager.recordLatency(connectionId0, 600);
        mockSleepTimeWindow();
        adminFetchMetricsManager.recordLatency(connectionId1, 800);
        adminFetchMetricsManager.recordLatency(connectionId1, 900);
        assertEquals(550, metricValue(nodeLatencyAvg0), EPSILON);
        assertEquals(600, metricValue(nodeLatencyMax0), EPSILON);
        assertEquals(850, metricValue(nodeLatencyAvg1), EPSILON);
        assertEquals(900, metricValue(nodeLatencyMax1), EPSILON);
        mockSleepTimeWindow();
        // Node 0's samples were recorded one window earlier, so they
        // expire first while node 1's are still live.
        assertTrue(Double.isNaN(metricValue(nodeLatencyAvg0)));
        assertTrue(Double.isNaN(metricValue(nodeLatencyMax0)));
        assertEquals(850, metricValue(nodeLatencyAvg1), EPSILON);
        assertEquals(900, metricValue(nodeLatencyMax1), EPSILON);
        mockSleepTimeWindow();
        assertTrue(Double.isNaN(metricValue(nodeLatencyAvg1)));
        assertTrue(Double.isNaN(metricValue(nodeLatencyMax1)));
    }

    // Builds the node-id tag map used to distinguish per-node metrics.
    private Map<String, String> genericTag(String connectionId) {
        return Collections.singletonMap("node-id", "node-" + connectionId);
    }

    // Advances mock time just past one metrics time window, expiring samples.
    private void mockSleepTimeWindow() {
        time.sleep(metrics.config().timeWindowMs() + 1);
    }

    // Registers avg/max latency stats on the per-node sensor the manager records into.
    private void registerNodeLatencyMetric(String connectionId, MetricName nodeLatencyAvg, MetricName nodeLatencyMax) {
        String nodeTimeName = "node-" + connectionId + ".latency";
        Sensor nodeRequestTime = metrics.sensor(nodeTimeName);
        nodeRequestTime.add(nodeLatencyAvg, new Avg());
        nodeRequestTime.add(nodeLatencyMax, new Max());
    }

    // Reads the current value of a registered metric as a double.
    private double metricValue(MetricName metricName) {
        KafkaMetric metric = metrics.metric(metricName);
        return (double) metric.metricValue();
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminFetchMetricsManagerTest.java
|
{
"applyable": true,
"complete": true,
"configuration": {
"provider_config": {
"tfcoremock": {
"full_name": "registry.terraform.io/hashicorp/tfcoremock",
"name": "tfcoremock",
"version_constraint": "0.1.1"
}
},
"root_module": {
"resources": [
{
"address": "tfcoremock_map.map",
"expressions": {
"id": {
"constant_value": "F40F2AB4-100C-4AE8-BFD0-BF332A158415"
},
"map": {
"constant_value": {
"key_one": {
"id": "3BFC1A84-023F-44FA-A8EE-EFD88E18B8F7"
},
"key_three": {
"id": "4B7178A8-AB9D-4FF4-8B3D-48B754DE537B"
},
"key_two": {
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
}
}
}
},
"mode": "managed",
"name": "map",
"provider_config_key": "tfcoremock",
"schema_version": 0,
"type": "tfcoremock_map"
}
]
}
},
"errored": false,
"format_version": "1.2",
"planned_values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_map.map",
"mode": "managed",
"name": "map",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {
"map": {
"key_one": {},
"key_three": {},
"key_two": {}
}
},
"type": "tfcoremock_map",
"values": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"map": {
"key_one": {
"id": "3BFC1A84-023F-44FA-A8EE-EFD88E18B8F7"
},
"key_three": {
"id": "4B7178A8-AB9D-4FF4-8B3D-48B754DE537B"
},
"key_two": {
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
}
}
}
}
]
}
},
"prior_state": {
"format_version": "1.0",
"values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_map.map",
"mode": "managed",
"name": "map",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {
"map": {
"key_one": {},
"key_three": {},
"key_two": {}
}
},
"type": "tfcoremock_map",
"values": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"map": {
"key_one": {
"id": "3BFC1A84-023F-44FA-A8EE-EFD88E18B8F7"
},
"key_three": {
"id": "4B7178A8-AB9D-4FF4-8B3D-48B754DE537B"
},
"key_two": {
"id": "56C7E07F-B9DF-4799-AF62-E703D1167A51"
}
}
}
}
]
}
}
},
"resource_changes": [
{
"action_reason": "replace_because_cannot_update",
"address": "tfcoremock_map.map",
"change": {
"actions": [
"delete",
"create"
],
"after": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"map": {
"key_one": {
"id": "3BFC1A84-023F-44FA-A8EE-EFD88E18B8F7"
},
"key_three": {
"id": "4B7178A8-AB9D-4FF4-8B3D-48B754DE537B"
},
"key_two": {
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
}
}
},
"after_sensitive": {
"map": {
"key_one": {},
"key_three": {},
"key_two": {}
}
},
"after_unknown": {},
"before": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"map": {
"key_one": {
"id": "3BFC1A84-023F-44FA-A8EE-EFD88E18B8F7"
},
"key_three": {
"id": "4B7178A8-AB9D-4FF4-8B3D-48B754DE537B"
},
"key_two": {
"id": "56C7E07F-B9DF-4799-AF62-E703D1167A51"
}
}
},
"before_sensitive": {
"map": {
"key_one": {},
"key_three": {},
"key_two": {}
}
},
"replace_paths": [
[
"map",
"key_two",
"id"
]
]
},
"mode": "managed",
"name": "map",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"type": "tfcoremock_map"
}
]
}
|
json
|
github
|
https://github.com/hashicorp/terraform
|
testing/equivalence-tests/outputs/replace_within_map/plan.json
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admissionregistration
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
// to make sure that all the tuple expansions are valid.
type Rule struct {
// APIGroups is the API groups the resources belong to. '*' is all groups.
// If '*' is present, the length of the slice must be one.
// Required.
APIGroups []string
// APIVersions is the API versions the resources belong to. '*' is all versions.
// If '*' is present, the length of the slice must be one.
// Required.
APIVersions []string
// Resources is a list of resources this rule applies to.
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.
// '*' means all resources, but not subresources.
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
// If wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
Resources []string
// scope specifies the scope of this rule.
// Valid values are "Cluster", "Namespaced", and "*"
// "Cluster" means that only cluster-scoped resources will match this rule.
// Namespace API objects are cluster-scoped.
// "Namespaced" means that only namespaced resources will match this rule.
// "*" means that there are no scope restrictions.
// Subresources match the scope of their parent resource.
// Default is "*".
//
// +optional
Scope *ScopeType
}
// ScopeType specifies the type of scope being used by a Rule.
type ScopeType string
const (
// ClusterScope means that scope is limited to cluster-scoped objects.
// Namespace objects are cluster-scoped.
ClusterScope ScopeType = "Cluster"
// NamespacedScope means that scope is limited to namespaced objects.
NamespacedScope ScopeType = "Namespaced"
// AllScopes means that all scopes are included.
AllScopes ScopeType = "*"
)
// ParameterNotFoundActionType specifies a failure policy that defines how a binding
// is evaluated when the param referred to by its perNamespaceParamRef is not found.
type ParameterNotFoundActionType string
const (
// AllowAction means all requests will be admitted if no param resources
// could be found.
AllowAction ParameterNotFoundActionType = "Allow"
// DenyAction means all requests will be denied if no param resources are found.
DenyAction ParameterNotFoundActionType = "Deny"
)
// FailurePolicyType specifies the type of failure policy.
type FailurePolicyType string
const (
// Ignore means that an error calling the admission webhook or admission policy is ignored.
Ignore FailurePolicyType = "Ignore"
// Fail means that an error calling the admission webhook or admission policy causes resource admission to fail.
Fail FailurePolicyType = "Fail"
)
// MatchPolicyType specifies the type of match policy.
type MatchPolicyType string
const (
// Exact means requests should only be sent to the admission webhook or admission policy if they exactly match a given rule.
Exact MatchPolicyType = "Exact"
// Equivalent means requests should be sent to the admission webhook or admission policy if they modify a resource listed
// in rules via another API group or version.
Equivalent MatchPolicyType = "Equivalent"
)
// SideEffectClass denotes the type of side effects resulting from calling the webhook.
type SideEffectClass string
const (
// SideEffectClassUnknown means that no information is known about the side effects of calling the webhook.
// If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail.
SideEffectClassUnknown SideEffectClass = "Unknown"
// SideEffectClassNone means that calling the webhook will have no side effects.
SideEffectClassNone SideEffectClass = "None"
// SideEffectClassSome means that calling the webhook will possibly have side effects.
// If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail.
SideEffectClassSome SideEffectClass = "Some"
// SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the
// request being reviewed has the dry-run attribute, the side effects will be suppressed.
SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
type ValidatingAdmissionPolicy struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the ValidatingAdmissionPolicy.
Spec ValidatingAdmissionPolicySpec
// The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
// behaves in the expected way.
// Populated by the system.
// Read-only.
// +optional
Status ValidatingAdmissionPolicyStatus
}
// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
type ValidatingAdmissionPolicyStatus struct {
// The generation observed by the controller.
// +optional
ObservedGeneration int64
// The results of type checking for each expression.
// Presence of this field indicates the completion of the type checking.
// +optional
TypeChecking *TypeChecking
// The conditions represent the latest available observations of a policy's current state.
// +optional
// +listType=map
// +listMapKey=type
Conditions []metav1.Condition
}
// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy.
type ValidatingAdmissionPolicyConditionType string
// TypeChecking contains results of type checking the expressions in the
// ValidatingAdmissionPolicy.
type TypeChecking struct {
// The type checking warnings for each expression.
// +optional
// +listType=atomic
ExpressionWarnings []ExpressionWarning
}
// ExpressionWarning is a warning information that targets a specific expression.
type ExpressionWarning struct {
// The path to the field that refers to the expression.
// For example, the reference to the expression of the first item of
// validations is "spec.validations[0].expression"
FieldRef string
// The content of type checking information in a human-readable form.
// Each line of the warning contains the type that the expression is checked
// against, followed by the type check error from the compiler.
Warning string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
type ValidatingAdmissionPolicyList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta
// List of ValidatingAdmissionPolicy.
Items []ValidatingAdmissionPolicy
}
// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
type ValidatingAdmissionPolicySpec struct {
// ParamKind specifies the kind of resources used to parameterize this policy.
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
// If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
// If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
// +optional
ParamKind *ParamKind
// MatchConstraints specifies what resources this policy is designed to validate.
// The AdmissionPolicy cares about a request if it matches _all_ Constraints.
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
// ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
// Required.
MatchConstraints *MatchResources
// validations contain CEL expressions which are used to validate admission requests.
// validations and auditAnnotations may not both be empty; a minimum of one validations or auditAnnotations is
// required.
// +optional
Validations []Validation
// MatchConditions is a list of conditions that must be met for a request to be validated.
// Match conditions filter requests that have already been matched by the rules,
// namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// If a parameter object is provided, it can be accessed via the `params` handle in the same
// manner as validation expressions.
//
// The exact matching logic is (in order):
// 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
// 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
// 3. If any matchCondition evaluates to an error (but none are FALSE):
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the policy is skipped
//
// +optional
MatchConditions []MatchCondition
// failurePolicy defines how to handle failures for the admission policy. Failures can
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
// or mis-configured policy definitions or bindings.
//
// A policy is invalid if spec.paramKind refers to a non-existent Kind.
// A binding is invalid if spec.paramRef.name refers to a non-existent resource.
//
// failurePolicy does not define how validations that evaluate to false are handled.
//
// When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
// define how failures are enforced.
//
// Allowed values are Ignore or Fail. Defaults to Fail.
// +optional
FailurePolicy *FailurePolicyType
// auditAnnotations contains CEL expressions which are used to produce audit
// annotations for the audit event of the API request.
// validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
// required.
// A maximum of 20 auditAnnotation are allowed per ValidatingAdmissionPolicy.
// +optional
AuditAnnotations []AuditAnnotation
// Variables contain definitions of variables that can be used in composition of other expressions.
// Each variable is defined as a named CEL expression.
// The variables defined here will be available under `variables` in other expressions of the policy
// except MatchConditions because MatchConditions are evaluated before the rest of the policy.
//
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
// Thus, Variables must be sorted by the order of first appearance and acyclic.
// +listType=atomic
// +optional
Variables []Variable
}
// ParamKind is a tuple of Group Kind and Version.
type ParamKind struct {
// APIVersion is the API group version the resources belong to.
// In format of "group/version".
// Required.
APIVersion string
// Kind is the API kind the resources belong to.
// Required.
Kind string
}
// Validation specifies the CEL expression which is used to apply the validation.
type Validation struct {
// Expression represents the expression which will be evaluated by CEL.
// ref: https://github.com/google/cel-spec
// CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
//
//'object' - The object from the incoming request. The value is null for DELETE requests.
//'oldObject' - The existing object. The value is null for CREATE requests.
//'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
//'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
//'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
//'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
// object. No other metadata properties are accessible.
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Accessible property names are escaped according to the following rules when accessed in the expression:
// - '__' escapes to '__underscores__'
// - '.' escapes to '__dot__'
// - '-' escapes to '__dash__'
// - '/' escapes to '__slash__'
// - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
// "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
// "import", "let", "loop", "package", "namespace", "return".
// Examples:
// - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
// - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
// - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
//
// Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
// Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
// - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
// non-intersecting elements in `Y` are appended, retaining their partial order.
// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
// are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
// non-intersecting keys are appended, retaining their partial order.
// Required.
Expression string
// Message represents the message displayed when validation fails. The message is required if the Expression contains
// line breaks. The message must not contain line breaks.
// If unset, the message is "failed rule: {Rule}".
// e.g. "must be a URL with the host matching spec.host"
// If messageExpression is specified, Message will be ignored.
// If the Expression contains line breaks, either Message or messageExpression is required.
// The message must not contain line breaks.
// If unset, the message is "failed Expression: {Expression}".
// +optional
Message string
// Reason represents a machine-readable description of why this validation failed.
// If this is the first validation in the list to fail, this reason, as well as the
// corresponding HTTP response code, are used in the
// HTTP response to the client.
// The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
// If not set, StatusReasonInvalid is used in the response to the client.
// +optional
Reason *metav1.StatusReason
// messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
// Since messageExpression is used as a failure message, it must evaluate to a string.
// If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
// If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
// as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
// that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
// the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
// messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
// Example:
// "object.x must be less than max ("+string(params.max)+")"
// +optional
MessageExpression string
}
// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
// +structType=atomic
type Variable struct {
// Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
// The variable can be accessed in other expressions through `variables`
// For example, if name is "foo", the variable will be available as `variables.foo`
Name string
// Expression is the expression that will be evaluated as the value of the variable.
// The CEL expression has access to the same identifiers as the CEL expressions in Validation.
Expression string
}
// AuditAnnotation describes how to produce an audit annotation for an API request.
type AuditAnnotation struct {
// key specifies the audit annotation key. The audit annotation keys of
// a ValidatingAdmissionPolicy must be unique. The key must be a qualified
// name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
//
// The key is combined with the resource name of the
// ValidatingAdmissionPolicy to construct an audit annotation key:
// "{ValidatingAdmissionPolicy name}/{key}".
//
// If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
// and the same audit annotation key, the annotation key will be identical.
// In this case, the first annotation written with the key will be included
// in the audit event and all subsequent annotations with the same key
// will be discarded.
//
// Required.
Key string
// valueExpression represents the expression which is evaluated by CEL to
// produce an audit annotation value. The expression must evaluate to either
// a string or null value. If the expression evaluates to a string, the
// audit annotation is included with the string value. If the expression
// evaluates to null or empty string the audit annotation will be omitted.
// The valueExpression may be no longer than 5kb in length.
// If the result of the valueExpression is more than 10kb in length, it
// will be truncated to 10kb.
//
// If multiple ValidatingAdmissionPolicyBinding resources match an
// API request, then the valueExpression will be evaluated for
// each binding. All unique values produced by the valueExpressions
// will be joined together in a comma-separated list.
//
// Required.
ValueExpression string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
//
// For a given admission request, each binding will cause its policy to be
// evaluated N times, where N is 1 for policies/bindings that don't use
// params, otherwise N is the number of parameters selected by the binding.
//
// The CEL expressions of a policy must have a computed CEL cost below the maximum
// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
// Adding/removing policies, bindings, or params can not affect whether a
// given (policy, binding, param) combination is within its own CEL budget.
type ValidatingAdmissionPolicyBinding struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
Spec ValidatingAdmissionPolicyBindingSpec
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta
// List of ValidatingAdmissionPolicyBinding.
Items []ValidatingAdmissionPolicyBinding
}
// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingSpec struct {
// PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
// If the referenced resource does not exist, this binding is considered invalid and will be ignored
// Required.
PolicyName string
// paramRef specifies the parameter resource used to configure the admission control policy.
// It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.
// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
// +optional
ParamRef *ParamRef
// MatchResources declares what resources match this binding and will be validated by it.
// Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
// If this is unset, all resources matched by the policy are validated by this binding
// When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
// Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
// +optional
MatchResources *MatchResources
// validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
// If a validation evaluates to false it is always enforced according to these actions.
//
// Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
// to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
// ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
//
// validationActions is declared as a set of action values. Order does
// not matter. validationActions may not contain duplicates of the same action.
//
// The supported actions values are:
//
// "Deny" specifies that a validation failure results in a denied request.
//
// "Warn" specifies that a validation failure is reported to the request client
// in HTTP Warning headers, with a warning code of 299. Warnings can be sent
// both for allowed or denied admission responses.
//
// "Audit" specifies that a validation failure is included in the published
// audit event for the request. The audit event will contain a
// `validation.policy.admission.k8s.io/validation_failure` audit annotation
// with a value containing the details of the validation failures, formatted as
// a JSON list of objects, each with the following fields:
// - message: The validation failure message string
// - policy: The resource name of the ValidatingAdmissionPolicy
// - binding: The resource name of the ValidatingAdmissionPolicyBinding
// - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
// - validationActions: The enforcement actions enacted for the validation failure
// Example audit annotation:
// `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]"`
//
// Clients should expect to handle additional values by ignoring
// any values not recognized.
//
// "Deny" and "Warn" may not be used together since this combination
// needlessly duplicates the validation failure both in the
// API response body and the HTTP warning headers.
//
// Required.
ValidationActions []ValidationAction
}
// ParamRef describes how to locate the params to be used as input to
// expressions of rules applied by a policy binding.
// +structType=atomic
type ParamRef struct {
// name is the name of the resource being referenced.
//
// One of `name` or `selector` must be set, but `name` and `selector` are
// mutually exclusive properties. If one is set, the other must be unset.
//
// A single parameter used for all admission requests can be configured
// by setting the `name` field, leaving `selector` blank, and setting namespace
// if `paramKind` is namespace-scoped.
//
// +optional
Name string
// namespace is the namespace of the referenced resource. Allows limiting
// the search for params to a specific namespace. Applies to both `name` and
// `selector` fields.
//
// A per-namespace parameter may be used by specifying a namespace-scoped
// `paramKind` in the policy and leaving this field empty.
//
// - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
// field results in a configuration error.
//
// - If `paramKind` is namespace-scoped, the namespace of the object being
// evaluated for admission will be used when this field is left unset. Take
// care that if this is left empty the binding must not match any cluster-scoped
// resources, which will result in an error.
//
// +optional
Namespace string
// selector can be used to match multiple param objects based on their labels.
// Supply selector: {} to match all resources of the ParamKind.
//
// If multiple params are found, they are all evaluated with the policy expressions
// and the results are ANDed together.
//
// One of `name` or `selector` must be set, but `name` and `selector` are
// mutually exclusive properties. If one is set, the other must be unset.
//
// +optional
Selector *metav1.LabelSelector
// parameterNotFoundAction controls the behavior of the binding when the resource
// exists, and name or selector is valid, but there are no parameters
// matched by the binding. If the value is set to `Allow`, then no
// matched parameters will be treated as successful validation by the binding.
// If set to `Deny`, then no matched parameters will be subject to the
// `failurePolicy` of the policy.
//
// Allowed values are `Allow` or `Deny`
//
// Required
ParameterNotFoundAction *ParameterNotFoundActionType
}
// MatchResources decides whether to run the admission control policy on an object based
// on whether it meets the match criteria.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
type MatchResources struct {
// NamespaceSelector decides whether to run the admission control policy on an object based
// on whether the namespace for that object matches the selector. If the
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the policy.
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1"; you will set the selector as
// follows:
// "namespaceSelector": {
// "matchExpressions": [
// {
// "key": "runlevel",
// "operator": "NotIn",
// "values": [
// "0",
// "1"
// ]
// }
// ]
// }
//
// If instead you want to only run the policy on any objects whose
// namespace is associated with the "environment" of "prod" or "staging";
// you will set the selector as follows:
// "namespaceSelector": {
// "matchExpressions": [
// {
// "key": "environment",
// "operator": "In",
// "values": [
// "prod",
// "staging"
// ]
// }
// ]
// }
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
// Default to the empty LabelSelector, which matches everything.
// +optional
NamespaceSelector *metav1.LabelSelector
// ObjectSelector decides whether to run the policy based on if the
// object has matching labels. objectSelector is evaluated against both
// the oldObject and newObject that would be sent to the cel policy, and
// is considered to match if either object matches the selector. A null
// object (oldObject in the case of create, or newObject in the case of
// delete) or an object that cannot have labels (like a
// DeploymentRollback or a PodProxyOptions object) is not considered to
// match.
// Use the object selector only if the webhook is opt-in, because end
// users may skip the admission webhook by setting the labels.
// Default to the empty LabelSelector, which matches everything.
// +optional
ObjectSelector *metav1.LabelSelector
// ResourceRules describes what operations on what resources/subresources the policy matches.
// The policy cares about an operation if it matches _any_ Rule.
// +listType=atomic
// +optional
ResourceRules []NamedRuleWithOperations
// ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
// +listType=atomic
// +optional
ExcludeResourceRules []NamedRuleWithOperations
// matchPolicy defines how the "MatchResources" list is used to match incoming requests.
// Allowed values are "Exact" or "Equivalent".
//
// - Exact: match a request only if it exactly matches a specified rule.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the policy.
//
// - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the policy.
//
// Defaults to "Equivalent"
// +optional
MatchPolicy *MatchPolicyType
}
// ValidationAction specifies a policy enforcement action.
type ValidationAction string
const (
// Deny specifies that a validation failure results in a denied request.
Deny ValidationAction = "Deny"
// Warn specifies that a validation failure is reported to the request client
// in HTTP Warning headers, with a warning code of 299. Warnings can be sent
// both for allowed or denied admission responses.
Warn ValidationAction = "Warn"
// Audit specifies that a validation failure is included in the published
// audit event for the request. The audit event will contain a
// `validation.policy.admission.k8s.io/validation_failure` audit annotation
// with a value containing the details of the validation failure.
Audit ValidationAction = "Audit"
)
// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
type NamedRuleWithOperations struct {
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
// +optional
ResourceNames []string
// RuleWithOperations is a tuple of Operations and Resources.
RuleWithOperations RuleWithOperations
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
type ValidatingWebhookConfiguration struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// Webhooks is a list of webhooks and the affected resources and operations.
// +optional
Webhooks []ValidatingWebhook
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.
type ValidatingWebhookConfigurationList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta
// List of ValidatingWebhookConfigurations.
Items []ValidatingWebhookConfiguration
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MutatingWebhookConfiguration describes the configuration of an admission webhook that may accept or reject and may change the object.
type MutatingWebhookConfiguration struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// Webhooks is a list of webhooks and the affected resources and operations.
// +optional
Webhooks []MutatingWebhook
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.
type MutatingWebhookConfigurationList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta
// List of MutatingWebhookConfigurations.
Items []MutatingWebhookConfiguration
}
// ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
type ValidatingWebhook struct {
// The name of the admission webhook.
// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
// "imagepolicy" is the name of the webhook, and kubernetes.io is the name
// of the organization.
// Required.
Name string
// ClientConfig defines how to communicate with the hook.
// Required
ClientConfig WebhookClientConfig
// Rules describes what operations on what resources/subresources the webhook cares about.
// The webhook cares about an operation if it matches _any_ Rule.
Rules []RuleWithOperations
// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
// allowed values are Ignore or Fail. Defaults to Ignore.
// +optional
FailurePolicy *FailurePolicyType
// matchPolicy defines how the "rules" list is used to match incoming requests.
// Allowed values are "Exact" or "Equivalent".
//
// - Exact: match a request only if it exactly matches a specified rule.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
//
// - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.
//
// +optional
MatchPolicy *MatchPolicyType
// NamespaceSelector decides whether to run the webhook on an object based
// on whether the namespace for that object matches the selector. If the
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the webhook.
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1"; you will set the selector as
// follows:
// "namespaceSelector": {
//   "matchExpressions": [
//     {
//       "key": "runlevel",
//       "operator": "NotIn",
//       "values": [
//         "0",
//         "1"
//       ]
//     }
//   ]
// }
//
// If instead you want to only run the webhook on any objects whose
// namespace is associated with the "environment" of "prod" or "staging";
// you will set the selector as follows:
// "namespaceSelector": {
//   "matchExpressions": [
//     {
//       "key": "environment",
//       "operator": "In",
//       "values": [
//         "prod",
//         "staging"
//       ]
//     }
//   ]
// }
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
// Default to the empty LabelSelector, which matches everything.
// +optional
NamespaceSelector *metav1.LabelSelector
// ObjectSelector decides whether to run the webhook based on if the
// object has matching labels. objectSelector is evaluated against both
// the oldObject and newObject that would be sent to the webhook, and
// is considered to match if either object matches the selector. A null
// object (oldObject in the case of create, or newObject in the case of
// delete) or an object that cannot have labels (like a
// DeploymentRollback or a PodProxyOptions object) is not considered to
// match.
// Use the object selector only if the webhook is opt-in, because end
// users may skip the admission webhook by setting the labels.
// Default to the empty LabelSelector, which matches everything.
// +optional
ObjectSelector *metav1.LabelSelector
// SideEffects states whether this webhook has side effects.
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
// Webhooks with side effects MUST implement a reconciliation system, since a request may be
// rejected by a future step in the admission chain and the side effects therefore need to be undone.
// Requests with the dryRun attribute will be auto-rejected if they match a webhook with
// sideEffects == Unknown or Some. Defaults to Unknown.
// +optional
SideEffects *SideEffectClass
// TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
// the webhook call will be ignored or the API call will fail based on the
// failure policy.
// The timeout value must be between 1 and 30 seconds.
// +optional
TimeoutSeconds *int32
// AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
// versions the Webhook expects. API server will try to use first version in
// the list which it supports. If none of the versions specified in this list
// is supported by the API server, validation will fail for this object.
// If the webhook configuration has already been persisted with a version apiserver
// does not understand, calls to the webhook will fail and be subject to the failure policy.
// +optional
AdmissionReviewVersions []string
// MatchConditions is a list of conditions that must be met for a request to be sent to this
// webhook. Match conditions filter requests that have already been matched by the rules,
// namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// The exact matching logic is (in order):
//   1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.
//   2. If ALL matchConditions evaluate to TRUE, the webhook is called.
//   3. If any matchCondition evaluates to an error (but none are FALSE):
//      - If failurePolicy=Fail, reject the request
//      - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
// +optional
MatchConditions []MatchCondition
}
// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
type MutatingWebhook struct {
// The name of the admission webhook.
// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
// "imagepolicy" is the name of the webhook, and kubernetes.io is the name
// of the organization.
// Required.
Name string
// ClientConfig defines how to communicate with the hook.
// Required
ClientConfig WebhookClientConfig
// Rules describes what operations on what resources/subresources the webhook cares about.
// The webhook cares about an operation if it matches _any_ Rule.
Rules []RuleWithOperations
// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
// allowed values are Ignore or Fail. Defaults to Ignore.
// +optional
FailurePolicy *FailurePolicyType
// matchPolicy defines how the "rules" list is used to match incoming requests.
// Allowed values are "Exact" or "Equivalent".
//
// - Exact: match a request only if it exactly matches a specified rule.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
//
// - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.
//
// +optional
MatchPolicy *MatchPolicyType
// NamespaceSelector decides whether to run the webhook on an object based
// on whether the namespace for that object matches the selector. If the
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the webhook.
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1"; you will set the selector as
// follows:
// "namespaceSelector": {
//   "matchExpressions": [
//     {
//       "key": "runlevel",
//       "operator": "NotIn",
//       "values": [
//         "0",
//         "1"
//       ]
//     }
//   ]
// }
//
// If instead you want to only run the webhook on any objects whose
// namespace is associated with the "environment" of "prod" or "staging";
// you will set the selector as follows:
// "namespaceSelector": {
//   "matchExpressions": [
//     {
//       "key": "environment",
//       "operator": "In",
//       "values": [
//         "prod",
//         "staging"
//       ]
//     }
//   ]
// }
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
// Default to the empty LabelSelector, which matches everything.
// +optional
NamespaceSelector *metav1.LabelSelector
// ObjectSelector decides whether to run the webhook based on if the
// object has matching labels. objectSelector is evaluated against both
// the oldObject and newObject that would be sent to the webhook, and
// is considered to match if either object matches the selector. A null
// object (oldObject in the case of create, or newObject in the case of
// delete) or an object that cannot have labels (like a
// DeploymentRollback or a PodProxyOptions object) is not considered to
// match.
// Use the object selector only if the webhook is opt-in, because end
// users may skip the admission webhook by setting the labels.
// Default to the empty LabelSelector, which matches everything.
// +optional
ObjectSelector *metav1.LabelSelector
// SideEffects states whether this webhook has side effects.
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
// Webhooks with side effects MUST implement a reconciliation system, since a request may be
// rejected by a future step in the admission chain and the side effects therefore need to be undone.
// Requests with the dryRun attribute will be auto-rejected if they match a webhook with
// sideEffects == Unknown or Some. Defaults to Unknown.
// +optional
SideEffects *SideEffectClass
// TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
// the webhook call will be ignored or the API call will fail based on the
// failure policy.
// The timeout value must be between 1 and 30 seconds.
// +optional
TimeoutSeconds *int32
// AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
// versions the Webhook expects. API server will try to use first version in
// the list which it supports. If none of the versions specified in this list
// is supported by the API server, validation will fail for this object.
// If the webhook configuration has already been persisted with a version apiserver
// does not understand, calls to the webhook will fail and be subject to the failure policy.
// +optional
AdmissionReviewVersions []string
// reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
// Allowed values are "Never" and "IfNeeded".
//
// Never: the webhook will not be called more than once in a single admission evaluation.
//
// IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation
// if the object being admitted is modified by other admission plugins after the initial webhook call.
// Webhooks that specify this option *must* be idempotent, and hence able to process objects they previously admitted.
// Note:
// * the number of additional invocations is not guaranteed to be exactly one.
// * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again.
// * webhooks that use this option may be reordered to minimize the number of additional invocations.
// * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.
//
// Defaults to "Never".
// +optional
ReinvocationPolicy *ReinvocationPolicyType
// MatchConditions is a list of conditions that must be met for a request to be sent to this
// webhook. Match conditions filter requests that have already been matched by the rules,
// namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// The exact matching logic is (in order):
//   1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.
//   2. If ALL matchConditions evaluate to TRUE, the webhook is called.
//   3. If any matchCondition evaluates to an error (but none are FALSE):
//      - If failurePolicy=Fail, reject the request
//      - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
// +optional
MatchConditions []MatchCondition
}
// ReinvocationPolicyType specifies what type of policy is used when other admission plugins also perform
// modifications.
// +enum
type ReinvocationPolicyType string
var (
// NeverReinvocationPolicy indicates that the mutation must not be called more than once in a
// single admission evaluation.
NeverReinvocationPolicy ReinvocationPolicyType = "Never"
// IfNeededReinvocationPolicy indicates that the mutation may be called at least one
// additional time as part of the admission evaluation if the object being admitted is
// modified by other admission plugins after the initial mutation call.
IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded"
)
// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
// sure that all the tuple expansions are valid.
type RuleWithOperations struct {
// Operations is the operations the admission hook cares about - CREATE, UPDATE, or *
// for all operations.
// If '*' is present, the length of the slice must be one.
// Required.
Operations []OperationType
// Rule is embedded, it describes other criteria of the rule, like
// APIGroups, APIVersions, Resources, etc.
Rule
}
// OperationType specifies what type of operation the admission hook cares about.
type OperationType string
// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go.
const (
// OperationAll matches all of the operations below.
OperationAll OperationType = "*"
// Create matches CREATE admission operations.
Create OperationType = "CREATE"
// Update matches UPDATE admission operations.
Update OperationType = "UPDATE"
// Delete matches DELETE admission operations.
Delete OperationType = "DELETE"
// Connect matches CONNECT admission operations.
Connect OperationType = "CONNECT"
)
// WebhookClientConfig contains the information to make a TLS
// connection with the webhook.
type WebhookClientConfig struct {
// `url` gives the location of the webhook, in standard URL form
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use
// the `service` field instead. The host might be resolved via external
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
// in-cluster DNS as that would be a layering violation). `host` may
// also be an IP address.
//
// Please note that using `localhost` or `127.0.0.1` as a `host` is
// risky unless you take great care to run this webhook on all hosts
// which run an apiserver which might need to make calls to this
// webhook. Such installs are likely to be non-portable, i.e., not easy
// to turn up in a new cluster.
//
// The scheme must be "https"; the URL must begin with "https://".
//
// A path is optional, and if present may be any string permissible in
// a URL. You may use the path to pass an arbitrary string to the
// webhook, for example, a cluster identifier.
//
// Attempting to use a user or basic auth e.g. "user:password@" is not
// allowed. Fragments ("#...") and query parameters ("?...") are not
// allowed, either.
//
// +optional
URL *string
// `service` is a reference to the service for this webhook. Either
// `service` or `url` must be specified.
//
// If the webhook is running within the cluster, then you should use `service`.
//
// +optional
Service *ServiceReference
// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
CABundle []byte
}
// ServiceReference holds a reference to Service.legacy.k8s.io
type ServiceReference struct {
// `namespace` is the namespace of the service.
// Required
Namespace string
// `name` is the name of the service.
// Required
Name string
// `path` is an optional URL path which will be sent in any request to
// this service.
// +optional
Path *string
// If specified, the port on the service that is hosting the webhook.
// `port` should be a valid port number (1-65535, inclusive).
// +optional
Port int32
}
// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
type MatchCondition struct {
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
// as well as providing an identifier for logging purposes. A good name should be descriptive of
// the associated expression.
// Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and
// must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or
// '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an
// optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')
//
// Required.
Name string
// Expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
// CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:
//
// 'object' - The object from the incoming request. The value is null for DELETE requests.
// 'oldObject' - The existing object. The value is null for CREATE requests.
// 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest).
// 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
// 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'
// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
//
// Required.
Expression string
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.32
// MutatingAdmissionPolicy describes an admission policy that may mutate an object.
type MutatingAdmissionPolicy struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the MutatingAdmissionPolicy.
Spec MutatingAdmissionPolicySpec
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.32
// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
type MutatingAdmissionPolicyList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta
// List of MutatingAdmissionPolicy.
Items []MutatingAdmissionPolicy
}
// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
type MutatingAdmissionPolicySpec struct {
// paramKind specifies the kind of resources used to parameterize this policy.
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
// +optional
ParamKind *ParamKind
// matchConstraints specifies what resources this policy is designed to validate.
// The AdmissionPolicy cares about a request if it matches _all_ Constraints.
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
// Only the CREATE, UPDATE, and CONNECT operations are allowed.
// '*' matches only CREATE, UPDATE, and CONNECT.
// Required.
MatchConstraints *MatchResources
// variables contain definitions of variables that can be used in composition of other expressions.
// Each variable is defined as a named CEL expression.
// The variables defined here will be available under `variables` in other expressions of the policy
// except matchConditions because matchConditions are evaluated before the rest of the policy.
//
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
// Thus, variables must be sorted by the order of first appearance and acyclic.
// +listType=atomic
// +optional
Variables []Variable
// mutations contain operations to perform on matching objects.
// mutations may not be empty; a minimum of one mutation is required.
// mutations are evaluated in order, and are reinvoked according to
// the reinvocationPolicy.
// The mutations of a policy are invoked for each binding of this policy
// and reinvocation of mutations occurs on a per binding basis.
//
// +listType=atomic
// +optional
Mutations []Mutation
// failurePolicy defines how to handle failures for the admission policy. Failures can
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
// or mis-configured policy definitions or bindings.
//
// A policy is invalid if paramKind refers to a non-existent Kind.
// A binding is invalid if paramRef.name refers to a non-existent resource.
//
// failurePolicy does not define how validations that evaluate to false are handled.
//
// Allowed values are Ignore or Fail. Defaults to Fail.
// +optional
FailurePolicy *FailurePolicyType
// matchConditions is a list of conditions that must be met for a request to be validated.
// Match conditions filter requests that have already been matched by the matchConstraints.
// An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// If a parameter object is provided, it can be accessed via the `params` handle in the same
// manner as validation expressions.
//
// The exact matching logic is (in order):
//   1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
//   2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
//   3. If any matchCondition evaluates to an error (but none are FALSE):
//      - If failurePolicy=Fail, reject the request
//      - If failurePolicy=Ignore, the policy is skipped
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +optional
MatchConditions []MatchCondition
// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
// as part of a single admission evaluation.
// Allowed values are "Never" and "IfNeeded".
//
// Never: These mutations will not be called more than once per binding in a single admission evaluation.
//
// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
// reinvoked when mutations change the object after this mutation is invoked.
// Required.
ReinvocationPolicy ReinvocationPolicyType
}
// Mutation specifies the operation that performs a Mutation.
type Mutation struct {
// patchType indicates the patch strategy used.
// Allowed values are "ApplyConfiguration" and "JSONPatch".
// Required.
//
// +unionDiscriminator
PatchType PatchType
// applyConfiguration defines the desired configuration values of an object.
// The configuration is applied to the admission object using
// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
// A CEL expression is used to create apply configuration.
// NOTE(review): given the +unionDiscriminator above, presumably only the member
// matching patchType should be set — confirm against the validation code.
ApplyConfiguration *ApplyConfiguration
// jsonPatch defines a [JSON patch](https://jsonpatch.com/) to perform a mutation to the object.
// A CEL expression is used to create the JSON patch.
JSONPatch *JSONPatch
}
// PatchType specifies the type of patch operation for a mutation.
// +enum
type PatchType string
const (
// PatchTypeApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
// PatchTypeJSONPatch indicates that the object is mutated through JSON Patch.
PatchTypeJSONPatch PatchType = "JSONPatch"
)
// ApplyConfiguration defines the desired configuration values of an object.
type ApplyConfiguration struct {
// expression will be evaluated by CEL to create an apply configuration.
// ref: https://github.com/google/cel-spec
//
// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
// returns an apply configuration to set a single field:
//
//	Object{
//	  spec: Object.spec{
//	    serviceAccountName: "example"
//	  }
//	}
//
// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
// values not included in the apply configuration.
//
// CEL expressions have access to the object types needed to create apply configurations:
// - 'Object' - CEL type of the resource object.
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
//
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
// object. No other metadata properties are accessible.
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Required.
Expression string
}
// JSONPatch defines a JSON Patch.
type JSONPatch struct {
// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
// ref: https://github.com/google/cel-spec
//
// expression must return an array of JSONPatch values.
//
// For example, this CEL expression returns a JSON patch to conditionally modify a value:
//
// [
// JSONPatch{op: "test", path: "/spec/example", value: "Red"},
// JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
// ]
//
// To define an object for the patch value, use Object types. For example:
//
// [
// JSONPatch{
// op: "add",
// path: "/spec/selector",
// value: Object.spec.selector{matchLabels: {"environment": "test"}}
// }
// ]
//
// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
//
// [
// JSONPatch{
// op: "add",
// path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
// value: "test"
// },
// ]
//
// CEL expressions have access to the types needed to create JSON patches and objects:
//
// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
// See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
// integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
// [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
// function may be used to escape path keys containing '/' and '~'.
// - 'Object' - CEL type of the resource object.
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')
//
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
// as well as:
//
// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
//
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Required.
Expression string
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.32
// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
// configure policies for clusters.
//
// For a given admission request, each binding will cause its policy to be
// evaluated N times, where N is 1 for policies/bindings that don't use
// params, otherwise N is the number of parameters selected by the binding.
// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
//
// Adding/removing policies, bindings, or params can not affect whether a
// given (policy, binding, param) combination is within its own CEL budget.
type MutatingAdmissionPolicyBinding struct {
	// Standard type metadata.
	metav1.TypeMeta
	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
	// +optional
	metav1.ObjectMeta
	// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
	Spec MutatingAdmissionPolicyBindingSpec
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.32
// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
type MutatingAdmissionPolicyBindingList struct {
	// Standard type metadata.
	metav1.TypeMeta
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta
	// List of MutatingAdmissionPolicyBinding.
	Items []MutatingAdmissionPolicyBinding
}
// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
type MutatingAdmissionPolicyBindingSpec struct {
	// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
	// If the referenced resource does not exist, this binding is considered invalid and will be ignored.
	// Required.
	PolicyName string
	// paramRef specifies the parameter resource used to configure the admission control policy.
	// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
	// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
	// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
	// +optional
	ParamRef *ParamRef
	// matchResources limits what resources match this binding and may be mutated by it.
	// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
	// matchConditions before the resource may be mutated.
	// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
	// and matchConditions must match for the resource to be mutated.
	// Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
	// Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
	// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
	// '*' matches CREATE, UPDATE and CONNECT.
	// +optional
	MatchResources *MatchResources
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/apis/admissionregistration/types.go
|
#! /usr/bin/env python3
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_Finalize() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_Finalize; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
import re
import sys
def read(fileiter, pat, whilematch):
    """Yield lines from *fileiter*, gated by regexp object *pat*.

    If *whilematch* is true, yield lines for as long as *pat* matches them;
    if false, yield lines for as long as *pat* does NOT match them.  Either
    way, the first line that flips the condition is consumed and discarded,
    so *fileiter* resumes at the line after it.
    """
    while True:
        try:
            line = next(fileiter)
        except StopIteration:
            return
        matched = pat.match(line) is not None
        if matched != whilematch:
            # Boundary line: swallow it and stop.
            return
        yield line
def combine(fname):
    """Combine the two PYTHONDUMPREFS output blocks found in file *fname*.

    The first block ("Remaining objects:") maps each address to a refcount
    and a repr; the second ("Remaining object addresses:") maps each address
    to a refcount and a type name.  For every address seen in both, print

        address [refcount] typename repr

    where the refcount is shown as "[before->after]" if it changed between
    the two blocks.  Finally print a summary of how many objects appeared
    in each block.
    """
    # Use a context manager so the file is closed even if parsing raises.
    with open(fname) as f:
        fi = iter(f)
        # Skip everything up to (and including) the first block's marker line.
        for line in read(fi, re.compile(r'^Remaining objects:$'), False):
            pass
        crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
        addr2rc = {}
        addr2guts = {}
        before = 0
        # First block: "address [refcount] repr" lines, up to the second marker.
        for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
            m = crack.match(line)
            if m:
                # Tuple assignment binds addr first, so it can key the dicts.
                addr, addr2rc[addr], addr2guts[addr] = m.groups()
                before += 1
            else:
                print('??? skipped:', line)
        after = 0
        # Second block: "address [refcount] typename" lines, until the
        # pattern stops matching.
        for line in read(fi, crack, True):
            after += 1
            m = crack.match(line)
            assert m
            addr, rc, guts = m.groups()  # guts is type name here
            if addr not in addr2rc:
                print('??? new object created while tearing down:', line.rstrip())
                continue
            print(addr, end=' ')
            if rc == addr2rc[addr]:
                print('[%s]' % rc, end=' ')
            else:
                print('[%s->%s]' % (addr2rc[addr], rc), end=' ')
            print(guts, addr2guts[addr])
    print("%d objects before, %d after" % (before, after))
if __name__ == '__main__':
    # Usage: combinerefs.py <path-to-saved-PYTHONDUMPREFS-output>
    combine(sys.argv[1])
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import numpy as np
import pytest
from scipy import linalg, sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from sklearn import config_context
from sklearn.datasets import make_low_rank_matrix, make_sparse_spd_matrix
from sklearn.utils import gen_batches
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._array_api import (
_convert_to_numpy,
_get_namespace_device_dtype_ids,
_max_precision_float_dtype,
get_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import (
device as array_device,
)
from sklearn.utils._testing import (
_array_api_for_tests,
assert_allclose,
assert_allclose_dense_sparse,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
skip_if_32bit,
)
from sklearn.utils.extmath import (
_approximate_mode,
_deterministic_vector_sign_flip,
_incremental_mean_and_var,
_randomized_eigsh,
_safe_accumulator_op,
cartesian,
density,
randomized_range_finder,
randomized_svd,
row_norms,
safe_sparse_dot,
softmax,
stable_cumsum,
svd_flip,
weighted_mode,
)
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
_mode,
)
@pytest.mark.parametrize(
    "sparse_container",
    COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS,
)
def test_density(sparse_container):
    """Density of a sparse container must equal the density of its dense form."""
    rng = np.random.RandomState(0)
    dense = rng.randint(10, size=(10, 5))
    # Introduce a couple of explicit zeros so the matrix is not fully dense.
    dense[1, 2] = 0
    dense[5, 3] = 0
    assert density(sparse_container(dense)) == density(dense)
def test_uniform_weights():
    """With all-ones weights, weighted_mode must agree with the plain mode."""
    rng = np.random.RandomState(0)
    data = rng.randint(10, size=(10, 5))
    uniform = np.ones(data.shape)
    for axis in (None, 0, 1):
        plain_mode, plain_score = _mode(data, axis)
        w_mode, w_score = weighted_mode(data, uniform, axis=axis)
        assert_array_equal(plain_mode, w_mode)
        assert_array_equal(plain_score, w_score)
def test_random_weights():
    """weighted_mode with boosted weights on a known column block.

    Each row is constructed so that its weighted mode is 6, with a score
    that is easily reproduced (the sum of the boosted weights).
    """
    expected_mode = 6
    rng = np.random.RandomState(0)
    data = rng.randint(expected_mode, size=(100, 10))
    weights = rng.random_sample(data.shape)
    # Force the first five columns to the expected mode and boost their weights.
    data[:, :5] = expected_mode
    weights[:, :5] += 1
    got_mode, got_score = weighted_mode(data, weights, axis=1)
    assert_array_equal(got_mode, expected_mode)
    assert_array_almost_equal(got_score.ravel(), weights[:, :5].sum(1))
@pytest.mark.parametrize("dtype", (np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=rank,
tail_strength=0.0,
random_state=0,
).astype(dtype, copy=False)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
U, s, Vt = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
Vt = Vt.astype(dtype, copy=False)
for normalizer in ["auto", "LU", "QR"]: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0
)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == "f":
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert Ua.shape == (n_samples, k)
assert sa.shape == (k,)
assert Va.shape == (k, n_features)
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(
np.dot(U[:, :k], Vt[:k, :]), np.dot(Ua, Va), decimal=decimal
)
# check the sparse matrix representation
for csr_container in CSR_CONTAINERS:
X = csr_container(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0
)
if dtype.kind == "f":
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == "f"
assert sa.dtype.kind == "f"
assert Va.dtype.kind == "f"
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize("dtype", (np.int32, np.int64, np.float32, np.float64))
def test_randomized_eigsh(dtype):
"""Test that `_randomized_eigsh` returns the appropriate components"""
rng = np.random.RandomState(42)
X = np.diag(np.array([1.0, -2.0, 0.0, 3.0], dtype=dtype))
# random rotation that preserves the eigenvalues of X
rand_rot = np.linalg.qr(rng.normal(size=X.shape))[0]
X = rand_rot @ X @ rand_rot.T
# with 'module' selection method, the negative eigenvalue shows up
eigvals, eigvecs = _randomized_eigsh(X, n_components=2, selection="module")
# eigenvalues
assert eigvals.shape == (2,)
assert_array_almost_equal(eigvals, [3.0, -2.0]) # negative eigenvalue here
# eigenvectors
assert eigvecs.shape == (4, 2)
# with 'value' selection method, the negative eigenvalue does not show up
with pytest.raises(NotImplementedError):
_randomized_eigsh(X, n_components=2, selection="value")
@pytest.mark.parametrize("k", (10, 50, 100, 199, 200))
def test_randomized_eigsh_compared_to_others(k):
"""Check that `_randomized_eigsh` is similar to other `eigsh`
Tests that for a random PSD matrix, `_randomized_eigsh` provides results
comparable to LAPACK (scipy.linalg.eigh) and ARPACK
(scipy.sparse.linalg.eigsh).
Note: some versions of ARPACK do not support k=n_features.
"""
# make a random PSD matrix
n_features = 200
X = make_sparse_spd_matrix(n_features, random_state=0)
# compare two versions of randomized
# rough and fast
eigvals, eigvecs = _randomized_eigsh(
X, n_components=k, selection="module", n_iter=25, random_state=0
)
# more accurate but slow (TODO find realistic settings here)
eigvals_qr, eigvecs_qr = _randomized_eigsh(
X,
n_components=k,
n_iter=25,
n_oversamples=20,
random_state=0,
power_iteration_normalizer="QR",
selection="module",
)
# with LAPACK
eigvals_lapack, eigvecs_lapack = eigh(
X, subset_by_index=(n_features - k, n_features - 1)
)
indices = eigvals_lapack.argsort()[::-1]
eigvals_lapack = eigvals_lapack[indices]
eigvecs_lapack = eigvecs_lapack[:, indices]
# -- eigenvalues comparison
assert eigvals_lapack.shape == (k,)
# comparison precision
assert_array_almost_equal(eigvals, eigvals_lapack, decimal=6)
assert_array_almost_equal(eigvals_qr, eigvals_lapack, decimal=6)
# -- eigenvectors comparison
assert eigvecs_lapack.shape == (n_features, k)
# flip eigenvectors' sign to enforce deterministic output
dummy_vecs = np.zeros_like(eigvecs).T
eigvecs, _ = svd_flip(eigvecs, dummy_vecs)
eigvecs_qr, _ = svd_flip(eigvecs_qr, dummy_vecs)
eigvecs_lapack, _ = svd_flip(eigvecs_lapack, dummy_vecs)
assert_array_almost_equal(eigvecs, eigvecs_lapack, decimal=4)
assert_array_almost_equal(eigvecs_qr, eigvecs_lapack, decimal=6)
# comparison ARPACK ~ LAPACK (some ARPACK implems do not support k=n)
if k < n_features:
v0 = _init_arpack_v0(n_features, random_state=0)
# "LA" largest algebraic <=> selection="value" in randomized_eigsh
eigvals_arpack, eigvecs_arpack = eigsh(
X, k, which="LA", tol=0, maxiter=None, v0=v0
)
indices = eigvals_arpack.argsort()[::-1]
# eigenvalues
eigvals_arpack = eigvals_arpack[indices]
assert_array_almost_equal(eigvals_lapack, eigvals_arpack, decimal=10)
# eigenvectors
eigvecs_arpack = eigvecs_arpack[:, indices]
eigvecs_arpack, _ = svd_flip(eigvecs_arpack, dummy_vecs)
assert_array_almost_equal(eigvecs_arpack, eigvecs_lapack, decimal=8)
@pytest.mark.parametrize(
    "n,rank",
    [
        (10, 7),
        (100, 10),
        (100, 80),
        (500, 10),
        (500, 250),
        (500, 400),
    ],
)
def test_randomized_eigsh_reconst_low_rank(n, rank):
    """`_randomized_eigsh` accurately reconstructs a low-rank PSD matrix.

    Also checks that the returned eigenvectors are orthonormal.
    """
    assert rank < n
    # Build an n x n PSD matrix of rank `rank` from a random factor.
    rng = np.random.RandomState(69)
    factor = rng.randn(n, rank)
    psd = factor @ factor.T
    # Decompose with exactly the "right" number of components.
    eigenvalues, eigenvectors = _randomized_eigsh(
        psd, n_components=rank, random_state=rng
    )
    # Eigenvectors must be unit-norm and mutually orthogonal.
    assert_array_almost_equal(
        np.linalg.norm(eigenvectors, axis=0), np.ones(eigenvalues.shape)
    )
    assert_array_almost_equal(
        eigenvectors.T @ eigenvectors, np.diag(np.ones(eigenvalues.shape))
    )
    # V diag(S) V^T should closely reproduce the original matrix.
    reconstruction = eigenvectors @ np.diag(eigenvalues) @ eigenvectors.T
    assert_array_almost_equal(reconstruction, psd, decimal=6)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_row_norms(dtype, csr_container):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype, copy=False)
sq_norm = (X**2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = csr_container(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype, copy=False)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype, copy=False)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
    """Power iterations let randomized_svd cope with a noisy matrix."""
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # generate a matrix X with structured approximate rank `rank` and an
    # important noisy component
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=rank,
        tail_strength=0.1,
        random_state=0,
    )
    assert X.shape == (n_samples, n_features)
    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ["auto", "none", "LU", "QR"]:
        # compute the singular values of X using the fast approximate
        # method without the iterated power method
        _, sa, _ = randomized_svd(
            X, k, n_iter=0, power_iteration_normalizer=normalizer, random_state=0
        )
        # the approximation does not tolerate the noise:
        assert np.abs(s[:k] - sa).max() > 0.01
        # compute the singular values of X using the fast approximate
        # method with iterated power method
        _, sap, _ = randomized_svd(
            X, k, power_iteration_normalizer=normalizer, random_state=0
        )
        # the iterated power method is helping getting rid of the noise:
        assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
    """Power iterations recover structure even with a full-rank spectrum."""
    # Check that extmath.randomized_svd can handle matrices with slowly
    # decaying singular values (effectively infinite rank).
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # let us try again without 'low_rank component': just regularly but slowly
    # decreasing singular values: the rank of the data matrix is infinite
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=rank,
        tail_strength=1.0,
        random_state=0,
    )
    assert X.shape == (n_samples, n_features)
    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ["auto", "none", "LU", "QR"]:
        # compute the singular values of X using the fast approximate method
        # without the iterated power method
        _, sa, _ = randomized_svd(
            X, k, n_iter=0, power_iteration_normalizer=normalizer, random_state=0
        )
        # the approximation does not tolerate the noise:
        assert np.abs(s[:k] - sa).max() > 0.1
        # compute the singular values of X using the fast approximate method
        # with iterated power method
        _, sap, _ = randomized_svd(
            X, k, n_iter=5, power_iteration_normalizer=normalizer, random_state=0
        )
        # the iterated power method is still managing to get most of the
        # structure at the requested rank
        assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    """Transposing the design matrix must have limited impact on the SVD."""
    n_samples, n_features, rank, k = 100, 500, 4, 10
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=rank,
        tail_strength=0.5,
        random_state=0,
    )
    assert X.shape == (n_samples, n_features)
    U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False, random_state=0)
    U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True, random_state=0)
    U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose="auto", random_state=0)
    U4, s4, V4 = linalg.svd(X, full_matrices=False)
    # Singular values agree with the exact SVD for every transpose mode.
    for approx_sv in (s1, s2, s3):
        assert_almost_equal(approx_sv, s4[:k], decimal=3)
    # Rank-k reconstructions agree with the exact truncated SVD.
    exact_reconstruction = np.dot(U4[:, :k], V4[:k, :])
    assert_almost_equal(np.dot(U1, V1), exact_reconstruction, decimal=2)
    assert_almost_equal(np.dot(U2, V2), exact_reconstruction, decimal=2)
    # in this case 'auto' is equivalent to transpose
    assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
    """Normalized power iterations must stay stable as n_iter grows."""
    # randomized_svd with power_iteration_normalizer='none' diverges for
    # large number of power iterations on this dataset
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
    X += 3 * rng.randint(0, 2, size=X.shape)
    n_components = 50
    # Check that it diverges with many (non-normalized) power iterations
    U, s, Vt = randomized_svd(
        X, n_components, n_iter=2, power_iteration_normalizer="none", random_state=0
    )
    # Frobenius norm of the reconstruction residual measures the error.
    A = X - U.dot(np.diag(s).dot(Vt))
    error_2 = linalg.norm(A, ord="fro")
    U, s, Vt = randomized_svd(
        X, n_components, n_iter=20, power_iteration_normalizer="none", random_state=0
    )
    A = X - U.dot(np.diag(s).dot(Vt))
    error_20 = linalg.norm(A, ord="fro")
    # Without normalization the error changes drastically between 2 and 20
    # iterations (divergence).
    assert np.abs(error_2 - error_20) > 100
    for normalizer in ["LU", "QR", "auto"]:
        U, s, Vt = randomized_svd(
            X,
            n_components,
            n_iter=2,
            power_iteration_normalizer=normalizer,
            random_state=0,
        )
        A = X - U.dot(np.diag(s).dot(Vt))
        error_2 = linalg.norm(A, ord="fro")
        for i in [5, 10, 50]:
            U, s, Vt = randomized_svd(
                X,
                n_components,
                n_iter=i,
                power_iteration_normalizer=normalizer,
                random_state=0,
            )
            A = X - U.dot(np.diag(s).dot(Vt))
            error = linalg.norm(A, ord="fro")
            # With normalization, the error stays within a small band.
            assert 15 > np.abs(error_2 - error)
@pytest.mark.parametrize("sparse_container", DOK_CONTAINERS + LIL_CONTAINERS)
def test_randomized_svd_sparse_warnings(sparse_container):
# randomized_svd throws a warning for lil and dok matrix
rng = np.random.RandomState(42)
X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
n_components = 5
X = sparse_container(X)
warn_msg = (
"Calculating SVD of a {} is expensive. csr_matrix is more efficient.".format(
sparse_container.__name__
)
)
with pytest.warns(sparse.SparseEfficiencyWarning, match=warn_msg):
randomized_svd(X, n_components, n_iter=1, power_iteration_normalizer="none")
def test_svd_flip():
    """svd_flip must preserve reconstruction in both decision modes."""
    rs = np.random.RandomState(1999)
    n_samples, n_features = 20, 10
    X = rs.randn(n_samples, n_features)
    # v-based flip on the original matrix still reconstructs it.
    U, S, Vt = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, Vt, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
    # u-based flip on the transposed matrix still reconstructs it.
    XT = X.T
    U, S, Vt = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, Vt, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
    # Both flip modes are equivalent under reconstruction.
    U_flip1, V_flip1 = svd_flip(U, Vt, u_based_decision=True)
    assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
    U_flip2, V_flip2 = svd_flip(U, Vt, u_based_decision=False)
    assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
@pytest.mark.parametrize("n_samples, n_features", [(3, 4), (4, 3)])
def test_svd_flip_max_abs_cols(n_samples, n_features, global_random_seed):
rs = np.random.RandomState(global_random_seed)
X = rs.randn(n_samples, n_features)
U, _, Vt = linalg.svd(X, full_matrices=False)
U1, _ = svd_flip(U, Vt, u_based_decision=True)
max_abs_U1_row_idx_for_col = np.argmax(np.abs(U1), axis=0)
assert (U1[max_abs_U1_row_idx_for_col, np.arange(U1.shape[1])] >= 0).all()
_, V2 = svd_flip(U, Vt, u_based_decision=False)
max_abs_V2_col_idx_for_row = np.argmax(np.abs(V2), axis=1)
assert (V2[np.arange(V2.shape[0]), max_abs_V2_col_idx_for_row] >= 0).all()
def test_randomized_svd_sign_flip():
    """flip_sign=True gives seed-independent, valid decompositions."""
    a = np.array([[2.0, 0.0], [0.0, 1.0]])
    u_ref, s_ref, v_ref = randomized_svd(a, 2, flip_sign=True, random_state=41)
    for seed in range(10):
        u, s, v = randomized_svd(a, 2, flip_sign=True, random_state=seed)
        # Signs match the reference decomposition, whatever the seed.
        assert_almost_equal(u_ref, u)
        assert_almost_equal(v_ref, v)
        # The result is still an exact SVD of `a` with orthonormal factors.
        assert_almost_equal(np.dot(u * s, v), a)
        assert_almost_equal(np.dot(u.T, u), np.eye(2))
        assert_almost_equal(np.dot(v.T, v), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
    # Check if the randomized_svd sign flipping is always done based on u
    # irrespective of transpose.
    # See https://github.com/scikit-learn/scikit-learn/issues/5608
    # for more details.
    def max_loading_is_positive(u, v):
        """
        returns bool tuple indicating if the values maximising np.abs
        are positive across all rows for u and across all columns for v.
        """
        u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
        v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
        return u_based, v_based
    mat = np.arange(10 * 8).reshape(10, -1)
    # Without transpose
    u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True, random_state=0)
    u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
    # Sign flip is u-based: holds for u, not (necessarily) for v.
    assert u_based
    assert not v_based
    # With transpose
    u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
        mat, 3, flip_sign=True, transpose=True, random_state=0
    )
    u_based, v_based = max_loading_is_positive(
        u_flipped_with_transpose, v_flipped_with_transpose
    )
    # Same u-based behavior even when the computation transposed internally.
    assert u_based
    assert not v_based
@pytest.mark.parametrize("n", [50, 100, 300])
@pytest.mark.parametrize("m", [50, 100, 300])
@pytest.mark.parametrize("k", [10, 20, 50])
@pytest.mark.parametrize("seed", range(5))
def test_randomized_svd_lapack_driver(n, m, k, seed):
# Check that different SVD drivers provide consistent results
# Matrix being compressed
rng = np.random.RandomState(seed)
X = rng.rand(n, m)
# Number of components
u1, s1, vt1 = randomized_svd(X, k, svd_lapack_driver="gesdd", random_state=0)
u2, s2, vt2 = randomized_svd(X, k, svd_lapack_driver="gesvd", random_state=0)
# Check shape and contents
assert u1.shape == u2.shape
assert_allclose(u1, u2, atol=0, rtol=1e-3)
assert s1.shape == s2.shape
assert_allclose(s1, s2, atol=0, rtol=1e-3)
assert vt1.shape == vt2.shape
assert_allclose(vt1, vt2, atol=0, rtol=1e-3)
def test_cartesian():
    """cartesian must enumerate the product in row-major order."""
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    # Row-major enumeration: last axis varies fastest.
    expected = np.array(
        [[a, b, c] for a in [1, 2, 3] for b in [4, 5] for c in [6, 7]]
    )
    assert_array_equal(expected, cartesian(axes))
    # A single axis comes back as a column vector.
    single = np.arange(3)
    assert_array_equal(single[:, np.newaxis], cartesian((single,)))
@pytest.mark.parametrize(
    "arrays, output_dtype",
    [
        (
            [np.array([1, 2, 3], dtype=np.int32), np.array([4, 5], dtype=np.int64)],
            np.dtype(np.int64),
        ),
        (
            [np.array([1, 2, 3], dtype=np.int32), np.array([4, 5], dtype=np.float64)],
            np.dtype(np.float64),
        ),
        (
            [np.array([1, 2, 3], dtype=np.int32), np.array(["x", "y"], dtype=object)],
            np.dtype(object),
        ),
    ],
)
def test_cartesian_mix_types(arrays, output_dtype):
    """cartesian promotes mixed input dtypes to the expected result dtype."""
    result = cartesian(arrays)
    assert result.dtype == output_dtype
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("as_list", (True, False))
def test_incremental_weighted_mean_and_variance_simple(dtype, as_list):
rng = np.random.RandomState(42)
mult = 10
X = rng.rand(1000, 20).astype(dtype) * mult
sample_weight = rng.rand(X.shape[0]) * mult
X1 = X.tolist() if as_list else X
mean, var, _ = _incremental_mean_and_var(X1, 0, 0, 0, sample_weight=sample_weight)
expected_mean = np.average(X, weights=sample_weight, axis=0)
expected_var = np.average(X**2, weights=sample_weight, axis=0) - expected_mean**2
assert_almost_equal(mean, expected_mean)
assert_almost_equal(var, expected_var)
@pytest.mark.parametrize(
    "array_namespace, device, dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_incremental_weighted_mean_and_variance_array_api(
    array_namespace, device, dtype
):
    """Array API dispatch must match the NumPy reference computation."""
    xp = _array_api_for_tests(array_namespace, device)
    rng = np.random.RandomState(42)
    mult = 10
    X = rng.rand(1000, 20).astype(dtype) * mult
    sample_weight = rng.rand(X.shape[0]).astype(dtype) * mult
    # NumPy reference result.
    mean, var, _ = _incremental_mean_and_var(X, 0, 0, 0, sample_weight=sample_weight)
    X_xp = xp.asarray(X, device=device)
    sample_weight_xp = xp.asarray(sample_weight, device=device)
    with config_context(array_api_dispatch=True):
        mean_xp, var_xp, _ = _incremental_mean_and_var(
            X_xp, 0, 0, 0, sample_weight=sample_weight_xp
        )
        # The attributes like mean and var are computed and set with respect to the
        # maximum supported float dtype
        assert array_device(mean_xp) == array_device(X_xp)
        assert mean_xp.dtype == _max_precision_float_dtype(xp, device=device)
        assert array_device(var_xp) == array_device(X_xp)
        assert var_xp.dtype == _max_precision_float_dtype(xp, device=device)
        # Convert back to NumPy for value comparison.
        mean_xp = _convert_to_numpy(mean_xp, xp=xp)
        var_xp = _convert_to_numpy(var_xp, xp=xp)
        assert_allclose(mean, mean_xp)
        assert_allclose(var, var_xp)
@pytest.mark.parametrize("mean", [0, 1e7, -1e7])
@pytest.mark.parametrize("var", [1, 1e-8, 1e5])
@pytest.mark.parametrize(
"weight_loc, weight_scale", [(0, 1), (0, 1e-8), (1, 1e-8), (10, 1), (1e7, 1)]
)
def test_incremental_weighted_mean_and_variance(mean, var, weight_loc, weight_scale):
rng = np.random.RandomState(42)
# Testing of correctness and numerical stability
def _assert(X, sample_weight, expected_mean, expected_var):
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
last_mean, last_weight_sum, last_var = 0, 0, 0
for batch in gen_batches(n, chunk_size):
last_mean, last_var, last_weight_sum = _incremental_mean_and_var(
X[batch],
last_mean,
last_var,
last_weight_sum,
sample_weight=sample_weight[batch],
)
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-6)
size = (100, 20)
weight = rng.normal(loc=weight_loc, scale=weight_scale, size=size[0])
# Compare to weighted average: np.average
X = rng.normal(loc=mean, scale=var, size=size)
expected_mean = _safe_accumulator_op(np.average, X, weights=weight, axis=0)
expected_var = _safe_accumulator_op(
np.average, (X - expected_mean) ** 2, weights=weight, axis=0
)
_assert(X, weight, expected_mean, expected_var)
# Compare to unweighted mean: np.mean
X = rng.normal(loc=mean, scale=var, size=size)
ones_weight = np.ones(size[0])
expected_mean = _safe_accumulator_op(np.mean, X, axis=0)
expected_var = _safe_accumulator_op(np.var, X, axis=0)
_assert(X, ones_weight, expected_mean, expected_var)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incremental_weighted_mean_and_variance_ignore_nan(dtype):
    """NaN entries must not contribute to the weighted incremental stats.

    Each column of the NaN-laden input carries the same three finite values
    as the clean input, so both runs must produce identical weighted means,
    variances and counts.
    """
    prev_means = np.full(4, 535.0)
    prev_variances = np.full(4, 4225.0)
    prev_weight_sum = np.full(4, 2, dtype=np.int32)
    clean = np.array(
        [[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]]
    ).astype(dtype)
    with_nan = np.array(
        [
            [170, np.nan, 170, 170],
            [np.nan, 170, 430, 430],
            [430, 430, np.nan, 300],
            [300, 300, 300, np.nan],
        ]
    ).astype(dtype)
    ref_means, ref_variances, ref_count = _incremental_mean_and_var(
        clean,
        prev_means,
        prev_variances,
        prev_weight_sum,
        sample_weight=np.ones(clean.shape[0]),
    )
    nan_means, nan_variances, nan_count = _incremental_mean_and_var(
        with_nan,
        prev_means,
        prev_variances,
        prev_weight_sum,
        sample_weight=np.ones(with_nan.shape[0]),
    )
    assert_allclose(nan_means, ref_means)
    assert_allclose(nan_variances, ref_variances)
    assert_allclose(nan_count, ref_count)
def test_incremental_variance_update_formulas():
    """Youngs & Cramer incremental updates must match the batch statistics.

    Splits the data in two, seeds the accumulator with the first part's
    exact mean/variance/count, feeds the second part incrementally, and
    compares against the full-array results.
    """
    # Doggie data from https://www.mathsisfun.com/data/standard-deviation.html
    column = [600, 470, 170, 430, 300]
    A = np.array([column, column, column, column]).T
    split = 2
    head = A[:split, :]
    tail = A[split:, :]
    prior_means = head.mean(axis=0)
    prior_variances = head.var(axis=0)
    prior_counts = np.full(head.shape[1], head.shape[0], dtype=np.int32)
    new_means, new_variances, new_counts = _incremental_mean_and_var(
        tail, prior_means, prior_variances, prior_counts
    )
    assert_almost_equal(new_means, A.mean(axis=0), 6)
    assert_almost_equal(new_variances, A.var(axis=0), 6)
    assert_almost_equal(new_counts, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
    """NaN entries must be ignored by the unweighted incremental stats.

    Each column of the NaN-laden input holds the same three finite values
    as the clean input, so both runs must agree on mean, variance and count.
    """
    prev_means = np.full(4, 535.0)
    prev_variances = np.full(4, 4225.0)
    prev_counts = np.full(4, 2, dtype=np.int32)
    clean = np.array(
        [[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]]
    )
    with_nan = np.array(
        [
            [170, np.nan, 170, 170],
            [np.nan, 170, 430, 430],
            [430, 430, np.nan, 300],
            [300, 300, 300, np.nan],
        ]
    )
    ref_means, ref_variances, ref_count = _incremental_mean_and_var(
        clean, prev_means, prev_variances, prev_counts
    )
    nan_means, nan_variances, nan_count = _incremental_mean_and_var(
        with_nan, prev_means, prev_variances, prev_counts
    )
    assert_allclose(nan_means, ref_means)
    assert_allclose(nan_variances, ref_variances)
    assert_allclose(nan_count, ref_count)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
    # Test Youngs and Cramer incremental variance formulas.
    """Show that _incremental_mean_and_var is numerically stable where both a
    naive one-pass and a naive online update accumulate large errors.

    The data is two constant blocks with hugely different magnitudes (1e8
    vs log(1e-5)), a classic catastrophic-cancellation setup for variance
    algorithms.
    """
    # Reference: exact (for this data size) batch variance.
    def np_var(A):
        return A.var(axis=0)
    # Naive one pass variance computation - not numerically stable
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    def one_pass_var(X):
        n = X.shape[0]
        exp_x2 = (X**2).sum(axis=0) / n
        expx_2 = (X.sum(axis=0) / n) ** 2
        return exp_x2 - expx_2
    # Two-pass algorithm, stable.
    # We use it as a benchmark. It is not an online algorithm
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    # NOTE(review): two_pass_var is defined but never called in this test.
    def two_pass_var(X):
        mean = X.mean(axis=0)
        Y = X.copy()
        return np.mean((Y - mean) ** 2, axis=0)
    # Naive online implementation
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks for size 1
    def naive_mean_variance_update(x, last_mean, last_variance, last_sample_count):
        updated_sample_count = last_sample_count + 1
        samples_ratio = last_sample_count / float(updated_sample_count)
        updated_mean = x / updated_sample_count + last_mean * samples_ratio
        updated_variance = (
            last_variance * samples_ratio
            + (x - last_mean) * (x - updated_mean) / updated_sample_count
        )
        return updated_mean, updated_variance, updated_sample_count
    # We want to show a case when one_pass_var has error > 1e-3 while
    # _batch_mean_variance_update has less.
    tol = 200
    n_features = 2
    n_samples = 10000
    # Two constant blocks: one huge (1e8), one tiny (log(1e-5) ~ -11.5).
    x1 = np.array(1e8, dtype=np.float64)
    x2 = np.log(1e-5, dtype=np.float64)
    A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
    A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
    A = np.vstack((A0, A1))
    # Naive one pass var: >tol (=1063)
    assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
    # Starting point for online algorithms: after A0
    # Naive implementation: >tol (436)
    mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
    for i in range(A1.shape[0]):
        mean, var, n = naive_mean_variance_update(A1[i, :], mean, var, n)
    assert n == A.shape[0]
    # the mean is also slightly unstable
    assert np.abs(A.mean(axis=0) - mean).max() > 1e-6
    assert np.abs(np_var(A) - var).max() > tol
    # Robust implementation: <tol (177)
    mean, var = A0[0, :], np.zeros(n_features)
    n = np.full(n_features, n_samples // 2, dtype=np.int32)
    for i in range(A1.shape[0]):
        mean, var, n = _incremental_mean_and_var(
            A1[i, :].reshape((1, A1.shape[1])), mean, var, n
        )
    assert_array_equal(n, A.shape[0])
    assert_array_almost_equal(A.mean(axis=0), mean)
    assert tol > np.abs(np_var(A) - var).max()
def test_incremental_variance_ddof():
    # Test that degrees of freedom parameter for calculations are correct.
    """Incremental stats over growing prefixes must match np.mean/np.var.

    Streams X in batches of several sizes; after each batch the accumulated
    mean/variance/count must equal the batch statistics of the prefix seen
    so far (np.var uses ddof=0, so the incremental update must as well).
    """
    rng = np.random.RandomState(1999)
    X = rng.randn(50, 10)
    n_samples, n_features = X.shape
    for batch_size in [11, 20, 37]:
        # Batch boundaries; pad with n_samples when batch_size does not
        # divide the data evenly so the last partial batch is included.
        steps = np.arange(0, X.shape[0], batch_size)
        if steps[-1] != X.shape[0]:
            steps = np.hstack([steps, n_samples])
        for i, j in itertools.pairwise(steps):
            batch = X[i:j, :]
            if i == 0:
                # Seed the accumulator with the first batch's exact stats.
                incremental_means = batch.mean(axis=0)
                incremental_variances = batch.var(axis=0)
                # Assign this twice so that the test logic is consistent
                incremental_count = batch.shape[0]
                sample_count = np.full(batch.shape[1], batch.shape[0], dtype=np.int32)
            else:
                result = _incremental_mean_and_var(
                    batch, incremental_means, incremental_variances, sample_count
                )
                (incremental_means, incremental_variances, incremental_count) = result
                sample_count += batch.shape[0]
            # Compare against one-shot statistics over the prefix X[:j].
            calculated_means = np.mean(X[:j], axis=0)
            calculated_variances = np.var(X[:j], axis=0)
            assert_almost_equal(incremental_means, calculated_means, 6)
            assert_almost_equal(incremental_variances, calculated_variances, 6)
            assert_array_equal(incremental_count, sample_count)
def test_vector_sign_flip():
    """_deterministic_vector_sign_flip makes each row's largest-magnitude
    entry positive while only flipping row signs."""
    rng = np.random.RandomState(36)
    data = rng.randn(5, 5)
    flipped = _deterministic_vector_sign_flip(data)
    # After flipping, the largest-|.| entry of each row is also its maximum.
    argmax_abs = np.argmax(np.abs(data), axis=1)
    assert_array_equal(argmax_abs, np.argmax(flipped, axis=1))
    # The transform is a pure per-row sign flip: undoing it recovers data.
    row_signs = np.sign(data[np.arange(data.shape[0]), argmax_abs])
    assert_array_equal(data, flipped * row_signs[:, np.newaxis])
def test_softmax():
    """softmax must equal exp(X) normalized by its per-row sums."""
    X = np.random.RandomState(0).randn(3, 5)
    exponentials = np.exp(X)
    expected = exponentials / exponentials.sum(axis=1, keepdims=True)
    assert_array_almost_equal(softmax(X), expected)
def test_stable_cumsum_deprecation():
    """Calling the deprecated stable_cumsum must emit a FutureWarning whose
    message names the function."""
    with pytest.warns(FutureWarning, match="stable_cumsum.+is deprecated"):
        stable_cumsum([1, 2, 3])
@pytest.mark.parametrize(
    "A_container",
    [np.array, *CSR_CONTAINERS],
    ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS],
)
@pytest.mark.parametrize(
    "B_container",
    [np.array, *CSR_CONTAINERS],
    ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS],
)
def test_safe_sparse_dot_2d(A_container, B_container):
    """safe_sparse_dot on 2D operands matches np.dot for every dense/CSR mix."""
    rng = np.random.RandomState(0)
    left = rng.random_sample((30, 10))
    right = rng.random_sample((10, 20))
    reference = np.dot(left, right)
    product = safe_sparse_dot(
        A_container(left), B_container(right), dense_output=True
    )
    assert_allclose(product, reference)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_safe_sparse_dot_nd(csr_container):
    """safe_sparse_dot matches np.dot when one operand is a sparse 2D matrix
    and the other a dense ND array, on either side."""
    rng = np.random.RandomState(0)
    # dense ND @ sparse 2D
    lhs = rng.random_sample((2, 3, 4, 5, 6))
    rhs = rng.random_sample((6, 7))
    reference = np.dot(lhs, rhs)
    assert_allclose(safe_sparse_dot(lhs, csr_container(rhs)), reference)
    # sparse 2D @ dense ND
    lhs = rng.random_sample((2, 3))
    rhs = rng.random_sample((4, 5, 3, 6))
    reference = np.dot(lhs, rhs)
    assert_allclose(safe_sparse_dot(csr_container(lhs), rhs), reference)
@pytest.mark.parametrize(
    "container",
    [np.array, *CSR_CONTAINERS],
    ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS],
)
def test_safe_sparse_dot_2d_1d(container):
    """safe_sparse_dot handles 2D @ 1D and 1D @ 2D products like np.dot."""
    rng = np.random.RandomState(0)
    vec = rng.random_sample((10))
    # 2D @ 1D
    mat = rng.random_sample((30, 10))
    assert_allclose(safe_sparse_dot(container(mat), vec), np.dot(mat, vec))
    # 1D @ 2D
    mat = rng.random_sample((10, 30))
    assert_allclose(safe_sparse_dot(vec, container(mat)), np.dot(vec, mat))
@pytest.mark.parametrize("dense_output", [True, False])
def test_safe_sparse_dot_dense_output(dense_output):
    """dense_output toggles whether a sparse @ sparse product is densified."""
    rng = np.random.RandomState(0)
    lhs = sparse.random(30, 10, density=0.1, random_state=rng)
    rhs = sparse.random(10, 20, density=0.1, random_state=rng)
    reference = lhs.dot(rhs)
    product = safe_sparse_dot(lhs, rhs, dense_output=dense_output)
    # The result stays sparse exactly when dense_output is False.
    assert sparse.issparse(product) == (not dense_output)
    if dense_output:
        reference = reference.toarray()
    assert_allclose_dense_sparse(product, reference)
def test_approximate_mode():
    """_approximate_mode must stay correct when ``class_counts * n_draws``
    exceeds the 32-bit signed integer range.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20774
    """
    class_counts = np.array([99000, 1000], dtype=np.int32)
    draws = _approximate_mode(class_counts=class_counts, n_draws=25000, rng=0)
    # 25000 draws out of 100000 is a quarter of the population, so a fair
    # draw takes 25% of each class: 24750 and 250 respectively.
    assert_array_equal(draws, [24750, 250])
@pytest.mark.parametrize(
    "array_namespace, device, dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_randomized_svd_array_api_compliance(array_namespace, device, dtype):
    """randomized_svd with array-API inputs must return arrays in the input
    namespace and match the plain-numpy results."""
    xp = _array_api_for_tests(array_namespace, device)
    rng = np.random.RandomState(0)
    X = rng.normal(size=(30, 10)).astype(dtype)
    X_xp = xp.asarray(X, device=device)
    n_components = 5
    # float32 results need a looser comparison tolerance.
    atol = 1e-5 if dtype == "float32" else 0
    with config_context(array_api_dispatch=True):
        u_np, s_np, vt_np = randomized_svd(X, n_components, random_state=0)
        u_xp, s_xp, vt_xp = randomized_svd(X_xp, n_components, random_state=0)
        # All outputs must live in the same namespace as the input.
        assert get_namespace(u_xp)[0].__name__ == xp.__name__
        assert get_namespace(s_xp)[0].__name__ == xp.__name__
        assert get_namespace(vt_xp)[0].__name__ == xp.__name__
        # Numerical agreement with the numpy reference run.
        assert_allclose(_convert_to_numpy(u_xp, xp), u_np, atol=atol)
        assert_allclose(_convert_to_numpy(s_xp, xp), s_np, atol=atol)
        assert_allclose(_convert_to_numpy(vt_xp, xp), vt_np, atol=atol)
@pytest.mark.parametrize(
    "array_namespace, device, dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_randomized_range_finder_array_api_compliance(array_namespace, device, dtype):
    """randomized_range_finder with array-API inputs must return an array in
    the input namespace and match the plain-numpy result."""
    xp = _array_api_for_tests(array_namespace, device)
    rng = np.random.RandomState(0)
    X = rng.normal(size=(30, 10)).astype(dtype)
    X_xp = xp.asarray(X, device=device)
    size = 5
    n_iter = 10
    # float32 results need a looser comparison tolerance.
    atol = 1e-5 if dtype == "float32" else 0
    with config_context(array_api_dispatch=True):
        Q_np = randomized_range_finder(X, size=size, n_iter=n_iter, random_state=0)
        Q_xp = randomized_range_finder(X_xp, size=size, n_iter=n_iter, random_state=0)
        # Output must live in the same namespace as the input.
        assert get_namespace(Q_xp)[0].__name__ == xp.__name__
        assert_allclose(_convert_to_numpy(Q_xp, xp), Q_np, atol=atol)
|
python
|
github
|
https://github.com/scikit-learn/scikit-learn
|
sklearn/utils/tests/test_extmath.py
|
---
navigation_title: "Reindex indices"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html
applies_to:
stack: all
serverless: ga
---
# Reindex indices examples
The [Reindex API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) copies documents from a source index, data stream, or alias to a destination, allowing for optional data modification via scripts or ingest pipelines.
You can learn how to:
**Run and control reindexing**
- [Basic reindexing example](#basic-reindexing-example)
- [Reindex asynchronously](#docs-reindex-task-api)
- [Reindex multiple indices sequentially](#docs-reindex-multiple-sequentially)
- [Reindex from multiple indices in a single request](#docs-reindex-multiple-sources)
- [Reindex with throttling](#docs-reindex-throttle)
- [Reindex with rethrottling](#docs-reindex-rethrottle)
- [Reindex with slicing](#docs-reindex-slice)
**Filter and transform documents**
- [Reindex selected documents with a query](#docs-reindex-select-query)
- [Reindex a limited number of documents with `max_docs`](#docs-reindex-select-max-docs)
- [Reindex selected fields](#docs-reindex-filter-source)
- [Reindex to change the name of a field](#docs-reindex-change-name)
- [Modify documents during reindexing](#reindex-scripts)
- [Extract a random subset of the source](#docs-reindex-api-subset)
- [Reindex daily indices](#docs-reindex-daily-indices)
**Route or send data elsewhere**
- [Reindex with custom routing](#docs-reindex-routing)
- [Reindex with an ingest pipeline](#reindex-with-an-ingest-pipeline)
- [Reindex from remote](#reindex-from-remote)
**Troubleshooting**
- [Monitor reindex tasks](#monitor-reindex-tasks)
- [Diagnose node failures](#diagnose-node-failures)
- [Version conflicts](#version-conflicts)
## Basic reindexing example
Use the Reindex API to copy all documents from one index to another.
```console
POST _reindex
{
"source": {
"index": "my-index-000001"
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index_big]
<!--
```console-result
{
"took" : 147,
"timed_out": false,
"created": 120,
"updated": 0,
"deleted": 0,
"batches": 1,
"version_conflicts": 0,
"noops": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0,
"requests_per_second": -1.0,
"throttled_until_millis": 0,
"total": 120,
"failures" : [ ]
}
```
% TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/]
-->
## Reindex asynchronously [docs-reindex-task-api]
If the request contains `wait_for_completion=false`, {{es}} performs some preflight checks, launches the request, and returns a `task` you can use to cancel or get the status of the task. {{es}} creates a record of this task as a document at `_tasks/<task_id>`.
## Reindex multiple indices sequentially [docs-reindex-multiple-sequentially]
If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.
That way you can resume the process if there are any errors by removing the partially completed source and starting over.
It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.
One-off bash scripts seem to work nicely for this:
```bash
for index in i1 i2 i3 i4 i5; do
curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
"source": {
"index": "'$index'"
},
"dest": {
"index": "'$index'-reindexed"
}
}'
done
```
% NOTCONSOLE
## Reindex with throttling [docs-reindex-throttle]
Set `requests_per_second` to any positive decimal number (for example, `1.4`, `6`, or `1000`) to throttle the rate at which the reindex API issues batches of index operations.
Requests are throttled by padding each batch with a wait time.
To disable throttling, set `requests_per_second` to `-1`.
The throttling is done by waiting between batches so that the `scroll` that the reindex API uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`:
```txt
target_time = 1000 / 500 per second = 2 seconds
wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
```
Since the batch is issued as a single `_bulk` request, large batch sizes cause {{es}} to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth".
## Reindex with rethrottling [docs-reindex-rethrottle]
The value of `requests_per_second` can be changed on a running reindex using the `_rethrottle` API. For example:
```console
POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
```
The task ID can be found using the [task management APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).
Just like when setting it on the Reindex API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level.
Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query will take effect after completing the current batch.
This prevents scroll timeouts.
## Reindex with slicing [docs-reindex-slice]
Reindex supports [sliced scroll](paginate-search-results.md#slice-scroll) to parallelize the reindexing process.
This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.
::::{note}
Reindexing from remote clusters does not support manual or automatic slicing.
::::
### Reindex with manual slicing [docs-reindex-manual-slice]
Slice a reindex request manually by providing a slice id and total number of slices to each request:
```console
POST _reindex
{
"source": {
"index": "my-index-000001",
"slice": {
"id": 0,
"max": 2
}
},
"dest": {
"index": "my-new-index-000001"
}
}
POST _reindex
{
"source": {
"index": "my-index-000001",
"slice": {
"id": 1,
"max": 2
}
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index_big]
You can verify this works by:
```console
GET _refresh
POST my-new-index-000001/_search?size=0&filter_path=hits.total
```
% TEST[continued]
which results in a sensible `total` like this one:
```console-result
{
"hits": {
"total" : {
"value": 120,
"relation": "eq"
}
}
}
```
### Reindex with automatic slicing [docs-reindex-automatic-slice]
You can also let the reindex API automatically parallelize using [sliced scroll](paginate-search-results.md#slice-scroll) to slice on `_id`.
Use `slices` to specify the number of slices to use:
```console
POST _reindex?slices=5&refresh
{
"source": {
"index": "my-index-000001"
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index_big]
You can also verify this works by:
```console
POST my-new-index-000001/_search?size=0&filter_path=hits.total
```
% TEST[continued]
which results in a sensible `total` like this one:
```console-result
{
"hits": {
"total" : {
"value": 120,
"relation": "eq"
}
}
}
```
Setting `slices` to `auto` will let {{es}} choose the number of slices to use.
This setting will use one slice per shard, up to a certain limit.
If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
Adding `slices` to the reindex API just automates the manual process used in the section above, creating sub-requests which means it has some quirks:
* You can see these requests in the [task management APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks). These sub-requests are "child" tasks of the task for the request with `slices`.
* Fetching the status of the task for the request with `slices` only contains the status of completed slices.
* These sub-requests are individually addressable for things like cancellation and rethrottling.
* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
* Canceling the request with `slices` will cancel each sub-request.
* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.
* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.
### Picking the number of slices [docs-reindex-picking-slices]
If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use these guidelines.
Query performance is most efficient when the number of `slices` is equal to the number of shards in the index. If that number is large (for example, 500), choose a lower number as too many `slices` will hurt performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.
Indexing performance scales linearly across available resources with the number of slices.
Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.
## Reindex with custom routing [docs-reindex-routing]
By default if the reindex API sees a document with routing then the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this.
For example:
`keep`
: Sets the routing on the bulk request sent for each match to the routing on the match. This is the default value.
`discard`
: Sets the routing on the bulk request sent for each match to `null`.
`=<some text>`
: Sets the routing on the bulk request sent for each match to all text after the `=`.
You can use the following request to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`:
```console
POST _reindex
{
"source": {
"index": "source",
"query": {
"match": {
"company": "cat"
}
}
},
"dest": {
"index": "dest",
"routing": "=cat"
}
}
```
% TEST[s/^/PUT source\n/]
By default the reindex API uses scroll batches of 1000. You can change the batch size with the `size` field in the `source` element:
```console
POST _reindex
{
"source": {
"index": "source",
"size": 100
},
"dest": {
"index": "dest",
"routing": "=cat"
}
}
```
% TEST[s/^/PUT source\n/]
## Reindex with an ingest pipeline [reindex-with-an-ingest-pipeline]
Reindex can also use the [ingest pipelines](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md) feature by specifying a `pipeline` like this:
```console
POST _reindex
{
"source": {
"index": "source"
},
"dest": {
"index": "dest",
"pipeline": "some_ingest_pipeline"
}
}
```
% TEST[s/^/PUT source\n/]
## Reindex selected documents with a query [docs-reindex-select-query]
You can limit the documents by adding a query to the `source`. For example, the following request only copies documents with a `user.id` of `kimchy` into `my-new-index-000001`:
```console
POST _reindex
{
"source": {
"index": "my-index-000001",
"query": {
"term": {
"user.id": "kimchy"
}
}
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index]
## Reindex a limited number of documents with `max_docs` [docs-reindex-select-max-docs]
You can limit the number of processed documents by setting `max_docs`.
For example, this request copies a single document from `my-index-000001` to `my-new-index-000001`:
```console
POST _reindex
{
"max_docs": 1,
"source": {
"index": "my-index-000001"
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index]
## Reindex from multiple indices in a single request [docs-reindex-multiple-sources]
The `index` attribute in `source` can be a list, allowing you to copy from lots of sources in one request.
This will copy documents from the `my-index-000001` and `my-index-000002` indices:
```console
POST _reindex
{
"source": {
"index": ["my-index-000001", "my-index-000002"]
},
"dest": {
"index": "my-new-index-000002"
}
}
```
% TEST[setup:my_index]
% TEST[s/^/PUT my-index-000002\/_doc\/post1?refresh\n{"test": "foo"}\n/]
::::{note}
The reindex API makes no effort to handle ID collisions so the last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique using a script.
::::
## Reindex selected fields [docs-reindex-filter-source]
You can use source filtering to reindex a subset of the fields in the original documents.
For example, the following request only reindexes the `user.id` and `_doc` fields of each document:
```console
POST _reindex
{
"source": {
"index": "my-index-000001",
"_source": ["user.id", "_doc"]
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index]
## Reindex to change the name of a field [docs-reindex-change-name]
The reindex API can be used to build a copy of an index with renamed fields.
Say you create an index containing documents that look like this:
```console
POST my-index-000001/_doc/1?refresh
{
"text": "words words",
"flag": "foo"
}
```
If you don't like the name `flag` and want to replace it with `tag`, the reindex API can create the other index for you:
```console
POST _reindex
{
"source": {
"index": "my-index-000001"
},
"dest": {
"index": "my-new-index-000001"
},
"script": {
"source": "ctx._source.tag = ctx._source.remove(\"flag\")"
}
}
```
% TEST[continued]
Now you can get the new document:
```console
GET my-new-index-000001/_doc/1
```
% TEST[continued]
...which will return:
```console-result
{
"found": true,
"_id": "1",
"_index": "my-new-index-000001",
"_version": 1,
"_seq_no": 44,
"_primary_term": 1,
"_source": {
"text": "words words",
"tag": "foo"
}
}
```
% TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term": 1/"_primary_term" : $body._primary_term/]
## Reindex daily indices [docs-reindex-daily-indices]
You can use the reindex API in combination with [Painless](/reference/scripting-languages/painless/painless.md) to reindex daily indices to apply a new template to the existing documents.
Assuming you have indices that contain documents like:
```console
PUT metricbeat-2016.05.30/_doc/1?refresh
{"system.cpu.idle.pct": 0.908}
PUT metricbeat-2016.05.31/_doc/1?refresh
{"system.cpu.idle.pct": 0.105}
```
The new template for the `metricbeat-*` indices is already loaded into {{es}}, but it applies only to the newly created indices. Painless can be used to reindex the existing documents and apply the new template.
The script below extracts the date from the index name and creates a new index with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.
```console
POST _reindex
{
"source": {
"index": "metricbeat-*"
},
"dest": {
"index": "metricbeat"
},
"script": {
"lang": "painless",
"source": "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'"
}
}
```
% TEST[continued]
All documents from the previous metricbeat indices can now be found in the `*-1` indices.
```console
GET metricbeat-2016.05.30-1/_doc/1
GET metricbeat-2016.05.31-1/_doc/1
```
% TEST[continued]
The previous method can also be used in conjunction with [changing a field name](#docs-reindex-change-name) to load only the existing data into the new index and rename any fields if needed.
## Extract a random subset of the source [docs-reindex-api-subset]
The reindex API can be used to extract a random subset of the source for testing:
```console
POST _reindex
{
"max_docs": 10,
"source": {
"index": "my-index-000001",
"query": {
"function_score" : {
"random_score" : {},
"min_score" : 0.9 <1>
}
}
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:my_index_big]
1. You may need to adjust the `min_score` depending on the relative amount of data extracted from source.
## Modify documents during reindexing [reindex-scripts]
Like `_update_by_query`, the reindex API supports a script that modifies the document.
Unlike `_update_by_query`, the script is allowed to modify the document's metadata.
This example bumps the version of the source document:
```console
POST _reindex
{
"source": {
"index": "my-index-000001"
},
"dest": {
"index": "my-new-index-000001",
"version_type": "external"
},
"script": {
"source": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}",
"lang": "painless"
}
}
```
% TEST[setup:my_index]
Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination:
`noop`
: Set `ctx.op = "noop"` if your script decides that the document doesn't have to be indexed in the destination. This no operation will be reported in the `noop` counter in the response body.
`delete`
: Set `ctx.op = "delete"` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body.
Setting `ctx.op` to anything other than `noop` or `delete` will result in an error. Similarly, modifying other fields in `ctx` besides `_id`, `_index`, `_version`, and `_routing` will also fail.
Think of the possibilities! Just be careful; you are able to change:
* `_id`
* `_index`
* `_version`
* `_routing`
Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request; it will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API request.
## Reindex from remote [reindex-from-remote]
```{applies_to}
stack: ga
serverless: preview
```
Reindex supports reindexing from a remote {{es}} cluster:
```console
POST _reindex
{
"source": {
"remote": {
"host": "<OTHER_HOST_URL>",
"username": "user",
"password": "pass"
},
"index": "my-index-000001",
"query": {
"match": {
"test": "data"
}
}
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:host]
% TEST[s/^/PUT my-index-000001\n/]
% TEST[s/"host": [^}]*,/"host": "http:\/\/\${host}",/]
% TEST[s/"username": "user",/"username": "test_admin",/]
% TEST[s/"password": "pass"/"password": "x-pack-test-password"/]
The `host` parameter must contain a scheme, host, port (for example, `https://<OTHER_HOST_URL>:9200`), and optional path (for example, `https://<OTHER_HOST_URL>:9200/proxy`).
### Using basic auth [reindex-basic-auth]
To authenticate with the remote cluster using basic auth, set the `username` and `password` parameters, as in the example above.
Be sure to use `https` when using basic auth, or the password will be sent in plain text. There are a [range of settings](#reindex-ssl) available to configure the behaviour of the `https` connection.
### Using an API key [reindex-api-key]
It is also possible (and encouraged) to authenticate with the remote cluster through the use of a valid API key:
::::{applies-switch}
:::{applies-item} { "stack": "ga 9.3+", "serverless": "preview" }
```console
POST _reindex
{
"source": {
"remote": {
"host": "<OTHER_HOST_URL>",
"api_key": "<API_KEY_VALUE>"
},
"index": "my-index-000001",
"query": {
"match": {
"test": "data"
}
}
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:host]
% TEST[s/^/PUT my-index-000001\n/]
% TEST[s/"host": [^}]*,/"host": "http:\/\/\${host}",/]
% TEST[s/"headers": \{[^}]*\}/"username": "test_admin", "password": "x-pack-test-password"/]
:::
:::{applies-item} { "stack": "ga 9.0-9.2" }
```console
POST _reindex
{
"source": {
"remote": {
"host": "<OTHER_HOST_URL>",
"headers": {
"Authorization": "<API_KEY_VALUE>"
}
},
"index": "my-index-000001",
"query": {
"match": {
"test": "data"
}
}
},
"dest": {
"index": "my-new-index-000001"
}
}
```
% TEST[setup:host]
% TEST[s/^/PUT my-index-000001\n/]
% TEST[s/"host": [^}]*,/"host": "http:\/\/\${host}",/]
% TEST[s/"headers": \{[^}]*\}/"username": "test_admin", "password": "x-pack-test-password"/]
:::
::::
Be sure to use `https` when using an API key, or it will be sent in plain text. There are a [range of settings](#reindex-ssl) available to configure the behaviour of the `https` connection.
### Permitted remote hosts [reindex-remote-whitelist]
The remote hosts that you can use depend on whether you're using the versioned {{stack}} or {{serverless-short}}.
* In the versioned {{stack}}, remote hosts have to be explicitly allowed in elasticsearch.yml using the `reindex.remote.whitelist` property. It can be set to a comma-delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example:
```
reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
```
The list of allowed hosts must be configured on any node that will coordinate the reindex.
* In {{serverless-full}}, only remote hosts in Elastic Cloud Hosted are allowed. {applies_to}`serverless: preview`
### Compatibility [reindex-remote-compatibility]
This feature should work with remote clusters of any version of {{es}} you are likely to find. This should allow you to upgrade from any version of {{es}} to the current version by reindexing from a cluster of the old version.
::::{warning}
{{es}} does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
::::
To enable queries sent to older versions of {{es}} the `query` parameter is sent directly to the remote host without validation or modification.
::::{note}
Reindexing from remote clusters does not support manual or automatic slicing.
::::
### Tuning parameters [reindex-remote-tuning]
Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
If the remote index includes very large documents you'll need to use a smaller batch size.
The example below sets the batch size to `10` which is very, very small.
```console
POST _reindex
{
"source": {
"remote": {
"host": "<OTHER_HOST_URL>",
...
},
"index": "source",
"size": 10,
"query": {
"match": {
"test": "data"
}
}
},
"dest": {
"index": "dest"
}
}
```
% TEST[setup:host]
% TEST[s/^/PUT source\n/]
% TEST[s/"host": [^}]*,/"host": "http:\/\/\${host}",/]
% TEST[s/\.\.\./"username": "test_admin", "password": "x-pack-test-password"/]
It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.
Both default to 30 seconds.
This example sets the socket read timeout to one minute and the connection timeout to 10 seconds:
```console
POST _reindex
{
"source": {
"remote": {
"host": "<OTHER_HOST_URL>",
...,
"socket_timeout": "1m",
"connect_timeout": "10s"
},
"index": "source",
"query": {
"match": {
"test": "data"
}
}
},
"dest": {
"index": "dest"
}
}
```
% TEST[setup:host]
% TEST[s/^/PUT source\n/]
% TEST[s/"host": [^}]*,/"host": "http:\/\/\${host}",/]
% TEST[s/\.\.\.,/"username": "test_admin", "password": "x-pack-test-password",/]
### Configuring SSL parameters [reindex-ssl]
Reindex from remote supports configurable SSL settings.
These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the {{es}} keystore.
It is not possible to configure SSL in the body of the reindex API request.
Refer to [Reindex settings](/reference/elasticsearch/configuration-reference/index-management-settings.md#reindex-settings).
## Monitor reindex tasks [monitor-reindex-tasks]
When run asynchronously with `wait_for_completion=false`, a reindex task can be monitored with the task management API:
```console
GET _tasks/r1A2WoRbTwKZ516z6NEs5A:36619
```
% TEST[catch:missing]
::::{note}
- If the `completed` field in the response to the `GET _tasks/<task_id>` call is `false` then the reindex is still running.
- If the `completed` field is `true` and the `error` field is present then the reindex failed. Check the `error` object for details.
- If the `completed` field is `true` and the `response` field is present then the reindex at least partially succeeded. Check the `failures` field in the `response` object to see if there were partial failures.
- If this call returns a 404 (`NOT FOUND`) then reindex failed because the task was lost, perhaps due to a node restart.
In any of the failure cases, partial data may have been written to the destination index.
::::
To view all currently running reindex tasks (where this API is available):
```console
GET _tasks?actions=*reindex
```
You can also cancel a running reindex task:
```console
POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
```
If this API is not available, you can achieve a similar effect by deleting the
target index:
```console
DELETE dest
```
This will cause the reindex task to fail with an `index_not_found_exception`
error.
## Diagnose node failures [diagnose-node-failures]
Node crashes can sometimes be caused by insufficient disk space. To check disk allocation across your cluster:
```console
GET _cat/allocation?v
```
## Version conflicts [version-conflicts]
By default, version conflicts abort the reindexing process.
To continue reindexing in the case of conflicts, set `conflicts` to `proceed`.
This may be necessary when retrying a failed reindex operation, as the destination index could be left in a partial state.
```console
POST _reindex
{
"source": {
"index": "my-index-000001"
},
"dest": {
"index": "my-new-index-000001",
"op_type": "create"
},
"conflicts": "proceed"
}
```
% TEST[setup:my_index]
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/reference/elasticsearch/rest-apis/reindex-indices.md
|
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import tarfile
import os
import six
import docker
from tests.utils.compat import Path
from scalyr_agent import compat
# Log files copied out of every test container in addition to any caller-supplied
# paths. Both locations are listed because the agent log lives in a different
# place depending on how the agent was installed inside the image.
DEFAULT_FILE_PATHS_TO_COPY = [
    "/var/log/scalyr-agent-2/agent.log",
    "/root/scalyr-agent-dev/log/agent.log",
]
def dockerized_case(
    builder_cls,
    file_path,
    file_paths_to_copy=None,
    artifacts_use_subdirectory=True,
    remove_container=True,
    python_executable="python3",
):
    """
    Decorator that makes decorated test case run inside docker container.

    The wrapped test is re-invoked inside the container via
    ``pytest <file>::<func> --no-dockerize``; when the outer run itself passes
    ``--no-dockerize`` the test simply runs in-process instead.

    :param builder_cls: Image builder class to use.
    :param file_path: Path to the test file.
    :param file_paths_to_copy: A list of file paths to copy from the container to the artifacts
                               directory specified --artifacts-path option.
    :param artifacts_use_subdirectory: True to store artifacts in a subdirectory which matches the
                                       test name. This comes handy in scenarios where single test
                                       file contains multiple test functions.
    :param remove_container: True to remove container after run.
    :param python_executable: Python executable to use to run tests with (aka pytest).
    """
    # We always include agent log file
    file_paths_to_copy = set(file_paths_to_copy or [])
    file_paths_to_copy.update(set(DEFAULT_FILE_PATHS_TO_COPY))
    def dockerized_real(f):
        func_name = f.__name__
        # Repository root; the test file path is re-rooted under "agent_source",
        # which is where the source tree is mounted inside the image.
        root = Path(__file__).parent.parent.parent
        rel_path = Path("agent_source") / Path(file_path).relative_to(root)
        # --no-dockerize prevents infinite recursion when the test re-runs
        # inside the container.
        command = "{0} -m pytest {1}::{2} -s --color=yes --no-dockerize".format(
            python_executable, rel_path, func_name
        )
        def wrapper(request, *args, **kwargs):
            no_dockerize = request.config.getoption("--no-dockerize")
            if no_dockerize:
                # Already inside the container (or user opted out): run directly.
                result = f(request, *args, **kwargs)
                return result
            builder = builder_cls()
            no_rebuild = request.config.getoption("--no-rebuild", False)
            if builder.is_image_exists():
                # we rebuild image if there is no option to skip rebuild.
                if not no_rebuild:
                    builder.build(skip_requirements=True)
            else:
                try:
                    builder.build(skip_requirements=no_rebuild)
                except docker.errors.BuildError as e:
                    # Throw a more user-friendly exception if the base image doesn't exist
                    if "does not exist" in str(e) and "-base" in str(e):
                        try:
                            base_image_name = builder.REQUIRED_CHECKSUM_IMAGES[
                                0
                            ].IMAGE_TAG
                        except Exception:
                            base_image_name = "unknown"
                        msg = (
                            'Base container image "%s" doesn\'t exist and --no-rebuild flag is '
                            "used. You need to either manually build the base image or remove "
                            "the --no-rebuild flag.\n\nOriginal error: %s"
                            % (base_image_name, str(e))
                        )
                        raise Exception(msg)
            docker_client = docker.from_env()
            # Container name encodes image, test file and test function so each
            # test case gets its own container instance.
            container_name = "{0}-{1}-{2}".format(
                builder.image_tag, Path(file_path).name.replace(".py", ""), func_name
            )
            try:
                # remove container if it was created previously.
                container = docker_client.containers.get(container_name)
                container.remove()
            except docker.errors.NotFound:
                pass
            print(
                "Create container '{0}' from '{1}' image.".format(
                    container_name, builder.image_tag
                )
            )
            container = docker_client.containers.run(
                builder.image_tag,
                name=container_name,
                detach=True,
                command=command,
                stdout=True,
                stderr=True,
                environment=get_environment_for_docker_run(),
            )
            # Block until the in-container pytest run finishes, then surface its
            # full output in the outer test log.
            exit_code = container.wait()["StatusCode"]
            logs = six.ensure_text(container.logs(follow=True))
            print(logs)
            # save logs if artifacts path is specified.
            artifacts_path = request.config.getoption("--artifacts-path", None)
            if artifacts_path:
                coverage_file_path = Path("/", ".coverage")
                artifacts_path = Path(artifacts_path)
                if artifacts_use_subdirectory:
                    # We run each test case in a new container instance so we make sure we store
                    # logs under a sub-directory which matches the test function name
                    artifacts_path = artifacts_path / func_name
                file_paths_to_copy.add(six.text_type(coverage_file_path))
                copy_artifacts(
                    container=container,
                    file_paths=file_paths_to_copy,
                    destination_path=artifacts_path,
                )
            if remove_container:
                container.remove(force=True)
                print("Container '{0}' removed.".format(builder.image_tag))
            # raise failed assertion, due to non-zero result from container.
            if exit_code:
                raise AssertionError(
                    "Test case inside container failed (container exited with %s "
                    "status code)." % (exit_code)
                )
        return wrapper
    return dockerized_real
def copy_artifacts(container, file_paths, destination_path):
    """
    Copy provided file paths from Docker container to a destination on a host.

    Each file is fetched from the container as a tar archive stream, written to
    a temporary ``data.tar`` inside the destination directory, extracted there
    and the temporary archive is removed. Paths which don't exist inside the
    container are skipped silently (not every test produces every log file).

    :param container: Container instance to use.
    :param file_paths: A list of file paths inside the Docker container to copy over.
    :param destination_path: Destination directory on the host (a ``Path``) where
                             the files should be copied to.
    """
    if not file_paths:
        return
    try:
        # Create the destination directory; ignore the error if it exists.
        os.makedirs(destination_path)
    except OSError:
        pass
    for file_path in file_paths:
        # fetch file as tar file stream if it exists
        try:
            stream, _ = container.get_archive(file_path)
        except docker.errors.NotFound as e:
            # Not all the test files produce agent.log so we simply ignore the error
            # if agent log file doesn't exist
            msg = str(e).lower()
            if "could not find the file" in msg:
                print("File path %s doesn't exist, skipping copy" % (file_path))
                continue
            # Bare "raise" preserves the original traceback (with "raise e" the
            # traceback is lost under Python 2, which this module still supports).
            raise
        print('Copying file path "%s" to "%s"' % (file_path, destination_path))
        data_tar_path = destination_path / "data.tar"
        # write it to file.
        with data_tar_path.open("wb") as data_fp:
            for chunk in stream:
                data_fp.write(chunk)
        # extract tar file in a directory for this function
        # NOTE(review): extractall() trusts archive member paths; acceptable here
        # because the archive comes from our own test container.
        with tarfile.open(data_tar_path) as tar_file:
            tar_file.extractall(destination_path)
        # remove tar file.
        os.remove(data_tar_path)
def get_environment_for_docker_run():
    """
    Return sanitized environment to be used with containers.run() command.

    The returned environment excludes any environment variables which could
    affect tests running inside the container and cause a failure.
    """
    excluded_vars = ("PATH", "HOME")
    environment = compat.os_environ_unicode.copy()
    for name in excluded_vars:
        # pop() with a default is a no-op when the variable is absent.
        environment.pop(name, None)
    return environment
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package approver implements an automated approver for kubelet certificates.
package approver
import (
"context"
"crypto/x509"
"fmt"
authorization "k8s.io/api/authorization/v1"
capi "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
certificatesinformers "k8s.io/client-go/informers/certificates/v1"
clientset "k8s.io/client-go/kubernetes"
capihelper "k8s.io/kubernetes/pkg/apis/certificates"
"k8s.io/kubernetes/pkg/controller/certificates"
)
// csrRecognizer couples a predicate that identifies one class of kubelet CSRs
// with the permission that must be authorized (via SubjectAccessReview) before
// auto-approval, and the message recorded on the approval condition.
type csrRecognizer struct {
	// recognize reports whether the CSR (with its parsed x509 request) belongs
	// to this recognizer's class.
	recognize func(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool
	// permission is checked for the CSR's requester before approving.
	permission authorization.ResourceAttributes
	// successMessage is attached to the approval condition on success.
	successMessage string
}
// sarApprover auto-approves recognized kubelet CSRs after a SubjectAccessReview
// confirms the requester is authorized for the corresponding subresource.
type sarApprover struct {
	client      clientset.Interface
	recognizers []csrRecognizer
}
// NewCSRApprovingController creates a new CSRApprovingController.
// It wires a SubjectAccessReview-based approver (with the standard kubelet
// client recognizers) into a generic certificate controller driven by the
// given CSR informer.
func NewCSRApprovingController(ctx context.Context, client clientset.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer) *certificates.CertificateController {
	approver := &sarApprover{
		client:      client,
		recognizers: recognizers(),
	}
	return certificates.NewCertificateController(
		ctx,
		"csrapproving",
		client,
		csrInformer,
		approver.handle,
	)
}
// recognizers returns the set of CSR classes eligible for auto-approval:
// self-requested kubelet client certificates (subresource "selfnodeclient")
// and kubelet client certificates requested on a node's behalf ("nodeclient").
func recognizers() []csrRecognizer {
	recognizers := []csrRecognizer{
		{
			recognize:      isSelfNodeClientCert,
			permission:     authorization.ResourceAttributes{Group: "certificates.k8s.io", Resource: "certificatesigningrequests", Verb: "create", Subresource: "selfnodeclient", Version: "*"},
			successMessage: "Auto approving self kubelet client certificate after SubjectAccessReview.",
		},
		{
			recognize:      isNodeClientCert,
			permission:     authorization.ResourceAttributes{Group: "certificates.k8s.io", Resource: "certificatesigningrequests", Verb: "create", Subresource: "nodeclient", Version: "*"},
			successMessage: "Auto approving kubelet client certificate after SubjectAccessReview.",
		},
	}
	return recognizers
}
// handle processes one CSR: if a recognizer matches and a SubjectAccessReview
// shows the requester holds the matching permission, the CSR is approved and
// the approval is persisted. CSRs that already have a certificate or an
// approval/denial decision are left untouched.
func (a *sarApprover) handle(ctx context.Context, csr *capi.CertificateSigningRequest) error {
	// Certificate already issued: nothing to do.
	if len(csr.Status.Certificate) != 0 {
		return nil
	}
	// A decision (approved or denied) was already recorded.
	if approved, denied := certificates.GetCertApprovalCondition(&csr.Status); approved || denied {
		return nil
	}
	x509cr, err := capihelper.ParseCSR(csr.Spec.Request)
	if err != nil {
		return fmt.Errorf("unable to parse csr %q: %v", csr.Name, err)
	}
	// Track which recognizers matched so the error below can report them when
	// none of the matching permissions were authorized.
	tried := []string{}
	for _, r := range a.recognizers {
		if !r.recognize(csr, x509cr) {
			continue
		}
		tried = append(tried, r.permission.Subresource)
		approved, err := a.authorize(ctx, csr, r.permission)
		if err != nil {
			return err
		}
		if approved {
			appendApprovalCondition(csr, r.successMessage)
			_, err = a.client.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{})
			if err != nil {
				return fmt.Errorf("error updating approval for csr: %v", err)
			}
			return nil
		}
	}
	if len(tried) != 0 {
		// Recognized but not authorized: ignorable, so the controller does not
		// treat it as a hard failure.
		return certificates.IgnorableError("recognized csr %q as %v but subject access review was not approved", csr.Name, tried)
	}
	return nil
}
// authorize submits a SubjectAccessReview asking whether the CSR's requester
// (username, UID, groups, extra fields) is allowed the given resource
// attributes, and returns the review's Allowed result.
func (a *sarApprover) authorize(ctx context.Context, csr *capi.CertificateSigningRequest, rattrs authorization.ResourceAttributes) (bool, error) {
	// Convert the CSR extra values into the authorization API's ExtraValue type.
	extra := make(map[string]authorization.ExtraValue)
	for k, v := range csr.Spec.Extra {
		extra[k] = authorization.ExtraValue(v)
	}
	sar := &authorization.SubjectAccessReview{
		Spec: authorization.SubjectAccessReviewSpec{
			User:               csr.Spec.Username,
			UID:                csr.Spec.UID,
			Groups:             csr.Spec.Groups,
			Extra:              extra,
			ResourceAttributes: &rattrs,
		},
	}
	sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return sar.Status.Allowed, nil
}
// appendApprovalCondition records an "Approved" condition (reason AutoApproved)
// with the given message on the CSR's status. It only mutates the in-memory
// object; persisting is the caller's responsibility.
func appendApprovalCondition(csr *capi.CertificateSigningRequest, message string) {
	csr.Status.Conditions = append(csr.Status.Conditions, capi.CertificateSigningRequestCondition{
		Type:    capi.CertificateApproved,
		Status:  corev1.ConditionTrue,
		Reason:  "AutoApproved",
		Message: message,
	})
}
// isNodeClientCert reports whether the CSR is a kubelet client certificate
// request: it must target the kube-apiserver-client-kubelet signer and its
// x509 request/usages must match the kubelet client CSR shape.
func isNodeClientCert(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool {
	if csr.Spec.SignerName != capi.KubeAPIServerClientKubeletSignerName {
		return false
	}
	return capihelper.IsKubeletClientCSR(x509cr, usagesToSet(csr.Spec.Usages))
}
// isSelfNodeClientCert reports whether the CSR is a kubelet client certificate
// requested by the node for itself: the requesting username must equal the
// certificate's common name, in addition to the node client cert checks.
func isSelfNodeClientCert(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool {
	if csr.Spec.Username != x509cr.Subject.CommonName {
		return false
	}
	return isNodeClientCert(csr, x509cr)
}
// usagesToSet converts a slice of key usages into a string set for
// order-independent comparison.
func usagesToSet(usages []capi.KeyUsage) sets.String {
	set := sets.NewString()
	for i := range usages {
		set.Insert(string(usages[i]))
	}
	return set
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/controller/certificates/approver/sarapprove.go
|
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { render } from "@testing-library/react";
import { describe, expect, test } from "vitest";
import { Login } from "src/login/Login";
import { Wrapper } from "src/test-utils";
describe("Login page", () => {
  test("Components renders properly", () => {
    const { getAllByText } = render(<Login />, {
      wrapper: Wrapper,
    });

    // Each of these strings must appear exactly once on the login page.
    const expectedTexts = [
      "Sign into Airflow",
      "Enter your username and password below:",
      "Username",
      "Password",
    ];

    expectedTexts.forEach((text) => {
      expect(getAllByText(text)).toHaveLength(1);
    });
  });
});
|
typescript
|
github
|
https://github.com/apache/airflow
|
airflow-core/src/airflow/api_fastapi/auth/managers/simple/ui/src/login/Login.test.tsx
|
#
# CapTipper is a malicious HTTP traffic explorer tool
# By Omri Herscovici <omriher AT gmail.com>
# http://omriher.com
# @omriher
#
#
# This file is part of CapTipper, and part of the Whatype library
# Whatype is an independent file type identification python library
# https://github.com/omriher/whatype
#
# CapTipper is a free software under the GPLv3 License
#
import os
class WhatypeErr(Exception):
    """Error raised by the Whatype library.

    Carries the phase in which the failure occurred (``when``) and the
    underlying error description (``error``).
    """
    def __init__(self, when, error):
        self.when = when
        self.error = error
    def __str__(self):
        message = "Whatype Error on " + self.when + " : " + self.error
        return repr(message)
class MagicNode(object):
    """Node of the magic-bytes prefix tree (trie).

    Each node stores one byte value (as a hex string) plus the file metadata
    that applies when a magic sequence terminates at this node.
    """
    def __init__(self, byte):
        self.byte = byte
        self.filetype = ""
        self.ext = ""
        self.strings = ""
        self.children = []
    def add_child(self, obj):
        """Append a new child node for the given byte value and return it."""
        child = MagicNode(obj)
        self.children.append(child)
        return child
    def has_child(self, data):
        """Return the first child whose byte matches (case-insensitive), else None."""
        target = data.lower()
        for child in self.children:
            if child.byte.lower() == target:
                return child
        return None
    def get_childrens_by_byte(self, data):
        """Return all children whose byte matches (case-insensitive)."""
        target = data.lower()
        return [child for child in self.children if child.byte.lower() == target]
class Whatype(object):
    """File type identification via a magic-bytes prefix tree.

    Loads a CSV list of (file type, magic bytes, extension, strings) entries
    into a trie and walks it byte-by-byte over a buffer or file to identify
    the content type.

    NOTE: this module is Python 2 only (print statements, ``except E, e``
    syntax, ``str.encode('hex')`` and ``string.maketrans``).
    """
    WTver = "0.1"
    WTrev = "01"
    # Default magics database, expected next to this file.
    MAGICLIST_NAME = "magics.csv"
    def __init__(self,magic_file=""):
        # Use the caller-provided magics CSV if given, otherwise fall back to
        # the bundled default next to this module; fail loudly when neither
        # exists.
        if magic_file:
            if os.path.isfile(magic_file):
                self.magic_list_file = magic_file
            else:
                raise WhatypeErr("magics list load", "Couldn't find " + magic_file)
        else:
            default_mgc = os.path.join(os.path.dirname(os.path.realpath(__file__)),Whatype.MAGICLIST_NAME)
            if os.path.isfile(default_mgc):
                self.magic_list_file = default_mgc
            else:
                raise WhatypeErr("loading default magics list","Couldn't find default magics list. " \
                                 "Please provide a magics CSV file")
        # Create main prefix tree graph (Trie)
        self.Tree = MagicNode("all_magics")
        with open(self.magic_list_file, "r") as ins:
            for line in ins:
                parts = line.split(",")
                # parts[0] = File Type
                # parts[1] = Magic bytes
                # parts[2] = File Ext
                # parts[3] = File Strings
                self.create_branch(0, self.Tree, parts[0], parts[1], parts[2],parts[3])
    def create_branch(self, node_level, father, filetype, magic, ext, strings):
        """Recursively insert one magic signature (space-separated hex bytes)
        into the trie; the terminal node carries the file metadata."""
        magic_bytes = magic.split(" ")
        byte = magic_bytes[node_level]
        son = father.has_child(byte)
        node_level += 1
        if (node_level < len(magic_bytes)):
            # Interior byte: reuse the existing child or create one, then recurse.
            if son is None:
                son = father.add_child(byte)
            self.create_branch(node_level, son, filetype, magic, ext,strings)
        else:
            if (node_level == len(magic_bytes)):
                # Terminal byte: always add a fresh node holding the metadata
                # (duplicates of the same prefix are allowed in the tree).
                son = father.add_child(byte)
                son.filetype = filetype
                son.ext = ext
                son.strings = strings
    def print_tree(self,Node, index):
        """Debug helper: print the trie, indenting by depth (Python 2 print)."""
        for nd in Node.children:
            print "--" * index + nd.byte
            if (len(nd.children) > 0):
                self.print_tree(nd, index + 1)
    def strings_search(self,strings_list, content):
        """Return True when every ';'-separated string occurs in content
        (case-insensitive). An empty entry always matches."""
        bGood = True
        for str in strings_list.split(";"):
            if content.lower().find(str.lower().rstrip()) == -1:
                bGood = False
        return bGood
    def return_magic(self,cont,Name,Ext):
        """Fall back to a text/binary heuristic when no magic matched."""
        if not Name:
            Name = "Inconclusive. "
            if self.istext(cont):
                Name += "Probably text"
                Ext = "TEXT"
            else:
                Name += "Probably binary"
                Ext = "BINARY"
        return Name,Ext
    def istext(self,cont):
        # Based on http://code.activestate.com/recipes/173220/
        import string
        text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
        _null_trans = string.maketrans("", "")
        if not cont:
            # Empty files are considered text
            return True
        if "\0" in cont:
            # Files with null bytes are likely binary
            return False
        # Get the non-text characters (maps a character to itself then
        # use the 'remove' option to get rid of the text characters.)
        t = cont.translate(_null_trans, text_characters)
        # If more than 30% non-text characters, then
        # this is considered a binary file
        if float(len(t))/float(len(cont)) > 0.30:
            return False
        return True
    def find(self, cont, Node, index=0, magic_history=[]):
        """Walk the trie along cont's bytes; on a dead end, roll back through
        visited nodes looking for a shorter signature whose strings also match.

        NOTE(review): the mutable default ``magic_history=[]`` is shared across
        calls that omit the argument; callers here pass a fresh list
        (see identify_buffer) — confirm before relying on bare find() calls.
        """
        if cont == "" or cont is None:
            return "",""
        curr_byte = cont[index].encode('hex')
        NextNode = Node.get_childrens_by_byte(curr_byte)
        if NextNode:
            magic_history.extend(NextNode)
            Name, Ext = self.find(cont, NextNode[0], index+1, magic_history)
            if Ext == "Rollback":
                # Search went past the last matching signature: unwind the
                # visited nodes, newest first, and accept the first terminal
                # node whose strings also match.
                for i in range(len(magic_history)):
                    Node = magic_history.pop()
                    if Node.filetype != "":
                        if self.strings_search(Node.strings, cont):
                            return Node.filetype, Node.ext
            else:
                return Name, Ext
            return self.return_magic(cont,"","")
            #return ""
        else:
            # last hex node found
            if Node.filetype != "":
                if self.strings_search(Node.strings, cont):
                    return Node.filetype, Node.ext
            if len(magic_history) == 0:
                #return "",""
                return self.return_magic(cont,"","")
            return "", "Rollback" # Magic search went too far, rollbacking
    def identify_file(self,filepath):
        """Identify a file on disk; returns (file type, extension)."""
        try:
            file_content = open(filepath).read()
            return self.find(file_content, self.Tree)
        except Exception, e:
            raise WhatypeErr("file identification", str(e))
    def identify_buffer(self,file_content):
        """Identify an in-memory buffer; returns (file type, extension)."""
        try:
            return self.find(file_content, self.Tree,0,[])
        except Exception, e:
            raise WhatypeErr("buffer identification", str(e))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the indexing engine."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
from invenio import bibindex_engine_stemmer
from invenio.testutils import make_test_suite, run_test_suite
class TestStemmer(InvenioTestCase):
    """Test stemmer."""
    def test_stemmer_none(self):
        """bibindex engine - no stemmer"""
        # With no language code the word must pass through unchanged.
        self.assertEqual("information",
                         bibindex_engine_stemmer.stem("information", None))
    def test_stemmer_english(self):
        """bibindex engine - English stemmer"""
        # Pairs of [input word, expected stem]; the last few entries check that
        # symbols and non-word tokens are left untouched.
        english_test_cases = [['information', 'inform'],
                              ['experiment', 'experi'],
                              ['experiments', 'experi'],
                              ['experimented', 'experi'],
                              ['experimenting', 'experi'],
                              ['experimental', 'experiment'],
                              ['experimentally', 'experiment'],
                              ['experimentation', 'experiment'],
                              ['experimentalism', 'experiment'],
                              ['experimenter', 'experiment'],
                              ['experimentalise', 'experimentalis'],
                              ['experimentalist', 'experimentalist'],
                              ['experimentalists', 'experimentalist'],
                              ['GeV', 'GeV'],
                              ['$\Omega$', '$\Omega$'],
                              ['e^-', 'e^-'],
                              ['C#', 'C#'],
                              ['C++', 'C++']]
        for test_word, expected_result in english_test_cases:
            self.assertEqual(expected_result,
                             bibindex_engine_stemmer.stem(test_word, "en"))
    def test_stemmer_greek(self):
        """bibindex engine - Greek stemmer"""
        # Pairs of [input word, expected stem]; the Greek stemmer uppercases
        # its output (see expected values).
        greek_test_cases = [['πληροφορίες', 'ΠΛΗΡΟΦΟΡΙ'],
                            ['πείραμα', 'ΠΕΙΡΑΜ'],
                            ['πειράματα', 'ΠΕΙΡΑΜ'],
                            ['πειραματιστής', 'ΠΕΙΡΑΜΑΤΙΣΤ'],
                            ['πειραματίζομαι', 'ΠΕΙΡΑΜΑΤΙΖ'],
                            ['πειραματίζεσαι', 'ΠΕΙΡΑΜΑΤΙΖ'],
                            ['πειραματίστηκα', 'ΠΕΙΡΑΜΑΤΙΣΤ'],
                            ['πειραματόζωο', 'ΠΕΙΡΑΜΑΤΟΖΩ'],
                            ['ζώο', 'ΖΩ'],
                            ['πειραματισμός', 'ΠΕΙΡΑΜΑΤΙΣΜ'],
                            ['πειραματικός', 'ΠΕΙΡΑΜΑΤΙΚ'],
                            ['πειραματικά', 'ΠΕΙΡΑΜΑΤ'],
                            ['ηλεκτρόνιο', 'ΗΛΕΚΤΡΟΝΙ'],
                            ['ηλεκτρονιακός', 'ΗΛΕΚΤΡΟΝΙΑΚ'],
                            ['ακτίνα', 'ΑΚΤΙΝ'],
                            ['ακτινοβολία', 'ΑΚΤΙΝΟΒΟΛ'],
                            ['E=mc^2', 'E=MC^2'],
                            ['α+β=γ', 'Α+Β=Γ']]
        for test_word, expected_result in greek_test_cases:
            self.assertEqual(expected_result,
                             bibindex_engine_stemmer.stem(test_word, "el"))
# Aggregate the test cases into a suite so the file can run standalone.
TEST_SUITE = make_test_suite(TestStemmer,)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
navigation_title: "Distance feature"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-distance-feature-query.html
---
# Distance feature query [query-dsl-distance-feature-query]
Boosts the [relevance score](/reference/query-languages/query-dsl/query-filter-context.md#relevance-scores) of documents closer to a provided `origin` date or point. For example, you can use this query to give more weight to documents closer to a certain date or location.
You can use the `distance_feature` query to find the nearest neighbors to a location. You can also use the query in a [`bool`](/reference/query-languages/query-dsl/query-dsl-bool-query.md) search’s `should` filter to add boosted relevance scores to the `bool` query’s scores.
## Example request [distance-feature-query-ex-request]
### Index setup [distance-feature-index-setup]
To use the `distance_feature` query, your index must include a [`date`](/reference/elasticsearch/mapping-reference/date.md), [`date_nanos`](/reference/elasticsearch/mapping-reference/date_nanos.md) or [`geo_point`](/reference/elasticsearch/mapping-reference/geo-point.md) field.
To see how you can set up an index for the `distance_feature` query, try the following example.
1. Create an `items` index with the following field mapping:
* `name`, a [`keyword`](/reference/elasticsearch/mapping-reference/keyword.md) field
* `production_date`, a [`date`](/reference/elasticsearch/mapping-reference/date.md) field
* `location`, a [`geo_point`](/reference/elasticsearch/mapping-reference/geo-point.md) field
```console
PUT /items
{
"mappings": {
"properties": {
"name": {
"type": "keyword"
},
"production_date": {
"type": "date"
},
"location": {
"type": "geo_point"
}
}
}
}
```
% TESTSETUP
2. Index several documents to this index.
```console
PUT /items/_doc/1?refresh
{
"name" : "chocolate",
"production_date": "2018-02-01",
"location": [-71.34, 41.12]
}
PUT /items/_doc/2?refresh
{
"name" : "chocolate",
"production_date": "2018-01-01",
"location": [-71.3, 41.15]
}
PUT /items/_doc/3?refresh
{
"name" : "chocolate",
"production_date": "2017-12-01",
"location": [-71.3, 41.12]
}
```
### Example queries [distance-feature-query-ex-query]
#### Boost documents based on date [distance-feature-query-date-ex]
The following `bool` search returns documents with a `name` value of `chocolate`. The search also uses the `distance_feature` query to increase the relevance score of documents with a `production_date` value closer to `now`.
```console
GET /items/_search
{
"query": {
"bool": {
"must": {
"match": {
"name": "chocolate"
}
},
"should": {
"distance_feature": {
"field": "production_date",
"pivot": "7d",
"origin": "now"
}
}
}
}
}
```
#### Boost documents based on location [distance-feature-query-distance-ex]
The following `bool` search returns documents with a `name` value of `chocolate`. The search also uses the `distance_feature` query to increase the relevance score of documents with a `location` value closer to `[-71.3, 41.15]`.
```console
GET /items/_search
{
"query": {
"bool": {
"must": {
"match": {
"name": "chocolate"
}
},
"should": {
"distance_feature": {
"field": "location",
"pivot": "1000m",
"origin": [-71.3, 41.15]
}
}
}
}
}
```
## Top-level parameters for `distance_feature` [distance-feature-top-level-params]
`field`
: (Required, string) Name of the field used to calculate distances. This field must meet the following criteria:
* Be a [`date`](/reference/elasticsearch/mapping-reference/date.md), [`date_nanos`](/reference/elasticsearch/mapping-reference/date_nanos.md) or [`geo_point`](/reference/elasticsearch/mapping-reference/geo-point.md) field
* Have an [`index`](/reference/elasticsearch/mapping-reference/mapping-index.md) mapping parameter value of `true`, which is the default
* Have a [`doc_values`](/reference/elasticsearch/mapping-reference/doc-values.md) mapping parameter value of `true`, which is the default
`origin`
: (Required, string) Date or point of origin used to calculate distances.
If the `field` value is a [`date`](/reference/elasticsearch/mapping-reference/date.md) or [`date_nanos`](/reference/elasticsearch/mapping-reference/date_nanos.md) field, the `origin` value must be a [date](/reference/aggregations/search-aggregations-bucket-daterange-aggregation.md#date-format-pattern). [Date Math](/reference/elasticsearch/rest-apis/common-options.md#date-math), such as `now-1h`, is supported.
If the `field` value is a [`geo_point`](/reference/elasticsearch/mapping-reference/geo-point.md) field, the `origin` value must be a geopoint.
`pivot`
: (Required, [time unit](/reference/elasticsearch/rest-apis/api-conventions.md#time-units) or [distance unit](/reference/elasticsearch/rest-apis/api-conventions.md#distance-units)) Distance from the `origin` at which relevance scores receive half of the `boost` value.
If the `field` value is a [`date`](/reference/elasticsearch/mapping-reference/date.md) or [`date_nanos`](/reference/elasticsearch/mapping-reference/date_nanos.md) field, the `pivot` value must be a [time unit](/reference/elasticsearch/rest-apis/api-conventions.md#time-units), such as `1h` or `10d`.
If the `field` value is a [`geo_point`](/reference/elasticsearch/mapping-reference/geo-point.md) field, the `pivot` value must be a [distance unit](/reference/elasticsearch/rest-apis/api-conventions.md#distance-units), such as `1km` or `12m`.
`boost`
: (Optional, float) Floating point number used to multiply the [relevance score](/reference/query-languages/query-dsl/query-filter-context.md#relevance-scores) of matching documents. This value cannot be negative. Defaults to `1.0`.
## Notes [distance-feature-notes]
### How the `distance_feature` query calculates relevance scores [distance-feature-calculation]
The `distance_feature` query dynamically calculates the distance between the `origin` value and a document’s field values. It then uses this distance as a feature to boost the [relevance score](/reference/query-languages/query-dsl/query-filter-context.md#relevance-scores) of closer documents.
The `distance_feature` query calculates a document’s [relevance score](/reference/query-languages/query-dsl/query-filter-context.md#relevance-scores) as follows:
```
relevance score = boost * pivot / (pivot + distance)
```
The `distance` is the absolute difference between the `origin` value and a document’s field value.
### Skip non-competitive hits [distance-feature-skip-hits]
Unlike the [`function_score`](/reference/query-languages/query-dsl/query-dsl-function-score-query.md) query or other ways to change [relevance scores](/reference/query-languages/query-dsl/query-filter-context.md#relevance-scores), the `distance_feature` query efficiently skips non-competitive hits when the [`track_total_hits`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) parameter is **not** `true`.
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/reference/query-languages/query-dsl/query-dsl-distance-feature-query.md
|
## +block
def indented_block(self):
    """Translate a block whose extent is determined by indentation.

    Child lines must be indented one level deeper than the opening tag
    line; the block ends at the first line indented at or below the
    opener.  `self.I` / `self.O` are presumably input/output stream
    wrappers tracking line numbers and indentation — TODO confirm.
    """
    print(f"Indent-dependent {self.tag} block started")
    start_O_line = self.O.line_number  # where output started; decides closing-tag layout
    block_indent = self.I.indent_count + 1  # children live one level deeper than the tag
    if self.offset:
        # NOTE(review): offset appears to shift emitted indentation left
        # while inside the block; restored symmetrically below — confirm.
        self.O.offset -= block_indent
    #Opening block
    self.opening_tag()
    #Main block loop
    while 1:
        loop_line = self.I.line_number
        index, token = self.next_token()
        if index > 0:
            # NOTE(review): sibling wrapping_block() guards this with
            # `if not token:` instead — one of the two is likely stale.
            self.O.indents(count = self.I.indent_count)
        self.O.write(self.I.popto(index))  # flush plain text preceding the token
        self.I.popto(len(token))           # consume the token itself from input
        if token:
            self.routine(token)            # dispatch nested tag handling
        #refill line
        if self.I.line == '':
            try:
                self.I.readline()
            except:
                break  # end of input ends the block
        #check if next line is in block
        if loop_line != self.I.line_number:
            if block_indent > self.I.indent_count:
                break  # dedented: block is finished
            else:
                self.O.newline()
    #Closing block
    if start_O_line != self.O.line_number:
        # multi-line output: put the closing tag on its own line,
        # indented one level shallower than the children
        self.O.newline()
        self.O.indents(count = block_indent - 1)
    self.closing_tag()
    if self.offset:
        self.O.offset += block_indent
## @wrapper:
def wrapping_block(self):
    """Translate a wrapper block: children share the opener's indent level.

    Structure mirrors indented_block(), except block_indent is NOT
    incremented (the wrapped content sits at the same indentation as
    the tag line) and the closing tag aligns with the opener.
    """
    print(f"Wrapping {self.tag} block started")
    start_O_line = self.O.line_number  # where output started; decides closing-tag layout
    block_indent = self.I.indent_count  # wrapper content stays at the opener's level
    if self.offset:
        self.O.offset -= block_indent  # restored symmetrically at the end
    #Opening block
    self.opening_tag()
    #Main block loop FIX
    while 1:
        loop_line = self.I.line_number
        index, token = self.next_token()
        if not token:
            # NOTE(review): sibling indented_block() uses `if index > 0:`
            # here; the "FIX" comment above suggests this variant was
            # patched — confirm which guard is the intended one.
            self.O.indents(count = self.I.indent_count)
        self.O.write(self.I.popto(index))  # flush plain text preceding the token
        self.I.popto(len(token))           # consume the token itself from input
        if token:
            self.routine(token)            # dispatch nested tag handling
        #refill line
        if self.I.line == '':
            try:
                self.I.readline()
            except:
                break  # end of input ends the block
        #check if next line is in block
        if loop_line != self.I.line_number:
            if block_indent > self.I.indent_count:
                break  # dedented past the wrapper: block is finished
            else:
                self.O.newline()
    #Closing block
    if start_O_line != self.O.line_number:
        # multi-line output: closing tag on its own line, aligned with opener
        self.O.newline()
        self.O.indents(count = block_indent)
    self.closing_tag()
    if self.offset:
        self.O.offset += block_indent
## +block()
def bracketed_block(self):
    """Translate a parenthesis-delimited block: `+block( ... )`.

    The block ends at the unmatched closing ')': nested '(' / ')'
    pairs are tracked with `level` and copied through verbatim, so
    only the bracket that balances the opener terminates the block.
    """
    print(f"Bracketed {self.tag} block started")
    start_O_line = self.O.line_number
    block_indent = self.I.indent_count
    if self.offset:
        self.O.offset -= block_indent  # restored symmetrically at the end
    #Opening block
    self.opening_tag()
    #Main block loop FIX
    level = 0 #number of brackets must match
    while 1:
        loop_line = self.I.line_number
        # next_token() is asked to also report bare brackets as tokens
        index, token = self.next_token('(', ')')
        if not token or token == '(' or token == ')':
            #indent will be added in the token's routine, if needed
            self.O.indents(count = self.I.indent_count)
            self.O.write(self.I.popto(index))
            self.I.popto(len(token))
            if token == '(':
                self.O.write('(')
                level += 1  # nested bracket: copy through and track depth
            elif token == ')':
                if level == 0:
                    break #end of block
                self.O.write(')')
                level -= 1
        else:
            # ordinary tag token: flush preceding text, then dispatch
            self.O.write(self.I.popto(index))
            self.I.popto(len(token))
            if token:
                self.routine(token)
            if self.I.line.isspace() or self.I.line == '':
                self.I.readline()
        #refill line
        if self.I.line == '':
            try:
                self.I.readline()
            except:
                break  # end of input ends the block
        #check if next line is in block
        if loop_line != self.I.line_number:
            if block_indent > self.I.indent_count:
                break  # dedented: block is finished even without ')'
            else:
                self.O.newline()
    #Closing block
    # NOTE(review): unlike the siblings, no newline/indent is emitted
    # before the closing tag here, and start_O_line is unused — confirm
    # whether inline closing is intentional for bracketed blocks.
    self.closing_tag()
    if self.offset:
        self.O.offset += block_indent
#Selfclosing pseudo-block
def selfclosing_block(self):
    """Emit a tag with no body: only the opening (self-closing) tag is written."""
    print(f"Selfclosing {self.tag} tag started")
    self.opening_tag()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
import os
import sys
import subprocess
import re
import mmap
import string
# Java-style namespace prefix identifying artifact-cache events in buck.log.
EVENT_NAME_PREFIX = "com.facebook.buck.artifact_cache."
# Keys of the dict returned by parse_line().
RULE_KEY_KEY = 'rulekey'
TIMESTAMP_KEY = 'timestamp'
EVENT_NAME_KEY = 'event_name'
RESULT_KEY = 'result'
INFO_KEY = 'info'
class Entry(object):
    """One artifact-cache event (a single buck.log line) for a rulekey."""

    def __init__(self, rulekey, timestamp, event_name, info, result):
        self.rulekey = rulekey
        self.timestamp = timestamp
        self.event_name = event_name
        self.info = info
        self.result = result

    def expand(self, show_timestamp, show_event_name, show_result, show_info):
        """Render the selected fields, one double-tab-indented line each.

        Each requested field contributes "\\t\\tname = value\\n"; fields
        whose flag is False are omitted entirely.
        """
        pieces = []
        if show_timestamp:
            pieces.append("\t\ttimestamp = " + self.timestamp + "\n")
        if show_event_name:
            pieces.append("\t\tevent_name = " + self.event_name + "\n")
        if show_result:
            pieces.append("\t\tresult = " + self.result + "\n")
        if show_info:
            pieces.append("\t\tinfo = " + self.info + "\n")
        return ''.join(pieces)
class ArtifactHistory(object):
    """All cache events observed for one rulekey, in log order."""

    def __init__(self, rulekey):
        self.rulekey = rulekey
        self.entries = []

    def add_entry(self, rulekey, timestamp, event_name, info, result):
        """Append one event; its rulekey must match this history's key."""
        entry = Entry(rulekey, timestamp, event_name, info, result)
        if self.rulekey != entry.rulekey:
            raise Exception("Attempt to insert event with rulekey=" + rulekey +
                            " into artifact history with rulekey=" + self.rulekey)
        self.entries.append(entry)

    def expand(self, show_timestamp, show_event_name, show_result, show_info):
        """Render the history header plus, optionally, per-entry details.

        Per-entry "Item:" stanzas appear only when at least one field
        flag is set; otherwise just the "rulekey=..." header is returned.
        """
        show_details = (show_timestamp or show_event_name or
                        show_result or show_info)
        text = "rulekey=" + self.rulekey
        if show_details:
            text += "\n"
        for entry in self.entries:
            if show_details:
                text += "\tItem: \n"
            text += entry.expand(show_timestamp, show_event_name,
                                 show_result, show_info)
        return text
def parseArgs():
    """Build the command-line parser for this tool and parse sys.argv."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Prints out rulekeys with all attempts to get the cache item for each rulekey.")
    # Positional: path to the buck log to analyze.
    parser.add_argument(
        'log_file',
        help='buck.log file')
    parser.add_argument(
        '--only-misses',
        action='store_true',
        default=False,
        help='Show only cache misses')
    parser.add_argument(
        '--fields',
        default='timestamp,event_name,result,info',
        help='Comma separated list of fields to print out. Default value is: '
             'timestamp,event_name,result,info')
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help='Verbose mode')
    return parser.parse_args()
def parse_line(line):
    """Parse one buck.log artifact-cache line into its event fields.

    Returns a dict with RULE_KEY_KEY, TIMESTAMP_KEY, EVENT_NAME_KEY,
    RESULT_KEY and INFO_KEY, or None when the line has no 40-hex
    rulekey or no millisecond timestamp.
    """
    rulekey_match = re.search(r'\b[0-9a-f]{40}\b', line)
    if not rulekey_match:
        return None
    # BUGFIX: the dot before the milliseconds was previously unescaped,
    # so it matched ANY character between seconds and millis; '\.' pins
    # it to the literal separator buck emits.
    timestamp_match = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}', line)
    if not timestamp_match:
        return None
    rulekey = rulekey_match.group()
    timestamp = timestamp_match.group()
    # Event name sits between the prefix and the closing ']' of the logger tag.
    event_name_start = line.find(EVENT_NAME_PREFIX)
    buck_event_tmp_line = line[event_name_start:]
    event_name = buck_event_tmp_line[:buck_event_tmp_line.find("]")][len(EVENT_NAME_PREFIX):]
    # Result is the word after the LAST occurrence of "cache " (e.g. "cache miss").
    result_keyword = "cache "
    result_start = line.rfind(result_keyword)
    result = line[result_start + len(result_keyword): -1].strip()
    # Info is everything between the event name's closing bracket and the result.
    info = line[line.find(event_name) + len(event_name) + 1:]
    info = info[: -len(result) - len(result_keyword) - 1].strip()
    return {RULE_KEY_KEY: rulekey,
            TIMESTAMP_KEY: timestamp,
            EVENT_NAME_KEY: event_name,
            RESULT_KEY: result,
            INFO_KEY: info}
def analyze(log_file, show_only_misses, fields_to_show):
    """Scan a buck.log and print each rulekey's cache-event history.

    NOTE(review): this file is Python 2 (print statement below); the
    log is scanned via mmap line by line.  fields_to_show is a comma
    separated string; membership tests below are substring checks.
    """
    rulekey_to_history = {}
    # 'r+' because mmap needs a writable descriptor for its default access mode.
    file = open(log_file, "r+")
    mm = mmap.mmap(file.fileno(), 0)
    while mm.tell() < mm.size():
        line = mm.readline()
        # Cheap substring filter before the regex-heavy parse_line().
        if EVENT_NAME_PREFIX in line:
            parsed_dict = parse_line(line)
            if not parsed_dict:
                continue
            rulekey = parsed_dict.get(RULE_KEY_KEY)
            history = rulekey_to_history.get(rulekey)
            if not history:
                history = ArtifactHistory(rulekey)
                rulekey_to_history[rulekey] = history
            history.add_entry(rulekey,
                parsed_dict.get(TIMESTAMP_KEY),
                parsed_dict.get(EVENT_NAME_KEY),
                parsed_dict.get(INFO_KEY),
                parsed_dict.get(RESULT_KEY))
    mm.close()
    file.close()
    # An artifact counts as a hit if ANY of its events ended in "hit".
    for i, artifact_history_item in enumerate(rulekey_to_history.values()):
        got_hit = False
        for j, history_entry in enumerate(artifact_history_item.entries):
            if (history_entry.result == "hit"):
                got_hit = True
                break
        if (show_only_misses and not got_hit or not show_only_misses):
            print artifact_history_item.expand(TIMESTAMP_KEY in fields_to_show,
                EVENT_NAME_KEY in fields_to_show,
                RESULT_KEY in fields_to_show,
                INFO_KEY in fields_to_show)
def main():
    """Entry point: validate the log path, then run the analysis."""
    args = parseArgs()
    log_path = args.log_file
    if not os.path.exists(log_path):
        raise Exception(log_path + ' does not exist')
    analyze(log_path, args.only_misses, args.fields)
# Script entry point.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Prediction module for Urban Sounds Classification"""
from __future__ import print_function
import os
import sys
import warnings
import mxnet as mx
from mxnet import nd
from model import get_net
try:
import librosa
except ImportError:
raise ImportError("Librosa is not installed! please run the following command:\
`pip install librosa`")
sys.path.append('../')
def predict(prediction_dir='./Test'):
    """Classify every audio file in *prediction_dir* with the trained model.

    Labels are read from ./synset.txt and weights from ./net.params;
    one "<file>  ->  <label>" line is printed per audio file.

    Parameters
    ----------
    prediction_dir: string, default ./Test
        The directory that contains the audio files on which predictions
        are to be made.
    """
    # Guard clauses: warn and bail out when any prerequisite is missing.
    if not os.path.exists(prediction_dir):
        warnings.warn("The directory on which predictions are to be made is not found!")
        return
    if len(os.listdir(prediction_dir)) == 0:
        warnings.warn("The directory on which predictions are to be made is empty! Exiting...")
        return
    if not os.path.exists('./synset.txt'):
        warnings.warn("The synset or labels for the dataset do not exist. Please run the training script first.")
        return
    with open("./synset.txt", "r") as f:
        synset = [l.rstrip() for l in f]
    net = get_net(len(synset))
    print("Trying to load the model with the saved parameters...")
    if not os.path.exists("./net.params"):
        warnings.warn("The model does not have any saved parameters... Cannot proceed! Train the model first")
        return
    net.load_parameters("./net.params")
    audio_names = os.listdir(prediction_dir)
    audio_paths = [os.path.join(prediction_dir, name) for name in audio_names]
    from transforms import MFCC
    mfcc = MFCC()
    print("\nStarting predictions for audio files in ", prediction_dir, " ....\n")
    for audio_path in audio_paths:
        # res_type='kaiser_fast' loads noticeably faster than the default
        # 'kaiser_best' resampler.
        samples, _ = librosa.load(audio_path, res_type='kaiser_fast')
        features = mfcc(mx.nd.array(samples))
        scores = net(features.reshape((1, -1)))
        best = nd.argmax(scores, axis=1)
        print(audio_path, " -> ", synset[int(best.asscalar())])
# Script entry point: optionally take the prediction directory from --pred.
if __name__ == '__main__':
    try:
        import argparse
        parser = argparse.ArgumentParser(description="Urban Sounds clsssification example - MXNet")
        parser.add_argument('--pred', '-p', help="Enter the folder path that contains your audio files", type=str)
        args = parser.parse_args()
        # BUGFIX: when --pred is omitted args.pred is None, and predict()
        # would crash in os.path.exists(None); fall back to the default.
        pred_dir = args.pred if args.pred is not None else './Test'
    except ImportError:
        warnings.warn("Argparse module not installed! passing default arguments.")
        pred_dir = './Test'
    predict(prediction_dir=pred_dir)
    print("Urban sounds classification Prediction DONE!")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from model.workitems import WorkItems
class WorkItemsTest(unittest.TestCase):
    def test_display_position_for_attachment(self):
        """Positions are 1-based; ids not in item_ids map to None."""
        items = WorkItems()
        items.item_ids = [0, 1, 2]
        # attachment id -> expected display position
        expectations = [(0, 1), (1, 2), (3, None)]
        for attachment_id, position in expectations:
            self.assertEqual(
                items.display_position_for_attachment(attachment_id),
                position)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""The collection of modern alternatives to deprecated & removed functionality.
Collects specimens of old ORM code and explicitly covers the recommended
modern (i.e. not deprecated) alternative to them. The tests snippets here can
be migrated directly to the wiki, docs, etc.
"""
from sqlalchemy import Integer, String, ForeignKey, func, text
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import mapper, relationship, create_session, sessionmaker
from sqlalchemy.testing import fixtures
class QueryAlternativesTest(fixtures.MappedTest):
    '''Collects modern idioms for Queries
    The docstring for each test case serves as miniature documentation about
    the deprecated use case, and the test body illustrates (and covers) the
    intended replacement code to accomplish the same task.
    Documenting the "old way" including the argument signature helps these
    cases remain useful to readers even after the deprecated method has been
    removed from the modern codebase.
    Format:
    def test_deprecated_thing(self):
        """Query.methodname(old, arg, **signature)
        output = session.query(User).deprecatedmethod(inputs)
        """
        # 0.4+
        output = session.query(User).newway(inputs)
        assert output is correct
        # 0.5+
        output = session.query(User).evennewerway(inputs)
        assert output is correct
    '''
    # Fixture policy: insert rows once for the whole class, never delete.
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        """Create the users/addresses fixture tables used by every test."""
        Table('users_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(64)))
        Table('addresses_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('user_id', Integer, ForeignKey('users_table.id')),
              Column('email_address', String(128)),
              Column('purpose', String(16)),
              Column('bounces', Integer, default=0))
    @classmethod
    def setup_classes(cls):
        """Declare the bare mapped classes (no behavior of their own)."""
        class User(cls.Basic):
            pass
        class Address(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        """Map the classes; User.addresses <-> Address.user via backref."""
        addresses_table, User, users_table, Address = (cls.tables.addresses_table,
                                                       cls.classes.User,
                                                       cls.tables.users_table,
                                                       cls.classes.Address)
        mapper(User, users_table, properties=dict(
            addresses=relationship(Address, backref='user'),
        ))
        mapper(Address, addresses_table)
    @classmethod
    def fixtures(cls):
        """Row data inserted once per class (see run_inserts above)."""
        return dict(
            users_table=(
                ('id', 'name'),
                (1, 'jack'),
                (2, 'ed'),
                (3, 'fred'),
                (4, 'chuck')),
            addresses_table=(
                ('id', 'user_id', 'email_address', 'purpose', 'bounces'),
                (1, 1, 'jack@jack.home', 'Personal', 0),
                (2, 1, 'jack@jack.bizz', 'Work', 1),
                (3, 2, 'ed@foo.bar', 'Personal', 0),
                (4, 3, 'fred@the.fred', 'Personal', 10)))
    ######################################################################
    def test_override_get(self):
        """MapperExtension.get()
        x = session.query.get(5)
        """
        Address = self.classes.Address
        from sqlalchemy.orm.query import Query
        cache = {}
        # Modern replacement: subclass Query and install it via
        # sessionmaker(query_cls=...) instead of a MapperExtension hook.
        class MyQuery(Query):
            def get(self, ident, **kwargs):
                if ident in cache:
                    return cache[ident]
                else:
                    x = super(MyQuery, self).get(ident)
                    cache[ident] = x
                    return x
        session = sessionmaker(query_cls=MyQuery)()
        ad1 = session.query(Address).get(1)
        assert ad1 in list(cache.values())
    def test_load(self):
        """x = session.query(Address).load(1)
        x = session.load(Address, 1)
        """
        Address = self.classes.Address
        session = create_session()
        # populate_existing() forces a refresh, matching old load() semantics.
        ad1 = session.query(Address).populate_existing().get(1)
        assert bool(ad1)
    def test_apply_max(self):
        """Query.apply_max(col)
        max = session.query(Address).apply_max(Address.bounces)
        """
        Address = self.classes.Address
        session = create_session()
        # 0.5.0
        maxes = list(session.query(Address).values(func.max(Address.bounces)))
        max = maxes[0][0]
        assert max == 10
        max = session.query(func.max(Address.bounces)).one()[0]
        assert max == 10
    def test_apply_min(self):
        """Query.apply_min(col)
        min = session.query(Address).apply_min(Address.bounces)
        """
        Address = self.classes.Address
        session = create_session()
        # 0.5.0
        mins = list(session.query(Address).values(func.min(Address.bounces)))
        min = mins[0][0]
        assert min == 0
        min = session.query(func.min(Address.bounces)).one()[0]
        assert min == 0
    def test_apply_avg(self):
        """Query.apply_avg(col)
        avg = session.query(Address).apply_avg(Address.bounces)
        """
        Address = self.classes.Address
        session = create_session()
        avgs = list(session.query(Address).values(func.avg(Address.bounces)))
        avg = avgs[0][0]
        assert avg > 0 and avg < 10
        avg = session.query(func.avg(Address.bounces)).one()[0]
        assert avg > 0 and avg < 10
    def test_apply_sum(self):
        """Query.apply_sum(col)
        avg = session.query(Address).apply_avg(Address.bounces)
        """
        Address = self.classes.Address
        session = create_session()
        avgs = list(session.query(Address).values(func.sum(Address.bounces)))
        avg = avgs[0][0]
        assert avg == 11
        avg = session.query(func.sum(Address.bounces)).one()[0]
        assert avg == 11
    def test_count_by(self):
        """Query.count_by(*args, **params)
        num = session.query(Address).count_by(purpose='Personal')
        # old-style implicit *_by join
        num = session.query(User).count_by(purpose='Personal')
        """
        User, Address = self.classes.User, self.classes.Address
        session = create_session()
        num = session.query(Address).filter_by(purpose='Personal').count()
        assert num == 3, num
        # The implicit join is now an explicit join() plus filter().
        num = (session.query(User).join('addresses').
               filter(Address.purpose=='Personal')).count()
        assert num == 3, num
    def test_count_whereclause(self):
        """Query.count(whereclause=None, params=None, **kwargs)
        num = session.query(Address).count(address_table.c.bounces > 1)
        """
        Address = self.classes.Address
        session = create_session()
        num = session.query(Address).filter(Address.bounces > 1).count()
        assert num == 1, num
    def test_execute(self):
        """Query.execute(clauseelement, params=None, *args, **kwargs)
        users = session.query(User).execute(users_table.select())
        """
        User, users_table = self.classes.User, self.tables.users_table
        session = create_session()
        users = session.query(User).from_statement(users_table.select()).all()
        assert len(users) == 4
    def test_get_by(self):
        """Query.get_by(*args, **params)
        user = session.query(User).get_by(name='ed')
        # 0.3-style implicit *_by join
        user = session.query(User).get_by(email_addresss='fred@the.fred')
        """
        User, Address = self.classes.User, self.classes.Address
        session = create_session()
        user = session.query(User).filter_by(name='ed').first()
        assert user.name == 'ed'
        user = (session.query(User).join('addresses').
                filter(Address.email_address=='fred@the.fred')).first()
        assert user.name == 'fred'
        # any() expresses the same relationship filter without a join.
        user = session.query(User).filter(
            User.addresses.any(Address.email_address=='fred@the.fred')).first()
        assert user.name == 'fred'
    def test_instances_entities(self):
        """Query.instances(cursor, *mappers_or_columns, **kwargs)
        sel = users_table.join(addresses_table).select(use_labels=True)
        res = session.query(User).instances(sel.execute(), Address)
        """
        addresses_table, User, users_table, Address = (self.tables.addresses_table,
                                                       self.classes.User,
                                                       self.tables.users_table,
                                                       self.classes.Address)
        session = create_session()
        sel = users_table.join(addresses_table).select(use_labels=True)
        res = list(session.query(User, Address).instances(sel.execute()))
        assert len(res) == 4
        cola, colb = res[0]
        assert isinstance(cola, User) and isinstance(colb, Address)
    def test_join_by(self):
        """Query.join_by(*args, **params)
        TODO
        """
        session = create_session()
    def test_join_to(self):
        """Query.join_to(key)
        TODO
        """
        session = create_session()
    def test_join_via(self):
        """Query.join_via(keys)
        TODO
        """
        session = create_session()
    def test_list(self):
        """Query.list()
        users = session.query(User).list()
        """
        User = self.classes.User
        session = create_session()
        users = session.query(User).all()
        assert len(users) == 4
    def test_scalar(self):
        """Query.scalar()
        user = session.query(User).filter(User.id==1).scalar()
        """
        User = self.classes.User
        session = create_session()
        user = session.query(User).filter(User.id==1).first()
        assert user.id==1
    def test_select(self):
        """Query.select(arg=None, **kwargs)
        users = session.query(User).select(users_table.c.name != None)
        """
        User = self.classes.User
        session = create_session()
        users = session.query(User).filter(User.name != None).all()
        assert len(users) == 4
    def test_select_by(self):
        """Query.select_by(*args, **params)
        users = session.query(User).select_by(name='fred')
        # 0.3 magic join on *_by methods
        users = session.query(User).select_by(email_address='fred@the.fred')
        """
        User, Address = self.classes.User, self.classes.Address
        session = create_session()
        users = session.query(User).filter_by(name='fred').all()
        assert len(users) == 1
        users = session.query(User).filter(User.name=='fred').all()
        assert len(users) == 1
        users = (session.query(User).join('addresses').
                 filter_by(email_address='fred@the.fred')).all()
        assert len(users) == 1
        users = session.query(User).filter(User.addresses.any(
            Address.email_address == 'fred@the.fred')).all()
        assert len(users) == 1
    def test_selectfirst(self):
        """Query.selectfirst(arg=None, **kwargs)
        bounced = session.query(Address).selectfirst(
            addresses_table.c.bounces > 0)
        """
        Address = self.classes.Address
        session = create_session()
        bounced = session.query(Address).filter(Address.bounces > 0).first()
        assert bounced.bounces > 0
    def test_selectfirst_by(self):
        """Query.selectfirst_by(*args, **params)
        onebounce = session.query(Address).selectfirst_by(bounces=1)
        # 0.3 magic join on *_by methods
        onebounce_user = session.query(User).selectfirst_by(bounces=1)
        """
        User, Address = self.classes.User, self.classes.Address
        session = create_session()
        onebounce = session.query(Address).filter_by(bounces=1).first()
        assert onebounce.bounces == 1
        onebounce_user = (session.query(User).join('addresses').
                          filter_by(bounces=1)).first()
        assert onebounce_user.name == 'jack'
        onebounce_user = (session.query(User).join('addresses').
                          filter(Address.bounces == 1)).first()
        assert onebounce_user.name == 'jack'
        onebounce_user = session.query(User).filter(User.addresses.any(
            Address.bounces == 1)).first()
        assert onebounce_user.name == 'jack'
    def test_selectone(self):
        """Query.selectone(arg=None, **kwargs)
        ed = session.query(User).selectone(users_table.c.name == 'ed')
        """
        User = self.classes.User
        session = create_session()
        ed = session.query(User).filter(User.name == 'jack').one()
    def test_selectone_by(self):
        """Query.selectone_by
        ed = session.query(User).selectone_by(name='ed')
        # 0.3 magic join on *_by methods
        ed = session.query(User).selectone_by(email_address='ed@foo.bar')
        """
        User, Address = self.classes.User, self.classes.Address
        session = create_session()
        ed = session.query(User).filter_by(name='jack').one()
        ed = session.query(User).filter(User.name == 'jack').one()
        ed = session.query(User).join('addresses').filter(
            Address.email_address == 'ed@foo.bar').one()
        ed = session.query(User).filter(User.addresses.any(
            Address.email_address == 'ed@foo.bar')).one()
    def test_select_statement(self):
        """Query.select_statement(statement, **params)
        users = session.query(User).select_statement(users_table.select())
        """
        User, users_table = self.classes.User, self.tables.users_table
        session = create_session()
        users = session.query(User).from_statement(users_table.select()).all()
        assert len(users) == 4
    def test_select_text(self):
        """Query.select_text(text, **params)
        users = session.query(User).select_text('SELECT * FROM users_table')
        """
        User = self.classes.User
        session = create_session()
        users = (session.query(User).
                 from_statement(text('SELECT * FROM users_table'))).all()
        assert len(users) == 4
    def test_select_whereclause(self):
        """Query.select_whereclause(whereclause=None, params=None, **kwargs)
        users = session,query(User).select_whereclause(users.c.name=='ed')
        users = session.query(User).select_whereclause("name='ed'")
        """
        User = self.classes.User
        session = create_session()
        users = session.query(User).filter(User.name=='ed').all()
        assert len(users) == 1 and users[0].name == 'ed'
        # Raw-SQL fragments must now be wrapped in text().
        users = session.query(User).filter(text("name='ed'")).all()
        assert len(users) == 1 and users[0].name == 'ed'
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Hard link support for Windows.
This module is a SCons tool which should be include in the topmost windows
environment. It is usually included by the target_platform_windows tool.
"""
import os
import stat
import sys
import SCons
# Import pywin32's win32file lazily and optionally: generate() below checks
# 'win32file' in globals() to decide whether hard-linking is available.
if sys.platform == 'win32':
  # Only attempt to load pywin32 on Windows systems
  try:
    import win32file
  except ImportError:
    print ('Warning: Unable to load win32file module; using copy instead of'
           ' hard linking for env.Install(). Is pywin32 present?')
#------------------------------------------------------------------------------
# Python 2.4 and 2.5's os module doesn't support os.link on Windows, even
# though Windows does have hard-link capability on NTFS filesystems. So by
# default, SCons will insist on copying files instead of linking them as it
# does on other (linux,mac) OS's.
#
# Use the CreateHardLink() functionality from pywin32 to provide hard link
# capability on Windows also.
def _HardLink(fs, src, dst):
  """Hard link function for hooking into SCons.Node.FS.
  Args:
    fs: Filesystem class to use.
    src: Source filename to link to.
    dst: Destination link name to create.
  Raises:
    OSError: The link could not be created.
  """
  # A hard link shares file permissions from the source. On Windows, the write
  # access of the file itself determines whether the file can be deleted
  # (unlike Linux/Mac, where it's the write access of the containing
  # directory). So if we made a link from a read-only file, the only way to
  # delete it would be to make the link writable, which would have the
  # unintended effect of making the source writable too.
  #
  # So if the source is read-only, we can't hard link from it.
  if not stat.S_IMODE(fs.stat(src)[stat.ST_MODE]) & stat.S_IWRITE:
    raise OSError('Unsafe to hard-link read-only file: %s' % src)
  # If the file is writable, only hard-link from it if it was build by SCons.
  # Those files shouldn't later become read-only. We don't hard-link from
  # writable files which SCons didn't create, because those could become
  # read-only (for example, following a 'p4 submit'), which as indicated above
  # would make our link read-only too.
  if not fs.File(src).has_builder():
    raise OSError('Unsafe to hard-link file not built by SCons: %s' % src)
  try:
    win32file.CreateHardLink(dst, src)
  # NOTE(review): Python 2-only 'except cls, name' syntax — this tool
  # targets Python 2.4 (see shebang) and will not parse under Python 3.
  except win32file.error, msg:
    # Translate errors into standard OSError which SCons expects.
    raise OSError(msg)
#------------------------------------------------------------------------------
def generate(env):
  # NOTE: SCons requires the use of this name, which fails gpylint.
  """SCons entry point for this tool."""
  env = env  # Silence gpylint
  # Install our hard-link hook only when pywin32 was loaded successfully
  # by the module-level import guard above.
  pywin32_loaded = 'win32file' in globals()
  if pywin32_loaded:
    SCons.Node.FS._hardlink_func = _HardLink
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import logging
LOG = logging.getLogger(".citation")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import GObject, GLib
#-------------------------------------------------------------------------
#
# GRAMPS classes
#
#-------------------------------------------------------------------------
from gramps.gen.errors import WindowActiveError
from gramps.gen.lib import Citation, Source
from gramps.gen.lib import Source, Citation
from ...dbguielement import DbGUIElement
from ...selectors import SelectorFactory
from .citationrefmodel import CitationRefModel
from .embeddedlist import EmbeddedList, TEXT_COL, MARKUP_COL, ICON_COL
from ...ddtargets import DdTargets
#-------------------------------------------------------------------------
#
# CitationEmbedList
#
#-------------------------------------------------------------------------
class CitationEmbedList(EmbeddedList, DbGUIElement):
    """
    Citation List display tab for edit dialogs.

    Shows the citations attached to the edited object and lets the user
    add, share, edit, reorder and remove them.
    Derives from the EmbeddedList class.
    """
    _HANDLE_COL = 5  # Column number from CitationRefModel
    _DND_TYPE = DdTargets.CITATION_LINK
    _DND_EXTRA = DdTargets.SOURCE_LINK
    _MSG = {
        'add'   : _('Create and add a new citation and new source'),
        'del'   : _('Remove the existing citation'),
        'edit'  : _('Edit the selected citation'),
        'share' : _('Add an existing citation or source'),
        'up'    : _('Move the selected citation upwards'),
        'down'  : _('Move the selected citation downwards'),
    }
    # index = column in model. Value =
    # (name, sortcol in model, width, markup/text, weight_col, icon)
    _column_names = [
        (_('Title'), 0, 200, TEXT_COL, -1, None),
        (_('Author'), 1, 125, TEXT_COL, -1, None),
        (_('Page'), 2, 100, TEXT_COL, -1, None),
        (_('ID'), 3, 75, TEXT_COL, -1, None),
        (_('Private'), 4, 30, ICON_COL, -1, 'gramps-lock')
    ]

    def __init__(self, dbstate, uistate, track, data, callertitle=None):
        """
        :param data: list of citation handles shown/managed by this tab.
        :param callertitle: title of the calling window, forwarded to the
            citation editor for display purposes.
        """
        self.data = data
        self.callertitle = callertitle
        EmbeddedList.__init__(self, dbstate, uistate, track,
                              _("_Source Citations"), CitationRefModel,
                              share_button=True, move_buttons=True)
        DbGUIElement.__init__(self, dbstate.db)
        self.callman.register_handles({'citation': self.data})

    def _connect_db_signals(self):
        """
        Implement base class DbGUIElement method
        """
        # citation: citation-rebuild closes the editors, so no need to
        # connect to it
        self.callman.register_callbacks(
            {'citation-delete': self.citation_delete,
             'citation-update': self.citation_update,
            })
        self.callman.connect_all(keys=['citation'])

    def get_icon_name(self):
        """
        Return the stock-id icon name associated with the display tab
        """
        return 'gramps-source'

    def get_data(self):
        """
        Return the data associated with display tab
        """
        return self.data

    def column_order(self):
        """
        Return the column order of the columns in the display tab.
        """
        return ((1, 4), (1, 0), (1, 1), (1, 2), (1, 3))

    def add_button_clicked(self, obj):
        """
        Create a new Citation instance and call the EditCitation editor with
        the new citation.

        Called when the Add button is clicked.
        If the window already exists (WindowActiveError), we ignore it.
        This prevents the dialog from coming up twice on the same object.
        """
        try:
            from .. import EditCitation
            EditCitation(self.dbstate, self.uistate, self.track,
                         Citation(), Source(),
                         self.add_callback, self.callertitle)
        except WindowActiveError:
            pass

    def add_callback(self, value):
        """
        Called to update the screen when a new citation is added
        """
        data = self.get_data()
        data.append(value)
        self.callman.register_handles({'citation': [value]})
        self.changed = True
        self.rebuild()
        GLib.idle_add(self.tree.scroll_to_cell, len(data) - 1)

    def share_button_clicked(self, obj):
        """
        Let the user pick an existing Citation or Source to attach.

        If a Source is chosen, a new Citation pointing at it is created;
        if a Citation is chosen, it is opened for sharing.
        """
        SelectCitation = SelectorFactory('Citation')
        sel = SelectCitation(self.dbstate, self.uistate, self.track)
        # 'selection' avoids shadowing the builtin 'object'.
        selection = sel.run()
        # Lazy %-args: the message is only formatted when DEBUG is enabled.
        LOG.debug("selected object: %s", selection)
        # the object returned should either be a Source or a Citation
        if selection:
            if isinstance(selection, Source):
                try:
                    from .. import EditCitation
                    EditCitation(self.dbstate, self.uistate, self.track,
                                 Citation(), selection,
                                 callback=self.add_callback,
                                 callertitle=self.callertitle)
                except WindowActiveError:
                    from ...dialog import WarningDialog
                    WarningDialog(_("Cannot share this reference"),
                                  self.__blocked_text())
            elif isinstance(selection, Citation):
                try:
                    from .. import EditCitation
                    EditCitation(self.dbstate, self.uistate, self.track,
                                 selection, callback=self.add_callback,
                                 callertitle=self.callertitle)
                except WindowActiveError:
                    from ...dialog import WarningDialog
                    WarningDialog(_("Cannot share this reference"),
                                  self.__blocked_text())
            else:
                raise ValueError("selection must be either source or citation")

    def __blocked_text(self):
        """
        Return the common text used when citation cannot be edited
        """
        return _("This citation cannot be created at this time. "
                 "Either the associated Source object is already being "
                 "edited, or another citation associated with the same "
                 "source is being edited.\n\nTo edit this "
                 "citation, you need to close the object.")

    def edit_button_clicked(self, obj):
        """
        Get the selected Citation instance and call the EditCitation editor
        with the citation.

        Called when the Edit button is clicked.
        If the window already exists (WindowActiveError), we ignore it.
        This prevents the dialog from coming up twice on the same object.
        """
        handle = self.get_selected()
        if handle:
            citation = self.dbstate.db.get_citation_from_handle(handle)
            try:
                from .. import EditCitation
                EditCitation(self.dbstate, self.uistate, self.track, citation,
                             callertitle=self.callertitle)
            except WindowActiveError:
                pass

    def citation_delete(self, del_citation_handle_list):
        """
        Outside of this tab citation objects have been deleted. Check if tab
        and object must be changed.

        Note: delete of object will cause reference on database to be removed,
        so this method need not do this
        """
        rebuild = False
        for handle in del_citation_handle_list:
            # A handle may legitimately appear more than once; drop them all.
            while self.data.count(handle) > 0:
                self.data.remove(handle)
                rebuild = True
        if rebuild:
            self.rebuild()

    def citation_update(self, upd_citation_handle_list):
        """
        Outside of this tab citation objects have been updated. Check if tab
        and object must be updated.
        """
        for handle in upd_citation_handle_list:
            if handle in self.data:
                self.rebuild()
                break

    def _handle_drag(self, row, handle):
        """
        A CITATION_LINK has been dragged
        """
        if handle:
            dropped = self.dbstate.db.get_citation_from_handle(handle)
            if isinstance(dropped, Citation):
                try:
                    from .. import EditCitation
                    EditCitation(self.dbstate, self.uistate, self.track,
                                 dropped, callback=self.add_callback,
                                 callertitle=self.callertitle)
                except WindowActiveError:
                    from ...dialog import WarningDialog
                    WarningDialog(_("Cannot share this reference"),
                                  self.__blocked_text())
            else:
                raise ValueError("selection must be either source or citation")

    def handle_extra_type(self, objtype, handle):
        """
        A SOURCE_LINK object has been dragged
        """
        if handle:
            dropped = self.dbstate.db.get_source_from_handle(handle)
            if isinstance(dropped, Source):
                try:
                    from .. import EditCitation
                    EditCitation(self.dbstate, self.uistate, self.track,
                                 Citation(), dropped,
                                 callback=self.add_callback,
                                 callertitle=self.callertitle)
                except WindowActiveError:
                    from ...dialog import WarningDialog
                    WarningDialog(_("Cannot share this reference"),
                                  self.__blocked_text())
            else:
                raise ValueError("selection must be either source or citation")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- test-case-name: twisted.test.test_plugin -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Plugin system for Twisted.
@author: Jp Calderone
@author: Glyph Lefkowitz
"""
import os
import sys
from zope.interface import Interface, providedBy
def _determinePickleModule():
"""
Determine which 'pickle' API module to use.
"""
try:
import cPickle
return cPickle
except ImportError:
import pickle
return pickle
pickle = _determinePickleModule()
from twisted.python.components import getAdapterFactory
from twisted.python.reflect import namedAny
from twisted.python import log
from twisted.python.modules import getModule
class IPlugin(Interface):
    """
    Interface that must be implemented by all plugins.

    Only objects which implement this interface will be considered for return
    by C{getPlugins}.  To be useful, plugins should also implement some other
    application-specific interface.
    """
class CachedPlugin(object):
    """
    One plugin object discovered in a dropin module.

    Records only the metadata needed to decide whether to load the real
    plugin (name, description, provided interfaces); the actual object is
    imported lazily by L{load}.  Instances register themselves with their
    dropin on construction.
    """
    def __init__(self, dropin, name, description, provided):
        self.dropin = dropin              # owning CachedDropin
        self.name = name                  # attribute name within the module
        self.description = description    # plugin object's docstring
        self.provided = provided          # list of provided interfaces
        # Side effect: register with the dropin's plugin list.
        self.dropin.plugins.append(self)

    def __repr__(self):
        return '<CachedPlugin %r/%r (provides %r)>' % (
            self.name, self.dropin.moduleName,
            ', '.join([i.__name__ for i in self.provided]))

    def load(self):
        """Import and return the real plugin object."""
        return namedAny(self.dropin.moduleName + '.' + self.name)

    def __conform__(self, interface, registry=None, default=None):
        # zope.interface adaptation hook: load the real plugin when the
        # requested interface is directly provided, or when an adapter
        # from a provided interface is registered.
        for providedInterface in self.provided:
            if providedInterface.isOrExtends(interface):
                return self.load()
            if getAdapterFactory(providedInterface, interface, None) is not None:
                return interface(self.load(), default)
        return default

    # backwards compat HOORJ
    getComponent = __conform__
class CachedDropin(object):
    """
    A collection of L{CachedPlugin} instances from a particular module in a
    plugin package.

    @type moduleName: C{str}
    @ivar moduleName: The fully qualified name of the plugin module this
        represents.

    @type description: C{str} or C{NoneType}
    @ivar description: A brief explanation of this collection of plugins
        (probably the plugin module's docstring).

    @type plugins: C{list}
    @ivar plugins: The L{CachedPlugin} instances which were loaded from this
        dropin.
    """
    def __init__(self, moduleName, description):
        # Plugins append themselves here (see CachedPlugin.__init__).
        self.plugins = []
        self.moduleName = moduleName
        self.description = description
def _generateCacheEntry(provider):
    """
    Build a L{CachedDropin} describing every plugin in a plugin module.

    @param provider: a loaded plugin module; each module attribute that is
        adaptable to L{IPlugin} becomes a L{CachedPlugin}.

    @return: the populated L{CachedDropin}.
    """
    dropin = CachedDropin(provider.__name__,
                          provider.__doc__)
    # items() iterates identically to the Py2-only iteritems() and keeps
    # this helper portable.
    for k, v in provider.__dict__.items():
        plugin = IPlugin(v, None)
        if plugin is not None:
            # Instantiated for its side-effects: registers with dropin.
            CachedPlugin(dropin, k, v.__doc__, list(providedBy(plugin)))
    return dropin
try:
    fromkeys = dict.fromkeys
except AttributeError:
    # Very old Pythons lack dict.fromkeys; emulate it.
    def fromkeys(keys, value=None):
        return dict([(k, value) for k in keys])
def getCache(module):
    """
    Compute all the possible loadable plugins, while loading as few as
    possible and hitting the filesystem as little as possible.

    @param module: a Python module object. This represents a package to search
    for plugins.

    @return: a dictionary mapping module names to L{CachedDropin} instances.
    """
    allCachesCombined = {}
    mod = getModule(module.__name__)
    # don't want to walk deep, only immediate children.
    buckets = {}
    # Fill buckets with modules by related entry on the given package's
    # __path__. There's an abstraction inversion going on here, because this
    # information is already represented internally in twisted.python.modules,
    # but it's simple enough that I'm willing to live with it. If anyone else
    # wants to fix up this iteration so that it's one path segment at a time,
    # be my guest. --glyph
    for plugmod in mod.iterModules():
        fpp = plugmod.filePath.parent()
        if fpp not in buckets:
            buckets[fpp] = []
        bucket = buckets[fpp]
        bucket.append(plugmod)
    # One dropin.cache per directory that contributes plugin modules.
    for pseudoPackagePath, bucket in buckets.iteritems():
        dropinPath = pseudoPackagePath.child('dropin.cache')
        try:
            lastCached = dropinPath.getModificationTime()
            dropinDotCache = pickle.load(dropinPath.open('r'))
        except:
            # NOTE(review): deliberately broad — a missing, unreadable or
            # corrupt cache simply forces a full rebuild below.
            dropinDotCache = {}
            lastCached = 0
        needsWrite = False
        existingKeys = {}
        for pluginModule in bucket:
            pluginKey = pluginModule.name.split('.')[-1]
            existingKeys[pluginKey] = True
            if ((pluginKey not in dropinDotCache) or
                (pluginModule.filePath.getModificationTime() >= lastCached)):
                # Module is new, or changed since the cache was written.
                needsWrite = True
                try:
                    provider = pluginModule.load()
                except:
                    # Import failure of one plugin module must not abort the
                    # scan; log and continue with the rest.
                    # dropinDotCache.pop(pluginKey, None)
                    log.err()
                else:
                    entry = _generateCacheEntry(provider)
                    dropinDotCache[pluginKey] = entry
        # Make sure that the cache doesn't contain any stale plugins.
        for pluginKey in dropinDotCache.keys():
            if pluginKey not in existingKeys:
                del dropinDotCache[pluginKey]
                needsWrite = True
        # Persist the refreshed cache unless writing is disabled; failures
        # here are non-fatal (the cache is an optimization only).
        if needsWrite and os.environ.get("TWISTED_DISABLE_WRITING_OF_PLUGIN_CACHE") is None:
            try:
                dropinPath.setContent(pickle.dumps(dropinDotCache))
            except OSError, e:
                log.msg(
                    format=(
                        "Unable to write to plugin cache %(path)s: error "
                        "number %(errno)d"),
                    path=dropinPath.path, errno=e.errno)
            except:
                log.err(None, "Unexpected error while writing cache file")
        allCachesCombined.update(dropinDotCache)
    return allCachesCombined
def getPlugins(interface, package=None):
    """
    Retrieve all plugins implementing the given interface beneath the given
    module.

    @param interface: An interface class.  Only plugins which implement this
        interface will be returned.

    @param package: A package beneath which plugins are installed.  For
        most uses, the default value is correct.

    @return: An iterator of plugins.
    """
    if package is None:
        import twisted.plugins as package
    allDropins = getCache(package)
    # values() iterates identically to the Py2-only itervalues() and keeps
    # this function portable.
    for dropin in allDropins.values():
        for plugin in dropin.plugins:
            try:
                adapted = interface(plugin, None)
            except:
                # Adaptation runs plugin-provided code; log the failure and
                # keep yielding the remaining plugins.
                log.err()
            else:
                if adapted is not None:
                    yield adapted

# Old, backwards compatible name. Don't use this.
getPlugIns = getPlugins
def pluginPackagePaths(name):
    """
    Return a list of additional directories which should be searched for
    modules to be included as part of the named plugin package.

    @type name: C{str}
    @param name: The fully-qualified Python name of a plugin package, eg
        C{'twisted.plugins'}.

    @rtype: C{list} of C{str}
    @return: The absolute paths to other directories which may contain plugin
        modules for the named plugin package.
    """
    packageSegments = name.split('.')
    initSegments = packageSegments + ['__init__.py']
    # Note that this may include directories which do not exist. It may be
    # preferable to remove such directories at this point, rather than allow
    # them to be searched later on.
    #
    # Note as well that only '__init__.py' will be considered to make a
    # directory a package (and thus exclude it from this list). This means
    # that if you create a master plugin package which has some other kind of
    # __init__ (eg, __init__.pyc) it will be incorrectly treated as a
    # supplementary plugin directory.
    result = []
    for entry in sys.path:
        if not os.path.exists(os.path.join(entry, *initSegments)):
            result.append(os.path.abspath(os.path.join(entry, *packageSegments)))
    return result
__all__ = ['getPlugins', 'pluginPackagePaths']
|
unknown
|
codeparrot/codeparrot-clean
| ||
// (C) Copyright Tobias Schwinger
//
// Use modification and distribution are subject to the boost Software License,
// Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt).
//------------------------------------------------------------------------------
#ifndef BOOST_FT_IS_FUNCTION_POINTER_HPP_INCLUDED
#define BOOST_FT_IS_FUNCTION_POINTER_HPP_INCLUDED
#include <boost/mpl/aux_/lambda_support.hpp>
#include <boost/function_types/components.hpp>
namespace boost
{
namespace function_types
{
// Metafunction (MPL-compatible): derives from a true type iff T is a
// pointer to a function whose properties match the given Tag, as decided
// by function_types::represents over the decomposed components of T.
template< typename T, typename Tag = null_tag >
struct is_function_pointer
: function_types::represents
< function_types::components<T>
, function_types::tag<Tag ,detail::pointer_tag>
>
{
// Registers this template with the MPL lambda facility so it can be used
// in placeholder expressions.
BOOST_MPL_AUX_LAMBDA_SUPPORT(2,is_function_pointer,(T,Tag))
};
}
}
#endif
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/function_types/is_function_pointer.hpp
|
"""
This script is publically available from the web page given below. It is not
part of the live coding package but is included for the sake of completeness.
Author: Tim Golden
Source: http://tgolden.sc.sabren.com/python/win32_how_do_i/watch_directory_for_changes.html
From recipe page:
The approach here is to use the MS FindFirstChangeNotification API, exposed
via the pywin32 win32file module. It needs a little explanation: you get a
change handle for a directory (optionally with its subdirectories) for certain
kinds of change. You then use the ubiquitous WaitForSingleObject call from
win32event, which fires when something's changed in one of your directories.
Having noticed that something's changed, you're back to os.listdir-scanning
to compare the before and after images. Repeat to fade.
NB: Only call FindNextChangeNotification if the FindFirst... has fired, not
if it has timed out.
Todo:
Use this at all.
"""
import os
import win32file
import win32event
import win32con
path_to_watch = os.path.abspath (".")
#
# FindFirstChangeNotification sets up a handle for watching
# file changes. The first parameter is the path to be
# watched; the second is a boolean indicating whether the
# directories underneath the one specified are to be watched;
# the third is a list of flags as to what kind of changes to
# watch for. We're just looking at file additions / deletions.
#
change_handle = win32file.FindFirstChangeNotification (
path_to_watch,
0,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME
)
#
# Loop forever, listing any file changes. The WaitFor... will
# time out every half a second allowing for keyboard interrupts
# to terminate the loop.
#
try:
old_path_contents = dict ([(f, None) for f in os.listdir (path_to_watch)])
while 1:
result = win32event.WaitForSingleObject (change_handle, 500)
#
# If the WaitFor... returned because of a notification (as
# opposed to timing out or some error) then look for the
# changes in the directory contents.
#
if result == win32con.WAIT_OBJECT_0:
new_path_contents = dict ([(f, None) for f in os.listdir (path_to_watch)])
added = [f for f in new_path_contents if not f in old_path_contents]
deleted = [f for f in old_path_contents if not f in new_path_contents]
if added: print "Added: ", ", ".join (added)
if deleted: print "Deleted: ", ", ".join (deleted)
old_path_contents = new_path_contents
win32file.FindNextChangeNotification (change_handle)
finally:
win32file.FindCloseChangeNotification (change_handle)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""
usage: %prog species1,species2,... nrequired < maf
"""
import sys
import bx.align.maf
from bx.cookbook import doc_optparse
SPAN = 100
MIN = 100
def main():
    """
    Read MAF blocks from stdin and print merged intervals on the reference
    (first) component, as "src start end", for runs of blocks in which at
    least ``nrequired`` of the requested species are present.  Adjacent
    qualifying blocks closer than SPAN reference bases are merged; only
    intervals at least MIN bases long are reported.
    """
    options, args = doc_optparse.parse(__doc__)
    try:
        species = args[0].split(',')
        nrequired = int(args[1])
    except Exception:
        doc_optparse.exit()
    maf_reader = bx.align.maf.Reader(sys.stdin)
    # State of the interval currently being extended (None = none open).
    interval_start = None
    interval_end = None
    for m in maf_reader:
        ref = m.components[0]
        # Does this alignment have enough of the required species
        if nrequired <= len([comp for comp in m.components if comp.src.split('.')[0] in species]):
            if interval_start is None:
                # Open a new interval at this block.
                interval_start = ref.start
                interval_end = ref.end
            else:
                if ref.start - interval_end < SPAN:
                    # Close enough to the previous block: extend.
                    interval_end = ref.end
                else:
                    # Gap too large: emit the finished interval if long
                    # enough, then start a new one at this block.
                    if interval_end - interval_start >= MIN:
                        print(ref.src.split('.')[1], interval_start, interval_end)
                    interval_start = ref.start
                    interval_end = ref.end
        else:
            # Block fails the species requirement: flush any open interval.
            if interval_start is not None and interval_end - interval_start >= MIN:
                print(ref.src.split('.')[1], interval_start, interval_end)
            interval_start = None
            interval_end = None
if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import sys
from os_xenapi.client import exception as xenapi_exception
from oslo_log import log as logging
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.image import glance
from nova import utils
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class GlanceStore(object):
    """Download/upload VHD images between Glance and a XenServer SR via the
    dom0 'glance' plugin."""

    def _call_glance_plugin(self, context, instance, session, fn, params):
        """
        Invoke function ``fn`` in the dom0 glance plugin with retries.

        Each retry picks the next Glance API server (round-robin) via
        ``pick_glance`` and records any raised exception as an instance
        fault via ``retry_cb``.
        """
        glance_api_servers = glance.get_api_servers()

        def pick_glance(kwargs):
            # Mutates the call kwargs to target the next API server.
            server = next(glance_api_servers)
            kwargs['endpoint'] = server
            kwargs['api_version'] = 2
            # NOTE(sdague): is the return significant here at all?
            return server

        def retry_cb(context, instance, exc=None):
            # Called between retries; logs and records the failure.
            if exc:
                exc_info = sys.exc_info()
                LOG.debug(six.text_type(exc), exc_info=exc_info)
                compute_utils.add_instance_fault_from_exc(
                    context, instance, exc, exc_info)

        cb = functools.partial(retry_cb, context, instance)
        return session.call_plugin_serialized_with_retry(
            'glance.py', fn, CONF.glance.num_retries, pick_glance, cb,
            **params)

    def _make_params(self, context, session, image_id):
        """Build the parameter dict common to download and upload calls."""
        return {'image_id': image_id,
                'sr_path': vm_utils.get_sr_path(session),
                'extra_headers': glance.generate_identity_headers(context)}

    def download_image(self, context, session, instance, image_id):
        """
        Fetch the image's VHDs from Glance into the SR.

        :returns: the plugin's description of the downloaded VDIs.
        :raises exception.CouldNotFetchImage: when plugin retries are
            exhausted.
        """
        params = self._make_params(context, session, image_id)
        params['uuid_stack'] = vm_utils._make_uuid_stack()
        try:
            vdis = self._call_glance_plugin(context, instance, session,
                                            'download_vhd2', params)
        except xenapi_exception.PluginRetriesExceeded:
            raise exception.CouldNotFetchImage(image_id=image_id)
        return vdis

    def upload_image(self, context, session, instance, image_id, vdi_uuids):
        """
        Upload the named VDIs to Glance as a VHD image.

        :raises exception.CouldNotUploadImage: when plugin retries are
            exhausted.
        """
        params = self._make_params(context, session, image_id)
        params['vdi_uuids'] = vdi_uuids
        props = params['properties'] = {}
        props['auto_disk_config'] = instance['auto_disk_config']
        props['os_type'] = instance.get('os_type', None) or (
            CONF.xenserver.default_os_type)
        compression_level = vm_utils.get_compression_level()
        if compression_level:
            props['xenapi_image_compression_level'] = compression_level
        # An explicitly disabled auto_disk_config overrides the value taken
        # from the instance above.
        auto_disk_config = utils.get_auto_disk_config_from_instance(instance)
        if utils.is_auto_disk_config_disabled(auto_disk_config):
            props["auto_disk_config"] = "disabled"
        try:
            self._call_glance_plugin(context, instance, session,
                                     'upload_vhd2', params)
        except xenapi_exception.PluginRetriesExceeded:
            raise exception.CouldNotUploadImage(image_id=image_id)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2010, Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
"""
This module helps generate encoded sensorimotor information given a grid-like
world with elements in every grid location.
Usage:
Create an instance of the SMSequences class giving the following information:
The set of all possible sensory elements.
A list sensory elements in the world (duplicates allowed).
A list of coordinates corresponding to the previous list of sensory elements.
Max and min displacement allowed in one time step for a motor transition.
An encoding information for both sensory elements and motor commands.
Use that instance to
Generate a sensorimotor sequence of a given length.
SMSequences.generateSensorimotorSequence(sequenceLength)
Encode a sensorimotor sequence given a list of coordinates.
SMSequences.encodeSensorimotorSequence(eyeLocs)
A simple example of how you would use this class is at the bottom of this file.
Run this script with no arguments to run that code.
"""
import numpy
from nupic.bindings.math import Random
from nupic.encoders import ScalarEncoder
from nupic.encoders import VectorEncoder
from nupic.encoders.category import CategoryEncoder
from nupic.encoders.sdrcategory import SDRCategoryEncoder
# Utility routines for printing sequences
def printSequence(x, formatString="%d"):
  """
  Compact print a list or numpy array on a single line.

  @param x (list or numpy.array)  Elements to print.
  @param formatString (string)    % format applied to each element.
  """
  # str.join avoids quadratic += string building, and print(s) with a single
  # argument behaves identically to the Py2 print statement while remaining
  # Py3-compatible.
  print("".join([formatString % v for v in x]))
def printSequences(x, formatString="%d"):
  """
  Print each row of a 2D numpy array as one compact line.

  @param x (2D numpy.array)     Rows of values to print.
  @param formatString (string)  % format applied to each element.
  """
  [seqLen, numElements] = x.shape
  for i in range(seqLen):
    # Same join/print(s) rationale as printSequence: linear-time string
    # build, identical Py2 output, Py3-compatible.
    print("".join([formatString % x[i][j] for j in range(numElements)]))
class SMSequences(object):
"""
Class generates sensorimotor sequences
"""
def __init__(self,
sensoryInputElements,
spatialConfig,
sensoryInputElementsPool=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz0123456789"),
minDisplacement=1,
maxDisplacement=1,
numActiveBitsSensoryInput=9,
numActiveBitsMotorInput=9,
seed=42,
verbosity=False,
useRandomEncoder=False):
"""
@param sensoryInputElements (list)
Strings or numbers representing the sensory elements that exist in your
world. Elements can be repeated if multiple of the same exist.
@param spatialConfig (numpy.array)
Array of size: (1, len(sensoryInputElements), dimension). It has a
coordinate for every element in sensoryInputElements.
@param sensoryInputElementsPool (list)
List of strings representing a readable version of all possible sensory
elements in this world. Elements don't need to be in any order and there
should be no duplicates. By default this contains the set of
alphanumeric characters.
@param maxDisplacement (int)
Maximum `distance` for a motor command. Distance is defined by the
largest difference along any coordinate dimension.
@param minDisplacement (int)
Minimum `distance` for a motor command. Distance is defined by the
largest difference along any coordinate dimension.
@param numActiveBitsSensoryInput (int)
Number of active bits for each sensory input.
@param numActiveBitsMotorInput (int)
Number of active bits for each dimension of the motor input.
@param seed (int)
Random seed for nupic.bindings.Random.
@param verbosity (int)
Verbosity
@param useRandomEncoder (boolean)
if True, use the random encoder SDRCategoryEncoder. If False,
use CategoryEncoder. CategoryEncoder encodes categories using contiguous
non-overlapping bits for each category, which makes it easier to debug.
"""
#---------------------------------------------------------------------------------
# Store creation parameters
self.sensoryInputElements = sensoryInputElements
self.sensoryInputElementsPool = sensoryInputElementsPool
self.spatialConfig = spatialConfig.astype(int)
self.spatialLength = len(spatialConfig)
self.maxDisplacement = maxDisplacement
self.minDisplacement = minDisplacement
self.numActiveBitsSensoryInput = numActiveBitsSensoryInput
self.numActiveBitsMotorInput = numActiveBitsMotorInput
self.verbosity = verbosity
self.seed = seed
self.initialize(useRandomEncoder)
def initialize(self, useRandomEncoder):
"""
Initialize the various data structures.
"""
self.setRandomSeed(self.seed)
self.dim = numpy.shape(self.spatialConfig)[-1]
self.spatialMap = dict( zip( map(tuple, list(self.spatialConfig)),
self.sensoryInputElements))
self.lengthMotorInput1D = (2*self.maxDisplacement + 1) * \
self.numActiveBitsMotorInput
uniqueSensoryElements = list(set(self.sensoryInputElementsPool))
if useRandomEncoder:
self.sensoryEncoder = SDRCategoryEncoder(n=1024,
w=self.numActiveBitsSensoryInput,
categoryList=uniqueSensoryElements,
forced=True)
self.lengthSensoryInput = self.sensoryEncoder.getWidth()
else:
self.lengthSensoryInput = (len(self.sensoryInputElementsPool)+1) * \
self.numActiveBitsSensoryInput
self.sensoryEncoder = CategoryEncoder(w=self.numActiveBitsSensoryInput,
categoryList=uniqueSensoryElements, forced=True)
motorEncoder1D = ScalarEncoder(n=self.lengthMotorInput1D,
w=self.numActiveBitsMotorInput,
minval=-self.maxDisplacement,
maxval=self.maxDisplacement,
clipInput=True,
forced=True)
self.motorEncoder = VectorEncoder(length=self.dim, encoder=motorEncoder1D)
def generateSensorimotorSequence(self, sequenceLength):
"""
Generate sensorimotor sequences of length sequenceLength.
@param sequenceLength (int)
Length of the sensorimotor sequence.
@return (tuple) Contains:
sensorySequence (list)
Encoded sensory input for whole sequence.
motorSequence (list)
Encoded motor input for whole sequence.
sensorimotorSequence (list)
Encoder sensorimotor input for whole sequence. This is useful
when you want to give external input to temporal memory.
"""
motorSequence = []
sensorySequence = []
sensorimotorSequence = []
currentEyeLoc = self.nupicRandomChoice(self.spatialConfig)
for i in xrange(sequenceLength):
currentSensoryInput = self.spatialMap[tuple(currentEyeLoc)]
nextEyeLoc, currentEyeV = self.getNextEyeLocation(currentEyeLoc)
if self.verbosity:
print "sensory input = ", currentSensoryInput, \
"eye location = ", currentEyeLoc, \
" motor command = ", currentEyeV
sensoryInput = self.encodeSensoryInput(currentSensoryInput)
motorInput = self.encodeMotorInput(list(currentEyeV))
sensorimotorInput = numpy.concatenate((sensoryInput, motorInput))
sensorySequence.append(sensoryInput)
motorSequence.append(motorInput)
sensorimotorSequence.append(sensorimotorInput)
currentEyeLoc = nextEyeLoc
return (sensorySequence, motorSequence, sensorimotorSequence)
def encodeSensorimotorSequence(self, eyeLocs):
"""
Encode sensorimotor sequence given the eye movements. Sequence will have
length len(eyeLocs) - 1 because only the differences of eye locations can be
used to encoder motor commands.
@param eyeLocs (list)
Numpy coordinates describing where the eye is looking.
@return (tuple) Contains:
sensorySequence (list)
Encoded sensory input for whole sequence.
motorSequence (list)
Encoded motor input for whole sequence.
sensorimotorSequence (list)
Encoder sensorimotor input for whole sequence. This is useful
when you want to give external input to temporal memory.
"""
sequenceLength = len(eyeLocs) - 1
motorSequence = []
sensorySequence = []
sensorimotorSequence = []
for i in xrange(sequenceLength):
currentEyeLoc = eyeLocs[i]
nextEyeLoc = eyeLocs[i+1]
currentSensoryInput = self.spatialMap[currentEyeLoc]
currentEyeV = nextEyeLoc - currentEyeLoc
if self.verbosity:
print "sensory input = ", currentSensoryInput, \
"eye location = ", currentEyeLoc, \
" motor command = ", currentEyeV
sensoryInput = self.encodeSensoryInput(currentSensoryInput)
motorInput = self.encodeMotorInput(list(currentEyeV))
sensorimotorInput = numpy.concatenate((sensoryInput, motorInput))
sensorySequence.append(sensoryInput)
motorSequence.append(motorInput)
sensorimotorSequence.append(sensorimotorInput)
return (sensorySequence, motorSequence, sensorimotorSequence)
def getNextEyeLocation(self, currentEyeLoc):
"""
Generate next eye location based on current eye location.
@param currentEyeLoc (numpy.array)
Current coordinate describing the eye location in the world.
@return (tuple) Contains:
nextEyeLoc (numpy.array)
Coordinate of the next eye location.
eyeDiff (numpy.array)
Vector describing change from currentEyeLoc to nextEyeLoc.
"""
possibleEyeLocs = []
for loc in self.spatialConfig:
shift = abs(max(loc - currentEyeLoc))
if self.minDisplacement <= shift <= self.maxDisplacement:
possibleEyeLocs.append(loc)
nextEyeLoc = self.nupicRandomChoice(possibleEyeLocs)
eyeDiff = nextEyeLoc - currentEyeLoc
return nextEyeLoc, eyeDiff
def setRandomSeed(self, seed):
"""
Reset the nupic random generator. This is necessary to reset random seed to
generate new sequences.
@param seed (int)
Seed for nupic.bindings.Random.
"""
self.seed = seed
self._random = Random()
self._random.setSeed(seed)
def nupicRandomChoice(self, array):
"""
Chooses a random element from an array using the nupic random number
generator.
@param array (list or numpy.array)
Array to choose random element from.
@return (element)
Element chosen at random.
"""
return array[self._random.getUInt32(len(array))]
def encodeMotorInput(self, motorInput):
"""
Encode motor command to bit vector.
@param motorInput (1D numpy.array)
Motor command to be encoded.
@return (1D numpy.array)
Encoded motor command.
"""
if not hasattr(motorInput, "__iter__"):
motorInput = list([motorInput])
return self.motorEncoder.encode(motorInput)
def decodeMotorInput(self, motorInputPattern):
"""
Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command.
"""
key = self.motorEncoder.decode(motorInputPattern)[0].keys()[0]
motorCommand = self.motorEncoder.decode(motorInputPattern)[0][key][1][0]
return motorCommand
def encodeSensoryInput(self, sensoryInputElement):
"""
Encode sensory input to bit vector
@param sensoryElement (1D numpy.array)
Sensory element to be encoded.
@return (1D numpy.array)
Encoded sensory element.
"""
return self.sensoryEncoder.encode(sensoryInputElement)
def decodeSensoryInput(self, sensoryInputPattern):
"""
Decode sensory input from bit vector.
@param sensoryInputPattern (1D numpy.array)
Encoded sensory element.
@return (1D numpy.array)
Decoded sensory element.
"""
return self.sensoryEncoder.decode(sensoryInputPattern)[0]['category'][1]
def printSensoryCodingScheme(self):
"""
Print sensory inputs along with their encoded versions.
"""
print "\nsensory coding scheme: "
for loc in self.spatialConfig:
sensoryElement = self.spatialMap[tuple(loc)]
print sensoryElement, "%s : " % loc,
printSequence(self.encodeSensoryInput(sensoryElement))
def printMotorCodingScheme(self):
"""
Print motor commands (displacement vector) along with their encoded
versions.
"""
print "\nmotor coding scheme: "
self.build(self.dim, [])
def build(self, n, vec):
    """
    Recursive function to help print motor coding scheme.

    @param n (int)
           Number of dimensions still to enumerate.
    @param vec (list)
           Displacement components accumulated so far.
    """
    # Try every displacement in [-maxDisplacement, maxDisplacement] for
    # the current axis; recurse until all axes have a component.
    for i in range(-self.maxDisplacement, self.maxDisplacement+1):
        # NOTE: ``next`` shadows the builtin of the same name; harmless
        # here since the builtin is not used inside this loop.
        next = vec + [i]
        if n == 1:
            # Vector is complete: print it alongside its encoding
            # (trailing commas keep everything on one output line).
            print '{:>5}\t'.format(next), " = ",
            printSequence(self.encodeMotorInput(next))
        else:
            self.build(n-1, next)
if __name__ == "__main__":
    # A simple example of how you would use this class
    seq = SMSequences(
        sensoryInputElementsPool=["A", "B", "C", "D", "E", "F", "G", "H"],
        sensoryInputElements=["E", "D", "A", "D", "G", "H"],
        spatialConfig=numpy.array([[0],[1],[2],[3],[4],[5]]),
        minDisplacement=1,
        maxDisplacement=2,
        verbosity=3,
        seed=4,
        useRandomEncoder=False
    )
    # Generate a 10-step sensorimotor sequence. Based on the indexing
    # below: sequence[0] holds the sensory SDRs, sequence[1] the motor
    # SDRs, and sequence[2] the combined distal input per step.
    sequence = seq.generateSensorimotorSequence(10)
    print "Length of sequence:",len(sequence[0])
    for i in range(len((sequence[0]))):
        print "\n============= Sequence position",i
        # Print the sensory pattern and motor command in "English"
        print "Sensory pattern:",seq.decodeSensoryInput(sequence[0][i]),
        print "Motor command:",seq.decodeMotorInput(sequence[1][i])
        # Print the SDR's corresponding to sensory and motor commands
        print "Sensory signal",
        printSequence(sequence[0][i])
        print "Motor signal",
        printSequence(sequence[1][i])
        print "Combined distal input",
        printSequence(sequence[2][i])
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for FC Zone Manager."""
import mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.zonemanager.drivers import fc_zone_driver
from cinder.zonemanager import fc_zone_manager
# Shared fixtures for the tests below: a single Brocade fabric with one
# initiator WWN mapped to one target WWN.
fabric_name = 'BRCD_FAB_3'
init_target_map = {'10008c7cff523b01': ['20240002ac000a50']}
fabric_map = {'BRCD_FAB_3': ['20240002ac000a50']}
target_list = ['20240002ac000a50']
class TestFCZoneManager(test.TestCase):
    """Tests for ZoneManager add/delete connection handling."""

    @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False)
    def setUp(self, opt_mock):
        super(TestFCZoneManager, self).setUp()
        config = conf.Configuration(None)
        config.fc_fabric_names = fabric_name

        # Stub out driver construction so the ZoneManager installs a
        # mocked FCZoneDriver instead of loading a real backend driver.
        def fake_build_driver(self):
            self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)

        self.stubs.Set(fc_zone_manager.ZoneManager, '_build_driver',
                       fake_build_driver)
        self.zm = fc_zone_manager.ZoneManager(configuration=config)
        self.configuration = conf.Configuration(None)
        self.configuration.fc_fabric_names = fabric_name
        self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)

    def __init__(self, *args, **kwargs):
        super(TestFCZoneManager, self).__init__(*args, **kwargs)

    @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False)
    def test_add_connection(self, opt_mock):
        # add_connection() should look up the SAN context for the targets
        # and forward the zoning change to the driver with the fabric name.
        with mock.patch.object(self.zm.driver, 'add_connection')\
                as add_connection_mock:
            self.zm.driver.get_san_context.return_value = fabric_map
            self.zm.add_connection(init_target_map)
            self.zm.driver.get_san_context.assert_called_once_with(target_list)
            add_connection_mock.assert_called_once_with(fabric_name,
                                                        init_target_map)

    @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False)
    def test_add_connection_error(self, opt_mock):
        # Driver failures must surface to callers as ZoneManagerException.
        with mock.patch.object(self.zm.driver, 'add_connection')\
                as add_connection_mock:
            add_connection_mock.side_effect = exception.FCZoneDriverException
            self.assertRaises(exception.ZoneManagerException,
                              self.zm.add_connection, init_target_map)

    @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False)
    def test_delete_connection(self, opt_mock):
        # delete_connection() mirrors add: resolve the SAN context, then
        # delegate removal to the driver per fabric.
        with mock.patch.object(self.zm.driver, 'delete_connection')\
                as delete_connection_mock:
            self.zm.driver.get_san_context.return_value = fabric_map
            self.zm.delete_connection(init_target_map)
            self.zm.driver.get_san_context.assert_called_once_with(target_list)
            delete_connection_mock.assert_called_once_with(fabric_name,
                                                           init_target_map)

    @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False)
    def test_delete_connection_error(self, opt_mock):
        # Driver failures during delete must also be wrapped in
        # ZoneManagerException.
        with mock.patch.object(self.zm.driver, 'delete_connection')\
                as del_connection_mock:
            del_connection_mock.side_effect = exception.FCZoneDriverException
            self.assertRaises(exception.ZoneManagerException,
                              self.zm.delete_connection, init_target_map)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
import datetime
import pickle
import unittest
from collections import OrderedDict
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import F, Q, Count
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from django.utils import six
from django.utils.six.moves import range
from .models import (
FK1, X, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, Company, Cover, CustomPk, CustomPkTag, Detail, DumbCategory,
Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job,
JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel,
Member, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node,
Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory,
Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program,
ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related,
RelatedIndividual, RelatedObject, Report, ReservedName, Responsibility,
School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory,
Staff, StaffUser, Student, Tag, Task, Ticket21203Child, Ticket21203Parent,
Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid,
)
class BaseQuerysetTest(TestCase):
    """TestCase with a helper for comparing values()/values_list() results."""

    def assertValueQuerysetEqual(self, qs, values):
        """Compare *qs* to *values* row-by-row without repr()-transforming."""
        identity = lambda row: row
        return self.assertQuerysetEqual(qs, values, transform=identity)
class Queries1Tests(BaseQuerysetTest):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
t4 = Tag.objects.create(name='t4', parent=cls.t3)
cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, cls.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
cls.i1.tags = [cls.t1, cls.t2]
cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
cls.i2.tags = [cls.t1, cls.t3]
cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
i4.tags = [t4]
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
self.assertIn('v0', str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
# It is possible to reuse U for the second subquery, no need to use W.
self.assertNotIn('w0', str(qs4.query).lower())
# So, 'U0."id"' is referenced twice.
self.assertTrue(str(qs4.query).lower().count('u0'), 2)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
self.assertEqual(2, qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count(),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.tables if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
self.assertEqual(
len([x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = 127
msg = 'Maximum recursion depth exceeded: too many subqueries.'
with self.assertRaisesMessage(RuntimeError, msg):
for i in six.moves.range(local_recursion_limit * 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases, {
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
}
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() & Tag.objects.all()
)
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() | Tag.objects.all()
)
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
Note.objects.filter({'note': 'n1', 'misc': 'foo'})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.tables), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertTrue(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertValueQuerysetEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn('note_id', ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note_id'),
[{'note_id': 1}, {'note_id': 2}]
)
# ...or use the field name.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note'),
[{'note': 1}, {'note': 2}]
)
def test_ticket2902(self):
# Parameters can be given to extra_select, *if* you use an OrderedDict.
# (First we need to know which order the keys fall in "naturally" on
# your system, so we can put things in the wrong way around from
# normal. A normal dict would thus fail.)
s = [('a', '%s'), ('b', '%s')]
params = ['one', 'two']
if {'a': 1, 'b': 2}.keys() == ['a', 'b']:
s.reverse()
params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
d = Item.objects.extra(select=OrderedDict(s), select_params=params).values('a', 'b')[0]
self.assertEqual(d, {'a': 'one', 'b': 'two'})
# Order by the number of tags attached to an item.
l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
self.assertEqual([o.count for o in l], [2, 2, 1, 0])
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
name = "one"
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.datetimes('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7098(self):
# Make sure semi-deprecated ordering by related models syntax still
# works.
self.assertValueQuerysetEqual(
Item.objects.values('note__note').order_by('queries_note.note', 'id'),
[{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertQuerysetEqual(
Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
['<Tag: t3>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
# More twisted cases, involving nested negations.
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one')),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
['<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
['<Item: four>', '<Item: one>', '<Item: three>']
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(
query2.get_compiler(qs.db).as_sql()[0],
query
)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertQuerysetEqual(
self.n1.annotation_set.filter(Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)),
['<Annotation: a1>']
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertQuerysetEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
['<Item: one>', '<Item: two>']
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal='m')
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal='m'), [])
self.assertQuerysetEqual(q.exclude(meal='m'), [])
self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
self.assertQuerysetEqual(q.select_related('food'), [])
self.assertQuerysetEqual(q.annotate(Count('food')), [])
self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(
q.extra(select={'foo': "1"}),
[]
)
q.query.low_mark = 1
self.assertRaisesMessage(
AssertionError,
'Cannot change a query once a slice has been taken',
q.extra, select={'foo': "1"}
)
self.assertQuerysetEqual(q.reverse(), [])
self.assertQuerysetEqual(q.defer('meal'), [])
self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(
len(Note.objects.order_by('extrainfo__info').distinct()),
3
)
# Pickling of DateQuerySets used to fail
qs = Item.objects.datetimes('created', 'month')
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertQuerysetEqual(
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
['<Tag: t2>', '<Tag: t3>']
)
# Multi-valued values() and values_list() querysets should raise errors.
self.assertRaisesMessage(
TypeError,
'Cannot use multi-field values as a filter value.',
lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
)
self.assertRaisesMessage(
TypeError,
'Cannot use multi-field values as a filter value.',
lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
)
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertValueQuerysetEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{'id': 1}, {'id': 2}, {'id': 3}]
)
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
['<Annotation: a1>']
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
for i in [n_obj.pk]:
yield i
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
    def test_ticket10742(self):
        """Subqueries inside __in are inlined into SQL, not executed separately."""
        # Queries used in an __in clause don't execute subqueries
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
        # The subquery result cache should not be populated
        # (a populated cache would mean the subquery was evaluated in Python).
        self.assertIsNone(subq._result_cache)
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.exclude(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)
        subq = Author.objects.filter(num__lt=3000)
        self.assertQuerysetEqual(
            Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
            ['<Author: a1>']
        )
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)
    def test_ticket7076(self):
        """exclude() must keep rows whose related value is NULL."""
        # Excluding shouldn't eliminate NULL entries.
        self.assertQuerysetEqual(
            Item.objects.exclude(modified=self.time1).order_by('name'),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__name=self.t1.name),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )
    def test_ticket7181(self):
        """Ordering across a nullable FK; merging with empty querysets."""
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database dependent.
        # Instead, we just count the number of results).
        self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
        # Empty querysets can be merged with others.
        self.assertQuerysetEqual(
            Note.objects.none() | Note.objects.all(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(
            Note.objects.all() | Note.objects.none(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        # ANDing with none() always yields the empty queryset.
        self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
        self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
    def test_ticket9411(self):
        """The internal Query.bump_prefix() doesn't break a runnable queryset."""
        # Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
        # sufficient that this query runs without error.
        qs = Tag.objects.values_list('id', flat=True).order_by('id')
        qs.query.bump_prefix(qs.query)
        first = qs[0]
        # ids are sequential here, so the 5 tags span [first, first + 5).
        self.assertEqual(list(qs), list(range(first, first + 5)))
    def test_ticket8439(self):
        """Complex AND/OR combinations over nullable relations produce correct joins."""
        # Complex combinations of conjunctions, disjunctions and nullable
        # relations.
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
            ['<Annotation: a1>']
        )
        xx = ExtraInfo.objects.create(info='xx', note=self.n3)
        self.assertQuerysetEqual(
            Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
            ['<Note: n1>', '<Note: n3>']
        )
        # Exactly one referenced LEFT OUTER JOIN should appear in the query's
        # internal alias bookkeeping for the disjunction above.
        q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
        self.assertEqual(
            len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
            1
        )
    def test_ticket17429(self):
        """
        Ensure that Meta.ordering=None works the same as Meta.ordering=[]
        """
        original_ordering = Tag._meta.ordering
        Tag._meta.ordering = None
        # try/finally guarantees Tag._meta is restored even if the assertion fails,
        # so later tests aren't affected by the monkey-patch.
        try:
            self.assertQuerysetEqual(
                Tag.objects.all(),
                ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
                ordered=False
            )
        finally:
            Tag._meta.ordering = original_ordering
    def test_exclude(self):
        """exclude(cond) is equivalent to filter(~Q(cond)), including compound Qs."""
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t4'),
            [repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))])
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')),
            [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))])
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')),
            [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))])
    def test_nested_exclude(self):
        """exclude(~Q(cond)) is equivalent to the double negation filter(~~Q(cond))."""
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
    def test_double_exclude(self):
        """Double negation (~~Q and ~Q(~Q)) collapses back to the plain filter."""
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])
    def test_exclude_in(self):
        """exclude()/double-negation equivalences also hold for __in lookups."""
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
            [repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])
    def test_ticket_10790_1(self):
        """isnull on a direct FK is resolved without any join at all."""
        # Querying direct fields with isnull should trim the left outer join.
        # It also should not create INNER JOIN.
        q = Tag.objects.filter(parent__isnull=True)
        self.assertQuerysetEqual(q, ['<Tag: t1>'])
        self.assertNotIn('JOIN', str(q.query))
        q = Tag.objects.filter(parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        self.assertNotIn('JOIN', str(q.query))
        # The same holds for the negated (exclude) forms.
        q = Tag.objects.exclude(parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        self.assertNotIn('JOIN', str(q.query))
        q = Tag.objects.exclude(parent__isnull=False)
        self.assertQuerysetEqual(q, ['<Tag: t1>'])
        self.assertNotIn('JOIN', str(q.query))
        # One hop (parent) needs a single LEFT OUTER JOIN and no INNER JOIN.
        q = Tag.objects.exclude(parent__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertNotIn('INNER JOIN', str(q.query))
    def test_ticket_10790_2(self):
        """Only the final outer join of a multi-hop isnull chain is trimmed."""
        # Querying across several tables should strip only the last outer join,
        # while preserving the preceding inner joins.
        q = Tag.objects.filter(parent__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t4>', '<Tag: t5>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        # Querying without isnull should not convert anything to left outer join.
        q = Tag.objects.filter(parent__parent=self.t1)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t4>', '<Tag: t5>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    def test_ticket_10790_3(self):
        """isnull over a reverse relation keeps the LEFT OUTER JOIN it needs."""
        # Querying via indirect fields should populate the left outer join
        q = NamedCategory.objects.filter(tag__isnull=True)
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        # join to dumbcategory ptr_id
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        self.assertQuerysetEqual(q, [])
        # Querying across several tables should strip only the last join, while
        # preserving the preceding left outer joins.
        q = NamedCategory.objects.filter(tag__parent__isnull=True)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])
    def test_ticket_10790_4(self):
        """isnull across an m2m keeps the intermediary (through) table joined."""
        # Querying across m2m field should not strip the m2m table from join.
        q = Author.objects.filter(item__tags__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a2>', '<Author: a3>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
        self.assertNotIn('INNER JOIN', str(q.query))
        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
        self.assertNotIn('INNER JOIN', str(q.query))
    def test_ticket_10790_5(self):
        """isnull=False across an m2m chain produces only INNER JOINs."""
        # Querying with isnull=False across m2m field should not create outer joins
        q = Author.objects.filter(item__tags__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 2)
        q = Author.objects.filter(item__tags__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 3)
        # Each extra __parent hop adds exactly one more INNER JOIN.
        q = Author.objects.filter(item__tags__parent__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 4)
    def test_ticket_10790_6(self):
        """isnull=True across an m2m chain uses only LEFT OUTER JOINs."""
        # Querying with isnull=True across m2m field should not create inner joins
        # and strip last outer join
        q = Author.objects.filter(item__tags__parent__parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
             '<Author: a2>', '<Author: a3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
    def test_ticket_10790_7(self):
        """isnull on a reverse FK relation keeps its (single) join."""
        # Reverse querying with isnull should not strip the join
        q = Author.objects.filter(item__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
        q = Author.objects.filter(item__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    def test_ticket_10790_8(self):
        """Join trimming also applies inside combined (ORed) Q objects."""
        # Querying with combined q-objects should also strip the left outer join
        q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
        self.assertQuerysetEqual(
            q,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
    def test_ticket_10790_combine(self):
        """Combining querysets with |/& must not resurrect trimmed outer joins."""
        # Combining queries should not re-populate the left outer join
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__isnull=False)
        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q1 & q2
        self.assertQuerysetEqual(q3, [])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        # Same combination checks, but mixing an isnull filter with an exact one.
        q2 = Tag.objects.filter(parent=self.t1)
        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        # Combination should be symmetric with respect to join generation.
        q3 = q2 | q1
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        # A two-hop isnull keeps exactly one outer join after combining, in
        # either order.
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__parent__isnull=True)
        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q2 | q1
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    def test_ticket19672(self):
        """ANDing an isnull=False condition with a negated related lookup works."""
        self.assertQuerysetEqual(
            Report.objects.filter(Q(creator__isnull=False) &
                                  ~Q(creator__extra__value=41)),
            ['<Report: r1>']
        )
    def test_ticket_20250(self):
        """A negated Q combined with annotate() must still build valid SQL."""
        # A negated Q along with an annotated queryset failed in Django 1.4
        qs = Author.objects.annotate(Count('item'))
        qs = qs.filter(~Q(extra__value=0))
        # Rendering the query is enough to show SQL generation didn't crash.
        self.assertIn('SELECT', str(qs.query))
        self.assertQuerysetEqual(
            qs,
            ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
        )
    def test_lookup_constraint_fielderror(self):
        """An unknown lookup field raises FieldError listing the valid choices."""
        msg = (
            "Cannot resolve keyword 'unknown_field' into field. Choices are: "
            "annotation, category, category_id, children, id, item, "
            "managedmodel, name, parent, parent_id"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.filter(unknown_field__name='generic')
class Queries2Tests(TestCase):
    """Regression tests around numeric comparison lookups and count()."""
    @classmethod
    def setUpTestData(cls):
        # Three numbers spaced apart, giving clear boundaries for the
        # gt/gte/lt/lte range tests below.
        Number.objects.create(num=4)
        Number.objects.create(num=8)
        Number.objects.create(num=12)
    def test_ticket4289(self):
        # A slight variation on the restricting the filtering choices by the
        # lookup constraints.
        self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=8, num__lt=13),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
            ['<Number: 8>']
        )
    def test_ticket12239(self):
        # Float was being rounded to integer on gte queries on integer field. Tests
        # show that gt, lt, gte, and lte work as desired. Note that the fix changes
        # get_prep_lookup for gte and lt queries only.
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.0),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12.0),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=11.9),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.0),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.9),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
    def test_ticket7759(self):
        # Count should work with a partially read result set.
        count = Number.objects.count()
        qs = Number.objects.all()
        def run():
            # Deliberately returns on the FIRST iteration so the result set is
            # only partially consumed when count() is issued.
            for obj in qs:
                return qs.count() == count
        self.assertTrue(run())
class Queries3Tests(BaseQuerysetTest):
    """Error-handling regressions for datetimes() and values()/only()/defer()."""
    def test_ticket7107(self):
        # This shouldn't create an infinite loop.
        self.assertQuerysetEqual(Valid.objects.all(), [])
    def test_ticket8683(self):
        # Raise proper error when a DateQuerySet gets passed a wrong type of
        # field
        self.assertRaisesMessage(
            AssertionError,
            "'name' isn't a DateTimeField.",
            Item.objects.datetimes, 'name', 'month'
        )
    def test_ticket22023(self):
        # only()/defer() are not allowed after values()/values_list().
        with self.assertRaisesMessage(TypeError,
                "Cannot call only() after .values() or .values_list()"):
            Valid.objects.values().only()
        with self.assertRaisesMessage(TypeError,
                "Cannot call defer() after .values() or .values_list()"):
            Valid.objects.values().defer()
class Queries4Tests(BaseQuerysetTest):
    """Regressions around combined queries, outer joins and isnull on
    subclass / one-to-one relations (tickets 11811, 14876, 7095, 15316...)."""
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
        cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
        # r3 intentionally has no creator, to exercise NULL FK handling.
        cls.r3 = Report.objects.create(name='r3')
        Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
        Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)
    def test_ticket11811(self):
        # Using an unsaved model instance in a query must raise ValueError.
        unsaved_category = NamedCategory(name="Other")
        with six.assertRaisesRegex(self, ValueError,
                'Unsaved model instance <NamedCategory: Other> '
                'cannot be used in an ORM query.'):
            Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
    def test_ticket14876(self):
        # Note: when combining the query we need to have information available
        # about the join type of the trimmed "creator__isnull" join. If we
        # don't have that information, then the join is created as INNER JOIN
        # and results will be incorrect.
        q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
        q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
        q2 = Item.objects.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
        q2 = Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))
    def test_combine_join_reuse(self):
        # Test that we correctly recreate joins having identical connections
        # in the rhs query, in case the query is ORed together. Related to
        # ticket #18748
        Report.objects.create(name='r4', creator=self.a1)
        q1 = Author.objects.filter(report__name='r5')
        q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
        combined = q1 | q2
        self.assertEqual(str(combined.query).count('JOIN'), 2)
        self.assertEqual(len(combined), 1)
        self.assertEqual(combined[0].name, 'a1')
    def test_ticket7095(self):
        # Updates that are filtered on the model being updated are somewhat
        # tricky in MySQL. This exercises that case.
        ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
        self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
        # A values() or values_list() query across joined models must use outer
        # joins appropriately.
        # Note: In Oracle, we expect a null CharField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_charfield_repr = ''
        else:
            expected_null_charfield_repr = None
        self.assertValueQuerysetEqual(
            Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
            ['e1', 'e2', expected_null_charfield_repr],
        )
        # Similarly for select_related(), joins beyond an initial nullable join
        # must use outer joins so that all results are included.
        self.assertQuerysetEqual(
            Report.objects.select_related("creator", "creator__extra").order_by("name"),
            ['<Report: r1>', '<Report: r2>', '<Report: r3>']
        )
        # When there are multiple paths to a table from another table, we have
        # to be careful not to accidentally reuse an inappropriate join when
        # using select_related(). We used to return the parent's Detail record
        # here by mistake.
        d1 = Detail.objects.create(data="d1")
        d2 = Detail.objects.create(data="d2")
        m1 = Member.objects.create(name="m1", details=d1)
        m2 = Member.objects.create(name="m2", details=d2)
        Child.objects.create(person=m2, parent=m1)
        obj = m1.children.select_related("person__details")[0]
        self.assertEqual(obj.person.details.data, 'd2')
    def test_order_by_resetting(self):
        # Calling order_by() with no parameters removes any existing ordering on the
        # model. But it should still be possible to add new ordering after that.
        qs = Author.objects.order_by().order_by('name')
        self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0])
    def test_order_by_reverse_fk(self):
        # It is possible to order by reverse of foreign key, although that can lead
        # to duplicate results.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SimpleCategory.objects.create(name="category2")
        CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c1)
        self.assertQuerysetEqual(
            SimpleCategory.objects.order_by('categoryitem', 'pk'),
            [c1, c2, c1], lambda x: x)
    def test_ticket10181(self):
        # Avoid raising an EmptyResultSet if an inner query is probably
        # empty (and hence, not executed).
        self.assertQuerysetEqual(
            Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
            []
        )
    def test_ticket15316_filter_false(self):
        # filter(subclass__isnull=False) keeps only items whose category IS a
        # SpecialCategory.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1",
                special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2",
                special_name="special2")
        CategoryItem.objects.create(category=c1)
        ci2 = CategoryItem.objects.create(category=c2)
        ci3 = CategoryItem.objects.create(category=c3)
        qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
    def test_ticket15316_exclude_false(self):
        # exclude(subclass__isnull=False) is the complement of the filter above.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1",
                special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2",
                special_name="special2")
        ci1 = CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c3)
        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
    def test_ticket15316_filter_true(self):
        # filter(subclass__isnull=True) keeps items whose category is NOT a
        # SpecialCategory.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1",
                special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2",
                special_name="special2")
        ci1 = CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c3)
        qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
    def test_ticket15316_exclude_true(self):
        # exclude(subclass__isnull=True) is the complement of the filter above.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1",
                special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2",
                special_name="special2")
        CategoryItem.objects.create(category=c1)
        ci2 = CategoryItem.objects.create(category=c2)
        ci3 = CategoryItem.objects.create(category=c3)
        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
    def test_ticket15316_one2one_filter_false(self):
        # Same isnull semantics, but across a reverse one-to-one relation.
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        CategoryItem.objects.create(category=c)
        ci2 = CategoryItem.objects.create(category=c0)
        ci3 = CategoryItem.objects.create(category=c1)
        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
    def test_ticket15316_one2one_exclude_false(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        ci1 = CategoryItem.objects.create(category=c)
        CategoryItem.objects.create(category=c0)
        CategoryItem.objects.create(category=c1)
        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
    def test_ticket15316_one2one_filter_true(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        ci1 = CategoryItem.objects.create(category=c)
        CategoryItem.objects.create(category=c0)
        CategoryItem.objects.create(category=c1)
        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
    def test_ticket15316_one2one_exclude_true(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        CategoryItem.objects.create(category=c)
        ci2 = CategoryItem.objects.create(category=c0)
        ci3 = CategoryItem.objects.create(category=c1)
        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
class Queries5Tests(TestCase):
    """Regressions around ordering via Meta/extra(), values(), raw extra()
    selects, sub-table updates and empty Q objects."""
    @classmethod
    def setUpTestData(cls):
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.rank1 = Ranking.objects.create(rank=2, author=a2)
        Ranking.objects.create(rank=1, author=a3)
        Ranking.objects.create(rank=3, author=a1)
    def test_ordering(self):
        # Cross model ordering is possible in Meta, too.
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        self.assertQuerysetEqual(
            Ranking.objects.all().order_by('rank'),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )
        # Ordering of extra() pieces is possible, too and you can mix extra
        # fields and model fields in the ordering.
        self.assertQuerysetEqual(
            Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        self.assertEqual(
            [o.good for o in qs.extra(order_by=('-good',))],
            [True, False, False]
        )
        self.assertQuerysetEqual(
            qs.extra(order_by=('-good', 'id')),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        # Despite having some extra aliases in the query, we can still omit
        # them in a values() query.
        dicts = qs.values('id', 'rank').order_by('id')
        self.assertEqual(
            [d['rank'] for d in dicts],
            [2, 1, 3]
        )
    def test_ticket7256(self):
        # An empty values() call includes all aliases, including those from an
        # extra()
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        dicts = qs.values().order_by('id')
        # Strip the database-assigned ids so the comparison below is stable.
        for d in dicts:
            del d['id']
            del d['author_id']
        self.assertEqual(
            [sorted(d.items()) for d in dicts],
            [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
        )
    def test_ticket7045(self):
        # Extra tables used to crash SQL construction on the second use.
        qs = Ranking.objects.extra(tables=['django_site'])
        qs.query.get_compiler(qs.db).as_sql()
        # test passes if this doesn't raise an exception.
        qs.query.get_compiler(qs.db).as_sql()
    def test_ticket9848(self):
        # Make sure that updates which only filter on sub-tables don't
        # inadvertently update the wrong records (bug #9848).
        # Make sure that the IDs from different tables don't happen to match.
        self.assertQuerysetEqual(
            Ranking.objects.filter(author__name='a1'),
            ['<Ranking: 3: a1>']
        )
        self.assertEqual(
            Ranking.objects.filter(author__name='a1').update(rank='4'),
            1
        )
        r = Ranking.objects.filter(author__name='a1')[0]
        self.assertNotEqual(r.id, r.author.id)
        self.assertEqual(r.rank, 4)
        # Restore the original rank so the final full-table assertion holds.
        r.rank = 3
        r.save()
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
    def test_ticket5261(self):
        # Test different empty excludes: an empty Q() matches everything, so
        # excluding/negating it must still return all rows.
        self.assertQuerysetEqual(
            Note.objects.exclude(Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q() | ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.exclude(~Q() & ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
    def test_extra_select_literal_percent_s(self):
        # Allow %%s to escape select clauses
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
            '%s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
            '%s bar %s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
            'bar %s'
        )
class SelectRelatedTests(TestCase):
    def test_tickets_3045_3288(self):
        """select_related() over circular relations must terminate.

        Once upon a time, select_related() with circular relations would loop
        infinitely if you forgot to specify "depth". Now an arbitrary default
        upper bound is applied, so both querysets below evaluate cleanly.
        """
        for queryset in (X.objects.all(), X.objects.select_related()):
            self.assertQuerysetEqual(queryset, [])
class SubclassFKTests(TestCase):
    def test_ticket7778(self):
        """Deleting a model subclass also deletes its parent row.

        Model subclasses could not be deleted if a nullable foreign key
        relates to a model that relates back.
        """
        baseline = Celebrity.objects.count()
        chef = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), baseline + 1)
        # Two fans relate back to the subclass instance.
        for _ in range(2):
            Fan.objects.create(fan_of=chef)
        chef.delete()
        # The parent Celebrity row must have been removed along with the chef.
        self.assertEqual(Celebrity.objects.count(), baseline)
class CustomPkTests(TestCase):
    def test_ticket7371(self):
        """Ordering by a custom-named primary key field works (empty table)."""
        ordered = Related.objects.order_by('custom')
        self.assertQuerysetEqual(ordered, [])
class NullableRelOrderingTests(TestCase):
    """Ordering across nullable relations must use LEFT joins without
    demoting pre-existing INNER joins."""
    def test_ticket10028(self):
        # Ordering by model related to nullable relations(!) should use outer
        # joins, so that all results are included.
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )
    def test_join_already_in_query(self):
        # Ordering by model related to nullable relations should not change
        # the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s, f=1)
        Plaything.objects.create(name="p2", others=r)
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertNotIn('JOIN', str(qs.query))
        qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
        self.assertIn('INNER', str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__pk will add one new join (to single)
        # and that join must be LEFT join. The already existing join to related
        # objects must be kept INNER. So, we have both an INNER and a LEFT join
        # in the query.
        self.assertEqual(str(qs.query).count('LEFT'), 1)
        self.assertEqual(str(qs.query).count('INNER'), 1)
        self.assertQuerysetEqual(
            qs,
            ['<Plaything: p2>']
        )
class DisjunctiveFilterTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=cls.n1)

    def test_ticket7872(self):
        """A disjunction spanning a missing related row keeps the base row."""
        # For the purposes of this regression test, it's important that there
        # is no Join object related to the LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
        disjunction = Q(data='first') | Q(join__b__data='second')
        self.assertQuerysetEqual(
            LeafA.objects.filter(disjunction),
            ['<LeafA: first>']
        )

    def test_ticket8283(self):
        """Applying filter() after OR-combining two querysets works correctly."""
        by_note = ExtraInfo.objects.filter(note=self.n1)
        by_info = ExtraInfo.objects.filter(info='e2')
        for combined in (by_note | by_info, by_info | by_note):
            self.assertQuerysetEqual(
                combined.filter(note=self.n1),
                ['<ExtraInfo: e1>']
            )
class Queries6Tests(TestCase):
    """Assorted regressions: parallel iterators, nested __in subqueries, and
    exclude() crossing multi-valued relations."""
    @classmethod
    def setUpTestData(cls):
        # Tag tree: t1 -> (t2, t3); t3 -> (t4, t5). t1/t2 carry a category,
        # t3..t5 deliberately do not (their category is NULL).
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        t4 = Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ann1 = Annotation.objects.create(name='a1', tag=t1)
        ann1.notes.add(n1)
        Annotation.objects.create(name='a2', tag=t4)
    def test_parallel_iterators(self):
        # Test that parallel iterators work: two iterators over the same
        # queryset advance independently while sharing the result cache.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), '<Tag: t1>')
        self.assertEqual(repr(next(i1)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t1>')
        self.assertEqual(repr(next(i2)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t3>')
        self.assertEqual(repr(next(i1)), '<Tag: t3>')
        # Truthiness of an empty queryset must be stable across repeated calls.
        qs = X.objects.all()
        self.assertEqual(bool(qs), False)
        self.assertEqual(bool(qs), False)
    def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of constructing the
        # SQL (so we should see a nested query here, indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )
    def test_tickets_8921_9188(self):
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # pre-emptively discovered cases).
        self.assertQuerysetEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(children=None),
            ['<Tag: t1>', '<Tag: t3>']
        )
        # This example is tricky because the parent could be NULL, so only checking
        # parents with annotations omits some results (tag t1, in this case).
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )
        # The annotation->tag link is single values and tag->children links is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
        self.assertQuerysetEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            ['<Annotation: a2>']
        )
        # Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL.
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            ['<Annotation: a1>']
        )
    def test_ticket3739(self):
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())
    def test_ticket_11320(self):
        # Chained exclude() calls over the same relation must reuse one join.
        qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
class RawQueriesTests(TestCase):
    def setUp(self):
        Note.objects.create(note='n1', misc='foo', id=1)

    def test_ticket14729(self):
        """repr() of a RawQuerySet interpolates its params into the SQL (#14729)."""
        cases = [
            ("SELECT * FROM queries_note WHERE note = %s",
             ['n1'],
             "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>"),
            ("SELECT * FROM queries_note WHERE note = %s and misc = %s",
             ['n1', 'foo'],
             "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>"),
        ]
        for sql, params, expected in cases:
            self.assertEqual(repr(Note.objects.raw(sql, params=params)), expected)
class GeneratorExpressionTests(TestCase):
    def test_ticket10432(self):
        """An empty generator expression is a legal rvalue for __in (#10432)."""
        empty_gen = (x for x in ())
        self.assertQuerysetEqual(Note.objects.filter(pk__in=empty_gen), [])
class ComparisonTests(TestCase):
    def setUp(self):
        self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        extra = ExtraInfo.objects.create(info='e1', note=self.n1)
        self.a2 = Author.objects.create(name='a2', num=2002, extra=extra)

    def test_ticket8597(self):
        """Case-insensitive lookups match regardless of case (#8597)."""
        for item_name in ("a_b", "x%y"):
            Item.objects.create(
                name=item_name, created=datetime.datetime.now(),
                creator=self.a2, note=self.n1,
            )
        cases = [
            ('iexact', "A_b", '<Item: a_b>'),
            ('iexact', "x%Y", '<Item: x%y>'),
            ('istartswith', "A_b", '<Item: a_b>'),
            ('iendswith', "A_b", '<Item: a_b>'),
        ]
        for lookup, needle, expected in cases:
            self.assertQuerysetEqual(
                Item.objects.filter(**{'name__%s' % lookup: needle}),
                [expected]
            )
class ExistsSql(TestCase):
    def test_exists(self):
        """
        QuerySet.exists() must issue exactly one query that selects no real
        model columns.
        """
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertFalse(Tag.objects.exists())
        # Ok - so the exist query worked - but did it include too many columns?
        self.assertEqual(len(captured_queries), 1)
        # BUG FIX: CaptureQueriesContext yields dicts; the statement text is
        # under the 'sql' key. The original passed the dict itself to
        # assertNotIn(), which only inspects the dict's keys, so both
        # assertions were vacuous.
        qstr = captured_queries[0]['sql']
        # Renamed from id/name to avoid shadowing the builtins.
        id_quoted = connection.ops.quote_name('id')
        name_quoted = connection.ops.quote_name('name')
        self.assertNotIn(id_quoted, qstr)
        self.assertNotIn(name_quoted, qstr)
    def test_ticket_18414(self):
        """exists() works on distinct() querysets and on slices of them (#18414)."""
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.exists())
        self.assertTrue(Article.objects.distinct().exists())
        self.assertTrue(Article.objects.distinct()[1:3].exists())
        self.assertFalse(Article.objects.distinct()[1:1].exists())
    @unittest.skipUnless(connection.features.can_distinct_on_fields,
                         'Uses distinct(fields)')
    def test_ticket_18414_distinct_on(self):
        """exists() works with distinct(*fields) and slicing (#18414)."""
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.distinct('name').exists())
        self.assertTrue(Article.objects.distinct('name')[1:2].exists())
        self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
    """
    Tests for the Queryset.ordered attribute.

    NOTE(review): these tests only inspect query construction and never
    evaluate a queryset, which is presumably why plain unittest.TestCase
    (no DB transaction wrapping) is used here -- confirm before changing.
    """
    def test_no_default_or_explicit_ordering(self):
        self.assertEqual(Annotation.objects.all().ordered, False)
    def test_cleared_default_ordering(self):
        # Tag reports ordered=True without an explicit order_by(), implying a
        # default Meta.ordering; a bare order_by() clears it.
        self.assertEqual(Tag.objects.all().ordered, True)
        self.assertEqual(Tag.objects.all().order_by().ordered, False)
    def test_explicit_ordering(self):
        self.assertEqual(Annotation.objects.all().order_by('id').ordered, True)
    def test_order_by_extra(self):
        self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True)
    def test_annotated_ordering(self):
        # Annotating does not make a queryset ordered; ordering by the
        # annotation does.
        qs = Annotation.objects.annotate(num_notes=Count('notes'))
        self.assertEqual(qs.ordered, False)
        self.assertEqual(qs.order_by('num_notes').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries')
class SubqueryTests(TestCase):
    """Sliced querysets used as __in subqueries (needs backend support)."""
    @classmethod
    def setUpTestData(cls):
        # Explicit PKs 1..4 so slice boundaries below are predictable.
        DumbCategory.objects.create(id=1)
        DumbCategory.objects.create(id=2)
        DumbCategory.objects.create(id=3)
        DumbCategory.objects.create(id=4)
    def test_ordered_subselect(self):
        "Subselects honor any manual ordering"
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
        self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})
    def test_slice_subquery_and_query(self):
        """
        Slice a query that has a sliced subquery
        """
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
        self.assertEqual({x.id for x in query}, {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
        self.assertEqual({x.id for x in query}, {3})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
        self.assertEqual({x.id for x in query}, {2})
    def test_related_sliced_subquery(self):
        """
        Related objects constraints can safely contain sliced subqueries.
        refs #22434
        """
        generic = NamedCategory.objects.create(id=5, name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', category=generic)
        ManagedModel.objects.create(data='mm1', tag=t1, public=True)
        mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)
        query = ManagedModel.normal_manager.filter(
            tag__in=Tag.objects.order_by('-id')[:1]
        )
        self.assertEqual({x.id for x in query}, {mm2.id})
    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        # Each delete removes the current highest-id row(s) per the slice.
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
class CloneTests(TestCase):
    """Queryset cloning: reuse of evaluated querysets and deepcopy hygiene."""
    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()
        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query cache
        list(n_list)
        # Use the note queryset in a query, and evaluate
        # that query in a way that involves cloning.
        self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
    def test_no_model_options_cloning(self):
        """
        Test that cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        # Temporarily plant a __deepcopy__ that fails the test if Model
        # options are ever deep-copied while cloning; restore it in finally.
        opts_class = type(Note._meta)
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
        try:
            Note.objects.filter(pk__lte=F('pk') + 1).all()
        finally:
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
    def test_no_fields_cloning(self):
        """
        Test that cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        # Same trap as above, but on the field class instead of Options.
        opts_class = type(Note._meta.get_field("misc"))
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
        try:
            Note.objects.filter(note=F('misc')).all()
        finally:
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(TestCase):
    def test_emptyqueryset_values(self):
        """#14366 -- values() on an empty QuerySet clones without error."""
        qs = Number.objects.none().values('num').order_by('num')
        self.assertQuerysetEqual(qs, [])

    def test_values_subquery(self):
        """Empty values()/values_list() querysets are valid __in rvalues."""
        inner_querysets = (
            Number.objects.none().values("pk"),
            Number.objects.none().values_list("pk"),
        )
        for inner in inner_querysets:
            self.assertQuerysetEqual(Number.objects.filter(pk__in=inner), [])

    def test_ticket_19151(self):
        """#19151 -- values()/values_list() on none() return empty querysets."""
        empty = Author.objects.none()
        self.assertQuerysetEqual(empty.values(), [])
        self.assertQuerysetEqual(empty.values_list(), [])
class ValuesQuerysetTests(BaseQuerysetTest):
    """Interaction of values()/values_list() with extra() selects and ordering."""
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=72)
        # Identity transform for assertQuerysetEqual so results are compared
        # as plain dicts/tuples rather than via repr().
        cls.identity = staticmethod(lambda x: x)
    def test_flat_values_list(self):
        qs = Number.objects.values_list("num")
        qs = qs.values_list("num", flat=True)
        self.assertValueQuerysetEqual(qs, [72])
    def test_extra_values(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select=OrderedDict([('value_plus_x', 'num+%s'),
                                                      ('value_minus_x', 'num-%s')]),
                                  select_params=(1, 2))
        qs = qs.order_by('value_minus_x')
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
    def test_extra_values_order_twice(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
        qs = qs.order_by('value_minus_one').order_by('value_plus_one')
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
    def test_extra_values_order_multiple(self):
        # Postgres doesn't allow constants in order by, so check for that.
        qs = Number.objects.extra(select={
            'value_plus_one': 'num+1',
            'value_minus_one': 'num-1',
            'constant_value': '1'
        })
        qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
    def test_extra_values_order_in_extra(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(
            select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
            order_by=['value_minus_one'])
        qs = qs.values('num')
        # BUG FIX: the original built the queryset but never asserted
        # anything, so this test passed vacuously. Assert the same result
        # shape as the sibling tests.
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
    def test_extra_select_params_values_order_in_extra(self):
        # testing for 23259 issue
        qs = Number.objects.extra(
            select={'value_plus_x': 'num+%s'},
            select_params=[1],
            order_by=['value_plus_x'])
        qs = qs.filter(num=72)
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
    def test_extra_multiple_select_params_values_order_by(self):
        # testing for 23259 issue
        qs = Number.objects.extra(select=OrderedDict([('value_plus_x', 'num+%s'),
                                                      ('value_minus_x', 'num-%s')]),
                                  select_params=(72, 72))
        qs = qs.order_by('value_minus_x')
        qs = qs.filter(num=1)
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [], self.identity)
    def test_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num')
        self.assertQuerysetEqual(qs, [(72,)], self.identity)
    def test_flat_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num', flat=True)
        self.assertQuerysetEqual(qs, [72], self.identity)
    def test_field_error_values_list(self):
        # see #23443
        with self.assertRaisesMessage(FieldError,
                "Cannot resolve keyword %r into field."
                " Join on 'name' not permitted." % 'foo'):
            Tag.objects.values_list('name__foo')
class QuerySetSupportsPythonIdioms(TestCase):
    """Querysets support indexing, slicing, len(), and &/| combination."""
    @classmethod
    def setUpTestData(cls):
        # Seven articles "Article 1".."Article 7", all with the same date.
        some_date = datetime.datetime(2014, 5, 16, 12, 1)
        for i in range(1, 8):
            Article.objects.create(
                name="Article {}".format(i), created=some_date)
    def get_ordered_articles(self):
        # Deterministic base queryset for the slicing tests below.
        return Article.objects.all().order_by('name')
    def test_can_get_items_using_index_and_slice_notation(self):
        self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
        self.assertQuerysetEqual(self.get_ordered_articles()[1:3],
            ["<Article: Article 2>", "<Article: Article 3>"])
    def test_slicing_with_steps_can_be_used(self):
        self.assertQuerysetEqual(self.get_ordered_articles()[::2],
            ["<Article: Article 1>",
             "<Article: Article 3>",
             "<Article: Article 5>",
             "<Article: Article 7>"])
    @unittest.skipUnless(six.PY2, "Python 2 only -- Python 3 doesn't have longs.")
    def test_slicing_works_with_longs(self):
        self.assertEqual(self.get_ordered_articles()[long(0)].name, 'Article 1')
        self.assertQuerysetEqual(self.get_ordered_articles()[long(1):long(3)],
            ["<Article: Article 2>", "<Article: Article 3>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[::long(2)],
            ["<Article: Article 1>",
             "<Article: Article 3>",
             "<Article: Article 5>",
             "<Article: Article 7>"])
        # And can be mixed with ints.
        self.assertQuerysetEqual(self.get_ordered_articles()[1:long(3)],
            ["<Article: Article 2>", "<Article: Article 3>"])
    def test_slicing_without_step_is_lazy(self):
        # A plain slice must not hit the database until evaluated.
        with self.assertNumQueries(0):
            self.get_ordered_articles()[0:5]
    def test_slicing_with_tests_is_not_lazy(self):
        # A stepped slice must be evaluated immediately (one query).
        with self.assertNumQueries(1):
            self.get_ordered_articles()[0:5:3]
    def test_slicing_can_slice_again_after_slicing(self):
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][0:2],
            ["<Article: Article 1>",
             "<Article: Article 2>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:],
            ["<Article: Article 5>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])
        # Some more tests!
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][0:2],
            ["<Article: Article 3>", "<Article: Article 4>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][:2],
            ["<Article: Article 3>", "<Article: Article 4>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3],
            ["<Article: Article 5>"])
        # Using an offset without a limit is also possible.
        self.assertQuerysetEqual(self.get_ordered_articles()[5:],
            ["<Article: Article 6>",
             "<Article: Article 7>"])
    def test_slicing_cannot_filter_queryset_once_sliced(self):
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Cannot filter a query once a slice has been taken.",
            Article.objects.all()[0:5].filter,
            id=1,
        )
    def test_slicing_cannot_reorder_queryset_once_sliced(self):
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Cannot reorder a query once a slice has been taken.",
            Article.objects.all()[0:5].order_by,
            'id',
        )
    def test_slicing_cannot_combine_queries_once_sliced(self):
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Cannot combine queries once a slice has been taken.",
            lambda: Article.objects.all()[0:1] & Article.objects.all()[4:5]
        )
    def test_slicing_negative_indexing_not_supported_for_single_element(self):
        """hint: inverting your ordering might do what you need"""
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Negative indexing is not supported.",
            lambda: Article.objects.all()[-1]
        )
    def test_slicing_negative_indexing_not_supported_for_range(self):
        """hint: inverting your ordering might do what you need"""
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Negative indexing is not supported.",
            lambda: Article.objects.all()[0:-5]
        )
    def test_can_get_number_of_items_in_queryset_using_standard_len(self):
        self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)
    def test_can_combine_queries_using_and_and_or_operators(self):
        s1 = Article.objects.filter(name__exact='Article 1')
        s2 = Article.objects.filter(name__exact='Article 2')
        self.assertQuerysetEqual((s1 | s2).order_by('name'),
            ["<Article: Article 1>",
             "<Article: Article 2>"])
        self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(BaseQuerysetTest):
    """Degenerate slices ([:0], re-slicing an empty slice) must behave sanely."""
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=1)
        Number.objects.create(num=2)
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        Article.objects.create(name='three', created=datetime.datetime.now())
        Article.objects.create(name='four', created=datetime.datetime.now())
    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        # latest() mutates the query, which must be refused after slicing.
        self.assertRaisesMessage(
            AssertionError,
            'Cannot change a query once a slice has been taken.',
            Article.objects.all()[:0].latest, 'created'
        )
    def test_empty_resultset_sql(self):
        # ticket #12192: a provably-empty slice must not hit the database.
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
class EscapingTests(TestCase):
    def test_ticket_7302(self):
        """SQL-reserved column names (here: "order") are quoted correctly (#7302)."""
        ReservedName.objects.create(name='a', order=42)
        ReservedName.objects.create(name='b', order=37)
        expected = ['<ReservedName: b>', '<ReservedName: a>']
        self.assertQuerysetEqual(
            ReservedName.objects.all().order_by('order'),
            expected
        )
        self.assertQuerysetEqual(
            ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
            expected
        )
class ToFieldTests(TestCase):
    """Lookups through a ForeignKey using to_field, forward and reverse."""

    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        expected_meals = {
            Eaten.objects.create(food=apple, meal="lunch"),
            Eaten.objects.create(food=pear, meal="dinner"),
        }
        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            expected_meals,
        )

    def test_reverse_in(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        meal_a = Eaten.objects.create(food=apple, meal="lunch")
        meal_b = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[meal_a, meal_b])),
            {apple, pear}
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        expected_meals = {
            Eaten.objects.create(food=apple, meal="lunch"),
            Eaten.objects.create(food=apple, meal="dinner"),
        }
        self.assertEqual(
            set(Eaten.objects.filter(food=apple)),
            expected_meals
        )

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        meal = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Food.objects.filter(eaten=meal)),
            {apple}
        )

    def test_recursive_fk(self):
        root = Node.objects.create(num=42)
        child = Node.objects.create(num=1, parent=root)
        self.assertEqual(
            list(Node.objects.filter(parent=root)),
            [child]
        )

    def test_recursive_fk_reverse(self):
        root = Node.objects.create(num=42)
        child = Node.objects.create(num=1, parent=root)
        self.assertEqual(
            list(Node.objects.filter(node=child)),
            [root]
        )
class ConditionalTests(BaseQuerysetTest):
    """Tests whose execution depend on different environment conditions like
    Python version or DB backend features"""
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)
    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        self.assertRaisesMessage(
            FieldError,
            'Infinite loop caused by ordering.',
            lambda: list(LoopX.objects.all())  # Force queryset evaluation with list()
        )
        self.assertRaisesMessage(
            FieldError,
            'Infinite loop caused by ordering.',
            lambda: list(LoopZ.objects.all())  # Force queryset evaluation with list()
        )
        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)
        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'),
            []
        )
    # When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
    # portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        # Exactly one ORDER BY clause, immediately followed by NULL.
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
    # Sqlite 3 does not support passing in more than 1000 parameters except by
    # changing a parameter at compilation time.
    @skipUnlessDBFeature('supports_1000_query_parameters')
    def test_ticket14244(self):
        # Test that the "in" lookup works with lists of 1000 items or more.
        # The numbers amount is picked to force three different IN batches
        # for Oracle, yet to be less than 2100 parameter limit for MSSQL.
        numbers = list(range(2050))
        Number.objects.all().delete()
        Number.objects.bulk_create(Number(num=num) for num in numbers)
        self.assertEqual(
            Number.objects.filter(num__in=numbers[:1000]).count(),
            1000
        )
        self.assertEqual(
            Number.objects.filter(num__in=numbers[:1001]).count(),
            1001
        )
        self.assertEqual(
            Number.objects.filter(num__in=numbers[:2000]).count(),
            2000
        )
        self.assertEqual(
            Number.objects.filter(num__in=numbers).count(),
            len(numbers)
        )
class UnionTests(TestCase):
    """
    Tests for the union of two querysets. Bug #12252.

    BUG FIX: this class previously subclassed unittest.TestCase, but
    setUpTestData is a django.test.TestCase-only classmethod hook -- under
    unittest.TestCase it is never invoked, so no fixture rows were created
    and every union test ran against empty tables. Subclassing Django's
    TestCase makes the fixture actually load (and wraps tests in
    transactions).
    """
    @classmethod
    def setUpTestData(cls):
        objectas = []
        objectbs = []
        objectcs = []
        a_info = ['one', 'two', 'three']
        for name in a_info:
            o = ObjectA(name=name)
            o.save()
            objectas.append(o)
        b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
        for name, number, objecta in b_info:
            o = ObjectB(name=name, num=number, objecta=objecta)
            o.save()
            objectbs.append(o)
        c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
        for name, objecta, objectb in c_info:
            o = ObjectC(name=name, objecta=objecta, objectb=objectb)
            o.save()
            objectcs.append(o)
    def check_union(self, model, Q1, Q2):
        # OR-ing two filtered querysets must match filtering on Q1 | Q2,
        # regardless of operand order. (Renamed from `filter` to avoid
        # shadowing the builtin.)
        objects_filter = model.objects.filter
        self.assertEqual(set(objects_filter(Q1) | objects_filter(Q2)), set(objects_filter(Q1 | Q2)))
        self.assertEqual(set(objects_filter(Q2) | objects_filter(Q1)), set(objects_filter(Q1 | Q2)))
    def test_A_AB(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)
    def test_A_AB2(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux', objectb__num=2)
        self.check_union(ObjectA, Q1, Q2)
    def test_AB_ACB(self):
        Q1 = Q(objectb__name='deux')
        Q2 = Q(objectc__objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)
    def test_BAB_BAC(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__name='ein')
        self.check_union(ObjectB, Q1, Q2)
    def test_BAB_BACB(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)
    def test_BA_BCA__BAB_BAC_BCA(self):
        Q1 = Q(objecta__name='one', objectc__objecta__name='two')
        Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
    def test_no_extra_params(self):
        # Ticket #17056 -- affects Oracle
        # A model whose only field is the auto PK must be creatable with no
        # explicit parameters after the bulk-insert refactoring.
        try:
            DumbCategory.objects.create()
        except TypeError:
            self.fail("Creation of an instance of a model with only the PK field shouldn't error out after bulk insert refactoring (#17056)")
class ExcludeTests(TestCase):
    """exclude() across to_field FKs, M2M-through tables, and multi-valued relations."""
    @classmethod
    def setUpTestData(cls):
        f1 = Food.objects.create(name='apples')
        Food.objects.create(name='oranges')
        Eaten.objects.create(food=f1, meal='dinner')
        j1 = Job.objects.create(name='Manager')
        r1 = Responsibility.objects.create(description='Playing golf')
        j2 = Job.objects.create(name='Programmer')
        r2 = Responsibility.objects.create(description='Programming')
        JobResponsibilities.objects.create(job=j1, responsibility=r1)
        JobResponsibilities.objects.create(job=j2, responsibility=r2)
    def test_to_field(self):
        # exclude() must work through relations that use to_field.
        self.assertQuerysetEqual(
            Food.objects.exclude(eaten__meal='dinner'),
            ['<Food: oranges>'])
        self.assertQuerysetEqual(
            Job.objects.exclude(responsibilities__description='Playing golf'),
            ['<Job: Programmer>'])
        self.assertQuerysetEqual(
            Responsibility.objects.exclude(jobs__name='Manager'),
            ['<Responsibility: Programming>'])
    def test_ticket14511(self):
        alex = Person.objects.get_or_create(name='Alex')[0]
        jane = Person.objects.get_or_create(name='Jane')[0]
        oracle = Company.objects.get_or_create(name='Oracle')[0]
        google = Company.objects.get_or_create(name='Google')[0]
        microsoft = Company.objects.get_or_create(name='Microsoft')[0]
        intel = Company.objects.get_or_create(name='Intel')[0]
        def employ(employer, employee, title):
            # Helper: idempotently create one Employment row.
            Employment.objects.get_or_create(employee=employee, employer=employer, title=title)
        employ(oracle, alex, 'Engineer')
        employ(oracle, alex, 'Developer')
        employ(google, alex, 'Engineer')
        employ(google, alex, 'Manager')
        employ(microsoft, alex, 'Manager')
        employ(intel, alex, 'Manager')
        employ(microsoft, jane, 'Developer')
        employ(intel, jane, 'Manager')
        # filter() keeps employers where ANY employment matches ...
        alex_tech_employers = alex.employers.filter(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertQuerysetEqual(alex_tech_employers, [google, oracle], lambda x: x)
        # ... while exclude() removes employers where ANY employment matches,
        # so google appears in both results (Alex is both Engineer and
        # Manager there).
        alex_nontech_employers = alex.employers.exclude(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertQuerysetEqual(alex_nontech_employers, [google, intel, microsoft], lambda x: x)
class ExcludeTest17600(TestCase):
    """
    Some regressiontests for ticket #17600. Some of these likely duplicate
    other existing tests.
    """
    @classmethod
    def setUpTestData(cls):
        # Create a few Orders.
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)
        # Create some OrderItems for the first order with homogeneous
        # status_id values
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
        # Create some OrderItems for the second order with heterogeneous
        # status_id values
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order, none with status 1
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1),
            ['<Order: 3>'])
    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1).distinct(),
            ['<Order: 3>'])
    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            ['<Order: 3>'])
    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)),
            ['<Order: 3>'])
    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))
    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))
    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those items not having any orders at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(~Q(items__status=1)).distinct(),
            ['<Order: 1>'])
class Exclude15786(TestCase):
    """Regression test for #15786"""
    def test_ticket15786(self):
        # Two categories, each with its own one-to-one counterpart, and
        # a single relationship row connecting them.
        first_cat = SimpleCategory.objects.create(name='c1')
        second_cat = SimpleCategory.objects.create(name='c2')
        OneToOneCategory.objects.create(category=first_cat)
        OneToOneCategory.objects.create(category=second_cat)
        relationship = CategoryRelationship.objects.create(
            first=first_cat, second=second_cat)
        # Excluding rows whose two one-to-one counterparts coincide must
        # still return the relationship (they differ here).
        excluded = CategoryRelationship.objects.exclude(
            first__onetoonecategory=F('second__onetoonecategory'))
        self.assertEqual(excluded.get(), relationship)
class NullInExcludeTest(TestCase):
    """
    Tests for exclude(...__in=...) behavior when the column contains NULLs.

    The fixture holds one named row ('i1') and one row whose name is NULL
    (or '' on backends that store '' as NULL).
    """
    @classmethod
    def setUpTestData(cls):
        NullableName.objects.create(name='i1')
        # Second row is created with name unset (NULL).
        NullableName.objects.create()
    def test_null_in_exclude_qs(self):
        # Oracle stores '' as NULL, so the "null" row reads back as ''.
        none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
        # Excluding an empty list excludes nothing.
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[]),
            ['i1', none_val], attrgetter('name'))
        # The NULL-named row survives exclusion of concrete values.
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i1']),
            [none_val], attrgetter('name'))
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i3']),
            ['i1', none_val], attrgetter('name'))
        inner_qs = NullableName.objects.filter(name='i1').values_list('name')
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=inner_qs),
            [none_val], attrgetter('name'))
        # Check that the inner queryset wasn't executed - it should be turned
        # into subquery above
        self.assertIs(inner_qs._result_cache, None)
    @unittest.expectedFailure
    def test_col_not_in_list_containing_null(self):
        """
        The following case is not handled properly because
        SQL's COL NOT IN (list containing null) handling is too weird to
        abstract away.
        """
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[None]),
            ['i1'], attrgetter('name'))
    def test_double_exclude(self):
        # Double negation (~~Q) must behave like no negation at all...
        self.assertEqual(
            list(NullableName.objects.filter(~~Q(name='i1'))),
            list(NullableName.objects.filter(Q(name='i1'))))
        # ...and must not leave an IS NOT NULL check in the generated SQL.
        self.assertNotIn(
            'IS NOT NULL',
            str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
    """
    Test that filtering on non-null character fields works as expected.
    The reason for these tests is that Oracle treats '' as NULL, and this
    can cause problems in query construction. Refs #17957.
    """
    @classmethod
    def setUpTestData(cls):
        # The only fixture row has an empty-string name (NULL on Oracle).
        cls.nc = NamedCategory.objects.create(name='')
    def test_direct_exclude(self):
        # The empty-named row must survive an exclude on other values.
        self.assertQuerysetEqual(
            NamedCategory.objects.exclude(name__in=['nonexisting']),
            [self.nc.pk], attrgetter('pk')
        )
    def test_joined_exclude(self):
        # Same exclusion, but reached through a join from the parent model.
        self.assertQuerysetEqual(
            DumbCategory.objects.exclude(namedcategory__name__in=['nonexisting']),
            [self.nc.pk], attrgetter('pk')
        )
    def test_21001(self):
        # Excluding name='' must drop the empty-named fixture row and keep
        # only the non-empty one. Refs #21001.
        foo = NamedCategory.objects.create(name='foo')
        self.assertQuerysetEqual(
            NamedCategory.objects.exclude(name=''),
            [foo.pk], attrgetter('pk')
        )
class ProxyQueryCleanupTest(TestCase):
    def test_evaluated_proxy_count(self):
        """
        Test that generating the query string doesn't alter the query's state
        in irreversible ways. Refs #18248.
        """
        ProxyCategory.objects.create()
        queryset = ProxyCategory.objects.all()
        self.assertEqual(queryset.count(), 1)
        # Force SQL generation between the two counts; the second count
        # must still see the same result.
        str(queryset.query)
        self.assertEqual(queryset.count(), 1)
class WhereNodeTest(TestCase):
    """
    Unit tests for WhereNode SQL generation, covering how empty nodes and
    NothingNode children interact with AND/OR connectors and negation.
    """
    class DummyNode(object):
        # Stand-in leaf node that always compiles to the SQL literal 'dummy'
        # with no parameters.
        def as_sql(self, compiler, connection):
            return 'dummy', []
    class MockCompiler(object):
        # Minimal compiler: delegates compile() to the node and quotes
        # names when called directly.
        def compile(self, node):
            return node.as_sql(self, connection)
        def __call__(self, name):
            return connection.ops.quote_name(name)
    def test_empty_full_handling_conjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        # A lone NothingNode makes an AND node match nothing...
        w = WhereNode(children=[NothingNode()])
        self.assertRaises(EmptyResultSet, w.as_sql, compiler, connection)
        # ...and its negation matches everything (empty SQL).
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
        # In an AND, one NothingNode child empties the whole node.
        w = WhereNode(children=[NothingNode(), self.DummyNode()])
        self.assertRaises(EmptyResultSet, w.as_sql, compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
    def test_empty_full_handling_disjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        w = WhereNode(children=[NothingNode()], connector='OR')
        self.assertRaises(EmptyResultSet, w.as_sql, compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
        # In an OR, a NothingNode child is simply dropped.
        w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))
    def test_empty_nodes(self):
        compiler = WhereNodeTest.MockCompiler()
        empty_w = WhereNode()
        # Empty children produce empty SQL (match everything)...
        w = WhereNode(children=[empty_w, empty_w])
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        # ...so the negation matches nothing.
        w.negate()
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.connector = 'OR'
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        # Empty node OR nothing-node: the empty node wins (full match).
        w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        # Empty node AND nothing-node: the nothing-node wins (no match).
        w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
class IteratorExceptionsTest(TestCase):
    def test_iter_exceptions(self):
        deferred = ExtraInfo.objects.only('author')
        with self.assertRaises(AttributeError):
            list(deferred)
    def test_invalid_qs_list(self):
        # Regression for #19895 - iterating an invalid queryset a second
        # time raises the error again instead of silently succeeding.
        invalid = Article.objects.order_by('invalid_column')
        with self.assertRaises(FieldError):
            list(invalid)
        with self.assertRaises(FieldError):
            list(invalid)
class NullJoinPromotionOrTest(TestCase):
    """
    Tests for promotion (INNER -> LEFT OUTER) and demotion of joins when
    filters involve nullable relations, negation, and OR conditions.
    """
    @classmethod
    def setUpTestData(cls):
        # a1 has only d set; a2 has both b (-> c) and d set.
        cls.d1 = ModelD.objects.create(name='foo')
        d2 = ModelD.objects.create(name='bar')
        cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
        c = ModelC.objects.create(name='c')
        b = ModelB.objects.create(name='b', c=c)
        cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)
    def test_ticket_17886(self):
        # The first Q-object is generating the match, the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
        q_obj = (
            Q(d__name='foo') |
            Q(b__name='foo') |
            Q(b__c__name='foo')
        )
        qset = ModelA.objects.filter(q_obj)
        self.assertEqual(list(qset), [self.a1])
        # We generate one INNER JOIN to D. The join is direct and not nullable
        # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
        # for the b->c join, as a->b is nullable.
        self.assertEqual(str(qset.query).count('INNER JOIN'), 1)
    def test_isnull_filter_promotion(self):
        # isnull=True needs LEFT OUTER JOIN (NULL rows must be kept)...
        qs = ModelA.objects.filter(Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        # ...while its negation can use INNER JOIN.
        qs = ModelA.objects.filter(~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        # Double negation behaves like no negation.
        qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        qs = ModelA.objects.filter(~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
    def test_null_join_demotion(self):
        # An ANDed isnull=False demotes the join back to INNER,
        # regardless of the order of the two conditions.
        qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
        self.assertIn(' INNER JOIN ', str(qs.query))
        # ORed conditions keep the LEFT OUTER JOIN.
        qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
    def test_ticket_21366(self):
        n = Note.objects.create(note='n', misc='m')
        e = ExtraInfo.objects.create(info='info', note=n)
        a = Author.objects.create(name='Author1', num=1, extra=e)
        Ranking.objects.create(rank=1, author=a)
        r1 = Report.objects.create(name='Foo', creator=a)
        r2 = Report.objects.create(name='Bar')
        Report.objects.create(name='Bar', creator=a)
        qs = Report.objects.filter(
            Q(creator__ranking__isnull=True) |
            Q(creator__ranking__rank=1, name='Foo')
        )
        # Both joins on the isnull path must be LEFT OUTER, and no extra
        # joins are generated.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count(' JOIN '), 2)
        self.assertQuerysetEqual(
            qs.order_by('name'), [r2, r1], lambda x: x)
    def test_ticket_21748(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        i3 = Identifier.objects.create(name='i3')
        Program.objects.create(identifier=i1)
        Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # filter(a=None, b=None) matches rows with neither relation...
        self.assertQuerysetEqual(
            Identifier.objects.filter(program=None, channel=None),
            [i3], lambda x: x)
        # ...and its exclude() counterpart matches rows with at least one.
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program=None, channel=None).order_by('name'),
            [i1, i2], lambda x: x)
    def test_ticket_21748_double_negated_and(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
        qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
        qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))
    def test_ticket_21748_double_negated_or(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Test OR + doubleneg. The expected result is that channel is LOUTER
        # joined, program INNER joined
        qs1_filter = Identifier.objects.filter(
            Q(program__id=p2.id, channel__id=c1.id)
            | Q(program__id=p1.id)
        ).order_by('pk')
        qs1_doubleneg = Identifier.objects.exclude(
            ~Q(Q(program__id=p2.id, channel__id=c1.id)
               | Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))
    def test_ticket_21748_complex_filter(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case, one time in a way where each
        # NOT is pushed to lowest level in the boolean tree, and
        # another query where this isn't done.
        qs1 = Identifier.objects.filter(
            ~Q(~Q(program__id=p2.id, channel__id=c1.id)
               & Q(program__id=p1.id))).order_by('pk')
        qs2 = Identifier.objects.filter(
            Q(Q(program__id=p2.id, channel__id=c1.id)
              | ~Q(program__id=p1.id))).order_by('pk')
        self.assertQuerysetEqual(qs1, qs2, lambda x: x)
        self.assertEqual(str(qs1.query).count('JOIN'),
                         str(qs2.query).count('JOIN'))
        self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
        self.assertEqual(str(qs1.query).count('INNER JOIN'),
                         str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
    def test_reverse_trimming(self):
        # Check that we don't accidentally trim reverse joins - we can't know
        # if there is anything on the other side of the join, so trimming
        # reverse joins can't be done, ever.
        tag = Tag.objects.create()
        queryset = Tag.objects.filter(annotation__tag=tag.pk)
        # The reverse join must appear in the SQL...
        self.assertIn('INNER JOIN', str(queryset.query))
        # ...and with no Annotation rows present, nothing matches.
        self.assertEqual(list(queryset), [])
class JoinReuseTest(TestCase):
    """
    Test that the queries reuse joins sensibly (for example, direct joins
    are always reused).
    """
    def test_fk_reuse(self):
        # Two filters on the same forward FK path share one join.
        qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_select_related(self):
        # select_related() on an already-filtered FK adds no join.
        qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_annotation(self):
        # An aggregate over the same FK path reuses the filter's join.
        qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_disjunction(self):
        # ORed conditions on the same FK path share one join.
        qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_order_by(self):
        # Ordering by an already-filtered FK path adds no join.
        qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_revo2o_reuse(self):
        # Reverse one-to-one joins are reused like direct joins.
        qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_revfk_noreuse(self):
        # Reverse FK (multi-valued) joins are NOT reused across filter()
        # calls: each filter() targets a potentially different related row.
        qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
        self.assertEqual(str(qs.query).count('JOIN'), 2)
class DisjunctionPromotionTests(TestCase):
    """
    Tests for join promotion/demotion driven by ORed (disjunctive) filters:
    a join referenced only inside one arm of an OR must become a LEFT OUTER
    JOIN, while a join guaranteed by an ANDed filter can stay (or become
    again) an INNER JOIN.
    """
    def test_disjunction_promotion_select_related(self):
        # Fixed method name typo (was "test_disjuction_..."); test
        # discovery by the "test_" prefix is unaffected.
        fk1 = FK1.objects.create(f1='f1', f2='f2')
        basea = BaseA.objects.create(a=fk1)
        qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
        # Both lookups are by pk, so no join is needed for the filter.
        self.assertEqual(str(qs.query).count(' JOIN '), 0)
        qs = qs.select_related('a', 'b')
        # select_related on nullable FKs used in a disjunction must use
        # LEFT OUTER JOIN so non-matching rows aren't dropped.
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
        self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
        with self.assertNumQueries(1):
            self.assertQuerysetEqual(qs, [basea], lambda x: x)
            self.assertEqual(qs[0].a, fk1)
            self.assertIs(qs[0].b, None)
    def test_disjunction_promotion1(self):
        # Pre-existing join, add two ORed filters to the same join,
        # all joins can be INNER JOINS.
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        # Reverse the order of AND and OR filters.
        qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
    def test_disjunction_promotion2(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # Now we have two different joins in an ORed condition, these
        # must be OUTER joins. The pre-existing join should remain INNER.
        qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        # Reverse case.
        qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
    def test_disjunction_promotion3(self):
        qs = BaseA.objects.filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to use keep using INNER JOIN
        # even inside the ORed case. If the join to a__ returns nothing,
        # the ANDed filter for a__f2 can't be true.
        qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion3_demote(self):
        # This one needs demotion logic: the first filter causes a to be
        # outer joined, the second filter makes it inner join again.
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion4_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demote needed for the "a" join. It is marked as outer join by
        # above filter (even if it is trimmed away).
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion4(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion5_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to an
        # inner join even if it is trimmed.
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # So, now the a__f1 join doesn't need promotion.
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # But b__f1 does.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # Now the join to a is created as LOUTER
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion6(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # ANDed joins stay INNER; a later ORed pk filter doesn't demote them.
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
    def test_disjunction_promotion7(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # An AND nested inside one OR arm keeps its own joins promoted
        # or demoted according to the surrounding disjunction.
        qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion_fexpression(self):
        # F() expressions referencing a join inside a disjunction also
        # force promotion of that join.
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
class ManyToManyExcludeTest(TestCase):
    """Tests for exclude() across many-to-many relations."""
    def test_exclude_many_to_many(self):
        Identifier.objects.create(name='extra')
        program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
        channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
        channel.programs.add(program)
        # channel contains 'program1', so all Identifiers except that one
        # should be returned
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=channel).order_by('name'),
            ['<Identifier: channel>', '<Identifier: extra>']
        )
        # Excluding program__channel=None keeps only identifiers whose
        # program IS in some channel.
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=None).order_by('name'),
            ['<Identifier: program>']
        )
    def test_ticket_12823(self):
        pg3 = Page.objects.create(text='pg3')
        pg2 = Page.objects.create(text='pg2')
        pg1 = Page.objects.create(text='pg1')
        pa1 = Paragraph.objects.create(text='pa1')
        # NOTE(review): direct list assignment to the m2m attribute is the
        # old-style API supported by the Django version this file targets.
        pa1.page = [pg1, pg2]
        pa2 = Paragraph.objects.create(text='pa2')
        pa2.page = [pg2, pg3]
        pa3 = Paragraph.objects.create(text='pa3')
        ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
        ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
        ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
        b1 = Book.objects.create(title='b1', chapter=ch1)
        b2 = Book.objects.create(title='b2', chapter=ch2)
        b3 = Book.objects.create(title='b3', chapter=ch3)
        # The multi-level exclude must not use an IS NOT NULL check and
        # must keep books whose chapter has no 'pg1' page (incl. pa3,
        # which has no pages at all).
        q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
        self.assertNotIn('IS NOT NULL', str(q.query))
        self.assertEqual(len(q), 2)
        self.assertNotIn(b1, q)
        self.assertIn(b2, q)
        self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
    def test_ticket_19964(self):
        """
        Evaluating a queryset that embeds another queryset as a subquery
        must not mutate the embedded queryset. Refs #19964.
        """
        root = MyObject.objects.create(data='foo')
        root.parent = root
        root.save()
        child = MyObject.objects.create(data='bar', parent=root)
        self_parented = MyObject.objects.filter(parent=F('id'))
        non_roots = MyObject.objects.filter(
            parent__in=self_parented).exclude(parent=F('id'))
        self.assertEqual(list(self_parented), [root])
        # Evaluating the query that embeds self_parented as a subquery...
        self.assertEqual(list(non_roots), [child])
        # ...leaves the embedded queryset's results unchanged.
        self.assertEqual(list(self_parented), [root])
class Ticket20101Tests(TestCase):
    def test_ticket_20101(self):
        """
        Tests QuerySet ORed combining in exclude subquery case.
        """
        tag = Tag.objects.create(name='foo')
        ann_a = Annotation.objects.create(tag=tag, name='a1')
        ann_b = Annotation.objects.create(tag=tag, name='a2')
        ann_c = Annotation.objects.create(tag=tag, name='a3')
        note = Note.objects.create(note='foo', misc='bar')
        excluded = Note.objects.exclude(annotation__in=[ann_a, ann_b])
        included = Note.objects.filter(annotation__in=[ann_c])
        # The note matches the exclude() side but not the filter() side...
        self.assertIn(note, excluded)
        self.assertNotIn(note, included)
        # ...and must still match the OR of the two querysets.
        self.assertIn(note, (excluded | included))
class EmptyStringPromotionTests(TestCase):
    def test_empty_string_promotion(self):
        # Filtering on '' should promote the join only on backends
        # (e.g. Oracle) where '' is stored as NULL.
        sql = str(RelatedObject.objects.filter(single__name='').query)
        if connection.features.interprets_empty_strings_as_nulls:
            self.assertIn('LEFT OUTER JOIN', sql)
        else:
            self.assertNotIn('LEFT OUTER JOIN', sql)
class ValuesSubqueryTests(TestCase):
    def test_values_in_subquery(self):
        # Check that if a values() queryset is used, then the given values
        # will be used instead of forcing use of the relation's field.
        # Negative pks keep them distinct from the auto-assigned item pks.
        o1 = Order.objects.create(id=-2)
        o2 = Order.objects.create(id=-1)
        oi1 = OrderItem.objects.create(order=o1, status=0)
        # Make oi1's status equal its own pk so exactly one item matches.
        oi1.status = oi1.pk
        oi1.save()
        OrderItem.objects.create(order=o2, status=0)
        # The query below should match o1 as it has related order_item
        # with id == status.
        self.assertQuerysetEqual(
            Order.objects.filter(items__in=OrderItem.objects.values_list('status')),
            [o1.pk], lambda x: x.pk)
class DoubleInSubqueryTests(TestCase):
    def test_double_subquery_in(self):
        """Nested __in lookups built from values_list() querysets work."""
        a_match = LeafA.objects.create(data='foo')
        a_other = LeafA.objects.create(data='bar')
        b_match = LeafB.objects.create(data='lfb1')
        b_other = LeafB.objects.create(data='lfb2')
        Join.objects.create(a=a_match, b=b_match)
        Join.objects.create(a=a_other, b=b_other)
        # Chain two subqueries: LeafA pks -> Join rows -> LeafB ids.
        matching_a_pks = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
        joined_b_ids = Join.objects.filter(a__in=matching_a_pks).values_list('b__id', flat=True)
        result = LeafB.objects.filter(pk__in=joined_b_ids)
        self.assertQuerysetEqual(result, [b_match], lambda x: x)
class Ticket18785Tests(TestCase):
    def test_ticket_18785(self):
        # Join trimming regression from ticket #18785: the combined
        # exclude/filter should need exactly one INNER JOIN and no
        # OUTER JOINs.
        qs = Item.objects.exclude(note__isnull=False)
        qs = qs.filter(name='something', creator__extra__isnull=True)
        qs = qs.order_by()
        sql = str(qs.query)
        self.assertEqual(sql.count('INNER JOIN'), 1)
        self.assertEqual(sql.count('OUTER JOIN'), 0)
class Ticket20788Tests(TestCase):
    def test_ticket_20788(self):
        # Book 1's chapter/paragraph chain includes `page`; book 2's chain
        # reaches a paragraph with no pages. Excluding on the page must
        # return only book 2. The extra no-op creates exercise rows that
        # are unrelated to either chain.
        Paragraph.objects.create()
        paragraph = Paragraph.objects.create()
        page = paragraph.page.create()
        chapter = Chapter.objects.create(paragraph=paragraph)
        Book.objects.create(chapter=chapter)
        paragraph2 = Paragraph.objects.create()
        Page.objects.create()
        chapter2 = Chapter.objects.create(paragraph=paragraph2)
        book2 = Book.objects.create(chapter=chapter2)
        sentences_not_in_pub = Book.objects.exclude(
            chapter__paragraph__page=page)
        self.assertQuerysetEqual(
            sentences_not_in_pub, [book2], lambda x: x)
class Ticket12807Tests(TestCase):
    def test_ticket_12807(self):
        """
        ~Q(pk__in=[]) is always true, so ORing it in must not disturb
        the rest of the condition. Refs #12807.
        """
        first = Paragraph.objects.create()
        second = Paragraph.objects.create()
        always_true = ~Q(pk__in=[])
        qs = Paragraph.objects.filter((Q(pk=second.pk) | always_true) & Q(pk=first.pk))
        self.assertQuerysetEqual(qs, [first], lambda x: x)
class RelatedLookupTypeTests(TestCase):
    """
    Passing the wrong model instance to a relational lookup raises
    ValueError, while proxy/child/parent instances are accepted.
    """
    # Expected ValueError message template for wrong-type lookups.
    error = 'Cannot query "%s": Must be "%s" instance.'
    @classmethod
    def setUpTestData(cls):
        cls.oa = ObjectA.objects.create(name="oa")
        # Proxy view of the same row as cls.oa.
        cls.poa = ProxyObjectA.objects.get(name="oa")
        cls.coa = ChildObjectA.objects.create(name="coa")
        # An unrelated model instance sharing cls.oa's pk value.
        cls.wrong_type = Order.objects.create(id=cls.oa.pk)
        cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
        ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
        cls.pob = ProxyObjectB.objects.all()
        ObjectC.objects.create(childobjecta=cls.coa)
    def test_wrong_type_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup.
        """
        # Passing incorrect object type
        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.get(objecta=self.wrong_type)
        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.wrong_type])
        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta=self.wrong_type)
        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
        # Passing an object of the class on which query is done.
        with self.assertRaisesMessage(ValueError,
                self.error % (self.ob, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
        with self.assertRaisesMessage(ValueError,
                self.error % (self.ob, ChildObjectA._meta.object_name)):
            ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
    def test_wrong_backward_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup for backward relations.
        """
        with self.assertRaisesMessage(ValueError,
                self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
        with self.assertRaisesMessage(ValueError,
                self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.exclude(objectb=self.oa)
        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.get(objectb=self.wrong_type)
    def test_correct_lookup(self):
        """
        When passing proxy model objects, child objects, or parent objects,
        lookups work fine.
        """
        out_a = ['<ObjectA: oa>', ]
        out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
        out_c = ['<ObjectC: >']
        # proxy model objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)
        # child objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'), out_b)
        # parent objects
        self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
        # Test for #23226
        with self.assertNumQueries(0):
            ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
    def test_values_queryset_lookup(self):
        """
        #23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field
        """
        self.assertQuerysetEqual(ObjectB.objects.filter(
            objecta__in=ObjectB.objects.all().values_list('pk')
        ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])
class Ticket14056Tests(TestCase):
    def test_ticket_14056(self):
        # Only s2 has a related PointerA, so ordering by the (nullable)
        # reverse relation places s1/s3 according to how the backend
        # sorts NULLs.
        s1 = SharedConnection.objects.create(data='s1')
        s2 = SharedConnection.objects.create(data='s2')
        s3 = SharedConnection.objects.create(data='s3')
        PointerA.objects.create(connection=s2)
        expected_ordering = (
            [s1, s3, s2] if connection.features.nulls_order_largest
            else [s2, s1, s3]
        )
        self.assertQuerysetEqual(
            SharedConnection.objects.order_by('-pointera__connection', 'pk'),
            expected_ordering, lambda x: x
        )
class Ticket20955Tests(TestCase):
    def test_ticket_20955(self):
        # select_related() through a multi-level reverse/forward chain must
        # produce the same objects as plain attribute traversal, with no
        # extra queries. Refs #20955.
        jack = Staff.objects.create(name='jackstaff')
        jackstaff = StaffUser.objects.create(staff=jack)
        jill = Staff.objects.create(name='jillstaff')
        jillstaff = StaffUser.objects.create(staff=jill)
        task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
        task_get = Task.objects.get(pk=task.pk)
        # Load data so that assertNumQueries doesn't complain about the get
        # version's queries.
        task_get.creator.staffuser.staff
        task_get.owner.staffuser.staff
        qs = Task.objects.select_related(
            'creator__staffuser__staff', 'owner__staffuser__staff')
        # Three joins per path (creator and owner) = 6 total.
        self.assertEqual(str(qs.query).count(' JOIN '), 6)
        task_select_related = qs.get(pk=task.pk)
        # All related objects were fetched eagerly: zero extra queries.
        with self.assertNumQueries(0):
            self.assertEqual(task_select_related.creator.staffuser.staff,
                             task_get.creator.staffuser.staff)
            self.assertEqual(task_select_related.owner.staffuser.staff,
                             task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
    def test_ticket_21203(self):
        """
        select_related() combined with defer() on a related model's field
        still loads the undeferred fields correctly. Refs #21203.
        """
        parent = Ticket21203Parent.objects.create(parent_bool=True)
        child = Ticket21203Child.objects.create(parent=parent)
        qs = Ticket21203Child.objects.select_related('parent')
        qs = qs.defer('parent__created')
        self.assertQuerysetEqual(qs, [child], lambda x: x)
        self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
    """Tests for join promotion interactions with values() querysets."""
    def test_values_no_promotion_for_existing(self):
        # A filter already forcing INNER JOIN is not demoted by values().
        qs = Node.objects.filter(parent__parent__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = qs.values('parent__parent__id')
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Make sure there is a left outer join without the filter.
        qs = Node.objects.values('parent__parent__id')
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
    def test_non_nullable_fk_not_promoted(self):
        # values() over a non-nullable FK keeps the INNER JOIN.
        qs = ObjectB.objects.values('objecta__name')
        self.assertIn(' INNER JOIN ', str(qs.query))
    def test_ticket_21376(self):
        a = ObjectA.objects.create()
        ObjectC.objects.create(objecta=a)
        qs = ObjectC.objects.filter(
            Q(objecta=a) | Q(objectb__objecta=a),
        )
        qs = qs.filter(
            Q(objectb=1) | Q(objecta=a),
        )
        self.assertEqual(qs.count(), 1)
        # The ORed objectb lookups must produce a LEFT OUTER JOIN to
        # ObjectB's table.
        tblname = connection.ops.quote_name(ObjectB._meta.db_table)
        self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
    def test_ticket_21787(self):
        """filter()/exclude() across an FK that targets an inherited base."""
        special1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
        special2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
        special3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
        item = CategoryItem.objects.create(category=special1)
        CategoryItem.objects.create(category=special2)
        # Excluding the item linked to special1 leaves the other two.
        self.assertQuerysetEqual(
            SpecialCategory.objects.exclude(
                categoryitem__id=item.pk).order_by('name'),
            [special2, special3], lambda obj: obj
        )
        # Filtering on the same item returns only special1.
        self.assertQuerysetEqual(
            SpecialCategory.objects.filter(categoryitem__id=item.pk),
            [special1], lambda obj: obj
        )
class ReverseM2MCustomPkTests(TestCase):
    def test_ticket_21879(self):
        """M2M filtering works in both directions with custom primary keys."""
        tag = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
        obj = CustomPk.objects.create(name='cp1', extra='extra')
        obj.custompktag_set.add(tag)
        self.assertQuerysetEqual(
            CustomPk.objects.filter(custompktag=tag),
            [obj], lambda item: item)
        self.assertQuerysetEqual(
            CustomPkTag.objects.filter(custom_pk=obj),
            [tag], lambda item: item)
class Ticket22429Tests(TestCase):
    def test_ticket_22429(self):
        """Negated F() comparison across a reverse relation."""
        school_a = School.objects.create()
        student_a = Student.objects.create(school=school_a)
        school_b = School.objects.create()
        student_b = Student.objects.create(school=school_b)
        classroom = Classroom.objects.create(school=school_a)
        classroom.students.add(student_a)
        # student_b is the only one without a classroom in their own school.
        queryset = Student.objects.filter(~Q(classroom__school=F('school')))
        self.assertQuerysetEqual(queryset, [student_b], lambda obj: obj)
class Ticket23605Tests(TestCase):
    def test_ticket_23605(self):
        """
        Nested subqueries inside a complex Q object must be relabeled
        correctly for both filter() and exclude().
        """
        # Test filtering on a complicated q-object from ticket's report.
        # The query structure is such that we have multiple nested subqueries.
        # The original problem was that the inner queries weren't relabeled
        # correctly.
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=10000.0)
        Ticket23605B.objects.create(
            field_b0=10000.0, field_b1=True,
            modelc_fk=c1, modela_fk=a1)
        complex_q = Q(pk__in=Ticket23605A.objects.filter(
            Q(
                # True for a1 as field_b0 = 10000, field_c0=10000
                # False for a2 as no ticket23605b found
                ticket23605b__field_b0__gte=1000000 /
                F("ticket23605b__modelc_fk__field_c0")
            ) &
            # True for a1 (field_b1=True)
            Q(ticket23605b__field_b1=True) &
            ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
                ~(
                    # Same filters as above commented filters, but
                    # double-negated (one for Q() above, one for
                    # parentheses). So, again a1 match, a2 not.
                    Q(field_b1=True) &
                    Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
                )
            ))).filter(ticket23605b__field_b1=True))
        qs1 = Ticket23605A.objects.filter(complex_q)
        self.assertQuerysetEqual(qs1, [a1], lambda x: x)
        # exclude() with the same Q must yield the exact complement.
        qs2 = Ticket23605A.objects.exclude(complex_q)
        self.assertQuerysetEqual(qs2, [a2], lambda x: x)
class TestTicket24279(TestCase):
    # NOTE(review): the class name says 24279 but the method says 24278 —
    # likely a typo; left as-is since renaming a test method changes how it
    # is selected by name. Confirm against the original ticket.
    def test_ticket_24278(self):
        """An empty __in lookup OR-ed with an empty Q() matches nothing."""
        School.objects.create()
        qs = School.objects.filter(Q(pk__in=()) | Q())
        self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(TestCase):
    def test_invalid_values(self):
        """Non-integer values in a relational lookup raise ValueError."""
        bad_filters = (
            {'tag': 'abc'},
            {'tag__in': [123, 'abc']},
        )
        for kwargs in bad_filters:
            with self.assertRaises(ValueError):
                Annotation.objects.filter(**kwargs)
class TestTicket24605(TestCase):
    def test_ticket_24605(self):
        """
        Subquery table names should be quoted.
        """
        i1 = Individual.objects.create(alive=True)
        RelatedIndividual.objects.create(related=i1)
        i2 = Individual.objects.create(alive=False)
        RelatedIndividual.objects.create(related=i2)
        i3 = Individual.objects.create(alive=True)
        i4 = Individual.objects.create(alive=False)
        # Only i4 is both dead and without a related individual.
        self.assertQuerysetEqual(
            Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)),
            [i4], lambda x: x
        )
        # exclude() of the same conjunction returns the complement.
        self.assertQuerysetEqual(
            Individual.objects.exclude(
                Q(alive=False), Q(related_individual__isnull=True)
            ).order_by('pk'),
            [i1, i2, i3], lambda x: x
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2013 by Clearcode <http://clearcode.cc>
# and associates (see AUTHORS).
# This file is part of pytest-dbfixtures.
# pytest-dbfixtures is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pytest-dbfixtures is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with pytest-dbfixtures. If not, see <http://www.gnu.org/licenses/>.
import os
import pytest
from path import path
from tempfile import mkdtemp
from pytest_dbfixtures.executors import TCPExecutor
from pytest_dbfixtures.port import get_port
from pytest_dbfixtures.utils import get_config, try_import, get_process_fixture
def mongo_proc(executable=None, params=None, host=None, port=None,
               logs_prefix=''):
    """
    Mongo process factory.
    :param str executable: path to mongod
    :param str params: extra command line parameters passed to ``mongod``
    :param str host: hostname
    :param str port: exact port (e.g. '8000')
        or randomly selected port:
            '?' - any random available port
            '2000-3000' - random available port from a given range
            '4002,4003' - random of 4002 or 4003 ports
    :param str logs_prefix: prefix for log filename
    :rtype: func
    :returns: function which makes a mongo process
    """
    @pytest.fixture(scope='function')
    def mongo_proc_fixture(request):
        """
        #. Get config.
        #. Run a ``mongod`` process.
        #. Stop ``mongod`` process after tests.
        .. note::
            `mongod <http://docs.mongodb.org/v2.2/reference/mongod/>`_
        :param FixtureRequest request: fixture request object
        :rtype: pytest_dbfixtures.executors.TCPExecutor
        :returns: tcp executor
        """
        config = get_config(request)
        # Make a temporary directory for the test database and remove it
        # once the test has finished.
        tmp = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'tmp')
        # Create the shared parent directory. The original
        # exists()/mkdir() pair was racy under parallel test runs
        # (TOCTOU); use EAFP instead and only re-raise when the
        # directory genuinely could not be created.
        try:
            os.mkdir(tmp)
        except OSError:
            if not os.path.isdir(tmp):
                raise
        tmpdir = path(mkdtemp(prefix='mongo_pytest_fixture', dir=tmp))
        request.addfinalizer(lambda: tmpdir.exists() and tmpdir.rmtree())
        mongo_exec = executable or config.mongo.mongo_exec
        mongo_params = params or config.mongo.params
        mongo_host = host or config.mongo.host
        mongo_port = get_port(port or config.mongo.port)
        logsdir = path(request.config.getvalue('logsdir'))
        mongo_logpath = logsdir / '{prefix}mongo.{port}.log'.format(
            prefix=logs_prefix,
            port=mongo_port
        )
        mongo_executor = TCPExecutor(
            '{mongo_exec} --bind_ip {host} --port {port} --dbpath {dbpath} --logpath {logpath} {params}'.format(  # noqa
                mongo_exec=mongo_exec,
                params=mongo_params,
                host=mongo_host,
                port=mongo_port,
                dbpath=tmpdir,
                logpath=mongo_logpath,
            ),
            host=mongo_host,
            port=mongo_port,
        )
        mongo_executor.start()
        # Always shut the process down at the end of the test.
        request.addfinalizer(mongo_executor.stop)
        return mongo_executor
    return mongo_proc_fixture
def mongodb(process_fixture_name):
    """
    Mongo database factory.
    :param str process_fixture_name: name of the process fixture
    :rtype: func
    :returns: function which makes a connection to mongo
    """
    @pytest.fixture
    def mongodb_factory(request):
        """
        #. Get pymongo module and config.
        #. Get connection to mongo.
        :param FixtureRequest request: fixture request object
        :rtype: pymongo.connection.Connection
        :returns: connection to mongo database
        """
        proc_fixture = get_process_fixture(request, process_fixture_name)
        pymongo, _ = try_import('pymongo', request)
        # MongoClient replaced Connection in newer pymongo releases; keep
        # a fallback for old installations.
        client_cls = getattr(pymongo, 'MongoClient', None)
        if client_cls is None:
            client_cls = pymongo.Connection
        return client_cls(proc_fixture.host, proc_fixture.port)
    return mongodb_factory
__all__ = [mongodb, mongo_proc]
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
This bottle-sqlalchemy plugin integrates SQLAlchemy with your Bottle
application. It connects to a database at the beginning of a request,
passes the database handle to the route callback and closes the connection
afterwards.
The plugin injects an argument into all route callbacks that require a `db`
keyword.
Usage Example::
import bottle
from bottle import HTTPError
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine, Column, Integer, Sequence, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=True)
app = bottle.Bottle()
plugin = sqlalchemy.Plugin(engine, Base.metadata, create=True)
app.install(plugin)
class Entity(Base):
__tablename__ = 'entity'
id = Column(Integer, Sequence('id_seq'), primary_key=True)
name = Column(String(50))
def __init__(self, name):
self.name = name
def __repr__(self):
return "<Entity('%d', '%s')>" % (self.id, self.name)
@app.get('/:name')
def show(name, db):
entity = db.query(Entity).filter_by(name=name).first()
if entity:
return {'id': entity.id, 'name': entity.name}
return HTTPError(404, 'Entity not found.')
@app.put('/:name')
def put_name(name, db):
entity = Entity(name)
db.add(entity)
It is up to you to create the engine and metadata, because SQLAlchemy
offers many ways to do so. The plugin just handles the SQLAlchemy
session.
Copyright (c) 2011-2012, Iuri de Silvio
License: MIT (see LICENSE for details)
'''
import inspect
import bottle
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.scoping import ScopedSession
# bottle defines PluginError only from version 0.10 on; provide a compatible
# shim (and register it on the module) for older bottle releases.
if not hasattr(bottle, 'PluginError'):
    class PluginError(bottle.BottleException):
        pass
    bottle.PluginError = PluginError
class SQLAlchemyPlugin(object):
    """Bottle plugin that injects a SQLAlchemy session into route callbacks."""
    # Plugin identity: `name` must be unique among installed plugins;
    # `api = 2` selects bottle's two-argument apply(callback, route) protocol.
    name = 'sqlalchemy'
    api = 2

    def __init__(self, engine, metadata=None,
                 keyword='db', commit=True, create=False, use_kwargs=False, create_session=None):
        '''
        :param engine: SQLAlchemy engine created with `create_engine` function
        :param metadata: SQLAlchemy metadata. It is required only if `create=True`
        :param keyword: Keyword used to inject session database in a route
        :param create: If it is true, execute `metadata.create_all(engine)`
            when plugin is applied
        :param commit: If it is true, commit changes after route is executed.
        :param use_kwargs: plugin inject session database even if it is not
            explicitly defined, using **kwargs argument if defined.
        :param create_session: SQLAlchemy session maker created with the
            'sessionmaker' function. Will create its own if undefined.
        '''
        self.engine = engine
        if create_session is None:
            create_session = sessionmaker()
        self.create_session = create_session
        self.metadata = metadata
        self.keyword = keyword
        self.create = create
        self.commit = commit
        self.use_kwargs = use_kwargs

    def setup(self, app):
        ''' Make sure that other installed plugins don't affect the same
        keyword argument and check if metadata is available.'''
        for other in app.plugins:
            if not isinstance(other, SQLAlchemyPlugin):
                continue
            if other.keyword == self.keyword:
                raise bottle.PluginError("Found another SQLAlchemy plugin with "\
                    "conflicting settings (non-unique keyword).")
            elif other.name == self.name:
                # Same plugin name but a different keyword: suffix the keyword
                # so both instances can coexist in the app.
                self.name += '_%s' % self.keyword
        if self.create and not self.metadata:
            raise bottle.PluginError('Define metadata value to create database.')

    def apply(self, callback, route):
        # hack to support bottle v0.9.x, where `route` was a dict.
        if bottle.__version__.startswith('0.9'):
            config = route['config']
            _callback = route['callback']
        else:
            config = route.config
            _callback = route.callback

        # Per-route configuration may override the plugin-wide settings.
        if "sqlalchemy" in config:  # support for configuration before `ConfigDict` namespaces
            g = lambda key, default: config.get('sqlalchemy', {}).get(key, default)
        else:
            g = lambda key, default: config.get('sqlalchemy.' + key, default)

        keyword = g('keyword', self.keyword)
        create = g('create', self.create)
        commit = g('commit', self.commit)
        use_kwargs = g('use_kwargs', self.use_kwargs)

        try:
            # check if inspect.signature exists (Python 3.3+)
            inspect.signature
        except AttributeError:
            # Python 2 fallback: getargspec exposes args and **kwargs name.
            argspec = inspect.getargspec(_callback)
            parameters = argspec.args
            accept_kwargs = argspec.keywords
        else:
            parameters = inspect.signature(_callback).parameters
            accept_kwargs = any(p.kind == inspect.Parameter.VAR_KEYWORD
                                for p in parameters.values())

        # Leave routes untouched when they neither declare the keyword nor
        # (with use_kwargs) accept **kwargs.
        if not ((use_kwargs and accept_kwargs) or keyword in parameters):
            return callback

        if create:
            self.metadata.create_all(self.engine)

        def wrapper(*args, **kwargs):
            kwargs[keyword] = session = self.create_session(bind=self.engine)
            try:
                rv = callback(*args, **kwargs)
                if commit:
                    session.commit()
            except (SQLAlchemyError, bottle.HTTPError):
                # Database errors and HTTP errors abort the transaction.
                session.rollback()
                raise
            except bottle.HTTPResponse:
                # Non-error responses (e.g. redirects) are still committed.
                if commit:
                    session.commit()
                raise
            finally:
                # A scoped session is removed from the registry; a plain
                # session is closed directly.
                if isinstance(self.create_session, ScopedSession):
                    self.create_session.remove()
                else:
                    session.close()
            return rv
        return wrapper
Plugin = SQLAlchemyPlugin
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser, ParseError
class FontFaceRule(object):
    """An ``@font-face`` at-rule parsed from a stylesheet.

    Carries the rule's descriptor declarations plus the source position
    (line/column) where the rule started.
    """
    at_keyword = '@font-face'
    __slots__ = ('declarations', 'line', 'column')

    def __init__(self, declarations, line, column):
        self.declarations = declarations
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{0} at {1}:{2}>'.format(
            type(self).__name__, self.line, self.column)
class CSSFonts3Parser(CSS21Parser):
    ''' Parse @font-face rules from the CSS 3 fonts module '''

    # @font-face is only valid at the top level or nested in @media/@page.
    ALLOWED_CONTEXTS_FOR_FONT_FACE = {'stylesheet', '@media', '@page'}

    def __init__(self):
        super(CSSFonts3Parser, self).__init__()
        # Register the handler for the @font-face at-keyword in the base
        # parser's at-rule dispatch table.
        self.at_parsers['@font-face'] = self.parse_font_face_rule

    def parse_font_face_rule(self, rule, previous_rules, errors, context):
        """Validate an @font-face at-rule and parse its descriptor block.

        :raises ParseError: if the rule appears in a disallowed context,
            lacks a declaration block, or has tokens before the block.
        :returns: a :class:`FontFaceRule` with the parsed declarations.
        """
        if context not in self.ALLOWED_CONTEXTS_FOR_FONT_FACE:
            raise ParseError(rule,
                '@font-face rule not allowed in ' + context)
        if rule.body is None:
            raise ParseError(rule,
                'invalid {0} rule: missing block'.format(rule.at_keyword))
        if rule.head:
            raise ParseError(rule, '{0} rule is not allowed to have content before the descriptor declaration'.format(rule.at_keyword))
        # Declaration-level errors are collected, not fatal to the rule.
        declarations, decerrors = self.parse_declaration_list(rule.body)
        errors.extend(decerrors)
        return FontFaceRule(declarations, rule.line, rule.column)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax; see header comment).
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0  # 0 = Sunday in Django's FIRST_DAY_OF_WEEK convention

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',       # '25.10.2006', '25.10.06'
    '%d-%m-%Y',                   # '25-10-2006'
    '%d. %m. %Y', '%d. %m. %y',   # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',          # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',             # '25.10.2006 14:30'
    '%d.%m.%Y',                   # '25.10.2006'
    '%d.%m.%y %H:%M:%S',          # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M',             # '25.10.06 14:30'
    '%d.%m.%y',                   # '25.10.06'
    '%d-%m-%Y %H:%M:%S',          # '25-10-2006 14:30:59'
    '%d-%m-%Y %H:%M',             # '25-10-2006 14:30'
    '%d-%m-%Y',                   # '25-10-2006'
    '%d. %m. %Y %H:%M:%S',        # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M',           # '25. 10. 2006 14:30'
    '%d. %m. %Y',                 # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S',        # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M',           # '25. 10. 06 14:30'
    '%d. %m. %y',                 # '25. 10. 06'
)
# Number formatting: comma decimal mark, period thousands separator,
# grouped in threes (e.g. 1.234.567,89).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_AUTOTUNER_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_AUTOTUNER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
// PrefetchAutotuner dynamically adjusts the buffer size of a prefetch iterator.
//
// PrefetchAutotuner attempts to find the minimum buffer size such that there is
// always at least 1 element in the prefetch queue every time the downstream
// iterator calls GetNext().
//
// One common failure mode of input pipelines is being throughput bound. No
// amount of prefetching can address that performance mode. In order to guard
// against this condition, PrefetchAutotuner will only increase the buffer_limit
// if the prefetching thread is able to successfully fill the buffer at its
// current size.
//
// Note: in the current implementation, we never decrease the buffer_limit().
// This should change in the future!
//
// PrefetchAutotuner is NOT thread safe.
class PrefetchAutotuner {
 public:
  // `initial_buffer_size` seeds `buffer_limit_`.
  // NOTE(review): the constructor body lives in the .cc file — the exact use
  // of `buffer_size_min` and the initial Mode chosen there are not visible
  // in this header; confirm before documenting further.
  explicit PrefetchAutotuner(
      int64_t initial_buffer_size, int64_t buffer_size_min,
      std::shared_ptr<model::RamBudgetManager> ram_budget_manager);

  // Current cap on the number of elements the prefetch buffer may hold.
  int64_t buffer_limit() const { return buffer_limit_; }

  // Reports whether the element size has been set.
  bool HasElementSize() const { return element_size_bytes_.has_value(); }

  // Sets the element size to use for predicting memory usage. Element size must
  // be set before the autotuner can increase the buffer size.
  void SetElementSize(int64_t element_size_bytes);

  // Feeds the autotuner the buffer occupancy observed by the consumer.
  void RecordConsumption(size_t current_buffer_size);
  // Convenience wrapper: an empty buffer is a consumption of zero.
  void RecordEmpty() { RecordConsumption(0); }

 private:
  // PrefetchAutotuner operates as a state machine.
  enum class Mode {
    // Disables the autotuning.
    kDisabled,
    // We have increased the size of the buffer, and will transition to
    // kDownswing if we successfully fill the buffer.
    kUpswing,
    // We have successfully filled a buffer of this size. If we ever block the
    // downstream iterator, we should increase the buffer size.
    kDownswing,
  };

  int64_t buffer_limit_;
  // Estimated per-element size.
  std::optional<int64_t> element_size_bytes_;
  // Autotuning starts disabled until enabled elsewhere.
  Mode mode_ = Mode::kDisabled;
  // Shared with other pipeline components; presumably coordinates buffer
  // growth against the global RAM budget — confirm in model.h.
  std::shared_ptr<model::RamBudgetManager> ram_budget_manager_;
};
} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_AUTOTUNER_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/kernels/data/prefetch_autotuner.h
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS, generic_main # noqa
__version__ = (2016, 8, 1, 1, 11, 52, 0)
__all__ = [
'SqlParser',
'SqlSemantics',
'main'
]
KEYWORDS = set([])
class SqlParser(Parser):
def __init__(self,
             whitespace='\\s+',
             nameguard=None,
             comments_re='/\\*[\\s\\S]*?\\*/',
             eol_comments_re='--.*?$',
             ignorecase=True,
             left_recursion=True,
             keywords=KEYWORDS,
             namechars='',
             **kwargs):
    """Configure the generated SQL parser.

    NOTE: this file is generated by Grako; manual edits (including these
    comments) are lost on regeneration. Defaults select SQL conventions:
    ``/* */`` block comments, ``--`` line comments, case-insensitive
    matching, and left recursion enabled.
    """
    super(SqlParser, self).__init__(
        whitespace=whitespace,
        nameguard=nameguard,
        comments_re=comments_re,
        eol_comments_re=eol_comments_re,
        ignorecase=ignorecase,
        left_recursion=left_recursion,
        keywords=keywords,
        namechars=namechars,
        **kwargs
    )
@graken()
def _digit_(self):
self._pattern(r'\d+')
@graken()
def _double_quote_(self):
self._token('"')
@graken()
def _quote_(self):
self._token("'")
@graken()
def _left_paren_(self):
self._token('(')
@graken()
def _right_paren_(self):
self._token(')')
@graken()
def _asterisk_(self):
self._token('*')
@graken()
def _plus_sign_(self):
self._token('+')
@graken()
def _comma_(self):
self._token(',')
@graken()
def _minus_sign_(self):
self._token('-')
@graken()
def _period_(self):
self._token('.')
@graken()
def _solidus_(self):
self._token('/')
@graken()
def _colon_(self):
self._token(':')
@graken()
def _semicolon_(self):
self._token(';')
@graken()
def _less_than_operator_(self):
self._token('<')
@graken()
def _equals_operator_(self):
self._token('=')
@graken()
def _greater_than_operator_(self):
self._token('>')
@graken()
def _question_mark_(self):
self._token('?')
@graken()
def _left_bracket_or_trigraph_(self):
with self._choice():
with self._option():
self._left_bracket_()
with self._option():
self._left_bracket_trigraph_()
self._error('no available options')
@graken()
def _right_bracket_or_trigraph_(self):
with self._choice():
with self._option():
self._right_bracket_()
with self._option():
self._right_bracket_trigraph_()
self._error('no available options')
@graken()
def _left_bracket_(self):
self._token('[')
@graken()
def _left_bracket_trigraph_(self):
self._token('??(')
@graken()
def _right_bracket_(self):
self._token(']')
@graken()
def _right_bracket_trigraph_(self):
self._token('??)')
@graken()
def _underscore_(self):
self._token('_')
@graken()
def _regular_identifier_(self):
self._pattern(r'[a-z]\w*')
self._check_name()
@graken()
def _large_object_length_token_(self):
self._digit_()
self._multiplier_()
@graken()
def _multiplier_(self):
with self._choice():
with self._option():
self._token('K')
with self._option():
self._token('M')
with self._option():
self._token('G')
self._error('expecting one of: G K M')
@graken()
def _delimited_identifier_(self):
self._double_quote_()
self._delimited_identifier_body_()
self._double_quote_()
@graken()
def _delimited_identifier_body_(self):
self._pattern(r'(""|[^"\n])+')
@graken()
def _unicode_escape_value_(self):
with self._choice():
with self._option():
self._unicode_4_digit_escape_value_()
with self._option():
self._unicode_6_digit_escape_value_()
with self._option():
self._unicode_character_escape_value_()
self._error('no available options')
@graken()
def _unicode_4_digit_escape_value_(self):
self._unicode_escape_character_()
self._byte_()
self._byte_()
@graken()
def _unicode_6_digit_escape_value_(self):
self._unicode_escape_character_()
self._plus_sign_()
self._byte_()
self._byte_()
self._byte_()
@graken()
def _unicode_character_escape_value_(self):
self._unicode_escape_character_()
self._unicode_escape_character_()
@graken()
def _unicode_escape_character_(self):
self._token('\\U')
@graken()
def _not_equals_operator_(self):
self._token('<>')
@graken()
def _greater_than_or_equals_operator_(self):
self._token('>=')
@graken()
def _less_than_or_equals_operator_(self):
self._token('<=')
@graken()
def _concatenation_operator_(self):
self._token('||')
@graken()
def _right_arrow_(self):
self._token('->')
@graken()
def _double_colon_(self):
self._token('::')
@graken()
def _literal_(self):
with self._choice():
with self._option():
self._signed_numeric_literal_()
with self._option():
self._general_literal_()
self._error('no available options')
@graken()
def _unsigned_literal_(self):
with self._choice():
with self._option():
self._unsigned_numeric_literal_()
with self._option():
self._general_literal_()
self._error('no available options')
@graken()
def _general_literal_(self):
with self._choice():
with self._option():
self._character_string_literal_()
with self._option():
self._national_character_string_literal_()
with self._option():
self._unicode_character_string_literal_()
with self._option():
self._binary_string_literal_()
with self._option():
self._datetime_literal_()
with self._option():
self._interval_literal_()
with self._option():
self._boolean_literal_()
self._error('no available options')
@graken()
def _character_string_literal_(self):
    # Optional leading "_charset" introducer (underscore + character set
    # name), then one or more adjacent quoted segments.
    with self._optional():
        self._underscore_()
        self._character_set_name_()
    def block0():
        self._quote_()
        self._character_representation_()
        self._quote_()
    self._positive_closure(block0)
@graken()
def _character_representation_(self):
self._pattern(r"(''|[^'\n])*")
@graken()
def _national_character_string_literal_(self):
self._token('N')
def block0():
self._quote_()
self._character_representation_()
self._quote_()
self._positive_closure(block0)
@graken()
def _unicode_character_string_literal_(self):
with self._optional():
self._underscore_()
self._character_set_name_()
self._token('U&')
def block0():
self._quote_()
with self._optional():
def block1():
self._unicode_representation_()
self._positive_closure(block1)
self._quote_()
self._positive_closure(block0)
with self._optional():
self._token('ESCAPE')
self._escape_character_()
@graken()
def _unicode_representation_(self):
with self._choice():
with self._option():
self._character_representation_()
with self._option():
self._unicode_escape_value_()
self._error('no available options')
@graken()
def _binary_string_literal_(self):
self._token('X')
def block0():
self._quote_()
with self._optional():
def block1():
self._byte_()
self._positive_closure(block1)
self._quote_()
self._positive_closure(block0)
with self._optional():
self._token('ESCAPE')
self._escape_character_()
@graken()
def _hexit_(self):
self._pattern(r'[a-f\d]')
@graken()
def _byte_(self):
self._hexit_()
self._hexit_()
@graken()
def _signed_numeric_literal_(self):
with self._optional():
self._sign_()
self._unsigned_numeric_literal_()
@graken()
def _unsigned_numeric_literal_(self):
with self._choice():
with self._option():
self._exact_numeric_literal_()
with self._option():
self._approximate_numeric_literal_()
self._error('no available options')
@graken()
def _exact_numeric_literal_(self):
    # Either "digits[.digits]" or the leading-period form ".digits".
    with self._choice():
        with self._option():
            self._unsigned_integer_()
            with self._optional():
                self._period_()
                with self._optional():
                    self._unsigned_integer_()
        with self._option():
            self._period_()
            self._unsigned_integer_()
        self._error('no available options')
@graken()
def _sign_(self):
with self._choice():
with self._option():
self._plus_sign_()
with self._option():
self._minus_sign_()
self._error('no available options')
@graken()
def _approximate_numeric_literal_(self):
self._exact_numeric_literal_()
self._token('E')
self._signed_integer_()
@graken()
def _signed_integer_(self):
with self._optional():
self._sign_()
self._unsigned_integer_()
@graken()
def _unsigned_integer_(self):
self._digit_()
@graken()
def _datetime_literal_(self):
with self._choice():
with self._option():
self._date_literal_()
with self._option():
self._time_literal_()
with self._option():
self._timestamp_literal_()
self._error('no available options')
@graken()
def _date_literal_(self):
self._token('DATE')
self._date_string_()
@graken()
def _time_literal_(self):
self._token('TIME')
self._time_string_()
@graken()
def _timestamp_literal_(self):
self._token('TIMESTAMP')
self._timestamp_string_()
@graken()
def _date_string_(self):
self._quote_()
self._unquoted_date_string_()
self._quote_()
@graken()
def _time_string_(self):
self._quote_()
self._unquoted_time_string_()
self._quote_()
@graken()
def _timestamp_string_(self):
self._quote_()
self._unquoted_timestamp_string_()
self._quote_()
@graken()
def _time_zone_interval_(self):
self._sign_()
self._hours_value_()
self._colon_()
self._minutes_value_()
@graken()
def _date_value_(self):
self._years_value_()
self._minus_sign_()
self._months_value_()
self._minus_sign_()
self._days_value_()
@graken()
def _time_value_(self):
self._hours_value_()
self._colon_()
self._minutes_value_()
self._colon_()
self._seconds_value_()
@graken()
def _interval_literal_(self):
self._token('INTERVAL')
with self._optional():
self._sign_()
self._interval_string_()
self._interval_qualifier_()
@graken()
def _interval_string_(self):
self._quote_()
self._unquoted_interval_string_()
self._quote_()
@graken()
def _unquoted_date_string_(self):
self._date_value_()
@graken()
def _unquoted_time_string_(self):
self._time_value_()
with self._optional():
self._time_zone_interval_()
@graken()
def _unquoted_timestamp_string_(self):
self._unquoted_date_string_()
self._unquoted_time_string_()
@graken()
def _unquoted_interval_string_(self):
with self._optional():
self._sign_()
with self._group():
with self._choice():
with self._option():
self._year_month_literal_()
with self._option():
self._day_time_literal_()
self._error('no available options')
@graken()
def _year_month_literal_(self):
with self._choice():
with self._option():
self._years_value_()
with self._option():
with self._optional():
self._years_value_()
self._minus_sign_()
self._months_value_()
self._error('no available options')
@graken()
def _day_time_literal_(self):
with self._choice():
with self._option():
self._day_time_interval_()
with self._option():
self._time_interval_()
self._error('no available options')
@graken()
def _day_time_interval_(self):
self._days_value_()
with self._optional():
self._hours_value_()
with self._optional():
self._colon_()
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
@graken()
def _time_interval_(self):
with self._choice():
with self._option():
self._hours_value_()
with self._optional():
self._colon_()
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
with self._option():
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
with self._option():
self._seconds_value_()
self._error('no available options')
@graken()
def _years_value_(self):
self._datetime_value_()
@graken()
def _months_value_(self):
self._datetime_value_()
@graken()
def _days_value_(self):
self._datetime_value_()
@graken()
def _hours_value_(self):
self._datetime_value_()
@graken()
def _minutes_value_(self):
self._datetime_value_()
@graken()
def _seconds_value_(self):
self._unsigned_integer_()
with self._optional():
self._period_()
with self._optional():
self._unsigned_integer_()
@graken()
def _datetime_value_(self):
self._unsigned_integer_()
@graken()
def _boolean_literal_(self):
with self._choice():
with self._option():
self._token('TRUE')
with self._option():
self._token('FALSE')
with self._option():
self._token('UNKNOWN')
self._error('expecting one of: FALSE TRUE UNKNOWN')
@graken()
def _identifier_(self):
self._actual_identifier_()
@graken()
def _identifier_list_(self):
def sep0():
self._token(',')
def block0():
self._identifier_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _actual_identifier_(self):
with self._choice():
with self._option():
self._regular_identifier_()
with self._option():
self._delimited_identifier_()
self._error('no available options')
@graken()
def _table_name_(self):
self._local_or_schema_qualified_name_()
@graken()
def _schema_name_(self):
with self._optional():
self._identifier_()
self._period_()
self._identifier_()
@graken()
def _schema_qualified_name_(self):
with self._optional():
self._schema_name_()
self._period_()
self._identifier_()
@graken()
def _local_or_schema_qualified_name_(self):
with self._optional():
self._local_or_schema_qualifier_()
self._period_()
self._identifier_()
@graken()
def _local_or_schema_qualifier_(self):
    """Parse <local or schema qualifier>: schema name or MODULE."""
    with self._choice():
        with self._option():
            self._schema_name_()
        with self._option():
            self._token('MODULE')
        self._error('expecting one of: MODULE')
@graken()
def _cursor_name_(self):
    """Parse <cursor name>: a local qualified name."""
    self._local_qualified_name_()
@graken()
def _local_qualified_name_(self):
    """Parse <local qualified name>: [ MODULE '.' ] identifier."""
    with self._optional():
        self._token('MODULE')
        self._period_()
    self._identifier_()
@graken()
def _host_parameter_name_(self):
    """Parse <host parameter name>: ':' identifier."""
    self._colon_()
    self._identifier_()
@graken()
def _external_routine_name_(self):
    """Parse <external routine name>: identifier or character string literal."""
    with self._choice():
        with self._option():
            self._identifier_()
        with self._option():
            self._character_string_literal_()
        self._error('no available options')
@graken()
def _character_set_name_(self):
    """Parse <character set name>: [ schema-name '.' ] regular identifier."""
    with self._optional():
        self._schema_name_()
        self._period_()
    self._regular_identifier_()
@graken()
def _connection_name_(self):
    """Parse <connection name>: a simple value specification."""
    self._simple_value_specification_()
@graken()
def _sql_statement_name_(self):
    """Parse <SQL statement name>: identifier or extended statement name."""
    with self._choice():
        with self._option():
            self._identifier_()
        with self._option():
            self._extended_statement_name_()
        self._error('no available options')
@graken()
def _extended_statement_name_(self):
    """Parse <extended statement name>: [ scope option ] simple value spec."""
    with self._optional():
        self._scope_option_()
    self._simple_value_specification_()
@graken()
def _dynamic_cursor_name_(self):
    """Parse <dynamic cursor name>: cursor name or extended cursor name."""
    with self._choice():
        with self._option():
            self._cursor_name_()
        with self._option():
            self._extended_cursor_name_()
        self._error('no available options')
@graken()
def _extended_cursor_name_(self):
    """Parse <extended cursor name>: [ scope option ] simple value spec."""
    with self._optional():
        self._scope_option_()
    self._simple_value_specification_()
@graken()
def _descriptor_name_(self):
    """Parse <descriptor name>: [ scope option ] simple value spec."""
    with self._optional():
        self._scope_option_()
    self._simple_value_specification_()
@graken()
def _scope_option_(self):
    """Parse <scope option>: GLOBAL | LOCAL."""
    with self._choice():
        with self._option():
            self._token('GLOBAL')
        with self._option():
            self._token('LOCAL')
        self._error('expecting one of: GLOBAL LOCAL')
@graken()
def _data_type_(self):
    """Parse <data type>: predefined, row, named (UDT), reference or collection type."""
    with self._choice():
        with self._option():
            self._predefined_type_()
        with self._option():
            self._row_type_()
        with self._option():
            self._schema_qualified_name_()
        with self._option():
            self._reference_type_()
        with self._option():
            self._collection_type_()
        self._error('no available options')
@graken()
def _predefined_type_(self):
    """Parse <predefined type>: character/national/binary string, numeric, BOOLEAN, datetime or interval type."""
    with self._choice():
        with self._option():
            self._character_string_type_()
            # optional CHARACTER SET clause, then optional COLLATE clause
            with self._optional():
                self._token('CHARACTER')
                self._token('SET')
                self._character_set_name_()
            with self._optional():
                self._collate_clause_()
        with self._option():
            self._national_character_string_type_()
            with self._optional():
                self._collate_clause_()
        with self._option():
            self._binary_large_object_string_type_()
        with self._option():
            self._numeric_type_()
        with self._option():
            self._token('BOOLEAN')
        with self._option():
            self._datetime_type_()
        with self._option():
            self._interval_type_()
        self._error('expecting one of: BOOLEAN')
@graken()
def _character_string_type_(self):
    """Parse <character string type>: CHARACTER/CHAR [(len)], CHAR(ACTER) VARYING (len), VARCHAR (len), or CLOB type."""
    with self._choice():
        with self._option():
            self._token('CHARACTER')
            with self._optional():
                self._left_paren_()
                self._length_()
                self._right_paren_()
        with self._option():
            self._token('CHAR')
            with self._optional():
                self._left_paren_()
                self._length_()
                self._right_paren_()
        with self._option():
            self._token('CHARACTER')
            self._token('VARYING')
            self._left_paren_()
            self._length_()
            self._right_paren_()
        with self._option():
            self._token('CHAR')
            self._token('VARYING')
            self._left_paren_()
            self._length_()
            self._right_paren_()
        with self._option():
            self._token('VARCHAR')
            self._left_paren_()
            self._length_()
            self._right_paren_()
        with self._option():
            self._character_large_object_type_()
        self._error('expecting one of: CHAR CHARACTER')
@graken()
def _character_large_object_type_(self):
    """Parse <character large object type>: CHARACTER/CHAR LARGE OBJECT or CLOB, each with optional (length)."""
    with self._choice():
        with self._option():
            self._token('CHARACTER')
            self._token('LARGE')
            self._token('OBJECT')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        with self._option():
            self._token('CHAR')
            self._token('LARGE')
            self._token('OBJECT')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        with self._option():
            self._token('CLOB')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        self._error('expecting one of: CHAR CHARACTER CLOB')
@graken()
def _national_character_string_type_(self):
    """Parse <national character string type>: NATIONAL CHAR(ACTER)/NCHAR, fixed or VARYING, or NCLOB type."""
    with self._choice():
        with self._option():
            self._token('NATIONAL')
            self._token('CHARACTER')
            with self._optional():
                self._left_paren_()
                self._length_()
                self._right_paren_()
        with self._option():
            self._token('NATIONAL')
            self._token('CHAR')
            with self._optional():
                self._left_paren_()
                self._length_()
                self._right_paren_()
        with self._option():
            self._token('NCHAR')
            with self._optional():
                self._left_paren_()
                self._length_()
                self._right_paren_()
        with self._option():
            self._token('NATIONAL')
            self._token('CHARACTER')
            self._token('VARYING')
            self._left_paren_()
            self._length_()
            self._right_paren_()
        with self._option():
            self._token('NATIONAL')
            self._token('CHAR')
            self._token('VARYING')
            self._left_paren_()
            self._length_()
            self._right_paren_()
        with self._option():
            self._token('NCHAR')
            self._token('VARYING')
            self._left_paren_()
            self._length_()
            self._right_paren_()
        with self._option():
            self._national_character_large_object_type_()
        self._error('expecting one of: NATIONAL NCHAR')
@graken()
def _national_character_large_object_type_(self):
    """Parse <national character large object type>: NATIONAL CHARACTER / NCHAR LARGE OBJECT or NCLOB, with optional (length)."""
    with self._choice():
        with self._option():
            self._token('NATIONAL')
            self._token('CHARACTER')
            self._token('LARGE')
            self._token('OBJECT')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        with self._option():
            self._token('NCHAR')
            self._token('LARGE')
            self._token('OBJECT')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        with self._option():
            self._token('NCLOB')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        self._error('expecting one of: NATIONAL NCHAR NCLOB')
@graken()
def _binary_large_object_string_type_(self):
    """Parse <binary large object string type>: BINARY LARGE OBJECT or BLOB, with optional (length)."""
    with self._choice():
        with self._option():
            self._token('BINARY')
            self._token('LARGE')
            self._token('OBJECT')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        with self._option():
            self._token('BLOB')
            with self._optional():
                self._left_paren_()
                self._large_object_length_()
                self._right_paren_()
        self._error('expecting one of: BINARY BLOB')
@graken()
def _numeric_type_(self):
    """Parse <numeric type>: exact or approximate numeric type."""
    with self._choice():
        with self._option():
            self._exact_numeric_type_()
        with self._option():
            self._approximate_numeric_type_()
        self._error('no available options')
@graken()
def _exact_numeric_type_(self):
    """Parse <exact numeric type>: NUMERIC/DECIMAL/DEC [(precision[, scale])], SMALLINT, INTEGER, INT or BIGINT."""
    with self._choice():
        with self._option():
            self._token('NUMERIC')
            with self._optional():
                self._left_paren_()
                self._precision_()
                with self._optional():
                    self._comma_()
                    self._scale_()
                self._right_paren_()
        with self._option():
            self._token('DECIMAL')
            with self._optional():
                self._left_paren_()
                self._precision_()
                with self._optional():
                    self._comma_()
                    self._scale_()
                self._right_paren_()
        with self._option():
            self._token('DEC')
            with self._optional():
                self._left_paren_()
                self._precision_()
                with self._optional():
                    self._comma_()
                    self._scale_()
                self._right_paren_()
        with self._option():
            self._token('SMALLINT')
        with self._option():
            self._token('INTEGER')
        with self._option():
            self._token('INT')
        with self._option():
            self._token('BIGINT')
        self._error('expecting one of: BIGINT DEC DECIMAL INT INTEGER NUMERIC SMALLINT')
@graken()
def _approximate_numeric_type_(self):
    """Parse <approximate numeric type>: FLOAT [(precision)], REAL, or DOUBLE PRECISION."""
    with self._choice():
        with self._option():
            self._token('FLOAT')
            with self._optional():
                self._left_paren_()
                self._precision_()
                self._right_paren_()
        with self._option():
            self._token('REAL')
        with self._option():
            self._token('DOUBLE')
            self._token('PRECISION')
        self._error('expecting one of: DOUBLE FLOAT REAL')
@graken()
def _length_(self):
    """Parse <length>: an unsigned integer."""
    self._unsigned_integer_()
@graken()
def _large_object_length_(self):
    """Parse <large object length>: integer [multiplier] [units] | length-token [units]."""
    with self._choice():
        with self._option():
            self._unsigned_integer_()
            with self._optional():
                self._multiplier_()
            with self._optional():
                self._char_length_units_()
        with self._option():
            self._large_object_length_token_()
            with self._optional():
                self._char_length_units_()
        self._error('no available options')
@graken()
def _char_length_units_(self):
    """Parse <char length units>: CHARACTERS | CODE_UNITS | OCTETS."""
    with self._choice():
        with self._option():
            self._token('CHARACTERS')
        with self._option():
            self._token('CODE_UNITS')
        with self._option():
            self._token('OCTETS')
        self._error('expecting one of: CHARACTERS CODE_UNITS OCTETS')
@graken()
def _precision_(self):
    """Parse <precision>: an unsigned integer."""
    self._unsigned_integer_()
@graken()
def _scale_(self):
    """Parse <scale>: an unsigned integer."""
    self._unsigned_integer_()
@graken()
def _datetime_type_(self):
    """Parse <datetime type>: DATE, or TIME/TIMESTAMP [(precision)] [WITH(OUT) TIME ZONE]."""
    with self._choice():
        with self._option():
            self._token('DATE')
        with self._option():
            self._token('TIME')
            with self._optional():
                self._left_paren_()
                self._precision_()
                self._right_paren_()
            with self._optional():
                self._with_or_without_time_zone_()
        with self._option():
            self._token('TIMESTAMP')
            with self._optional():
                self._left_paren_()
                self._precision_()
                self._right_paren_()
            with self._optional():
                self._with_or_without_time_zone_()
        self._error('expecting one of: DATE TIME TIMESTAMP')
@graken()
def _with_or_without_time_zone_(self):
    """Parse <with or without time zone>: WITH TIME ZONE | WITHOUT TIME ZONE."""
    with self._choice():
        with self._option():
            self._token('WITH')
            self._token('TIME')
            self._token('ZONE')
        with self._option():
            self._token('WITHOUT')
            self._token('TIME')
            self._token('ZONE')
        self._error('expecting one of: WITH WITHOUT')
@graken()
def _interval_type_(self):
    """Parse <interval type>: INTERVAL interval-qualifier."""
    self._token('INTERVAL')
    self._interval_qualifier_()
@graken()
def _row_type_(self):
    """Parse <row type>: ROW row-type-body."""
    self._token('ROW')
    self._row_type_body_()
@graken()
def _row_type_body_(self):
    """Parse <row type body>: '(' field-definition {',' field-definition} ')'."""
    self._left_paren_()

    def sep0():
        self._token(',')

    def block0():
        self._field_definition_()
    self._positive_closure(block0, prefix=sep0)
    self._right_paren_()
@graken()
def _reference_type_(self):
    """Parse <reference type>: REF '(' referenced-type ')' [ scope clause ]."""
    self._token('REF')
    self._left_paren_()
    self._schema_qualified_name_()
    self._right_paren_()
    with self._optional():
        self._scope_clause_()
@graken()
def _scope_clause_(self):
    """Parse <scope clause>: SCOPE table-name."""
    self._token('SCOPE')
    self._table_name_()
@graken()
def _collection_type_(self):
    """Parse <collection type>: array or multiset type."""
    with self._choice():
        with self._option():
            self._array_type_()
        with self._option():
            self._multiset_type_()
        self._error('no available options')
@graken()
def _array_type_(self):
    """Parse <array type>: data-type ARRAY [ '[' max-cardinality ']' ]."""
    self._data_type_()
    self._token('ARRAY')
    with self._optional():
        self._left_bracket_or_trigraph_()
        self._unsigned_integer_()
        self._right_bracket_or_trigraph_()
@graken()
def _multiset_type_(self):
    """Parse <multiset type>: data-type MULTISET."""
    self._data_type_()
    self._token('MULTISET')
@graken()
def _field_definition_(self):
    """Parse <field definition>: identifier data-type [ reference scope check ]."""
    self._identifier_()
    self._data_type_()
    with self._optional():
        self._reference_scope_check_()
@graken()
def _value_expression_primary_(self):
    """Parse <value expression primary>: parenthesized or nonparenthesized primary."""
    with self._choice():
        with self._option():
            self._parenthesized_value_expression_()
        with self._option():
            self._nonparenthesized_value_expression_primary_()
        self._error('no available options')
@graken()
def _parenthesized_value_expression_(self):
    """Parse <parenthesized value expression>: '(' value-expression ')'."""
    self._left_paren_()
    self._value_expression_()
    self._right_paren_()
@graken()
def _nonparenthesized_value_expression_primary_(self):
    """Parse <nonparenthesized value expression primary>: the ordered list of all primary expression forms."""
    with self._choice():
        with self._option():
            self._unsigned_value_specification_()
        with self._option():
            self._column_reference_()
        with self._option():
            self._set_function_specification_()
        with self._option():
            self._window_function_()
        with self._option():
            self._subquery_()
        with self._option():
            self._case_expression_()
        with self._option():
            self._cast_specification_()
        with self._option():
            self._field_reference_()
        with self._option():
            self._subtype_treatment_()
        with self._option():
            self._method_invocation_()
        with self._option():
            self._static_method_invocation_()
        with self._option():
            self._new_specification_()
        with self._option():
            self._attribute_or_method_reference_()
        with self._option():
            self._reference_resolution_()
        with self._option():
            self._collection_value_constructor_()
        with self._option():
            self._array_element_reference_()
        with self._option():
            self._multiset_element_reference_()
        with self._option():
            self._routine_invocation_()
        with self._option():
            self._next_value_expression_()
        self._error('no available options')
@graken()
def _collection_value_constructor_(self):
    """Parse <collection value constructor>: array or multiset constructor."""
    with self._choice():
        with self._option():
            self._array_value_constructor_()
        with self._option():
            self._multiset_value_constructor_()
        self._error('no available options')
@graken()
def _value_specification_(self):
    """Parse <value specification>: literal or general value specification."""
    with self._choice():
        with self._option():
            self._literal_()
        with self._option():
            self._general_value_specification_()
        self._error('no available options')
@graken()
def _unsigned_value_specification_(self):
    """Parse <unsigned value specification>: unsigned literal or general value spec."""
    with self._choice():
        with self._option():
            self._unsigned_literal_()
        with self._option():
            self._general_value_specification_()
        self._error('no available options')
@graken()
def _general_value_specification_(self):
    """Parse <general value specification>: parameter references and the CURRENT_*/USER/VALUE keywords."""
    with self._choice():
        with self._option():
            self._host_parameter_specification_()
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._dynamic_parameter_specification_()
        with self._option():
            self._current_collation_specification_()
        with self._option():
            self._token('CURRENT_DEFAULT_TRANSFORM_GROUP')
        with self._option():
            self._token('CURRENT_PATH')
        with self._option():
            self._token('CURRENT_ROLE')
        with self._option():
            # the keyword is followed by the user-defined type name
            self._token('CURRENT_TRANSFORM_GROUP_FOR_TYPE')
            self._schema_qualified_name_()
        with self._option():
            self._token('CURRENT_USER')
        with self._option():
            self._token('SESSION_USER')
        with self._option():
            self._token('SYSTEM_USER')
        with self._option():
            self._token('USER')
        with self._option():
            self._token('VALUE')
        self._error('expecting one of: CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_USER SESSION_USER SYSTEM_USER USER VALUE')
@graken()
def _simple_value_specification_(self):
    """Parse <simple value specification>: literal, host parameter or SQL parameter."""
    with self._choice():
        with self._option():
            self._literal_()
        with self._option():
            self._host_parameter_name_()
        with self._option():
            self._sql_parameter_reference_()
        self._error('no available options')
@graken()
def _target_specification_(self):
    """Parse <target specification>: any assignable target."""
    with self._choice():
        with self._option():
            self._host_parameter_specification_()
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._column_reference_()
        with self._option():
            self._target_array_element_specification_()
        with self._option():
            self._dynamic_parameter_specification_()
        self._error('no available options')
@graken()
def _simple_target_specification_(self):
    """Parse <simple target specification>: host parameter, SQL parameter or column."""
    with self._choice():
        with self._option():
            self._host_parameter_specification_()
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._column_reference_()
        self._error('no available options')
@graken()
def _host_parameter_specification_(self):
    """Parse <host parameter specification>: host parameter name [ indicator parameter ]."""
    self._host_parameter_name_()
    with self._optional():
        self._indicator_parameter_()
@graken()
def _dynamic_parameter_specification_(self):
    """Parse <dynamic parameter specification>: '?'."""
    self._question_mark_()
@graken()
def _indicator_parameter_(self):
    """Parse <indicator parameter>: [ INDICATOR ] host parameter name."""
    with self._optional():
        self._token('INDICATOR')
    self._host_parameter_name_()
@graken()
def _target_array_element_specification_(self):
    """Parse <target array element specification>: array-ref '[' subscript ']'."""
    self._target_array_reference_()
    self._left_bracket_or_trigraph_()
    self._simple_value_specification_()
    self._right_bracket_or_trigraph_()
@graken()
def _target_array_reference_(self):
    """Parse <target array reference>: SQL parameter or column reference."""
    with self._choice():
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._column_reference_()
        self._error('no available options')
@graken()
def _current_collation_specification_(self):
    """Parse <current collation specification>: CURRENT_COLLATION '(' string-expr ')'."""
    self._token('CURRENT_COLLATION')
    self._left_paren_()
    self._string_value_expression_()
    self._right_paren_()
@graken()
def _contextually_typed_value_specification_(self):
    """Parse <contextually typed value specification>: implicitly typed value or DEFAULT."""
    with self._choice():
        with self._option():
            self._implicitly_typed_value_specification_()
        with self._option():
            self._token('DEFAULT')
        self._error('expecting one of: DEFAULT')
@graken()
def _implicitly_typed_value_specification_(self):
    """Parse <implicitly typed value specification>: NULL or an empty specification."""
    with self._choice():
        with self._option():
            self._token('NULL')
        with self._option():
            self._empty_specification_()
        self._error('expecting one of: NULL')
@graken()
def _empty_specification_(self):
    """Parse <empty specification>: ARRAY '[' ']' | MULTISET '[' ']'."""
    with self._choice():
        with self._option():
            self._token('ARRAY')
            self._left_bracket_or_trigraph_()
            self._right_bracket_or_trigraph_()
        with self._option():
            self._token('MULTISET')
            self._left_bracket_or_trigraph_()
            self._right_bracket_or_trigraph_()
        self._error('no available options')
@graken()
def _identifier_chain_(self):
    """Parse <identifier chain>: one or more identifiers separated by '.'."""
    def sep0():
        self._token('.')

    def block0():
        self._identifier_()
    self._positive_closure(block0, prefix=sep0)
@graken()
def _column_reference_(self):
    """Parse <column reference>: identifier chain, or MODULE '.' qualifier '.' column."""
    with self._choice():
        with self._option():
            self._identifier_chain_()
        with self._option():
            self._token('MODULE')
            self._period_()
            self._identifier_()
            self._period_()
            self._identifier_()
        self._error('no available options')
@graken()
def _sql_parameter_reference_(self):
    """Parse <SQL parameter reference>: an identifier chain."""
    self._identifier_chain_()
@graken()
def _set_function_specification_(self):
    """Parse <set function specification>: aggregate function or GROUPING operation."""
    with self._choice():
        with self._option():
            self._aggregate_function_()
        with self._option():
            self._grouping_operation_()
        self._error('no available options')
@graken()
def _grouping_operation_(self):
    """Parse <grouping operation>: GROUPING '(' column-ref {',' column-ref} ')'."""
    self._token('GROUPING')
    self._left_paren_()

    def sep0():
        self._token(',')

    def block0():
        self._column_reference_()
    self._positive_closure(block0, prefix=sep0)
    self._right_paren_()
@graken()
def _window_function_(self):
    """Parse <window function>: window-function-type OVER window-name-or-spec."""
    self._window_function_type_()
    self._token('OVER')
    self._window_name_or_specification_()
@graken()
def _window_function_type_(self):
    """Parse <window function type>: rank-function '()', ROW_NUMBER '()', or aggregate function."""
    with self._choice():
        with self._option():
            self._rank_function_type_()
            self._left_paren_()
            self._right_paren_()
        with self._option():
            self._token('ROW_NUMBER')
            self._left_paren_()
            self._right_paren_()
        with self._option():
            self._aggregate_function_()
        self._error('no available options')
@graken()
def _rank_function_type_(self):
    """Parse <rank function type>: RANK | DENSE_RANK | PERCENT_RANK | CUME_DIST."""
    with self._choice():
        with self._option():
            self._token('RANK')
        with self._option():
            self._token('DENSE_RANK')
        with self._option():
            self._token('PERCENT_RANK')
        with self._option():
            self._token('CUME_DIST')
        self._error('expecting one of: CUME_DIST DENSE_RANK PERCENT_RANK RANK')
@graken()
def _window_name_or_specification_(self):
    """Parse <window name or specification>: window name or inline specification."""
    with self._choice():
        with self._option():
            self._identifier_()
        with self._option():
            self._window_specification_()
        self._error('no available options')
@graken()
def _case_expression_(self):
    """Parse <case expression>: case abbreviation (NULLIF/COALESCE) or CASE specification."""
    with self._choice():
        with self._option():
            self._case_abbreviation_()
        with self._option():
            self._case_specification_()
        self._error('no available options')
@graken()
def _case_abbreviation_(self):
    """Parse <case abbreviation>: NULLIF(v, v) or COALESCE(v {, v}...)."""
    with self._choice():
        with self._option():
            self._token('NULLIF')
            self._left_paren_()
            self._value_expression_()
            self._comma_()
            self._value_expression_()
            self._right_paren_()
        with self._option():
            self._token('COALESCE')
            self._left_paren_()
            self._value_expression_()

            def block0():
                self._comma_()
                self._value_expression_()
            self._positive_closure(block0)
            self._right_paren_()
        self._error('no available options')
@graken()
def _case_specification_(self):
    """Parse <case specification>: simple or searched CASE."""
    with self._choice():
        with self._option():
            self._simple_case_()
        with self._option():
            self._searched_case_()
        self._error('no available options')
@graken()
def _simple_case_(self):
    """Parse <simple case>: CASE operand when-clause... [ ELSE ... ] END."""
    self._token('CASE')
    self._case_operand_()

    def block0():
        self._simple_when_clause_()
    self._positive_closure(block0)
    with self._optional():
        self._else_clause_()
    self._token('END')
@graken()
def _searched_case_(self):
    """Parse <searched case>: CASE when-clause... [ ELSE ... ] END."""
    self._token('CASE')

    def block0():
        self._searched_when_clause_()
    self._positive_closure(block0)
    with self._optional():
        self._else_clause_()
    self._token('END')
@graken()
def _simple_when_clause_(self):
    """Parse <simple when clause>: WHEN when-operand THEN result."""
    self._token('WHEN')
    self._when_operand_()
    self._token('THEN')
    self._result_()
@graken()
def _searched_when_clause_(self):
    """Parse <searched when clause>: WHEN search-condition THEN result."""
    self._token('WHEN')
    self._search_condition_()
    self._token('THEN')
    self._result_()
@graken()
def _else_clause_(self):
    """Parse <else clause>: ELSE result."""
    self._token('ELSE')
    self._result_()
@graken()
def _case_operand_(self):
    """Parse <case operand>: row value predicand or OVERLAPS predicate part 1."""
    with self._choice():
        with self._option():
            self._row_value_predicand_()
        with self._option():
            self._overlaps_predicate_part_1_()
        self._error('no available options')
@graken()
def _when_operand_(self):
    """Parse <when operand>: a row value predicand or any predicate tail ("part 2")."""
    with self._choice():
        with self._option():
            self._row_value_predicand_()
        with self._option():
            self._comparison_predicate_part_2_()
        with self._option():
            self._between_predicate_part_2_()
        with self._option():
            self._in_predicate_part_2_()
        with self._option():
            self._character_like_predicate_part_2_()
        with self._option():
            self._octet_like_predicate_part_2_()
        with self._option():
            self._similar_predicate_part_2_()
        with self._option():
            self._null_predicate_part_2_()
        with self._option():
            self._quantified_comparison_predicate_part_2_()
        with self._option():
            self._match_predicate_part_2_()
        with self._option():
            self._overlaps_predicate_part_2_()
        with self._option():
            self._distinct_predicate_part_2_()
        with self._option():
            self._member_predicate_part_2_()
        with self._option():
            self._submultiset_predicate_part_2_()
        with self._option():
            self._set_predicate_part_2_()
        with self._option():
            self._type_predicate_part_2_()
        self._error('no available options')
@graken()
def _result_(self):
    """Parse <result>: a value expression or NULL."""
    with self._choice():
        with self._option():
            self._value_expression_()
        with self._option():
            self._token('NULL')
        self._error('expecting one of: NULL')
@graken()
def _cast_specification_(self):
    """Parse <cast specification>: CAST '(' operand AS target ')'."""
    self._token('CAST')
    self._left_paren_()
    self._cast_operand_()
    self._token('AS')
    self._cast_target_()
    self._right_paren_()
@graken()
def _cast_operand_(self):
    """Parse <cast operand>: value expression or implicitly typed value spec."""
    with self._choice():
        with self._option():
            self._value_expression_()
        with self._option():
            self._implicitly_typed_value_specification_()
        self._error('no available options')
@graken()
def _cast_target_(self):
    """Parse <cast target>: domain name (schema qualified) or data type."""
    with self._choice():
        with self._option():
            self._schema_qualified_name_()
        with self._option():
            self._data_type_()
        self._error('no available options')
@graken()
def _next_value_expression_(self):
    """Parse <next value expression>: NEXT VALUE FOR sequence-name."""
    self._token('NEXT')
    self._token('VALUE')
    self._token('FOR')
    self._schema_qualified_name_()
@graken()
def _field_reference_(self):
    """Parse <field reference>: value-expression-primary '.' field-name."""
    self._value_expression_primary_()
    self._period_()
    self._identifier_()
@graken()
def _subtype_treatment_(self):
    """Parse <subtype treatment>: TREAT '(' expr AS target-subtype ')'."""
    self._token('TREAT')
    self._left_paren_()
    self._value_expression_()
    self._token('AS')
    self._target_subtype_()
    self._right_paren_()
@graken()
def _target_subtype_(self):
    """Parse <target subtype>: schema qualified name or reference type."""
    with self._choice():
        with self._option():
            self._schema_qualified_name_()
        with self._option():
            self._reference_type_()
        self._error('no available options')
@graken()
def _method_invocation_(self):
    """Parse <method invocation>: direct or generalized invocation."""
    with self._choice():
        with self._option():
            self._direct_invocation_()
        with self._option():
            self._generalized_invocation_()
        self._error('no available options')
@graken()
def _direct_invocation_(self):
    """Parse <direct invocation>: primary '.' method-name [ argument list ]."""
    self._value_expression_primary_()
    self._period_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()
@graken()
def _generalized_invocation_(self):
    """Parse <generalized invocation>: '(' primary AS data-type ')' '.' method [ args ]."""
    self._left_paren_()
    self._value_expression_primary_()
    self._token('AS')
    self._data_type_()
    self._right_paren_()
    self._period_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()
@graken()
def _static_method_invocation_(self):
    """Parse <static method invocation>: type-name '::' method [ args ]."""
    self._schema_qualified_name_()
    self._double_colon_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()
@graken()
def _new_specification_(self):
    """Parse <new specification>: NEW routine-invocation."""
    self._token('NEW')
    self._routine_invocation_()
@graken()
def _attribute_or_method_reference_(self):
    """Parse <attribute or method reference>: primary '->' name [ args ]."""
    self._value_expression_primary_()
    self._right_arrow_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()
@graken()
def _reference_resolution_(self):
    """Parse <reference resolution>: DEREF '(' reference-expr ')'."""
    self._token('DEREF')
    self._left_paren_()
    self._reference_value_expression_()
    self._right_paren_()
@graken()
def _array_element_reference_(self):
    """Parse <array element reference>: array-expr '[' numeric-expr ']'."""
    self._array_value_expression_()
    self._left_bracket_or_trigraph_()
    self._numeric_value_expression_()
    self._right_bracket_or_trigraph_()
@graken()
def _multiset_element_reference_(self):
    """Parse <multiset element reference>: ELEMENT '(' multiset-expr ')'."""
    self._token('ELEMENT')
    self._left_paren_()
    self._multiset_value_expression_()
    self._right_paren_()
@graken()
def _value_expression_(self):
    """Parse <value expression>: common, boolean or row value expression."""
    with self._choice():
        with self._option():
            self._common_value_expression_()
        with self._option():
            self._boolean_value_expression_()
        with self._option():
            self._row_value_expression_()
        self._error('no available options')
@graken()
def _common_value_expression_(self):
    """Parse <common value expression>: numeric, string, datetime, interval, UDT, reference or collection expression."""
    with self._choice():
        with self._option():
            self._numeric_value_expression_()
        with self._option():
            self._string_value_expression_()
        with self._option():
            self._datetime_value_expression_()
        with self._option():
            self._interval_value_expression_()
        with self._option():
            self._user_defined_type_value_expression_()
        with self._option():
            self._reference_value_expression_()
        with self._option():
            self._collection_value_expression_()
        self._error('no available options')
@graken()
def _user_defined_type_value_expression_(self):
    """Parse <user-defined type value expression>: a value expression primary."""
    self._value_expression_primary_()
@graken()
def _reference_value_expression_(self):
    """Parse <reference value expression>: a value expression primary."""
    self._value_expression_primary_()
@graken()
def _collection_value_expression_(self):
    """Parse <collection value expression>: array or multiset value expression."""
    with self._choice():
        with self._option():
            self._array_value_expression_()
        with self._option():
            self._multiset_value_expression_()
        self._error('no available options')
@graken()
def _numeric_value_expression_(self):
    """Parse <numeric value expression>: term | expr '+' term | expr '-' term (left-recursive)."""
    with self._choice():
        with self._option():
            self._term_()
        with self._option():
            self._numeric_value_expression_()
            self._plus_sign_()
            self._term_()
        with self._option():
            self._numeric_value_expression_()
            self._minus_sign_()
            self._term_()
        self._error('no available options')
@graken()
def _term_(self):
    """Parse <term>: factor | term '*' factor | term '/' factor (left-recursive)."""
    with self._choice():
        with self._option():
            self._factor_()
        with self._option():
            self._term_()
            self._asterisk_()
            self._factor_()
        with self._option():
            self._term_()
            self._solidus_()
            self._factor_()
        self._error('no available options')
@graken()
def _factor_(self):
    """Parse <factor>: [ sign ] numeric primary."""
    with self._optional():
        self._sign_()
    self._numeric_primary_()
@graken()
def _numeric_primary_(self):
    """Parse <numeric primary>: value expression primary or numeric value function."""
    with self._choice():
        with self._option():
            self._value_expression_primary_()
        with self._option():
            self._numeric_value_function_()
        self._error('no available options')
@graken()
def _numeric_value_function_(self):
    """Parse <numeric value function>: POSITION/EXTRACT/length/CARDINALITY/ABS/MOD/LN/EXP/POWER/SQRT/FLOOR/CEIL/WIDTH_BUCKET."""
    with self._choice():
        with self._option():
            self._position_expression_()
        with self._option():
            self._extract_expression_()
        with self._option():
            self._length_expression_()
        with self._option():
            self._cardinality_expression_()
        with self._option():
            self._absolute_value_expression_()
        with self._option():
            self._modulus_expression_()
        with self._option():
            self._natural_logarithm_()
        with self._option():
            self._exponential_function_()
        with self._option():
            self._power_function_()
        with self._option():
            self._square_root_()
        with self._option():
            self._floor_function_()
        with self._option():
            self._ceiling_function_()
        with self._option():
            self._width_bucket_function_()
        self._error('no available options')
@graken()
def _position_expression_(self):
    """Parse <position expression>: string or blob POSITION expression."""
    with self._choice():
        with self._option():
            self._string_position_expression_()
        with self._option():
            self._blob_position_expression_()
        self._error('no available options')
@graken()
def _string_position_expression_(self):
    """Parse <string position expression>: POSITION '(' s IN s [ USING units ] ')'."""
    self._token('POSITION')
    self._left_paren_()
    self._string_value_expression_()
    self._token('IN')
    self._string_value_expression_()
    with self._optional():
        self._token('USING')
        self._char_length_units_()
    self._right_paren_()
@graken()
def _blob_position_expression_(self):
    """Parse <blob position expression>: POSITION '(' blob IN blob ')'."""
    self._token('POSITION')
    self._left_paren_()
    self._blob_value_expression_()
    self._token('IN')
    self._blob_value_expression_()
    self._right_paren_()
@graken()
def _length_expression_(self):
    """Parse <length expression>: char length or octet length expression."""
    with self._choice():
        with self._option():
            self._char_length_expression_()
        with self._option():
            self._octet_length_expression_()
        self._error('no available options')
@graken()
def _char_length_expression_(self):
    """Parse <char length expression>: CHAR_LENGTH|CHARACTER_LENGTH '(' s [ USING units ] ')'."""
    with self._group():
        with self._choice():
            with self._option():
                self._token('CHAR_LENGTH')
            with self._option():
                self._token('CHARACTER_LENGTH')
            self._error('expecting one of: CHARACTER_LENGTH CHAR_LENGTH')
    self._left_paren_()
    self._string_value_expression_()
    with self._optional():
        self._token('USING')
        self._char_length_units_()
    self._right_paren_()
@graken()
def _octet_length_expression_(self):
    """Parse <octet length expression>: OCTET_LENGTH '(' string-expr ')'."""
    self._token('OCTET_LENGTH')
    self._left_paren_()
    self._string_value_expression_()
    self._right_paren_()
@graken()
def _extract_expression_(self):
    """Parse <extract expression>: EXTRACT '(' field FROM source ')'."""
    self._token('EXTRACT')
    self._left_paren_()
    self._extract_field_()
    self._token('FROM')
    self._extract_source_()
    self._right_paren_()
@graken()
def _extract_field_(self):
    """Parse <extract field>: primary datetime field or time zone field."""
    with self._choice():
        with self._option():
            self._primary_datetime_field_()
        with self._option():
            self._time_zone_field_()
        self._error('no available options')
@graken()
def _time_zone_field_(self):
    """Parse <time zone field>: TIMEZONE_HOUR | TIMEZONE_MINUTE."""
    with self._choice():
        with self._option():
            self._token('TIMEZONE_HOUR')
        with self._option():
            self._token('TIMEZONE_MINUTE')
        self._error('expecting one of: TIMEZONE_HOUR TIMEZONE_MINUTE')
@graken()
def _extract_source_(self):
    """Parse <extract source>: datetime or interval value expression."""
    with self._choice():
        with self._option():
            self._datetime_value_expression_()
        with self._option():
            self._interval_value_expression_()
        self._error('no available options')
@graken()
def _cardinality_expression_(self):
    """Parse <cardinality expression>: CARDINALITY '(' collection-expr ')'."""
    self._token('CARDINALITY')
    self._left_paren_()
    self._collection_value_expression_()
    self._right_paren_()
@graken()
def _absolute_value_expression_(self):
    """Parse <absolute value expression>: ABS '(' numeric-expr ')'."""
    self._token('ABS')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _modulus_expression_(self):
    """Parse <modulus expression>: MOD '(' dividend ',' divisor ')'."""
    self._token('MOD')
    self._left_paren_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _natural_logarithm_(self):
    """Parse <natural logarithm>: LN '(' numeric-expr ')'."""
    self._token('LN')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _exponential_function_(self):
    """Parse <exponential function>: EXP '(' numeric-expr ')'."""
    self._token('EXP')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _power_function_(self):
    """Parse <power function>: POWER '(' base ',' exponent ')'."""
    self._token('POWER')
    self._left_paren_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _square_root_(self):
    """Parse <square root>: SQRT '(' numeric-expr ')'."""
    self._token('SQRT')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _floor_function_(self):
    """Parse <floor function>: FLOOR '(' numeric-expr ')'."""
    self._token('FLOOR')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _ceiling_function_(self):
    """Parse <ceiling function>: CEIL|CEILING '(' numeric-expr ')'."""
    with self._group():
        with self._choice():
            with self._option():
                self._token('CEIL')
            with self._option():
                self._token('CEILING')
            self._error('expecting one of: CEIL CEILING')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _width_bucket_function_(self):
    """Parse <width bucket function>: WIDTH_BUCKET '(' operand ',' bound1 ',' bound2 ',' count ')'."""
    self._token('WIDTH_BUCKET')
    self._left_paren_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._right_paren_()
@graken()
def _string_value_expression_(self):
    """Parse <string value expression>: character or blob value expression."""
    with self._choice():
        with self._option():
            self._character_value_expression_()
        with self._option():
            self._blob_value_expression_()
        self._error('no available options')
@graken()
def _character_value_expression_(self):
    """Parse <character value expression>: concatenation or character factor."""
    with self._choice():
        with self._option():
            self._concatenation_()
        with self._option():
            self._character_factor_()
        self._error('no available options')
@graken()
def _concatenation_(self):
    """Parse <concatenation>: char-expr '||' char-factor (left-recursive)."""
    self._character_value_expression_()
    self._concatenation_operator_()
    self._character_factor_()
@graken()
def _character_factor_(self):
    """Parse <character factor>: character primary [ collate clause ]."""
    self._character_primary_()
    with self._optional():
        self._collate_clause_()
@graken()
def _character_primary_(self):
    """Parse <character primary>: value expression primary or string value function."""
    with self._choice():
        with self._option():
            self._value_expression_primary_()
        with self._option():
            self._string_value_function_()
        self._error('no available options')
@graken()
def _blob_value_expression_(self):
    """Parse <blob value expression>: blob concatenation or blob factor."""
    with self._choice():
        with self._option():
            self._blob_concatenation_()
        with self._option():
            self._blob_factor_()
        self._error('no available options')
@graken()
def _blob_factor_(self):
    """Parse <blob factor>: a blob primary."""
    self._blob_primary_()
@graken()
def _blob_primary_(self):
    """Parse <blob primary>: value expression primary or string value function."""
    with self._choice():
        with self._option():
            self._value_expression_primary_()
        with self._option():
            self._string_value_function_()
        self._error('no available options')
@graken()
def _blob_concatenation_(self):
    """Parse <blob concatenation>: blob-expr '||' blob-factor (left-recursive)."""
    self._blob_value_expression_()
    self._concatenation_operator_()
    self._blob_factor_()
@graken()
def _string_value_function_(self):
    """Parse <string value function>: character or blob value function."""
    with self._choice():
        with self._option():
            self._character_value_function_()
        with self._option():
            self._blob_value_function_()
        self._error('no available options')
@graken()
def _character_value_function_(self):
with self._choice():
with self._option():
self._character_substring_function_()
with self._option():
self._regular_expression_substring_function_()
with self._option():
self._fold_()
with self._option():
self._transcoding_()
with self._option():
self._character_transliteration_()
with self._option():
self._trim_function_()
with self._option():
self._character_overlay_function_()
with self._option():
self._normalize_function_()
with self._option():
self._specific_type_method_()
self._error('no available options')
    # -- Character string built-in functions (SUBSTRING, UPPER/LOWER, CONVERT,
    # -- TRANSLATE, TRIM, OVERLAY, NORMALIZE, SPECIFICTYPE).
    @graken()
    def _character_substring_function_(self):
        # SUBSTRING(expr FROM start [FOR length] [USING units])
        self._token('SUBSTRING')
        self._left_paren_()
        self._character_value_expression_()
        self._token('FROM')
        self._start_position_()
        with self._optional():
            self._token('FOR')
            self._string_length_()
        with self._optional():
            self._token('USING')
            self._char_length_units_()
        self._right_paren_()
    @graken()
    def _regular_expression_substring_function_(self):
        # SUBSTRING(expr SIMILAR pattern ESCAPE char)
        self._token('SUBSTRING')
        self._left_paren_()
        self._character_value_expression_()
        self._token('SIMILAR')
        self._character_value_expression_()
        self._token('ESCAPE')
        self._escape_character_()
        self._right_paren_()
    @graken()
    def _fold_(self):
        # UPPER(expr) or LOWER(expr).
        with self._group():
            with self._choice():
                with self._option():
                    self._token('UPPER')
                with self._option():
                    self._token('LOWER')
                self._error('expecting one of: LOWER UPPER')
        self._left_paren_()
        self._character_value_expression_()
        self._right_paren_()
    @graken()
    def _transcoding_(self):
        # CONVERT(expr USING transcoding-name)
        self._token('CONVERT')
        self._left_paren_()
        self._character_value_expression_()
        self._token('USING')
        self._schema_qualified_name_()
        self._right_paren_()
    @graken()
    def _character_transliteration_(self):
        # TRANSLATE(expr USING transliteration-name)
        self._token('TRANSLATE')
        self._left_paren_()
        self._character_value_expression_()
        self._token('USING')
        self._schema_qualified_name_()
        self._right_paren_()
    @graken()
    def _trim_function_(self):
        self._token('TRIM')
        self._left_paren_()
        self._trim_operands_()
        self._right_paren_()
    @graken()
    def _trim_operands_(self):
        # [[LEADING|TRAILING|BOTH] [trim-char] FROM] source -- the whole
        # specification/char/FROM prefix is optional as one unit.
        with self._optional():
            with self._optional():
                self._trim_specification_()
            with self._optional():
                self._character_value_expression_()
            self._token('FROM')
        self._character_value_expression_()
    @graken()
    def _trim_specification_(self):
        with self._choice():
            with self._option():
                self._token('LEADING')
            with self._option():
                self._token('TRAILING')
            with self._option():
                self._token('BOTH')
            self._error('expecting one of: BOTH LEADING TRAILING')
    @graken()
    def _character_overlay_function_(self):
        # OVERLAY(expr PLACING expr FROM start [FOR length] [USING units])
        self._token('OVERLAY')
        self._left_paren_()
        self._character_value_expression_()
        self._token('PLACING')
        self._character_value_expression_()
        self._token('FROM')
        self._start_position_()
        with self._optional():
            self._token('FOR')
            self._string_length_()
        with self._optional():
            self._token('USING')
            self._char_length_units_()
        self._right_paren_()
    @graken()
    def _normalize_function_(self):
        # NORMALIZE(expr)
        self._token('NORMALIZE')
        self._left_paren_()
        self._character_value_expression_()
        self._right_paren_()
    @graken()
    def _specific_type_method_(self):
        # expr.SPECIFICTYPE -- method-call syntax on a UDT value.
        self._user_defined_type_value_expression_()
        self._period_()
        self._token('SPECIFICTYPE')
    # -- Blob (binary string) built-in functions; like the character versions
    # -- but without USING char-length-units.
    @graken()
    def _blob_value_function_(self):
        with self._choice():
            with self._option():
                self._blob_substring_function_()
            with self._option():
                self._blob_trim_function_()
            with self._option():
                self._blob_overlay_function_()
            self._error('no available options')
    @graken()
    def _blob_substring_function_(self):
        # SUBSTRING(blob FROM start [FOR length])
        self._token('SUBSTRING')
        self._left_paren_()
        self._blob_value_expression_()
        self._token('FROM')
        self._start_position_()
        with self._optional():
            self._token('FOR')
            self._string_length_()
        self._right_paren_()
    @graken()
    def _blob_trim_function_(self):
        self._token('TRIM')
        self._left_paren_()
        self._blob_trim_operands_()
        self._right_paren_()
    @graken()
    def _blob_trim_operands_(self):
        # Same optional prefix structure as _trim_operands_, for blobs.
        with self._optional():
            with self._optional():
                self._trim_specification_()
            with self._optional():
                self._blob_value_expression_()
            self._token('FROM')
        self._blob_value_expression_()
    @graken()
    def _blob_overlay_function_(self):
        # OVERLAY(blob PLACING blob FROM start [FOR length])
        self._token('OVERLAY')
        self._left_paren_()
        self._blob_value_expression_()
        self._token('PLACING')
        self._blob_value_expression_()
        self._token('FROM')
        self._start_position_()
        with self._optional():
            self._token('FOR')
            self._string_length_()
        self._right_paren_()
    @graken()
    def _start_position_(self):
        # Alias rule: a start position is just a numeric value expression.
        self._numeric_value_expression_()
    @graken()
    def _string_length_(self):
        # Alias rule: a string length is just a numeric value expression.
        self._numeric_value_expression_()
    # -- Datetime value expressions and datetime value functions.
    @graken()
    def _datetime_value_expression_(self):
        # datetime +/- interval arithmetic; the last two options are
        # left-recursive (handled by the generated rule machinery).
        with self._choice():
            with self._option():
                self._datetime_term_()
            with self._option():
                self._interval_value_expression_()
                self._plus_sign_()
                self._datetime_term_()
            with self._option():
                self._datetime_value_expression_()
                self._plus_sign_()
                self._interval_term_()
            with self._option():
                self._datetime_value_expression_()
                self._minus_sign_()
                self._interval_term_()
            self._error('no available options')
    @graken()
    def _datetime_term_(self):
        self._datetime_factor_()
    @graken()
    def _datetime_factor_(self):
        # A primary optionally followed by an AT ... time zone modifier.
        self._datetime_primary_()
        with self._optional():
            self._time_zone_()
    @graken()
    def _datetime_primary_(self):
        with self._choice():
            with self._option():
                self._value_expression_primary_()
            with self._option():
                self._datetime_value_function_()
            self._error('no available options')
    @graken()
    def _time_zone_(self):
        # AT LOCAL | AT TIME ZONE interval
        self._token('AT')
        self._time_zone_specifier_()
    @graken()
    def _time_zone_specifier_(self):
        # NOTE: generated error message lists only the first alternative's
        # literal token ('LOCAL'); generator convention, not exhaustive.
        with self._choice():
            with self._option():
                self._token('LOCAL')
            with self._option():
                self._token('TIME')
                self._token('ZONE')
                self._interval_primary_()
            self._error('expecting one of: LOCAL')
    @graken()
    def _datetime_value_function_(self):
        # CURRENT_DATE plus the four parameterized current-time functions.
        with self._choice():
            with self._option():
                self._token('CURRENT_DATE')
            with self._option():
                self._current_time_value_function_()
            with self._option():
                self._current_timestamp_value_function_()
            with self._option():
                self._current_local_time_value_function_()
            with self._option():
                self._current_local_timestamp_value_function_()
            self._error('expecting one of: CURRENT_DATE')
    @graken()
    def _current_time_value_function_(self):
        # CURRENT_TIME [(precision)]
        self._token('CURRENT_TIME')
        with self._optional():
            self._left_paren_()
            self._precision_()
            self._right_paren_()
    @graken()
    def _current_local_time_value_function_(self):
        # LOCALTIME [(precision)]
        self._token('LOCALTIME')
        with self._optional():
            self._left_paren_()
            self._precision_()
            self._right_paren_()
    @graken()
    def _current_timestamp_value_function_(self):
        # CURRENT_TIMESTAMP [(precision)]
        self._token('CURRENT_TIMESTAMP')
        with self._optional():
            self._left_paren_()
            self._precision_()
            self._right_paren_()
    @graken()
    def _current_local_timestamp_value_function_(self):
        # LOCALTIMESTAMP [(precision)]
        self._token('LOCALTIMESTAMP')
        with self._optional():
            self._left_paren_()
            self._precision_()
            self._right_paren_()
    # -- Interval value expressions. The *_1_/*_2_ rules are generator-made
    # -- aliases that exist to break/label recursion in the original BNF.
    @graken()
    def _interval_value_expression_(self):
        with self._choice():
            with self._option():
                self._interval_term_()
            with self._option():
                self._interval_value_expression_1_()
                self._plus_sign_()
                self._interval_term_1_()
            with self._option():
                self._interval_value_expression_1_()
                self._minus_sign_()
                self._interval_term_1_()
            with self._option():
                # (datetime - datetime) qualifier  -- interval by subtraction.
                self._left_paren_()
                self._datetime_value_expression_()
                self._minus_sign_()
                self._datetime_term_()
                self._right_paren_()
                self._interval_qualifier_()
            self._error('no available options')
    @graken()
    def _interval_term_(self):
        # Multiplication/division of intervals by numeric factors.
        with self._choice():
            with self._option():
                self._interval_factor_()
            with self._option():
                self._interval_term_2_()
                self._asterisk_()
                self._factor_()
            with self._option():
                self._interval_term_2_()
                self._solidus_()
                self._factor_()
            with self._option():
                self._term_()
                self._asterisk_()
                self._interval_factor_()
            self._error('no available options')
    @graken()
    def _interval_factor_(self):
        # Optional leading sign before the primary.
        with self._optional():
            self._sign_()
        self._interval_primary_()
    @graken()
    def _interval_primary_(self):
        with self._choice():
            with self._option():
                self._value_expression_primary_()
                with self._optional():
                    self._interval_qualifier_()
            with self._option():
                self._interval_absolute_value_function_()
            self._error('no available options')
    @graken()
    def _interval_value_expression_1_(self):
        self._interval_value_expression_()
    @graken()
    def _interval_term_1_(self):
        self._interval_term_()
    @graken()
    def _interval_term_2_(self):
        self._interval_term_()
    @graken()
    def _interval_absolute_value_function_(self):
        # ABS(interval)
        self._token('ABS')
        self._left_paren_()
        self._interval_value_expression_()
        self._right_paren_()
    # -- Boolean value expressions: OR / AND / NOT / IS [NOT] truth-value.
    @graken()
    def _boolean_value_expression_(self):
        # term (OR term)* expressed as a left-recursive alternative.
        with self._choice():
            with self._option():
                self._boolean_term_()
            with self._option():
                self._boolean_value_expression_()
                self._token('OR')
                self._boolean_term_()
            self._error('no available options')
    @graken()
    def _boolean_term_(self):
        # factor (AND factor)*, same left-recursive shape.
        with self._choice():
            with self._option():
                self._boolean_factor_()
            with self._option():
                self._boolean_term_()
                self._token('AND')
                self._boolean_factor_()
            self._error('no available options')
    @graken()
    def _boolean_factor_(self):
        # Optional leading NOT.
        with self._optional():
            self._token('NOT')
        self._boolean_test_()
    @graken()
    def _boolean_test_(self):
        # primary [IS [NOT] TRUE|FALSE|UNKNOWN]
        self._boolean_primary_()
        with self._optional():
            self._token('IS')
            with self._optional():
                self._token('NOT')
            self._truth_value_()
    @graken()
    def _truth_value_(self):
        with self._choice():
            with self._option():
                self._token('TRUE')
            with self._option():
                self._token('FALSE')
            with self._option():
                self._token('UNKNOWN')
            self._error('expecting one of: FALSE TRUE UNKNOWN')
    @graken()
    def _boolean_primary_(self):
        with self._choice():
            with self._option():
                self._predicate_()
            with self._option():
                self._boolean_predicand_()
            self._error('no available options')
    @graken()
    def _boolean_predicand_(self):
        with self._choice():
            with self._option():
                self._parenthesized_boolean_value_expression_()
            with self._option():
                self._nonparenthesized_value_expression_primary_()
            self._error('no available options')
    @graken()
    def _parenthesized_boolean_value_expression_(self):
        self._left_paren_()
        self._boolean_value_expression_()
        self._right_paren_()
    # -- Array value expressions and ARRAY[...] / ARRAY(query) constructors.
    @graken()
    def _array_value_expression_(self):
        with self._choice():
            with self._option():
                self._array_concatenation_()
            with self._option():
                self._array_primary_()
            self._error('no available options')
    @graken()
    def _array_concatenation_(self):
        # Left-recursive concatenation of array values.
        self._array_value_expression_()
        self._concatenation_operator_()
        self._array_primary_()
    @graken()
    def _array_primary_(self):
        self._value_expression_primary_()
    @graken()
    def _array_value_constructor_(self):
        with self._choice():
            with self._option():
                self._array_value_constructor_by_enumeration_()
            with self._option():
                self._array_value_constructor_by_query_()
            self._error('no available options')
    @graken()
    def _array_value_constructor_by_enumeration_(self):
        # ARRAY[elem, ...] (brackets may also be the ??( ??) trigraphs).
        self._token('ARRAY')
        self._left_bracket_or_trigraph_()
        self._array_element_list_()
        self._right_bracket_or_trigraph_()
    @graken()
    def _array_element_list_(self):
        # One-or-more elements separated by commas (generated closure shape).
        def sep0():
            self._token(',')
        def block0():
            self._array_element_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _array_element_(self):
        self._value_expression_()
    @graken()
    def _array_value_constructor_by_query_(self):
        # ARRAY(query [ORDER BY ...])
        self._token('ARRAY')
        self._left_paren_()
        self._query_expression_()
        with self._optional():
            self._order_by_clause_()
        self._right_paren_()
    # -- Multiset value expressions (MULTISET UNION/EXCEPT/INTERSECT) and
    # -- multiset constructors.
    @graken()
    def _multiset_value_expression_(self):
        # expr MULTISET UNION|EXCEPT [ALL|DISTINCT] term; left-recursive.
        with self._choice():
            with self._option():
                self._multiset_term_()
            with self._option():
                self._multiset_value_expression_()
                self._token('MULTISET')
                self._token('UNION')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                self._multiset_term_()
            with self._option():
                self._multiset_value_expression_()
                self._token('MULTISET')
                self._token('EXCEPT')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                self._multiset_term_()
            self._error('no available options')
    @graken()
    def _multiset_term_(self):
        # INTERSECT binds tighter than UNION/EXCEPT, hence the separate rule.
        with self._choice():
            with self._option():
                self._multiset_primary_()
            with self._option():
                self._multiset_term_()
                self._token('MULTISET')
                self._token('INTERSECT')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                self._multiset_primary_()
            self._error('no available options')
    @graken()
    def _multiset_primary_(self):
        with self._choice():
            with self._option():
                self._multiset_set_function_()
            with self._option():
                self._value_expression_primary_()
            self._error('no available options')
    @graken()
    def _multiset_set_function_(self):
        # SET(multiset) -- duplicate removal.
        self._token('SET')
        self._left_paren_()
        self._multiset_value_expression_()
        self._right_paren_()
    @graken()
    def _multiset_value_constructor_(self):
        with self._choice():
            with self._option():
                self._multiset_value_constructor_by_enumeration_()
            with self._option():
                self._multiset_value_constructor_by_query_()
            with self._option():
                self._table_value_constructor_by_query_()
            self._error('no available options')
    @graken()
    def _multiset_value_constructor_by_enumeration_(self):
        # MULTISET[elem, ...]
        self._token('MULTISET')
        self._left_bracket_or_trigraph_()
        self._multiset_element_list_()
        self._right_bracket_or_trigraph_()
    @graken()
    def _multiset_element_list_(self):
        # Comma-separated one-or-more closure.
        def sep0():
            self._token(',')
        def block0():
            self._multiset_element_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _multiset_element_(self):
        self._value_expression_()
    @graken()
    def _multiset_value_constructor_by_query_(self):
        # MULTISET(query)
        self._token('MULTISET')
        self._subquery_()
    @graken()
    def _table_value_constructor_by_query_(self):
        # TABLE(query)
        self._token('TABLE')
        self._subquery_()
    # -- Row value constructors/expressions, including the "contextually
    # -- typed" variants that also accept NULL/DEFAULT-style specifications.
    @graken()
    def _row_value_constructor_(self):
        with self._choice():
            with self._option():
                self._common_value_expression_()
            with self._option():
                self._boolean_value_expression_()
            with self._option():
                self._explicit_row_value_constructor_()
            self._error('no available options')
    @graken()
    def _explicit_row_value_constructor_(self):
        # (e1, e2, ...) with at least two elements, ROW(...), or a subquery.
        with self._choice():
            with self._option():
                self._left_paren_()
                self._row_value_constructor_element_()
                self._comma_()
                self._row_value_constructor_element_list_()
                self._right_paren_()
            with self._option():
                self._token('ROW')
                self._left_paren_()
                self._row_value_constructor_element_list_()
                self._right_paren_()
            with self._option():
                self._subquery_()
            self._error('no available options')
    @graken()
    def _row_value_constructor_element_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._row_value_constructor_element_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _row_value_constructor_element_(self):
        self._value_expression_()
    @graken()
    def _contextually_typed_row_value_constructor_(self):
        # Like _row_value_constructor_ but elements may be contextually
        # typed value specifications (e.g. in INSERT source rows).
        with self._choice():
            with self._option():
                self._common_value_expression_()
            with self._option():
                self._boolean_value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            with self._option():
                self._left_paren_()
                self._contextually_typed_row_value_constructor_element_()
                self._comma_()
                self._contextually_typed_row_value_constructor_element_list_()
                self._right_paren_()
            with self._option():
                self._token('ROW')
                self._left_paren_()
                self._contextually_typed_row_value_constructor_element_list_()
                self._right_paren_()
            self._error('no available options')
    @graken()
    def _contextually_typed_row_value_constructor_element_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._contextually_typed_row_value_constructor_element_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _contextually_typed_row_value_constructor_element_(self):
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            self._error('no available options')
    @graken()
    def _row_value_constructor_predicand_(self):
        with self._choice():
            with self._option():
                self._common_value_expression_()
            with self._option():
                self._boolean_predicand_()
            with self._option():
                self._explicit_row_value_constructor_()
            self._error('no available options')
    @graken()
    def _row_value_expression_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._explicit_row_value_constructor_()
            self._error('no available options')
    @graken()
    def _table_row_value_expression_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._row_value_constructor_()
            self._error('no available options')
    @graken()
    def _contextually_typed_row_value_expression_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._contextually_typed_row_value_constructor_()
            self._error('no available options')
    @graken()
    def _row_value_predicand_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._row_value_constructor_predicand_()
            self._error('no available options')
    @graken()
    def _row_value_special_case_(self):
        self._nonparenthesized_value_expression_primary_()
    @graken()
    def _table_value_constructor_(self):
        # VALUES row, row, ...
        self._token('VALUES')
        self._row_value_expression_list_()
    @graken()
    def _row_value_expression_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._table_row_value_expression_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _contextually_typed_table_value_constructor_(self):
        # VALUES ... where rows may contain NULL/DEFAULT specifications.
        self._token('VALUES')
        self._contextually_typed_row_value_expression_list_()
    @graken()
    def _contextually_typed_row_value_expression_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._contextually_typed_row_value_expression_()
        self._positive_closure(block0, prefix=sep0)
    # -- Table expression: FROM plus the optional WHERE/GROUP BY/HAVING/WINDOW
    # -- clauses, and TABLESAMPLE support.
    @graken()
    def _table_expression_(self):
        self._from_clause_()
        with self._optional():
            self._where_clause_()
        with self._optional():
            self._group_by_clause_()
        with self._optional():
            self._having_clause_()
        with self._optional():
            self._window_clause_()
    @graken()
    def _from_clause_(self):
        self._token('FROM')
        self._table_reference_list_()
    @graken()
    def _table_reference_list_(self):
        # Comma-separated one-or-more table references.
        def sep0():
            self._token(',')
        def block0():
            self._table_reference_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _table_reference_(self):
        with self._choice():
            with self._option():
                self._table_factor_()
            with self._option():
                self._joined_table_()
                with self._optional():
                    self._sample_clause_()
            self._error('no available options')
    @graken()
    def _table_factor_(self):
        # primary [TABLESAMPLE ...]
        self._table_primary_()
        with self._optional():
            self._sample_clause_()
    @graken()
    def _sample_clause_(self):
        # TABLESAMPLE BERNOULLI|SYSTEM (pct) [REPEATABLE (seed)]
        self._token('TABLESAMPLE')
        self._sample_method_()
        self._left_paren_()
        self._numeric_value_expression_()
        self._right_paren_()
        with self._optional():
            self._repeatable_clause_()
    @graken()
    def _sample_method_(self):
        with self._choice():
            with self._option():
                self._token('BERNOULLI')
            with self._option():
                self._token('SYSTEM')
            self._error('expecting one of: BERNOULLI SYSTEM')
    @graken()
    def _repeatable_clause_(self):
        self._token('REPEATABLE')
        self._left_paren_()
        self._numeric_value_expression_()
        self._right_paren_()
    # -- Table primaries: named tables, derived tables (subquery/LATERAL/
    # -- UNNEST/TABLE fn), ONLY(...), and parenthesized joins. Every derived
    # -- form requires a correlation (AS) clause; a plain name does not.
    @graken()
    def _table_primary_(self):
        with self._choice():
            with self._option():
                self._table_or_query_name_()
                with self._optional():
                    self._as_clause_()
                    with self._optional():
                        self._left_paren_()
                        self._column_name_list_()
                        self._right_paren_()
            with self._option():
                self._subquery_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._lateral_derived_table_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._collection_derived_table_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._table_function_derived_table_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._only_spec_()
                with self._optional():
                    self._as_clause_()
                    with self._optional():
                        self._left_paren_()
                        self._column_name_list_()
                        self._right_paren_()
            with self._option():
                self._parenthesized_joined_table_()
            self._error('no available options')
    @graken()
    def _parenthesized_joined_table_(self):
        self._left_paren_()
        self._joined_table_()
        self._right_paren_()
    @graken()
    def _only_spec_(self):
        # ONLY(table) -- excludes subtables of a typed table.
        self._token('ONLY')
        self._left_paren_()
        self._table_or_query_name_()
        self._right_paren_()
    @graken()
    def _lateral_derived_table_(self):
        # LATERAL(query)
        self._token('LATERAL')
        self._subquery_()
    @graken()
    def _collection_derived_table_(self):
        # UNNEST(collection) [WITH ORDINALITY]
        self._token('UNNEST')
        self._left_paren_()
        self._collection_value_expression_()
        self._right_paren_()
        with self._optional():
            self._token('WITH')
            self._token('ORDINALITY')
    @graken()
    def _table_function_derived_table_(self):
        # TABLE(collection)
        self._token('TABLE')
        self._left_paren_()
        self._collection_value_expression_()
        self._right_paren_()
    @graken()
    def _table_or_query_name_(self):
        # A (possibly qualified) table name, else a bare identifier
        # naming a WITH-query.
        with self._choice():
            with self._option():
                self._table_name_()
            with self._option():
                self._identifier_()
            self._error('no available options')
    @graken()
    def _column_name_list_(self):
        self._identifier_list_()
    # -- Joined tables: CROSS / qualified (ON|USING) / NATURAL / UNION joins.
    # -- All four forms are left-recursive on _table_reference_.
    @graken()
    def _joined_table_(self):
        with self._choice():
            with self._option():
                self._cross_join_()
            with self._option():
                self._qualified_join_()
            with self._option():
                self._natural_join_()
            with self._option():
                self._union_join_()
            self._error('no available options')
    @graken()
    def _cross_join_(self):
        self._table_reference_()
        self._token('CROSS')
        self._token('JOIN')
        self._table_primary_()
    @graken()
    def _qualified_join_(self):
        # ref [join-type] JOIN ref ON ... | USING (...)
        self._table_reference_()
        with self._optional():
            self._join_type_()
        self._token('JOIN')
        self._table_reference_()
        self._join_specification_()
    @graken()
    def _natural_join_(self):
        # ref NATURAL [join-type] JOIN primary -- no join specification.
        self._table_reference_()
        self._token('NATURAL')
        with self._optional():
            self._join_type_()
        self._token('JOIN')
        self._table_primary_()
    @graken()
    def _union_join_(self):
        self._table_reference_()
        self._token('UNION')
        self._token('JOIN')
        self._table_primary_()
    @graken()
    def _join_specification_(self):
        with self._choice():
            with self._option():
                self._join_condition_()
            with self._option():
                self._named_columns_join_()
            self._error('no available options')
    @graken()
    def _join_condition_(self):
        self._token('ON')
        self._search_condition_()
    @graken()
    def _named_columns_join_(self):
        # USING (col, col, ...)
        self._token('USING')
        self._left_paren_()
        self._column_name_list_()
        self._right_paren_()
    @graken()
    def _join_type_(self):
        # INNER, or LEFT|RIGHT|FULL with optional OUTER keyword.
        with self._choice():
            with self._option():
                self._token('INNER')
            with self._option():
                self._outer_join_type_()
                with self._optional():
                    self._token('OUTER')
            self._error('expecting one of: INNER')
    @graken()
    def _outer_join_type_(self):
        with self._choice():
            with self._option():
                self._token('LEFT')
            with self._option():
                self._token('RIGHT')
            with self._option():
                self._token('FULL')
            self._error('expecting one of: FULL LEFT RIGHT')
    # -- WHERE, GROUP BY (with ROLLUP/CUBE/GROUPING SETS) and HAVING clauses.
    @graken()
    def _where_clause_(self):
        self._token('WHERE')
        self._search_condition_()
    @graken()
    def _group_by_clause_(self):
        # GROUP BY [ALL|DISTINCT] grouping-element, ...
        self._token('GROUP')
        self._token('BY')
        with self._optional():
            self._set_quantifier_()
        self._grouping_element_list_()
    @graken()
    def _grouping_element_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._grouping_element_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _grouping_element_(self):
        with self._choice():
            with self._option():
                self._ordinary_grouping_set_()
            with self._option():
                self._rollup_list_()
            with self._option():
                self._cube_list_()
            with self._option():
                self._grouping_sets_specification_()
            with self._option():
                self._empty_grouping_set_()
            self._error('no available options')
    @graken()
    def _ordinary_grouping_set_(self):
        # Either a single column reference or a parenthesized list of them.
        with self._choice():
            with self._option():
                self._grouping_column_reference_()
            with self._option():
                self._left_paren_()
                self._grouping_column_reference_list_()
                self._right_paren_()
            self._error('no available options')
    @graken()
    def _grouping_column_reference_(self):
        self._column_reference_()
        with self._optional():
            self._collate_clause_()
    @graken()
    def _grouping_column_reference_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._grouping_column_reference_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _rollup_list_(self):
        # ROLLUP (set, set, ...)
        self._token('ROLLUP')
        self._left_paren_()
        self._ordinary_grouping_set_list_()
        self._right_paren_()
    @graken()
    def _ordinary_grouping_set_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._ordinary_grouping_set_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _cube_list_(self):
        # CUBE (set, set, ...)
        self._token('CUBE')
        self._left_paren_()
        self._ordinary_grouping_set_list_()
        self._right_paren_()
    @graken()
    def _grouping_sets_specification_(self):
        # GROUPING SETS (set, set, ...)
        self._token('GROUPING')
        self._token('SETS')
        self._left_paren_()
        self._grouping_set_list_()
        self._right_paren_()
    @graken()
    def _grouping_set_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._grouping_set_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _grouping_set_(self):
        # Same alternatives as _grouping_element_ (sets may nest).
        with self._choice():
            with self._option():
                self._ordinary_grouping_set_()
            with self._option():
                self._rollup_list_()
            with self._option():
                self._cube_list_()
            with self._option():
                self._grouping_sets_specification_()
            with self._option():
                self._empty_grouping_set_()
            self._error('no available options')
    @graken()
    def _empty_grouping_set_(self):
        # "()" -- grand-total grouping set.
        self._left_paren_()
        self._right_paren_()
    @graken()
    def _having_clause_(self):
        self._token('HAVING')
        self._search_condition_()
    # -- WINDOW clause: named window definitions with partition/order/frame.
    @graken()
    def _window_clause_(self):
        self._token('WINDOW')
        self._window_definition_list_()
    @graken()
    def _window_definition_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._window_definition_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _window_definition_(self):
        # name AS (specification)
        self._identifier_()
        self._token('AS')
        self._window_specification_()
    @graken()
    def _window_specification_(self):
        self._left_paren_()
        self._window_specification_details_()
        self._right_paren_()
    @graken()
    def _window_specification_details_(self):
        # [existing-window-name] [PARTITION BY ...] [ORDER BY ...] [frame]
        with self._optional():
            self._identifier_()
        with self._optional():
            self._window_partition_clause_()
        with self._optional():
            self._order_by_clause_()
        with self._optional():
            self._window_frame_clause_()
    @graken()
    def _window_partition_clause_(self):
        self._token('PARTITION')
        self._token('BY')
        self._window_partition_column_reference_list_()
    @graken()
    def _window_partition_column_reference_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._window_partition_column_reference_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _window_partition_column_reference_(self):
        self._column_reference_()
        with self._optional():
            self._collate_clause_()
    @graken()
    def _window_frame_clause_(self):
        # ROWS|RANGE extent [EXCLUDE ...]
        self._window_frame_units_()
        self._window_frame_extent_()
        with self._optional():
            self._window_frame_exclusion_()
    @graken()
    def _window_frame_units_(self):
        with self._choice():
            with self._option():
                self._token('ROWS')
            with self._option():
                self._token('RANGE')
            self._error('expecting one of: RANGE ROWS')
    @graken()
    def _window_frame_extent_(self):
        with self._choice():
            with self._option():
                self._window_frame_start_()
            with self._option():
                self._window_frame_between_()
            self._error('no available options')
    @graken()
    def _window_frame_start_(self):
        # UNBOUNDED PRECEDING | n PRECEDING | CURRENT ROW
        with self._choice():
            with self._option():
                self._token('UNBOUNDED')
                self._token('PRECEDING')
            with self._option():
                self._window_frame_preceding_()
            with self._option():
                self._token('CURRENT')
                self._token('ROW')
            self._error('expecting one of: CURRENT UNBOUNDED')
    @graken()
    def _window_frame_preceding_(self):
        self._unsigned_value_specification_()
        self._token('PRECEDING')
    @graken()
    def _window_frame_between_(self):
        # BETWEEN bound AND bound
        self._token('BETWEEN')
        self._window_frame_bound_()
        self._token('AND')
        self._window_frame_bound_()
    @graken()
    def _window_frame_bound_(self):
        # A start bound, UNBOUNDED FOLLOWING, or n FOLLOWING.
        with self._choice():
            with self._option():
                self._window_frame_start_()
            with self._option():
                self._token('UNBOUNDED')
                self._token('FOLLOWING')
            with self._option():
                self._window_frame_following_()
            self._error('expecting one of: UNBOUNDED')
    @graken()
    def _window_frame_following_(self):
        self._unsigned_value_specification_()
        self._token('FOLLOWING')
    @graken()
    def _window_frame_exclusion_(self):
        # EXCLUDE CURRENT ROW | GROUP | TIES | NO OTHERS
        with self._choice():
            with self._option():
                self._token('EXCLUDE')
                self._token('CURRENT')
                self._token('ROW')
            with self._option():
                self._token('EXCLUDE')
                self._token('GROUP')
            with self._option():
                self._token('EXCLUDE')
                self._token('TIES')
            with self._option():
                self._token('EXCLUDE')
                self._token('NO')
                self._token('OTHERS')
            self._error('expecting one of: EXCLUDE')
    # -- SELECT specification and select-list items.
    @graken()
    def _query_specification_(self):
        # SELECT [ALL|DISTINCT] select-list table-expression
        self._token('SELECT')
        with self._optional():
            self._set_quantifier_()
        self._select_list_()
        self._table_expression_()
    @graken()
    def _select_list_(self):
        # '*' or a comma-separated list of sublists.
        with self._choice():
            with self._option():
                self._asterisk_()
            with self._option():
                def sep0():
                    self._token(',')
                def block0():
                    self._select_sublist_()
                self._positive_closure(block0, prefix=sep0)
            self._error('no available options')
    @graken()
    def _select_sublist_(self):
        with self._choice():
            with self._option():
                self._derived_column_()
            with self._option():
                self._qualified_asterisk_()
            self._error('no available options')
    @graken()
    def _qualified_asterisk_(self):
        # qualifier.* or value.* AS (cols)
        with self._choice():
            with self._option():
                self._identifier_chain_()
                self._period_()
                self._asterisk_()
            with self._option():
                self._all_fields_reference_()
            self._error('no available options')
    @graken()
    def _derived_column_(self):
        # expr [[AS] alias]
        self._value_expression_()
        with self._optional():
            self._as_clause_()
    @graken()
    def _as_clause_(self):
        # The AS keyword itself is optional before the alias.
        with self._optional():
            self._token('AS')
        self._identifier_()
    @graken()
    def _all_fields_reference_(self):
        # value.* [AS (col, col, ...)]
        self._value_expression_primary_()
        self._period_()
        self._asterisk_()
        with self._optional():
            self._token('AS')
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
    # -- Query expressions: WITH [RECURSIVE], UNION/EXCEPT/INTERSECT with the
    # -- standard precedence (INTERSECT tighter via _non_join_query_term_).
    @graken()
    def _query_expression_(self):
        with self._optional():
            self._with_clause_()
        self._query_expression_body_()
    @graken()
    def _with_clause_(self):
        self._token('WITH')
        with self._optional():
            self._token('RECURSIVE')
        self._with_list_()
    @graken()
    def _with_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._with_list_element_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _with_list_element_(self):
        # name [(cols)] AS (query) [SEARCH/CYCLE clause]
        self._identifier_()
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        self._token('AS')
        self._subquery_()
        with self._optional():
            self._search_or_cycle_clause_()
    @graken()
    def _query_expression_body_(self):
        with self._choice():
            with self._option():
                self._non_join_query_expression_()
            with self._option():
                self._joined_table_()
            self._error('no available options')
    @graken()
    def _non_join_query_expression_(self):
        # term, or body UNION|EXCEPT [ALL|DISTINCT] [CORRESPONDING ...] term;
        # left-recursive on _query_expression_body_.
        with self._choice():
            with self._option():
                self._non_join_query_term_()
            with self._option():
                self._query_expression_body_()
                self._token('UNION')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                with self._optional():
                    self._corresponding_spec_()
                self._query_term_()
            with self._option():
                self._query_expression_body_()
                self._token('EXCEPT')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                with self._optional():
                    self._corresponding_spec_()
                self._query_term_()
            self._error('no available options')
    @graken()
    def _query_term_(self):
        with self._choice():
            with self._option():
                self._non_join_query_term_()
            with self._option():
                self._joined_table_()
            self._error('no available options')
    @graken()
    def _non_join_query_term_(self):
        # primary, or term INTERSECT [ALL|DISTINCT] [CORRESPONDING ...] primary.
        with self._choice():
            with self._option():
                self._non_join_query_primary_()
            with self._option():
                self._query_term_()
                self._token('INTERSECT')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                with self._optional():
                    self._corresponding_spec_()
                self._query_primary_()
            self._error('no available options')
    @graken()
    def _query_primary_(self):
        with self._choice():
            with self._option():
                self._non_join_query_primary_()
            with self._option():
                self._joined_table_()
            self._error('no available options')
    @graken()
    def _non_join_query_primary_(self):
        # simple table or a parenthesized non-join query expression.
        with self._choice():
            with self._option():
                self._simple_table_()
            with self._option():
                self._left_paren_()
                self._non_join_query_expression_()
                self._right_paren_()
            self._error('no available options')
@graken()
def _simple_table_(self):
with self._choice():
with self._option():
self._query_specification_()
with self._option():
self._table_value_constructor_()
with self._option():
self._explicit_table_()
self._error('no available options')
    # --- explicit table, CORRESPONDING spec, SEARCH/CYCLE clauses, subquery --
    # (generated parser rules; the statement order mirrors the grammar)
    @graken()
    def _explicit_table_(self):
        # TABLE <table or query name>
        self._token('TABLE')
        self._table_or_query_name_()

    @graken()
    def _corresponding_spec_(self):
        # CORRESPONDING [ BY ( <column name list> ) ]
        self._token('CORRESPONDING')
        with self._optional():
            self._token('BY')
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()

    @graken()
    def _search_or_cycle_clause_(self):
        # Search clause, cycle clause, or both (search first).
        with self._choice():
            with self._option():
                self._search_clause_()
            with self._option():
                self._cycle_clause_()
            with self._option():
                self._search_clause_()
                self._cycle_clause_()
            self._error('no available options')

    @graken()
    def _search_clause_(self):
        # SEARCH <recursive search order> SET <identifier>
        self._token('SEARCH')
        self._recursive_search_order_()
        self._token('SET')
        self._identifier_()

    @graken()
    def _recursive_search_order_(self):
        # DEPTH FIRST BY ... | BREADTH FIRST BY ...
        with self._choice():
            with self._option():
                self._token('DEPTH')
                self._token('FIRST')
                self._token('BY')
                self._sort_specification_list_()
            with self._option():
                self._token('BREADTH')
                self._token('FIRST')
                self._token('BY')
                self._sort_specification_list_()
            self._error('no available options')

    @graken()
    def _cycle_clause_(self):
        # CYCLE <columns> SET <id> TO <value> DEFAULT <value> USING <id>
        self._token('CYCLE')
        self._cycle_column_list_()
        self._token('SET')
        self._identifier_()
        self._token('TO')
        self._value_expression_()
        self._token('DEFAULT')
        self._value_expression_()
        self._token('USING')
        self._identifier_()

    @graken()
    def _cycle_column_list_(self):
        self._column_name_list_()

    @graken()
    def _subquery_(self):
        # ( <query expression> )
        self._left_paren_()
        self._query_expression_()
        self._right_paren_()
    # --- <predicate> dispatch and the comparison/between/in/like families ----
    # (generated parser rules; each predicate splits its left operand from a
    # "part 2" rule so the tail can be reused after a row-value predicand)
    @graken()
    def _predicate_(self):
        # Ordered alternatives over every SQL predicate form.
        with self._choice():
            with self._option():
                self._comparison_predicate_()
            with self._option():
                self._between_predicate_()
            with self._option():
                self._in_predicate_()
            with self._option():
                self._like_predicate_()
            with self._option():
                self._similar_predicate_()
            with self._option():
                self._null_predicate_()
            with self._option():
                self._quantified_comparison_predicate_()
            with self._option():
                self._exists_predicate_()
            with self._option():
                self._unique_predicate_()
            with self._option():
                self._normalized_predicate_()
            with self._option():
                self._match_predicate_()
            with self._option():
                self._overlaps_predicate_()
            with self._option():
                self._distinct_predicate_()
            with self._option():
                self._member_predicate_()
            with self._option():
                self._submultiset_predicate_()
            with self._option():
                self._set_predicate_()
            with self._option():
                self._type_predicate_()
            self._error('no available options')

    @graken()
    def _comparison_predicate_(self):
        self._row_value_predicand_()
        self._comparison_predicate_part_2_()

    @graken()
    def _comparison_predicate_part_2_(self):
        self._comp_op_()
        self._row_value_predicand_()

    @graken()
    def _comp_op_(self):
        # = <> < > <= >=
        with self._choice():
            with self._option():
                self._equals_operator_()
            with self._option():
                self._not_equals_operator_()
            with self._option():
                self._less_than_operator_()
            with self._option():
                self._greater_than_operator_()
            with self._option():
                self._less_than_or_equals_operator_()
            with self._option():
                self._greater_than_or_equals_operator_()
            self._error('no available options')

    @graken()
    def _between_predicate_(self):
        self._row_value_predicand_()
        self._between_predicate_part_2_()

    @graken()
    def _between_predicate_part_2_(self):
        # [NOT] BETWEEN [ASYMMETRIC|SYMMETRIC] x AND y
        with self._optional():
            self._token('NOT')
        self._token('BETWEEN')
        with self._optional():
            with self._choice():
                with self._option():
                    self._token('ASYMMETRIC')
                with self._option():
                    self._token('SYMMETRIC')
                self._error('expecting one of: ASYMMETRIC SYMMETRIC')
        self._row_value_predicand_()
        self._token('AND')
        self._row_value_predicand_()

    @graken()
    def _in_predicate_(self):
        self._row_value_predicand_()
        self._in_predicate_part_2_()

    @graken()
    def _in_predicate_part_2_(self):
        # [NOT] IN <in predicate value>
        with self._optional():
            self._token('NOT')
        self._token('IN')
        self._in_predicate_value_()

    @graken()
    def _in_predicate_value_(self):
        # Subquery, or a parenthesized value list.
        with self._choice():
            with self._option():
                self._subquery_()
            with self._option():
                self._left_paren_()
                self._in_value_list_()
                self._right_paren_()
            self._error('no available options')

    @graken()
    def _in_value_list_(self):
        # One-or-more row value expressions separated by commas.
        def sep0():
            self._token(',')
        def block0():
            self._row_value_expression_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _like_predicate_(self):
        with self._choice():
            with self._option():
                self._character_like_predicate_()
            with self._option():
                self._octet_like_predicate_()
            self._error('no available options')
    # --- LIKE / SIMILAR / IS NULL / quantified comparison predicates ---------
    # (generated parser rules)
    @graken()
    def _character_like_predicate_(self):
        self._row_value_predicand_()
        self._character_like_predicate_part_2_()

    @graken()
    def _character_like_predicate_part_2_(self):
        # [NOT] LIKE <pattern> [ESCAPE <escape character>]
        with self._optional():
            self._token('NOT')
        self._token('LIKE')
        self._character_value_expression_()
        with self._optional():
            self._token('ESCAPE')
            self._escape_character_()

    @graken()
    def _escape_character_(self):
        self._character_value_expression_()

    @graken()
    def _octet_like_predicate_(self):
        # LIKE over binary (BLOB) operands rather than character strings.
        self._row_value_predicand_()
        self._octet_like_predicate_part_2_()

    @graken()
    def _octet_like_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('LIKE')
        self._blob_value_expression_()
        with self._optional():
            self._token('ESCAPE')
            self._blob_value_expression_()

    @graken()
    def _similar_predicate_(self):
        self._row_value_predicand_()
        self._similar_predicate_part_2_()

    @graken()
    def _similar_predicate_part_2_(self):
        # [NOT] SIMILAR TO <pattern> [ESCAPE <escape character>]
        with self._optional():
            self._token('NOT')
        self._token('SIMILAR')
        self._token('TO')
        self._character_value_expression_()
        with self._optional():
            self._token('ESCAPE')
            self._escape_character_()

    @graken()
    def _null_predicate_(self):
        self._row_value_predicand_()
        self._null_predicate_part_2_()

    @graken()
    def _null_predicate_part_2_(self):
        # IS [NOT] NULL
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('NULL')

    @graken()
    def _quantified_comparison_predicate_(self):
        self._row_value_predicand_()
        self._quantified_comparison_predicate_part_2_()

    @graken()
    def _quantified_comparison_predicate_part_2_(self):
        # <comp op> ALL|SOME|ANY <subquery>
        self._comp_op_()
        self._quantifier_()
        self._subquery_()

    @graken()
    def _quantifier_(self):
        # The generated error message lists only the literal token seen at
        # this level ('ALL'); SOME/ANY are matched via the _some_ subrule.
        with self._choice():
            with self._option():
                self._token('ALL')
            with self._option():
                self._some_()
            self._error('expecting one of: ALL')

    @graken()
    def _some_(self):
        with self._choice():
            with self._option():
                self._token('SOME')
            with self._option():
                self._token('ANY')
            self._error('expecting one of: ANY SOME')
    # --- EXISTS / UNIQUE / NORMALIZED / MATCH / OVERLAPS / DISTINCT /
    #     MEMBER / SUBMULTISET / SET / type predicates ------------------------
    # (generated parser rules)
    @graken()
    def _exists_predicate_(self):
        # EXISTS <subquery>
        self._token('EXISTS')
        self._subquery_()

    @graken()
    def _unique_predicate_(self):
        # UNIQUE <subquery>
        self._token('UNIQUE')
        self._subquery_()

    @graken()
    def _normalized_predicate_(self):
        self._string_value_expression_()
        self._normalized_predicate_part_2_()

    @graken()
    def _normalized_predicate_part_2_(self):
        # IS [NOT] NORMALIZED
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('NORMALIZED')

    @graken()
    def _match_predicate_(self):
        self._row_value_predicand_()
        self._match_predicate_part_2_()

    @graken()
    def _match_predicate_part_2_(self):
        # MATCH [UNIQUE] [SIMPLE|PARTIAL|FULL] <subquery>
        self._token('MATCH')
        with self._optional():
            self._token('UNIQUE')
        with self._optional():
            with self._choice():
                with self._option():
                    self._token('SIMPLE')
                with self._option():
                    self._token('PARTIAL')
                with self._option():
                    self._token('FULL')
                self._error('expecting one of: FULL PARTIAL SIMPLE')
        self._subquery_()

    @graken()
    def _overlaps_predicate_(self):
        self._overlaps_predicate_part_1_()
        self._overlaps_predicate_part_2_()

    @graken()
    def _overlaps_predicate_part_1_(self):
        self._row_value_predicand_()

    @graken()
    def _overlaps_predicate_part_2_(self):
        self._token('OVERLAPS')
        self._row_value_predicand_()

    @graken()
    def _distinct_predicate_(self):
        self._row_value_predicand_()
        self._distinct_predicate_part_2_()

    @graken()
    def _distinct_predicate_part_2_(self):
        # IS [NOT] DISTINCT FROM <row value predicand>
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('DISTINCT')
        self._token('FROM')
        self._row_value_predicand_()

    @graken()
    def _member_predicate_(self):
        self._row_value_predicand_()
        self._member_predicate_part_2_()

    @graken()
    def _member_predicate_part_2_(self):
        # [NOT] MEMBER [OF] <multiset value expression>
        with self._optional():
            self._token('NOT')
        self._token('MEMBER')
        with self._optional():
            self._token('OF')
        self._multiset_value_expression_()

    @graken()
    def _submultiset_predicate_(self):
        self._row_value_predicand_()
        self._submultiset_predicate_part_2_()

    @graken()
    def _submultiset_predicate_part_2_(self):
        # [NOT] SUBMULTISET [OF] <multiset value expression>
        with self._optional():
            self._token('NOT')
        self._token('SUBMULTISET')
        with self._optional():
            self._token('OF')
        self._multiset_value_expression_()

    @graken()
    def _set_predicate_(self):
        self._row_value_predicand_()
        self._set_predicate_part_2_()

    @graken()
    def _set_predicate_part_2_(self):
        # IS [NOT] A SET
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('A')
        self._token('SET')

    @graken()
    def _type_predicate_(self):
        self._row_value_predicand_()
        self._type_predicate_part_2_()

    @graken()
    def _type_predicate_part_2_(self):
        # IS [NOT] OF ( <type list> )
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('OF')
        self._left_paren_()
        self._type_list_()
        self._right_paren_()

    @graken()
    def _type_list_(self):
        # Comma-separated, one-or-more user-defined type specifications.
        def sep0():
            self._token(',')
        def block0():
            self._user_defined_type_specification_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _user_defined_type_specification_(self):
        with self._choice():
            with self._option():
                self._schema_qualified_name_()
            with self._option():
                self._exclusive_user_defined_type_specification_()
            self._error('no available options')

    @graken()
    def _exclusive_user_defined_type_specification_(self):
        # ONLY <schema qualified name>
        self._token('ONLY')
        self._schema_qualified_name_()
    # --- search condition and interval/datetime field qualifiers -------------
    # (generated parser rules)
    @graken()
    def _search_condition_(self):
        # A search condition is just a boolean value expression.
        self._boolean_value_expression_()

    @graken()
    def _interval_qualifier_(self):
        # <start field> TO <end field> | <single datetime field>
        with self._choice():
            with self._option():
                self._start_field_()
                self._token('TO')
                self._end_field_()
            with self._option():
                self._single_datetime_field_()
            self._error('no available options')

    @graken()
    def _start_field_(self):
        # Non-second field with optional ( <precision> ).
        self._non_second_primary_datetime_field_()
        with self._optional():
            self._left_paren_()
            self._precision_()
            self._right_paren_()

    @graken()
    def _end_field_(self):
        # Non-second field, or SECOND with optional ( <precision> ).
        with self._choice():
            with self._option():
                self._non_second_primary_datetime_field_()
            with self._option():
                self._token('SECOND')
                with self._optional():
                    self._left_paren_()
                    self._precision_()
                    self._right_paren_()
            self._error('expecting one of: SECOND')

    @graken()
    def _single_datetime_field_(self):
        # SECOND may take ( <precision> [, <fractional precision>] ).
        with self._choice():
            with self._option():
                self._non_second_primary_datetime_field_()
                with self._optional():
                    self._left_paren_()
                    self._precision_()
                    self._right_paren_()
            with self._option():
                self._token('SECOND')
                with self._optional():
                    self._left_paren_()
                    self._precision_()
                    with self._optional():
                        self._comma_()
                        self._precision_()
                    self._right_paren_()
            self._error('expecting one of: SECOND')

    @graken()
    def _primary_datetime_field_(self):
        with self._choice():
            with self._option():
                self._non_second_primary_datetime_field_()
            with self._option():
                self._token('SECOND')
            self._error('expecting one of: SECOND')

    @graken()
    def _non_second_primary_datetime_field_(self):
        # YEAR | MONTH | DAY | HOUR | MINUTE
        with self._choice():
            with self._option():
                self._token('YEAR')
            with self._option():
                self._token('MONTH')
            with self._option():
                self._token('DAY')
            with self._option():
                self._token('HOUR')
            with self._option():
                self._token('MINUTE')
            self._error('expecting one of: DAY HOUR MINUTE MONTH YEAR')
    # --- language clause, path specification, routine invocation -------------
    # (generated parser rules)
    @graken()
    def _language_clause_(self):
        # LANGUAGE <language name>
        self._token('LANGUAGE')
        self._language_name_()

    @graken()
    def _language_name_(self):
        # The standard's fixed set of host-language names.
        with self._choice():
            with self._option():
                self._token('ADA')
            with self._option():
                self._token('C')
            with self._option():
                self._token('COBOL')
            with self._option():
                self._token('FORTRAN')
            with self._option():
                self._token('M')
            with self._option():
                self._token('MUMPS')
            with self._option():
                self._token('PASCAL')
            with self._option():
                self._token('PLI')
            with self._option():
                self._token('SQL')
            self._error('expecting one of: ADA C COBOL FORTRAN M MUMPS PASCAL PLI SQL')

    @graken()
    def _path_specification_(self):
        # PATH <schema name list>
        self._token('PATH')
        self._schema_name_list_()

    @graken()
    def _schema_name_list_(self):
        # Comma-separated, one-or-more schema names.
        def sep0():
            self._token(',')
        def block0():
            self._schema_name_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _routine_invocation_(self):
        # <routine name> <SQL argument list>
        self._schema_qualified_name_()
        self._sql_argument_list_()

    @graken()
    def _sql_argument_list_(self):
        # ( [ <sql argument> {, <sql argument>} ] ) -- argument list may be empty.
        self._left_paren_()
        with self._optional():
            def sep0():
                self._token(',')
            def block0():
                self._sql_argument_()
            self._positive_closure(block0, prefix=sep0)
        self._right_paren_()

    @graken()
    def _sql_argument_(self):
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._generalized_expression_()
            with self._option():
                self._target_specification_()
            self._error('no available options')

    @graken()
    def _generalized_expression_(self):
        # <value expression> AS <type name>
        self._value_expression_()
        self._token('AS')
        self._schema_qualified_name_()
    # --- routine designators, member names, constraint name/characteristics --
    # (generated parser rules)
    @graken()
    def _specific_routine_designator_(self):
        # SPECIFIC <routine type> <name> | <routine type> <member name> [FOR <name>]
        with self._choice():
            with self._option():
                self._token('SPECIFIC')
                self._routine_type_()
                self._schema_qualified_name_()
            with self._option():
                self._routine_type_()
                self._member_name_()
                with self._optional():
                    self._token('FOR')
                    self._schema_qualified_name_()
            self._error('no available options')

    @graken()
    def _routine_type_(self):
        # ROUTINE | FUNCTION | PROCEDURE | [INSTANCE|STATIC|CONSTRUCTOR] METHOD
        with self._choice():
            with self._option():
                self._token('ROUTINE')
            with self._option():
                self._token('FUNCTION')
            with self._option():
                self._token('PROCEDURE')
            with self._option():
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('INSTANCE')
                        with self._option():
                            self._token('STATIC')
                        with self._option():
                            self._token('CONSTRUCTOR')
                        self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
                self._token('METHOD')
            self._error('expecting one of: CONSTRUCTOR FUNCTION INSTANCE METHOD PROCEDURE ROUTINE STATIC')

    @graken()
    def _member_name_(self):
        self._member_name_alternatives_()
        with self._optional():
            self._data_type_list_()

    @graken()
    def _member_name_alternatives_(self):
        with self._choice():
            with self._option():
                self._schema_qualified_name_()
            with self._option():
                self._identifier_()
            self._error('no available options')

    @graken()
    def _data_type_list_(self):
        # ( [ <data type> {, <data type>} ] ) -- may be empty.
        self._left_paren_()
        with self._optional():
            def sep0():
                self._token(',')
            def block0():
                self._data_type_()
            self._positive_closure(block0, prefix=sep0)
        self._right_paren_()

    @graken()
    def _collate_clause_(self):
        # COLLATE <collation name>
        self._token('COLLATE')
        self._schema_qualified_name_()

    @graken()
    def _constraint_name_definition_(self):
        # CONSTRAINT <constraint name>
        self._token('CONSTRAINT')
        self._schema_qualified_name_()

    @graken()
    def _constraint_characteristics_(self):
        # <check time> [[NOT] DEFERRABLE] | [NOT] DEFERRABLE [<check time>]
        with self._choice():
            with self._option():
                self._constraint_check_time_()
                with self._optional():
                    with self._optional():
                        self._token('NOT')
                    self._token('DEFERRABLE')
            with self._option():
                with self._optional():
                    self._token('NOT')
                self._token('DEFERRABLE')
                with self._optional():
                    self._constraint_check_time_()
            self._error('expecting one of: DEFERRABLE NOT')

    @graken()
    def _constraint_check_time_(self):
        # INITIALLY DEFERRED | INITIALLY IMMEDIATE
        with self._choice():
            with self._option():
                self._token('INITIALLY')
                self._token('DEFERRED')
            with self._option():
                self._token('INITIALLY')
                self._token('IMMEDIATE')
            self._error('expecting one of: INITIALLY')
    # --- aggregate / set functions and ordered-set (WITHIN GROUP) functions --
    # (generated parser rules)
    @graken()
    def _aggregate_function_(self):
        # COUNT(*), general set fn, binary set fn, or ordered-set fn,
        # each with an optional FILTER clause.
        with self._choice():
            with self._option():
                self._token('COUNT')
                self._left_paren_()
                self._asterisk_()
                self._right_paren_()
                with self._optional():
                    self._filter_clause_()
            with self._option():
                self._general_set_function_()
                with self._optional():
                    self._filter_clause_()
            with self._option():
                self._binary_set_function_()
                with self._optional():
                    self._filter_clause_()
            with self._option():
                self._ordered_set_function_()
                with self._optional():
                    self._filter_clause_()
            self._error('no available options')

    @graken()
    def _general_set_function_(self):
        # <op> ( [DISTINCT|ALL] <value expression> )
        self._computational_operation_()
        self._left_paren_()
        with self._optional():
            self._set_quantifier_()
        self._value_expression_()
        self._right_paren_()

    @graken()
    def _computational_operation_(self):
        with self._choice():
            with self._option():
                self._token('AVG')
            with self._option():
                self._token('MAX')
            with self._option():
                self._token('MIN')
            with self._option():
                self._token('SUM')
            with self._option():
                self._token('EVERY')
            with self._option():
                self._token('ANY')
            with self._option():
                self._token('SOME')
            with self._option():
                self._token('COUNT')
            with self._option():
                self._token('STDDEV_POP')
            with self._option():
                self._token('STDDEV_SAMP')
            with self._option():
                self._token('VAR_SAMP')
            with self._option():
                self._token('VAR_POP')
            with self._option():
                self._token('COLLECT')
            with self._option():
                self._token('FUSION')
            with self._option():
                self._token('INTERSECTION')
            self._error('expecting one of: ANY AVG COLLECT COUNT EVERY FUSION INTERSECTION MAX MIN SOME STDDEV_POP STDDEV_SAMP SUM VAR_POP VAR_SAMP')

    @graken()
    def _set_quantifier_(self):
        with self._choice():
            with self._option():
                self._token('DISTINCT')
            with self._option():
                self._token('ALL')
            self._error('expecting one of: ALL DISTINCT')

    @graken()
    def _filter_clause_(self):
        # FILTER ( WHERE <search condition> )
        self._token('FILTER')
        self._left_paren_()
        self._token('WHERE')
        self._search_condition_()
        self._right_paren_()

    @graken()
    def _binary_set_function_(self):
        # <type> ( <numeric expr> , <numeric expr> )
        self._binary_set_function_type_()
        self._left_paren_()
        self._numeric_value_expression_()
        self._comma_()
        self._numeric_value_expression_()
        self._right_paren_()

    @graken()
    def _binary_set_function_type_(self):
        with self._choice():
            with self._option():
                self._token('COVAR_POP')
            with self._option():
                self._token('COVAR_SAMP')
            with self._option():
                self._token('CORR')
            with self._option():
                self._token('REGR_SLOPE')
            with self._option():
                self._token('REGR_INTERCEPT')
            with self._option():
                self._token('REGR_COUNT')
            with self._option():
                self._token('REGR_R2')
            with self._option():
                self._token('REGR_AVGX')
            with self._option():
                self._token('REGR_AVGY')
            with self._option():
                self._token('REGR_SXX')
            with self._option():
                self._token('REGR_SYY')
            with self._option():
                self._token('REGR_SXY')
            self._error('expecting one of: CORR COVAR_POP COVAR_SAMP REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY')

    @graken()
    def _ordered_set_function_(self):
        with self._choice():
            with self._option():
                self._hypothetical_set_function_()
            with self._option():
                self._inverse_distribution_function_()
            self._error('no available options')

    @graken()
    def _hypothetical_set_function_(self):
        # <rank fn> ( <value list> ) WITHIN GROUP ( ORDER BY ... )
        self._rank_function_type_()
        self._left_paren_()
        self._hypothetical_set_function_value_expression_list_()
        self._right_paren_()
        self._within_group_specification_()

    @graken()
    def _within_group_specification_(self):
        # WITHIN GROUP ( <order by clause> )
        self._token('WITHIN')
        self._token('GROUP')
        self._left_paren_()
        self._order_by_clause_()
        self._right_paren_()

    @graken()
    def _hypothetical_set_function_value_expression_list_(self):
        # Comma-separated, one-or-more value expressions.
        def sep0():
            self._token(',')
        def block0():
            self._value_expression_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _inverse_distribution_function_(self):
        # PERCENTILE_CONT/DISC ( <numeric expr> ) WITHIN GROUP ( ... )
        self._inverse_distribution_function_type_()
        self._left_paren_()
        self._numeric_value_expression_()
        self._right_paren_()
        self._within_group_specification_()

    @graken()
    def _inverse_distribution_function_type_(self):
        with self._choice():
            with self._option():
                self._token('PERCENTILE_CONT')
            with self._option():
                self._token('PERCENTILE_DISC')
            self._error('expecting one of: PERCENTILE_CONT PERCENTILE_DISC')

    @graken()
    def _sort_specification_list_(self):
        # Comma-separated, one-or-more sort specifications.
        def sep0():
            self._token(',')
        def block0():
            self._sort_specification_()
        self._positive_closure(block0, prefix=sep0)
    # --- single sort specification: expr [ASC|DESC] [NULLS FIRST|LAST] -------
    # (generated parser rules)
    @graken()
    def _sort_specification_(self):
        self._value_expression_()
        with self._optional():
            self._ordering_specification_()
        with self._optional():
            self._null_ordering_()

    @graken()
    def _ordering_specification_(self):
        with self._choice():
            with self._option():
                self._token('ASC')
            with self._option():
                self._token('DESC')
            self._error('expecting one of: ASC DESC')

    @graken()
    def _null_ordering_(self):
        # NULLS FIRST | NULLS LAST
        with self._choice():
            with self._option():
                self._token('NULLS')
                self._token('FIRST')
            with self._option():
                self._token('NULLS')
                self._token('LAST')
            self._error('expecting one of: NULLS')
    # --- CREATE SCHEMA / DROP SCHEMA ----------------------------------------
    # (generated parser rules)
    @graken()
    def _schema_definition_(self):
        # CREATE SCHEMA <name clause> [charset/path] [<schema element>...]
        self._token('CREATE')
        self._token('SCHEMA')
        self._schema_name_clause_()
        with self._optional():
            self._schema_character_set_or_path_()
        with self._optional():
            def block0():
                self._schema_element_()
            self._positive_closure(block0)

    @graken()
    def _schema_character_set_or_path_(self):
        # Character-set spec, path spec, or both in either order.
        with self._choice():
            with self._option():
                self._schema_character_set_specification_()
            with self._option():
                self._schema_path_specification_()
            with self._option():
                self._schema_character_set_specification_()
                self._schema_path_specification_()
            with self._option():
                self._schema_path_specification_()
                self._schema_character_set_specification_()
            self._error('no available options')

    @graken()
    def _schema_name_clause_(self):
        # <name> | AUTHORIZATION <id> | <name> AUTHORIZATION <id>
        with self._choice():
            with self._option():
                self._schema_name_()
            with self._option():
                self._token('AUTHORIZATION')
                self._identifier_()
            with self._option():
                self._schema_name_()
                self._token('AUTHORIZATION')
                self._identifier_()
            self._error('no available options')

    @graken()
    def _schema_character_set_specification_(self):
        # DEFAULT CHARACTER SET <character set name>
        self._token('DEFAULT')
        self._token('CHARACTER')
        self._token('SET')
        self._character_set_name_()

    @graken()
    def _schema_path_specification_(self):
        self._path_specification_()

    @graken()
    def _schema_element_(self):
        # Any DDL element allowed inside CREATE SCHEMA.
        with self._choice():
            with self._option():
                self._table_definition_()
            with self._option():
                self._view_definition_()
            with self._option():
                self._domain_definition_()
            with self._option():
                self._character_set_definition_()
            with self._option():
                self._collation_definition_()
            with self._option():
                self._transliteration_definition_()
            with self._option():
                self._assertion_definition_()
            with self._option():
                self._trigger_definition_()
            with self._option():
                self._user_defined_type_definition_()
            with self._option():
                self._user_defined_cast_definition_()
            with self._option():
                self._user_defined_ordering_definition_()
            with self._option():
                self._transform_definition_()
            with self._option():
                self._schema_routine_()
            with self._option():
                self._sequence_generator_definition_()
            with self._option():
                self._grant_statement_()
            with self._option():
                self._role_definition_()
            self._error('no available options')

    @graken()
    def _drop_schema_statement_(self):
        # DROP SCHEMA <name> CASCADE|RESTRICT
        self._token('DROP')
        self._token('SCHEMA')
        self._schema_name_()
        self._drop_behavior_()

    @graken()
    def _drop_behavior_(self):
        with self._choice():
            with self._option():
                self._token('CASCADE')
            with self._option():
                self._token('RESTRICT')
            self._error('expecting one of: CASCADE RESTRICT')
    # --- CREATE TABLE and its element/LIKE/AS-subquery clauses ---------------
    # (generated parser rules)
    @graken()
    def _table_definition_(self):
        # CREATE [<scope>] TABLE <name> <contents> [ON COMMIT <action> ROWS]
        self._token('CREATE')
        with self._optional():
            self._table_scope_()
        self._token('TABLE')
        self._table_name_()
        self._table_contents_source_()
        with self._optional():
            self._token('ON')
            self._token('COMMIT')
            self._table_commit_action_()
            self._token('ROWS')

    @graken()
    def _table_contents_source_(self):
        # Element list, typed-table clause, or AS <subquery>.
        with self._choice():
            with self._option():
                self._table_element_list_()
            with self._option():
                self._typed_table_clause_()
            with self._option():
                self._as_subquery_clause_()
            self._error('no available options')

    @graken()
    def _table_scope_(self):
        # GLOBAL|LOCAL TEMPORARY
        self._global_or_local_()
        self._token('TEMPORARY')

    @graken()
    def _global_or_local_(self):
        with self._choice():
            with self._option():
                self._token('GLOBAL')
            with self._option():
                self._token('LOCAL')
            self._error('expecting one of: GLOBAL LOCAL')

    @graken()
    def _table_commit_action_(self):
        # PRESERVE | DELETE (rows on commit)
        with self._choice():
            with self._option():
                self._token('PRESERVE')
            with self._option():
                self._token('DELETE')
            self._error('expecting one of: DELETE PRESERVE')

    @graken()
    def _table_element_list_(self):
        # ( <table element> {, <table element>} )
        self._left_paren_()
        def sep0():
            self._token(',')
        def block0():
            self._table_element_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()

    @graken()
    def _table_element_(self):
        with self._choice():
            with self._option():
                self._column_definition_()
            with self._option():
                self._table_constraint_definition_()
            with self._option():
                self._like_clause_()
            with self._option():
                self._self_referencing_column_specification_()
            with self._option():
                self._column_options_()
            self._error('no available options')

    @graken()
    def _typed_table_clause_(self):
        # OF <type name> [<subtable clause>] [<table element list>]
        self._token('OF')
        self._schema_qualified_name_()
        with self._optional():
            self._subtable_clause_()
        with self._optional():
            self._table_element_list_()

    @graken()
    def _self_referencing_column_specification_(self):
        # REF IS <identifier> <reference generation>
        self._token('REF')
        self._token('IS')
        self._identifier_()
        self._reference_generation_()

    @graken()
    def _reference_generation_(self):
        # SYSTEM GENERATED | USER GENERATED | DERIVED
        with self._choice():
            with self._option():
                self._token('SYSTEM')
                self._token('GENERATED')
            with self._option():
                self._token('USER')
                self._token('GENERATED')
            with self._option():
                self._token('DERIVED')
            self._error('expecting one of: DERIVED SYSTEM USER')

    @graken()
    def _column_options_(self):
        # <column> WITH OPTIONS <column option list>
        self._identifier_()
        self._token('WITH')
        self._token('OPTIONS')
        self._column_option_list_()

    @graken()
    def _column_option_list_(self):
        # [scope] [default] [constraint...] -- all parts optional.
        with self._optional():
            self._scope_clause_()
        with self._optional():
            self._default_clause_()
        with self._optional():
            def block0():
                self._column_constraint_definition_()
            self._positive_closure(block0)

    @graken()
    def _subtable_clause_(self):
        # UNDER <supertable name>
        self._token('UNDER')
        self._table_name_()

    @graken()
    def _like_clause_(self):
        # LIKE <table name> [<like options>]
        self._token('LIKE')
        self._table_name_()
        with self._optional():
            self._like_options_()

    @graken()
    def _like_options_(self):
        with self._choice():
            with self._option():
                self._identity_option_()
            with self._option():
                self._column_default_option_()
            self._error('no available options')

    @graken()
    def _identity_option_(self):
        # INCLUDING|EXCLUDING IDENTITY
        with self._choice():
            with self._option():
                self._token('INCLUDING')
                self._token('IDENTITY')
            with self._option():
                self._token('EXCLUDING')
                self._token('IDENTITY')
            self._error('expecting one of: EXCLUDING INCLUDING')

    @graken()
    def _column_default_option_(self):
        # INCLUDING|EXCLUDING DEFAULTS
        with self._choice():
            with self._option():
                self._token('INCLUDING')
                self._token('DEFAULTS')
            with self._option():
                self._token('EXCLUDING')
                self._token('DEFAULTS')
            self._error('expecting one of: EXCLUDING INCLUDING')

    @graken()
    def _as_subquery_clause_(self):
        # [( <column list> )] AS <subquery> WITH [NO] DATA
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        self._token('AS')
        self._subquery_()
        self._with_or_without_data_()

    @graken()
    def _with_or_without_data_(self):
        # WITH NO DATA | WITH DATA
        with self._choice():
            with self._option():
                self._token('WITH')
                self._token('NO')
                self._token('DATA')
            with self._option():
                self._token('WITH')
                self._token('DATA')
            self._error('expecting one of: WITH')
    # --- column definitions, identity/generation clauses, defaults,
    #     and table/column constraints ----------------------------------------
    # (generated parser rules)
    @graken()
    def _column_definition_(self):
        # <name> [type|domain] [scope check] [default|identity|generation]
        # [<constraint>...] [COLLATE ...]
        self._identifier_()
        with self._optional():
            self._data_type_or_domain_name_()
        with self._optional():
            self._reference_scope_check_()
        with self._optional():
            with self._choice():
                with self._option():
                    self._default_clause_()
                with self._option():
                    self._identity_column_specification_()
                with self._option():
                    self._generation_clause_()
                self._error('no available options')
        with self._optional():
            def block1():
                self._column_constraint_definition_()
            self._positive_closure(block1)
        with self._optional():
            self._collate_clause_()

    @graken()
    def _data_type_or_domain_name_(self):
        with self._choice():
            with self._option():
                self._data_type_()
            with self._option():
                self._schema_qualified_name_()
            self._error('no available options')

    @graken()
    def _column_constraint_definition_(self):
        # [CONSTRAINT <name>] <constraint> [<characteristics>]
        with self._optional():
            self._constraint_name_definition_()
        self._column_constraint_()
        with self._optional():
            self._constraint_characteristics_()

    @graken()
    def _column_constraint_(self):
        # NOT NULL | UNIQUE/PRIMARY KEY | REFERENCES ... | CHECK ...
        with self._choice():
            with self._option():
                self._token('NOT')
                self._token('NULL')
            with self._option():
                self._unique_specification_()
            with self._option():
                self._references_specification_()
            with self._option():
                self._check_constraint_definition_()
            self._error('expecting one of: NOT')

    @graken()
    def _identity_column_specification_(self):
        # GENERATED ALWAYS|BY DEFAULT AS IDENTITY [( <options> )]
        self._token('GENERATED')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('ALWAYS')
                with self._option():
                    self._token('BY')
                    self._token('DEFAULT')
                self._error('expecting one of: ALWAYS BY')
        self._token('AS')
        self._token('IDENTITY')
        with self._optional():
            self._left_paren_()
            self._common_sequence_generator_options_()
            self._right_paren_()

    @graken()
    def _generation_clause_(self):
        # GENERATED ALWAYS AS ( <value expression> )
        self._generation_rule_()
        self._token('AS')
        self._generation_expression_()

    @graken()
    def _generation_rule_(self):
        self._token('GENERATED')
        self._token('ALWAYS')

    @graken()
    def _generation_expression_(self):
        self._left_paren_()
        self._value_expression_()
        self._right_paren_()

    @graken()
    def _default_clause_(self):
        # DEFAULT <default option>
        self._token('DEFAULT')
        self._default_option_()

    @graken()
    def _default_option_(self):
        with self._choice():
            with self._option():
                self._literal_()
            with self._option():
                self._datetime_value_function_()
            with self._option():
                self._token('USER')
            with self._option():
                self._token('CURRENT_USER')
            with self._option():
                self._token('CURRENT_ROLE')
            with self._option():
                self._token('SESSION_USER')
            with self._option():
                self._token('SYSTEM_USER')
            with self._option():
                self._token('CURRENT_PATH')
            with self._option():
                self._implicitly_typed_value_specification_()
            self._error('expecting one of: CURRENT_PATH CURRENT_ROLE CURRENT_USER SESSION_USER SYSTEM_USER USER')

    @graken()
    def _table_constraint_definition_(self):
        # [CONSTRAINT <name>] <table constraint> [<characteristics>]
        with self._optional():
            self._constraint_name_definition_()
        self._table_constraint_()
        with self._optional():
            self._constraint_characteristics_()

    @graken()
    def _table_constraint_(self):
        with self._choice():
            with self._option():
                self._unique_constraint_definition_()
            with self._option():
                self._referential_constraint_definition_()
            with self._option():
                self._check_constraint_definition_()
            self._error('no available options')
    # --- unique / referential / check constraint definitions -----------------
    # (generated parser rules)
    @graken()
    def _unique_constraint_definition_(self):
        # UNIQUE/PRIMARY KEY ( <columns> ) | UNIQUE ( VALUE )
        with self._choice():
            with self._option():
                self._unique_specification_()
                self._left_paren_()
                self._column_name_list_()
                self._right_paren_()
            with self._option():
                self._token('UNIQUE')
                self._token('(')
                self._token('VALUE')
                self._token(')')
            self._error('expecting one of: UNIQUE')

    @graken()
    def _unique_specification_(self):
        # UNIQUE | PRIMARY KEY
        with self._choice():
            with self._option():
                self._token('UNIQUE')
            with self._option():
                self._token('PRIMARY')
                self._token('KEY')
            self._error('expecting one of: PRIMARY UNIQUE')

    @graken()
    def _referential_constraint_definition_(self):
        # FOREIGN KEY ( <columns> ) <references specification>
        self._token('FOREIGN')
        self._token('KEY')
        self._left_paren_()
        self._column_name_list_()
        self._right_paren_()
        self._references_specification_()

    @graken()
    def _references_specification_(self):
        # REFERENCES <table and columns> [MATCH <type>] [<triggered action>]
        self._token('REFERENCES')
        self._referenced_table_and_columns_()
        with self._optional():
            self._token('MATCH')
            self._match_type_()
        with self._optional():
            self._referential_triggered_action_()

    @graken()
    def _match_type_(self):
        with self._choice():
            with self._option():
                self._token('FULL')
            with self._option():
                self._token('PARTIAL')
            with self._option():
                self._token('SIMPLE')
            self._error('expecting one of: FULL PARTIAL SIMPLE')

    @graken()
    def _referenced_table_and_columns_(self):
        # <table name> [( <column name list> )]
        self._table_name_()
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()

    @graken()
    def _referential_triggered_action_(self):
        # Update rule, delete rule, or both in either order.
        with self._choice():
            with self._option():
                self._update_rule_()
                with self._optional():
                    self._delete_rule_()
            with self._option():
                self._delete_rule_()
                with self._optional():
                    self._update_rule_()
            self._error('no available options')

    @graken()
    def _update_rule_(self):
        # ON UPDATE <referential action>
        self._token('ON')
        self._token('UPDATE')
        self._referential_action_()

    @graken()
    def _delete_rule_(self):
        # ON DELETE <referential action>
        self._token('ON')
        self._token('DELETE')
        self._referential_action_()

    @graken()
    def _referential_action_(self):
        # CASCADE | SET NULL | SET DEFAULT | RESTRICT | NO ACTION
        with self._choice():
            with self._option():
                self._token('CASCADE')
            with self._option():
                self._token('SET')
                self._token('NULL')
            with self._option():
                self._token('SET')
                self._token('DEFAULT')
            with self._option():
                self._token('RESTRICT')
            with self._option():
                self._token('NO')
                self._token('ACTION')
            self._error('expecting one of: CASCADE NO RESTRICT SET')

    @graken()
    def _check_constraint_definition_(self):
        # CHECK ( <search condition> )
        self._token('CHECK')
        self._left_paren_()
        self._search_condition_()
        self._right_paren_()
@graken()
def _alter_table_statement_(self):
self._token('ALTER')
self._token('TABLE')
self._table_name_()
self._alter_table_action_()
@graken()
def _alter_table_action_(self):
with self._choice():
with self._option():
self._add_column_definition_()
with self._option():
self._alter_column_definition_()
with self._option():
self._drop_column_definition_()
with self._option():
self._add_table_constraint_definition_()
with self._option():
self._drop_table_constraint_definition_()
self._error('no available options')
    @graken()
    def _add_column_definition_(self):
        """Parse ``ADD [COLUMN] <column definition>``."""
        self._token('ADD')
        with self._optional():
            self._token('COLUMN')
        self._column_definition_()
    @graken()
    def _alter_column_definition_(self):
        """Parse ``ALTER [COLUMN] <identifier> <alter column action>``."""
        self._token('ALTER')
        with self._optional():
            self._token('COLUMN')
        self._identifier_()
        self._alter_column_action_()
    @graken()
    def _alter_column_action_(self):
        """Parse a set/drop default, add/drop scope, or identity-column action."""
        with self._choice():
            with self._option():
                self._set_column_default_clause_()
            with self._option():
                self._drop_column_default_clause_()
            with self._option():
                self._add_column_scope_clause_()
            with self._option():
                self._drop_column_scope_clause_()
            with self._option():
                self._alter_identity_column_specification_()
            self._error('no available options')
    @graken()
    def _set_column_default_clause_(self):
        """Parse ``SET <default clause>``."""
        self._token('SET')
        self._default_clause_()
    @graken()
    def _drop_column_default_clause_(self):
        """Parse ``DROP DEFAULT``."""
        self._token('DROP')
        self._token('DEFAULT')
    @graken()
    def _add_column_scope_clause_(self):
        """Parse ``ADD <scope clause>``."""
        self._token('ADD')
        self._scope_clause_()
    @graken()
    def _drop_column_scope_clause_(self):
        """Parse ``DROP SCOPE <drop behavior>``."""
        self._token('DROP')
        self._token('SCOPE')
        self._drop_behavior_()
    @graken()
    def _alter_identity_column_specification_(self):
        """Parse one or more identity-column options."""
        def block0():
            self._alter_identity_column_option_()
        self._positive_closure(block0)
    @graken()
    def _alter_identity_column_option_(self):
        """Parse a sequence restart option or ``SET <basic sequence generator option>``."""
        with self._choice():
            with self._option():
                self._alter_sequence_generator_restart_option_()
            with self._option():
                self._token('SET')
                self._basic_sequence_generator_option_()
            self._error('no available options')
    @graken()
    def _drop_column_definition_(self):
        """Parse ``DROP [COLUMN] <identifier> <drop behavior>``."""
        self._token('DROP')
        with self._optional():
            self._token('COLUMN')
        self._identifier_()
        self._drop_behavior_()
    @graken()
    def _add_table_constraint_definition_(self):
        """Parse ``ADD <table constraint definition>``."""
        self._token('ADD')
        self._table_constraint_definition_()
    @graken()
    def _drop_table_constraint_definition_(self):
        """Parse ``DROP CONSTRAINT <name> <drop behavior>``."""
        self._token('DROP')
        self._token('CONSTRAINT')
        self._schema_qualified_name_()
        self._drop_behavior_()
    @graken()
    def _drop_table_statement_(self):
        """Parse ``DROP TABLE <table name> <drop behavior>``."""
        self._token('DROP')
        self._token('TABLE')
        self._table_name_()
        self._drop_behavior_()
    @graken()
    def _view_definition_(self):
        """Parse ``CREATE [RECURSIVE] VIEW <name> <spec> AS <query> [WITH [<levels>] CHECK OPTION]``."""
        self._token('CREATE')
        with self._optional():
            self._token('RECURSIVE')
        self._token('VIEW')
        self._table_name_()
        self._view_specification_()
        self._token('AS')
        self._query_expression_()
        with self._optional():
            self._token('WITH')
            with self._optional():
                self._levels_clause_()
            self._token('CHECK')
            self._token('OPTION')
    @graken()
    def _view_specification_(self):
        """Parse a regular or referenceable view specification."""
        with self._choice():
            with self._option():
                self._regular_view_specification_()
            with self._option():
                self._referenceable_view_specification_()
            self._error('no available options')
    @graken()
    def _regular_view_specification_(self):
        """Parse an optional parenthesized view column name list."""
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
    @graken()
    def _referenceable_view_specification_(self):
        """Parse ``OF <type name> [<subview clause>] [<view element list>]``."""
        self._token('OF')
        self._schema_qualified_name_()
        with self._optional():
            self._subview_clause_()
        with self._optional():
            self._view_element_list_()
    @graken()
    def _subview_clause_(self):
        """Parse ``UNDER <table name>``."""
        self._token('UNDER')
        self._table_name_()
    @graken()
    def _view_element_list_(self):
        """Parse a parenthesized, comma-separated list of view elements."""
        self._left_paren_()
        def sep0():
            self._token(',')
        def block0():
            self._view_element_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()
    @graken()
    def _view_element_(self):
        """Parse a self-referencing column specification or a view column option."""
        with self._choice():
            with self._option():
                self._self_referencing_column_specification_()
            with self._option():
                self._view_column_option_()
            self._error('no available options')
    @graken()
    def _view_column_option_(self):
        """Parse ``<identifier> WITH OPTIONS <scope clause>``."""
        self._identifier_()
        self._token('WITH')
        self._token('OPTIONS')
        self._scope_clause_()
    @graken()
    def _levels_clause_(self):
        """Parse ``CASCADED`` or ``LOCAL``."""
        with self._choice():
            with self._option():
                self._token('CASCADED')
            with self._option():
                self._token('LOCAL')
            self._error('expecting one of: CASCADED LOCAL')
    @graken()
    def _drop_view_statement_(self):
        """Parse ``DROP VIEW <table name> <drop behavior>``."""
        self._token('DROP')
        self._token('VIEW')
        self._table_name_()
        self._drop_behavior_()
    @graken()
    def _domain_definition_(self):
        """Parse ``CREATE DOMAIN <name> [AS] <data type> [<default>] [<constraints>] [<collate>]``."""
        self._token('CREATE')
        self._token('DOMAIN')
        self._schema_qualified_name_()
        with self._optional():
            self._token('AS')
        self._data_type_()
        with self._optional():
            self._default_clause_()
        with self._optional():
            def block0():
                self._domain_constraint_()
            self._positive_closure(block0)
        with self._optional():
            self._collate_clause_()
    @graken()
    def _domain_constraint_(self):
        """Parse ``[<constraint name>] <check constraint> [<constraint characteristics>]``."""
        with self._optional():
            self._constraint_name_definition_()
        self._check_constraint_definition_()
        with self._optional():
            self._constraint_characteristics_()
    @graken()
    def _alter_domain_statement_(self):
        """Parse ``ALTER DOMAIN <name> <alter domain action>``."""
        self._token('ALTER')
        self._token('DOMAIN')
        self._schema_qualified_name_()
        self._alter_domain_action_()
    @graken()
    def _alter_domain_action_(self):
        """Parse a set/drop default or add/drop constraint domain action."""
        with self._choice():
            with self._option():
                self._set_domain_default_clause_()
            with self._option():
                self._drop_domain_default_clause_()
            with self._option():
                self._add_domain_constraint_definition_()
            with self._option():
                self._drop_domain_constraint_definition_()
            self._error('no available options')
    @graken()
    def _set_domain_default_clause_(self):
        """Parse ``SET <default clause>``."""
        self._token('SET')
        self._default_clause_()
    @graken()
    def _drop_domain_default_clause_(self):
        """Parse ``DROP DEFAULT``."""
        self._token('DROP')
        self._token('DEFAULT')
    @graken()
    def _add_domain_constraint_definition_(self):
        """Parse ``ADD <domain constraint>``."""
        self._token('ADD')
        self._domain_constraint_()
    @graken()
    def _drop_domain_constraint_definition_(self):
        """Parse ``DROP CONSTRAINT <name>``."""
        self._token('DROP')
        self._token('CONSTRAINT')
        self._schema_qualified_name_()
    @graken()
    def _drop_domain_statement_(self):
        """Parse ``DROP DOMAIN <name> <drop behavior>``."""
        self._token('DROP')
        self._token('DOMAIN')
        self._schema_qualified_name_()
        self._drop_behavior_()
    @graken()
    def _character_set_definition_(self):
        """Parse ``CREATE CHARACTER SET <name> [AS] <source> [<collate clause>]``."""
        self._token('CREATE')
        self._token('CHARACTER')
        self._token('SET')
        self._character_set_name_()
        with self._optional():
            self._token('AS')
        self._character_set_source_()
        with self._optional():
            self._collate_clause_()
    @graken()
    def _character_set_source_(self):
        """Parse ``GET <character set name>``."""
        self._token('GET')
        self._character_set_name_()
    @graken()
    def _drop_character_set_statement_(self):
        """Parse ``DROP CHARACTER SET <name>``."""
        self._token('DROP')
        self._token('CHARACTER')
        self._token('SET')
        self._character_set_name_()
    @graken()
    def _collation_definition_(self):
        """Parse ``CREATE COLLATION <name> FOR <charset> FROM <source> [<pad characteristic>]``."""
        self._token('CREATE')
        self._token('COLLATION')
        self._schema_qualified_name_()
        self._token('FOR')
        self._character_set_name_()
        self._token('FROM')
        self._schema_qualified_name_()
        with self._optional():
            self._pad_characteristic_()
    @graken()
    def _pad_characteristic_(self):
        """Parse ``NO PAD`` or ``PAD SPACE``."""
        with self._choice():
            with self._option():
                self._token('NO')
                self._token('PAD')
            with self._option():
                self._token('PAD')
                self._token('SPACE')
            self._error('expecting one of: NO PAD')
    @graken()
    def _drop_collation_statement_(self):
        """Parse ``DROP COLLATION <name> <drop behavior>``."""
        self._token('DROP')
        self._token('COLLATION')
        self._schema_qualified_name_()
        self._drop_behavior_()
    @graken()
    def _transliteration_definition_(self):
        """Parse ``CREATE TRANSLATION <name> FOR <charset> TO <charset> FROM <source>``."""
        self._token('CREATE')
        self._token('TRANSLATION')
        self._schema_qualified_name_()
        self._token('FOR')
        self._character_set_name_()
        self._token('TO')
        self._character_set_name_()
        self._token('FROM')
        self._transliteration_source_()
    @graken()
    def _transliteration_source_(self):
        """Parse an existing translation name or a specific routine designator."""
        with self._choice():
            with self._option():
                self._schema_qualified_name_()
            with self._option():
                self._specific_routine_designator_()
            self._error('no available options')
    @graken()
    def _drop_transliteration_statement_(self):
        """Parse ``DROP TRANSLATION <name>``."""
        self._token('DROP')
        self._token('TRANSLATION')
        self._schema_qualified_name_()
    @graken()
    def _assertion_definition_(self):
        """Parse ``CREATE ASSERTION <name> CHECK ( <search condition> ) [<characteristics>]``."""
        self._token('CREATE')
        self._token('ASSERTION')
        self._schema_qualified_name_()
        self._token('CHECK')
        self._left_paren_()
        self._search_condition_()
        self._right_paren_()
        with self._optional():
            self._constraint_characteristics_()
    @graken()
    def _drop_assertion_statement_(self):
        """Parse ``DROP ASSERTION <name>``."""
        self._token('DROP')
        self._token('ASSERTION')
        self._schema_qualified_name_()
    @graken()
    def _trigger_definition_(self):
        """Parse ``CREATE TRIGGER <name> <time> <event> ON <table> [REFERENCING <aliases>] <action>``."""
        self._token('CREATE')
        self._token('TRIGGER')
        self._schema_qualified_name_()
        self._trigger_action_time_()
        self._trigger_event_()
        self._token('ON')
        self._table_name_()
        with self._optional():
            self._token('REFERENCING')
            self._old_or_new_values_alias_list_()
        self._triggered_action_()
    @graken()
    def _trigger_action_time_(self):
        """Parse ``BEFORE`` or ``AFTER``."""
        with self._choice():
            with self._option():
                self._token('BEFORE')
            with self._option():
                self._token('AFTER')
            self._error('expecting one of: AFTER BEFORE')
    @graken()
    def _trigger_event_(self):
        """Parse ``INSERT``, ``DELETE``, or ``UPDATE [OF <column name list>]``."""
        with self._choice():
            with self._option():
                self._token('INSERT')
            with self._option():
                self._token('DELETE')
            with self._option():
                self._token('UPDATE')
                with self._optional():
                    self._token('OF')
                    self._column_name_list_()
            self._error('expecting one of: DELETE INSERT UPDATE')
    @graken()
    def _triggered_action_(self):
        """Parse ``[FOR EACH (ROW|STATEMENT)] [WHEN ( <condition> )] <triggered SQL statement>``."""
        with self._optional():
            self._token('FOR')
            self._token('EACH')
            with self._group():
                with self._choice():
                    with self._option():
                        self._token('ROW')
                    with self._option():
                        self._token('STATEMENT')
                    self._error('expecting one of: ROW STATEMENT')
        with self._optional():
            self._token('WHEN')
            self._left_paren_()
            self._search_condition_()
            self._right_paren_()
        self._triggered_sql_statement_()
    @graken()
    def _triggered_sql_statement_(self):
        """Parse a single SQL statement or a ``BEGIN ATOMIC ... END`` compound body."""
        with self._choice():
            with self._option():
                self._sql_procedure_statement_()
            with self._option():
                self._token('BEGIN')
                self._token('ATOMIC')
                def block0():
                    self._sql_procedure_statement_()
                    self._semicolon_()
                self._positive_closure(block0)
                self._token('END')
            self._error('no available options')
    @graken()
    def _old_or_new_values_alias_list_(self):
        """Parse one or more OLD/NEW value aliases."""
        def block0():
            self._old_or_new_values_alias_()
        self._positive_closure(block0)
    @graken()
    def _old_or_new_values_alias_(self):
        """Parse ``OLD|NEW [ROW] <as clause>`` or ``OLD|NEW TABLE <as clause>``."""
        with self._choice():
            with self._option():
                self._token('OLD')
                with self._optional():
                    self._token('ROW')
                self._as_clause_()
            with self._option():
                self._token('NEW')
                with self._optional():
                    self._token('ROW')
                self._as_clause_()
            with self._option():
                self._token('OLD')
                self._token('TABLE')
                self._as_clause_()
            with self._option():
                self._token('NEW')
                self._token('TABLE')
                self._as_clause_()
            self._error('no available options')
    @graken()
    def _drop_trigger_statement_(self):
        """Parse ``DROP TRIGGER <name>``."""
        self._token('DROP')
        self._token('TRIGGER')
        self._schema_qualified_name_()
    @graken()
    def _user_defined_type_definition_(self):
        """Parse ``CREATE TYPE <user-defined type body>``."""
        self._token('CREATE')
        self._token('TYPE')
        self._user_defined_type_body_()
    @graken()
    def _user_defined_type_body_(self):
        """Parse ``<name> [<subtype clause>] [AS <representation>] [<options>] [<methods>]``."""
        self._schema_qualified_name_()
        with self._optional():
            self._subtype_clause_()
        with self._optional():
            self._token('AS')
            self._representation_()
        with self._optional():
            self._user_defined_type_option_list_()
        with self._optional():
            self._method_specification_list_()
    @graken()
    def _user_defined_type_option_list_(self):
        """Parse one user-defined type option, optionally followed by more."""
        self._user_defined_type_option_()
        with self._optional():
            def block0():
                self._user_defined_type_option_()
            self._positive_closure(block0)
    @graken()
    def _user_defined_type_option_(self):
        """Parse an instantiability, finality, reference/ref-cast or cast option."""
        with self._choice():
            with self._option():
                self._instantiable_clause_()
            with self._option():
                self._finality_()
            with self._option():
                self._reference_type_specification_()
            with self._option():
                self._ref_cast_option_()
            with self._option():
                self._cast_option_()
            self._error('no available options')
    @graken()
    def _subtype_clause_(self):
        """Parse ``UNDER <supertype name>``."""
        self._token('UNDER')
        self._schema_qualified_name_()
    @graken()
    def _representation_(self):
        """Parse a predefined type or a member list."""
        with self._choice():
            with self._option():
                self._predefined_type_()
            with self._option():
                self._member_list_()
            self._error('no available options')
    @graken()
    def _member_list_(self):
        """Parse a parenthesized, comma-separated list of members."""
        self._left_paren_()
        def sep0():
            self._token(',')
        def block0():
            self._member_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()
    @graken()
    def _member_(self):
        """Parse a member as an attribute definition."""
        self._attribute_definition_()
    @graken()
    def _instantiable_clause_(self):
        """Parse ``INSTANTIABLE`` or ``NOT INSTANTIABLE``."""
        with self._choice():
            with self._option():
                self._token('INSTANTIABLE')
            with self._option():
                self._token('NOT')
                self._token('INSTANTIABLE')
            self._error('expecting one of: INSTANTIABLE NOT')
    @graken()
    def _finality_(self):
        """Parse ``FINAL`` or ``NOT FINAL``."""
        with self._choice():
            with self._option():
                self._token('FINAL')
            with self._option():
                self._token('NOT')
                self._token('FINAL')
            self._error('expecting one of: FINAL NOT')
    @graken()
    def _reference_type_specification_(self):
        """Parse a user-defined, derived or system-generated REF representation."""
        with self._choice():
            with self._option():
                self._user_defined_representation_()
            with self._option():
                self._derived_representation_()
            with self._option():
                self._system_generated_representation_()
            self._error('no available options')
    @graken()
    def _user_defined_representation_(self):
        """Parse ``REF USING <predefined type>``."""
        self._token('REF')
        self._token('USING')
        self._predefined_type_()
    @graken()
    def _derived_representation_(self):
        """Parse ``REF FROM <list of attributes>``."""
        self._token('REF')
        self._token('FROM')
        self._list_of_attributes_()
    @graken()
    def _system_generated_representation_(self):
        """Parse ``REF IS SYSTEM GENERATED``."""
        self._token('REF')
        self._token('IS')
        self._token('SYSTEM')
        self._token('GENERATED')
    @graken()
    def _cast_to_ref_(self):
        """Parse ``CAST ( SOURCE AS REF ) WITH <identifier>``."""
        self._token('CAST')
        self._left_paren_()
        self._token('SOURCE')
        self._token('AS')
        self._token('REF')
        self._right_paren_()
        self._token('WITH')
        self._identifier_()
    @graken()
    def _cast_to_type_(self):
        """Parse ``CAST ( REF AS SOURCE ) WITH <identifier>``."""
        self._token('CAST')
        self._left_paren_()
        self._token('REF')
        self._token('AS')
        self._token('SOURCE')
        self._right_paren_()
        self._token('WITH')
        self._identifier_()
    @graken()
    def _list_of_attributes_(self):
        """Parse a parenthesized identifier list."""
        self._left_paren_()
        self._identifier_list_()
        self._right_paren_()
    @graken()
    def _cast_to_distinct_(self):
        """Parse ``CAST ( SOURCE AS DISTINCT ) WITH <identifier>``."""
        self._token('CAST')
        self._left_paren_()
        self._token('SOURCE')
        self._token('AS')
        self._token('DISTINCT')
        self._right_paren_()
        self._token('WITH')
        self._identifier_()
    @graken()
    def _cast_to_source_(self):
        """Parse ``CAST ( DISTINCT AS SOURCE ) WITH <identifier>``."""
        self._token('CAST')
        self._left_paren_()
        self._token('DISTINCT')
        self._token('AS')
        self._token('SOURCE')
        self._right_paren_()
        self._token('WITH')
        self._identifier_()
    @graken()
    def _method_specification_list_(self):
        """Parse a comma-separated list of method specifications."""
        def sep0():
            self._token(',')
        def block0():
            self._method_specification_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _method_specification_(self):
        """Parse an original or overriding method specification."""
        with self._choice():
            with self._option():
                self._original_method_specification_()
            with self._option():
                self._overriding_method_specification_()
            self._error('no available options')
    @graken()
    def _original_method_specification_(self):
        """Parse a partial method spec with optional SELF AS RESULT/LOCATOR and characteristics."""
        self._partial_method_specification_()
        with self._optional():
            self._token('SELF')
            self._token('AS')
            self._token('RESULT')
        with self._optional():
            self._token('SELF')
            self._token('AS')
            self._token('LOCATOR')
        with self._optional():
            self._method_characteristics_()
    @graken()
    def _overriding_method_specification_(self):
        """Parse ``OVERRIDING <partial method specification>``."""
        self._token('OVERRIDING')
        self._partial_method_specification_()
    @graken()
    def _partial_method_specification_(self):
        """Parse ``[INSTANCE|STATIC|CONSTRUCTOR] METHOD <name> <params> <returns> [SPECIFIC <name>]``."""
        with self._optional():
            with self._choice():
                with self._option():
                    self._token('INSTANCE')
                with self._option():
                    self._token('STATIC')
                with self._option():
                    self._token('CONSTRUCTOR')
                self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
        self._token('METHOD')
        self._identifier_()
        self._sql_parameter_declaration_list_()
        self._returns_clause_()
        with self._optional():
            self._token('SPECIFIC')
            self._schema_qualified_name_()
    @graken()
    def _method_characteristics_(self):
        """Parse one or more method characteristics."""
        def block0():
            self._method_characteristic_()
        self._positive_closure(block0)
    @graken()
    def _method_characteristic_(self):
        """Parse a language, parameter-style, determinism, SQL-access or null-call characteristic."""
        with self._choice():
            with self._option():
                self._language_clause_()
            with self._option():
                self._parameter_style_clause_()
            with self._option():
                self._deterministic_characteristic_()
            with self._option():
                self._sql_data_access_indication_()
            with self._option():
                self._null_call_clause_()
            self._error('no available options')
    @graken()
    def _attribute_definition_(self):
        """Parse ``<identifier> <data type> [<reference scope check>] [<default>] [<collate>]``."""
        self._identifier_()
        self._data_type_()
        with self._optional():
            self._reference_scope_check_()
        with self._optional():
            self._default_clause_()
        with self._optional():
            self._collate_clause_()
    @graken()
    def _alter_type_statement_(self):
        """Parse ``ALTER TYPE <name> <alter type action>``."""
        self._token('ALTER')
        self._token('TYPE')
        self._schema_qualified_name_()
        self._alter_type_action_()
    @graken()
    def _alter_type_action_(self):
        """Parse an add/drop attribute or add/drop method-specification action."""
        with self._choice():
            with self._option():
                self._add_attribute_definition_()
            with self._option():
                self._drop_attribute_definition_()
            with self._option():
                self._add_original_method_specification_()
            with self._option():
                self._add_overriding_method_specification_()
            with self._option():
                self._drop_method_specification_()
            self._error('no available options')
    @graken()
    def _add_attribute_definition_(self):
        """Parse ``ADD ATTRIBUTE <attribute definition>``."""
        self._token('ADD')
        self._token('ATTRIBUTE')
        self._attribute_definition_()
    @graken()
    def _drop_attribute_definition_(self):
        """Parse ``DROP ATTRIBUTE <identifier> RESTRICT``."""
        self._token('DROP')
        self._token('ATTRIBUTE')
        self._identifier_()
        self._token('RESTRICT')
    @graken()
    def _add_original_method_specification_(self):
        """Parse ``ADD <original method specification>``."""
        self._token('ADD')
        self._original_method_specification_()
    @graken()
    def _add_overriding_method_specification_(self):
        """Parse ``ADD <overriding method specification>``."""
        self._token('ADD')
        self._overriding_method_specification_()
    @graken()
    def _drop_method_specification_(self):
        """Parse ``DROP <specific method designator> RESTRICT``."""
        self._token('DROP')
        self._specific_method_specification_designator_()
        self._token('RESTRICT')
    @graken()
    def _specific_method_specification_designator_(self):
        """Parse ``[INSTANCE|STATIC|CONSTRUCTOR] METHOD <name> <data type list>``."""
        with self._optional():
            with self._choice():
                with self._option():
                    self._token('INSTANCE')
                with self._option():
                    self._token('STATIC')
                with self._option():
                    self._token('CONSTRUCTOR')
                self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
        self._token('METHOD')
        self._identifier_()
        self._data_type_list_()
    @graken()
    def _drop_data_type_statement_(self):
        """Parse ``DROP TYPE <name> <drop behavior>``."""
        self._token('DROP')
        self._token('TYPE')
        self._schema_qualified_name_()
        self._drop_behavior_()
    @graken()
    def _schema_routine_(self):
        """Parse a schema procedure or a schema function."""
        with self._choice():
            with self._option():
                self._schema_procedure_()
            with self._option():
                self._schema_function_()
            self._error('no available options')
    @graken()
    def _schema_procedure_(self):
        """Parse ``CREATE <SQL-invoked procedure>``."""
        self._token('CREATE')
        self._sql_invoked_procedure_()
    @graken()
    def _schema_function_(self):
        """Parse ``CREATE <SQL-invoked function>``."""
        self._token('CREATE')
        self._sql_invoked_function_()
    @graken()
    def _sql_invoked_procedure_(self):
        """Parse ``PROCEDURE <name> <params> <characteristics> <routine body>``."""
        self._token('PROCEDURE')
        self._schema_qualified_name_()
        self._sql_parameter_declaration_list_()
        self._routine_characteristics_()
        self._routine_body_()
    @graken()
    def _sql_invoked_function_(self):
        """Parse a function specification or method designator, then the routine body."""
        with self._group():
            with self._choice():
                with self._option():
                    self._function_specification_()
                with self._option():
                    self._method_specification_designator_()
                self._error('no available options')
        self._routine_body_()
    @graken()
    def _sql_parameter_declaration_list_(self):
        """Parse a parenthesized, possibly empty, comma-separated parameter list."""
        self._left_paren_()
        with self._optional():
            def sep0():
                self._token(',')
            def block0():
                self._sql_parameter_declaration_()
            self._positive_closure(block0, prefix=sep0)
        self._right_paren_()
    @graken()
    def _sql_parameter_declaration_(self):
        """Parse ``[<mode>] [<identifier>] <parameter type> [RESULT]``."""
        with self._optional():
            self._parameter_mode_()
        with self._optional():
            self._identifier_()
        self._parameter_type_()
        with self._optional():
            self._token('RESULT')
    @graken()
    def _parameter_mode_(self):
        """Parse ``IN``, ``OUT`` or ``INOUT``."""
        with self._choice():
            with self._option():
                self._token('IN')
            with self._option():
                self._token('OUT')
            with self._option():
                self._token('INOUT')
            self._error('expecting one of: IN INOUT OUT')
    @graken()
    def _parameter_type_(self):
        """Parse a data type with an optional locator indication."""
        self._data_type_()
        with self._optional():
            self._locator_indication_()
    @graken()
    def _locator_indication_(self):
        """Parse ``AS LOCATOR``."""
        self._token('AS')
        self._token('LOCATOR')
    @graken()
    def _function_specification_(self):
        """Parse ``FUNCTION <name> <params> <returns> <characteristics> [<dispatch clause>]``."""
        self._token('FUNCTION')
        self._schema_qualified_name_()
        self._sql_parameter_declaration_list_()
        self._returns_clause_()
        self._routine_characteristics_()
        with self._optional():
            self._dispatch_clause_()
    @graken()
    def _method_specification_designator_(self):
        """Parse ``SPECIFIC METHOD <name>`` or a typed ``METHOD ... FOR <type name>`` designator."""
        with self._choice():
            with self._option():
                self._token('SPECIFIC')
                self._token('METHOD')
                self._schema_qualified_name_()
            with self._option():
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('INSTANCE')
                        with self._option():
                            self._token('STATIC')
                        with self._option():
                            self._token('CONSTRUCTOR')
                        self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
                self._token('METHOD')
                self._identifier_()
                self._sql_parameter_declaration_list_()
                with self._optional():
                    self._returns_clause_()
                self._token('FOR')
                self._schema_qualified_name_()
            self._error('no available options')
    @graken()
    def _routine_characteristics_(self):
        """Parse zero or more routine characteristics."""
        with self._optional():
            def block0():
                self._routine_characteristic_()
            self._positive_closure(block0)
    @graken()
    def _routine_characteristic_(self):
        """Parse one routine characteristic (language, style, SPECIFIC name, determinism, etc.)."""
        with self._choice():
            with self._option():
                self._language_clause_()
            with self._option():
                self._parameter_style_clause_()
            with self._option():
                self._token('SPECIFIC')
                self._schema_qualified_name_()
            with self._option():
                self._deterministic_characteristic_()
            with self._option():
                self._sql_data_access_indication_()
            with self._option():
                self._null_call_clause_()
            with self._option():
                self._dynamic_result_sets_characteristic_()
            with self._option():
                self._savepoint_level_indication_()
            self._error('no available options')
    @graken()
    def _savepoint_level_indication_(self):
        """Parse ``NEW SAVEPOINT LEVEL`` or ``OLD SAVEPOINT LEVEL``."""
        with self._choice():
            with self._option():
                self._token('NEW')
                self._token('SAVEPOINT')
                self._token('LEVEL')
            with self._option():
                self._token('OLD')
                self._token('SAVEPOINT')
                self._token('LEVEL')
            self._error('expecting one of: NEW OLD')
    @graken()
    def _dynamic_result_sets_characteristic_(self):
        """Parse ``DYNAMIC RESULT SETS <unsigned integer>``."""
        self._token('DYNAMIC')
        self._token('RESULT')
        self._token('SETS')
        self._unsigned_integer_()
    @graken()
    def _parameter_style_clause_(self):
        """Parse ``PARAMETER STYLE <parameter style>``."""
        self._token('PARAMETER')
        self._token('STYLE')
        self._parameter_style_()
    @graken()
    def _dispatch_clause_(self):
        """Parse ``STATIC DISPATCH``."""
        self._token('STATIC')
        self._token('DISPATCH')
    @graken()
    def _returns_clause_(self):
        """Parse ``RETURNS <returns type>``."""
        self._token('RETURNS')
        self._returns_type_()
    @graken()
    def _returns_type_(self):
        """Parse a returns data type (with optional result cast) or a returns table type."""
        with self._choice():
            with self._option():
                self._returns_data_type_()
                with self._optional():
                    self._result_cast_()
            with self._option():
                self._returns_table_type_()
            self._error('no available options')
    @graken()
    def _returns_table_type_(self):
        """Parse ``TABLE <table function column list>``."""
        self._token('TABLE')
        self._table_function_column_list_()
    @graken()
    def _table_function_column_list_(self):
        """Parse a parenthesized, comma-separated list of column elements."""
        self._left_paren_()
        def sep0():
            self._token(',')
        def block0():
            self._table_function_column_list_element_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()
    @graken()
    def _table_function_column_list_element_(self):
        """Parse ``<identifier> <data type>``."""
        self._identifier_()
        self._data_type_()
    @graken()
    def _result_cast_(self):
        """Parse ``CAST FROM <result cast from type>``."""
        self._token('CAST')
        self._token('FROM')
        self._result_cast_from_type_()
    @graken()
    def _result_cast_from_type_(self):
        """Parse a data type with an optional locator indication."""
        self._data_type_()
        with self._optional():
            self._locator_indication_()
    @graken()
    def _returns_data_type_(self):
        """Parse a data type with an optional locator indication."""
        self._data_type_()
        with self._optional():
            self._locator_indication_()
    @graken()
    def _routine_body_(self):
        """Parse an SQL routine spec or an external body reference."""
        with self._choice():
            with self._option():
                self._sql_routine_spec_()
            with self._option():
                self._external_body_reference_()
            self._error('no available options')
    @graken()
    def _sql_routine_spec_(self):
        """Parse ``[<rights clause>] <SQL procedure statement>``."""
        with self._optional():
            self._rights_clause_()
        self._sql_procedure_statement_()
    @graken()
    def _rights_clause_(self):
        """Parse ``SQL SECURITY INVOKER`` or ``SQL SECURITY DEFINER``."""
        with self._choice():
            with self._option():
                self._token('SQL')
                self._token('SECURITY')
                self._token('INVOKER')
            with self._option():
                self._token('SQL')
                self._token('SECURITY')
                self._token('DEFINER')
            self._error('expecting one of: SQL')
    @graken()
    def _external_body_reference_(self):
        """Parse ``EXTERNAL [NAME <routine name>]`` with optional style/transform/security clauses."""
        self._token('EXTERNAL')
        with self._optional():
            self._token('NAME')
            self._external_routine_name_()
        with self._optional():
            self._parameter_style_clause_()
        with self._optional():
            self._transform_group_specification_()
        with self._optional():
            self._external_security_clause_()
    @graken()
    def _external_security_clause_(self):
        """Parse ``EXTERNAL SECURITY DEFINER|INVOKER|IMPLEMENTATION DEFINED``."""
        with self._choice():
            with self._option():
                self._token('EXTERNAL')
                self._token('SECURITY')
                self._token('DEFINER')
            with self._option():
                self._token('EXTERNAL')
                self._token('SECURITY')
                self._token('INVOKER')
            with self._option():
                self._token('EXTERNAL')
                self._token('SECURITY')
                self._token('IMPLEMENTATION')
                self._token('DEFINED')
            self._error('expecting one of: EXTERNAL')
    @graken()
    def _parameter_style_(self):
        """Parse ``SQL`` or ``GENERAL``."""
        with self._choice():
            with self._option():
                self._token('SQL')
            with self._option():
                self._token('GENERAL')
            self._error('expecting one of: GENERAL SQL')
    @graken()
    def _deterministic_characteristic_(self):
        """Parse ``DETERMINISTIC`` or ``NOT DETERMINISTIC``."""
        with self._choice():
            with self._option():
                self._token('DETERMINISTIC')
            with self._option():
                self._token('NOT')
                self._token('DETERMINISTIC')
            self._error('expecting one of: DETERMINISTIC NOT')
    @graken()
    def _sql_data_access_indication_(self):
        """Parse ``NO SQL``, ``CONTAINS SQL``, ``READS SQL DATA`` or ``MODIFIES SQL DATA``."""
        with self._choice():
            with self._option():
                self._token('NO')
                self._token('SQL')
            with self._option():
                self._token('CONTAINS')
                self._token('SQL')
            with self._option():
                self._token('READS')
                self._token('SQL')
                self._token('DATA')
            with self._option():
                self._token('MODIFIES')
                self._token('SQL')
                self._token('DATA')
            self._error('expecting one of: CONTAINS MODIFIES NO READS')
    @graken()
    def _null_call_clause_(self):
        """Parse ``RETURNS NULL ON NULL INPUT`` or ``CALLED ON NULL INPUT``."""
        with self._choice():
            with self._option():
                self._token('RETURNS')
                self._token('NULL')
                self._token('ON')
                self._token('NULL')
                self._token('INPUT')
            with self._option():
                self._token('CALLED')
                self._token('ON')
                self._token('NULL')
                self._token('INPUT')
            self._error('expecting one of: CALLED RETURNS')
    @graken()
    def _transform_group_specification_(self):
        """Parse ``TRANSFORM GROUP`` followed by a single name or a multiple-group specification."""
        self._token('TRANSFORM')
        self._token('GROUP')
        with self._group():
            with self._choice():
                with self._option():
                    self._identifier_()
                with self._option():
                    self._multiple_group_specification_()
                self._error('no available options')
    @graken()
    def _multiple_group_specification_(self):
        """Parse a comma-separated list of group specifications."""
        def sep0():
            self._token(',')
        def block0():
            self._group_specification_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _group_specification_(self):
        """Parse ``<identifier> FOR TYPE <type name>``."""
        self._identifier_()
        self._token('FOR')
        self._token('TYPE')
        self._schema_qualified_name_()
    @graken()
    def _alter_routine_statement_(self):
        """Parse ``ALTER <specific routine designator> <characteristics> RESTRICT``."""
        self._token('ALTER')
        self._specific_routine_designator_()
        self._alter_routine_characteristics_()
        self._token('RESTRICT')
    @graken()
    def _alter_routine_characteristics_(self):
        """Parse one or more alter-routine characteristics."""
        def block0():
            self._alter_routine_characteristic_()
        self._positive_closure(block0)
    @graken()
    def _alter_routine_characteristic_(self):
        """Parse one characteristic, including ``NAME <external routine name>``."""
        with self._choice():
            with self._option():
                self._language_clause_()
            with self._option():
                self._parameter_style_clause_()
            with self._option():
                self._sql_data_access_indication_()
            with self._option():
                self._null_call_clause_()
            with self._option():
                self._dynamic_result_sets_characteristic_()
            with self._option():
                self._token('NAME')
                self._external_routine_name_()
            self._error('no available options')
    @graken()
    def _drop_routine_statement_(self):
        """Parse ``DROP <specific routine designator> <drop behavior>``."""
        self._token('DROP')
        self._specific_routine_designator_()
        self._drop_behavior_()
    @graken()
    def _user_defined_cast_definition_(self):
        """Parse ``CREATE CAST ( <source> AS <target> ) WITH <routine> [AS ASSIGNMENT]``."""
        self._token('CREATE')
        self._token('CAST')
        self._left_paren_()
        self._source_data_type_()
        self._token('AS')
        self._target_data_type_()
        self._right_paren_()
        self._token('WITH')
        self._specific_routine_designator_()
        with self._optional():
            self._token('AS')
            self._token('ASSIGNMENT')
    @graken()
    def _source_data_type_(self):
        """Parse the cast's source data type."""
        self._data_type_()
    @graken()
    def _target_data_type_(self):
        """Parse the cast's target data type."""
        self._data_type_()
    @graken()
    def _drop_user_defined_cast_statement_(self):
        """Parse ``DROP CAST ( <source> AS <target> ) <drop behavior>``."""
        self._token('DROP')
        self._token('CAST')
        self._left_paren_()
        self._source_data_type_()
        self._token('AS')
        self._target_data_type_()
        self._right_paren_()
        self._drop_behavior_()
    @graken()
    def _user_defined_ordering_definition_(self):
        """Parse ``CREATE ORDERING FOR <type name> <ordering form>``."""
        self._token('CREATE')
        self._token('ORDERING')
        self._token('FOR')
        self._schema_qualified_name_()
        self._ordering_form_()
    @graken()
    def _ordering_form_(self):
        """Parse an equals-only or full ordering form."""
        with self._choice():
            with self._option():
                self._equals_ordering_form_()
            with self._option():
                self._full_ordering_form_()
            self._error('no available options')
    @graken()
    def _equals_ordering_form_(self):
        """Parse ``EQUALS ONLY BY <ordering category>``."""
        self._token('EQUALS')
        self._token('ONLY')
        self._token('BY')
        self._ordering_category_()
    @graken()
    def _full_ordering_form_(self):
        """Parse ``ORDER FULL BY <ordering category>``."""
        self._token('ORDER')
        self._token('FULL')
        self._token('BY')
        self._ordering_category_()
    @graken()
    def _ordering_category_(self):
        """Parse a relative, map or state ordering category."""
        with self._choice():
            with self._option():
                self._relative_category_()
            with self._option():
                self._map_category_()
            with self._option():
                self._state_category_()
            self._error('no available options')
    @graken()
    def _relative_category_(self):
        """Parse ``RELATIVE WITH <specific routine designator>``."""
        self._token('RELATIVE')
        self._token('WITH')
        self._specific_routine_designator_()
    @graken()
    def _map_category_(self):
        """Parse ``MAP WITH <specific routine designator>``."""
        self._token('MAP')
        self._token('WITH')
        self._specific_routine_designator_()
    @graken()
    def _state_category_(self):
        """Parse ``STATE [<name>]``."""
        self._token('STATE')
        with self._optional():
            self._schema_qualified_name_()
    @graken()
    def _drop_user_defined_ordering_statement_(self):
        """Parse ``DROP ORDERING FOR <type name> <drop behavior>``."""
        self._token('DROP')
        self._token('ORDERING')
        self._token('FOR')
        self._schema_qualified_name_()
        self._drop_behavior_()
    @graken()
    def _transform_definition_(self):
        """Parse ``CREATE TRANSFORM|TRANSFORMS FOR <type name>`` plus one or more transform groups."""
        self._token('CREATE')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('TRANSFORM')
                with self._option():
                    self._token('TRANSFORMS')
                self._error('expecting one of: TRANSFORM TRANSFORMS')
        self._token('FOR')
        self._schema_qualified_name_()
        def block1():
            self._transform_group_()
        self._positive_closure(block1)
    @graken()
    def _transform_group_(self):
        """Parse ``<identifier> ( <transform element list> )``."""
        self._identifier_()
        self._left_paren_()
        self._transform_element_list_()
        self._right_paren_()
    @graken()
    def _transform_element_list_(self):
        """Parse one transform element, optionally followed by a comma and a second element."""
        self._transform_element_()
        with self._optional():
            self._comma_()
            self._transform_element_()
    @graken()
    def _transform_element_(self):
        """Parse a TO SQL or FROM SQL transform element."""
        with self._choice():
            with self._option():
                self._to_sql_()
            with self._option():
                self._from_sql_()
            self._error('no available options')
    @graken()
    def _to_sql_(self):
        """Parse ``TO SQL WITH <specific routine designator>``."""
        self._token('TO')
        self._token('SQL')
        self._token('WITH')
        self._specific_routine_designator_()
    @graken()
    def _from_sql_(self):
        """Parse ``FROM SQL WITH <specific routine designator>``."""
        self._token('FROM')
        self._token('SQL')
        self._token('WITH')
        self._specific_routine_designator_()
    @graken()
    def _alter_transform_statement_(self):
        """Parse ``ALTER TRANSFORM|TRANSFORMS FOR <type name>`` plus one or more alter groups."""
        self._token('ALTER')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('TRANSFORM')
                with self._option():
                    self._token('TRANSFORMS')
                self._error('expecting one of: TRANSFORM TRANSFORMS')
        self._token('FOR')
        self._schema_qualified_name_()
        def block1():
            self._alter_group_()
        self._positive_closure(block1)
    @graken()
    def _alter_group_(self):
        """Parse ``<identifier> ( <alter transform action list> )``."""
        self._identifier_()
        self._left_paren_()
        self._alter_transform_action_list_()
        self._right_paren_()
    @graken()
    def _alter_transform_action_list_(self):
        """Parse a comma-separated list of alter-transform actions."""
        def sep0():
            self._token(',')
        def block0():
            self._alter_transform_action_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _alter_transform_action_(self):
        """Parse an add or drop transform-element-list action."""
        with self._choice():
            with self._option():
                self._add_transform_element_list_()
            with self._option():
                self._drop_transform_element_list_()
            self._error('no available options')
    @graken()
    def _add_transform_element_list_(self):
        """Parse ``ADD ( <transform element list> )``."""
        self._token('ADD')
        self._left_paren_()
        self._transform_element_list_()
        self._right_paren_()
    @graken()
    def _drop_transform_element_list_(self):
        """Parse ``DROP ( <transform kind> [, <transform kind>] <drop behavior> )``."""
        self._token('DROP')
        self._left_paren_()
        self._transform_kind_()
        with self._optional():
            self._comma_()
            self._transform_kind_()
        self._drop_behavior_()
        self._right_paren_()
    @graken()
    def _transform_kind_(self):
        """Parse ``TO SQL`` or ``FROM SQL``."""
        with self._choice():
            with self._option():
                self._token('TO')
                self._token('SQL')
            with self._option():
                self._token('FROM')
                self._token('SQL')
            self._error('expecting one of: FROM TO')
    @graken()
    def _drop_transform_statement_(self):
        """Parse ``DROP TRANSFORM|TRANSFORMS <which> FOR <type name> <drop behavior>``."""
        self._token('DROP')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('TRANSFORM')
                with self._option():
                    self._token('TRANSFORMS')
                self._error('expecting one of: TRANSFORM TRANSFORMS')
        self._transforms_to_be_dropped_()
        self._token('FOR')
        self._schema_qualified_name_()
        self._drop_behavior_()
@graken()
def _transforms_to_be_dropped_(self):
with self._choice():
with self._option():
self._token('ALL')
with self._option():
self._identifier_()
self._error('expecting one of: ALL')
@graken()
def _sequence_generator_definition_(self):
self._token('CREATE')
self._token('SEQUENCE')
self._schema_qualified_name_()
with self._optional():
self._sequence_generator_options_()
@graken()
def _sequence_generator_options_(self):
def block0():
self._sequence_generator_option_()
self._positive_closure(block0)
@graken()
def _sequence_generator_option_(self):
with self._choice():
with self._option():
self._sequence_generator_data_type_option_()
with self._option():
self._common_sequence_generator_options_()
self._error('no available options')
@graken()
def _common_sequence_generator_options_(self):
def block0():
self._common_sequence_generator_option_()
self._positive_closure(block0)
@graken()
def _common_sequence_generator_option_(self):
with self._choice():
with self._option():
self._sequence_generator_start_with_option_()
with self._option():
self._basic_sequence_generator_option_()
self._error('no available options')
@graken()
def _basic_sequence_generator_option_(self):
with self._choice():
with self._option():
self._sequence_generator_increment_by_option_()
with self._option():
self._sequence_generator_maxvalue_option_()
with self._option():
self._sequence_generator_minvalue_option_()
with self._option():
self._sequence_generator_cycle_option_()
self._error('no available options')
    @graken()
    def _sequence_generator_data_type_option_(self):
        """Parse AS <data type>."""
        self._token('AS')
        self._data_type_()

    @graken()
    def _sequence_generator_start_with_option_(self):
        """Parse START WITH <signed numeric literal>."""
        self._token('START')
        self._token('WITH')
        self._signed_numeric_literal_()

    @graken()
    def _sequence_generator_increment_by_option_(self):
        """Parse INCREMENT BY <signed numeric literal>."""
        self._token('INCREMENT')
        self._token('BY')
        self._signed_numeric_literal_()

    @graken()
    def _sequence_generator_maxvalue_option_(self):
        """Parse MAXVALUE <signed numeric literal> or NO MAXVALUE."""
        with self._choice():
            with self._option():
                self._token('MAXVALUE')
                self._signed_numeric_literal_()
            with self._option():
                self._token('NO')
                self._token('MAXVALUE')
            self._error('expecting one of: NO')

    @graken()
    def _sequence_generator_minvalue_option_(self):
        """Parse MINVALUE <signed numeric literal> or NO MINVALUE."""
        with self._choice():
            with self._option():
                self._token('MINVALUE')
                self._signed_numeric_literal_()
            with self._option():
                self._token('NO')
                self._token('MINVALUE')
            self._error('expecting one of: NO')

    @graken()
    def _sequence_generator_cycle_option_(self):
        """Parse CYCLE or NO CYCLE."""
        with self._choice():
            with self._option():
                self._token('CYCLE')
            with self._option():
                self._token('NO')
                self._token('CYCLE')
            self._error('expecting one of: CYCLE NO')

    @graken()
    def _alter_sequence_generator_statement_(self):
        """Parse ALTER SEQUENCE name <alter sequence generator options>."""
        self._token('ALTER')
        self._token('SEQUENCE')
        self._schema_qualified_name_()
        self._alter_sequence_generator_options_()

    @graken()
    def _alter_sequence_generator_options_(self):
        """Parse one or more <alter sequence generator option>s."""
        def block0():
            self._alter_sequence_generator_option_()
        self._positive_closure(block0)

    @graken()
    def _alter_sequence_generator_option_(self):
        """Parse a RESTART option or a basic sequence generator option."""
        with self._choice():
            with self._option():
                self._alter_sequence_generator_restart_option_()
            with self._option():
                self._basic_sequence_generator_option_()
            self._error('no available options')

    @graken()
    def _alter_sequence_generator_restart_option_(self):
        """Parse RESTART WITH <signed numeric literal>."""
        self._token('RESTART')
        self._token('WITH')
        self._signed_numeric_literal_()

    @graken()
    def _drop_sequence_generator_statement_(self):
        """Parse DROP SEQUENCE name <drop behavior>."""
        self._token('DROP')
        self._token('SEQUENCE')
        self._schema_qualified_name_()
        self._drop_behavior_()

    @graken()
    def _grant_statement_(self):
        """Parse <grant statement>: a privilege grant or a role grant."""
        with self._choice():
            with self._option():
                self._grant_privilege_statement_()
            with self._option():
                self._grant_role_statement_()
            self._error('no available options')
    @graken()
    def _grant_privilege_statement_(self):
        """Parse GRANT <privileges> TO grantee,... [WITH HIERARCHY OPTION] [WITH GRANT OPTION] [GRANTED BY grantor]."""
        self._token('GRANT')
        self._privileges_()
        self._token('TO')

        def sep0():
            self._token(',')

        def block0():
            self._grantee_()
        self._positive_closure(block0, prefix=sep0)
        with self._optional():
            self._token('WITH')
            self._token('HIERARCHY')
            self._token('OPTION')
        with self._optional():
            self._token('WITH')
            self._token('GRANT')
            self._token('OPTION')
        with self._optional():
            self._token('GRANTED')
            self._token('BY')
            self._grantor_()

    @graken()
    def _privileges_(self):
        """Parse <object privileges> ON <object name>."""
        self._object_privileges_()
        self._token('ON')
        self._object_name_()

    @graken()
    def _object_name_(self):
        """Parse the grantable object: [TABLE] table, DOMAIN/COLLATION/TRANSLATION/TYPE/SEQUENCE name, CHARACTER SET, or a specific routine."""
        with self._choice():
            with self._option():
                with self._optional():
                    self._token('TABLE')
                self._table_name_()
            with self._option():
                self._token('DOMAIN')
                self._schema_qualified_name_()
            with self._option():
                self._token('COLLATION')
                self._schema_qualified_name_()
            with self._option():
                self._token('CHARACTER')
                self._token('SET')
                self._character_set_name_()
            with self._option():
                self._token('TRANSLATION')
                self._schema_qualified_name_()
            with self._option():
                self._token('TYPE')
                self._schema_qualified_name_()
            with self._option():
                self._token('SEQUENCE')
                self._schema_qualified_name_()
            with self._option():
                self._specific_routine_designator_()
            self._error('no available options')

    @graken()
    def _object_privileges_(self):
        """Parse ALL PRIVILEGES or a comma-separated list of <action>s."""
        with self._choice():
            with self._option():
                self._token('ALL')
                self._token('PRIVILEGES')
            with self._option():
                def sep0():
                    self._token(',')

                def block0():
                    self._action_()
                self._positive_closure(block0, prefix=sep0)
            self._error('expecting one of: ALL')

    @graken()
    def _action_(self):
        """Parse one privilege <action>; SELECT/INSERT/UPDATE/REFERENCES may carry a parenthesized column (or, for SELECT, method) list."""
        with self._choice():
            with self._option():
                self._token('SELECT')
            with self._option():
                self._token('SELECT')
                self._left_paren_()
                self._column_name_list_()
                self._right_paren_()
            with self._option():
                self._token('SELECT')
                self._left_paren_()
                self._privilege_method_list_()
                self._right_paren_()
            with self._option():
                self._token('DELETE')
            with self._option():
                self._token('INSERT')
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._token('UPDATE')
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._token('REFERENCES')
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._token('USAGE')
            with self._option():
                self._token('TRIGGER')
            with self._option():
                self._token('UNDER')
            with self._option():
                self._token('EXECUTE')
            self._error('expecting one of: DELETE EXECUTE INSERT REFERENCES SELECT TRIGGER UNDER UPDATE USAGE')

    @graken()
    def _privilege_method_list_(self):
        """Parse a comma-separated list of <specific routine designator>s."""
        def sep0():
            self._token(',')

        def block0():
            self._specific_routine_designator_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _grantee_(self):
        """Parse <grantee>: PUBLIC or an authorization identifier."""
        with self._choice():
            with self._option():
                self._token('PUBLIC')
            with self._option():
                self._identifier_()
            self._error('expecting one of: PUBLIC')

    @graken()
    def _grantor_(self):
        """Parse <grantor>: CURRENT_USER or CURRENT_ROLE."""
        with self._choice():
            with self._option():
                self._token('CURRENT_USER')
            with self._option():
                self._token('CURRENT_ROLE')
            self._error('expecting one of: CURRENT_ROLE CURRENT_USER')
    @graken()
    def _role_definition_(self):
        """Parse CREATE ROLE identifier [WITH ADMIN <grantor>]."""
        self._token('CREATE')
        self._token('ROLE')
        self._identifier_()
        with self._optional():
            self._token('WITH')
            self._token('ADMIN')
            self._grantor_()

    @graken()
    def _grant_role_statement_(self):
        """Parse GRANT role,... TO grantee,... [WITH ADMIN OPTION] [GRANTED BY grantor]."""
        self._token('GRANT')
        self._identifier_list_()
        self._token('TO')

        def sep0():
            self._token(',')

        def block0():
            self._grantee_()
        self._positive_closure(block0, prefix=sep0)
        with self._optional():
            self._token('WITH')
            self._token('ADMIN')
            self._token('OPTION')
        with self._optional():
            self._token('GRANTED')
            self._token('BY')
            self._grantor_()

    @graken()
    def _drop_role_statement_(self):
        """Parse DROP ROLE identifier."""
        self._token('DROP')
        self._token('ROLE')
        self._identifier_()

    @graken()
    def _revoke_statement_(self):
        """Parse <revoke statement>: a privilege revoke or a role revoke."""
        with self._choice():
            with self._option():
                self._revoke_privilege_statement_()
            with self._option():
                self._revoke_role_statement_()
            self._error('no available options')

    @graken()
    def _revoke_privilege_statement_(self):
        """Parse REVOKE [option extension] <privileges> FROM grantee,... [GRANTED BY grantor] <drop behavior>."""
        self._token('REVOKE')
        with self._optional():
            self._revoke_option_extension_()
        self._privileges_()
        self._token('FROM')

        def sep0():
            self._token(',')

        def block0():
            self._grantee_()
        self._positive_closure(block0, prefix=sep0)
        with self._optional():
            self._token('GRANTED')
            self._token('BY')
            self._grantor_()
        self._drop_behavior_()

    @graken()
    def _revoke_option_extension_(self):
        """Parse GRANT OPTION FOR or HIERARCHY OPTION FOR."""
        with self._choice():
            with self._option():
                self._token('GRANT')
                self._token('OPTION')
                self._token('FOR')
            with self._option():
                self._token('HIERARCHY')
                self._token('OPTION')
                self._token('FOR')
            self._error('expecting one of: GRANT HIERARCHY')

    @graken()
    def _revoke_role_statement_(self):
        """Parse REVOKE [ADMIN OPTION FOR] role,... FROM grantee,... [GRANTED BY grantor] <drop behavior>."""
        self._token('REVOKE')
        with self._optional():
            self._token('ADMIN')
            self._token('OPTION')
            self._token('FOR')
        self._identifier_list_()
        self._token('FROM')

        def sep0():
            self._token(',')

        def block0():
            self._grantee_()
        self._positive_closure(block0, prefix=sep0)
        with self._optional():
            self._token('GRANTED')
            self._token('BY')
            self._grantor_()
        self._drop_behavior_()

    @graken()
    def _character_set_specification_list_(self):
        """Parse a comma-separated list of <character set name>s."""
        def sep0():
            self._token(',')

        def block0():
            self._character_set_name_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _sql_procedure_statement_(self):
        """Parse <SQL procedure statement>: an alias for <SQL executable statement>."""
        self._sql_executable_statement_()
    @graken()
    def _sql_executable_statement_(self):
        """Parse any executable SQL statement, dispatching over the major statement classes."""
        with self._choice():
            with self._option():
                self._sql_schema_statement_()
            with self._option():
                self._sql_data_statement_()
            with self._option():
                self._sql_control_statement_()
            with self._option():
                self._sql_transaction_statement_()
            with self._option():
                self._sql_connection_statement_()
            with self._option():
                self._sql_session_statement_()
            with self._option():
                self._get_diagnostics_statement_()
            with self._option():
                self._sql_dynamic_statement_()
            self._error('no available options')

    @graken()
    def _sql_schema_statement_(self):
        """Parse a schema definition statement or a schema manipulation statement."""
        with self._choice():
            with self._option():
                self._sql_schema_definition_statement_()
            with self._option():
                self._sql_schema_manipulation_statement_()
            self._error('no available options')

    @graken()
    def _sql_schema_definition_statement_(self):
        """Parse any CREATE-style schema definition (schema, table, view, routine, grant, role, domain, charset, collation, translation, assertion, trigger, UDT, cast, ordering, transform, sequence)."""
        with self._choice():
            with self._option():
                self._schema_definition_()
            with self._option():
                self._table_definition_()
            with self._option():
                self._view_definition_()
            with self._option():
                self._schema_routine_()
            with self._option():
                self._grant_statement_()
            with self._option():
                self._role_definition_()
            with self._option():
                self._domain_definition_()
            with self._option():
                self._character_set_definition_()
            with self._option():
                self._collation_definition_()
            with self._option():
                self._transliteration_definition_()
            with self._option():
                self._assertion_definition_()
            with self._option():
                self._trigger_definition_()
            with self._option():
                self._user_defined_type_definition_()
            with self._option():
                self._user_defined_cast_definition_()
            with self._option():
                self._user_defined_ordering_definition_()
            with self._option():
                self._transform_definition_()
            with self._option():
                self._sequence_generator_definition_()
            self._error('no available options')

    @graken()
    def _sql_schema_manipulation_statement_(self):
        """Parse any ALTER/DROP/REVOKE-style schema manipulation statement."""
        with self._choice():
            with self._option():
                self._drop_schema_statement_()
            with self._option():
                self._alter_table_statement_()
            with self._option():
                self._drop_table_statement_()
            with self._option():
                self._drop_view_statement_()
            with self._option():
                self._alter_routine_statement_()
            with self._option():
                self._drop_routine_statement_()
            with self._option():
                self._drop_user_defined_cast_statement_()
            with self._option():
                self._revoke_statement_()
            with self._option():
                self._drop_role_statement_()
            with self._option():
                self._alter_domain_statement_()
            with self._option():
                self._drop_domain_statement_()
            with self._option():
                self._drop_character_set_statement_()
            with self._option():
                self._drop_collation_statement_()
            with self._option():
                self._drop_transliteration_statement_()
            with self._option():
                self._drop_assertion_statement_()
            with self._option():
                self._drop_trigger_statement_()
            with self._option():
                self._alter_type_statement_()
            with self._option():
                self._drop_data_type_statement_()
            with self._option():
                self._drop_user_defined_ordering_statement_()
            with self._option():
                self._alter_transform_statement_()
            with self._option():
                self._drop_transform_statement_()
            with self._option():
                self._alter_sequence_generator_statement_()
            with self._option():
                self._drop_sequence_generator_statement_()
            self._error('no available options')

    @graken()
    def _sql_data_statement_(self):
        """Parse cursor operations, single-row SELECT, locator statements, or a data change statement."""
        with self._choice():
            with self._option():
                self._open_statement_()
            with self._option():
                self._fetch_statement_()
            with self._option():
                self._close_statement_()
            with self._option():
                self._select_statement_single_row_()
            with self._option():
                self._free_locator_statement_()
            with self._option():
                self._hold_locator_statement_()
            with self._option():
                self._sql_data_change_statement_()
            self._error('no available options')

    @graken()
    def _sql_data_change_statement_(self):
        """Parse DELETE (positioned/searched), INSERT, UPDATE (positioned/searched), or MERGE."""
        with self._choice():
            with self._option():
                self._delete_statement_positioned_()
            with self._option():
                self._delete_statement_searched_()
            with self._option():
                self._insert_statement_()
            with self._option():
                self._update_statement_positioned_()
            with self._option():
                self._update_statement_searched_()
            with self._option():
                self._merge_statement_()
            self._error('no available options')

    @graken()
    def _sql_control_statement_(self):
        """Parse CALL or RETURN."""
        with self._choice():
            with self._option():
                self._call_statement_()
            with self._option():
                self._return_statement_()
            self._error('no available options')

    @graken()
    def _sql_transaction_statement_(self):
        """Parse a transaction statement: START/SET TRANSACTION, SET CONSTRAINTS, SAVEPOINT, RELEASE SAVEPOINT, COMMIT, or ROLLBACK."""
        with self._choice():
            with self._option():
                self._start_transaction_statement_()
            with self._option():
                self._set_transaction_statement_()
            with self._option():
                self._set_constraints_mode_statement_()
            with self._option():
                self._savepoint_statement_()
            with self._option():
                self._release_savepoint_statement_()
            with self._option():
                self._commit_statement_()
            with self._option():
                self._rollback_statement_()
            self._error('no available options')

    @graken()
    def _sql_connection_statement_(self):
        """Parse CONNECT, SET CONNECTION, or DISCONNECT."""
        with self._choice():
            with self._option():
                self._connect_statement_()
            with self._option():
                self._set_connection_statement_()
            with self._option():
                self._disconnect_statement_()
            self._error('no available options')

    @graken()
    def _sql_session_statement_(self):
        """Parse any SET-session statement (authorization, role, time zone, characteristics, catalog, schema, names, path, transform group, collation)."""
        with self._choice():
            with self._option():
                self._set_session_user_identifier_statement_()
            with self._option():
                self._set_role_statement_()
            with self._option():
                self._set_local_time_zone_statement_()
            with self._option():
                self._set_session_characteristics_statement_()
            with self._option():
                self._set_catalog_statement_()
            with self._option():
                self._set_schema_statement_()
            with self._option():
                self._set_names_statement_()
            with self._option():
                self._set_path_statement_()
            with self._option():
                self._set_transform_group_statement_()
            with self._option():
                self._set_session_collation_statement_()
            self._error('no available options')

    @graken()
    def _sql_dynamic_statement_(self):
        """Parse a dynamic SQL statement: descriptor handling, PREPARE/DEALLOCATE/DESCRIBE/EXECUTE, or a dynamic data statement."""
        with self._choice():
            with self._option():
                self._descriptor_statement_()
            with self._option():
                self._prepare_statement_()
            with self._option():
                self._deallocate_prepared_statement_()
            with self._option():
                self._describe_statement_()
            with self._option():
                self._execute_statement_()
            with self._option():
                self._execute_immediate_statement_()
            with self._option():
                self._sql_dynamic_data_statement_()
            self._error('no available options')

    @graken()
    def _sql_dynamic_data_statement_(self):
        """Parse dynamic cursor operations: ALLOCATE, OPEN, FETCH, CLOSE, positioned DELETE/UPDATE."""
        with self._choice():
            with self._option():
                self._allocate_cursor_statement_()
            with self._option():
                self._dynamic_open_statement_()
            with self._option():
                self._dynamic_fetch_statement_()
            with self._option():
                self._dynamic_close_statement_()
            with self._option():
                self._dynamic_delete_statement_positioned_()
            with self._option():
                self._dynamic_update_statement_positioned_()
            self._error('no available options')

    @graken()
    def _descriptor_statement_(self):
        """Parse ALLOCATE/DEALLOCATE/SET/GET DESCRIPTOR."""
        with self._choice():
            with self._option():
                self._allocate_descriptor_statement_()
            with self._option():
                self._deallocate_descriptor_statement_()
            with self._option():
                self._set_descriptor_statement_()
            with self._option():
                self._get_descriptor_statement_()
            self._error('no available options')
    @graken()
    def _cursor_sensitivity_(self):
        """Parse SENSITIVE, INSENSITIVE, or ASENSITIVE."""
        with self._choice():
            with self._option():
                self._token('SENSITIVE')
            with self._option():
                self._token('INSENSITIVE')
            with self._option():
                self._token('ASENSITIVE')
            self._error('expecting one of: ASENSITIVE INSENSITIVE SENSITIVE')

    @graken()
    def _cursor_scrollability_(self):
        """Parse SCROLL or NO SCROLL."""
        with self._choice():
            with self._option():
                self._token('SCROLL')
            with self._option():
                self._token('NO')
                self._token('SCROLL')
            self._error('expecting one of: NO SCROLL')

    @graken()
    def _cursor_holdability_(self):
        """Parse WITH HOLD or WITHOUT HOLD."""
        with self._choice():
            with self._option():
                self._token('WITH')
                self._token('HOLD')
            with self._option():
                self._token('WITHOUT')
                self._token('HOLD')
            self._error('expecting one of: WITH WITHOUT')

    @graken()
    def _cursor_returnability_(self):
        """Parse WITH RETURN or WITHOUT RETURN."""
        with self._choice():
            with self._option():
                self._token('WITH')
                self._token('RETURN')
            with self._option():
                self._token('WITHOUT')
                self._token('RETURN')
            self._error('expecting one of: WITH WITHOUT')

    @graken()
    def _cursor_specification_(self):
        """Parse <query expression> [<order by clause>] [<updatability clause>]."""
        self._query_expression_()
        with self._optional():
            self._order_by_clause_()
        with self._optional():
            self._updatability_clause_()

    @graken()
    def _updatability_clause_(self):
        """Parse FOR (READ ONLY | UPDATE [OF column,...])."""
        self._token('FOR')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('READ')
                    self._token('ONLY')
                with self._option():
                    self._token('UPDATE')
                    with self._optional():
                        self._token('OF')
                        self._column_name_list_()
                self._error('expecting one of: READ UPDATE')

    @graken()
    def _order_by_clause_(self):
        """Parse ORDER BY <sort specification list>."""
        self._token('ORDER')
        self._token('BY')
        self._sort_specification_list_()

    @graken()
    def _open_statement_(self):
        """Parse OPEN <cursor name>."""
        self._token('OPEN')
        self._cursor_name_()

    @graken()
    def _fetch_statement_(self):
        """Parse FETCH [[<fetch orientation>] FROM] cursor INTO <fetch target list>."""
        self._token('FETCH')
        with self._optional():
            with self._optional():
                self._fetch_orientation_()
            self._token('FROM')
        self._cursor_name_()
        self._token('INTO')
        self._fetch_target_list_()

    @graken()
    def _fetch_orientation_(self):
        """Parse NEXT/PRIOR/FIRST/LAST, or (ABSOLUTE|RELATIVE) <simple value specification>."""
        with self._choice():
            with self._option():
                self._token('NEXT')
            with self._option():
                self._token('PRIOR')
            with self._option():
                self._token('FIRST')
            with self._option():
                self._token('LAST')
            with self._option():
                with self._group():
                    with self._choice():
                        with self._option():
                            self._token('ABSOLUTE')
                        with self._option():
                            self._token('RELATIVE')
                        self._error('expecting one of: ABSOLUTE RELATIVE')
                self._simple_value_specification_()
            self._error('expecting one of: FIRST LAST NEXT PRIOR')

    @graken()
    def _fetch_target_list_(self):
        """Parse a comma-separated list of <target specification>s."""
        def sep0():
            self._token(',')

        def block0():
            self._target_specification_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _close_statement_(self):
        """Parse CLOSE <cursor name>."""
        self._token('CLOSE')
        self._cursor_name_()
    @graken()
    def _select_statement_single_row_(self):
        """Parse SELECT [quantifier] <select list> INTO <select target list> <table expression>."""
        self._token('SELECT')
        with self._optional():
            self._set_quantifier_()
        self._select_list_()
        self._token('INTO')
        self._select_target_list_()
        self._table_expression_()

    @graken()
    def _select_target_list_(self):
        """Parse a comma-separated list of <target specification>s."""
        def sep0():
            self._token(',')

        def block0():
            self._target_specification_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _delete_statement_positioned_(self):
        """Parse DELETE FROM <target table> WHERE CURRENT OF cursor."""
        self._token('DELETE')
        self._token('FROM')
        self._target_table_()
        self._token('WHERE')
        self._token('CURRENT')
        self._token('OF')
        self._cursor_name_()

    @graken()
    def _target_table_(self):
        """Parse a table name or ONLY '(' table name ')'."""
        with self._choice():
            with self._option():
                self._table_name_()
            with self._option():
                self._token('ONLY')
                self._left_paren_()
                self._table_name_()
                self._right_paren_()
            self._error('no available options')

    @graken()
    def _delete_statement_searched_(self):
        """Parse DELETE FROM <target table> [WHERE <search condition>]."""
        self._token('DELETE')
        self._token('FROM')
        self._target_table_()
        with self._optional():
            self._token('WHERE')
            self._search_condition_()

    @graken()
    def _insert_statement_(self):
        """Parse INSERT INTO table <insert columns and source>."""
        self._token('INSERT')
        self._token('INTO')
        self._table_name_()
        self._insert_columns_and_source_()

    @graken()
    def _insert_columns_and_source_(self):
        """Parse the INSERT source: subquery, value constructor, or DEFAULT VALUES."""
        with self._choice():
            with self._option():
                self._from_subquery_()
            with self._option():
                self._from_constructor_()
            with self._option():
                self._from_default_()
            self._error('no available options')

    @graken()
    def _from_subquery_(self):
        """Parse ['(' columns ')'] [<override clause>] <query expression>."""
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._optional():
            self._override_clause_()
        self._query_expression_()

    @graken()
    def _from_constructor_(self):
        """Parse ['(' columns ')'] [<override clause>] <table value constructor>."""
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._optional():
            self._override_clause_()
        self._contextually_typed_table_value_constructor_()

    @graken()
    def _override_clause_(self):
        """Parse OVERRIDING USER VALUE or OVERRIDING SYSTEM VALUE."""
        with self._choice():
            with self._option():
                self._token('OVERRIDING')
                self._token('USER')
                self._token('VALUE')
            with self._option():
                self._token('OVERRIDING')
                self._token('SYSTEM')
                self._token('VALUE')
            self._error('expecting one of: OVERRIDING')

    @graken()
    def _from_default_(self):
        """Parse DEFAULT VALUES."""
        self._token('DEFAULT')
        self._token('VALUES')
    @graken()
    def _merge_statement_(self):
        """Parse MERGE INTO <target table> [AS clause] USING <table reference> ON <search condition> <merge operations>."""
        self._token('MERGE')
        self._token('INTO')
        self._target_table_()
        with self._optional():
            self._as_clause_()
        self._token('USING')
        self._table_reference_()
        self._token('ON')
        self._search_condition_()
        self._merge_operation_specification_()

    @graken()
    def _merge_operation_specification_(self):
        """Parse one or more <merge when clause>s."""
        def block0():
            self._merge_when_clause_()
        self._positive_closure(block0)

    @graken()
    def _merge_when_clause_(self):
        """Parse a WHEN MATCHED or WHEN NOT MATCHED clause."""
        with self._choice():
            with self._option():
                self._merge_when_matched_clause_()
            with self._option():
                self._merge_when_not_matched_clause_()
            self._error('no available options')

    @graken()
    def _merge_when_matched_clause_(self):
        """Parse WHEN MATCHED THEN <merge update specification>."""
        self._token('WHEN')
        self._token('MATCHED')
        self._token('THEN')
        self._merge_update_specification_()

    @graken()
    def _merge_when_not_matched_clause_(self):
        """Parse WHEN NOT MATCHED THEN <merge insert specification>."""
        self._token('WHEN')
        self._token('NOT')
        self._token('MATCHED')
        self._token('THEN')
        self._merge_insert_specification_()

    @graken()
    def _merge_update_specification_(self):
        """Parse UPDATE SET <set clause list>."""
        self._token('UPDATE')
        self._token('SET')
        self._set_clause_list_()

    @graken()
    def _merge_insert_specification_(self):
        """Parse INSERT ['(' columns ')'] [<override clause>] VALUES <merge insert value list>."""
        self._token('INSERT')
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._optional():
            self._override_clause_()
        self._token('VALUES')
        self._merge_insert_value_list_()

    @graken()
    def _merge_insert_value_list_(self):
        """Parse '(' <merge insert value element>,... ')'."""
        self._left_paren_()

        def sep0():
            self._token(',')

        def block0():
            self._merge_insert_value_element_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()

    @graken()
    def _merge_insert_value_element_(self):
        """Parse a value expression or a contextually typed value specification."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            self._error('no available options')
    @graken()
    def _update_statement_positioned_(self):
        """Parse UPDATE <target table> SET <set clause list> WHERE CURRENT OF cursor."""
        self._token('UPDATE')
        self._target_table_()
        self._token('SET')
        self._set_clause_list_()
        self._token('WHERE')
        self._token('CURRENT')
        self._token('OF')
        self._cursor_name_()

    @graken()
    def _update_statement_searched_(self):
        """Parse UPDATE <target table> SET <set clause list> [WHERE <search condition>]."""
        self._token('UPDATE')
        self._target_table_()
        self._token('SET')
        self._set_clause_list_()
        with self._optional():
            self._token('WHERE')
            self._search_condition_()

    @graken()
    def _set_clause_list_(self):
        """Parse a comma-separated list of <set clause>s."""
        def sep0():
            self._token(',')

        def block0():
            self._set_clause_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _set_clause_(self):
        """Parse a multiple-column assignment or <set target> '=' <update source>."""
        with self._choice():
            with self._option():
                self._multiple_column_assignment_()
            with self._option():
                self._set_target_()
                self._equals_operator_()
                self._update_source_()
            self._error('no available options')

    @graken()
    def _set_target_(self):
        """Parse an update target or a mutated set clause."""
        with self._choice():
            with self._option():
                self._update_target_()
            with self._option():
                self._mutated_set_clause_()
            self._error('no available options')

    @graken()
    def _multiple_column_assignment_(self):
        """Parse <set target list> '=' <contextually typed row value expression>."""
        self._set_target_list_()
        self._equals_operator_()
        self._contextually_typed_row_value_expression_()

    @graken()
    def _set_target_list_(self):
        """Parse '(' <set target>,... ')'."""
        self._left_paren_()

        def sep0():
            self._token(',')

        def block0():
            self._set_target_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()

    @graken()
    def _update_target_(self):
        """Parse a column name, optionally with a bracketed element reference."""
        with self._choice():
            with self._option():
                self._identifier_()
            with self._option():
                self._identifier_()
                self._left_bracket_or_trigraph_()
                self._simple_value_specification_()
                self._right_bracket_or_trigraph_()
            self._error('no available options')

    @graken()
    def _mutated_set_clause_(self):
        """Parse <mutated target> '.' method name."""
        self._mutated_target_()
        self._period_()
        self._identifier_()

    @graken()
    def _mutated_target_(self):
        """Parse an identifier or a nested mutated set clause."""
        with self._choice():
            with self._option():
                self._identifier_()
            with self._option():
                self._mutated_set_clause_()
            self._error('no available options')

    @graken()
    def _update_source_(self):
        """Parse a value expression or a contextually typed value specification."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            self._error('no available options')
    @graken()
    def _temporary_table_declaration_(self):
        """Parse DECLARE LOCAL TEMPORARY TABLE name <table element list> [ON COMMIT <action> ROWS]."""
        self._token('DECLARE')
        self._token('LOCAL')
        self._token('TEMPORARY')
        self._token('TABLE')
        self._table_name_()
        self._table_element_list_()
        with self._optional():
            self._token('ON')
            self._token('COMMIT')
            self._table_commit_action_()
            self._token('ROWS')

    @graken()
    def _free_locator_statement_(self):
        """Parse FREE LOCATOR <locator reference>,...."""
        self._token('FREE')
        self._token('LOCATOR')

        def sep0():
            self._token(',')

        def block0():
            self._locator_reference_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _locator_reference_(self):
        """Parse a host parameter name acting as a locator reference."""
        self._host_parameter_name_()

    @graken()
    def _hold_locator_statement_(self):
        """Parse HOLD LOCATOR <locator reference>,...."""
        self._token('HOLD')
        self._token('LOCATOR')

        def sep0():
            self._token(',')

        def block0():
            self._locator_reference_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _call_statement_(self):
        """Parse CALL <routine invocation>."""
        self._token('CALL')
        self._routine_invocation_()

    @graken()
    def _return_statement_(self):
        """Parse RETURN <return value>."""
        self._token('RETURN')
        self._return_value_()

    @graken()
    def _return_value_(self):
        """Parse a value expression or NULL."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._token('NULL')
            self._error('expecting one of: NULL')
    @graken()
    def _start_transaction_statement_(self):
        """Parse START TRANSACTION [<transaction characteristics>]."""
        self._token('START')
        self._token('TRANSACTION')
        with self._optional():
            self._transaction_characteristics_()

    @graken()
    def _transaction_mode_(self):
        """Parse an isolation level, access mode, or diagnostics size."""
        with self._choice():
            with self._option():
                self._isolation_level_()
            with self._option():
                self._transaction_access_mode_()
            with self._option():
                self._diagnostics_size_()
            self._error('no available options')

    @graken()
    def _transaction_access_mode_(self):
        """Parse READ ONLY or READ WRITE."""
        with self._choice():
            with self._option():
                self._token('READ')
                self._token('ONLY')
            with self._option():
                self._token('READ')
                self._token('WRITE')
            self._error('expecting one of: READ')

    @graken()
    def _isolation_level_(self):
        """Parse ISOLATION LEVEL <level of isolation>."""
        self._token('ISOLATION')
        self._token('LEVEL')
        self._level_of_isolation_()

    @graken()
    def _level_of_isolation_(self):
        """Parse READ UNCOMMITTED, READ COMMITTED, REPEATABLE READ, or SERIALIZABLE."""
        with self._choice():
            with self._option():
                self._token('READ')
                self._token('UNCOMMITTED')
            with self._option():
                self._token('READ')
                self._token('COMMITTED')
            with self._option():
                self._token('REPEATABLE')
                self._token('READ')
            with self._option():
                self._token('SERIALIZABLE')
            self._error('expecting one of: READ REPEATABLE SERIALIZABLE')

    @graken()
    def _diagnostics_size_(self):
        """Parse DIAGNOSTICS SIZE <simple value specification>."""
        self._token('DIAGNOSTICS')
        self._token('SIZE')
        self._simple_value_specification_()

    @graken()
    def _set_transaction_statement_(self):
        """Parse SET [LOCAL] TRANSACTION <transaction characteristics>."""
        self._token('SET')
        with self._optional():
            self._token('LOCAL')
        self._token('TRANSACTION')
        self._transaction_characteristics_()

    @graken()
    def _transaction_characteristics_(self):
        """Parse a comma-separated list of <transaction mode>s."""
        def sep0():
            self._token(',')

        def block0():
            self._transaction_mode_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _set_constraints_mode_statement_(self):
        """Parse SET CONSTRAINTS <constraint name list> (DEFERRED | IMMEDIATE)."""
        self._token('SET')
        self._token('CONSTRAINTS')
        self._constraint_name_list_()
        with self._group():
            with self._choice():
                with self._option():
                    self._token('DEFERRED')
                with self._option():
                    self._token('IMMEDIATE')
                self._error('expecting one of: DEFERRED IMMEDIATE')

    @graken()
    def _constraint_name_list_(self):
        """Parse ALL or a comma-separated list of constraint names."""
        with self._choice():
            with self._option():
                self._token('ALL')
            with self._option():
                def sep0():
                    self._token(',')

                def block0():
                    self._schema_qualified_name_()
                self._positive_closure(block0, prefix=sep0)
            self._error('expecting one of: ALL')

    @graken()
    def _savepoint_statement_(self):
        """Parse SAVEPOINT identifier."""
        self._token('SAVEPOINT')
        self._identifier_()

    @graken()
    def _release_savepoint_statement_(self):
        """Parse RELEASE SAVEPOINT identifier."""
        self._token('RELEASE')
        self._token('SAVEPOINT')
        self._identifier_()

    @graken()
    def _commit_statement_(self):
        """Parse COMMIT [WORK] [AND [NO] CHAIN]."""
        self._token('COMMIT')
        with self._optional():
            self._token('WORK')
        with self._optional():
            self._token('AND')
            with self._optional():
                self._token('NO')
            self._token('CHAIN')

    @graken()
    def _rollback_statement_(self):
        """Parse ROLLBACK [WORK] [AND [NO] CHAIN] [<savepoint clause>]."""
        self._token('ROLLBACK')
        with self._optional():
            self._token('WORK')
        with self._optional():
            self._token('AND')
            with self._optional():
                self._token('NO')
            self._token('CHAIN')
        with self._optional():
            self._savepoint_clause_()

    @graken()
    def _savepoint_clause_(self):
        """Parse TO SAVEPOINT identifier."""
        self._token('TO')
        self._token('SAVEPOINT')
        self._identifier_()
    @graken()
    def _connect_statement_(self):
        """Parse CONNECT TO <connection target>."""
        self._token('CONNECT')
        self._token('TO')
        self._connection_target_()

    @graken()
    def _connection_target_(self):
        """Parse a server value [AS connection name] [USER user value], or DEFAULT."""
        with self._choice():
            with self._option():
                self._simple_value_specification_()
                with self._optional():
                    self._token('AS')
                    self._connection_name_()
                with self._optional():
                    self._token('USER')
                    self._simple_value_specification_()
            with self._option():
                self._token('DEFAULT')
            self._error('expecting one of: DEFAULT')

    @graken()
    def _set_connection_statement_(self):
        """Parse SET CONNECTION <connection object>."""
        self._token('SET')
        self._token('CONNECTION')
        self._connection_object_()

    @graken()
    def _connection_object_(self):
        """Parse DEFAULT or a connection name."""
        with self._choice():
            with self._option():
                self._token('DEFAULT')
            with self._option():
                self._connection_name_()
            self._error('expecting one of: DEFAULT')

    @graken()
    def _disconnect_statement_(self):
        """Parse DISCONNECT <disconnect object>."""
        self._token('DISCONNECT')
        self._disconnect_object_()

    @graken()
    def _disconnect_object_(self):
        """Parse a connection object, ALL, or CURRENT."""
        with self._choice():
            with self._option():
                self._connection_object_()
            with self._option():
                self._token('ALL')
            with self._option():
                self._token('CURRENT')
            self._error('expecting one of: ALL CURRENT')

    @graken()
    def _set_session_characteristics_statement_(self):
        """Parse SET SESSION CHARACTERISTICS AS <session characteristic list>."""
        self._token('SET')
        self._token('SESSION')
        self._token('CHARACTERISTICS')
        self._token('AS')
        self._session_characteristic_list_()

    @graken()
    def _session_characteristic_list_(self):
        """Parse a comma-separated list of <session characteristic>s."""
        def sep0():
            self._token(',')

        def block0():
            self._session_characteristic_()
        self._positive_closure(block0, prefix=sep0)

    @graken()
    def _session_characteristic_(self):
        """Parse TRANSACTION <transaction characteristics>."""
        self._token('TRANSACTION')
        self._transaction_characteristics_()

    @graken()
    def _set_session_user_identifier_statement_(self):
        """Parse SET SESSION AUTHORIZATION <value specification>."""
        self._token('SET')
        self._token('SESSION')
        self._token('AUTHORIZATION')
        self._value_specification_()

    @graken()
    def _set_role_statement_(self):
        """Parse SET ROLE <role specification>."""
        self._token('SET')
        self._token('ROLE')
        self._role_specification_()

    @graken()
    def _role_specification_(self):
        """Parse a role value specification or NONE."""
        with self._choice():
            with self._option():
                self._value_specification_()
            with self._option():
                self._token('NONE')
            self._error('expecting one of: NONE')
    @graken()
    def _set_local_time_zone_statement_(self):
        """Parse SET TIME ZONE <set time zone value>."""
        self._token('SET')
        self._token('TIME')
        self._token('ZONE')
        self._set_time_zone_value_()

    @graken()
    def _set_time_zone_value_(self):
        """Parse an interval value expression or LOCAL."""
        with self._choice():
            with self._option():
                self._interval_value_expression_()
            with self._option():
                self._token('LOCAL')
            self._error('expecting one of: LOCAL')

    @graken()
    def _set_catalog_statement_(self):
        """Parse SET <catalog name characteristic>."""
        self._token('SET')
        self._catalog_name_characteristic_()

    @graken()
    def _catalog_name_characteristic_(self):
        """Parse CATALOG <value specification>."""
        self._token('CATALOG')
        self._value_specification_()

    @graken()
    def _set_schema_statement_(self):
        """Parse SET <schema name characteristic>."""
        self._token('SET')
        self._schema_name_characteristic_()

    @graken()
    def _schema_name_characteristic_(self):
        """Parse SCHEMA <value specification>."""
        self._token('SCHEMA')
        self._value_specification_()

    @graken()
    def _set_names_statement_(self):
        """Parse SET <character set name characteristic>."""
        self._token('SET')
        self._character_set_name_characteristic_()

    @graken()
    def _character_set_name_characteristic_(self):
        """Parse NAMES <value specification>."""
        self._token('NAMES')
        self._value_specification_()

    @graken()
    def _set_path_statement_(self):
        """Parse SET <SQL-path characteristic>."""
        self._token('SET')
        self._sql_path_characteristic_()

    @graken()
    def _sql_path_characteristic_(self):
        """Parse PATH <value specification>."""
        self._token('PATH')
        self._value_specification_()

    @graken()
    def _set_transform_group_statement_(self):
        """Parse SET <transform group characteristic>."""
        self._token('SET')
        self._transform_group_characteristic_()

    @graken()
    def _transform_group_characteristic_(self):
        """Parse DEFAULT TRANSFORM GROUP value, or TRANSFORM GROUP FOR TYPE name value."""
        with self._choice():
            with self._option():
                self._token('DEFAULT')
                self._token('TRANSFORM')
                self._token('GROUP')
                self._value_specification_()
            with self._option():
                self._token('TRANSFORM')
                self._token('GROUP')
                self._token('FOR')
                self._token('TYPE')
                self._schema_qualified_name_()
                self._value_specification_()
            self._error('no available options')

    @graken()
    def _set_session_collation_statement_(self):
        """Parse SET [NO] COLLATION value [FOR <character set specification list>]."""
        with self._choice():
            with self._option():
                self._token('SET')
                self._token('COLLATION')
                self._value_specification_()
                with self._optional():
                    self._token('FOR')
                    self._character_set_specification_list_()
            with self._option():
                self._token('SET')
                self._token('NO')
                self._token('COLLATION')
                with self._optional():
                    self._token('FOR')
                    self._character_set_specification_list_()
            self._error('expecting one of: SET')
@graken()
def _allocate_descriptor_statement_(self):
self._token('ALLOCATE')
with self._optional():
self._token('SQL')
self._token('DESCRIPTOR')
self._descriptor_name_()
with self._optional():
self._token('WITH')
self._token('MAX')
self._simple_value_specification_()
@graken()
def _deallocate_descriptor_statement_(self):
self._token('DEALLOCATE')
with self._optional():
self._token('SQL')
self._token('DESCRIPTOR')
self._descriptor_name_()
@graken()
def _get_descriptor_statement_(self):
    # <get descriptor statement> ::=
    #     GET [ SQL ] DESCRIPTOR <descriptor name> <get descriptor information>
    # Dynamic-SQL descriptor read; the SQL keyword is optional noise per the
    # standard grammar.  (Nesting reconstructed — source indentation was
    # mangled; only the SQL token is inside the optional.)
    self._token('GET')
    with self._optional():
        self._token('SQL')
    self._token('DESCRIPTOR')
    self._descriptor_name_()
    self._get_descriptor_information_()
@graken()
def _get_descriptor_information_(self):
    # Two alternatives, tried in order (PEG-style ordered choice):
    #   1. a comma-separated list of <get header information> items
    #   2. VALUE <item number> followed by a comma-separated list of
    #      <get item information> items
    # sepN/blockN are the generator's closure pattern: blockN parses one
    # element, sepN the separator, and _positive_closure requires >= 1 match.
    with self._choice():
        with self._option():
            def sep0():
                self._token(',')
            def block0():
                self._get_header_information_()
            self._positive_closure(block0, prefix=sep0)
        with self._option():
            self._token('VALUE')
            self._item_number_()
            def sep1():
                self._token(',')
            def block1():
                self._get_item_information_()
            self._positive_closure(block1, prefix=sep1)
        self._error('no available options')
@graken()
def _get_header_information_(self):
self._simple_target_specification_()
self._equals_operator_()
self._header_item_name_()
@graken()
def _header_item_name_(self):
with self._choice():
with self._option():
self._token('COUNT')
with self._option():
self._token('KEY_TYPE')
with self._option():
self._token('DYNAMIC_FUNCTION')
with self._option():
self._token('DYNAMIC_FUNCTION_CODE')
with self._option():
self._token('TOP_LEVEL_COUNT')
self._error('expecting one of: COUNT DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE KEY_TYPE TOP_LEVEL_COUNT')
@graken()
def _get_item_information_(self):
self._simple_target_specification_()
self._equals_operator_()
self._descriptor_item_name_()
@graken()
def _item_number_(self):
self._simple_value_specification_()
@graken()
def _descriptor_item_name_(self):
with self._choice():
with self._option():
self._token('CARDINALITY')
with self._option():
self._token('CHARACTER_SET_CATALOG')
with self._option():
self._token('CHARACTER_SET_NAME')
with self._option():
self._token('CHARACTER_SET_SCHEMA')
with self._option():
self._token('COLLATION_CATALOG')
with self._option():
self._token('COLLATION_NAME')
with self._option():
self._token('COLLATION_SCHEMA')
with self._option():
self._token('DATA')
with self._option():
self._token('DATETIME_INTERVAL_CODE')
with self._option():
self._token('DATETIME_INTERVAL_PRECISION')
with self._option():
self._token('DEGREE')
with self._option():
self._token('INDICATOR')
with self._option():
self._token('KEY_MEMBER')
with self._option():
self._token('LENGTH')
with self._option():
self._token('LEVEL')
with self._option():
self._token('NAME')
with self._option():
self._token('NULLABLE')
with self._option():
self._token('OCTET_LENGTH')
with self._option():
self._token('PARAMETER_MODE')
with self._option():
self._token('PARAMETER_ORDINAL_POSITION')
with self._option():
self._token('PARAMETER_SPECIFIC_CATALOG')
with self._option():
self._token('PARAMETER_SPECIFIC_NAME')
with self._option():
self._token('PARAMETER_SPECIFIC_SCHEMA')
with self._option():
self._token('PRECISION')
with self._option():
self._token('RETURNED_CARDINALITY')
with self._option():
self._token('RETURNED_LENGTH')
with self._option():
self._token('RETURNED_OCTET_LENGTH')
with self._option():
self._token('SCALE')
with self._option():
self._token('SCOPE_CATALOG')
with self._option():
self._token('SCOPE_NAME')
with self._option():
self._token('SCOPE_SCHEMA')
with self._option():
self._token('TYPE')
with self._option():
self._token('UNNAMED')
with self._option():
self._token('USER_DEFINED_TYPE_CATALOG')
with self._option():
self._token('USER_DEFINED_TYPE_NAME')
with self._option():
self._token('USER_DEFINED_TYPE_SCHEMA')
with self._option():
self._token('USER_DEFINED_TYPE_CODE')
self._error('expecting one of: CARDINALITY CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA DATA DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DEGREE INDICATOR KEY_MEMBER LENGTH LEVEL NAME NULLABLE OCTET_LENGTH PARAMETER_MODE PARAMETER_ORDINAL_POSITION PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PRECISION RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH SCALE SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA TYPE UNNAMED USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA')
@graken()
def _set_descriptor_statement_(self):
    # <set descriptor statement> ::=
    #     SET [ SQL ] DESCRIPTOR <descriptor name> <set descriptor information>
    # Write-side counterpart of _get_descriptor_statement_; same optional-SQL
    # shape.  (Nesting reconstructed from the standard grammar — only the SQL
    # token is optional.)
    self._token('SET')
    with self._optional():
        self._token('SQL')
    self._token('DESCRIPTOR')
    self._descriptor_name_()
    self._set_descriptor_information_()
@graken()
def _set_descriptor_information_(self):
    # Mirrors _get_descriptor_information_ but with <set header information> /
    # <set item information> elements: either a comma list of header
    # assignments, or VALUE <item number> plus a comma list of item
    # assignments.
    with self._choice():
        with self._option():
            def sep0():
                self._token(',')
            def block0():
                self._set_header_information_()
            self._positive_closure(block0, prefix=sep0)
        with self._option():
            self._token('VALUE')
            self._item_number_()
            def sep1():
                self._token(',')
            def block1():
                self._set_item_information_()
            self._positive_closure(block1, prefix=sep1)
        self._error('no available options')
@graken()
def _set_header_information_(self):
self._header_item_name_()
self._equals_operator_()
self._simple_value_specification_()
@graken()
def _set_item_information_(self):
self._descriptor_item_name_()
self._equals_operator_()
self._simple_value_specification_()
@graken()
def _prepare_statement_(self):
    # <prepare statement> ::=
    #     PREPARE <SQL statement name> [ <attributes specification> ]
    #     FROM <SQL statement variable>
    # Only the ATTRIBUTES clause is optional; FROM <variable> is mandatory.
    self._token('PREPARE')
    self._sql_statement_name_()
    with self._optional():
        self._attributes_specification_()
    self._token('FROM')
    self._sql_statement_variable_()
@graken()
def _attributes_specification_(self):
self._token('ATTRIBUTES')
self._simple_value_specification_()
@graken()
def _sql_statement_variable_(self):
self._simple_value_specification_()
@graken()
def _deallocate_prepared_statement_(self):
self._token('DEALLOCATE')
self._token('PREPARE')
self._sql_statement_name_()
@graken()
def _describe_statement_(self):
with self._choice():
with self._option():
self._describe_input_statement_()
with self._option():
self._describe_output_statement_()
self._error('no available options')
@graken()
def _describe_input_statement_(self):
self._token('DESCRIBE')
self._token('INPUT')
self._sql_statement_name_()
self._using_descriptor_()
with self._optional():
self._nesting_option_()
@graken()
def _describe_output_statement_(self):
self._token('DESCRIBE')
with self._optional():
self._token('OUTPUT')
self._described_object_()
self._using_descriptor_()
with self._optional():
self._nesting_option_()
@graken()
def _nesting_option_(self):
with self._choice():
with self._option():
self._token('WITH')
self._token('NESTING')
with self._option():
self._token('WITHOUT')
self._token('NESTING')
self._error('expecting one of: WITH WITHOUT')
@graken()
def _using_descriptor_(self):
self._token('USING')
with self._optional():
self._token('SQL')
self._token('DESCRIPTOR')
self._descriptor_name_()
@graken()
def _described_object_(self):
with self._choice():
with self._option():
self._sql_statement_name_()
with self._option():
self._token('CURSOR')
self._extended_cursor_name_()
self._token('STRUCTURE')
self._error('no available options')
@graken()
def _input_using_clause_(self):
with self._choice():
with self._option():
self._using_arguments_()
with self._option():
self._using_descriptor_()
self._error('no available options')
@graken()
def _using_arguments_(self):
self._token('USING')
def sep0():
self._token(',')
def block0():
self._using_argument_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _using_argument_(self):
self._general_value_specification_()
@graken()
def _output_using_clause_(self):
with self._choice():
with self._option():
self._into_arguments_()
with self._option():
self._into_descriptor_()
self._error('no available options')
@graken()
def _into_arguments_(self):
self._token('INTO')
def sep0():
self._token(',')
def block0():
self._into_argument_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _into_argument_(self):
self._target_specification_()
@graken()
def _into_descriptor_(self):
self._token('INTO')
with self._optional():
self._token('SQL')
self._token('DESCRIPTOR')
self._descriptor_name_()
@graken()
def _execute_statement_(self):
    # <execute statement> ::=
    #     EXECUTE <SQL statement name>
    #     [ <result using clause> ] [ <parameter using clause> ]
    # Note the order: the output (INTO ...) clause is tried before the input
    # (USING ...) clause, matching the standard's result-then-parameter order.
    self._token('EXECUTE')
    self._sql_statement_name_()
    with self._optional():
        self._output_using_clause_()
    with self._optional():
        self._input_using_clause_()
@graken()
def _execute_immediate_statement_(self):
self._token('EXECUTE')
self._token('IMMEDIATE')
self._sql_statement_variable_()
@graken()
def _allocate_cursor_statement_(self):
self._token('ALLOCATE')
self._extended_cursor_name_()
self._cursor_intent_()
@graken()
def _cursor_intent_(self):
with self._choice():
with self._option():
self._statement_cursor_()
with self._option():
self._result_set_cursor_()
self._error('no available options')
@graken()
def _statement_cursor_(self):
    # <statement cursor> ::=
    #     [ <cursor sensitivity> ] [ <cursor scrollability> ] CURSOR
    #     [ <cursor holdability> ] [ <cursor returnability> ]
    #     FOR <extended statement name>
    # Four independent optional cursor properties around the mandatory
    # CURSOR ... FOR core.
    with self._optional():
        self._cursor_sensitivity_()
    with self._optional():
        self._cursor_scrollability_()
    self._token('CURSOR')
    with self._optional():
        self._cursor_holdability_()
    with self._optional():
        self._cursor_returnability_()
    self._token('FOR')
    self._extended_statement_name_()
@graken()
def _result_set_cursor_(self):
self._token('FOR')
self._token('PROCEDURE')
self._specific_routine_designator_()
@graken()
def _dynamic_open_statement_(self):
self._token('OPEN')
self._dynamic_cursor_name_()
with self._optional():
self._input_using_clause_()
@graken()
def _dynamic_fetch_statement_(self):
    # <dynamic fetch statement> ::=
    #     FETCH [ [ <fetch orientation> ] FROM ]
    #     <dynamic cursor name> <output using clause>
    # The doubly-nested optional is deliberate, not redundant: the outer
    # optional is "[orientation] FROM" as a unit, the inner one makes the
    # orientation alone omittable — so "FETCH FROM c", "FETCH NEXT FROM c"
    # and plain "FETCH c" all parse.
    self._token('FETCH')
    with self._optional():
        with self._optional():
            self._fetch_orientation_()
        self._token('FROM')
    self._dynamic_cursor_name_()
    self._output_using_clause_()
@graken()
def _dynamic_close_statement_(self):
self._token('CLOSE')
self._dynamic_cursor_name_()
@graken()
def _dynamic_delete_statement_positioned_(self):
self._token('DELETE')
self._token('FROM')
self._target_table_()
self._token('WHERE')
self._token('CURRENT')
self._token('OF')
self._dynamic_cursor_name_()
@graken()
def _dynamic_update_statement_positioned_(self):
self._token('UPDATE')
self._target_table_()
self._token('SET')
self._set_clause_list_()
self._token('WHERE')
self._token('CURRENT')
self._token('OF')
self._dynamic_cursor_name_()
@graken()
def _direct_sql_statement_(self):
    # <direct SQL statement> ::= <directly executable statement> <semicolon>
    # A single interactively-issued statement, terminated by ';'.
    self._directly_executable_statement_()
    self._semicolon_()
@graken()
def _directly_executable_statement_(self):
    # Ordered choice over the five statement families legal in direct
    # (interactive) SQL: data, schema, transaction, connection, session.
    # Order matters in a PEG — data statements are tried first.
    with self._choice():
        with self._option():
            self._direct_sql_data_statement_()
        with self._option():
            self._sql_schema_statement_()
        with self._option():
            self._sql_transaction_statement_()
        with self._option():
            self._sql_connection_statement_()
        with self._option():
            self._sql_session_statement_()
        self._error('no available options')
@graken()
def _direct_sql_data_statement_(self):
    # Ordered choice over the direct data statements: searched DELETE,
    # a bare query (<cursor specification>), INSERT, searched UPDATE,
    # MERGE, and temporary-table declaration.
    with self._choice():
        with self._option():
            self._delete_statement_searched_()
        with self._option():
            self._cursor_specification_()
        with self._option():
            self._insert_statement_()
        with self._option():
            self._update_statement_searched_()
        with self._option():
            self._merge_statement_()
        with self._option():
            self._temporary_table_declaration_()
        self._error('no available options')
@graken()
def _get_diagnostics_statement_(self):
self._token('GET')
self._token('DIAGNOSTICS')
self._sql_diagnostics_information_()
@graken()
def _sql_diagnostics_information_(self):
with self._choice():
with self._option():
self._statement_information_()
with self._option():
self._condition_information_()
self._error('no available options')
@graken()
def _statement_information_(self):
def sep0():
self._token(',')
def block0():
self._statement_information_item_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _statement_information_item_(self):
self._simple_target_specification_()
self._equals_operator_()
self._statement_information_item_name_()
@graken()
def _statement_information_item_name_(self):
with self._choice():
with self._option():
self._token('NUMBER')
with self._option():
self._token('MORE')
with self._option():
self._token('COMMAND_FUNCTION')
with self._option():
self._token('COMMAND_FUNCTION_CODE')
with self._option():
self._token('DYNAMIC_FUNCTION')
with self._option():
self._token('DYNAMIC_FUNCTION_CODE')
with self._option():
self._token('ROW_COUNT')
with self._option():
self._token('TRANSACTIONS_COMMITTED')
with self._option():
self._token('TRANSACTIONS_ROLLED_BACK')
with self._option():
self._token('TRANSACTION_ACTIVE')
self._error('expecting one of: COMMAND_FUNCTION COMMAND_FUNCTION_CODE DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE MORE NUMBER ROW_COUNT TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSACTION_ACTIVE')
@graken()
def _condition_information_(self):
    # <condition information> ::=
    #     { EXCEPTION | CONDITION } <condition number>
    #     <condition information item> [ { , <condition information item> } ]
    # Used by GET DIAGNOSTICS; the leading keyword selects the diagnostics
    # area flavor, then a 1-based condition number and a comma list of items.
    with self._group():
        with self._choice():
            with self._option():
                self._token('EXCEPTION')
            with self._option():
                self._token('CONDITION')
            self._error('expecting one of: CONDITION EXCEPTION')
    self._simple_value_specification_()
    def sep1():
        self._token(',')
    def block1():
        self._condition_information_item_()
    self._positive_closure(block1, prefix=sep1)
@graken()
def _condition_information_item_(self):
self._simple_target_specification_()
self._equals_operator_()
self._condition_information_item_name_()
@graken()
def _condition_information_item_name_(self):
with self._choice():
with self._option():
self._token('CATALOG_NAME')
with self._option():
self._token('CLASS_ORIGIN')
with self._option():
self._token('COLUMN_NAME')
with self._option():
self._token('CONDITION_NUMBER')
with self._option():
self._token('CONNECTION_NAME')
with self._option():
self._token('CONSTRAINT_CATALOG')
with self._option():
self._token('CONSTRAINT_NAME')
with self._option():
self._token('CONSTRAINT_SCHEMA')
with self._option():
self._token('CURSOR_NAME')
with self._option():
self._token('MESSAGE_LENGTH')
with self._option():
self._token('MESSAGE_OCTET_LENGTH')
with self._option():
self._token('MESSAGE_TEXT')
with self._option():
self._token('PARAMETER_MODE')
with self._option():
self._token('PARAMETER_NAME')
with self._option():
self._token('PARAMETER_ORDINAL_POSITION')
with self._option():
self._token('RETURNED_SQLSTATE')
with self._option():
self._token('ROUTINE_CATALOG')
with self._option():
self._token('ROUTINE_NAME')
with self._option():
self._token('ROUTINE_SCHEMA')
with self._option():
self._token('SCHEMA_NAME')
with self._option():
self._token('SERVER_NAME')
with self._option():
self._token('SPECIFIC_NAME')
with self._option():
self._token('SUBCLASS_ORIGIN')
with self._option():
self._token('TABLE_NAME')
with self._option():
self._token('TRIGGER_CATALOG')
with self._option():
self._token('TRIGGER_NAME')
with self._option():
self._token('TRIGGER_SCHEMA')
self._error('expecting one of: CATALOG_NAME CLASS_ORIGIN COLUMN_NAME CONDITION_NUMBER CONNECTION_NAME CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CURSOR_NAME MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION RETURNED_SQLSTATE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA SCHEMA_NAME SERVER_NAME SPECIFIC_NAME SUBCLASS_ORIGIN TABLE_NAME TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA')
@graken()
def _ref_cast_option_(self):
with self._optional():
self._cast_to_ref_()
with self._optional():
self._cast_to_type_()
@graken()
def _cast_option_(self):
with self._optional():
self._cast_to_distinct_()
with self._optional():
self._cast_to_source_()
@graken()
def _reference_scope_check_(self):
    # <reference scope check> ::=
    #     REFERENCES ARE [ NOT ] CHECKED
    #     [ ON DELETE <referential action> ]
    # NOT alone is optional inside the mandatory REFERENCES ARE ... CHECKED;
    # the ON DELETE action clause is optional as a whole.
    self._token('REFERENCES')
    self._token('ARE')
    with self._optional():
        self._token('NOT')
    self._token('CHECKED')
    with self._optional():
        self._token('ON')
        self._token('DELETE')
        self._referential_action_()
@graken()
def _start_(self):
    # Grammar entry point: exactly one <direct SQL statement>, then
    # end-of-input (anything trailing is a parse error via _check_eof).
    self._direct_sql_statement_()
    self._check_eof()
class SqlSemantics(object):
def digit(self, ast):
return ast
def double_quote(self, ast):
return ast
def quote(self, ast):
return ast
def left_paren(self, ast):
return ast
def right_paren(self, ast):
return ast
def asterisk(self, ast):
return ast
def plus_sign(self, ast):
return ast
def comma(self, ast):
return ast
def minus_sign(self, ast):
return ast
def period(self, ast):
return ast
def solidus(self, ast):
return ast
def colon(self, ast):
return ast
def semicolon(self, ast):
return ast
def less_than_operator(self, ast):
return ast
def equals_operator(self, ast):
return ast
def greater_than_operator(self, ast):
return ast
def question_mark(self, ast):
return ast
def left_bracket_or_trigraph(self, ast):
return ast
def right_bracket_or_trigraph(self, ast):
return ast
def left_bracket(self, ast):
return ast
def left_bracket_trigraph(self, ast):
return ast
def right_bracket(self, ast):
return ast
def right_bracket_trigraph(self, ast):
return ast
def underscore(self, ast):
return ast
def regular_identifier(self, ast):
return ast
def large_object_length_token(self, ast):
return ast
def multiplier(self, ast):
return ast
def delimited_identifier(self, ast):
return ast
def delimited_identifier_body(self, ast):
return ast
def unicode_escape_value(self, ast):
return ast
def unicode_4_digit_escape_value(self, ast):
return ast
def unicode_6_digit_escape_value(self, ast):
return ast
def unicode_character_escape_value(self, ast):
return ast
def unicode_escape_character(self, ast):
return ast
def not_equals_operator(self, ast):
return ast
def greater_than_or_equals_operator(self, ast):
return ast
def less_than_or_equals_operator(self, ast):
return ast
def concatenation_operator(self, ast):
return ast
def right_arrow(self, ast):
return ast
def double_colon(self, ast):
return ast
def literal(self, ast):
return ast
def unsigned_literal(self, ast):
return ast
def general_literal(self, ast):
return ast
def character_string_literal(self, ast):
return ast
def character_representation(self, ast):
return ast
def national_character_string_literal(self, ast):
return ast
def unicode_character_string_literal(self, ast):
return ast
def unicode_representation(self, ast):
return ast
def binary_string_literal(self, ast):
return ast
def hexit(self, ast):
return ast
def byte(self, ast):
return ast
def signed_numeric_literal(self, ast):
return ast
def unsigned_numeric_literal(self, ast):
return ast
def exact_numeric_literal(self, ast):
return ast
def sign(self, ast):
return ast
def approximate_numeric_literal(self, ast):
return ast
def signed_integer(self, ast):
return ast
def unsigned_integer(self, ast):
return ast
def datetime_literal(self, ast):
return ast
def date_literal(self, ast):
return ast
def time_literal(self, ast):
return ast
def timestamp_literal(self, ast):
return ast
def date_string(self, ast):
return ast
def time_string(self, ast):
return ast
def timestamp_string(self, ast):
return ast
def time_zone_interval(self, ast):
return ast
def date_value(self, ast):
return ast
def time_value(self, ast):
return ast
def interval_literal(self, ast):
return ast
def interval_string(self, ast):
return ast
def unquoted_date_string(self, ast):
return ast
def unquoted_time_string(self, ast):
return ast
def unquoted_timestamp_string(self, ast):
return ast
def unquoted_interval_string(self, ast):
return ast
def year_month_literal(self, ast):
return ast
def day_time_literal(self, ast):
return ast
def day_time_interval(self, ast):
return ast
def time_interval(self, ast):
return ast
def years_value(self, ast):
return ast
def months_value(self, ast):
return ast
def days_value(self, ast):
return ast
def hours_value(self, ast):
return ast
def minutes_value(self, ast):
return ast
def seconds_value(self, ast):
return ast
def datetime_value(self, ast):
return ast
def boolean_literal(self, ast):
return ast
def identifier(self, ast):
return ast
def identifier_list(self, ast):
return ast
def actual_identifier(self, ast):
return ast
def table_name(self, ast):
return ast
def schema_name(self, ast):
return ast
def schema_qualified_name(self, ast):
return ast
def local_or_schema_qualified_name(self, ast):
return ast
def local_or_schema_qualifier(self, ast):
return ast
def cursor_name(self, ast):
return ast
def local_qualified_name(self, ast):
return ast
def host_parameter_name(self, ast):
return ast
def external_routine_name(self, ast):
return ast
def character_set_name(self, ast):
return ast
def connection_name(self, ast):
return ast
def sql_statement_name(self, ast):
return ast
def extended_statement_name(self, ast):
return ast
def dynamic_cursor_name(self, ast):
return ast
def extended_cursor_name(self, ast):
return ast
def descriptor_name(self, ast):
return ast
def scope_option(self, ast):
return ast
def data_type(self, ast):
return ast
def predefined_type(self, ast):
return ast
def character_string_type(self, ast):
return ast
def character_large_object_type(self, ast):
return ast
def national_character_string_type(self, ast):
return ast
def national_character_large_object_type(self, ast):
return ast
def binary_large_object_string_type(self, ast):
return ast
def numeric_type(self, ast):
return ast
def exact_numeric_type(self, ast):
return ast
def approximate_numeric_type(self, ast):
return ast
def length(self, ast):
return ast
def large_object_length(self, ast):
return ast
def char_length_units(self, ast):
return ast
def precision(self, ast):
return ast
def scale(self, ast):
return ast
def datetime_type(self, ast):
return ast
def with_or_without_time_zone(self, ast):
return ast
def interval_type(self, ast):
return ast
def row_type(self, ast):
return ast
def row_type_body(self, ast):
return ast
def reference_type(self, ast):
return ast
def scope_clause(self, ast):
return ast
def collection_type(self, ast):
return ast
def array_type(self, ast):
return ast
def multiset_type(self, ast):
return ast
def field_definition(self, ast):
return ast
def value_expression_primary(self, ast):
return ast
def parenthesized_value_expression(self, ast):
return ast
def nonparenthesized_value_expression_primary(self, ast):
return ast
def collection_value_constructor(self, ast):
return ast
def value_specification(self, ast):
return ast
def unsigned_value_specification(self, ast):
return ast
def general_value_specification(self, ast):
return ast
def simple_value_specification(self, ast):
return ast
def target_specification(self, ast):
return ast
def simple_target_specification(self, ast):
return ast
def host_parameter_specification(self, ast):
return ast
def dynamic_parameter_specification(self, ast):
return ast
def indicator_parameter(self, ast):
return ast
def target_array_element_specification(self, ast):
return ast
def target_array_reference(self, ast):
return ast
def current_collation_specification(self, ast):
return ast
def contextually_typed_value_specification(self, ast):
return ast
def implicitly_typed_value_specification(self, ast):
return ast
def empty_specification(self, ast):
return ast
def identifier_chain(self, ast):
return ast
def column_reference(self, ast):
return ast
def sql_parameter_reference(self, ast):
return ast
def set_function_specification(self, ast):
return ast
def grouping_operation(self, ast):
return ast
def window_function(self, ast):
return ast
def window_function_type(self, ast):
return ast
def rank_function_type(self, ast):
return ast
def window_name_or_specification(self, ast):
return ast
def case_expression(self, ast):
return ast
def case_abbreviation(self, ast):
return ast
def case_specification(self, ast):
return ast
def simple_case(self, ast):
return ast
def searched_case(self, ast):
return ast
def simple_when_clause(self, ast):
return ast
def searched_when_clause(self, ast):
return ast
def else_clause(self, ast):
return ast
def case_operand(self, ast):
return ast
def when_operand(self, ast):
return ast
def result(self, ast):
return ast
def cast_specification(self, ast):
return ast
def cast_operand(self, ast):
return ast
def cast_target(self, ast):
return ast
def next_value_expression(self, ast):
return ast
def field_reference(self, ast):
return ast
def subtype_treatment(self, ast):
return ast
def target_subtype(self, ast):
return ast
def method_invocation(self, ast):
return ast
def direct_invocation(self, ast):
return ast
def generalized_invocation(self, ast):
return ast
def static_method_invocation(self, ast):
return ast
def new_specification(self, ast):
return ast
def attribute_or_method_reference(self, ast):
return ast
def reference_resolution(self, ast):
return ast
def array_element_reference(self, ast):
return ast
def multiset_element_reference(self, ast):
return ast
def value_expression(self, ast):
return ast
def common_value_expression(self, ast):
return ast
def user_defined_type_value_expression(self, ast):
return ast
def reference_value_expression(self, ast):
return ast
def collection_value_expression(self, ast):
return ast
def numeric_value_expression(self, ast):
return ast
def term(self, ast):
return ast
def factor(self, ast):
return ast
def numeric_primary(self, ast):
return ast
def numeric_value_function(self, ast):
return ast
def position_expression(self, ast):
return ast
def string_position_expression(self, ast):
return ast
def blob_position_expression(self, ast):
return ast
def length_expression(self, ast):
return ast
def char_length_expression(self, ast):
return ast
def octet_length_expression(self, ast):
return ast
def extract_expression(self, ast):
return ast
def extract_field(self, ast):
return ast
def time_zone_field(self, ast):
return ast
def extract_source(self, ast):
return ast
def cardinality_expression(self, ast):
return ast
def absolute_value_expression(self, ast):
return ast
def modulus_expression(self, ast):
return ast
def natural_logarithm(self, ast):
return ast
def exponential_function(self, ast):
return ast
def power_function(self, ast):
return ast
def square_root(self, ast):
return ast
def floor_function(self, ast):
return ast
def ceiling_function(self, ast):
return ast
def width_bucket_function(self, ast):
return ast
def string_value_expression(self, ast):
return ast
def character_value_expression(self, ast):
return ast
def concatenation(self, ast):
return ast
def character_factor(self, ast):
return ast
def character_primary(self, ast):
return ast
def blob_value_expression(self, ast):
return ast
def blob_factor(self, ast):
return ast
def blob_primary(self, ast):
return ast
def blob_concatenation(self, ast):
return ast
def string_value_function(self, ast):
return ast
def character_value_function(self, ast):
return ast
def character_substring_function(self, ast):
return ast
def regular_expression_substring_function(self, ast):
return ast
def fold(self, ast):
return ast
def transcoding(self, ast):
return ast
def character_transliteration(self, ast):
return ast
def trim_function(self, ast):
return ast
def trim_operands(self, ast):
return ast
def trim_specification(self, ast):
return ast
def character_overlay_function(self, ast):
return ast
def normalize_function(self, ast):
return ast
def specific_type_method(self, ast):
return ast
def blob_value_function(self, ast):
return ast
def blob_substring_function(self, ast):
return ast
def blob_trim_function(self, ast):
return ast
def blob_trim_operands(self, ast):
return ast
def blob_overlay_function(self, ast):
return ast
def start_position(self, ast):
return ast
def string_length(self, ast):
return ast
def datetime_value_expression(self, ast):
return ast
def datetime_term(self, ast):
return ast
def datetime_factor(self, ast):
return ast
def datetime_primary(self, ast):
return ast
def time_zone(self, ast):
return ast
def time_zone_specifier(self, ast):
return ast
def datetime_value_function(self, ast):
return ast
def current_time_value_function(self, ast):
return ast
def current_local_time_value_function(self, ast):
return ast
def current_timestamp_value_function(self, ast):
return ast
def current_local_timestamp_value_function(self, ast):
return ast
def interval_value_expression(self, ast):
return ast
def interval_term(self, ast):
return ast
def interval_factor(self, ast):
return ast
def interval_primary(self, ast):
return ast
def interval_value_expression_1(self, ast):
return ast
def interval_term_1(self, ast):
return ast
def interval_term_2(self, ast):
return ast
def interval_absolute_value_function(self, ast):
return ast
def boolean_value_expression(self, ast):
return ast
def boolean_term(self, ast):
return ast
def boolean_factor(self, ast):
return ast
def boolean_test(self, ast):
return ast
def truth_value(self, ast):
return ast
def boolean_primary(self, ast):
return ast
def boolean_predicand(self, ast):
return ast
def parenthesized_boolean_value_expression(self, ast):
return ast
def array_value_expression(self, ast):
return ast
def array_concatenation(self, ast):
return ast
def array_primary(self, ast):
return ast
def array_value_constructor(self, ast):
return ast
def array_value_constructor_by_enumeration(self, ast):
return ast
def array_element_list(self, ast):
return ast
def array_element(self, ast):
return ast
def array_value_constructor_by_query(self, ast):
return ast
def multiset_value_expression(self, ast):
return ast
def multiset_term(self, ast):
return ast
def multiset_primary(self, ast):
return ast
def multiset_set_function(self, ast):
return ast
def multiset_value_constructor(self, ast):
return ast
def multiset_value_constructor_by_enumeration(self, ast):
return ast
def multiset_element_list(self, ast):
return ast
def multiset_element(self, ast):
return ast
def multiset_value_constructor_by_query(self, ast):
return ast
def table_value_constructor_by_query(self, ast):
return ast
def row_value_constructor(self, ast):
return ast
def explicit_row_value_constructor(self, ast):
return ast
def row_value_constructor_element_list(self, ast):
return ast
def row_value_constructor_element(self, ast):
return ast
def contextually_typed_row_value_constructor(self, ast):
return ast
def contextually_typed_row_value_constructor_element_list(self, ast):
return ast
def contextually_typed_row_value_constructor_element(self, ast):
return ast
def row_value_constructor_predicand(self, ast):
return ast
def row_value_expression(self, ast):
return ast
def table_row_value_expression(self, ast):
return ast
def contextually_typed_row_value_expression(self, ast):
return ast
def row_value_predicand(self, ast):
return ast
def row_value_special_case(self, ast):
return ast
def table_value_constructor(self, ast):
return ast
def row_value_expression_list(self, ast):
return ast
def contextually_typed_table_value_constructor(self, ast):
return ast
def contextually_typed_row_value_expression_list(self, ast):
return ast
def table_expression(self, ast):
return ast
def from_clause(self, ast):
return ast
def table_reference_list(self, ast):
return ast
def table_reference(self, ast):
return ast
def table_factor(self, ast):
return ast
def sample_clause(self, ast):
return ast
def sample_method(self, ast):
return ast
def repeatable_clause(self, ast):
return ast
def table_primary(self, ast):
return ast
def parenthesized_joined_table(self, ast):
return ast
def only_spec(self, ast):
return ast
def lateral_derived_table(self, ast):
return ast
def collection_derived_table(self, ast):
return ast
def table_function_derived_table(self, ast):
return ast
def table_or_query_name(self, ast):
return ast
def column_name_list(self, ast):
return ast
def joined_table(self, ast):
return ast
def cross_join(self, ast):
return ast
def qualified_join(self, ast):
return ast
def natural_join(self, ast):
return ast
def union_join(self, ast):
return ast
def join_specification(self, ast):
return ast
def join_condition(self, ast):
return ast
def named_columns_join(self, ast):
return ast
def join_type(self, ast):
return ast
def outer_join_type(self, ast):
return ast
def where_clause(self, ast):
return ast
def group_by_clause(self, ast):
return ast
def grouping_element_list(self, ast):
return ast
def grouping_element(self, ast):
return ast
def ordinary_grouping_set(self, ast):
return ast
def grouping_column_reference(self, ast):
return ast
def grouping_column_reference_list(self, ast):
return ast
def rollup_list(self, ast):
return ast
def ordinary_grouping_set_list(self, ast):
return ast
def cube_list(self, ast):
return ast
def grouping_sets_specification(self, ast):
return ast
def grouping_set_list(self, ast):
return ast
def grouping_set(self, ast):
return ast
def empty_grouping_set(self, ast):
return ast
def having_clause(self, ast):
return ast
def window_clause(self, ast):
return ast
def window_definition_list(self, ast):
return ast
def window_definition(self, ast):
return ast
def window_specification(self, ast):
return ast
def window_specification_details(self, ast):
return ast
def window_partition_clause(self, ast):
return ast
def window_partition_column_reference_list(self, ast):
return ast
def window_partition_column_reference(self, ast):
return ast
def window_frame_clause(self, ast):
return ast
def window_frame_units(self, ast):
return ast
def window_frame_extent(self, ast):
return ast
def window_frame_start(self, ast):
return ast
def window_frame_preceding(self, ast):
return ast
def window_frame_between(self, ast):
return ast
def window_frame_bound(self, ast):
return ast
def window_frame_following(self, ast):
return ast
def window_frame_exclusion(self, ast):
return ast
def query_specification(self, ast):
return ast
def select_list(self, ast):
return ast
def select_sublist(self, ast):
return ast
def qualified_asterisk(self, ast):
return ast
def derived_column(self, ast):
return ast
def as_clause(self, ast):
return ast
def all_fields_reference(self, ast):
return ast
def query_expression(self, ast):
return ast
def with_clause(self, ast):
return ast
def with_list(self, ast):
return ast
def with_list_element(self, ast):
return ast
def query_expression_body(self, ast):
return ast
def non_join_query_expression(self, ast):
return ast
def query_term(self, ast):
return ast
def non_join_query_term(self, ast):
return ast
def query_primary(self, ast):
return ast
def non_join_query_primary(self, ast):
return ast
def simple_table(self, ast):
return ast
def explicit_table(self, ast):
return ast
def corresponding_spec(self, ast):
return ast
def search_or_cycle_clause(self, ast):
return ast
def search_clause(self, ast):
return ast
def recursive_search_order(self, ast):
return ast
def cycle_clause(self, ast):
return ast
def cycle_column_list(self, ast):
return ast
def subquery(self, ast):
return ast
def predicate(self, ast):
return ast
def comparison_predicate(self, ast):
return ast
def comparison_predicate_part_2(self, ast):
return ast
def comp_op(self, ast):
return ast
def between_predicate(self, ast):
return ast
def between_predicate_part_2(self, ast):
return ast
def in_predicate(self, ast):
return ast
def in_predicate_part_2(self, ast):
return ast
def in_predicate_value(self, ast):
return ast
def in_value_list(self, ast):
return ast
def like_predicate(self, ast):
return ast
def character_like_predicate(self, ast):
return ast
def character_like_predicate_part_2(self, ast):
return ast
def escape_character(self, ast):
return ast
def octet_like_predicate(self, ast):
return ast
def octet_like_predicate_part_2(self, ast):
return ast
def similar_predicate(self, ast):
return ast
def similar_predicate_part_2(self, ast):
return ast
def null_predicate(self, ast):
return ast
def null_predicate_part_2(self, ast):
return ast
def quantified_comparison_predicate(self, ast):
return ast
def quantified_comparison_predicate_part_2(self, ast):
return ast
def quantifier(self, ast):
return ast
def some(self, ast):
return ast
def exists_predicate(self, ast):
return ast
def unique_predicate(self, ast):
return ast
def normalized_predicate(self, ast):
return ast
def normalized_predicate_part_2(self, ast):
return ast
def match_predicate(self, ast):
return ast
def match_predicate_part_2(self, ast):
return ast
def overlaps_predicate(self, ast):
return ast
def overlaps_predicate_part_1(self, ast):
return ast
def overlaps_predicate_part_2(self, ast):
return ast
def distinct_predicate(self, ast):
return ast
def distinct_predicate_part_2(self, ast):
return ast
def member_predicate(self, ast):
return ast
def member_predicate_part_2(self, ast):
return ast
def submultiset_predicate(self, ast):
return ast
def submultiset_predicate_part_2(self, ast):
return ast
def set_predicate(self, ast):
return ast
def set_predicate_part_2(self, ast):
return ast
def type_predicate(self, ast):
return ast
def type_predicate_part_2(self, ast):
return ast
def type_list(self, ast):
return ast
def user_defined_type_specification(self, ast):
return ast
def exclusive_user_defined_type_specification(self, ast):
return ast
def search_condition(self, ast):
return ast
def interval_qualifier(self, ast):
return ast
def start_field(self, ast):
return ast
def end_field(self, ast):
return ast
def single_datetime_field(self, ast):
return ast
def primary_datetime_field(self, ast):
return ast
def non_second_primary_datetime_field(self, ast):
return ast
def language_clause(self, ast):
return ast
def language_name(self, ast):
return ast
def path_specification(self, ast):
return ast
def schema_name_list(self, ast):
return ast
def routine_invocation(self, ast):
return ast
def sql_argument_list(self, ast):
return ast
def sql_argument(self, ast):
return ast
def generalized_expression(self, ast):
return ast
def specific_routine_designator(self, ast):
return ast
def routine_type(self, ast):
return ast
def member_name(self, ast):
return ast
def member_name_alternatives(self, ast):
return ast
def data_type_list(self, ast):
return ast
def collate_clause(self, ast):
return ast
def constraint_name_definition(self, ast):
return ast
def constraint_characteristics(self, ast):
return ast
def constraint_check_time(self, ast):
return ast
def aggregate_function(self, ast):
return ast
def general_set_function(self, ast):
return ast
def computational_operation(self, ast):
return ast
def set_quantifier(self, ast):
return ast
def filter_clause(self, ast):
return ast
def binary_set_function(self, ast):
return ast
def binary_set_function_type(self, ast):
return ast
def ordered_set_function(self, ast):
return ast
def hypothetical_set_function(self, ast):
return ast
def within_group_specification(self, ast):
return ast
def hypothetical_set_function_value_expression_list(self, ast):
return ast
def inverse_distribution_function(self, ast):
return ast
def inverse_distribution_function_type(self, ast):
return ast
def sort_specification_list(self, ast):
return ast
def sort_specification(self, ast):
return ast
def ordering_specification(self, ast):
return ast
def null_ordering(self, ast):
return ast
def schema_definition(self, ast):
return ast
def schema_character_set_or_path(self, ast):
return ast
def schema_name_clause(self, ast):
return ast
def schema_character_set_specification(self, ast):
return ast
def schema_path_specification(self, ast):
return ast
def schema_element(self, ast):
return ast
def drop_schema_statement(self, ast):
return ast
def drop_behavior(self, ast):
return ast
def table_definition(self, ast):
return ast
def table_contents_source(self, ast):
return ast
def table_scope(self, ast):
return ast
def global_or_local(self, ast):
return ast
def table_commit_action(self, ast):
return ast
def table_element_list(self, ast):
return ast
def table_element(self, ast):
return ast
def typed_table_clause(self, ast):
return ast
def self_referencing_column_specification(self, ast):
return ast
def reference_generation(self, ast):
return ast
def column_options(self, ast):
return ast
def column_option_list(self, ast):
return ast
def subtable_clause(self, ast):
return ast
def like_clause(self, ast):
return ast
def like_options(self, ast):
return ast
def identity_option(self, ast):
return ast
def column_default_option(self, ast):
return ast
def as_subquery_clause(self, ast):
return ast
def with_or_without_data(self, ast):
return ast
def column_definition(self, ast):
return ast
def data_type_or_domain_name(self, ast):
return ast
def column_constraint_definition(self, ast):
return ast
def column_constraint(self, ast):
return ast
def identity_column_specification(self, ast):
return ast
def generation_clause(self, ast):
return ast
def generation_rule(self, ast):
return ast
def generation_expression(self, ast):
return ast
def default_clause(self, ast):
return ast
def default_option(self, ast):
return ast
def table_constraint_definition(self, ast):
return ast
def table_constraint(self, ast):
return ast
def unique_constraint_definition(self, ast):
return ast
def unique_specification(self, ast):
return ast
def referential_constraint_definition(self, ast):
return ast
def references_specification(self, ast):
return ast
def match_type(self, ast):
return ast
def referenced_table_and_columns(self, ast):
return ast
def referential_triggered_action(self, ast):
return ast
def update_rule(self, ast):
return ast
def delete_rule(self, ast):
return ast
def referential_action(self, ast):
return ast
def check_constraint_definition(self, ast):
return ast
def alter_table_statement(self, ast):
return ast
def alter_table_action(self, ast):
return ast
def add_column_definition(self, ast):
return ast
def alter_column_definition(self, ast):
return ast
def alter_column_action(self, ast):
return ast
def set_column_default_clause(self, ast):
return ast
def drop_column_default_clause(self, ast):
return ast
def add_column_scope_clause(self, ast):
return ast
def drop_column_scope_clause(self, ast):
return ast
def alter_identity_column_specification(self, ast):
return ast
def alter_identity_column_option(self, ast):
return ast
def drop_column_definition(self, ast):
return ast
def add_table_constraint_definition(self, ast):
return ast
def drop_table_constraint_definition(self, ast):
return ast
def drop_table_statement(self, ast):
return ast
def view_definition(self, ast):
return ast
def view_specification(self, ast):
return ast
def regular_view_specification(self, ast):
return ast
def referenceable_view_specification(self, ast):
return ast
def subview_clause(self, ast):
return ast
def view_element_list(self, ast):
return ast
def view_element(self, ast):
return ast
def view_column_option(self, ast):
return ast
def levels_clause(self, ast):
return ast
def drop_view_statement(self, ast):
return ast
def domain_definition(self, ast):
return ast
def domain_constraint(self, ast):
return ast
def alter_domain_statement(self, ast):
return ast
def alter_domain_action(self, ast):
return ast
def set_domain_default_clause(self, ast):
return ast
def drop_domain_default_clause(self, ast):
return ast
def add_domain_constraint_definition(self, ast):
return ast
def drop_domain_constraint_definition(self, ast):
return ast
def drop_domain_statement(self, ast):
return ast
def character_set_definition(self, ast):
return ast
def character_set_source(self, ast):
return ast
def drop_character_set_statement(self, ast):
return ast
def collation_definition(self, ast):
return ast
def pad_characteristic(self, ast):
return ast
def drop_collation_statement(self, ast):
return ast
def transliteration_definition(self, ast):
return ast
def transliteration_source(self, ast):
return ast
def drop_transliteration_statement(self, ast):
return ast
def assertion_definition(self, ast):
return ast
def drop_assertion_statement(self, ast):
return ast
def trigger_definition(self, ast):
return ast
def trigger_action_time(self, ast):
return ast
def trigger_event(self, ast):
return ast
def triggered_action(self, ast):
return ast
def triggered_sql_statement(self, ast):
return ast
def old_or_new_values_alias_list(self, ast):
return ast
def old_or_new_values_alias(self, ast):
return ast
def drop_trigger_statement(self, ast):
return ast
def user_defined_type_definition(self, ast):
return ast
def user_defined_type_body(self, ast):
return ast
def user_defined_type_option_list(self, ast):
return ast
def user_defined_type_option(self, ast):
return ast
def subtype_clause(self, ast):
return ast
def representation(self, ast):
return ast
def member_list(self, ast):
return ast
def member(self, ast):
return ast
def instantiable_clause(self, ast):
return ast
def finality(self, ast):
return ast
def reference_type_specification(self, ast):
return ast
def user_defined_representation(self, ast):
return ast
def derived_representation(self, ast):
return ast
def system_generated_representation(self, ast):
return ast
def cast_to_ref(self, ast):
return ast
def cast_to_type(self, ast):
return ast
def list_of_attributes(self, ast):
return ast
def cast_to_distinct(self, ast):
return ast
def cast_to_source(self, ast):
return ast
def method_specification_list(self, ast):
return ast
def method_specification(self, ast):
return ast
def original_method_specification(self, ast):
return ast
def overriding_method_specification(self, ast):
return ast
def partial_method_specification(self, ast):
return ast
def method_characteristics(self, ast):
return ast
def method_characteristic(self, ast):
return ast
def attribute_definition(self, ast):
return ast
def alter_type_statement(self, ast):
return ast
def alter_type_action(self, ast):
return ast
def add_attribute_definition(self, ast):
return ast
def drop_attribute_definition(self, ast):
return ast
def add_original_method_specification(self, ast):
return ast
def add_overriding_method_specification(self, ast):
return ast
def drop_method_specification(self, ast):
return ast
def specific_method_specification_designator(self, ast):
return ast
def drop_data_type_statement(self, ast):
return ast
def schema_routine(self, ast):
return ast
def schema_procedure(self, ast):
return ast
def schema_function(self, ast):
return ast
def sql_invoked_procedure(self, ast):
return ast
def sql_invoked_function(self, ast):
return ast
def sql_parameter_declaration_list(self, ast):
return ast
def sql_parameter_declaration(self, ast):
return ast
def parameter_mode(self, ast):
return ast
def parameter_type(self, ast):
return ast
def locator_indication(self, ast):
return ast
def function_specification(self, ast):
return ast
def method_specification_designator(self, ast):
return ast
def routine_characteristics(self, ast):
return ast
def routine_characteristic(self, ast):
return ast
def savepoint_level_indication(self, ast):
return ast
def dynamic_result_sets_characteristic(self, ast):
return ast
def parameter_style_clause(self, ast):
return ast
def dispatch_clause(self, ast):
return ast
def returns_clause(self, ast):
return ast
def returns_type(self, ast):
return ast
def returns_table_type(self, ast):
return ast
def table_function_column_list(self, ast):
return ast
def table_function_column_list_element(self, ast):
return ast
def result_cast(self, ast):
return ast
def result_cast_from_type(self, ast):
return ast
def returns_data_type(self, ast):
return ast
def routine_body(self, ast):
return ast
def sql_routine_spec(self, ast):
return ast
def rights_clause(self, ast):
return ast
def external_body_reference(self, ast):
return ast
def external_security_clause(self, ast):
return ast
def parameter_style(self, ast):
return ast
def deterministic_characteristic(self, ast):
return ast
def sql_data_access_indication(self, ast):
return ast
def null_call_clause(self, ast):
return ast
def transform_group_specification(self, ast):
return ast
def multiple_group_specification(self, ast):
return ast
def group_specification(self, ast):
return ast
def alter_routine_statement(self, ast):
return ast
def alter_routine_characteristics(self, ast):
return ast
def alter_routine_characteristic(self, ast):
return ast
def drop_routine_statement(self, ast):
return ast
def user_defined_cast_definition(self, ast):
return ast
def source_data_type(self, ast):
return ast
def target_data_type(self, ast):
return ast
def drop_user_defined_cast_statement(self, ast):
return ast
def user_defined_ordering_definition(self, ast):
return ast
def ordering_form(self, ast):
return ast
def equals_ordering_form(self, ast):
return ast
def full_ordering_form(self, ast):
return ast
def ordering_category(self, ast):
return ast
def relative_category(self, ast):
return ast
def map_category(self, ast):
return ast
def state_category(self, ast):
return ast
def drop_user_defined_ordering_statement(self, ast):
return ast
def transform_definition(self, ast):
return ast
def transform_group(self, ast):
return ast
def transform_element_list(self, ast):
return ast
def transform_element(self, ast):
return ast
def to_sql(self, ast):
return ast
def from_sql(self, ast):
return ast
def alter_transform_statement(self, ast):
return ast
def alter_group(self, ast):
return ast
def alter_transform_action_list(self, ast):
return ast
def alter_transform_action(self, ast):
return ast
def add_transform_element_list(self, ast):
return ast
def drop_transform_element_list(self, ast):
return ast
def transform_kind(self, ast):
return ast
def drop_transform_statement(self, ast):
return ast
def transforms_to_be_dropped(self, ast):
return ast
def sequence_generator_definition(self, ast):
return ast
def sequence_generator_options(self, ast):
return ast
def sequence_generator_option(self, ast):
return ast
def common_sequence_generator_options(self, ast):
return ast
def common_sequence_generator_option(self, ast):
return ast
def basic_sequence_generator_option(self, ast):
return ast
def sequence_generator_data_type_option(self, ast):
return ast
def sequence_generator_start_with_option(self, ast):
return ast
def sequence_generator_increment_by_option(self, ast):
return ast
def sequence_generator_maxvalue_option(self, ast):
return ast
def sequence_generator_minvalue_option(self, ast):
return ast
def sequence_generator_cycle_option(self, ast):
return ast
def alter_sequence_generator_statement(self, ast):
return ast
def alter_sequence_generator_options(self, ast):
return ast
def alter_sequence_generator_option(self, ast):
return ast
def alter_sequence_generator_restart_option(self, ast):
return ast
def drop_sequence_generator_statement(self, ast):
return ast
def grant_statement(self, ast):
return ast
def grant_privilege_statement(self, ast):
return ast
def privileges(self, ast):
return ast
def object_name(self, ast):
return ast
def object_privileges(self, ast):
return ast
def action(self, ast):
return ast
def privilege_method_list(self, ast):
return ast
def grantee(self, ast):
return ast
def grantor(self, ast):
return ast
def role_definition(self, ast):
return ast
def grant_role_statement(self, ast):
return ast
def drop_role_statement(self, ast):
return ast
def revoke_statement(self, ast):
return ast
def revoke_privilege_statement(self, ast):
return ast
def revoke_option_extension(self, ast):
return ast
def revoke_role_statement(self, ast):
return ast
def character_set_specification_list(self, ast):
return ast
def sql_procedure_statement(self, ast):
return ast
def sql_executable_statement(self, ast):
return ast
def sql_schema_statement(self, ast):
return ast
def sql_schema_definition_statement(self, ast):
return ast
def sql_schema_manipulation_statement(self, ast):
return ast
def sql_data_statement(self, ast):
return ast
def sql_data_change_statement(self, ast):
return ast
def sql_control_statement(self, ast):
return ast
def sql_transaction_statement(self, ast):
return ast
def sql_connection_statement(self, ast):
return ast
def sql_session_statement(self, ast):
return ast
def sql_dynamic_statement(self, ast):
return ast
def sql_dynamic_data_statement(self, ast):
return ast
def descriptor_statement(self, ast):
return ast
def cursor_sensitivity(self, ast):
return ast
def cursor_scrollability(self, ast):
return ast
def cursor_holdability(self, ast):
return ast
def cursor_returnability(self, ast):
return ast
def cursor_specification(self, ast):
return ast
def updatability_clause(self, ast):
return ast
def order_by_clause(self, ast):
return ast
def open_statement(self, ast):
return ast
def fetch_statement(self, ast):
return ast
def fetch_orientation(self, ast):
return ast
def fetch_target_list(self, ast):
return ast
def close_statement(self, ast):
return ast
def select_statement_single_row(self, ast):
return ast
def select_target_list(self, ast):
return ast
def delete_statement_positioned(self, ast):
return ast
def target_table(self, ast):
return ast
def delete_statement_searched(self, ast):
return ast
def insert_statement(self, ast):
return ast
def insert_columns_and_source(self, ast):
return ast
def from_subquery(self, ast):
return ast
def from_constructor(self, ast):
return ast
def override_clause(self, ast):
return ast
def from_default(self, ast):
return ast
def merge_statement(self, ast):
return ast
def merge_operation_specification(self, ast):
return ast
def merge_when_clause(self, ast):
return ast
def merge_when_matched_clause(self, ast):
return ast
def merge_when_not_matched_clause(self, ast):
return ast
def merge_update_specification(self, ast):
return ast
def merge_insert_specification(self, ast):
return ast
def merge_insert_value_list(self, ast):
return ast
def merge_insert_value_element(self, ast):
return ast
def update_statement_positioned(self, ast):
return ast
def update_statement_searched(self, ast):
return ast
def set_clause_list(self, ast):
return ast
def set_clause(self, ast):
return ast
def set_target(self, ast):
return ast
def multiple_column_assignment(self, ast):
return ast
def set_target_list(self, ast):
return ast
def update_target(self, ast):
return ast
def mutated_set_clause(self, ast):
return ast
def mutated_target(self, ast):
return ast
def update_source(self, ast):
return ast
def temporary_table_declaration(self, ast):
return ast
def free_locator_statement(self, ast):
return ast
def locator_reference(self, ast):
return ast
def hold_locator_statement(self, ast):
return ast
def call_statement(self, ast):
return ast
def return_statement(self, ast):
return ast
def return_value(self, ast):
return ast
def start_transaction_statement(self, ast):
return ast
def transaction_mode(self, ast):
return ast
def transaction_access_mode(self, ast):
return ast
def isolation_level(self, ast):
return ast
def level_of_isolation(self, ast):
return ast
def diagnostics_size(self, ast):
return ast
def set_transaction_statement(self, ast):
return ast
def transaction_characteristics(self, ast):
return ast
def set_constraints_mode_statement(self, ast):
return ast
def constraint_name_list(self, ast):
return ast
def savepoint_statement(self, ast):
return ast
def release_savepoint_statement(self, ast):
return ast
def commit_statement(self, ast):
return ast
def rollback_statement(self, ast):
return ast
def savepoint_clause(self, ast):
return ast
def connect_statement(self, ast):
return ast
def connection_target(self, ast):
return ast
def set_connection_statement(self, ast):
return ast
def connection_object(self, ast):
return ast
def disconnect_statement(self, ast):
return ast
def disconnect_object(self, ast):
return ast
def set_session_characteristics_statement(self, ast):
return ast
def session_characteristic_list(self, ast):
return ast
def session_characteristic(self, ast):
return ast
def set_session_user_identifier_statement(self, ast):
return ast
def set_role_statement(self, ast):
return ast
def role_specification(self, ast):
return ast
def set_local_time_zone_statement(self, ast):
return ast
def set_time_zone_value(self, ast):
return ast
def set_catalog_statement(self, ast):
return ast
def catalog_name_characteristic(self, ast):
return ast
def set_schema_statement(self, ast):
return ast
def schema_name_characteristic(self, ast):
return ast
def set_names_statement(self, ast):
return ast
def character_set_name_characteristic(self, ast):
return ast
def set_path_statement(self, ast):
return ast
def sql_path_characteristic(self, ast):
return ast
def set_transform_group_statement(self, ast):
return ast
def transform_group_characteristic(self, ast):
return ast
def set_session_collation_statement(self, ast):
return ast
def allocate_descriptor_statement(self, ast):
return ast
def deallocate_descriptor_statement(self, ast):
return ast
def get_descriptor_statement(self, ast):
return ast
def get_descriptor_information(self, ast):
return ast
def get_header_information(self, ast):
return ast
def header_item_name(self, ast):
return ast
def get_item_information(self, ast):
return ast
def item_number(self, ast):
return ast
def descriptor_item_name(self, ast):
return ast
def set_descriptor_statement(self, ast):
return ast
def set_descriptor_information(self, ast):
return ast
def set_header_information(self, ast):
return ast
def set_item_information(self, ast):
return ast
def prepare_statement(self, ast):
return ast
def attributes_specification(self, ast):
return ast
def sql_statement_variable(self, ast):
return ast
def deallocate_prepared_statement(self, ast):
return ast
def describe_statement(self, ast):
return ast
def describe_input_statement(self, ast):
return ast
def describe_output_statement(self, ast):
return ast
def nesting_option(self, ast):
return ast
def using_descriptor(self, ast):
return ast
def described_object(self, ast):
return ast
def input_using_clause(self, ast):
return ast
def using_arguments(self, ast):
return ast
def using_argument(self, ast):
return ast
def output_using_clause(self, ast):
return ast
def into_arguments(self, ast):
return ast
def into_argument(self, ast):
return ast
def into_descriptor(self, ast):
return ast
def execute_statement(self, ast):
return ast
def execute_immediate_statement(self, ast):
return ast
def allocate_cursor_statement(self, ast):
return ast
def cursor_intent(self, ast):
return ast
def statement_cursor(self, ast):
return ast
def result_set_cursor(self, ast):
return ast
def dynamic_open_statement(self, ast):
return ast
def dynamic_fetch_statement(self, ast):
return ast
def dynamic_close_statement(self, ast):
return ast
def dynamic_delete_statement_positioned(self, ast):
return ast
def dynamic_update_statement_positioned(self, ast):
return ast
def direct_sql_statement(self, ast):
return ast
def directly_executable_statement(self, ast):
return ast
def direct_sql_data_statement(self, ast):
return ast
def get_diagnostics_statement(self, ast):
return ast
def sql_diagnostics_information(self, ast):
return ast
def statement_information(self, ast):
return ast
def statement_information_item(self, ast):
return ast
def statement_information_item_name(self, ast):
return ast
def condition_information(self, ast):
return ast
def condition_information_item(self, ast):
return ast
def condition_information_item_name(self, ast):
return ast
def ref_cast_option(self, ast):
return ast
def cast_option(self, ast):
return ast
def reference_scope_check(self, ast):
return ast
def start(self, ast):
return ast
def main(
        filename,
        startrule,
        trace=False,
        whitespace=None,
        nameguard=None,
        comments_re='/\\*[\\s\\S]*?\\*/',
        eol_comments_re='--.*?$',
        ignorecase=True,
        left_recursion=True,
        **kwargs):
    """Parse the contents of *filename* with SqlParser, starting at grammar
    rule *startrule*, and return the resulting AST.

    NOTE(review): ``comments_re``, ``eol_comments_re`` and ``left_recursion``
    are accepted here but never forwarded to the parser -- confirm whether
    the parser generator intended them to be passed to ``parser.parse``.
    """
    # NOTE(review): no explicit encoding -- the locale default is used.
    with open(filename) as f:
        text = f.read()
    # Default to standard whitespace skipping when none was requested.
    whitespace = whitespace or '\\s+'
    # parseinfo disabled: the produced AST carries no source positions.
    parser = SqlParser(parseinfo=False)
    ast = parser.parse(
        text,
        startrule,
        filename=filename,
        trace=trace,
        whitespace=whitespace,
        nameguard=nameguard,
        ignorecase=ignorecase,
        **kwargs)
    return ast
if __name__ == '__main__':
    import json
    # generic_main (provided by the parser-generator runtime) parses the
    # command line and invokes main() above with the chosen start rule.
    ast = generic_main(main, SqlParser, name='Sql')
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    # NOTE(review): assumes the AST is JSON-serializable -- grako/TatSu AST
    # containers normally are; confirm for custom semantic results.
    print(json.dumps(ast, indent=2))
    print()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# BigBrother
# Copyright 2017, Federico Lolli aka Mr.Robot
# :todo:
# add db manipulation database
import sqlite3 as lite
from core.globals import vars
import sys
class DBHandler(object):
    """Thin wrapper around the BigBrother sqlite3 module database.

    Opens a connection to ``vars.db_path`` on construction and exposes a
    handful of canned queries over the ``modules`` and ``levels`` tables.
    On any sqlite3 error the original design prints the error and exits
    the process; that behavior is preserved.
    """

    def __init__(self):
        """Open a connection and cursor on the database at vars.db_path."""
        try:
            self.con = lite.connect(vars.db_path)
            self.cur = self.con.cursor()
        except lite.Error as e:
            print("An error occurred:", e.args[0])
            sys.exit()

    def get_full_details(self):
        """Return every column of every row in ``modules``."""
        return self.cur.execute("SELECT * FROM modules").fetchall()

    def get_partial_details(self):
        """Return (id, specie, family, kingdom, permission) for all modules."""
        return self.cur.execute(
            "SELECT id, specie, family, kingdom, permission FROM modules"
        ).fetchall()

    def get_modules_list(self):
        """Return (id, specie, family) for all modules."""
        return self.cur.execute(
            "SELECT id, specie, family FROM modules").fetchall()

    def get_mod_info(self, mid):
        """Return the full descriptive tuple(s) for the module with id *mid*.

        Fix: uses a parameterized query; the original interpolated *mid*
        into the SQL text (``+ str(mid)``), which is injection-prone and
        breaks on non-numeric input.
        """
        return self.cur.execute(
            "SELECT specie, family, class, phylum, kingdom, permission, power"
            " FROM modules WHERE id = ?", (mid,)).fetchall()

    def get_mod_path(self, mid):
        """Return (specie, family) for the module with id *mid*."""
        return self.cur.execute(
            "SELECT specie, family FROM modules WHERE id = ?",
            (mid,)).fetchall()

    def get_permission(self, specie):
        """Return the permission value of the module named *specie*.

        Raises IndexError if no such module exists (unchanged behavior).
        Fix: parameterized query instead of ``%``-interpolated SQL.
        """
        return self.cur.execute(
            "SELECT permission FROM modules WHERE specie = ?",
            (specie,)).fetchall()[0][0]

    def get_level_permission(self, level):
        """Return the permission value of the level named *level*.

        Fix: parameterized query instead of ``%``-interpolated SQL.
        """
        return self.cur.execute(
            "SELECT permission FROM levels WHERE name = ?",
            (level,)).fetchall()[0][0]

    def query(self, query, param=''):
        """Execute an arbitrary *query*.

        *param* may be '' (no parameters), a single value, or a list of
        values.  Fix: the original used ``is 2`` and ``is not ''`` --
        identity comparisons against literals, which are implementation-
        defined (and a SyntaxWarning on modern CPython); equality is used
        instead.  ``isinstance`` replaces the ``type(...) is list`` check.
        """
        if vars.DEBUG_LEVEL == 2:
            print(locals())
        try:
            if param != '':
                return self.cur.execute(
                    query,
                    param if isinstance(param, list) else [param]).fetchall()
            else:
                return self.cur.execute(query).fetchall()
        except lite.Error as e:
            print("An error occurred:", e.args[0])
            sys.exit()

    def close_connection(self):
        """Close cursor and connection; exit the process on failure."""
        try:
            self.cur.close()
            self.con.close()
            return
        except lite.Error as e:
            print("An error occurred:", e.args[0])
            sys.exit()

    def renew_connection(self):
        """Re-open connection and cursor (e.g. after close_connection)."""
        try:
            self.con = lite.connect(vars.db_path)
            self.cur = self.con.cursor()
        except lite.Error as e:
            print("An error occurred:", e.args[0])
            sys.exit()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""Maximum flow algorithms test suite on large graphs.
"""
__author__ = """Loïc Séguin-C. <loicseguin@gmail.com>"""
# Copyright (C) 2010 Loïc Séguin-C. <loicseguin@gmail.com>
# All rights reserved.
# BSD license.
import os
from nose.tools import *
import networkx as nx
from networkx.algorithms.flow import build_flow_dict, build_residual_network
from networkx.algorithms.flow import (edmonds_karp, preflow_push, shortest_augmenting_path)
# Maximum-flow implementations under test; each test runs against all of them.
flow_funcs = [edmonds_karp, preflow_push, shortest_augmenting_path]
# Template for assertion messages, parameterized by the flow function name.
msg = "Assertion failed in function: {0}"
def gen_pyramid(N):
    """Build a pyramid-shaped DiGraph that admits a max flow of exactly 1.

    That unit flow saturates every internal arc; the arcs into the sink
    't' carry no 'capacity' attribute and are therefore unbounded.
    """
    G = nx.DiGraph()
    for row in range(N - 1):
        c = 1. / (row + 2)
        for col in range(row + 1):
            G.add_edge((row, col), (row + 1, col), capacity=c)
            c = 1. / (row + 1) - c
            G.add_edge((row, col), (row + 1, col + 1), capacity=c)
            c = 1. / (row + 2) - c
    for col in range(N):
        G.add_edge((N - 1, col), 't')
    return G
def read_graph(name):
    """Load the pickled fixture graph ``<name>.gpickle.bz2`` next to this file."""
    fixture = os.path.join(os.path.dirname(__file__), name + '.gpickle.bz2')
    return nx.read_gpickle(fixture)
def validate_flows(G, s, t, soln_value, R, flow_func):
    """Assert that residual network R encodes a legal s-t flow of soln_value.

    Checks the reported value, the flow-dict structure, per-arc capacity
    and non-negativity, and flow conservation at every node.
    """
    errmsg = msg.format(flow_func.__name__)
    flow_value = R.graph['flow_value']
    flow_dict = build_flow_dict(G, R)
    assert_equal(soln_value, flow_value, msg=errmsg)
    # The flow dict must mirror the graph's node and adjacency structure.
    assert_equal(set(G), set(flow_dict), msg=errmsg)
    for u in G:
        assert_equal(set(G[u]), set(flow_dict[u]), msg=errmsg)
    # Accumulate net flow into each node while checking every arc.
    excess = {node: 0 for node in flow_dict}
    for u in flow_dict:
        for v, flow in flow_dict[u].items():
            ok_(flow <= G[u][v].get('capacity', float('inf')), msg=errmsg)
            ok_(flow >= 0, msg=errmsg)
            excess[u] -= flow
            excess[v] += flow
    # Conservation: source emits soln_value, sink absorbs it, rest balance.
    for node, exc in excess.items():
        if node == s:
            assert_equal(exc, -soln_value, msg=errmsg)
        elif node == t:
            assert_equal(exc, soln_value, msg=errmsg)
        else:
            assert_equal(exc, 0, msg=errmsg)
class TestMaxflowLargeGraph:
    """Max-flow algorithms exercised on larger fixture graphs."""

    def _validate_named_graph(self, name, soln_value):
        """Run every flow function on fixture *name* and validate its flow."""
        G = read_graph(name)
        s = 1
        t = len(G)
        R = build_residual_network(G, 'capacity')
        kwargs = {'residual': R}
        for flow_func in flow_funcs:
            validate_flows(G, s, t, soln_value,
                           flow_func(G, s, t, **kwargs), flow_func)

    def test_complete_graph(self):
        """K50 with uniform capacity 5: max flow between any pair is 5*(N-1)."""
        N = 50
        G = nx.complete_graph(N)
        nx.set_edge_attributes(G, 'capacity', 5)
        R = build_residual_network(G, 'capacity')
        kwargs = {'residual': R}
        for flow_func in flow_funcs:
            kwargs['flow_func'] = flow_func
            flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs)
            assert_equal(flow_value, 5 * (N - 1),
                         msg=msg.format(flow_func.__name__))

    def test_pyramid(self):
        """The pyramid graph admits a max flow of exactly 1 (see gen_pyramid)."""
        N = 10
        # N = 100 gives a graph with 5051 nodes
        G = gen_pyramid(N)
        R = build_residual_network(G, 'capacity')
        kwargs = {'residual': R}
        for flow_func in flow_funcs:
            kwargs['flow_func'] = flow_func
            flow_value = nx.maximum_flow_value(G, (0, 0), 't', **kwargs)
            assert_almost_equal(flow_value, 1.,
                                msg=msg.format(flow_func.__name__))

    def test_gl1(self):
        self._validate_named_graph('gl1', 156545)

    def test_gw1(self):
        self._validate_named_graph('gw1', 1202018)

    def test_wlm3(self):
        self._validate_named_graph('wlm3', 11875108)

    def test_preflow_push_global_relabel(self):
        """preflow_push with periodic global relabeling reaches the same value."""
        G = read_graph('gw1')
        R = preflow_push(G, 1, len(G), global_relabel_freq=50)
        assert_equal(R.graph['flow_value'], 1202018)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.compat import six
class Rule(object):
    """
    A Lifecycle rule for an S3 bucket.

    :ivar id: Unique identifier for the rule. The value cannot be longer
        than 255 characters. This value is optional. The server will
        generate a unique value for the rule if no value is provided.
    :ivar prefix: Prefix identifying one or more objects to which the
        rule applies. If prefix is not provided, Boto generates a default
        prefix which will match all objects.
    :ivar status: If 'Enabled', the rule is currently being applied.
        If 'Disabled', the rule is not currently being applied.
    :ivar expiration: An instance of `Expiration`. This indicates
        the lifetime of the objects that are subject to the rule.
    :ivar transition: An instance of `Transition`. This indicates
        when to transition to a different storage class.
    """

    # Maps known XML element names to the attribute they populate;
    # anything else is stored under the element name as-is.
    _ELEMENT_ATTRS = {'ID': 'id', 'Prefix': 'prefix', 'Status': 'status'}

    def __init__(self, id=None, prefix=None, status=None, expiration=None,
                 transition=None):
        self.id = id
        self.prefix = prefix if prefix is not None else ''
        self.status = status
        if isinstance(expiration, six.integer_types):
            # retain backwards compatibility???
            self.expiration = Expiration(days=expiration)
        else:
            # None or object
            self.expiration = expiration
        # retain backwards compatibility: a bare Transition is wrapped
        # in a Transitions container.
        if isinstance(transition, Transition):
            self.transition = Transitions()
            self.transition.append(transition)
        elif transition:
            self.transition = transition
        else:
            self.transition = Transitions()

    def __repr__(self):
        return '<Rule: %s>' % self.id

    def startElement(self, name, attrs, connection):
        if name == 'Transition':
            return self.transition
        if name == 'Expiration':
            self.expiration = Expiration()
            return self.expiration
        return None

    def endElement(self, name, value, connection):
        attr = self._ELEMENT_ATTRS.get(name, name)
        setattr(self, attr, value)

    def to_xml(self):
        """Serialize this rule to its S3 lifecycle XML fragment."""
        parts = ['<Rule>']
        if self.id is not None:
            parts.append('<ID>%s</ID>' % self.id)
        parts.append('<Prefix>%s</Prefix>' % self.prefix)
        parts.append('<Status>%s</Status>' % self.status)
        if self.expiration is not None:
            parts.append(self.expiration.to_xml())
        if self.transition is not None:
            parts.append(self.transition.to_xml())
        parts.append('</Rule>')
        return ''.join(parts)
class Expiration(object):
    """
    When an object will expire.

    :ivar days: The number of days until the object expires
    :ivar date: The date when the object will expire. Must be
        in ISO 8601 format.
    """

    def __init__(self, days=None, date=None):
        self.days = days
        self.date = date

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Days':
            self.days = int(value)
        elif name == 'Date':
            self.date = value

    def __repr__(self):
        if self.days is not None:
            how_long = "in: %s days" % self.days
        else:
            how_long = "on: %s" % self.date
        return '<Expiration: %s>' % how_long

    def to_xml(self):
        """Serialize to the <Expiration> XML fragment (days wins over date)."""
        inner = ''
        if self.days is not None:
            inner = '<Days>%s</Days>' % self.days
        elif self.date is not None:
            inner = '<Date>%s</Date>' % self.date
        return '<Expiration>' + inner + '</Expiration>'
class Transition(object):
    """
    A transition to a different storage class.

    :ivar days: The number of days until the object should be moved.
    :ivar date: The date when the object should be moved. Should be
        in ISO 8601 format.
    :ivar storage_class: The storage class to transition to. Valid
        values are GLACIER, STANDARD_IA.
    """

    def __init__(self, days=None, date=None, storage_class=None):
        self.days = days
        self.date = date
        self.storage_class = storage_class

    def __repr__(self):
        if self.days is not None:
            when = "in: %s days" % self.days
        else:
            when = "on: %s" % self.date
        return '<Transition: %s, %s>' % (when, self.storage_class)

    def to_xml(self):
        """Serialize to the <Transition> XML fragment (days wins over date)."""
        parts = ['<Transition>',
                 '<StorageClass>%s</StorageClass>' % self.storage_class]
        if self.days is not None:
            parts.append('<Days>%s</Days>' % self.days)
        elif self.date is not None:
            parts.append('<Date>%s</Date>' % self.date)
        parts.append('</Transition>')
        return ''.join(parts)
class Transitions(list):
    """
    A container for the transitions associated with a Lifecycle's Rule configuration.
    """

    def __init__(self):
        # S3 serializes transitions as a flat run of <Transition> tags,
        # each carrying exactly three child elements; we count elements
        # seen and flush one Transition object after every third one.
        self.transition_properties = 3
        self.current_transition_property = 1
        self.temp_days = None
        self.temp_date = None
        self.temp_storage_class = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Days':
            self.temp_days = int(value)
        elif name == 'Date':
            self.temp_date = value
        elif name == 'StorageClass':
            self.temp_storage_class = value
        # the XML does not contain a <Transitions> tag
        # but rather N number of <Transition> tags not
        # structured in any sort of hierarchy.
        if self.current_transition_property < self.transition_properties:
            self.current_transition_property += 1
        else:
            self.append(Transition(self.temp_days, self.temp_date,
                                   self.temp_storage_class))
            self.temp_days = self.temp_date = self.temp_storage_class = None
            self.current_transition_property = 1

    def to_xml(self):
        """
        Returns a string containing the XML version of the Lifecycle
        configuration as defined by S3.
        """
        return ''.join(transition.to_xml() for transition in self)

    def add_transition(self, days=None, date=None, storage_class=None):
        """
        Add a transition to this Lifecycle configuration. This only adds
        the rule to the local copy. To install the new rule(s) on
        the bucket, you need to pass this Lifecycle config object
        to the configure_lifecycle method of the Bucket object.

        :ivar days: The number of days until the object should be moved.
        :ivar date: The date when the object should be moved. Should be
            in ISO 8601 format.
        :ivar storage_class: The storage class to transition to. Valid
            values are GLACIER, STANDARD_IA.
        """
        self.append(Transition(days, date, storage_class))

    def _first_value(self, prop):
        # Backwards-compatibility helper: expose the first transition's
        # fields so 'rule.transition.days'-style access keeps working.
        for transition in self:
            return getattr(transition, prop)
        return None

    @property
    def days(self):
        return self._first_value('days')

    @property
    def date(self):
        return self._first_value('date')

    @property
    def storage_class(self):
        return self._first_value('storage_class')
class Lifecycle(list):
    """
    A container for the rules associated with a Lifecycle configuration.
    """

    def startElement(self, name, attrs, connection):
        if name != 'Rule':
            return None
        rule = Rule()
        self.append(rule)
        return rule

    def endElement(self, name, value, connection):
        setattr(self, name, value)

    def to_xml(self):
        """
        Returns a string containing the XML version of the Lifecycle
        configuration as defined by S3.
        """
        body = ''.join(rule.to_xml() for rule in self)
        return ('<?xml version="1.0" encoding="UTF-8"?>'
                '<LifecycleConfiguration>' + body + '</LifecycleConfiguration>')

    def add_rule(self, id=None, prefix='', status='Enabled',
                 expiration=None, transition=None):
        """
        Add a rule to this Lifecycle configuration. This only adds
        the rule to the local copy. To install the new rule(s) on
        the bucket, you need to pass this Lifecycle config object
        to the configure_lifecycle method of the Bucket object.

        :type id: str
        :param id: Unique identifier for the rule. The value cannot be longer
            than 255 characters. This value is optional. The server will
            generate a unique value for the rule if no value is provided.

        :type prefix: str
        :param prefix: Prefix identifying one or more objects to which the
            rule applies.

        :type status: str
        :param status: If 'Enabled', the rule is currently being applied.
            If 'Disabled', the rule is not currently being applied.

        :type expiration: int
        :param expiration: Indicates the lifetime, in days, of the objects
            that are subject to the rule. The value must be a non-zero
            positive integer. A Expiration object instance is also perfect.

        :type transition: Transitions
        :param transition: Indicates when an object transitions to a
            different storage class.
        """
        self.append(Rule(id, prefix, status, expiration, transition))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# rhn-client-tools
#
# Copyright (c) 2006--2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
import rpcServer
import up2dateErrors
import capabilities
import sys
import xmlrpclib
import OpenSSL
class _DoCallWrapper(object):
    """
    A callable object that will handle multiple levels of attributes,
    and catch exceptions.

    Instances are handed out by RhnServer.__getattr__: attribute access
    accumulates a dotted XML-RPC method path, and calling the object
    performs the remote call with fault/SSL-error translation.

    NOTE: this module uses Python 2 exception syntax throughout
    (``except X, e`` and three-argument ``raise``).
    """
    def __init__(self, server, method_name):
        # server: the underlying XML-RPC server proxy from rpcServer
        # method_name: dotted method path accumulated so far
        self._server = server
        self._method_name = method_name
    def __getattr__(self, method_name):
        """ Recursively build up the method name to pass to the server. """
        return _DoCallWrapper(self._server,
                              "%s.%s" % (self._method_name, method_name))
    def __call__(self, *args, **kwargs):
        """ Call the method. Catch faults and translate them. """
        method = getattr(self._server, self._method_name)
        try:
            return rpcServer.doCall(method, *args, **kwargs)
        except xmlrpclib.Fault, f:
            # Re-raise the translated exception with the original traceback.
            raise self.__exception_from_fault(f), None, sys.exc_info()[2]
        except OpenSSL.SSL.Error, e:
            # TODO This should probably be moved to rhnlib and raise an
            # exception that subclasses OpenSSL.SSL.Error
            # TODO Is there a better way to detect cert failures?
            # The stringified OpenSSL error looks like a nested tuple,
            # e.g. "[('SSL routines', '<func>', 'certificate verify failed')]";
            # strip the wrapping and pick out the human-readable component.
            error = str(e)
            error = error.strip("[()]")
            pieces = error.split(',')
            message = ""
            if len(pieces) > 2:
                message = pieces[2]
            elif len(pieces) == 2:
                message = pieces[1]
            message = message.strip(" '")
            if message == 'certificate verify failed':
                raise up2dateErrors.SSLCertificateVerifyFailedError(), None, sys.exc_info()[2]
            else:
                raise up2dateErrors.NetworkError(message), None, sys.exc_info()[2]
    def __exception_from_fault(self, fault):
        # Map a server fault code to the matching up2dateErrors exception;
        # anything unrecognized becomes a generic CommunicationError.
        if fault.faultCode == -3:
            # This username is already taken, or the password is incorrect.
            exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
        elif fault.faultCode == -2:
            # Invalid username and password combination.
            exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
        elif fault.faultCode == -1:
            exception = up2dateErrors.UnknownMethodException(fault.faultString)
        elif fault.faultCode == -13:
            # Username is too short.
            exception = up2dateErrors.LoginMinLengthError(fault.faultString)
        elif fault.faultCode == -14:
            # too short password
            exception = up2dateErrors.PasswordMinLengthError(
                fault.faultString)
        elif fault.faultCode == -15:
            # bad chars in username
            exception = up2dateErrors.ValidationError(fault.faultString)
        elif fault.faultCode == -16:
            # Invalid product registration code.
            # TODO Should this really be a validation error?
            exception = up2dateErrors.ValidationError(fault.faultString)
        elif fault.faultCode == -19:
            # invalid
            exception = up2dateErrors.NoBaseChannelError(fault.faultString)
        elif fault.faultCode == -31:
            # No entitlement
            exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
        elif fault.faultCode == -36:
            # rhnException.py says this means "Invalid action."
            # TODO find out which is right
            exception = up2dateErrors.PasswordError(fault.faultString)
        elif abs(fault.faultCode) == 49:
            exception = up2dateErrors.AbuseError(fault.faultString)
        elif abs(fault.faultCode) == 60:
            exception = up2dateErrors.AuthenticationTicketError(fault.faultString)
        elif abs(fault.faultCode) == 105:
            exception = up2dateErrors.RhnUuidUniquenessError(fault.faultString)
        elif fault.faultCode == 99:
            exception = up2dateErrors.DelayError(fault.faultString)
        elif abs(fault.faultCode) == 91:
            exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
        elif fault.faultCode == -106:
            # Invalid username.
            exception = up2dateErrors.ValidationError(fault.faultString)
        elif fault.faultCode == -600:
            # Invalid username.
            exception = up2dateErrors.InvalidRegistrationNumberError(fault.faultString)
        elif fault.faultCode == -601:
            # No entitlements associated with given hardware info
            exception = up2dateErrors.NotEntitlingError(fault.faultString)
        elif fault.faultCode == -602:
            # No entitlements associated with reg num
            exception = up2dateErrors.NotEntitlingError(fault.faultString)
        elif fault.faultCode == -2001 or fault.faultCode == -700:
            exception = up2dateErrors.AuthenticationOrAccountCreationError(
                fault.faultString)
        elif fault.faultCode == -701:
            exception = up2dateErrors.PasswordMaxLengthError(
                fault.faultString)
        elif fault.faultCode == -61:
            exception = up2dateErrors.ActivationKeyUsageLimitError(
                fault.faultString)
        elif fault.faultCode == -5:
            exception = up2dateErrors.UnableToCreateUser(
                fault.faultString)
        else:
            exception = up2dateErrors.CommunicationError(fault.faultString)
        return exception
class RhnServer(object):
    """
    An rpc server object that calls doCall for you, and catches lower
    level exceptions.  Attribute access returns _DoCallWrapper objects,
    so every remote method call gets exception translation for free.
    """

    def __init__(self):
        self._server = rpcServer.getServer()
        self._capabilities = None  # lazily populated Capabilities object

    @property
    def capabilities(self):
        """Lazily fetch and cache the server's capability headers."""
        if self._capabilities is None:
            headers = self._server.get_response_headers()
            if headers is None:
                # No request has been made yet; issue a cheap call so the
                # response headers become available.
                self.registration.welcome_message()
                headers = self._server.get_response_headers()
            self._capabilities = capabilities.Capabilities()
            self._capabilities.populate(headers)
        return self._capabilities

    def add_header(self, key, value):
        self._server.add_header(key, value)

    def __getattr__(self, method_name):
        """ Return a callable object that will do the work for us. """
        return _DoCallWrapper(self._server, method_name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
namespace Illuminate\Tests\Queue;
use Exception;
use Illuminate\Database\Capsule\Manager as DB;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Queue\Failed\DatabaseFailedJobProvider;
use Illuminate\Support\Carbon;
use Illuminate\Support\Facades\Date;
use Illuminate\Support\Str;
use PHPUnit\Framework\TestCase;
use RuntimeException;
/**
 * Tests for DatabaseFailedJobProvider.
 *
 * Each test runs against an in-memory SQLite database; setUp() creates a
 * fresh failed_jobs table and wires a provider to it before every test.
 */
class DatabaseFailedJobProviderTest extends TestCase
{
    // Capsule manager holding the in-memory SQLite connection.
    protected $db;

    // Provider under test, bound to the capsule's database manager.
    protected $provider;

    protected function setUp(): void
    {
        parent::setUp();

        $this->createDatabaseWithFailedJobTable()
            ->createProvider();
    }

    public function testCanGetAllFailedJobIds()
    {
        $this->assertEmpty($this->provider->ids());

        array_map(fn () => $this->createFailedJobsRecord(), range(1, 4));

        $this->assertCount(4, $this->provider->ids());
        // IDs are returned newest-first.
        $this->assertSame([4, 3, 2, 1], $this->provider->ids());
    }

    public function testCanGetAllFailedJobs()
    {
        $this->assertEmpty($this->provider->all());

        array_map(fn () => $this->createFailedJobsRecord(), range(1, 4));

        $this->assertCount(4, $this->provider->all());
        // all() is also ordered newest-first, so index 1 is record id 3.
        $this->assertSame(3, $this->provider->all()[1]->id);
        $this->assertSame('default', $this->provider->all()[1]->queue);
    }

    public function testCanRetrieveFailedJobsById()
    {
        array_map(fn () => $this->createFailedJobsRecord(), range(1, 2));

        $this->assertNotNull($this->provider->find(1));
        $this->assertNotNull($this->provider->find(2));
        $this->assertNull($this->provider->find(3));
    }

    public function testCanRemoveFailedJobsById()
    {
        $this->createFailedJobsRecord();

        // forget() reports whether a row was actually deleted.
        $this->assertFalse($this->provider->forget(2));
        $this->assertSame(1, $this->failedJobsTable()->count());
        $this->assertTrue($this->provider->forget(1));
        $this->assertSame(0, $this->failedJobsTable()->count());
    }

    public function testCanPruneFailedJobs()
    {
        Carbon::setTestNow(Carbon::createFromDate(2024, 4, 28));

        $this->createFailedJobsRecord(['failed_at' => Carbon::createFromDate(2024, 4, 24)]);
        $this->createFailedJobsRecord(['failed_at' => Carbon::createFromDate(2024, 4, 26)]);

        // prune() removes jobs that failed strictly before the given cutoff.
        $this->provider->prune(Carbon::createFromDate(2024, 4, 23));
        $this->assertSame(2, $this->failedJobsTable()->count());

        $this->provider->prune(Carbon::createFromDate(2024, 4, 25));
        $this->assertSame(1, $this->failedJobsTable()->count());

        $this->provider->prune(Carbon::createFromDate(2024, 4, 30));
        $this->assertSame(0, $this->failedJobsTable()->count());
    }

    public function testCanPruneFailedJobsWithRelativeHoursAndMinutes()
    {
        Carbon::setTestNow(Carbon::create(2025, 8, 24, 12, 0, 0));

        $this->createFailedJobsRecord(['failed_at' => Carbon::create(2025, 8, 24, 11, 45, 0)]);
        $this->createFailedJobsRecord(['failed_at' => Carbon::create(2025, 8, 24, 13, 0, 0)]);

        $this->provider->prune(Carbon::create(2025, 8, 24, 11, 45, 0));
        $this->assertSame(2, $this->failedJobsTable()->count());

        $this->provider->prune(Carbon::create(2025, 8, 24, 14, 0, 0));
        $this->assertSame(0, $this->failedJobsTable()->count());
    }

    public function testCanFlushFailedJobs()
    {
        Date::setTestNow(Date::now());

        // With no argument, flush() clears everything.
        $this->createFailedJobsRecord(['failed_at' => Date::now()->subDays(10)]);
        $this->provider->flush();
        $this->assertSame(0, $this->failedJobsTable()->count());

        // With an hour threshold, only older jobs are cleared.
        $this->createFailedJobsRecord(['failed_at' => Date::now()->subDays(10)]);
        $this->provider->flush(15 * 24);
        $this->assertSame(1, $this->failedJobsTable()->count());

        $this->createFailedJobsRecord(['failed_at' => Date::now()->subDays(10)]);
        $this->provider->flush(10 * 24);
        $this->assertSame(0, $this->failedJobsTable()->count());
    }

    public function testCanProperlyLogFailedJob()
    {
        $uuid = Str::uuid();
        // Deliberately malformed bytes: the provider must store the
        // exception text without corrupting or rejecting it.
        $exception = new Exception(mb_convert_encoding('ÐÑÙ0E\xE2\x�98\xA0World��7B¹!þÿ', 'ISO-8859-1', 'UTF-8'));
        $this->provider->log('database', 'default', json_encode(['uuid' => (string) $uuid]), $exception);

        $exception = (string) mb_convert_encoding($exception, 'UTF-8');

        $this->assertSame(1, $this->failedJobsTable()->count());
        $this->assertSame($exception, $this->failedJobsTable()->first()->exception);
    }

    public function testJobsCanBeCounted()
    {
        $this->assertSame(0, $this->provider->count());

        $this->provider->log('database', 'default', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(1, $this->provider->count());

        $this->provider->log('database', 'default', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('another-connection', 'another-queue', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(3, $this->provider->count());
    }

    public function testJobsCanBeCountedByConnection()
    {
        $this->provider->log('connection-1', 'default', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('connection-2', 'default', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(1, $this->provider->count('connection-1'));
        $this->assertSame(1, $this->provider->count('connection-2'));

        $this->provider->log('connection-1', 'default', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(2, $this->provider->count('connection-1'));
        $this->assertSame(1, $this->provider->count('connection-2'));
    }

    public function testJobsCanBeCountedByQueue()
    {
        $this->provider->log('database', 'queue-1', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('database', 'queue-2', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(1, $this->provider->count(queue: 'queue-1'));
        $this->assertSame(1, $this->provider->count(queue: 'queue-2'));

        $this->provider->log('database', 'queue-1', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(2, $this->provider->count(queue: 'queue-1'));
        $this->assertSame(1, $this->provider->count(queue: 'queue-2'));
    }

    public function testJobsCanBeCountedByQueueAndConnection()
    {
        $this->provider->log('connection-1', 'queue-99', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('connection-1', 'queue-99', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('connection-2', 'queue-99', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('connection-1', 'queue-1', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('connection-2', 'queue-1', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->provider->log('connection-2', 'queue-1', json_encode(['uuid' => (string) Str::uuid()]), new RuntimeException());
        $this->assertSame(2, $this->provider->count('connection-1', 'queue-99'));
        $this->assertSame(1, $this->provider->count('connection-2', 'queue-99'));
        $this->assertSame(1, $this->provider->count('connection-1', 'queue-1'));
        $this->assertSame(2, $this->provider->count('connection-2', 'queue-1'));
    }

    // NOTE(review): this helper appears unused within this test class --
    // confirm whether it is still needed before removing.
    protected function createSimpleDatabaseWithFailedJobTable()
    {
        $db = new DB;

        $db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
        ]);

        $db->getConnection()->getSchemaBuilder()->create('failed_jobs', function (Blueprint $table) {
            $table->id();
            $table->timestamp('failed_at')->useCurrent();
        });

        return $db;
    }

    // Build the in-memory SQLite database with the full failed_jobs schema.
    protected function createDatabaseWithFailedJobTable()
    {
        $this->db = new DB;

        $this->db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
        ]);

        $this->db->getConnection()->getSchemaBuilder()->create('failed_jobs', function (Blueprint $table) {
            $table->id();
            $table->text('connection');
            $table->text('queue');
            $table->longText('payload');
            $table->longText('exception');
            $table->timestamp('failed_at')->useCurrent();
        });

        return $this;
    }

    // Wire a provider to the capsule's database manager.
    protected function createProvider(string $database = 'default', string $table = 'failed_jobs')
    {
        $this->provider = new DatabaseFailedJobProvider($this->db->getDatabaseManager(), $database, $table);

        return $this;
    }

    // Query builder for the failed_jobs table.
    protected function failedJobsTable()
    {
        return $this->db->getConnection()->table('failed_jobs');
    }

    // Insert one failed-job row; $overrides replaces any default column.
    protected function createFailedJobsRecord(array $overrides = [])
    {
        return $this->failedJobsTable()
            ->insert(array_merge([
                'connection' => 'database',
                'queue' => 'default',
                'payload' => json_encode(['uuid' => (string) Str::uuid()]),
                'exception' => new Exception('Whoops!'),
                'failed_at' => Date::now()->subDays(10),
            ], $overrides));
    }
}
|
php
|
github
|
https://github.com/laravel/framework
|
tests/Queue/DatabaseFailedJobProviderTest.php
|
// #docregion
import {Component, computed, inject, input} from '@angular/core';
import {FormGroup, ReactiveFormsModule} from '@angular/forms';
import {DynamicFormQuestionComponent} from './dynamic-form-question.component';
import {QuestionBase} from './question-base';
import {QuestionControlService} from './question-control.service';
@Component({
  selector: 'app-dynamic-form',
  templateUrl: './dynamic-form.component.html',
  // Provided at component level so each form instance gets its own service.
  providers: [QuestionControlService],
  imports: [DynamicFormQuestionComponent, ReactiveFormsModule],
})
export class DynamicFormComponent {
  // Turns question metadata into a reactive FormGroup.
  private readonly qcs = inject(QuestionControlService);

  // Questions to render, supplied by the parent component.
  readonly questions = input<QuestionBase<string>[] | null>([]);

  // FormGroup derived from the questions input; recomputed when it changes.
  readonly form = computed<FormGroup>(() =>
    this.qcs.toFormGroup(this.questions() as QuestionBase<string>[]),
  );

  // JSON snapshot of the submitted form values, displayed by the template.
  payLoad = '';

  onSubmit() {
    this.payLoad = JSON.stringify(this.form().getRawValue());
  }
}
|
typescript
|
github
|
https://github.com/angular/angular
|
adev/src/content/examples/dynamic-form/src/app/dynamic-form.component.ts
|
import unittest
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.content import ContentStore
from xmodule.modulestore import Location
class Content:
    """Minimal stand-in for an asset: just a location and a content type."""

    def __init__(self, location, content_type):
        self.content_type = content_type
        self.location = location
class ContentTest(unittest.TestCase):
    def test_thumbnail_none(self):
        # We had a bug where a thumbnail location of None was getting
        # transformed into a Location tuple with all elements None.  For
        # rendering it is important that the location stays exactly None.
        explicit = StaticContent('loc', 'name', 'content_type', 'data',
                                 None, None, None)
        self.assertIsNone(explicit.thumbnail_location)
        defaulted = StaticContent('loc', 'name', 'content_type', 'data')
        self.assertIsNone(defaulted.thumbnail_location)

    def test_static_url_generation_from_courseid(self):
        url = StaticContent.convert_legacy_static_url_with_course_id(
            'images_course_image.jpg', 'foo/bar/bz')
        self.assertEqual(url, '/c4x/foo/bar/asset/images_course_image.jpg')

    def test_generate_thumbnail_image(self):
        store = ContentStore()
        source = Content(
            Location(u'c4x', u'mitX', u'800', u'asset', u'monsters__.jpg'),
            None)
        thumb_content, thumb_location = store.generate_thumbnail(source)
        self.assertIsNone(thumb_content)
        self.assertEqual(
            Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters__.jpg'),
            thumb_location)

    def test_compute_location(self):
        # We had a bug that __ got converted into a single _. Make sure that
        # substitution of INVALID_CHARS (like space) still happens.
        asset_location = StaticContent.compute_location(
            'mitX', '400', 'subs__1eo_jXvZnE .srt.sjson')
        self.assertEqual(
            Location(u'c4x', u'mitX', u'400', u'asset',
                     u'subs__1eo_jXvZnE_.srt.sjson', None),
            asset_location)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
import sys
from django.core.management.color import no_style
from django.db import transaction, models
from django.db.utils import DatabaseError
from django.db.backends.util import truncate_name
from django.db.backends.creation import BaseDatabaseCreation
from django.db.models.fields import NOT_PROVIDED
from django.dispatch import dispatcher
from django.conf import settings
from django.utils.datastructures import SortedDict
# Use Django's cached_property when available (Django >= 1.4); otherwise
# fall back to a minimal local re-implementation.
try:
    from django.utils.functional import cached_property
except ImportError:
    class cached_property(object):
        """
        Decorator that converts a method with a single
        self argument into a property cached on the instance.
        """
        def __init__(self, func):
            self.func = func
        def __get__(self, instance, type):
            # Store the computed value directly on the instance so future
            # attribute lookups bypass this descriptor entirely.
            res = instance.__dict__[self.func.__name__] = self.func(instance)
            return res
from south.logger import get_logger
def alias(attrname):
    """
    Returns a function which calls 'attrname' - for function aliasing.
    We can't just use foo = bar, as this breaks subclassing.
    """
    def func(self, *args, **kwds):
        target = getattr(self, attrname)
        return target(*args, **kwds)
    return func
def invalidate_table_constraints(func):
    # Decorator for table-level DDL operations: poisons the cached
    # constraints for `table` (with the INVALID sentinel) before running,
    # so the next lookup refills the cache from the database.
    def _cache_clear(self, table, *args, **opts):
        self._set_cache(table, value=INVALID)
        return func(self, table, *args, **opts)
    return _cache_clear
def delete_column_constraints(func):
    # Decorator for column-dropping operations: empties the cached
    # constraint list for the dropped column before delegating.
    def _column_rm(self, table, column, *args, **opts):
        self._set_cache(table, column, value=[])
        result = func(self, table, column, *args, **opts)
        return result
    return _column_rm
def copy_column_constraints(func):
    # Decorator for column renames: carries the old column's cached
    # constraints over to the new column name before delegating.
    def _column_cp(self, table, column_old, column_new, *args, **opts):
        db_name = self._get_setting('NAME')
        inherited = self.lookup_constraint(db_name, table, column_old)
        self._set_cache(table, column_new, value=inherited)
        return func(self, table, column_old, column_new, *args, **opts)
    return _column_cp
class INVALID(Exception):
    # Dual-purpose sentinel: the class object itself marks a poisoned
    # table entry in the constraint cache, and it doubles as an exception
    # for cache-miss control flow in lookup_constraint().
    def __repr__(self):
        return 'INVALID'
class DryRunError(ValueError):
    # Raised when an operation that needs real database results is
    # attempted while running in dry-run mode.
    pass
class DatabaseOperations(object):
    """
    Generic SQL implementation of the DatabaseOperations.
    Some of this code comes from Django Evolution.
    """

    # SQL templates; backend subclasses override these where their
    # dialect differs from generic/PostgreSQL-style SQL.
    alter_string_set_type = 'ALTER COLUMN %(column)s TYPE %(type)s'
    alter_string_set_null = 'ALTER COLUMN %(column)s DROP NOT NULL'
    alter_string_drop_null = 'ALTER COLUMN %(column)s SET NOT NULL'
    delete_check_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s'
    add_column_string = 'ALTER TABLE %s ADD COLUMN %s;'
    delete_unique_sql = "ALTER TABLE %s DROP CONSTRAINT %s"
    delete_foreign_key_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s'
    max_index_name_length = 63
    drop_index_string = 'DROP INDEX %(index_name)s'
    delete_column_string = 'ALTER TABLE %s DROP COLUMN %s CASCADE;'
    create_primary_key_string = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s PRIMARY KEY (%(columns)s)"
    delete_primary_key_sql = "ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s"
    add_check_constraint_fragment = "ADD CONSTRAINT %(constraint)s CHECK (%(check)s)"
    rename_table_sql = "ALTER TABLE %s RENAME TO %s;"
    backend_name = None
    default_schema_name = "public"
    # Features -- capability flags that backend subclasses flip off when
    # their database lacks the corresponding support.
    allows_combined_alters = True
    supports_foreign_keys = True
    has_check_constraints = True
    has_booleans = True
    @cached_property
    def has_ddl_transactions(self):
        """
        Tests the database using feature detection to see if it has
        transactional DDL support.

        Probes by creating a table inside a transaction and rolling back:
        if a second CREATE of the same table then fails, the first CREATE
        survived the rollback, i.e. DDL is NOT transactional.
        """
        self._possibly_initialise()
        connection = self._get_connection()
        if hasattr(connection.features, "confirm") and not connection.features._confirmed:
            connection.features.confirm()
        # Django 1.3's MySQLdb backend doesn't raise DatabaseError
        exceptions = (DatabaseError, )
        try:
            from MySQLdb import OperationalError
            exceptions += (OperationalError, )
        except ImportError:
            pass
        # Now do the test
        if getattr(connection.features, 'supports_transactions', True):
            cursor = connection.cursor()
            self.start_transaction()
            cursor.execute('CREATE TABLE DDL_TRANSACTION_TEST (X INT)')
            self.rollback_transaction()
            try:
                try:
                    # Succeeds only if the rollback undid the first CREATE.
                    cursor.execute('CREATE TABLE DDL_TRANSACTION_TEST (X INT)')
                except exceptions:
                    return False
                else:
                    return True
            finally:
                # Always clean up the probe table, whichever way it got there.
                cursor.execute('DROP TABLE DDL_TRANSACTION_TEST')
        else:
            return False
def __init__(self, db_alias):
self.debug = False
self.deferred_sql = []
self.dry_run = False
self.pending_transactions = 0
self.pending_create_signals = []
self.db_alias = db_alias
self._constraint_cache = {}
self._initialised = False
def lookup_constraint(self, db_name, table_name, column_name=None):
    """ return a set() of constraints for db_name.table_name.column_name

    With column_name=None, returns (column, constraints) pairs for the
    whole table. Results come from the per-table constraint cache,
    which is (re)filled on miss or invalidation.
    """
    def _lookup():
        table = self._constraint_cache[db_name][table_name]
        if table is INVALID:
            # Entry was explicitly invalidated; signal a refill below.
            raise INVALID
        elif column_name is None:
            return table.items()
        else:
            return table[column_name]
    try:
        ret = _lookup()
        return ret
    except INVALID:
        # Drop the stale entry and rebuild the table's cache.
        del self._constraint_cache[db_name][table_name]
        self._fill_constraint_cache(db_name, table_name)
    except KeyError:
        if self._is_valid_cache(db_name, table_name):
            # Table is cached and valid: the column simply has no constraints.
            return []
        self._fill_constraint_cache(db_name, table_name)
    # Cache was (re)filled above; retry the lookup once.
    return self.lookup_constraint(db_name, table_name, column_name)

def _set_cache(self, table_name, column_name=None, value=INVALID):
    """Set (or, by default, invalidate) a constraint-cache entry."""
    db_name = self._get_setting('NAME')
    try:
        if column_name is not None:
            self._constraint_cache[db_name][table_name][column_name] = value
        else:
            self._constraint_cache[db_name][table_name] = value
    except (LookupError, TypeError):
        # Entry was never cached (or already invalidated): nothing to do.
        pass

def _is_valid_cache(self, db_name, table_name):
    """True when the table has a cached, non-invalidated entry."""
    # we cache per-table so if the table is there it is valid
    try:
        return self._constraint_cache[db_name][table_name] is not INVALID
    except KeyError:
        return False
def _is_multidb(self):
try:
from django.db import connections
connections # Prevents "unused import" warning
except ImportError:
return False
else:
return True
def _get_connection(self):
    """
    Returns a django connection for a given DB Alias
    """
    if self._is_multidb():
        from django.db import connections
        return connections[self.db_alias]
    else:
        # Single-database Django: only one global connection exists.
        from django.db import connection
        return connection

def _get_setting(self, setting_name):
    """
    Allows code to get a setting (like, for example, STORAGE_ENGINE)

    Raises KeyError (Django 1.2+) or AttributeError (older) when the
    setting does not exist.
    """
    setting_name = setting_name.upper()
    connection = self._get_connection()
    if self._is_multidb():
        # Django 1.2 and above
        return connection.settings_dict[setting_name]
    else:
        # Django 1.1 and below
        return getattr(settings, "DATABASE_%s" % setting_name)
def _has_setting(self, setting_name):
"""
Existence-checking version of _get_setting.
"""
try:
self._get_setting(setting_name)
except (KeyError, AttributeError):
return False
else:
return True
def _get_schema_name(self):
try:
return self._get_setting('schema')
except (KeyError, AttributeError):
return self.default_schema_name
def _possibly_initialise(self):
if not self._initialised:
self.connection_init()
self._initialised = True
def connection_init(self):
"""
Run before any SQL to let database-specific config be sent as a command,
e.g. which storage engine (MySQL) or transaction serialisability level.
"""
pass
def quote_name(self, name):
"""
Uses the database backend to quote the given table/column name.
"""
return self._get_connection().ops.quote_name(name)
def execute(self, sql, params=[]):
    """
    Executes the given SQL statement, with optional parameters.
    If the instance's debug attribute is True, prints out what it executes.

    Returns the fetched rows, or [] when the statement produced no
    result set. Dry runs print/log but execute nothing.
    """
    self._possibly_initialise()
    cursor = self._get_connection().cursor()
    if self.debug:
        print " = %s" % sql, params
    if self.dry_run:
        return []
    get_logger().debug('execute "%s" with params "%s"' % (sql, params))
    try:
        cursor.execute(sql, params)
    except DatabaseError, e:
        print >> sys.stderr, 'FATAL ERROR - The following SQL query failed: %s' % sql
        print >> sys.stderr, 'The error was: %s' % e
        raise
    try:
        return cursor.fetchall()
    except:
        # Statements with no result set make fetchall() raise on some
        # backends; treat that as "no rows" rather than an error.
        return []

def execute_many(self, sql, regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)", comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"):
    """
    Takes a SQL file and executes it as many separate statements.
    (Some backends, such as Postgres, don't work otherwise.)
    """
    # Be warned: This function is full of dark magic. Make sure you really
    # know regexes before trying to edit it.
    # First, strip comments
    sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()])
    # Now execute each statement
    # ([1:][::2] keeps only the capture-group pieces from re.split)
    for st in re.split(regex, sql)[1:][::2]:
        self.execute(st)
def add_deferred_sql(self, sql):
    """Queue a statement to run later via execute_deferred_sql()."""
    self.deferred_sql.append(sql)

def execute_deferred_sql(self):
    """Run every queued deferred statement, then empty the queue."""
    for statement in self.deferred_sql:
        self.execute(statement)
    self.deferred_sql = []

def clear_deferred_sql(self):
    """Discard any queued deferred SQL without running it."""
    self.deferred_sql = []

def clear_run_data(self, pending_creates=None):
    """
    Reset per-run state, as needed before a (dry) run. Optionally
    restore a previously saved pending-creates list.
    """
    self.clear_deferred_sql()
    self.pending_create_signals = pending_creates or []

def get_pending_creates(self):
    """Return the queued (app_label, model_names) signal list."""
    return self.pending_create_signals
@invalidate_table_constraints
def create_table(self, table_name, fields):
    """
    Creates the table 'table_name'. 'fields' is a tuple of fields,
    each represented by a 2-part tuple of field name and a
    django.db.models.fields.Field object
    """
    if len(table_name) > 63:
        print " ! WARNING: You have a table name longer than 63 characters; this will not fully work on PostgreSQL or MySQL."
    # avoid default values in CREATE TABLE statements (#925)
    for field_name, field in fields:
        field._suppress_default = True
    columns = [
        self.column_sql(table_name, field_name, field)
        for field_name, field in fields
    ]
    self.execute('CREATE TABLE %s (%s);' % (
        self.quote_name(table_name),
        # column_sql may return None (field with no db type); skip those.
        ', '.join([col for col in columns if col]),
    ))

add_table = alias('create_table')  # Alias for consistency's sake
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
    """
    Renames the table 'old_table_name' to 'table_name'.
    A no-op when the two names are equal.
    """
    if old_table_name == table_name:
        # Short-circuit out.
        return
    params = (self.quote_name(old_table_name), self.quote_name(table_name))
    self.execute(self.rename_table_sql % params)
    # Invalidate the not-yet-indexed table
    self._set_cache(table_name, value=INVALID)

@invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
    """
    Deletes the table 'table_name'.

    @param cascade: when True (default), drop with CASCADE.
    """
    params = (self.quote_name(table_name), )
    if cascade:
        self.execute('DROP TABLE %s CASCADE;' % params)
    else:
        self.execute('DROP TABLE %s;' % params)

drop_table = alias('delete_table')

@invalidate_table_constraints
def clear_table(self, table_name):
    """
    Deletes all rows from 'table_name'.
    """
    params = (self.quote_name(table_name), )
    self.execute('DELETE FROM %s;' % params)
@invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=True):
    """
    Adds the column 'name' to the table 'table_name'.
    Uses the 'field' parameter, a django.db.models.fields.Field instance,
    to generate the necessary sql
    @param table_name: The name of the table to add the column to
    @param name: The name of the column to add
    @param field: The field to use
    @param keep_default: When False, strip the default off again after
        creation (it is only needed to fill existing rows).
    """
    sql = self.column_sql(table_name, name, field)
    if sql:
        params = (
            self.quote_name(table_name),
            sql,
        )
        sql = self.add_column_string % params
        self.execute(sql)
        # Now, drop the default if we need to
        if not keep_default and field.default is not None:
            field.default = NOT_PROVIDED
            self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
def _db_type_for_alter_column(self, field):
    """
    Returns a field's type suitable for ALTER COLUMN.
    By default it just returns field.db_type().
    To be overriden by backend specific subclasses
    @param field: The field to generate type for
    """
    try:
        return field.db_type(connection=self._get_connection())
    except TypeError:
        # Older Django: db_type() takes no connection argument.
        return field.db_type()

def _alter_add_column_mods(self, field, name, params, sqls):
    """
    Subcommand of alter_column that modifies column definitions beyond
    the type string -- e.g. adding constraints where they cannot be specified
    as part of the type (overrideable)

    Default implementation is a no-op; overrides append to 'sqls'.
    """
    pass

def _alter_set_defaults(self, field, name, params, sqls):
    "Subcommand of alter_column that sets default values (overrideable)"
    # Next, set any default
    if not field.null and field.has_default():
        default = field.get_default()
        sqls.append(('ALTER COLUMN %s SET DEFAULT %%s ' % (self.quote_name(name),), [default]))
    else:
        # Nullable or defaultless columns get any existing default removed.
        sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), []))
@invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
    """
    Alters the given column name so it will match the given field.
    Note that conversion between the two by the database must be possible.
    Will not automatically add _id by default; to have this behavour, pass
    explicit_name=False.
    @param table_name: The name of the table to add the column to
    @param name: The name of the column to alter
    @param field: The new field definition to use
    @param explicit_name: When False, use field.column as the column name
    @param ignore_constraints: When True, skip dropping/recreating CHECK
        and FOREIGN KEY constraints around the change.
    """
    if self.dry_run:
        if self.debug:
            print ' - no dry run output for alter_column() due to dynamic DDL, sorry'
        return
    # hook for the field to do any resolution prior to it's attributes being queried
    if hasattr(field, 'south_init'):
        field.south_init()
    # Add _id or whatever if we need to
    field.set_attributes_from_name(name)
    if not explicit_name:
        name = field.column
    else:
        field.column = name
    if not ignore_constraints:
        # Drop all check constraints. Note that constraints will be added back
        # with self.alter_string_set_type and self.alter_string_drop_null.
        if self.has_check_constraints:
            check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
            for constraint in check_constraints:
                self.execute(self.delete_check_sql % {
                    'table': self.quote_name(table_name),
                    'constraint': self.quote_name(constraint),
                })
        # Drop all foreign key constraints
        try:
            self.delete_foreign_key(table_name, name)
        except ValueError:
            # There weren't any
            pass
    # First, change the type
    params = {
        "column": self.quote_name(name),
        "type": self._db_type_for_alter_column(field),
        "table_name": table_name
    }
    # SQLs is a list of (SQL, values) pairs.
    sqls = []
    # Only alter the column if it has a type (Geometry ones sometimes don't)
    if params["type"] is not None:
        sqls.append((self.alter_string_set_type % params, []))
    # Add any field- and backend- specific modifications
    self._alter_add_column_mods(field, name, params, sqls)
    # Next, nullity
    if field.null:
        sqls.append((self.alter_string_set_null % params, []))
    else:
        sqls.append((self.alter_string_drop_null % params, []))
    # Next, set any default
    self._alter_set_defaults(field, name, params, sqls)
    # Finally, actually change the column
    if self.allows_combined_alters:
        # Issue all accumulated clauses as one ALTER TABLE statement.
        sqls, values = zip(*sqls)
        self.execute(
            "ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)),
            flatten(values),
        )
    else:
        # Databases like e.g. MySQL don't like more than one alter at once.
        for sql, values in sqls:
            self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values)
    if not ignore_constraints:
        # Add back FK constraints if needed
        if field.rel and self.supports_foreign_keys:
            self.execute(
                self.foreign_key_sql(
                    table_name,
                    field.column,
                    field.rel.to._meta.db_table,
                    field.rel.to._meta.get_field(field.rel.field_name).column
                )
            )
def _fill_constraint_cache(self, db_name, table_name):
    """
    Populate the constraint cache for one table by querying both
    information_schema usage views; each column maps to a set of
    (constraint_type, constraint_name) pairs.
    """
    schema = self._get_schema_name()
    ifsc_tables = ["constraint_column_usage", "key_column_usage"]
    self._constraint_cache.setdefault(db_name, {})
    self._constraint_cache[db_name][table_name] = {}
    for ifsc_table in ifsc_tables:
        rows = self.execute("""
            SELECT kc.constraint_name, kc.column_name, c.constraint_type
            FROM information_schema.%s AS kc
            JOIN information_schema.table_constraints AS c ON
                kc.table_schema = c.table_schema AND
                kc.table_name = c.table_name AND
                kc.constraint_name = c.constraint_name
            WHERE
                kc.table_schema = %%s AND
                kc.table_name = %%s
        """ % ifsc_table, [schema, table_name])
        for constraint, column, kind in rows:
            self._constraint_cache[db_name][table_name].setdefault(column, set())
            self._constraint_cache[db_name][table_name][column].add((kind, constraint))
    return
def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"):
    """
    Gets the names of the constraints affecting the given columns.
    If columns is None, returns all constraints of the type on the table.

    Generator of constraint names; column comparison is case-insensitive.
    Raises DryRunError during dry runs (no DB to inspect).
    """
    if self.dry_run:
        raise DryRunError("Cannot get constraints for columns.")
    if columns is not None:
        columns = set(map(lambda s: s.lower(), columns))
    db_name = self._get_setting('NAME')
    cnames = {}
    # Invert the per-column cache into constraint-name -> set-of-columns.
    for col, constraints in self.lookup_constraint(db_name, table_name):
        for kind, cname in constraints:
            if kind == type:
                cnames.setdefault(cname, set())
                cnames[cname].add(col.lower())
    for cname, cols in cnames.items():
        # Yield constraints covering exactly the requested columns
        # (or every constraint of the type when columns is None).
        if cols == columns or columns is None:
            yield cname
@invalidate_table_constraints
def create_unique(self, table_name, columns):
    """
    Creates a UNIQUE constraint on the columns on the given table.
    Returns the generated constraint name.
    """
    if not isinstance(columns, (list, tuple)):
        columns = [columns]
    name = self.create_index_name(table_name, columns, suffix="_uniq")
    cols = ", ".join(map(self.quote_name, columns))
    self.execute("ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)" % (
        self.quote_name(table_name),
        self.quote_name(name),
        cols,
    ))
    return name

@invalidate_table_constraints
def delete_unique(self, table_name, columns):
    """
    Deletes a UNIQUE constraint on precisely the columns on the given table.
    Raises ValueError when no matching constraint exists.
    """
    if not isinstance(columns, (list, tuple)):
        columns = [columns]
    # Dry runs mean we can't do anything.
    if self.dry_run:
        if self.debug:
            print ' - no dry run output for delete_unique_column() due to dynamic DDL, sorry'
        return
    constraints = list(self._constraints_affecting_columns(table_name, columns))
    if not constraints:
        raise ValueError("Cannot find a UNIQUE constraint on table %s, columns %r" % (table_name, columns))
    for constraint in constraints:
        self.execute(self.delete_unique_sql % (
            self.quote_name(table_name),
            self.quote_name(constraint),
        ))
def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False):
    """
    Creates the SQL snippet for a column. Used by add_column and add_table.

    @param table_name: Table the column belongs to (used for deferred FK/index SQL)
    @param field_name: Attribute name for the field (sets field.column)
    @param field: A django.db.models.fields.Field instance
    @param tablespace: Fallback tablespace for unique columns
    @param with_name: Include the quoted column name in the snippet
    @param field_prepared: Skip set_attributes_from_name if already done
    Returns the SQL fragment, or None when the field has no db type.
    """
    # If the field hasn't already been told its attribute name, do so.
    if not field_prepared:
        field.set_attributes_from_name(field_name)
    # hook for the field to do any resolution prior to it's attributes being queried
    if hasattr(field, 'south_init'):
        field.south_init()
    # Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL)
    field = self._field_sanity(field)
    try:
        sql = field.db_type(connection=self._get_connection())
    except TypeError:
        # Older Django: db_type() takes no connection argument.
        sql = field.db_type()
    if sql:
        # Some callers, like the sqlite stuff, just want the extended type.
        if with_name:
            field_output = [self.quote_name(field.column), sql]
        else:
            field_output = [sql]
        field_output.append('%sNULL' % (not field.null and 'NOT ' or ''))
        if field.primary_key:
            field_output.append('PRIMARY KEY')
        elif field.unique:
            # Just use UNIQUE (no indexes any more, we have delete_unique)
            field_output.append('UNIQUE')
        tablespace = field.db_tablespace or tablespace
        if tablespace and getattr(self._get_connection().features, "supports_tablespaces", False) and field.unique:
            # We must specify the index tablespace inline, because we
            # won't be generating a CREATE INDEX statement for this field.
            field_output.append(self._get_connection().ops.tablespace_sql(tablespace, inline=True))
        sql = ' '.join(field_output)
        sqlparams = ()
        # if the field is "NOT NULL" and a default value is provided, create the column with it
        # this allows the addition of a NOT NULL field to a table with existing rows
        if not getattr(field, '_suppress_default', False):
            if field.has_default():
                default = field.get_default()
                # If the default is actually None, don't add a default term
                if default is not None:
                    # If the default is a callable, then call it!
                    if callable(default):
                        default = default()
                    default = field.get_db_prep_save(default, connection=self._get_connection())
                    default = self._default_value_workaround(default)
                    # Now do some very cheap quoting. TODO: Redesign return values to avoid this.
                    if isinstance(default, basestring):
                        default = "'%s'" % default.replace("'", "''")
                    # Escape any % signs in the output (bug #317)
                    if isinstance(default, basestring):
                        default = default.replace("%", "%%")
                    # Add it in
                    sql += " DEFAULT %s"
                    # BUGFIX: must be a one-tuple -- the bare "(default)" is
                    # just "default", which breaks "sql % sqlparams" whenever
                    # the prepared default is itself a tuple.
                    sqlparams = (default,)
            elif (not field.null and field.blank) or (field.get_default() == ''):
                if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls:
                    sql += " DEFAULT ''"
                # Error here would be nice, but doesn't seem to play fair.
                #else:
                #    raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.")
        if field.rel and self.supports_foreign_keys:
            # FK creation is deferred so the referenced table exists first.
            self.add_deferred_sql(
                self.foreign_key_sql(
                    table_name,
                    field.column,
                    field.rel.to._meta.db_table,
                    field.rel.to._meta.get_field(field.rel.field_name).column
                )
            )
    # Things like the contrib.gis module fields have this in 1.1 and below
    if hasattr(field, 'post_create_sql'):
        for stmt in field.post_create_sql(no_style(), table_name):
            self.add_deferred_sql(stmt)
    # In 1.2 and above, you have to ask the DatabaseCreation stuff for it.
    # This also creates normal indexes in 1.1.
    if hasattr(self._get_connection().creation, "sql_indexes_for_field"):
        # Make a fake model to pass in, with only db_table
        model = self.mock_model("FakeModelForGISCreation", table_name)
        for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
            self.add_deferred_sql(stmt)
    if sql:
        return sql % sqlparams
    else:
        return None
def _field_sanity(self, field):
"""
Placeholder for DBMS-specific field alterations (some combos aren't valid,
e.g. DEFAULT and TEXT on MySQL)
"""
return field
def _default_value_workaround(self, value):
"""
DBMS-specific value alterations (this really works around
missing functionality in Django backends)
"""
if isinstance(value, bool) and not self.has_booleans:
return int(value)
else:
return value
def foreign_key_sql(self, from_table_name, from_column_name, to_table_name, to_column_name):
    """
    Generates a full SQL statement to add a foreign key constraint

    The constraint name embeds a hash of the two table names and is
    shortened to the backend's maximum identifier length.
    """
    constraint_name = '%s_refs_%s_%x' % (from_column_name, to_column_name, abs(hash((from_table_name, to_table_name))))
    return 'ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % (
        self.quote_name(from_table_name),
        self.quote_name(self.shorten_name(constraint_name)),
        self.quote_name(from_column_name),
        self.quote_name(to_table_name),
        self.quote_name(to_column_name),
        self._get_connection().ops.deferrable_sql()  # Django knows this
    )
@invalidate_table_constraints
def delete_foreign_key(self, table_name, column):
    """
    Drop a foreign key constraint

    Raises ValueError when no FOREIGN KEY constraint covers the column.
    """
    if self.dry_run:
        if self.debug:
            print ' - no dry run output for delete_foreign_key() due to dynamic DDL, sorry'
        return  # We can't look at the DB to get the constraints
    constraints = self._find_foreign_constraints(table_name, column)
    if not constraints:
        raise ValueError("Cannot find a FOREIGN KEY constraint on table %s, column %s" % (table_name, column))
    for constraint_name in constraints:
        self.execute(self.delete_foreign_key_sql % {
            "table": self.quote_name(table_name),
            "constraint": self.quote_name(constraint_name),
        })

drop_foreign_key = alias('delete_foreign_key')

def _find_foreign_constraints(self, table_name, column_name=None):
    """
    Return the FK constraint names affecting column_name on table_name.

    For single-column primary keys the lookup also includes the PK
    column alongside the requested one.
    """
    constraints = self._constraints_affecting_columns(
        table_name, [column_name], "FOREIGN KEY")
    primary_key_columns = self._find_primary_key_columns(table_name)
    if len(primary_key_columns) > 1:
        # Composite primary keys cannot be referenced by a foreign key
        return list(constraints)
    else:
        primary_key_columns.add(column_name)
        recursive_constraints = set(self._constraints_affecting_columns(
            table_name, primary_key_columns, "FOREIGN KEY"))
        return list(recursive_constraints.union(constraints))
def _digest(self, *args):
    """
    Use django.db.backends.creation.BaseDatabaseCreation._digest
    to create index name in Django style. An evil hack :(
    """
    # Build and cache the Django creation helper lazily, once.
    if not hasattr(self, '_django_db_creation'):
        self._django_db_creation = BaseDatabaseCreation(self._get_connection())
    return self._django_db_creation._digest(*args)

def shorten_name(self, name):
    """Truncate an identifier to the backend's maximum name length."""
    return truncate_name(name, self._get_connection().ops.max_name_length())
def create_index_name(self, table_name, column_names, suffix=""):
    """
    Generate a unique name for the index

    Names are a deterministic function of table and columns, so
    delete_index can reconstruct them without consulting the database.
    """
    # If there is just one column in the index, use a default algorithm from Django
    if len(column_names) == 1 and not suffix:
        return self.shorten_name(
            '%s_%s' % (table_name, self._digest(column_names[0]))
        )
    # Else generate the name for the index by South
    table_name = table_name.replace('"', '').replace('.', '_')
    index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
    # If the index name is too long, truncate it
    index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
    if len(index_name) > self.max_index_name_length:
        # Keep the distinguishing tail; trim only the table-name prefix.
        part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
        index_name = '%s%s' % (table_name[:(self.max_index_name_length - len(part))], part)
    return index_name
def create_index_sql(self, table_name, column_names, unique=False, db_tablespace=''):
    """
    Generates a create index statement on 'table_name' for a list of 'column_names'
    Returns '' (after a warning) when no columns are supplied.
    """
    if not column_names:
        print "No column names supplied on which to create an index"
        return ''
    connection = self._get_connection()
    if db_tablespace and connection.features.supports_tablespaces:
        tablespace_sql = ' ' + connection.ops.tablespace_sql(db_tablespace)
    else:
        tablespace_sql = ''
    index_name = self.create_index_name(table_name, column_names)
    return 'CREATE %sINDEX %s ON %s (%s)%s;' % (
        unique and 'UNIQUE ' or '',
        self.quote_name(index_name),
        self.quote_name(table_name),
        ','.join([self.quote_name(field) for field in column_names]),
        tablespace_sql
    )
@invalidate_table_constraints
def create_index(self, table_name, column_names, unique=False, db_tablespace=''):
    """ Executes a create index statement """
    sql = self.create_index_sql(table_name, column_names, unique, db_tablespace)
    self.execute(sql)

@invalidate_table_constraints
def delete_index(self, table_name, column_names, db_tablespace=''):
    """
    Deletes an index created with create_index.
    This is possible using only columns due to the deterministic
    index naming function which relies on column names.
    """
    if isinstance(column_names, (str, unicode)):
        column_names = [column_names]
    # Rebuild the same name create_index would have generated.
    name = self.create_index_name(table_name, column_names)
    sql = self.drop_index_string % {
        "index_name": self.quote_name(name),
        "table_name": self.quote_name(table_name),
    }
    self.execute(sql)

drop_index = alias('delete_index')
@delete_column_constraints
def delete_column(self, table_name, name):
    """
    Deletes the column 'column_name' from the table 'table_name'.
    """
    params = (self.quote_name(table_name), self.quote_name(name))
    self.execute(self.delete_column_string % params, [])

drop_column = alias('delete_column')

def rename_column(self, table_name, old, new):
    """
    Renames the column 'old' from the table 'table_name' to 'new'.
    Must be overridden by backends; there is no portable SQL for it.
    """
    raise NotImplementedError("rename_column has no generic SQL syntax")
@invalidate_table_constraints
def delete_primary_key(self, table_name):
    """
    Drops the old primary key.
    Raises ValueError when the table has no PRIMARY KEY constraint.
    """
    # Dry runs mean we can't do anything.
    if self.dry_run:
        if self.debug:
            print ' - no dry run output for delete_primary_key() due to dynamic DDL, sorry'
        return
    constraints = list(self._constraints_affecting_columns(table_name, None, type="PRIMARY KEY"))
    if not constraints:
        raise ValueError("Cannot find a PRIMARY KEY constraint on table %s" % (table_name,))
    for constraint in constraints:
        self.execute(self.delete_primary_key_sql % {
            "table": self.quote_name(table_name),
            "constraint": self.quote_name(constraint),
        })

drop_primary_key = alias('delete_primary_key')

@invalidate_table_constraints
def create_primary_key(self, table_name, columns):
    """
    Creates a new primary key on the specified columns.
    The constraint is named '<table_name>_pkey'.
    """
    if not isinstance(columns, (list, tuple)):
        columns = [columns]
    self.execute(self.create_primary_key_string % {
        "table": self.quote_name(table_name),
        "constraint": self.quote_name(table_name + "_pkey"),
        "columns": ", ".join(map(self.quote_name, columns)),
    })
def _find_primary_key_columns(self, table_name):
"""
Find all columns of the primary key of the specified table
"""
db_name = self._get_setting('NAME')
primary_key_columns = set()
for col, constraints in self.lookup_constraint(db_name, table_name):
for kind, cname in constraints:
if kind == 'PRIMARY KEY':
primary_key_columns.add(col.lower())
return primary_key_columns
def start_transaction(self):
    """
    Makes sure the following commands are inside a transaction.
    Must be followed by a (commit|rollback)_transaction call.
    """
    if self.dry_run:
        # Count the nesting so rollback_transactions_dry_run can unwind it.
        self.pending_transactions += 1
    transaction.commit_unless_managed(using=self.db_alias)
    transaction.enter_transaction_management(using=self.db_alias)
    transaction.managed(True, using=self.db_alias)

def commit_transaction(self):
    """
    Commits the current transaction.
    Must be preceded by a start_transaction call.
    """
    if self.dry_run:
        return
    transaction.commit(using=self.db_alias)
    transaction.leave_transaction_management(using=self.db_alias)

def rollback_transaction(self):
    """
    Rolls back the current transaction.
    Must be preceded by a start_transaction call.
    """
    if self.dry_run:
        # NOTE(review): unlike commit_transaction there is no early return
        # here -- the rollback still runs, and the dry-run nesting counter
        # is decremented.
        self.pending_transactions -= 1
    transaction.rollback(using=self.db_alias)
    transaction.leave_transaction_management(using=self.db_alias)

def rollback_transactions_dry_run(self):
    """
    Rolls back all pending_transactions during this dry run.
    """
    if not self.dry_run:
        return
    while self.pending_transactions > 0:
        self.rollback_transaction()
    if transaction.is_dirty(using=self.db_alias):
        # Force an exception, if we're still in a dirty transaction.
        # This means we are missing a COMMIT/ROLLBACK.
        transaction.leave_transaction_management(using=self.db_alias)
def send_create_signal(self, app_label, model_names):
    """Queue a post_syncdb signal to send later for the given models."""
    self.pending_create_signals.append((app_label, model_names))

def send_pending_create_signals(self, verbosity=0, interactive=False):
    """Send all queued post_syncdb signals (one per app), then clear the queue."""
    # Group app_labels together
    signals = SortedDict()
    for (app_label, model_names) in self.pending_create_signals:
        try:
            signals[app_label].extend(model_names)
        except KeyError:
            signals[app_label] = list(model_names)
    # Send only one signal per app.
    for (app_label, model_names) in signals.iteritems():
        # set() de-duplicates model names queued more than once.
        self.really_send_create_signal(app_label, list(set(model_names)),
                                       verbosity=verbosity,
                                       interactive=interactive)
    self.pending_create_signals = []
def really_send_create_signal(self, app_label, model_names,
                              verbosity=0, interactive=False):
    """
    Sends a post_syncdb signal for the model specified.
    If the model is not found (perhaps it's been deleted?),
    no signal is sent.
    TODO: The behavior of django.contrib.* apps seems flawed in that
    they don't respect created_models. Rather, they blindly execute
    over all models within the app sending the signal. This is a
    patch we should push Django to make For now, this should work.
    """
    if self.debug:
        print " - Sending post_syncdb signal for %s: %s" % (app_label, model_names)
    app = models.get_app(app_label)
    if not app:
        return
    # Resolve names to model classes, skipping any that no longer exist.
    created_models = []
    for model_name in model_names:
        model = models.get_model(app_label, model_name)
        if model:
            created_models.append(model)
    if created_models:
        if hasattr(dispatcher, "send"):
            # Older djangos
            dispatcher.send(signal=models.signals.post_syncdb, sender=app,
                            app=app, created_models=created_models,
                            verbosity=verbosity, interactive=interactive)
        else:
            if self._is_multidb():
                # Django 1.2+: the signal also carries the database alias.
                models.signals.post_syncdb.send(
                    sender=app,
                    app=app,
                    created_models=created_models,
                    verbosity=verbosity,
                    interactive=interactive,
                    db=self.db_alias,
                )
            else:
                # Django 1.1 - 1.0
                models.signals.post_syncdb.send(
                    sender=app,
                    app=app,
                    created_models=created_models,
                    verbosity=verbosity,
                    interactive=interactive,
                )
def mock_model(self, model_name, db_table, db_tablespace='',
               pk_field_name='id', pk_field_type=models.AutoField,
               pk_field_args=[], pk_field_kwargs={}):
    """
    Generates a MockModel class that provides enough information
    to be used by a foreign key/many-to-many relationship.
    Migrations should prefer to use these rather than actual models
    as models could get deleted over time, but these can remain in
    migration files forever.
    Deprecated.

    @param model_name: Name used for the mock's _meta.object_name
    @param db_table: Table name exposed via _meta.db_table
    @param pk_field_name / pk_field_type / pk_field_args / pk_field_kwargs:
        describe the mock's primary-key field.
    """
    class MockOptions(object):
        def __init__(self):
            self.db_table = db_table
            self.db_tablespace = db_tablespace or settings.DEFAULT_TABLESPACE
            self.object_name = model_name
            self.module_name = model_name.lower()
            # BUGFIX: work on a copy -- mutating pk_field_kwargs in place
            # leaked 'primary_key' into the caller's dict and into the
            # shared default-argument dict across calls.
            kwargs = dict(pk_field_kwargs)
            if pk_field_type == models.AutoField:
                kwargs['primary_key'] = True
            self.pk = pk_field_type(*pk_field_args, **kwargs)
            self.pk.set_attributes_from_name(pk_field_name)
            self.abstract = False
        def get_field_by_name(self, field_name):
            # we only care about the pk field
            return (self.pk, self.model, True, False)
        def get_field(self, name):
            # we only care about the pk field
            return self.pk
    class MockModel(object):
        _meta = None
    # We need to return an actual class object here, not an instance
    MockModel._meta = MockOptions()
    MockModel._meta.model = MockModel
    return MockModel
def _db_positive_type_for_alter_column(self, klass, field):
    """
    A helper for subclasses overriding _db_type_for_alter_column:
    Remove the check constraint from the type string for PositiveInteger
    and PositiveSmallInteger fields.
    @param klass: The type of the child (required to allow this to be used when it is subclassed)
    @param field: The field to generate type for
    """
    super_result = super(klass, self)._db_type_for_alter_column(field)
    if isinstance(field, (models.PositiveSmallIntegerField, models.PositiveIntegerField)):
        # Keep only the first word (the base type); drop the trailing clause.
        return super_result.split(" ", 1)[0]
    return super_result

def _alter_add_positive_check(self, klass, field, name, params, sqls):
    """
    A helper for subclasses overriding _alter_add_column_mods:
    Add a check constraint verifying positivity to PositiveInteger and
    PositiveSmallInteger fields.
    """
    super(klass, self)._alter_add_column_mods(field, name, params, sqls)
    if isinstance(field, (models.PositiveSmallIntegerField, models.PositiveIntegerField)):
        # A hash of the params makes the constraint name unique per column.
        uniq_hash = abs(hash(tuple(params.values())))
        d = dict(
            constraint = "CK_%s_PSTV_%s" % (name, hex(uniq_hash)[2:]),
            check = "%s >= 0" % self.quote_name(name))
        sqls.append((self.add_check_constraint_fragment % d, []))
# Single-level flattening of lists
def flatten(ls):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sublist in ls for item in sublist]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.rdata
import dns.rdatatype
class TLSA(dns.rdata.Rdata):
    """TLSA record
    @ivar usage: The certificate usage
    @type usage: int
    @ivar selector: The selector field
    @type selector: int
    @ivar mtype: The 'matching type' field
    @type mtype: int
    @ivar cert: The 'Certificate Association Data' field
    @type cert: string
    @see: RFC 6698"""

    __slots__ = ['usage', 'selector', 'mtype', 'cert']

    def __init__(self, rdclass, rdtype, usage, selector,
                 mtype, cert):
        super(TLSA, self).__init__(rdclass, rdtype)
        self.usage = usage
        self.selector = selector
        self.mtype = mtype
        self.cert = cert

    def to_text(self, origin=None, relativize=True, **kw):
        # Three decimal octets followed by the certificate data in hex.
        return '%d %d %d %s' % (self.usage,
                                self.selector,
                                self.mtype,
                                dns.rdata._hexify(self.cert,
                                                  chunksize=128))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse "usage selector mtype cert..." from zone-file text; the
        # hex certificate data may be split across several tokens.
        usage = tok.get_uint8()
        selector = tok.get_uint8()
        mtype = tok.get_uint8()
        cert_chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            cert_chunks.append(t.value)
        cert = ''.join(cert_chunks)
        # Python 2 str-to-bytes hex decoding.
        cert = cert.decode('hex_codec')
        return cls(rdclass, rdtype, usage, selector, mtype, cert)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Wire format: three unsigned bytes, then the raw certificate data.
        header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
        file.write(header)
        file.write(self.cert)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        header = struct.unpack("!BBB", wire[current : current + 3])
        current += 3
        rdlen -= 3
        # The remaining rdlen octets are the certificate association data.
        cert = wire[current : current + rdlen].unwrap()
        return cls(rdclass, rdtype, header[0], header[1], header[2], cert)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare the packed fixed header first, then the cert data
        # (Python 2 cmp()-style three-way result).
        hs = struct.pack("!BBB", self.usage, self.selector, self.mtype)
        ho = struct.pack("!BBB", other.usage, other.selector, other.mtype)
        v = cmp(hs, ho)
        if v == 0:
            v = cmp(self.cert, other.cert)
        return v
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Python Classes/Functions used to Import Tycho Datasets
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# TO-DO: Add time back to the read state function for Tyler's code
# Importing Necessary System Packages
import math
import io
import os
import numpy as np
import matplotlib as plt
import random as rp
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
#from amuse.couple import multiples
# Import the Amuse Stellar Packages
from amuse.ic.kingmodel import new_king_model
from amuse.ic.kroupa import new_kroupa_mass_distribution
# Import cPickle/Pickle
try:
import pickle as pickle
except:
import pickle
# Tycho util import
from tycho import util
#from tycho import multiples2 as multiples
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def read_initial_state(file_prefix):
    """Read a previously saved Tycho initial state from disk.

    file_prefix: string prefix of the saved files inside <cwd>/InitialState

    Returns (master_set, ic_array, converter): the master AMUSE particle
    set, the unpickled initial-conditions record, and an nbody<->SI unit
    converter built from that record's mass and radius fields.
    """
    # Saved files live at <cwd>/InitialState/<file_prefix>_*; build the
    # path portably instead of concatenating with "/".
    file_base = os.path.join(os.getcwd(), "InitialState", file_prefix)
    # The master AMUSE particle set is stored as HDF5.
    master_set = read_set_from_file(file_base + "_particles.hdf5",
                                    format="hdf5", close_file=True)
    # Unpickle the initial-conditions record; the context manager
    # guarantees the handle is closed even if pickle.load raises.
    with open(file_base + "_ic.pkl", "rb") as ic_file:
        ic_array = pickle.load(ic_file)
    # total_smass / viral_radius were saved as strings; convert back to
    # AMUSE quantities before building the converter.
    total_smass = float(ic_array.total_smass) | units.kg
    viral_radius = float(ic_array.viral_radius) | units.m
    converter = nbody_system.nbody_to_si(total_smass, viral_radius)
    return master_set, ic_array, converter
# ------------------------------------ #
# RESTART FUNCTION #
# ------------------------------------ #
def read_state_from_file(restart_file, gravity_code, kep, SMALLN):
    """Restore a saved simulation state and rebuild the multiples module.

    restart_file: path prefix of the saved .stars.hdf5 /
        .stars_python.hdf5 / .bookkeeping files
    gravity_code, kep, SMALLN: live AMUSE workers handed to
        multiples.Multiples

    Returns (stars_python, multiples_code).

    NOTE(review): `multiples` is not bound at module level (the
    `from tycho import multiples2 as multiples` import is commented out
    at the top of the file), so this function raises NameError unless
    the caller's environment supplies it — confirm before use.
    """
    stars = read_set_from_file(restart_file + ".stars.hdf5", 'hdf5',
                               version='2.0', close_file=True).copy()
    stars_python = read_set_from_file(restart_file + ".stars_python.hdf5", 'hdf5',
                                      version='2.0', close_file=True).copy()
    # The with-statement closes the file on exit; the old explicit
    # f.close() inside the block was redundant and has been removed.
    with open(restart_file + ".bookkeeping", "rb") as f:
        bookkeeping = pickle.load(f)
    print(bookkeeping)
    # Rebuild the binary-tree bookkeeping for every root particle that
    # still carries component data.
    root_to_tree = {}
    for root in stars:
        if hasattr(root, 'components') and root.components is not None:
            root_to_tree[root] = datamodel.trees.BinaryTreeOnParticle(root.components[0])
    gravity_code.particles.add_particles(stars)
    multiples_code = multiples.Multiples(gravity_code, SMALLN, kep,
                                         gravity_constant=units.constants.G)
    multiples_code.neighbor_distance_factor = bookkeeping['neighbor_distance_factor']
    multiples_code.neighbor_veto = bookkeeping['neighbor_veto']
    multiples_code.multiples_external_tidal_correction = bookkeeping['multiples_external_tidal_correction']
    multiples_code.multiples_integration_energy_error = bookkeeping['multiples_integration_energy_error']
    multiples_code.multiples_internal_tidal_correction = bookkeeping['multiples_internal_tidal_correction']
    multiples.root_index = bookkeeping['root_index']
    multiples_code.root_to_tree = root_to_tree
    return stars_python, multiples_code
# ------------------------------------------ #
# RESTART CRASH FUNCTION #
# ------------------------------------------ #
def recover_crash(restart_file, gravity_code, kep, SMALLN):
    """Rebuild a Multiples simulation from crash-recovery files.

    Same on-disk format as read_state_from_file, but the recovered
    particles are NOT re-added to the gravity code; only the clock is
    restored.

    restart_file: path prefix of the saved .stars.hdf5 /
        .stars_python.hdf5 / .bookkeeping files
    gravity_code, kep, SMALLN: live AMUSE workers used to rebuild the
        multiples module

    Returns (model_time, multiples_code).

    NOTE(review): `multiples` is not bound at module level (its import
    is commented out at the top of the file); confirm the caller's
    environment supplies it before relying on this function.
    """
    # NEEDS SOME TENDER LOVE AND CARE
    stars = read_set_from_file(restart_file + ".stars.hdf5", 'hdf5',
                               version='2.0', close_file=True).copy()
    stars_python = read_set_from_file(restart_file + ".stars_python.hdf5", 'hdf5',
                                      version='2.0', close_file=True).copy()
    # The with-statement closes the file on exit; the old explicit
    # f.close() inside the block was redundant and has been removed.
    with open(restart_file + ".bookkeeping", "rb") as f:
        bookkeeping = pickle.load(f)
    print(bookkeeping)
    # Rebuild the binary-tree bookkeeping for every root particle that
    # still carries component data.
    root_to_tree = {}
    for root in stars:
        if hasattr(root, 'components') and root.components is not None:
            root_to_tree[root] = datamodel.trees.BinaryTreeOnParticle(root.components[0])
    # NOTE(review): this assigns an attribute named set_begin_time
    # rather than calling a setter — looks suspicious, confirm intended.
    gravity_code.set_begin_time = bookkeeping['model_time']
    multiples_code = multiples.Multiples(gravity_code, SMALLN, kep,
                                         gravity_constant=units.constants.G)
    multiples_code.neighbor_distance_factor = bookkeeping['neighbor_distance_factor']
    multiples_code.neighbor_veto = bookkeeping['neighbor_veto']
    multiples_code.multiples_external_tidal_correction = bookkeeping['multiples_external_tidal_correction']
    multiples_code.multiples_integration_energy_error = bookkeeping['multiples_integration_energy_error']
    multiples_code.multiples_internal_tidal_correction = bookkeeping['multiples_internal_tidal_correction']
    multiples.root_index = bookkeeping['root_index']
    multiples_code.root_to_tree = root_to_tree
    return bookkeeping['model_time'], multiples_code
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata block: module maturity and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_config_snapshot
short_description: Manage Config Snapshots (config:Snapshot, config:ExportP)
description:
- Manage Config Snapshots on Cisco ACI fabrics.
- Creating new Snapshots is done using the configExportP class.
- Removing Snapshots is done using the configSnapshot class.
notes:
- The APIC does not provide a mechanism for naming the snapshots.
- 'Snapshot files use the following naming structure: ce_<config export policy name>-<yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>.<mss>+<hh>:<mm>.'
- 'Snapshot objects use the following naming structure: run-<yyyy>-<mm>-<dd>T<hh>-<mm>-<ss>.'
- More information about the internal APIC classes B(config:Snapshot) and B(config:ExportP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
description:
description:
- The description for the Config Export Policy.
aliases: [ descr ]
export_policy:
description:
- The name of the Export Policy to use for Config Snapshots.
aliases: [ name ]
format:
description:
- Sets the config backup to be formatted in JSON or XML.
- The APIC defaults to C(json) when unset.
choices: [ json, xml ]
include_secure:
description:
- Determines if secure information should be included in the backup.
- The APIC defaults to C(yes) when unset.
type: bool
max_count:
description:
- Determines how many snapshots can exist for the Export Policy before the APIC starts to rollover.
- Accepted values range between C(1) and C(10).
- The APIC defaults to C(3) when unset.
type: int
snapshot:
description:
- The name of the snapshot to delete.
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Create a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
state: present
export_policy: config_backup
max_count: 10
description: Backups taken before new configs are applied.
delegate_to: localhost
- name: Query all Snapshots
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
- name: Query Snapshots associated with a particular Export Policy
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: query
delegate_to: localhost
register: query_result
- name: Delete a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
snapshot: run-2017-08-24T17-20-05
state: absent
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point for the aci_config_snapshot module.

    Creates a config snapshot (state=present), lists snapshots
    (state=query), or marks a snapshot for deletion (state=absent).
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        description=dict(type='str', aliases=['descr']),
        export_policy=dict(type='str', aliases=['name']),  # Not required for querying all objects
        format=dict(type='str', choices=['json', 'xml']),
        include_secure=dict(type='bool'),
        max_count=dict(type='int'),
        snapshot=dict(type='str'),
        state=dict(type='str', choices=['absent', 'present', 'query'], default='present'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        required_if=[
            ['state', 'absent', ['export_policy', 'snapshot']],
            ['state', 'present', ['export_policy']],
        ],
    )
    aci = ACIModule(module)

    params = module.params
    description = params['description']
    export_policy = params['export_policy']
    file_format = params['format']
    include_secure = aci.boolean(params['include_secure'])
    state = params['state']

    # The APIC only accepts a rollover count between 1 and 10, and it is
    # sent on the wire as a string.
    max_count = params['max_count']
    if max_count is not None:
        if not 1 <= max_count <= 10:
            module.fail_json(msg="Parameter 'max_count' must be a number between 1 and 10")
        max_count = str(max_count)

    # Snapshot objects are always named 'run-<timestamp>' on the APIC,
    # so prepend the prefix when the caller omitted it.
    snapshot = params['snapshot']
    if snapshot is not None and not snapshot.startswith('run-'):
        snapshot = 'run-' + snapshot

    if state == 'present':
        # Creating a snapshot means POSTing a triggered configExportP.
        aci.construct_url(
            root_class={
                'aci_class': 'configExportP',
                'aci_rn': 'fabric/configexp-{0}'.format(export_policy),
                'module_object': export_policy,
                'target_filter': {'name': export_policy},
            },
        )
        aci.get_existing()
        aci.payload(
            aci_class='configExportP',
            class_config={
                'adminSt': 'triggered',
                'descr': description,
                'format': file_format,
                'includeSecureFields': include_secure,
                'maxSnapshotCount': max_count,
                'name': export_policy,
                'snapshot': 'yes',
            },
        )
        aci.get_diff('configExportP')
        # Create a new Snapshot
        aci.post_config()
    else:
        # query/absent both address existing configSnapshot objects,
        # which live under the snapshot container keyed by the full DN
        # of the export policy.
        if export_policy is not None:
            export_policy = 'uni/fabric/configexp-{0}'.format(export_policy)
        aci.construct_url(
            root_class={
                'aci_class': 'configSnapshotCont',
                'aci_rn': 'backupst/snapshots-[{0}]'.format(export_policy),
                'module_object': export_policy,
                'target_filter': {'name': export_policy},
            },
            subclass_1={
                'aci_class': 'configSnapshot',
                'aci_rn': 'snapshot-{0}'.format(snapshot),
                'module_object': snapshot,
                'target_filter': {'name': snapshot},
            },
        )
        aci.get_existing()
        if state == 'absent':
            # Snapshots are not deleted outright: they are retired.
            aci.payload(
                aci_class='configSnapshot',
                class_config={
                    'name': snapshot,
                    'retire': "yes",
                },
            )
            if aci.existing:
                aci.get_diff('configSnapshot')
                # Mark Snapshot for Deletion
                aci.post_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
DOCUMENTATION = """
options:
- See respective platform section for more details
requirements:
- See respective platform section for more details
notes:
- Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
"""
# Documentation fragment for ONTAP (na_ontap)
NA_ONTAP = """
options:
hostname:
required: true
description:
- The hostname or IP address of the ONTAP instance.
username:
required: true
description:
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
aliases: ['user']
password:
required: true
description:
- Password for the specified user.
aliases: ['pass']
https:
description:
- Enable and disable https
type: bool
default: false
validate_certs:
description:
- If set to C(False), the SSL certificates will not be validated.
- This should only set to C(False) used on personally controlled sites using self-signed certificates.
default: true
type: bool
http_port:
description:
- Override the default port (80 or 443) with this port
type: int
ontapi:
description:
- The ontap api version to use
type: int
requirements:
- A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
- Ansible 2.6
- Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
- Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
- To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
notes:
- The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
"""
# Documentation fragment for ONTAP (na_cdot)
ONTAP = """
options:
hostname:
required: true
description:
- The hostname or IP address of the ONTAP instance.
username:
required: true
description:
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
aliases: ['user']
password:
required: true
description:
- Password for the specified user.
aliases: ['pass']
requirements:
- A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
- Ansible 2.2
- netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
notes:
- The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
"""
# Documentation fragment for SolidFire
SOLIDFIRE = """
options:
hostname:
required: true
description:
- The hostname or IP address of the SolidFire cluster.
username:
required: true
description:
- Please ensure that the user has the adequate permissions. For more information, please read the official documentation
U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
aliases: ['user']
password:
required: true
description:
- Password for the specified user.
aliases: ['pass']
requirements:
- The modules were developed with SolidFire 10.1
- solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
notes:
- The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
"""
# Documentation fragment for E-Series
ESERIES = """
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
api_url:
required: true
description:
- The url to the SANtricity Web Services Proxy or Embedded Web Services API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
required: true
description:
- The ID of the array to manage. This value must be unique for each array.
notes:
- The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
- Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
- M(netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
"""
|
unknown
|
codeparrot/codeparrot-clean
| ||
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006-2007 XenSource Inc.
#============================================================================
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import gettext
import xmlrpclib
import httplib
import socket
import types
translation = gettext.translation('xen-xm', fallback = True)
API_VERSION_1_1 = '1.1'
API_VERSION_1_2 = '1.2'
class Failure(Exception):
    """Raised when a Xen-API call returns a non-Success status; wraps
    the server-supplied ErrorDescription list (Python 2 code)."""
    def __init__(self, details):
        # details: the ErrorDescription list from the server response.
        self.details = details
    def __str__(self):
        try:
            return str(self.details)
        except Exception, exn:
            # Python 2 syntax. str(details) failed; log the secondary
            # error and fall back.
            # NOTE(review): the fallback calls str(self.details) again,
            # which presumably fails the same way — confirm intent.
            import sys
            print >>sys.stderr, exn
            return "Xen-API failure: %s" % str(self.details)
    def _details_map(self):
        # Expose the positional details as {'0': ..., '1': ...}.
        return dict([(str(i), self.details[i])
                     for i in range(len(self.details))])
# Sentinel returned by _parse_result when the session has expired: a
# unique function object that cannot collide with real return values.
_RECONNECT_AND_RETRY = (lambda _ : ())
class UDSHTTPConnection(httplib.HTTPConnection):
    """HTTPConnection over a Unix domain socket.

    The socket path is encoded in the host name with '_' standing in
    for '/' (e.g. '_var_xapi_xapi' -> '/var/xapi/xapi').
    """
    def connect(self):
        # Decode the underscore-escaped socket path, then open an
        # AF_UNIX stream socket to it.
        sock_path = self.host.replace("_", "/")
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(sock_path)
class UDSHTTP(httplib.HTTP):
    # Python 2 old-style httplib.HTTP wrapper bound to the
    # Unix-domain-socket connection class.
    _connection_class = UDSHTTPConnection
class UDSTransport(xmlrpclib.Transport):
    """xmlrpclib Transport that tunnels HTTP over a Unix domain socket
    (Python 2 xmlrpclib API)."""
    def __init__(self, use_datetime=0):
        self._use_datetime = use_datetime
        # Headers added via add_extra_header, sent with every request.
        self._extra_headers=[]
    def add_extra_header(self, key, value):
        self._extra_headers += [ (key,value) ]
    def make_connection(self, host):
        # Python 2 xmlrpclib expects the old HTTP wrapper object here.
        return UDSHTTP(host)
    def send_request(self, connection, handler, request_body):
        # Emit the request line, then inject the extra headers.
        # NOTE(review): overrides Transport.send_request from Python 2
        # xmlrpclib; signature differs on Python 3 — confirm target.
        connection.putrequest("POST", handler)
        for key, value in self._extra_headers:
            connection.putheader(key, value)
class Session(xmlrpclib.ServerProxy):
    """A server proxy and session manager for communicating with xapi using
    the Xen-API.

    Adds opaque session handling on top of xmlrpclib.ServerProxy: login
    calls capture the session handle, which is then transparently
    prepended to the parameters of every subsequent xenapi call, with
    automatic re-login when the server reports SESSION_INVALID.

    Example:
        session = Session('http://localhost/')
        session.login_with_password('me', 'mypassword')
        session.xenapi.VM.start(vm_uuid)
        session.xenapi.session.logout()
    """
    def __init__(self, uri, transport=None, encoding=None, verbose=0,
                 allow_none=1):
        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
                                       verbose, allow_none, use_datetime=1)
        self.uri = uri
        self.transport = transport
        # Opaque session handle returned by the server at login time.
        self._session = None
        # Remembered so an expired session can be re-established.
        self.last_login_method = None
        self.last_login_params = None
        self.API_version = API_VERSION_1_1
    def xenapi_request(self, methodname, params):
        # Route login/logout specially; every other call gets the
        # current session handle prepended, retrying (up to 3 times)
        # with a fresh login when the session has expired.
        if methodname.startswith('login'):
            self._login(methodname, params)
            return None
        elif methodname == 'logout' or methodname == 'session.logout':
            self._logout()
            return None
        else:
            retry_count = 0
            while retry_count < 3:
                full_params = (self._session,) + params
                result = _parse_result(getattr(self, methodname)(*full_params))
                # _parse_result returns the _RECONNECT_AND_RETRY
                # sentinel (a function object) on SESSION_INVALID.
                if type(result) == types.FunctionType and result == _RECONNECT_AND_RETRY:
                    retry_count += 1
                    if self.last_login_method:
                        self._login(self.last_login_method,
                                    self.last_login_params)
                    else:
                        raise xmlrpclib.Fault(401, 'You must log in')
                else:
                    return result
            raise xmlrpclib.Fault(
                500, 'Tried 3 times to get a valid session, but failed')
    def _login(self, method, params):
        # Perform 'session.<method>' and remember the handle plus the
        # credentials for transparent re-login later.
        result = _parse_result(getattr(self, 'session.%s' % method)(*params))
        if result == _RECONNECT_AND_RETRY:
            raise xmlrpclib.Fault(
                500, 'Received SESSION_INVALID when logging in')
        self._session = result
        self.last_login_method = method
        self.last_login_params = params
        self.API_version = self._get_api_version()
    def _logout(self):
        # Clear all session state even if the server-side logout fails.
        try:
            if self.last_login_method.startswith("slave_local"):
                return _parse_result(self.session.local_logout(self._session))
            else:
                return _parse_result(self.session.logout(self._session))
        finally:
            self._session = None
            self.last_login_method = None
            self.last_login_params = None
            self.API_version = API_VERSION_1_1
    def _get_api_version(self):
        # Query the pool master for its advertised API version.
        pool = self.xenapi.pool.get_all()[0]
        host = self.xenapi.pool.get_master(pool)
        major = self.xenapi.host.get_API_version_major(host)
        minor = self.xenapi.host.get_API_version_minor(host)
        return "%s.%s"%(major,minor)
    def __getattr__(self, name):
        # 'handle' exposes the raw session; 'xenapi' starts a dotted
        # dispatch chain; login*/slave_local* become login closures;
        # everything else falls through to ServerProxy.
        if name == 'handle':
            return self._session
        elif name == 'xenapi':
            return _Dispatcher(self.API_version, self.xenapi_request, None)
        elif name.startswith('login') or name.startswith('slave_local'):
            return lambda *params: self._login(name, params)
        else:
            return xmlrpclib.ServerProxy.__getattr__(self, name)
def xapi_local():
    """Return a Session connected to the local xapi daemon over its
    Unix domain socket.

    The host part '_var_xapi_xapi' is the underscore-escaped socket
    path /var/xapi/xapi.
    """
    local_uri = "http://_var_xapi_xapi/"
    return Session(local_uri, transport=UDSTransport())
def _parse_result(result):
    """Unwrap a Xen-API response envelope.

    Returns the 'Value' on success, the _RECONNECT_AND_RETRY sentinel
    when the server reports SESSION_INVALID (so the caller can log in
    again and retry), and raises Failure or xmlrpclib.Fault for every
    other malformed or failing response.
    """
    if not isinstance(result, dict) or 'Status' not in result:
        # Bug fix: the original did `'Missing Status...' + result`,
        # which raises TypeError (result is not a string) instead of
        # the intended Fault. Format the offending value instead.
        raise xmlrpclib.Fault(
            500, 'Missing Status in response from server: %r' % (result,))
    if result['Status'] == 'Success':
        if 'Value' in result:
            return result['Value']
        else:
            raise xmlrpclib.Fault(500,
                                  'Missing Value in response from server')
    else:
        if 'ErrorDescription' in result:
            if result['ErrorDescription'][0] == 'SESSION_INVALID':
                # Expired session: signal the caller to re-login.
                return _RECONNECT_AND_RETRY
            else:
                raise Failure(result['ErrorDescription'])
        else:
            raise xmlrpclib.Fault(
                500, 'Missing ErrorDescription in response from server')
# Based upon _Method from xmlrpclib.
class _Dispatcher:
    """Callable attribute-chain proxy: builds a dotted method name
    (e.g. 'VM.start') one attribute access at a time, then forwards the
    final call to the supplied send function."""
    def __init__(self, API_version, send, name):
        # Double-underscore attributes are name-mangled to
        # _Dispatcher__*, keeping them out of __getattr__'s way.
        self.__API_version = API_version
        self.__send = send
        self.__name = name
    def __repr__(self):
        if self.__name:
            return '<XenAPI._Dispatcher for %s>' % self.__name
        else:
            return '<XenAPI._Dispatcher>'
    def __getattr__(self, name):
        # Each access returns a NEW dispatcher with the extended dotted
        # path; this object itself is never mutated.
        if self.__name is None:
            return _Dispatcher(self.__API_version, self.__send, name)
        else:
            return _Dispatcher(self.__API_version, self.__send, "%s.%s" % (self.__name, name))
    def __call__(self, *args):
        return self.__send(self.__name, args)
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.