code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
"""Module for singleton pattern"""
class Singleton(object):
    """Decorator that turns a class into a (non-thread-safe) singleton.

    Apply as a decorator -- not a metaclass.  The decorated class may only
    define an ``__init__`` that takes ``self`` and cannot be inherited from.
    Obtain the shared instance through :meth:`instance`; calling the
    decorated name directly raises ``TypeError``.
    """

    def __init__(self, decorated):
        # Remember the wrapped class; instantiation is deferred until the
        # first instance() call.
        self._decorated = decorated

    def instance(self):
        """Return the shared instance, creating it lazily on first use."""
        if not hasattr(self, '_instance'):
            self._instance = self._decorated()
        return self._instance

    def __call__(self):
        # Direct construction is forbidden so that there is exactly one way
        # to reach the instance.
        raise TypeError('Singletons must be accessed through `Instance()`.')

    def __instancecheck__(self, inst):
        # Let isinstance(obj, decorated_singleton) test against the wrapped
        # class.
        return isinstance(inst, self._decorated)
import copy
import multiprocessing
import unittest
from unittest import mock
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connection, connections
from django.test import SimpleTestCase
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite tests")
class TestDbSignatureTests(SimpleTestCase):
    """Checks for SQLite test-database naming and clone-settings behaviour."""

    def _isolated_connection(self):
        # Shallow-copy the default connection but deep-copy its settings so
        # the tests can mutate NAME/TEST entries without side effects.
        conn = copy.copy(connections[DEFAULT_DB_ALIAS])
        conn.settings_dict = copy.deepcopy(
            connections[DEFAULT_DB_ALIAS].settings_dict
        )
        return conn

    def test_custom_test_name(self):
        conn = self._isolated_connection()
        conn.settings_dict["NAME"] = None
        conn.settings_dict["TEST"]["NAME"] = "custom.sqlite.db"
        signature = conn.creation_class(conn).test_db_signature()
        self.assertEqual(signature, (None, "custom.sqlite.db"))

    def test_get_test_db_clone_settings_name(self):
        conn = self._isolated_connection()
        cases = (
            ("test.sqlite3", "test_1.sqlite3"),
            ("test", "test_1"),
        )
        for db_name, expected_clone_name in cases:
            with self.subTest(test_db_name=db_name):
                conn.settings_dict["NAME"] = db_name
                conn.settings_dict["TEST"]["NAME"] = db_name
                creation = conn.creation_class(conn)
                clone_settings_dict = creation.get_test_db_clone_settings("1")
                self.assertEqual(clone_settings_dict["NAME"], expected_clone_name)

    @mock.patch.object(multiprocessing, "get_start_method", return_value="unsupported")
    def test_get_test_db_clone_settings_not_supported(self, *mocked_objects):
        msg = "Cloning with start method 'unsupported' is not supported."
        with self.assertRaisesMessage(NotSupportedError, msg):
            connection.creation.get_test_db_clone_settings(1)
//// [tests/cases/compiler/baseClassImprovedMismatchErrors.ts] ////
//// [baseClassImprovedMismatchErrors.ts]
// Input class: `n` is declared as `Base | string`; `fn` returns `number`.
class Base {
    n: Base | string;
    fn() {
        return 10;
    }
}
// Subclass that widens `n` to `Derived | string` and `fn`'s return type to
// `number | string` via an `as` assertion.
class Derived extends Base {
    n: Derived | string;
    fn() {
        return 10 as number | string;
    }
}
// Implements `Base` structurally, with `n` and `fn` return types widened
// the same way as in `Derived`.
class DerivedInterface implements Base {
    n: DerivedInterface | string;
    fn() {
        return 10 as number | string;
    }
}
//// [baseClassImprovedMismatchErrors.js]
"use strict";
// Emitted JavaScript: only the method body survives; `fn` returns 10.
class Base {
    fn() {
        return 10;
    }
}
// Emitted JavaScript: `Derived` keeps the `extends` clause and overrides `fn`.
class Derived extends Base {
    fn() {
        return 10;
    }
}
// Emitted JavaScript: the `implements` clause is erased, leaving a plain class.
class DerivedInterface {
    fn() {
        return 10;
    }
}
#===========================================================================
#
# EpochConverter
#
#===========================================================================
"""EpochConverter module containing class EpochConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.units as units
import matplotlib.dates as date_ticker
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'EpochConverter' ]
#===========================================================================
class EpochConverter(units.ConversionInterface):
    """: A matplotlib converter class.  Provides matplotlib conversion
        functionality for Monte Epoch and Duration classes.
    """

    # Julian date reference for "Jan 1, 0001" minus 1 day because
    # matplotlib really wants "Jan 0, 0001".
    jdRef = 1721425.5 - 1

    #------------------------------------------------------------------------
    @staticmethod
    def axisinfo(unit, axis):
        """: Returns information on how to handle an axis that has Epoch data.

        = INPUT VARIABLES
        - unit    The units to use for an axis with Epoch data.
        - axis    The axis the information applies to.

        = RETURN VALUE
        - Returns a matplotlib AxisInfo data structure that contains
          minor/major formatters, major/minor locators, and default
          label information.
        """
        majloc = date_ticker.AutoDateLocator()
        majfmt = date_ticker.AutoDateFormatter(majloc)
        return units.AxisInfo(majloc=majloc, majfmt=majfmt, label=unit)

    #------------------------------------------------------------------------
    @staticmethod
    def float2epoch(value, unit):
        """: Convert a matplotlib floating-point date into an Epoch of the
        specified units.

        = INPUT VARIABLES
        - value   The matplotlib floating-point date.
        - unit    The unit system to use for the Epoch.

        = RETURN VALUE
        - Returns the value converted to an Epoch in the specified time system.
        """
        # Delay-load due to circular dependencies.
        import matplotlib.testing.jpl_units as U

        # matplotlib dates are whole days; convert to seconds past jdRef.
        secPastRef = value * 86400.0 * U.UnitDbl(1.0, 'sec')
        return U.Epoch(unit, secPastRef, EpochConverter.jdRef)

    #------------------------------------------------------------------------
    @staticmethod
    def epoch2float(value, unit):
        """: Convert an Epoch value to a float suitable for plotting as a
        python datetime object.

        = INPUT VARIABLES
        - value   An Epoch or list of Epochs that need to be converted.
        - unit    The units to use for an axis with Epoch data.

        = RETURN VALUE
        - Returns the value parameter converted to floats.
        """
        return value.julianDate(unit) - EpochConverter.jdRef

    #------------------------------------------------------------------------
    @staticmethod
    def duration2float(value):
        """: Convert a Duration value to a float suitable for plotting as a
        python datetime object.

        = INPUT VARIABLES
        - value   A Duration or list of Durations that need to be converted.

        = RETURN VALUE
        - Returns the value parameter converted to floats.
        """
        return value.days()

    #------------------------------------------------------------------------
    @staticmethod
    def convert(value, unit, axis):
        """: Convert value using unit to a float.  If value is a sequence,
        return the converted sequence.

        = INPUT VARIABLES
        - value   The value or list of values that need to be converted.
        - unit    The units to use for an axis with Epoch data.
        - axis    The axis the values belong to.

        = RETURN VALUE
        - Returns the value parameter converted to floats.
        """
        # Delay-load due to circular dependencies.
        import matplotlib.testing.jpl_units as U

        # Sequences (but not strings) are converted element-wise; an empty
        # sequence simply yields an empty list.
        if iterable(value) and not isinstance(value, six.string_types):
            return [EpochConverter.convert(x, unit, axis) for x in value]

        isNotEpoch = not isinstance(value, U.Epoch)
        isDuration = isinstance(value, U.Duration)

        # Plain numbers pass through unchanged.
        if (isNotEpoch and not isDuration
                and units.ConversionInterface.is_numlike(value)):
            return value

        if unit is None:  # was `unit == None`; identity test is the idiom
            unit = EpochConverter.default_units(value, axis)

        if isDuration:
            return EpochConverter.duration2float(value)
        return EpochConverter.epoch2float(value, unit)

    #------------------------------------------------------------------------
    @staticmethod
    def default_units(value, axis):
        """: Return the default unit for value, or None.

        = INPUT VARIABLES
        - value   The value or list of values that need units.
        - axis    The axis the values belong to.

        = RETURN VALUE
        - Returns the default units to use for value.
        """
        # For sequences the first element determines the unit.
        if iterable(value) and not isinstance(value, six.string_types):
            return EpochConverter.default_units(value[0], axis)
        return value.frame()
import { readFileSync } from 'node:fs'
import { fileURLToPath } from 'node:url'
import { bench } from 'vitest'
import * as CSS from './css-parser.ts'
// Resolve the package root and read the stylesheet once, up front, so the
// benchmarks below measure parsing only, not disk I/O.
const currentFolder = fileURLToPath(new URL('..', import.meta.url))
const cssFile = readFileSync(currentFolder + './preflight.css', 'utf-8')

// Baseline: parse without any source-map bookkeeping.
bench('css-parser on preflight.css', () => {
  CSS.parse(cssFile)
})

// Passing `from` turns on source-map tracking; compare against the baseline.
bench('CSS with sourcemaps', () => {
  CSS.parse(cssFile, { from: 'input.css' })
})
#!/usr/bin/env python
import copy
import operator
import os
import os.path
import pickle
import string
import sys
# File-type codes returned by FileLoader._getFileType().
# Constant for C++ files.
FILETYPE_CPP = 2
# Constant for DDDOC files.
FILETYPE_DDDOC = 1
# Constant for none of the above.
FILETYPE_OTHER = 0

# Encoding used to decode source files before re-encoding to ASCII with
# XML character references.
SOURCE_ENCODING = 'iso8859-1'

# Extension of C++ files.
CPP_EXTS = ['c', 'C', 'cpp', 'CPP', 'c++', 'C++', 'h', 'H', 'hpp', 'HPP',
            'h++', 'H++']
# Extensions of DDDOC files.
DDDOC_EXTS = ['dddoc', 'DDDOC']

# List of ignored directory names.
IGNORED_DIRS = ['CSV', '.svn', 'seeds2', 'find2', 'cmake']

# Module-level globals (mutated elsewhere in the program).
DATA = None
ID = 0

# Text attribute node keys.
TEXT_ATTRIBUTE_KEYS = set(['text', 'table', 'tableheader', 'code', 'console', 'section',
                           'subsection', 'image', 'contents', 'note', 'file', 'snippet',
                           'output'])

# Nodes having paths matching the following patterns are considered text
# container nodes.  Their children having only one more component which is in
# TEXT_ATTRIBUTE_KEYS are processed in a special way.  The last component is
# replaced with 'text' and their content is prefixed by "type=$key:" where $key
# is the original key.  The content of the text container nodes is prefixed with
# "type=$text:" and moved to a child with key 'text'.
TEXT_CONTAINER_PATHS = [
    'Indexpage.*.description',
    'Page.*.description',
    'Page.*.summary',
    'Page.*.glossary.*',
    'Function.*.example',
    'Function.*.summary',
    'Function.*.description',
    'Function.*.remarks',
    'Function.*.status',
    'Class.*.example',
    'Class.*.summary',
    'Class.*.description',
    'Class.*.remarks',
    'Class.*.status',
    'Metafunction.*.example',
    'Metafunction.*.summary',
    'Metafunction.*.description',
    'Metafunction.*.remarks',
    'Metafunction.*.status',
    'Memfunc.*.example',
    'Memfunc.*.summary',
    'Memfunc.*.description',
    'Memfunc.*.remarks',
    'Memfunc.*.status',
    'Memvar.*.example',
    'Memvar.*.summary',
    'Memvar.*.description',
    'Memvar.*.remarks',
    'Memvar.*.status',
    'Macro.*.example',
    'Macro.*.summary',
    'Macro.*.description',
    'Macro.*.remarks',
    'Macro.*.status',
    'Enum.*.example',
    'Enum.*.summary',
    'Enum.*.description',
    'Enum.*.remarks',
    'Enum.*.status',
    'Spec.*.example',
    'Spec.*.summary',
    'Spec.*.description',
    'Spec.*.remarks',
    'Spec.*.status',
    'Shortcut.*.example',
    'Shortcut.*.summary',
    'Shortcut.*.description',
    'Shortcut.*.remarks',
    'Shortcut.*.status',
    'Tag.*.example',
    'Tag.*.summary',
    'Tag.*.description',
    'Tag.*.remarks',
    'Tag.*.status',
    'Typedef.*.example',
    'Typedef.*.summary',
    'Typedef.*.description',
    'Typedef.*.remarks',
    'Typedef.*.status',
    'Demo.*.summary',
    'Demo.*.description',
    'Demo.*.remarks',
    'Demo.*.output',
    'Adaption.*.example',
    'Adaption.*.summary',
    'Adaption.*.description',
    'Adaption.*.remarks',
    'Adaption.*.status',
    'Concept.*.example',
    'Concept.*.summary',
    'Concept.*.description',
    'Concept.*.remarks',
    'Concept.*.status',
    ]
def _pathsMatch(path1, path2):
"""Compare two paths with wildcards."""
if not type(path1) is list:
path1 = splitKeys(path1[int(path1[0] == '.'):], '.') # Strip leading '.', if any.
if not type(path2) is list:
path2 = splitKeys(path2[int(path2[0] == '.'):], '.')
if len(path1) != len(path2):
return False
for i, p1 in enumerate(path1):
p2 = path2[i]
if not (p1 == '*' or p2 == '*' or p1 == p2):
return False
return True
def transformDddocEntry(entry):
    """Apply the text-container node transformations to *entry* in place.

    Returns a list of additional entries to append (possibly empty).
    """
    for container_path in TEXT_CONTAINER_PATHS:
        if _pathsMatch(container_path, entry.path) and entry.content:
            # The entry itself is a text container: move its inline content
            # into a cloned child entry tagged as plain text.
            clone = copy.deepcopy(entry)
            clone.content = 'type=text:' + entry.content
            entry.content = ''
            return [clone]  # Done.
        if entry.path[-1] not in TEXT_ATTRIBUTE_KEYS:
            continue  # Last component is no text attribute key.
        if not _pathsMatch(container_path, entry.path[:-1]):
            continue  # Parent path does not match this pattern.
        # Text attribute node: fold the attribute key into a content prefix
        # and drop the last path component.
        attr_key = entry.path[-1]
        entry.path = entry.path[:-1]
        entry.content = 'type=' + attr_key + ':' + entry.content
        return []  # Done.
    return []  # No updates.
class FileCache(object):
    """Simple file contents cache.

    Maps paths to (mtime, file contents) pairs, persisted via pickle.

    Attrs:
      path     Path to the cache file on disk.
      content  Dict mapping file name to a pair of (mtime, data).
    """

    def __init__(self, path):
        self.path = path
        self.content = {}
        # Best-effort load of a previously flushed cache.
        self._tryLoad()

    def _tryLoad(self):
        """Try to unpickle the cache file; start empty on any failure."""
        try:
            with open(self.path, 'rb') as f:
                self.content = pickle.load(f)
        except:
            # Bare except: any failure (missing file, corrupt pickle) is
            # treated as "no cache" -- deliberately best-effort.
            print >>sys.stderr, 'Could not load cache %s' % self.path
            return False
        print >>sys.stderr, 'Successfully loaded cache %s' % self.path
        return True

    def flush(self):
        """Store the cache to its file."""
        try:
            with open(self.path, 'wb') as f:
                pickle.dump(self.content, f)
        except:
            print >>sys.stderr, 'Could not store cache %s' % self.path
            return False
        print >>sys.stderr, 'Successfully stored cache %s' % self.path
        return True

    def has_key(self, key):
        """Returns True if the cache has data for this key."""
        # Python 2 dict API, mirrored here for convenience.
        return self.content.has_key(key)

    def isFresh(self, filename):
        """Returns True if the cache is fresh.

        The cache is fresh if the file at the given path is not newer than
        the data in the cache.
        """
        if not self.has_key(filename):
            return False
        mtime = os.stat(filename).st_mtime
        # NOTE(review): this compares current file mtime >= cached mtime,
        # which also reports "fresh" for files modified after caching --
        # looks inverted relative to the docstring; confirm intended.
        return mtime >= self.content[filename][0]

    def get(self, key, defaultValue=None):
        """Return content of the given entry."""
        # The stored pair is (mtime, data); only the data part is returned.
        return self.content.get(key, (None, defaultValue))[1]

    def set(self, filename, value):
        """Set cache content and mtime."""
        mtime = os.stat(filename).st_mtime
        self.content[filename] = (mtime, value)
class DddocEntry(object):
    """One parsed DDDoc entry: a key path plus content and source location."""

    def __init__(self, path, content, filename, line_no_begin, line_no_end):
        self.path = path                    # List of path components.
        self.content = content              # Text content of the entry.
        self.filename = filename            # File the entry came from.
        self.line_no_begin = line_no_begin  # First source line.
        self.line_no_end = line_no_end      # One past the last source line.

    def __str__(self):
        fields = (self.path, self.content, self.filename,
                  self.line_no_begin, self.line_no_end)
        return ('DddocEntry(path=%s, content=%s, filename=%s, '
                'line_no_begin=%s, line_no_end=%s)'
                % tuple(repr(f) for f in fields))

    def __repr__(self):
        return self.__str__()

    @classmethod
    def cmpPathLocation(klass, lhs, rhs):
        """Three-way compare by (path, filename, line_no_begin)."""
        lhs_key = (lhs.path, lhs.filename, lhs.line_no_begin)
        rhs_key = (rhs.path, rhs.filename, rhs.line_no_begin)
        # Equivalent to cmp(lhs_key, rhs_key): -1, 0 or 1.
        return (lhs_key > rhs_key) - (lhs_key < rhs_key)
def splitKeys(text, delimiters, limit=None, _cache={}):
"""Splitting that considers escaping of keys using quotes.
>>> splitKeys('.Adaption.\'std::string\'.summary')
['', 'Adaption', '\'std::string\'', 'summary']
"""
if '\u0001' in text:
text = text.split('\u0001', 1)[0] # Remove optional label, used in inheritance.
if _cache.has_key((text, delimiters)):
return _cache[(text, delimiters)]
count = 0
current = []
result = []
str_delimiter = None
for i in range(0, len(text)):
# Handle text in strings.
if str_delimiter:
if text[i] == str_delimiter:
str_delimiter = None
current.append(text[i])
continue
elif text[i] in '\'"':
str_delimiter = text[i]
current.append(text[i])
continue
# Handle non-in-string text.
if text[i] in delimiters:
result.append(''.join(current))
current = []
count += 1
if limit and count >= limit:
result.append(text[i+1:])
_cache[(text, delimiters)] = result
return result
else:
current.append(text[i])
result.append(''.join(current))
_cache[(text, delimiters)] = result
return result
def cleanPath(path_arr):
    """Return a copy of *path_arr* with each element cleaned.

    Cleaning strips surrounding whitespace and removes single and double
    quotes.  A list is returned explicitly (the previous ``map(...)``
    return value is a lazy iterator on Python 3, but callers index and
    slice the result, e.g. via ``DddocEntry.path``).
    """
    def _cleanPathElement(x):
        return x.strip().replace('\'', '').replace('"', '')
    return [_cleanPathElement(x) for x in path_arr]
class FileLoader(object):
    """File loader helper class.

    Walks a directory tree, extracts DDDoc comment text from C++ and
    .dddoc files and turns it into DddocEntry objects.

    Attrs:
      cache    FileCache to use for caching.
      entries  List of DddocEntry objects collected so far.
    """

    def __init__(self, cache):
        self.cache = cache
        self.entries = []

    def _loadDDDOCFile(self, filename, cache):  # TODO(holtgrew): Make Top-Level Function?
        """Return the lines of a .dddoc file, re-encoded to ASCII, via cache."""
        # Try to load from cache.
        if cache.isFresh(filename):
            return cache.get(filename)
        # Load file.
        with open(filename, 'rb') as f:
            text = [x.decode(SOURCE_ENCODING).encode("ascii", "xmlcharrefreplace") for x in f.readlines()]
        cache.set(filename, text)
        return text

    def _loadCPPFile(self, filename, cache):  # TODO(holtgrew): Make Top-Level Function?
        """Extract DDDoc comment lines from a C++ file, via cache.

        Runs a hand-written scanner over the file that recognizes /*...*/
        and //... comments (DDDoc comments start with an extra '/' or '*')
        and skips string literals; non-comment lines become '.' or ' '
        placeholders so that line numbers stay aligned.
        """
        if cache.isFresh(filename):
            return cache.get(filename)
        # TODO(holtgrew): This looks overly complicated.
        f = open(filename)
        lines = [x.decode(SOURCE_ENCODING).encode("ascii", "xmlcharrefreplace") for x in f.readlines()]
        f.close()
        ret = []
        # A file containing SEQAN_NO_DDDOC anywhere is skipped entirely.
        for line in lines:
            if line.find("SEQAN_NO_DDDOC") >= 0:
                cache.set(filename, ret)
                return ret;
        # Scanner state: inside a /*...*/ comment, inside a //-comment that
        # was continued with a trailing backslash, and whether the current
        # comment is a DDDoc comment whose text should be extracted.
        incomment = False
        innextcomment = False
        inextract = False
        for line in lines:
            line = line.rstrip()
            str_line = ""
            if len(line) == 0:
                if not innextcomment and not incomment:
                    str_line = "."
                else:
                    str_line = " "
            while len(line) > 0 :
                if innextcomment:
                    # Continuation of a backslash-continued // comment.
                    if line[len(line)-1] == "\\" :
                        if inextract: str_line += line[: len(line)-1]
                    else:
                        if inextract: str_line += line
                        innextcomment = False
                    break
                elif incomment:
                    # Inside a /*...*/ block; look for its end.
                    pos1 = line.find("*/")
                    if pos1 < 0:
                        if inextract: str_line += line;
                        break;
                    else:
                        if inextract:
                            str_line += line[:pos1];
                            line = line[pos1 + 3:];
                        else:
                            line = line[pos1 + 2:];
                        incomment = False;
                else:
                    # Find the first of /*, // or a string literal.
                    pos1 = line.find("/*")
                    pos2 = line.find("//")
                    pos3 = line.find('"')
                    if (pos1 >= 0) and ((pos2 < 0) or (pos1 < pos2)) and ((pos3 < 0) or (pos1 < pos3)):
                        pos9 = line.find("*/", pos1 + 2)
                        # DDDoc block comments start with /** or /*/ .
                        if (len(line) > pos1 + 2):
                            inextract = (line[pos1 + 2] == "/") or (line[pos1 + 2] == "*")
                        else:
                            inextract = False
                        if pos9 < 0 :
                            if inextract: str_line += line[pos1 + 3:]
                            incomment = True
                            break
                        else:
                            if inextract:
                                # NOTE(review): slicing up to pos3 (the first
                                # '"') rather than pos9 looks suspicious here
                                # -- confirm against upstream dddoc.
                                str_line += line[pos1 + 3: pos3]
                                line = line[pos9 + 3:]
                            else:
                                line = line[pos9 + 2:]
                    elif (pos2 >= 0) and ((pos3 < 0) or (pos2 < pos3)):
                        # Line comment; DDDoc if followed by more '/' or '*'.
                        pos2b = pos2 + 2;
                        while ((pos2b < len(line)) and ((line[pos2b] == "/") or (line[pos2b] == "*"))):
                            pos2b += 1
                        inextract = (pos2b > pos2 + 2)
                        if line[len(line)-1] == "\\" :
                            if inextract: str_line += line[pos2b: len(line)-1]
                            innextcomment = True
                        else:
                            if inextract: str_line += line[pos2b:]
                        break
                    elif pos3 >= 0:
                        # String literal: find the closing quote.
                        pos9 = line.find('"', pos3 + 2)
                        if pos9 < 0:
                            # NOTE(review): with pos9 == -1 this slice is a
                            # no-op before the break -- likely meant for the
                            # found case; confirm against upstream dddoc.
                            line = line[pos9+1:]
                            break
                        else:
                            break
                    else:
                        break
            ret = ret + [str_line]
        cache.set(filename, ret)
        return ret

    def _getFileType(self, filename):  # TODO(holtgrew): Make Top-Level Function?
        """Determines file type from filename.

        Determines the file type from the extension of the given filename.

        Args:
          filename  Filename to parse.

        Returns:
          One of {FILETYPE_CPP, FILETYPE_DDDOC, FILETYPE_OTHER}, depending
          on the extension of filename, e.g. FILETYPE_CPP for 'test.cpp'
          or 'path/file.h' and FILETYPE_DDDOC for 'test.dddoc'.
        """
        # Get file extension.
        base, ext = os.path.splitext(filename)
        if ext[1:] in CPP_EXTS:
            return FILETYPE_CPP
        elif ext[1:] in DDDOC_EXTS:
            return FILETYPE_DDDOC
        else:
            return FILETYPE_OTHER

    def _loadFile(self, filename):
        """Load the file with the given filename.

        The file is then split into DDDoc entries, unwrapping entries that
        span more than one line.  Finally, the keys are expanded, and
        surrounding whitespace is stripped.
        """
        # Load file contents, through a cache.
        file_type = self._getFileType(filename)
        if file_type == FILETYPE_CPP:
            text = self._loadCPPFile(filename, self.cache)
        elif file_type == FILETYPE_DDDOC:
            text = self._loadDDDOCFile(filename, self.cache)
        else:
            # NOTE(review): `Error` is undefined and `path` is unbound here
            # (probably meant Exception/filename); raises NameError if hit.
            raise Error("Unknown file type of file %s." % path)
        text.append('.')
        # Process all lines in the input, join lines that do not begin with
        # a dot with the previous ones.  This allows the wrapping of lines.
        str = False
        dddoc_entries = []  # [(path, filename, begin line no, end line no)]
        line_no_begin, line_no_end = 1, 1
        for line in text:
            line_no_end += 1
            if not line:
                continue
            if line[0] == '.':
                # A new entry starts; flush the previous one if non-empty.
                if str is not False and str[0] == '.' and str != '.' and str.strip():  # Skip empty dummy lines.
                    dddoc_entries.append([str, filename, line_no_begin, line_no_end])
                line_no_begin = line_no_end
                str = line
                if str == '.':
                    str = False
            elif str:
                # Continuation line: append to the current entry.
                if str[-1] != '\n':
                    str += '\n'
                str += line
        # Now, expand the keys of dddoc_entries, e.g. dddoc_entries[i][0].
        # TODO(holtgrew): Consider escaping of keys here.
        # The stack tracks the current key context so that entries starting
        # with repeated dots inherit the matching prefix.
        stack = []
        stack_len_sum = 0
        for entry in dddoc_entries:
            # Split out $key:$value of the entry and $the.$path.$elements from $key.
            maybe_pair = splitKeys(entry[0].strip(), ':', 1)
            if len(maybe_pair) == 2:
                key, value = splitKeys(entry[0].strip(), ':', 1)
            else:
                key, value = entry[0].strip(), ''
            path = splitKeys(key, '.')[1:]
            # Count empty entries in the path.
            empty_count = reduce(operator.add, [1 for x in path if not x], 0)
            if empty_count <= len(stack):
                stack = stack[:empty_count]
                stack_len_sum = reduce(operator.add, map(len, stack), 0)
            stack.append(path[empty_count:])
            stack_len_sum += len(stack[-1])
            path = reduce(operator.add, stack, [])
            # Remove any leading and trailing whitespace from value and compute
            # updated begin and end line no.
            line_count = len(value.splitlines())
            value_no_leading = value.lstrip()
            line_count2 = len(value_no_leading.splitlines())
            line_no_begin = entry[2] + line_count - line_count2
            value_no_trailing = value_no_leading.rstrip()
            line_count3 = len(value_no_trailing.splitlines())
            line_no_end = entry[3] - line_count2 + line_count3
            # Store the DDDoc entry.
            if path:
                self.entries.append(DddocEntry(cleanPath(path), value_no_trailing, filename, line_no_begin, line_no_end))
                new_entries = transformDddocEntry(self.entries[-1])
                self.entries += new_entries

    def run(self, search_path):
        """Call parseFile() on files.

        All files below search_path will be searched that have file type
        FILETYPE_CPP or FILETYPE_DOC as determined by getFileType().
        Directories with names of IGNORED_DIRS are skipped.

        Args:
          search_path  String, path to search files under.
        """
        for root, dirs, files in os.walk(search_path):
            # Parse all files.
            for file in files:
                if os.path.basename(file).startswith('.'):
                    continue  # Skip hidden files.
                path = os.path.join(root, file)
                if self._getFileType(path) in [FILETYPE_CPP, FILETYPE_DDDOC]:
                    self._loadFile(path)
            # Exclude ignored directories (pruned in place for os.walk).
            for ignored in IGNORED_DIRS:
                if ignored in dirs:
                    dirs.remove(ignored)
class DddocTreeNode(object):
    """Represents one entry in the DddocTree.

    Attrs:
      tree      The DddocTree that the node belongs to.
      key       The key of this child, last element of path.
      path      The full path to the child.
      entry     Range [beg, end) of DddocEntry that this node represents.
      children  dict with the children as key/value pairs.
      texts     Array of strings with the texts.
    """

    def __init__(self, tree, key, path, entry, children={}):
        # NOTE(review): the mutable default for `children` is shared across
        # calls that omit it; callers here always pass a dict or overwrite
        # node.children afterwards -- confirm before reusing elsewhere.
        self.tree = tree
        self.key = key
        self.path = path
        self.entry = entry
        self.children = children
        self.texts = []

    def text(self, spacer=' '):
        """Return all text fragments joined with `spacer`."""
        return spacer.join(self.texts)

    def __str__(self):
        """Returns dump for the whole tree in a user-readable manner."""
        def _str(node, level=0, prefix=''):
            # Recursive helper: one line per node, indented by depth.
            space = ' ' * level
            if prefix:
                prefix = prefix + ' --> '
            res = '%s %sDddocTreeNode(key=%s, texts=%s)' % (space, prefix, repr(node.key), repr(node.texts))
            for k, child in node.children.iteritems():
                res += '\n' + _str(child, level + 1, k)
            return res
        return _str(self)

    def dump(self, stream=sys.stdout):
        """Debug recursive dumping of a tree node."""
        print >>stream, self
class DddocTree(object):
    """Tree with the information from the DDDoc contents.

    Attrs:
      entries         The raw entries.
      root            The root DddocTreeNode.
      glossary_nodes  List of nodes that contain glossary entries.  Built
                      in finalize().
    """

    def __init__(self, entries):
        self.entries = entries
        # Build the whole tree from the (pre-sorted) flat entry list.
        self.root = DddocTreeNode(self, 'ROOT', [], (0, 0), self._buildSubtree([], 0, len(entries), 0))
        # find() results are memoized here once finalize() enables caching.
        self.cache = None
        self.glossary_nodes = []

    def _enableFindCache(self):
        # Lazily create the memoization dict used by find().
        if self.cache is None:
            self.cache = {}

    def finalize(self):
        """Called after tree will not be modified any more.

        Enables caching and builds some indices.
        """
        self._enableFindCache()
        print >>sys.stderr, 'Indexing Glossary Pages'
        if 'Page' in self.root.children:
            for key, node in self.root.children['Page'].children.iteritems():
                if 'glossary' in node.children:
                    self.glossary_nodes.append(node.children['glossary'])
                    print >>sys.stderr, ' Found Page.%s' % node.key

    def _buildSubtree(self, path, begin_index, end_index, level):
        """Recursively build children dict for entries[begin_index:end_index]."""
        # First, identify the entries belonging to each node (entry.path[i]
        # are equal for i = level, inductively, also i <= level).
        prev_key = None
        prev_beg = None
        subseqs = []
        for i in range(begin_index, end_index):
            if prev_key != self.entries[i].path[level]:
                if prev_key != None:
                    subseqs.append((prev_beg, i))
                prev_key = self.entries[i].path[level]
                prev_beg = i
        if prev_key != None and prev_beg != end_index:  # Handle last.
            subseqs.append((prev_beg, end_index))
        # Now, subseqs contains a sequence of contiguous half-open intervals.
        # Each contains the data for one tree node.  There is a possibly
        # empty sequence of leading entries with paths of length level + 1
        # containing the data for the current level node.  The rest is for
        # the level below.
        result = {}
        for (b, c) in subseqs:
            assert b != c
            # Split into entries for this and for next level: [a, b); [b, c).
            a = b  # [a, b) will be for this vertex.
            while b < c and len(self.entries[b].path) == level + 1:
                b += 1
            # Compute node.
            path = self.entries[a].path[:(level + 1)]
            key = path[level]
            node = DddocTreeNode(self, key, path, (a, b))
            for i in range(a, b):
                if self.entries[i].content:
                    node.texts.append(self.entries[i].content)
            # Compute subtree.
            node.children = self._buildSubtree(path, b, c, level + 1)
            result[key] = node
        return result

    def find(self, path):
        """Query tree for a DddocTreeNode.

        The argument path can either be a dot-separated string or a list
        with this information.  If path is a string then one leading dot is
        optional.  Returns None if nothing could be found.

          tree.find(['path', 'to', 'node'])
          tree.find('path.to.node')
          tree.find('.path.to.node')
        """
        # Try to retrieve from cache if there is a cache.
        if not self.cache is None:
            if not type(path) is str:
                key = '.'.join(path)
            else:
                key = path
            if self.cache.has_key(key):
                return self.cache[key]
        # Split path if is string, ignore leading dot if any.
        if type(path) is str:
            path = splitKeys(path, '.')
            if path and path[0] == '':
                path = path[1:]
        # Now, query the tree.
        def findRecurse(node, path):
            """Helper function that searches for the node with given path."""
            if not path:
                return node
            if not node.children.has_key(path[0]):
                return None
            return findRecurse(node.children[path[0]], path[1:])
        res = findRecurse(self.root, path)
        if not self.cache is None:
            self.cache['.'.join(path)] = res
        return res
# Paths where the inline summary is moved into a .summary child.  See
# documentation of processInlineSummaries() for details.  Patterns use the
# same '*' wildcards as _matchTreesInNode().
SUMMARY_PATHS = [
    '*.*.param.*',
    '*.*.returns',
    '*.*.tag.*',
    '*.*.value.*',
    '*.*.returns.param.*',  # TODO(holtgrew): Used for metafunctions, could be improved.
    'Adaption.*',
    'Class.*',
    'Concept.*',
    'Demo.*',
    'Enum.*',
    'Function.*',
    'Macro.*',
    'Memfunc.*',
    'Metafunction.*',
    'Shortcut.*',
    'Spec.*',
    'Tag.*',
    ]
# TODO(holtgrew): Also use for generateAutomaticReferences()
def _matchTreesInNode(tree, node, path, func, block_paths=[['globals']], level=0):
    """Calls func on nodes matching path.

    path is a list of components where '*' matches any child.  Nodes whose
    path starts with one of block_paths are skipped.  The mutable defaults
    are only read, never modified, so sharing them is safe here.
    """
    if path:
        if path[0] == '*':
            # Wildcard: descend into every child.
            for child in node.children.itervalues():
                _matchTreesInNode(tree, child, path[1:], func, block_paths, level+1)
        else:
            if node.children.has_key(path[0]):
                _matchTreesInNode(tree, node.children[path[0]], path[1:], func, block_paths, level+1)
    else:
        # Whole pattern consumed: invoke func unless the node is blocked.
        for block_path in block_paths:
            if node.path[:len(block_path)] == block_path:
                return  # Path is blocked.
        func(node)
def processInlineSummaries(tree, paths):
    """Move inline documentation to .summary subpaths.

    For every tree node matching one of *paths*, text attached directly to
    the node is relocated into its 'summary' child for greater consistency
    and lower variance.  E.g. the following:

      .Function.f.param.x:This is param x.

    will be transformed into

      .Function.f.param.x
      ..summary:This is param x.
    """
    # First, gather every matching node that carries inline text.
    with_inline_text = set()
    def collect(node):
        if node.texts:
            with_inline_text.add(node)
    for pattern in paths:
        _matchTreesInNode(tree, tree.root, splitKeys(pattern, '.'), collect)
    # Then relocate the inline text into a (possibly fresh) summary child.
    for node in with_inline_text:
        if 'summary' not in node.children:
            node.children['summary'] = DddocTreeNode(
                tree, 'summary', node.path + ['summary'], (-2, -2))
        node.children['summary'].texts += node.texts
        node.texts = []
def generateAutomaticReferences(tree):
    """Interpret the globals.relations entries.

    Each child of globals.relations maps a relation key to path patterns;
    every node matching a pattern gets a back-reference under that key.
    """
    print >>sys.stderr, 'Generating Automatic References'
    relations_node = tree.find('globals.relations')
    if not relations_node:
        return  # Empty, do nothing.
    # We first collect all automatic links, scheduled to be added later.
    additions = []
    def appendToAdditions(node):
        # NOTE: closes over the loop variable `key` below; safe because it
        # is only invoked synchronously inside that loop.
        for node_path in node.texts:
            node_path = splitKeys(node_path, '.')
            res = tree.find(node_path)
            if not res:
                continue  # Not found, Skip # TODO(holtgrew): Warning?
            additions.append((res.path + [key], '.'.join(node.path[:2])))
    for key, node in relations_node.children.iteritems():
        for txt in node.texts:
            # NOTE(review): `path` is computed but unused; the split is
            # repeated in the call below.
            path = splitKeys(txt, '.')
            _matchTreesInNode(tree, tree.root, splitKeys(txt, '.'), appendToAdditions)
    # Now, add these additions.  This circumvents problems leading to
    # infinite recursions.
    for path, text in additions:
        res = tree.find(path)
        if not res:
            # Create the referenced child on demand.
            parent = tree.find(path[:-1])
            assert parent
            res = DddocTreeNode(tree, path[-1], path, None)
            parent.children[path[-1]] = res
        if not text in res.texts:
            res.texts.append(text)
def generateInheritedElements(tree):
    """Push through inheritances.

    globals.inherit children have the form
    $TARGET_FIELD:$THROUGH_FIELD.$SOURCE_FIELD; fields are copied along the
    through-field links in dependency order.
    """
    print >>sys.stderr, 'Linking Inherited Entities'
    inherit_node = tree.find('globals.inherit')
    # Contains children: $TARGET_FIELD:$THROUGH_FIELD.$SOURCE_FIELD
    all_paths = set()
    depends_on = {}
    inheritance_rules = []
    # First build a dependency graph.
    for target_field, child in inherit_node.children.items():
        for txt in child.texts:
            arr = splitKeys(txt, '.')
            through_field = arr[0]
            if len(arr) > 1:
                source_field = arr[1]
            else:
                source_field = target_field
            inheritance_rules.append((target_field, through_field, source_field))
            def registerDependencies(node):
                # Closes over through_field of the current rule; record which
                # paths each node's inheritance links depend on.
                all_paths.add('.'.join(node.path))
                if not through_field in node.children:
                    return
                for path in node.children[through_field].texts:
                    pth = '.'.join(node.path)
                    depends_on.setdefault(pth, set()).add(path)
            _matchTreesInNode(tree, tree.root, ['*', '*'], registerDependencies)
    # Now, push through references by inheritance for all paths that are not
    # linked to and not completed yet.
    done = set()
    to_do = all_paths - done - set(depends_on.keys())
    while to_do:
        # Process all that are not completed and have no dependencies.
        # NOTE(review): this guard is unreachable inside `while to_do:`;
        # the cyclic-dependency check never fires as written -- confirm.
        if not to_do:
            raise Exception('Could not process all dependencies. Cyclic dependencies?')
        # Actually perform the preprocessing.
        for target_path in to_do:
            for target_field, through_field, source_field in inheritance_rules:
                target_node = tree.find(target_path)
                if not through_field in target_node.children:
                    continue  # Skip if no source children.
                for source_path in target_node.children[through_field].texts:
                    source_node = tree.find(source_path)
                    if not source_field in source_node.children:
                        continue  # Skip if no source field.
                    for path in source_node.children[source_field].texts:
                        if not '\u0001' in path:  # We use this ugly hack to add the inheritance source here.
                            path = path + '\u0001' + '.'.join(source_node.path)
                        # If necessary then create child in target node.
                        if not target_field in target_node.children:
                            target_node.children[target_field] = DddocTreeNode(tree, target_field, target_node.path + [target_field], source_node.children[source_field].entry)
                        # Copy over path.
                        target_node.children[target_field].texts.append(path)
        # Clear out the stuff that we completed.
        to_delete = []
        for key in depends_on:  # Clear out all done.
            depends_on[key] -= to_do
            if not depends_on[key]:
                to_delete.append(key)
        for key in to_delete:
            del depends_on[key]
        done |= to_do  # Add done.
        to_do = all_paths - done - set(depends_on.keys())
def removeDuplicateTexts(tree):
    """Remove duplicates from the ``texts`` members of all tree nodes.

    Two strings are considered equal if they are equal after stripping any
    suffix starting with the '\u0001' marker (the marker records the
    inheritance origin, see generateInheritedElements).  When duplicates
    are found, the variant WITH the marker suffix is preferred, since it
    carries the extra origin information.

    NOTE: Python 2 code (dict.itervalues).
    """
    def recurse(node):
        in_cleaned = {}  # stripped text -> index into `cleaned`
        cleaned = []     # de-duplicated texts, original order preserved
        for txt in node.texts:
            clean = txt
            pos = txt.find('\u0001')
            if pos != -1:
                clean = txt[:pos]
            if clean in in_cleaned:
                # BUG FIX: previously this tested `'\u0001' in clean`, but
                # `clean` has the marker suffix stripped by construction, so
                # the preference for marked strings could never trigger.
                # Test the original string `txt` instead.
                if '\u0001' in txt and not '\u0001' in cleaned[in_cleaned[clean]]:
                    cleaned[in_cleaned[clean]] = txt
            else:
                in_cleaned[clean] = len(cleaned)
                cleaned.append(txt)
        node.texts = cleaned
        for child in node.children.itervalues():
            recurse(child)
    for child in tree.root.children.itervalues():
        recurse(child)
# TODO(holtgrew): If needed, this could easily be generalized.
# TODO(holtgrew): If needed, this could easily be generalized.
def buildByTypeAndCatIndex(tree):
    """Build an index into the given DddocTree.

    The index is a two-level dict.  For nodes that carry a 'cat' child it
    maps (first path element, cat text) to a list of nodes; for nodes
    without a category it maps (first path element, second path element)
    to the node itself.
    """
    index = {}
    def visit(index, path, node):
        if len(path) == 2:
            cat_child = node.children.get('cat')
            if cat_child is not None:
                for cat in cat_child.texts:
                    index.setdefault(path[0], {}).setdefault(cat, []).append(node)
            else:
                index.setdefault(path[0], {})[path[1]] = node
        if len(path) < 2:
            # Descend one more level; only paths of length <= 2 are indexed.
            for child_key, child in node.children.items():
                visit(index, path + [child_key], child)
    for root_key, child in tree.root.children.items():
        visit(index, [root_key], child)
    return index
class ErrorLogger(object):
    """Counts and prints reference errors found during doc processing.

    NOTE: Python 2 code (``print >>`` statement syntax).
    """
    def __init__(self):
        # Total number of errors reported so far.
        self.error_count = 0

    def invalidReference(self, txt, locations):
        """Report an invalid reference `txt`.

        locations is a list of (filename, line) pairs naming candidate
        source locations; may be empty/None when unknown.
        """
        self.error_count += 1
        if not locations:
            print >>sys.stderr, 'ERROR: Invalid Reference %s in unknown location (sorry).' % txt
        else:
            print >>sys.stderr, 'ERROR: Invalid Reference %s in one of the following locations:' % txt
            for filename, line in locations:
                print >>sys.stderr, '  %s:%s' % (filename, line)
class App(object):
    """Application object for DDDoc.

    Provides a facade to the functionality of the core module.

    Usage:
        app = App()
        app.loadFiles([<files>])
        app.loadFiles([<files>])
        app.loadingComplete()

    Attrs:
        data  The global state Data object.
    """

    def __init__(self):
        """Initialize object members."""
        self.cache = FileCache('dddoc_cache.bin')    # persisted parse cache
        self.file_loader = FileLoader(self.cache)
        self.dddoc_tree = None                       # built in loadingComplete()
        self.error_logger = ErrorLogger()

    def loadFiles(self, path):
        """Load the files with the given file name."""
        self.file_loader.run(path)

    def loadingComplete(self):
        """Finalize loading and build the documentation tree.

        This method is called after all calls to loadFiles().
        """
        # Save the cache to disk again.
        self.cache.flush()
        # Sort Dddoc Entries and build tree.
        # NOTE(review): sort(cmp=...) is Python 2 only.
        self.file_loader.entries.sort(cmp=DddocEntry.cmpPathLocation)
        self.dddoc_tree = DddocTree(self.file_loader.entries)
        # Generate automatic references.
        generateAutomaticReferences(self.dddoc_tree)
        # Perform inheritance as configured in global configuration.
        generateInheritedElements(self.dddoc_tree)
        # Clean duplicates from 'texts' members
        removeDuplicateTexts(self.dddoc_tree)
        # Move inline summaries into .summary children.
        processInlineSummaries(self.dddoc_tree, SUMMARY_PATHS)
        # Finally, after all modifications, enable caching and build indices in
        # tree.
        self.dddoc_tree.finalize()

    def getNextId(self):
        """Returns an identifier.

        Each id is only returned once.

        NOTE(review): currently disabled -- the assert below always fires,
        and `self.next_id` is never initialized in __init__; both must be
        addressed before this can be enabled.
        """
        assert False, "For future use."
        self.next_id += 1
        return self.next_id - 1
# -*- coding: utf-8 -*-
#
# Test links:
# http://remixshare.com/download/p946u
#
# Note:
# The remixshare.com website is very very slow, so
# if your download not starts because of pycurl timeouts:
# Adjust timeouts in /usr/share/pyload/module/network/HTTPRequest.py
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class RemixshareCom(SimpleHoster):
    """pyLoad hoster plugin for remixshare.com (Python 2 plugin API)."""
    __name__    = "RemixshareCom"
    __type__    = "hoster"
    __version__ = "0.02"

    __pattern__ = r'https?://remixshare\.com/(download|dl)/\w+'

    __description__ = """Remixshare.com hoster plugin"""
    __license__     = "GPLv3"
    __authors__     = [("zapp-brannigan", "fuerst.reinje@web.de"),
                       ("Walter Purcaro", "vuolter@gmail.com")]

    # Extracts file name (N), size (S) and size unit (U) from the page.
    INFO_PATTERN    = r'title=\'.+?\'>(?P<N>.+?)</span><span class=\'light2\'> \((?P<S>\d+) (?P<U>[\w^_]+)\)<'
    OFFLINE_PATTERN = r'<h1>Ooops!<'

    # Download link base URL; the account token is appended to it.
    LINK_PATTERN  = r'(http://remixshare\.com/downloadfinal/.+?)"'
    TOKEN_PATTERN = r'var acc = (\d+)'
    # Wait time in seconds announced by the page, if any.
    WAIT_PATTERN  = r'var XYZ = r"(\d+)"'

    def setup(self):
        self.multiDL    = True
        self.chunkLimit = 1

    def handleFree(self):
        """Build the final download URL from the page and start the download."""
        b = re.search(self.LINK_PATTERN, self.html)
        if not b:
            self.error(_("Cannot parse download url"))
        c = re.search(self.TOKEN_PATTERN, self.html)
        if not c:
            self.error(_("Cannot parse file token"))
        # Final URL = link base + numeric account token.
        dl_url = b.group(1) + c.group(1)
        #Check if we have to wait
        seconds = re.search(self.WAIT_PATTERN, self.html)
        if seconds:
            self.logDebug("Wait " + seconds.group(1))
            self.wait(int(seconds.group(1)))
        # Finally start downloading...
        self.download(dl_url, disposition=True)


getInfo = create_getInfo(RemixshareCom)
#see http://stackoverflow.com/questions/10391123/how-to-run-two-python-blocking-functions-matplotlib-show-and-twisted-reactor-r
if __name__ == '__main__':
    # When run as a script, delegate immediately to level1.main() and exit;
    # the remainder of this module only executes on import.
    from level1 import main
    raise SystemExit(main())
from matplotlib import use
use('GTK')
from matplotlib import pyplot
from matplotlib.backends import backend_gtk
from twisted.internet import gtk2reactor
gtk2reactor.install()
#OK, we are done with wierd stuff here, the rest is vanilla
from twisted.internet import reactor, task
from steering.twisted_steering import press
import os
import numpy as np
import time
from math import pi
from pylab import get_current_fig_manager
import networkx as nx
from planning.astar.global_map import (plot_map, GlobalMap,
MIN_UNCONTRAINED_PENALTY)
from screen_capture.localize_map import LocalizeMap
from screen_capture.capture import Capture, find_best_image
from planning.astar.local_graph import plan_path
from smoothing.gd import smooth_graph, graph_to_path
from control.robot_control import particles, robot
from utils import root
class LocalizationDisplay(object):
    """Matplotlib window that shows the global map and the current fix."""

    def __init__(self):
        self.fig, self.ax = plot_map()
        #position window properly
        thismanager = get_current_fig_manager()
        try:
            thismanager.window.wm_geometry("+700+0")  # Tk backend
        except AttributeError:
            self.fig.canvas.manager.window.move(700, 0)  # GTK/Qt backends
        self.ax.set_aspect('equal')
        self.ax.set_xlim(0, 700)
        self.ax.set_ylim(0, 500)
        # NOTE(review): Axes.hold() was removed in matplotlib 3.x.
        self.ax.hold(True)
        self.fig.canvas.draw()

    def update(self, map_box):
        """Zoom onto `map_box` = (x0, y0, x1, y1) and mark its center."""
        (x0, y0, x1, y1) = map_box
        self.ax.set_xlim([x0, x1])
        self.ax.set_ylim([y1, y0])  # inverted y: image coordinates
        #new_position = (max_loc[0] + w/2, max_loc[1] + h/2)
        pyplot.scatter([(x0 + x1)/2],
                       [(y0 + y1)/2])
        self.fig.canvas.draw()
class LocalizationMapper(object):
    """Plans a path on the global map and localizes the robot per frame.

    Combines screen capture, template-based map localization, an A*-planned
    and smoothed path, a particle filter and a PD steering controller.
    NOTE: Python 2 code (print statements).
    """

    def __init__(self):
        map_filename = os.path.join(root, 'flash', 'fft2', 'processed', 'aligned_localization_data_map.png')
        self.mapper = LocalizeMap(map_filename)
        filename = os.path.join(root, 'flash', 'fft2', 'processed', 'level1_start.png')
        self.c = Capture(filename)
        #default starting value
        self.start_pos = [2650, 2650]
        self.goal_pos = [1900, 400]
        #from twiddle
        weight_data = 1.1
        weight_smooth = 0.2
        self.p_gain = 2.0     # PD controller: proportional gain
        self.d_gain = 6.0     # PD controller: derivative gain
        self.steering_noise = 0.01
        self.distance_noise = 0.05
        self.measurement_noise = 0.05
        self.speed = 2
        #planning
        print "planning..."
        graph_path = plan_path(self.start_pos, self.goal_pos)
        #extract points from graph
        path_pos = nx.get_node_attributes(graph_path, 'pos')
        #smooth
        print "smoothing..."
        sg = smooth_graph(graph_path, self.start_pos, self.goal_pos, True,
                          weight_data, weight_smooth)
        #extract points from ad smoothed graph
        sg_pos = nx.get_node_attributes(sg, 'pos')
        #convert graph to spath
        self.spath = graph_to_path(sg)
        #plot smoothed path on a graph
        nx.draw(sg, sg_pos, node_size=5, edge_color='r')

    def run(self):
        """Perform one localize/control step; returns (time, map_box, robot_pos).

        NOTE(review): the early ``return (None, None, None)`` below looks like
        a leftover debugging short-circuit -- everything after it (particle
        filter, PD steering, sensing) is unreachable.  Confirm intent before
        removing it.
        """
        prev_map_box = None
        mg = nx.DiGraph()
        myrobot = robot()
        template = self.c.snap_gray()
        map_box = self.mapper.localize(template, None)
        (x0, y0, x1, y1) = map_box
        #this is approximate sensor measurement
        ax = (x0 + x1)/2
        ay = (y0 + y1)/2
        self.start_pos = (ax, ay)
        return (None, None, None)
        # --- unreachable below this line (see docstring note) ---
        myrobot.set(self.start_pos[0], self.start_pos[1], -pi/2)
        mg.add_node(0, pos=(myrobot.x, myrobot.y))
        myrobot.set_noise(self.steering_noise,
                          self.distance_noise,
                          self.measurement_noise)
        pfilter = particles(myrobot.x, myrobot.y, myrobot.orientation,
                            self.steering_noise,
                            self.distance_noise,
                            self.measurement_noise)
        cte = 0.0
        err = 0.0
        N = 0
        index = 0  # index into the path
        if not myrobot.check_goal(self.goal_pos):
            start_time = time.time()
            diff_cte = -cte
            # ----------------------------------------
            # compute the CTE
            estimate = pfilter.get_position()
            x, y, theta = estimate
            #find the rigt spath segment for the current estimate
            while True:
                x1, y1 = self.spath[index]
                Rx = x - x1
                Ry = y - y1
                x2, y2 = self.spath[index + 1]
                dx = x2 - x1
                dy = y2 - y1
                u = abs(Rx*dx + Ry*dy)/(dx*dx + dy*dy)
                if u > 1 and index < (len(self.spath) - 2):
                    index +=1
                else:
                    break
            # cross-track error: signed distance to the segment
            cte = (Ry * dx - Rx * dy) / (dx * dx + dy * dy)
            diff_cte += cte
            steer = - self.p_gain * cte - self.d_gain * diff_cte
            myrobot = myrobot.move(steer, self.speed, real=True)
            pfilter.move(steer, self.speed)
            #sense
            template = self.c.snap_gray()
            map_box = self.mapper.localize(template, prev_map_box)
            prev_map_box = map_box
            (x0, y0, x1, y1) = map_box
            #this is approximate sensor measurement
            ax = (x0 + x1)/2
            ay = (y0 + y1)/2
            Z = (ax, ay)
            pfilter.sense(Z)
            err += (cte ** 2)
            N += 1
            robot_pos = (myrobot.x, myrobot.y)
            #send update to matplotlib
            time_pos = (time.time(), map_box, robot_pos)
            end_time = time.time()
            #fps
            fps = 1/(end_time-start_time)
            print "%2d frames per sec\r" % fps,
        return time_pos
# Module-level singletons used by main()'s looping task below.
display = LocalizationDisplay()
mapper = LocalizationMapper()
#replaced the call to pyplot.show() with a call to my own Show subclass with a mainloop
class TwistedShow(backend_gtk.Show):
    """matplotlib Show replacement whose mainloop is the Twisted reactor."""
    running = False

    def mainloop(self):
        # Guard clause: reactor.run() must only ever be entered once.
        if self.running:
            return
        self.running = True
        reactor.run()
def main():
    """Schedule the per-frame localization step and start the reactor."""
    def proof():
        # One localization step; runs as fast as the reactor allows.
        global display, mapper
        start_time = time.time()
        (t, map_box, robot) = mapper.run()
        #display.update(map_box)
        end_time = time.time()
        fps = 1/(end_time-start_time)
        print "%2d frames per sec\r" % fps,
    task.LoopingCall(proof).start(0)

# Instantiate the Show subclass and invoke it: enters reactor.run().
TwistedShow()()
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.collect;
import static com.google.common.collect.BoundType.OPEN;
import static com.google.common.collect.testing.Helpers.mapEntry;
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.collect.testing.MapTestSuiteBuilder;
import com.google.common.collect.testing.SampleElements;
import com.google.common.collect.testing.TestMapGenerator;
import com.google.common.collect.testing.features.CollectionFeature;
import com.google.common.collect.testing.features.CollectionSize;
import com.google.common.collect.testing.features.MapFeature;
import com.google.common.testing.EqualsTester;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.jspecify.annotations.NullUnmarked;
/**
* Tests for {@code TreeRangeMap}.
*
* @author Louis Wasserman
*/
@GwtIncompatible // NavigableMap
@NullUnmarked
public class TreeRangeMapTest extends TestCase {
  /**
   * Builds the full test suite: this class's own tests plus four generated
   * map-contract suites exercising {@code asMapOfRanges} and
   * {@code asDescendingMapOfRanges} on both the full map and a sub-range view.
   */
  @AndroidIncompatible // test-suite builders
  public static Test suite() {
    TestSuite suite = new TestSuite();
    suite.addTestSuite(TreeRangeMapTest.class);
    // Map contract over TreeRangeMap.asMapOfRanges().
    suite.addTest(
        MapTestSuiteBuilder.using(
                new TestMapGenerator<Range<Integer>, String>() {
                  @Override
                  public SampleElements<Entry<Range<Integer>, String>> samples() {
                    return new SampleElements<>(
                        mapEntry(Range.singleton(0), "banana"),
                        mapEntry(Range.closedOpen(3, 5), "frisbee"),
                        mapEntry(Range.atMost(-1), "fruitcake"),
                        mapEntry(Range.open(10, 15), "elephant"),
                        mapEntry(Range.closed(20, 22), "umbrella"));
                  }

                  @Override
                  public Map<Range<Integer>, String> create(Object... elements) {
                    RangeMap<Integer, String> rangeMap = TreeRangeMap.create();
                    for (Object o : elements) {
                      @SuppressWarnings("unchecked")
                      Entry<Range<Integer>, String> entry = (Entry<Range<Integer>, String>) o;
                      rangeMap.put(entry.getKey(), entry.getValue());
                    }
                    return rangeMap.asMapOfRanges();
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Entry<Range<Integer>, String>[] createArray(int length) {
                    return (Entry<Range<Integer>, String>[]) new Entry<?, ?>[length];
                  }

                  @Override
                  public Iterable<Entry<Range<Integer>, String>> order(
                      List<Entry<Range<Integer>, String>> insertionOrder) {
                    return Range.<Integer>rangeLexOrdering().onKeys().sortedCopy(insertionOrder);
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Range<Integer>[] createKeyArray(int length) {
                    return (Range<Integer>[]) new Range<?>[length];
                  }

                  @Override
                  public String[] createValueArray(int length) {
                    return new String[length];
                  }
                })
            .named("TreeRangeMap.asMapOfRanges")
            .withFeatures(
                CollectionSize.ANY,
                MapFeature.SUPPORTS_REMOVE,
                MapFeature.ALLOWS_ANY_NULL_QUERIES,
                CollectionFeature.KNOWN_ORDER,
                CollectionFeature.SUPPORTS_ITERATOR_REMOVE)
            .createTestSuite());
    // Same contract through a subRangeMap view (no iterator removal there).
    suite.addTest(
        MapTestSuiteBuilder.using(
                new TestMapGenerator<Range<Integer>, String>() {
                  @Override
                  public SampleElements<Entry<Range<Integer>, String>> samples() {
                    return new SampleElements<>(
                        mapEntry(Range.singleton(0), "banana"),
                        mapEntry(Range.closedOpen(3, 5), "frisbee"),
                        mapEntry(Range.atMost(-1), "fruitcake"),
                        mapEntry(Range.open(10, 15), "elephant"),
                        mapEntry(Range.closed(20, 22), "umbrella"));
                  }

                  @Override
                  public Map<Range<Integer>, String> create(Object... elements) {
                    RangeMap<Integer, String> rangeMap = TreeRangeMap.create();
                    for (Object o : elements) {
                      @SuppressWarnings("unchecked")
                      Entry<Range<Integer>, String> entry = (Entry<Range<Integer>, String>) o;
                      rangeMap.put(entry.getKey(), entry.getValue());
                    }
                    return rangeMap.subRangeMap(Range.atMost(22)).asMapOfRanges();
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Entry<Range<Integer>, String>[] createArray(int length) {
                    return (Entry<Range<Integer>, String>[]) new Entry<?, ?>[length];
                  }

                  @Override
                  public Iterable<Entry<Range<Integer>, String>> order(
                      List<Entry<Range<Integer>, String>> insertionOrder) {
                    return Range.<Integer>rangeLexOrdering().onKeys().sortedCopy(insertionOrder);
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Range<Integer>[] createKeyArray(int length) {
                    return (Range<Integer>[]) new Range<?>[length];
                  }

                  @Override
                  public String[] createValueArray(int length) {
                    return new String[length];
                  }
                })
            .named("TreeRangeMap.subRangeMap.asMapOfRanges")
            .withFeatures(
                CollectionSize.ANY,
                MapFeature.SUPPORTS_REMOVE,
                MapFeature.ALLOWS_ANY_NULL_QUERIES,
                CollectionFeature.KNOWN_ORDER)
            .createTestSuite())
;
    // Descending view: entries ordered by reversed range-lex order.
    suite.addTest(
        MapTestSuiteBuilder.using(
                new TestMapGenerator<Range<Integer>, String>() {
                  @Override
                  public SampleElements<Entry<Range<Integer>, String>> samples() {
                    return new SampleElements<>(
                        mapEntry(Range.singleton(0), "banana"),
                        mapEntry(Range.closedOpen(3, 5), "frisbee"),
                        mapEntry(Range.atMost(-1), "fruitcake"),
                        mapEntry(Range.open(10, 15), "elephant"),
                        mapEntry(Range.closed(20, 22), "umbrella"));
                  }

                  @Override
                  public Map<Range<Integer>, String> create(Object... elements) {
                    RangeMap<Integer, String> rangeMap = TreeRangeMap.create();
                    for (Object o : elements) {
                      @SuppressWarnings("unchecked")
                      Entry<Range<Integer>, String> entry = (Entry<Range<Integer>, String>) o;
                      rangeMap.put(entry.getKey(), entry.getValue());
                    }
                    return rangeMap.asDescendingMapOfRanges();
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Entry<Range<Integer>, String>[] createArray(int length) {
                    return (Entry<Range<Integer>, String>[]) new Entry<?, ?>[length];
                  }

                  @Override
                  public Iterable<Entry<Range<Integer>, String>> order(
                      List<Entry<Range<Integer>, String>> insertionOrder) {
                    return Range.<Integer>rangeLexOrdering()
                        .reverse()
                        .onKeys()
                        .sortedCopy(insertionOrder);
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Range<Integer>[] createKeyArray(int length) {
                    return (Range<Integer>[]) new Range<?>[length];
                  }

                  @Override
                  public String[] createValueArray(int length) {
                    return new String[length];
                  }
                })
            .named("TreeRangeMap.asDescendingMapOfRanges")
            .withFeatures(
                CollectionSize.ANY,
                MapFeature.SUPPORTS_REMOVE,
                MapFeature.ALLOWS_ANY_NULL_QUERIES,
                CollectionFeature.KNOWN_ORDER,
                CollectionFeature.SUPPORTS_ITERATOR_REMOVE)
            .createTestSuite());
    // Descending view of a subRangeMap.
    suite.addTest(
        MapTestSuiteBuilder.using(
                new TestMapGenerator<Range<Integer>, String>() {
                  @Override
                  public SampleElements<Entry<Range<Integer>, String>> samples() {
                    return new SampleElements<>(
                        mapEntry(Range.singleton(0), "banana"),
                        mapEntry(Range.closedOpen(3, 5), "frisbee"),
                        mapEntry(Range.atMost(-1), "fruitcake"),
                        mapEntry(Range.open(10, 15), "elephant"),
                        mapEntry(Range.closed(20, 22), "umbrella"));
                  }

                  @Override
                  public Map<Range<Integer>, String> create(Object... elements) {
                    RangeMap<Integer, String> rangeMap = TreeRangeMap.create();
                    for (Object o : elements) {
                      @SuppressWarnings("unchecked")
                      Entry<Range<Integer>, String> entry = (Entry<Range<Integer>, String>) o;
                      rangeMap.put(entry.getKey(), entry.getValue());
                    }
                    return rangeMap.subRangeMap(Range.atMost(22)).asDescendingMapOfRanges();
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Entry<Range<Integer>, String>[] createArray(int length) {
                    return (Entry<Range<Integer>, String>[]) new Entry<?, ?>[length];
                  }

                  @Override
                  public Iterable<Entry<Range<Integer>, String>> order(
                      List<Entry<Range<Integer>, String>> insertionOrder) {
                    return Range.<Integer>rangeLexOrdering()
                        .reverse()
                        .onKeys()
                        .sortedCopy(insertionOrder);
                  }

                  @SuppressWarnings("unchecked")
                  @Override
                  public Range<Integer>[] createKeyArray(int length) {
                    return (Range<Integer>[]) new Range<?>[length];
                  }

                  @Override
                  public String[] createValueArray(int length) {
                    return new String[length];
                  }
                })
            .named("TreeRangeMap.subRangeMap.asDescendingMapOfRanges")
            .withFeatures(
                CollectionSize.ANY,
                MapFeature.SUPPORTS_REMOVE,
                MapFeature.ALLOWS_ANY_NULL_QUERIES,
                CollectionFeature.KNOWN_ORDER)
            .createTestSuite());
    return suite;
  }
private static final ImmutableList<Range<Integer>> RANGES;
private static final int MIN_BOUND = -1;
private static final int MAX_BOUND = 1;
static {
ImmutableList.Builder<Range<Integer>> builder = ImmutableList.builder();
builder.add(Range.<Integer>all());
// Add one-ended ranges
for (int i = MIN_BOUND; i <= MAX_BOUND; i++) {
for (BoundType type : BoundType.values()) {
builder.add(Range.upTo(i, type));
builder.add(Range.downTo(i, type));
}
}
// Add two-ended ranges
for (int i = MIN_BOUND; i <= MAX_BOUND; i++) {
for (int j = i; j <= MAX_BOUND; j++) {
for (BoundType lowerType : BoundType.values()) {
for (BoundType upperType : BoundType.values()) {
if (i == j & lowerType == OPEN & upperType == OPEN) {
continue;
}
builder.add(Range.range(i, lowerType, j, upperType));
}
}
}
}
RANGES = builder.build();
}
  /** span() of a one-entry map equals that range; it throws only when the range is empty. */
  public void testSpanSingleRange() {
    for (Range<Integer> range : RANGES) {
      RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
      rangeMap.put(range, 1);
      try {
        assertEquals(range, rangeMap.span());
        assertFalse(range.isEmpty());
      } catch (NoSuchElementException e) {
        // Expected iff the inserted range was empty (map stayed empty).
        assertTrue(range.isEmpty());
      }
    }
  }
  /** span() of a two-entry map is the span of the two non-empty ranges, or throws if both empty. */
  public void testSpanTwoRanges() {
    for (Range<Integer> range1 : RANGES) {
      for (Range<Integer> range2 : RANGES) {
        RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
        rangeMap.put(range1, 1);
        rangeMap.put(range2, 2);
        // Compute the expected span by hand; null means "map is empty".
        Range<Integer> expected;
        if (range1.isEmpty()) {
          if (range2.isEmpty()) {
            expected = null;
          } else {
            expected = range2;
          }
        } else {
          if (range2.isEmpty()) {
            expected = range1;
          } else {
            expected = range1.span(range2);
          }
        }
        try {
          assertEquals(expected, rangeMap.span());
          assertThat(expected).isNotNull();
        } catch (NoSuchElementException e) {
          assertThat(expected).isNull();
        }
      }
    }
  }
public void testAllRangesAlone() {
for (Range<Integer> range : RANGES) {
Map<Integer, Integer> model = new HashMap<>();
putModel(model, range, 1);
RangeMap<Integer, Integer> test = TreeRangeMap.create();
test.put(range, 1);
verify(model, test);
}
}
  /** Every ordered pair of puts must match the per-integer model (second put wins on overlap). */
  public void testAllRangePairs() {
    for (Range<Integer> range1 : RANGES) {
      for (Range<Integer> range2 : RANGES) {
        Map<Integer, Integer> model = new HashMap<>();
        putModel(model, range1, 1);
        putModel(model, range2, 2);
        RangeMap<Integer, Integer> test = TreeRangeMap.create();
        test.put(range1, 1);
        test.put(range2, 2);
        verify(model, test);
      }
    }
  }
  /** Exhaustive check over all ordered triples of puts against the per-integer model. */
  public void testAllRangeTriples() {
    for (Range<Integer> range1 : RANGES) {
      for (Range<Integer> range2 : RANGES) {
        for (Range<Integer> range3 : RANGES) {
          Map<Integer, Integer> model = new HashMap<>();
          putModel(model, range1, 1);
          putModel(model, range2, 2);
          putModel(model, range3, 3);
          RangeMap<Integer, Integer> test = TreeRangeMap.create();
          test.put(range1, 1);
          test.put(range2, 2);
          test.put(range3, 3);
          verify(model, test);
        }
      }
    }
  }
  /** putAll(otherRangeMap) must behave like putting the other map's entries one by one. */
  public void testPutAll() {
    for (Range<Integer> range1 : RANGES) {
      for (Range<Integer> range2 : RANGES) {
        for (Range<Integer> range3 : RANGES) {
          Map<Integer, Integer> model = new HashMap<>();
          putModel(model, range1, 1);
          putModel(model, range2, 2);
          putModel(model, range3, 3);
          RangeMap<Integer, Integer> test = TreeRangeMap.create();
          RangeMap<Integer, Integer> test2 = TreeRangeMap.create();
          // put range2 and range3 into test2, and then put test2 into test
          test.put(range1, 1);
          test2.put(range2, 2);
          test2.put(range3, 3);
          test.putAll(test2);
          verify(model, test);
        }
      }
    }
  }
  /** A put followed by a remove of any range must match the per-integer model. */
  public void testPutAndRemove() {
    for (Range<Integer> rangeToPut : RANGES) {
      for (Range<Integer> rangeToRemove : RANGES) {
        Map<Integer, Integer> model = new HashMap<>();
        putModel(model, rangeToPut, 1);
        removeModel(model, rangeToRemove);
        RangeMap<Integer, Integer> test = TreeRangeMap.create();
        test.put(rangeToPut, 1);
        test.remove(rangeToRemove);
        verify(model, test);
      }
    }
  }
  /** Two puts followed by one remove, exhaustively, against the per-integer model. */
  public void testPutTwoAndRemove() {
    for (Range<Integer> rangeToPut1 : RANGES) {
      for (Range<Integer> rangeToPut2 : RANGES) {
        for (Range<Integer> rangeToRemove : RANGES) {
          Map<Integer, Integer> model = new HashMap<>();
          putModel(model, rangeToPut1, 1);
          putModel(model, rangeToPut2, 2);
          removeModel(model, rangeToRemove);
          RangeMap<Integer, Integer> test = TreeRangeMap.create();
          test.put(rangeToPut1, 1);
          test.put(rangeToPut2, 2);
          test.remove(rangeToRemove);
          verify(model, test);
        }
      }
    }
  }
  // identical to testPutTwoAndRemove,
  // verifies that putCoalescing() doesn't cause any mappings to change relative to put()
  public void testPutCoalescingTwoAndRemove() {
    for (Range<Integer> rangeToPut1 : RANGES) {
      for (Range<Integer> rangeToPut2 : RANGES) {
        for (Range<Integer> rangeToRemove : RANGES) {
          Map<Integer, Integer> model = new HashMap<>();
          putModel(model, rangeToPut1, 1);
          putModel(model, rangeToPut2, 2);
          removeModel(model, rangeToRemove);
          RangeMap<Integer, Integer> test = TreeRangeMap.create();
          test.putCoalescing(rangeToPut1, 1);
          test.putCoalescing(rangeToPut2, 2);
          test.remove(rangeToRemove);
          verify(model, test);
        }
      }
    }
  }
public void testPutCoalescing() {
// {[0..1): 1, [1..2): 1, [2..3): 2} -> {[0..2): 1, [2..3): 2}
RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
rangeMap.putCoalescing(Range.closedOpen(0, 1), 1);
rangeMap.putCoalescing(Range.closedOpen(1, 2), 1);
rangeMap.putCoalescing(Range.closedOpen(2, 3), 2);
assertEquals(
ImmutableMap.of(Range.closedOpen(0, 2), 1, Range.closedOpen(2, 3), 2),
rangeMap.asMapOfRanges());
}
  /** putCoalescing an empty range still glues together the connected equal-valued neighbors. */
  public void testPutCoalescingEmpty() {
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.closedOpen(0, 1), 1);
    rangeMap.put(Range.closedOpen(1, 2), 1);
    assertEquals(
        ImmutableMap.of(Range.closedOpen(0, 1), 1, Range.closedOpen(1, 2), 1),
        rangeMap.asMapOfRanges());

    rangeMap.putCoalescing(Range.closedOpen(1, 1), 1); // empty range coalesces connected ranges
    assertEquals(ImmutableMap.of(Range.closedOpen(0, 2), 1), rangeMap.asMapOfRanges());
  }
  /** Same empty-range coalescing through a subRangeMap view; backing map changes too. */
  public void testPutCoalescingSubmapEmpty() {
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.closedOpen(0, 1), 1);
    rangeMap.put(Range.closedOpen(1, 2), 1);
    assertEquals(
        ImmutableMap.of(Range.closedOpen(0, 1), 1, Range.closedOpen(1, 2), 1),
        rangeMap.asMapOfRanges());

    RangeMap<Integer, Integer> subRangeMap = rangeMap.subRangeMap(Range.closedOpen(0, 2));
    subRangeMap.putCoalescing(Range.closedOpen(1, 1), 1); // empty range coalesces connected ranges
    assertEquals(ImmutableMap.of(Range.closedOpen(0, 2), 1), subRangeMap.asMapOfRanges());
    assertEquals(ImmutableMap.of(Range.closedOpen(0, 2), 1), rangeMap.asMapOfRanges());
  }
  /** Mixed scenario: coalescing across disconnected, enclosing and different-valued ranges. */
  public void testPutCoalescingComplex() {
    // {[0..1): 1, [1..3): 1, [3..5): 1, [7..10): 2, [12..15): 2, [18..19): 3}
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.closedOpen(0, 1), 1);
    rangeMap.put(Range.closedOpen(1, 3), 1);
    rangeMap.put(Range.closedOpen(3, 5), 1);
    rangeMap.put(Range.closedOpen(7, 10), 2);
    rangeMap.put(Range.closedOpen(12, 15), 2);
    rangeMap.put(Range.closedOpen(18, 19), 3);

    rangeMap.putCoalescing(Range.closedOpen(-5, -4), 0); // disconnected
    rangeMap.putCoalescing(Range.closedOpen(-6, -5), 0); // lower than minimum

    rangeMap.putCoalescing(Range.closedOpen(2, 4), 1); // between
    rangeMap.putCoalescing(Range.closedOpen(9, 14), 0); // different value
    rangeMap.putCoalescing(Range.closedOpen(17, 20), 3); // enclosing

    rangeMap.putCoalescing(Range.closedOpen(22, 23), 4); // disconnected
    rangeMap.putCoalescing(Range.closedOpen(23, 25), 4); // greater than minimum

    // {[-6..-4): 0, [0..1): 1, [1..5): 1, [7..9): 2,
    //  [9..14): 0, [14..15): 2, [17..20): 3, [22..25): 4}
    assertEquals(
        new ImmutableMap.Builder<>()
            .put(Range.closedOpen(-6, -4), 0)
            .put(Range.closedOpen(0, 1), 1) // not coalesced
            .put(Range.closedOpen(1, 5), 1)
            .put(Range.closedOpen(7, 9), 2)
            .put(Range.closedOpen(9, 14), 0)
            .put(Range.closedOpen(14, 15), 2)
            .put(Range.closedOpen(17, 20), 3)
            .put(Range.closedOpen(22, 25), 4)
            .build(),
        rangeMap.asMapOfRanges());
  }
  /**
   * Exhaustively verifies subRangeMap views (contents, both iteration orders,
   * span, point and range lookups) against an expected map built by intersecting
   * every entry with the sub-range.
   */
  public void testSubRangeMapExhaustive() {
    for (Range<Integer> range1 : RANGES) {
      for (Range<Integer> range2 : RANGES) {
        RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
        rangeMap.put(range1, 1);
        rangeMap.put(range2, 2);

        for (Range<Integer> subRange : RANGES) {
          RangeMap<Integer, Integer> expected = TreeRangeMap.create();
          for (Entry<Range<Integer>, Integer> entry : rangeMap.asMapOfRanges().entrySet()) {
            if (entry.getKey().isConnected(subRange)) {
              expected.put(entry.getKey().intersection(subRange), entry.getValue());
            }
          }
          RangeMap<Integer, Integer> subRangeMap = rangeMap.subRangeMap(subRange);
          assertEquals(expected, subRangeMap);
          assertEquals(expected.asMapOfRanges(), subRangeMap.asMapOfRanges());
          assertEquals(expected.asDescendingMapOfRanges(), subRangeMap.asDescendingMapOfRanges());
          // Descending view must be exactly the reverse of the ascending view.
          assertEquals(
              ImmutableList.copyOf(subRangeMap.asMapOfRanges().entrySet()).reverse(),
              ImmutableList.copyOf(subRangeMap.asDescendingMapOfRanges().entrySet()));

          if (!expected.asMapOfRanges().isEmpty()) {
            assertEquals(expected.span(), subRangeMap.span());
          }

          for (int i = MIN_BOUND; i <= MAX_BOUND; i++) {
            assertEquals(expected.get(i), subRangeMap.get(i));
          }

          for (Range<Integer> query : RANGES) {
            assertEquals(
                expected.asMapOfRanges().get(query), subRangeMap.asMapOfRanges().get(query));
          }
        }
      }
    }
  }
  /** A subRangeMap of a subRangeMap restricts to the intersection of both ranges. */
  public void testSubSubRangeMap() {
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.open(3, 7), 1);
    rangeMap.put(Range.closed(9, 10), 2);
    rangeMap.put(Range.closed(12, 16), 3);
    RangeMap<Integer, Integer> sub1 = rangeMap.subRangeMap(Range.closed(5, 11));
    assertEquals(
        ImmutableMap.of(Range.closedOpen(5, 7), 1, Range.closed(9, 10), 2), sub1.asMapOfRanges());
    RangeMap<Integer, Integer> sub2 = sub1.subRangeMap(Range.open(6, 15));
    assertEquals(
        ImmutableMap.of(Range.open(6, 7), 1, Range.closed(9, 10), 2), sub2.asMapOfRanges());
  }
  /**
   * put() through a subRangeMap view writes through to the backing map, rejects
   * ranges outside the view, and an empty-view put is a no-op.
   */
  public void testSubRangeMapPut() {
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.open(3, 7), 1);
    rangeMap.put(Range.closed(9, 10), 2);
    rangeMap.put(Range.closed(12, 16), 3);
    RangeMap<Integer, Integer> sub = rangeMap.subRangeMap(Range.closed(5, 11));
    assertEquals(
        ImmutableMap.of(Range.closedOpen(5, 7), 1, Range.closed(9, 10), 2), sub.asMapOfRanges());
    sub.put(Range.closed(7, 9), 4);
    assertEquals(
        ImmutableMap.of(
            Range.closedOpen(5, 7), 1, Range.closed(7, 9), 4, Range.openClosed(9, 10), 2),
        sub.asMapOfRanges());
    // Write-through: the backing map reflects the sub-view's put.
    assertEquals(
        ImmutableMap.of(
            Range.open(3, 7),
            1,
            Range.closed(7, 9),
            4,
            Range.openClosed(9, 10),
            2,
            Range.closed(12, 16),
            3),
        rangeMap.asMapOfRanges());

    // Puts extending beyond the view's range are rejected.
    assertThrows(IllegalArgumentException.class, () -> sub.put(Range.open(9, 12), 5));

    RangeMap<Integer, Integer> subSub = sub.subRangeMap(Range.closedOpen(5, 5));
    subSub.put(Range.closedOpen(5, 5), 6); // should be a no-op
    assertEquals(
        ImmutableMap.of(
            Range.open(3, 7),
            1,
            Range.closed(7, 9),
            4,
            Range.openClosed(9, 10),
            2,
            Range.closed(12, 16),
            3),
        rangeMap.asMapOfRanges());
  }
  public void testSubRangeMapPutCoalescing() {
    // putCoalescing on a sub-range map must merge with connected, equal-valued
    // neighbors, and the merge must be visible through the backing map.
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.open(3, 7), 1);
    rangeMap.put(Range.closed(9, 10), 2);
    rangeMap.put(Range.closed(12, 16), 3);
    RangeMap<Integer, Integer> sub = rangeMap.subRangeMap(Range.closed(5, 11));
    assertEquals(
        ImmutableMap.of(Range.closedOpen(5, 7), 1, Range.closed(9, 10), 2), sub.asMapOfRanges());
    // [7, 9] -> 2 coalesces with the adjacent [9, 10] -> 2 into [7, 10] -> 2.
    sub.putCoalescing(Range.closed(7, 9), 2);
    assertEquals(
        ImmutableMap.of(Range.closedOpen(5, 7), 1, Range.closed(7, 10), 2), sub.asMapOfRanges());
    assertEquals(
        ImmutableMap.of(Range.open(3, 7), 1, Range.closed(7, 10), 2, Range.closed(12, 16), 3),
        rangeMap.asMapOfRanges());
    // {7} -> 1 coalesces leftward with [5, 7) -> 1, but only within the
    // sub-range: the backing map keeps (3, 5) -> 1 as a separate entry.
    sub.putCoalescing(Range.singleton(7), 1);
    assertEquals(
        ImmutableMap.of(Range.closed(5, 7), 1, Range.openClosed(7, 10), 2), sub.asMapOfRanges());
    assertEquals(
        ImmutableMap.of(
            Range.open(3, 5),
            1,
            Range.closed(5, 7),
            1,
            Range.openClosed(7, 10),
            2,
            Range.closed(12, 16),
            3),
        rangeMap.asMapOfRanges());
    // Writes extending beyond the sub-range [5, 11] are rejected.
    assertThrows(IllegalArgumentException.class, () -> sub.putCoalescing(Range.open(9, 12), 5));
  }
  public void testSubRangeMapRemove() {
    // remove() on a sub-range map clears only the intersection with its range
    // and writes through to the backing map.
    RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.open(3, 7), 1);
    rangeMap.put(Range.closed(9, 10), 2);
    rangeMap.put(Range.closed(12, 16), 3);
    RangeMap<Integer, Integer> sub = rangeMap.subRangeMap(Range.closed(5, 11));
    assertEquals(
        ImmutableMap.of(Range.closedOpen(5, 7), 1, Range.closed(9, 10), 2), sub.asMapOfRanges());
    // Removing [7, 9] truncates [9, 10] to (9, 10]; [5, 7) is unaffected.
    sub.remove(Range.closed(7, 9));
    assertEquals(
        ImmutableMap.of(Range.closedOpen(5, 7), 1, Range.openClosed(9, 10), 2),
        sub.asMapOfRanges());
    assertEquals(
        ImmutableMap.of(Range.open(3, 7), 1, Range.openClosed(9, 10), 2, Range.closed(12, 16), 3),
        rangeMap.asMapOfRanges());
    // [3, 9] reaches below the sub-range; only the part within [5, 11] is
    // removed from the backing map, leaving (3, 5) -> 1 intact.
    sub.remove(Range.closed(3, 9));
    assertEquals(ImmutableMap.of(Range.openClosed(9, 10), 2), sub.asMapOfRanges());
    assertEquals(
        ImmutableMap.of(Range.open(3, 5), 1, Range.openClosed(9, 10), 2, Range.closed(12, 16), 3),
        rangeMap.asMapOfRanges());
  }
public void testSubRangeMapClear() {
RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
rangeMap.put(Range.open(3, 7), 1);
rangeMap.put(Range.closed(9, 10), 2);
rangeMap.put(Range.closed(12, 16), 3);
RangeMap<Integer, Integer> sub = rangeMap.subRangeMap(Range.closed(5, 11));
sub.clear();
assertEquals(
ImmutableMap.of(Range.open(3, 5), 1, Range.closed(12, 16), 3), rangeMap.asMapOfRanges());
}
public void testCopyOfTreeRangeMap() {
RangeMap<Integer, Integer> rangeMap = TreeRangeMap.create();
rangeMap.put(Range.open(3, 7), 1);
rangeMap.put(Range.closed(9, 10), 2);
rangeMap.put(Range.closed(12, 16), 3);
RangeMap<Integer, Integer> copy = TreeRangeMap.copyOf(rangeMap);
assertEquals(rangeMap.asMapOfRanges(), copy.asMapOfRanges());
}
public void testCopyOfImmutableRangeMap() {
ImmutableRangeMap<Integer, Integer> rangeMap =
ImmutableRangeMap.<Integer, Integer>builder()
.put(Range.open(3, 7), 1)
.put(Range.closed(9, 10), 2)
.put(Range.closed(12, 16), 3)
.build();
RangeMap<Integer, Integer> copy = TreeRangeMap.copyOf(rangeMap);
assertEquals(rangeMap.asMapOfRanges(), copy.asMapOfRanges());
}
  // Overriding testEquals because it seems that we get spurious failures when it thinks empty
  // should be unequal to empty.
  public void testEquals() {
    TreeRangeMap<Integer, Integer> empty = TreeRangeMap.create();
    TreeRangeMap<Integer, Integer> nonEmpty = TreeRangeMap.create();
    nonEmpty.put(Range.all(), 1);
    // Built differently but coalesces to the same mapping as nonEmpty.
    TreeRangeMap<Integer, Integer> coalesced = TreeRangeMap.create();
    coalesced.put(Range.atLeast(1), 1);
    coalesced.putCoalescing(Range.atMost(1), 1);
    TreeRangeMap<Integer, Integer> differentValues = TreeRangeMap.create();
    differentValues.put(Range.closedOpen(1, 2), 2);
    differentValues.put(Range.closedOpen(3, 4), 2);
    // Same shape as differentValues but keyed by Double, so unequal to it.
    TreeRangeMap<Double, Integer> differentTypes = TreeRangeMap.create();
    differentTypes.put(Range.closedOpen(1.0, 2.0), 2);
    differentTypes.put(Range.closedOpen(3.0, 4.0), 2);
    new EqualsTester()
        .addEqualityGroup(empty, TreeRangeMap.<Integer, Integer>create())
        .addEqualityGroup(nonEmpty, coalesced)
        .addEqualityGroup(differentValues)
        .addEqualityGroup(differentTypes)
        .testEquals();
  }
private void verify(Map<Integer, Integer> model, RangeMap<Integer, Integer> test) {
for (int i = MIN_BOUND - 1; i <= MAX_BOUND + 1; i++) {
assertEquals(model.get(i), test.get(i));
Entry<Range<Integer>, Integer> entry = test.getEntry(i);
assertEquals(model.containsKey(i), entry != null);
if (entry != null) {
assertTrue(test.asMapOfRanges().entrySet().contains(entry));
}
}
for (Range<Integer> range : test.asMapOfRanges().keySet()) {
assertFalse(range.isEmpty());
}
}
private static void putModel(Map<Integer, Integer> model, Range<Integer> range, int value) {
for (int i = MIN_BOUND - 1; i <= MAX_BOUND + 1; i++) {
if (range.contains(i)) {
model.put(i, value);
}
}
}
private static void removeModel(Map<Integer, Integer> model, Range<Integer> range) {
for (int i = MIN_BOUND - 1; i <= MAX_BOUND + 1; i++) {
if (range.contains(i)) {
model.remove(i);
}
}
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/collect/TreeRangeMapTest.java |
"""
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Return the modified Huber loss for margins z = y_pred * y_true.

    Piecewise: 0 for z >= 1, the quadratic (1 - z)^2 on [-1, 1), and the
    linear tail -4z for z < -1.
    """
    margin = y_pred * y_true
    return np.where(
        margin >= 1.0,
        0.0,
        np.where(margin >= -1.0, (1.0 - margin) ** 2, -4.0 * margin),
    )
# Plot each loss as a function of the margin y * f(x) over [-4, 4].
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2  # shared line width for all curves
# Zero-one loss drawn as a step: 1 for a misclassification (f(x) <= 0), else 0.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
         label="Zero-one loss")
# Hinge: max(0, 1 - z).
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
         label="Hinge loss")
# Perceptron: max(0, -z).
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
         label="Perceptron loss")
# Logistic loss, here scaled by 1/log(2) so it passes through (0, 1).
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
         label="Log loss")
# Squared hinge: max(0, 1 - z)^2.
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
         linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
from mtpy.gui.SmartMT.visualization import MultipleMTResponses
from tests.SmartMT import SmartMTGUITestCase, _click_area
class TestGUIPlotMultipleMTResponse(SmartMTGUITestCase):
    """GUI smoke tests for the :class:`MultipleMTResponses` plot type.

    Every test drives the same click sequence -- choose a compare-type radio
    button (optionally after enabling the "all" mode), switch on every
    optional overlay (skew, phase tensor, the three strike options and both
    induction arrows), rotate the data, and finally render the plot.  The
    six public test methods differ only in which radio buttons they press,
    so the shared sequence is factored into :meth:`_exercise_plot`.
    """

    # Rendering the complete image can take a very long time.
    _PLOT_TIMEOUT_MS = 60000

    def _exercise_plot(self, compare_button, select_all=False):
        """Run the common click sequence and render the plot.

        :param compare_button: attribute name of the compare-type radio
            button on the plot-control UI (e.g. ``'radioButton_1'``).
        :param select_all: when True, click ``radioButton_all`` first.
        """
        plot_config = self._switch_to_plot(MultipleMTResponses, 2)  # type: MultipleMTResponses
        control = plot_config._plot_control_ui.ui
        if select_all:
            _click_area(control.radioButton_all)
        _click_area(getattr(control, compare_button), pos=self._pos_check_box)
        # Enable every optional overlay.
        for checkbox in (control.checkBox_skew,
                         control.checkBox_pt,
                         control.checkBox_strike_i,
                         control.checkBox_strike_p,
                         control.checkBox_strike_t,
                         plot_config._arrow_ui.ui.checkBox_real,
                         plot_config._arrow_ui.ui.checkBox_imaginary):
            _click_area(checkbox, pos=self._pos_check_box)
        _click_area(plot_config._rotation_ui.ui.dial_rotation)
        self._plot(self._PLOT_TIMEOUT_MS)

    def test_multiple_mt_response_compare_type_1(self):
        self._exercise_plot('radioButton_1')

    def test_multiple_mt_response_compare_type_2(self):
        self._exercise_plot('radioButton_2')

    def test_multiple_mt_response_compare_type_3(self):
        self._exercise_plot('radioButton_3')

    def test_multiple_mt_response_all_type_1(self):
        self._exercise_plot('radioButton_1', select_all=True)

    def test_multiple_mt_response_all_type_2(self):
        self._exercise_plot('radioButton_2', select_all=True)

    def test_multiple_mt_response_all_type_3(self):
        self._exercise_plot('radioButton_3', select_all=True)
from django import forms
from .models import *
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
import json
class PersonnelForm(forms.ModelForm):
    """Form for creating/editing a Personnel record.

    NOTE(review): ``password`` is declared as an extra (non-model) field in
    addition to the ``Meta.fields`` below; confirm how the view hashes and
    stores it, since the model form itself will not persist it.
    """
    # Rendered as a masked input; not one of the Personnel model fields.
    password = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = Personnel
        fields = ('Person_ID','Role','Dept')
#Form for taking input from user for email frequency
class ProfileForm(forms.ModelForm):
    """Per-user form for choosing the e-mail notification frequency.

    Shows the (read-only) username alongside the editable
    ``Notification_time`` field and persists the choice to the
    ``NotificationTime`` row tied to the user's ``Personnel`` record.
    """

    # Placeholder declaration so 'username' is registered as a form field
    # (and ordered first); it is re-created with the real initial value in
    # __init__.  The original placeholder label 'dd' was never displayed.
    username = forms.CharField(label='Username')

    def __init__(self, *args, **kwargs):
        # The view must pass the authenticated user as a keyword argument.
        self.user = kwargs.pop('user')
        self.name = self.user.username
        super(ProfileForm, self).__init__(*args, **kwargs)
        # Read-only display of the username.
        self.fields['username'] = forms.CharField(
            label='Username', initial=self.name, disabled='disabled')

    class Meta:
        model = NotificationTime
        fields = ('Notification_time',)

    def save(self, commit=True):
        """Upsert the user's NotificationTime row and return it.

        NOTE(review): ``commit`` is accepted for ModelForm API compatibility
        but is ignored -- the row is always written to the database.
        """
        # Look up the Personnel record for the logged-in auth user once; the
        # original code re-fetched the same row a second time by primary key.
        person = Personnel.objects.get(LDAP_id=self.user.id)
        obj, _created = NotificationTime.objects.update_or_create(
            Personnel_ID=person,
            defaults={'Notification_time': self.cleaned_data.get('Notification_time')},
        )
        return obj
"""
Graph isomorphism functions.
"""
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Christopher Ellison cellison@cse.ucdavis.edu)'])
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['could_be_isomorphic',
'fast_could_be_isomorphic',
'faster_could_be_isomorphic',
'is_isomorphic']
def could_be_isomorphic(G1, G2):
    """Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.

    Parameters
    ----------
    G1, G2 : graphs
       The two graphs G1 and G2 must be the same type.

    Notes
    -----
    Checks for matching degree, triangle, and number of cliques sequences.
    """
    # Different numbers of nodes rule out isomorphism immediately.
    if G1.order() != G2.order():
        return False

    def node_invariants(G):
        # Sorted per-node [degree, triangle count, clique membership count]
        # signatures; isomorphic graphs must produce identical lists.
        deg = G.degree()
        tri = nx.triangles(G)
        clq = nx.number_of_cliques(G)
        return sorted([deg[v], tri[v], clq[v]] for v in deg)

    return node_invariants(G1) == node_invariants(G2)

graph_could_be_isomorphic = could_be_isomorphic
def fast_could_be_isomorphic(G1, G2):
    """Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.

    Parameters
    ----------
    G1, G2 : graphs
       The two graphs G1 and G2 must be the same type.

    Notes
    -----
    Checks for matching degree and triangle sequences.
    """
    # Different numbers of nodes rule out isomorphism immediately.
    if G1.order() != G2.order():
        return False

    def node_invariants(G):
        # Sorted per-node [degree, triangle count] pairs.
        deg = G.degree()
        tri = nx.triangles(G)
        return sorted([deg[v], tri[v]] for v in deg)

    return node_invariants(G1) == node_invariants(G2)

fast_graph_could_be_isomorphic = fast_could_be_isomorphic
def faster_could_be_isomorphic(G1, G2):
    """Returns False if graphs are definitely not isomorphic.
    True does NOT guarantee isomorphism.

    Parameters
    ----------
    G1, G2 : graphs
       The two graphs G1 and G2 must be the same type.

    Notes
    -----
    Checks for matching degree sequences.
    """
    # Different numbers of nodes rule out isomorphism immediately.
    if G1.order() != G2.order():
        return False
    # Isomorphic graphs necessarily share the same sorted degree sequence.
    return sorted(G1.degree().values()) == sorted(G2.degree().values())

faster_graph_could_be_isomorphic = faster_could_be_isomorphic
def is_isomorphic(G1, G2, node_match=None, edge_match=None):
    """Returns True if the graphs G1 and G2 are isomorphic and False otherwise.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be the same type.

    node_match : callable
        A function that returns True if node n1 in G1 and n2 in G2 should
        be considered equal during the isomorphism test.
        If node_match is not specified then node attributes are not considered.

        The function will be called like
        node_match(G1.node[n1], G2.node[n2]).
        That is, the function will receive the node attribute dictionaries
        for n1 and n2 as inputs.

    edge_match : callable
        A function that returns True if the edge attribute dictionary
        for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
        be considered equal during the isomorphism test. If edge_match is
        not specified then edge attributes are not considered.

        The function will be called like
        edge_match(G1[u1][v1], G2[u2][v2]).
        That is, the function will receive the edge attribute dictionaries
        of the edges under consideration.

    Notes
    -----
    Uses the vf2 algorithm [1]_.

    Examples
    --------
    >>> import networkx.algorithms.isomorphism as iso

    For digraphs G1 and G2, using 'weight' edge attribute (default: 1)

    >>> G1 = nx.DiGraph()
    >>> G2 = nx.DiGraph()
    >>> G1.add_path([1,2,3,4],weight=1)
    >>> G2.add_path([10,20,30,40],weight=2)
    >>> em = iso.numerical_edge_match('weight', 1)
    >>> nx.is_isomorphic(G1, G2)  # no weights considered
    True
    >>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights
    False

    For multidigraphs G1 and G2, using 'fill' node attribute (default: '')

    >>> G1 = nx.MultiDiGraph()
    >>> G2 = nx.MultiDiGraph()
    >>> G1.add_nodes_from([1,2,3],fill='red')
    >>> G2.add_nodes_from([10,20,30,40],fill='red')
    >>> G1.add_path([1,2,3,4],weight=3, linewidth=2.5)
    >>> G2.add_path([10,20,30,40],weight=3)
    >>> nm = iso.categorical_node_match('fill', 'red')
    >>> nx.is_isomorphic(G1, G2, node_match=nm)
    True

    For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7)

    >>> G1.add_edge(1,2, weight=7)
    >>> G2.add_edge(10,20)
    >>> em = iso.numerical_multiedge_match('weight', 7, rtol=1e-6)
    >>> nx.is_isomorphic(G1, G2, edge_match=em)
    True

    For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes
    with default values 7 and 2.5. Also using 'fill' node attribute with
    default value 'red'.

    >>> em = iso.numerical_multiedge_match(['weight', 'linewidth'], [7, 2.5])
    >>> nm = iso.categorical_node_match('fill', 'red')
    >>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm)
    True

    See Also
    --------
    numerical_node_match, numerical_edge_match, numerical_multiedge_match
    categorical_node_match, categorical_edge_match, categorical_multiedge_match

    References
    ----------
    .. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento,
       "An Improved Algorithm for Matching Large Graphs",
       3rd IAPR-TC15 Workshop on Graph-based Representations in
       Pattern Recognition, Cuen, pp. 149-159, 2001.
       http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
    """
    # Dispatch to the matcher class for the (shared) directedness; mixing a
    # directed with an undirected graph is an error.
    if G1.is_directed() and G2.is_directed():
        GM = nx.algorithms.isomorphism.DiGraphMatcher
    elif (not G1.is_directed()) and (not G2.is_directed()):
        GM = nx.algorithms.isomorphism.GraphMatcher
    else:
       raise NetworkXError("Graphs G1 and G2 are not of the same type.")

    gm = GM(G1, G2, node_match=node_match, edge_match=edge_match)

    return gm.is_isomorphic()
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_group
short_description: Manage device groups on a BIG-IP
description:
- Managing device groups allows you to create HA pairs and clusters
of BIG-IP devices. Usage of this module should be done in conjunction
with the C(bigip_configsync_actions) to sync configuration across
the pair or cluster if auto-sync is disabled.
version_added: 2.5
options:
name:
description:
- Specifies the name of the device group.
required: True
type:
description:
      - Specifies the type of group. A C(sync-failover) device group
contains devices that synchronize their configuration data and fail
over to one another when a device becomes unavailable. A C(sync-only)
device group has no such failover. When creating a new device group,
this option will default to C(sync-only). This setting cannot be
changed once it has been set.
choices:
- sync-failover
- sync-only
description:
description:
- Description of the device group.
auto_sync:
description:
- Indicates whether configuration synchronization occurs manually or
automatically. When creating a new device group, this option will
default to C(false).
type: bool
save_on_auto_sync:
description:
- When performing an auto-sync, specifies whether the configuration
will be saved or not. If C(false), only the running configuration
will be changed on the device(s) being synced to. When creating a
new device group, this option will default to C(false).
type: bool
full_sync:
description:
- Specifies whether the system synchronizes the entire configuration
during synchronization operations. When C(false), the system performs
incremental synchronization operations, based on the cache size
specified in C(max_incremental_sync_size). Incremental configuration
synchronization is a mechanism for synchronizing a device-group's
configuration among its members, without requiring a full configuration
load for each configuration change. In order for this to work, all
devices in the device-group must initially agree on the configuration.
Typically this requires at least one full configuration load to each
device. When creating a new device group, this option will default
to C(false).
type: bool
max_incremental_sync_size:
description:
- Specifies the size of the changes cache for incremental sync. For example,
using the default, if you make more than 1024 KB worth of incremental
changes, the system performs a full synchronization operation. Using
incremental synchronization operations can reduce the per-device sync/load
time for configuration changes. This setting is relevant only when
C(full_sync) is C(false).
state:
description:
- When C(state) is C(present), ensures the device group exists.
- When C(state) is C(absent), ensures that the device group is removed.
choices:
- present
- absent
notes:
- This module is primarily used as a component of configuring HA pairs of
BIG-IP devices.
- Requires BIG-IP >= 12.1.x.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a sync-only device group
bigip_device_group:
name: foo-group
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Create a sync-only device group with auto-sync enabled
bigip_device_group:
name: foo-group
auto_sync: yes
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
save_on_auto_sync:
description: The new save_on_auto_sync value of the device group.
returned: changed
type: bool
sample: true
full_sync:
description: The new full_sync value of the device group.
returned: changed
type: bool
sample: false
description:
description: The new description of the device group.
returned: changed
type: string
sample: this is a device group
type:
description: The new type of the device group.
returned: changed
type: string
sample: sync-failover
auto_sync:
description: The new auto_sync value of the device group.
returned: changed
type: bool
sample: true
max_incremental_sync_size:
description: The new sync size of the device group
returned: changed
type: int
sample: 1000
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Maps module options to/from the F5 REST API attribute representation."""

    # API attribute name -> module option name
    api_map = {
        'saveOnAutoSync': 'save_on_auto_sync',
        'fullLoadOnSync': 'full_sync',
        'autoSync': 'auto_sync',
        'incrementalConfigSyncSizeMax': 'max_incremental_sync_size'
    }

    # Attributes sent to the device API.
    api_attributes = [
        'saveOnAutoSync', 'fullLoadOnSync', 'description', 'type', 'autoSync',
        'incrementalConfigSyncSizeMax'
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'save_on_auto_sync', 'full_sync', 'description', 'type', 'auto_sync',
        'max_incremental_sync_size'
    ]

    # Values that may be changed on an existing device group ('type' cannot).
    updatables = [
        'save_on_auto_sync', 'full_sync', 'description', 'auto_sync',
        'max_incremental_sync_size'
    ]

    @property
    def save_on_auto_sync(self):
        # Normalize truthy user input to a strict bool; None means "not set".
        if self._values['save_on_auto_sync'] is None:
            return None
        return self._values['save_on_auto_sync'] in BOOLEANS_TRUE

    @property
    def auto_sync(self):
        # The API expects 'enabled'/'disabled' rather than a bool.
        if self._values['auto_sync'] is None:
            return None
        elif self._values['auto_sync'] in [True, 'enabled']:
            return 'enabled'
        else:
            return 'disabled'

    @property
    def full_sync(self):
        if self._values['full_sync'] is None:
            return None
        return self._values['full_sync'] in BOOLEANS_TRUE

    @property
    def max_incremental_sync_size(self):
        # Warn when the option is supplied but ignored because full_sync is off.
        if not self.full_sync and self._values['max_incremental_sync_size'] is not None:
            if self._values['__warnings'] is None:
                self._values['__warnings'] = []
            # BUG FIX: the original appended a *list* containing the dict,
            # which breaks consumers that index the warning with ['msg'].
            # Also repaired the mismatched quoting in the message.
            self._values['__warnings'].append(
                dict(
                    msg='"max_incremental_sync_size" has no effect if "full_sync" is not true',
                    version='2.4'
                )
            )
        if self._values['max_incremental_sync_size'] is None:
            return None
        return int(self._values['max_incremental_sync_size'])

    def to_return(self):
        """Return a dict of returnable values; failures are silently skipped."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Best-effort reporting: never let result assembly fail the module.
            pass
        return result
class Changes(Parameters):
    """Reporting-oriented view of Parameters with auto_sync as a plain bool."""

    @property
    def auto_sync(self):
        # Collapse the stored truthy representation down to True/False.
        return self._values['auto_sync'] in BOOLEANS_TRUE
class ModuleManager(object):
    """Drives the module: diffs desired vs. current state and applies changes."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)  # AnsibleModule wrapper
        self.client = kwargs.get('client', None)  # F5 REST API client
        self.want = Parameters(params=self.module.params)  # desired state
        self.have = None  # current device state, populated on update
        self.changes = Parameters()  # accumulated diff for reporting

    def _set_changed_options(self):
        # Create path: every explicitly supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Changes(params=changed)

    def _update_changed_options(self):
        # Update path: record only options that differ from the device state.
        # Returns True when at least one option changed.
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if changed:
            self.changes = Changes(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when the desired state differs from the device state."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: apply the requested state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state
        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Surface unexpected REST errors as module failures.
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any deprecation warnings collected during parameter handling.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the device group exists (create or update as needed)."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True when the named device group exists on the BIG-IP."""
        result = self.client.api.tm.cm.device_groups.device_group.exists(
            name=self.want.name
        )
        return result

    def update(self):
        """Reconcile an existing group; returns True when something changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report the would-be change without touching the device.
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the device group; raises if the deletion did not stick."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the device group")
        return True

    def create(self):
        """Create the device group from the supplied options."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        # Push the new device group to the BIG-IP.
        params = self.want.api_params()
        self.client.api.tm.cm.device_groups.device_group.create(
            name=self.want.name,
            **params
        )

    def update_on_device(self):
        # Load the existing resource and apply the changed attributes.
        params = self.want.api_params()
        resource = self.client.api.tm.cm.device_groups.device_group.load(
            name=self.want.name
        )
        resource.modify(**params)

    def absent(self):
        """Ensure the device group is gone; returns True when it was removed."""
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        resource = self.client.api.tm.cm.device_groups.device_group.load(
            name=self.want.name
        )
        if resource:
            resource.delete()

    def read_current_from_device(self):
        """Fetch the device group's current attributes as a Parameters object."""
        resource = self.client.api.tm.cm.device_groups.device_group.load(
            name=self.want.name
        )
        result = resource.attrs
        return Parameters(params=result)
class ArgumentSpec(object):
    """Builds the Ansible argument spec for this module."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            type=dict(
                # Cannot be changed once the device group has been created.
                choices=['sync-failover', 'sync-only']
            ),
            description=dict(),
            auto_sync=dict(
                type='bool',
                default='no'
            ),
            save_on_auto_sync=dict(
                type='bool',
            ),
            full_sync=dict(
                type='bool'
            ),
            name=dict(
                required=True
            ),
            # NOTE(review): no type= given, so the value arrives as a string
            # and is int()-converted in Parameters; confirm whether type='int'
            # was intended here.
            max_incremental_sync_size=dict(),
            state=dict(
                default='present',
                choices=['absent', 'present']
            )
        )
        # Merge the shared F5 connection options with the module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: parse arguments, run the manager, exit with JSON."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        # BUG FIX: if F5Client(...) itself raised, `client` was unbound and the
        # original cleanup call raised UnboundLocalError, masking the real error.
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
#!/usr/bin/env python
from unittest import main, TestCase
from cogent import LoadTree
from cogent.parse.tree import DndParser
from picrust.make_test_datasets import exclude_tip, yield_test_trees,\
make_distance_based_exclusion_fn, make_distance_based_tip_label_randomizer
from collections import defaultdict
"""
Tests for make_test_datasets.py
"""
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "1.1.4"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
class TestMakeTestTrees(TestCase):
"""Tests of make_test_trees.py"""
    def setUp(self):
        """Create small fixture trees parsed from newick strings."""
        # Balanced four-tip tree: ((A,B)E,(C,D)F)root.
        self.SimpleTree = \
         DndParser("((A:0.02,B:0.01)E:0.05,(C:0.01,D:0.01)F:0.05)root;")
        # Same shape, but node E is a polytomy with a third child B_prime.
        self.SimplePolytomyTree = \
         DndParser("((A:0.02,B:0.01,B_prime:0.03)E:0.05,(C:0.01,D:0.01)F:0.05)root;")
    def test_exclude_tip(self):
        """exclude_tip should yield a holdout tree without the excluded tip."""
        # Try excluding tip 'B'; its parent E collapses and A absorbs the
        # combined branch length (0.02 + 0.05 = 0.07).
        test_tree = self.SimpleTree.deepcopy()
        obs = exclude_tip(test_tree.getNodeMatchingName('B'),test_tree)
        obs_newick = obs.getNewick(with_distances=True)
        exp_newick = "((C:0.01,D:0.01)F:0.05,A:0.07)root;"
        alt_newick = "(A:0.07,(C:0.01,D:0.01)F:0.05)root;"
        # exp_newick and alt_newick represent
        # two ways of expressing the same tree
        self.assertTrue(obs_newick in [exp_newick,alt_newick])

        # Make sure the holdout works with a polytomy: E keeps its remaining
        # two children, so no collapse happens.
        test_tree = self.SimplePolytomyTree.deepcopy()
        obs = exclude_tip(test_tree.getNodeMatchingName('B'),test_tree)
        obs_newick = obs.getNewick(with_distances=True)
        exp_newick = "((A:0.02,'B_prime':0.03)E:0.05,(C:0.01,D:0.01)F:0.05)root;"
        self.assertEqual(obs_newick,exp_newick)

        # Make sure we raise if the node to exclude is internal, not a tip.
        test_tree = self.SimpleTree.deepcopy()
        self.assertRaises(ValueError,exclude_tip,\
          test_tree.getNodeMatchingName('E'),test_tree)
def test_make_distance_based_exclusion_fn(self):
"""make_distance_based_exclusion_fn should return a working function"""
exclude_similar_strains =\
make_distance_based_exclusion_fn(0.03)
#Test that new function is documented
exp_doc = 'Exclude neighbors of tip within 0.030000 branch length units'
self.assertEqual(exp_doc,exclude_similar_strains.__doc__)
#Test that the function works
test_tree = self.SimpleTree.deepcopy()
#print test_tree.getNewick(with_distances=True)
tip = test_tree.getNodeMatchingName('C')
obs = exclude_similar_strains(tip,test_tree).getNewick(with_distances=True)
exp = "(A:0.02,B:0.01)root;"
self.assertEqual(obs,exp)
#Test on a tree where a single node will remain
test_tree = \
DndParser("((A:0.02,B:0.01)E:0.05,(C:0.06,D:0.01)F:0.05)root;")
#print test_tree.getNewick(with_distances=True)
tip = test_tree.getNodeMatchingName('D')
obs = exclude_similar_strains(tip,test_tree).getNewick(with_distances=True)
exp = "((A:0.02,B:0.01)E:0.05,C:0.11)root;"
self.assertEqual(obs,exp)
#Test that we raise if distance is too large
test_tree = self.SimpleTree.deepcopy()
test_fn = make_distance_based_exclusion_fn(300.0)
tip = test_tree.getNodeMatchingName('C')
self.assertRaises(ValueError,test_fn,tip,test_tree)
def test_make_distance_based_randomizer(self):
"""make_distance_based_randomizer should randomize tip labels wthin d. NOTE: results are tested by cumulative binomial distribution, so ~1/500 test runs may fail stochasitically."""
tip_randomizer =\
make_distance_based_tip_label_randomizer(0.50)
#Test that the function works
test_replicates = 5000
results = defaultdict(int)
total = 0
for i in range(test_replicates):
test_tree = self.SimpleTree.deepcopy()
#print test_tree.asciiArt()
tip = test_tree.getNodeMatchingName('C')
obs = tip_randomizer(tip,test_tree)
obs_newick = obs.getNewick(with_distances=True)
#print obs.asciiArt()
results[obs_newick]+= 1
total += 1
n_unique_trees = len(results.keys())
#Since only the 4 tips are scrambled
#the number of unique trees is just
#the number of permutations 4! = 4*3*2 = 24
#(but note that half of these are equivalent
#trees with a,b swapped with b,a)
self.assertEqual(n_unique_trees,24)
for tree in sorted(results.keys()):
n_obs = results[tree]
# Treat the n trials as Bernoulli trials
# each has success change 1/24
# so we want the inverse cumulative
# binomial for 5000 trials with p ~ .041666666
# n_successes > 253 has p 0.001 (166 gives a p of 0.001
#on the other tail)
self.assertTrue(n_obs >= 166)
self.assertTrue(n_obs < 253)
def test_yield_test_trees(self):
"""yield_test_trees should yield modified test trees"""
start_tree = self.SimpleTree.deepcopy()
# First, test with simple tree and exclude_tip
# (not a typical use, but easy to predict
# and still important to test)
test_fn = exclude_tip
obs = [o for o in yield_test_trees(start_tree, test_fn)]
test_tips = [tip for tip,tree in obs]
test_trees = [tree for tip,tree in obs]
#print test_tips
#print test_trees
#Test that each tree excludes the correct tip
for i,exp_tip in enumerate(start_tree.tips()):
#print exp_tip
node_names = [obs_tip.Name for obs_tip in test_trees[i].tips()]
#print node_names
self.assertTrue(exp_tip.Name not in node_names)
#Test that the topology is correct for an example tree
self.assertTrue(test_trees[1].getNewick(with_distances=True) in\
["((C:0.01,D:0.01)F:0.05,A:0.07)root;","(A:0.07,(C:0.01,D:0.01)F:0.05)root;"])
#Second, let's test a more realistic test function
test_fn = make_distance_based_exclusion_fn(0.03)
obs = [o for o in yield_test_trees(start_tree, test_fn)]
test_tips = [tip for tip,tree in obs]
test_trees = [tree for tip,tree in obs]
#Test that each tree excludes the tip of interest
#NOTE: this behavior changed some time ago
#We want to exclude the test tip.
for i,exp_tip in enumerate(start_tree.tips()):
#print exp_tip
node_names = [obs_tip.Name for obs_tip in test_trees[i].tips()]
#print node_names
self.assertFalse(exp_tip.Name in node_names)
#Test that the topology is correct for all tips
#Tip 'A'
self.assertEqual(test_trees[0].getNewick(with_distances=True),\
"(C:0.01,D:0.01)root;")
#Tip 'B'
self.assertEqual(test_trees[1].getNewick(with_distances=True),\
"(C:0.01,D:0.01)root;")
#Tip 'C'
self.assertEqual(test_trees[2].getNewick(with_distances=True),\
"(A:0.02,B:0.01)root;")
#Tip 'D'
self.assertEqual(test_trees[3].getNewick(with_distances=True),\
"(A:0.02,B:0.01)root;")
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import annotations
import logging
from enum import Enum
from io import BytesIO
from typing import TYPE_CHECKING, Any
from h2.errors import ErrorCodes
from h2.exceptions import H2Error, ProtocolError, StreamClosedError
from twisted.internet.defer import Deferred
from twisted.internet.error import ConnectionClosed
from twisted.python.failure import Failure
from twisted.web.client import ResponseFailed
from scrapy.exceptions import DownloadCancelledError
from scrapy.http.headers import Headers
from scrapy.utils._download_handlers import (
get_maxsize_msg,
get_warnsize_msg,
make_response,
)
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
from hpack import HeaderTuple
from scrapy.core.http2.protocol import H2ClientProtocol
from scrapy.http import Request, Response
logger = logging.getLogger(__name__)
class InactiveStreamClosed(ConnectionClosed):
"""Connection was closed without sending request headers
of the stream. This happens when a stream is waiting for other
streams to close and connection is lost."""
def __init__(self, request: Request) -> None:
self.request = request
def __str__(self) -> str:
return f"InactiveStreamClosed: Connection was closed without sending the request {self.request!r}"
class InvalidHostname(H2Error):
def __init__(
self, request: Request, expected_hostname: str, expected_netloc: str
) -> None:
self.request = request
self.expected_hostname = expected_hostname
self.expected_netloc = expected_netloc
def __str__(self) -> str:
return f"InvalidHostname: Expected {self.expected_hostname} or {self.expected_netloc} in {self.request}"
class StreamCloseReason(Enum):
# Received a StreamEnded event from the remote
ENDED = 1
# Received a StreamReset event -- ended abruptly
RESET = 2
# Transport connection was lost
CONNECTION_LOST = 3
# Expected response body size is more than allowed limit
MAXSIZE_EXCEEDED = 4
# Response deferred is cancelled by the client
# (happens when client called response_deferred.cancel())
CANCELLED = 5
# Connection lost and the stream was not initiated
INACTIVE = 6
# The hostname of the request is not same as of connected peer hostname
# As a result sending this request will the end the connection
INVALID_HOSTNAME = 7
# Actual response body size is more than allowed limit
MAXSIZE_EXCEEDED_ACTUAL = 8
class Stream:
"""Represents a single HTTP/2 Stream.
Stream is a bidirectional flow of bytes within an established connection,
which may carry one or more messages. Handles the transfer of HTTP Headers
and Data frames.
Role of this class is to
1. Combine all the data frames
"""
def __init__(
self,
stream_id: int,
request: Request,
protocol: H2ClientProtocol,
download_maxsize: int = 0,
download_warnsize: int = 0,
) -> None:
"""
Arguments:
stream_id -- Unique identifier for the stream within a single HTTP/2 connection
request -- The HTTP request associated to the stream
protocol -- Parent H2ClientProtocol instance
"""
self.stream_id: int = stream_id
self._request: Request = request
self._protocol: H2ClientProtocol = protocol
self._download_maxsize = self._request.meta.get(
"download_maxsize", download_maxsize
)
self._download_warnsize = self._request.meta.get(
"download_warnsize", download_warnsize
)
# Metadata of an HTTP/2 connection stream
# initialized when stream is instantiated
self.metadata: dict[str, Any] = {
"request_content_length": (
0 if self._request.body is None else len(self._request.body)
),
# Flag to keep track whether the stream has initiated the request
"request_sent": False,
# Flag to track whether we have logged about exceeding download warnsize
"reached_warnsize": False,
# Each time we send a data frame, we will decrease value by the amount send.
"remaining_content_length": (
0 if self._request.body is None else len(self._request.body)
),
# Flag to keep track whether client (self) have closed this stream
"stream_closed_local": False,
# Flag to keep track whether the server has closed the stream
"stream_closed_server": False,
}
# Private variable used to build the response
# this response is then converted to appropriate Response class
# passed to the response deferred callback
self._response: dict[str, Any] = {
# Data received frame by frame from the server is appended
# and passed to the response Deferred when completely received.
"body": BytesIO(),
# The amount of data received that counts against the
# flow control window
"flow_controlled_size": 0,
# Headers received after sending the request
"headers": Headers({}),
}
def _cancel(_: Any) -> None:
# Close this stream as gracefully as possible
# If the associated request is initiated we reset this stream
# else we directly call close() method
if self.metadata["request_sent"]:
self.reset_stream(StreamCloseReason.CANCELLED)
else:
self.close(StreamCloseReason.CANCELLED)
self._deferred_response: Deferred[Response] = Deferred(_cancel)
def __repr__(self) -> str:
return f"Stream(id={self.stream_id!r})"
@property
def _log_warnsize(self) -> bool:
"""Checks if we have received data which exceeds the download warnsize
and whether we have not already logged about it.
Returns:
True if both the above conditions hold true
False if any of the conditions is false
"""
content_length_header = int(
self._response["headers"].get(b"Content-Length", -1)
)
return (
self._download_warnsize
and (
self._response["flow_controlled_size"] > self._download_warnsize
or content_length_header > self._download_warnsize
)
and not self.metadata["reached_warnsize"]
)
def get_response(self) -> Deferred[Response]:
"""Simply return a Deferred which fires when response
from the asynchronous request is available
"""
return self._deferred_response
def check_request_url(self) -> bool:
# Make sure that we are sending the request to the correct URL
url = urlparse_cached(self._request)
return (
url.netloc == str(self._protocol.metadata["uri"].host, "utf-8")
or url.netloc == str(self._protocol.metadata["uri"].netloc, "utf-8")
or url.netloc
== f"{self._protocol.metadata['ip_address']}:{self._protocol.metadata['uri'].port}"
)
def _get_request_headers(self) -> list[tuple[str, str]]:
url = urlparse_cached(self._request)
path = url.path
if url.query:
path += "?" + url.query
# This pseudo-header field MUST NOT be empty for "http" or "https"
# URIs; "http" or "https" URIs that do not contain a path component
# MUST include a value of '/'. The exception to this rule is an
# OPTIONS request for an "http" or "https" URI that does not include
# a path component; these MUST include a ":path" pseudo-header field
# with a value of '*' (refer RFC 7540 - Section 8.1.2.3)
if not path:
path = "*" if self._request.method == "OPTIONS" else "/"
# Make sure pseudo-headers comes before all the other headers
headers = [
(":method", self._request.method),
(":authority", url.netloc),
]
# The ":scheme" and ":path" pseudo-header fields MUST
# be omitted for CONNECT method (refer RFC 7540 - Section 8.3)
if self._request.method != "CONNECT":
headers += [
(":scheme", self._protocol.metadata["uri"].scheme),
(":path", path),
]
content_length = str(len(self._request.body))
headers.append(("Content-Length", content_length))
content_length_name = self._request.headers.normkey(b"Content-Length")
for name, values in self._request.headers.items():
for value_bytes in values:
value = str(value_bytes, "utf-8")
if name == content_length_name:
if value != content_length:
logger.warning(
"Ignoring bad Content-Length header %r of request %r, "
"sending %r instead",
value,
self._request,
content_length,
)
continue
headers.append((str(name, "utf-8"), value))
return headers
def initiate_request(self) -> None:
if self.check_request_url():
headers = self._get_request_headers()
self._protocol.conn.send_headers(self.stream_id, headers, end_stream=False)
self.metadata["request_sent"] = True
self.send_data()
else:
# Close this stream calling the response errback
# Note that we have not sent any headers
self.close(StreamCloseReason.INVALID_HOSTNAME)
def send_data(self) -> None:
"""Called immediately after the headers are sent. Here we send all the
data as part of the request.
If the content length is 0 initially then we end the stream immediately and
wait for response data.
Warning: Only call this method when stream not closed from client side
and has initiated request already by sending HEADER frame. If not then
stream will raise ProtocolError (raise by h2 state machine).
"""
if self.metadata["stream_closed_local"]:
raise StreamClosedError(self.stream_id)
# Firstly, check what the flow control window is for current stream.
window_size = self._protocol.conn.local_flow_control_window(
stream_id=self.stream_id
)
# Next, check what the maximum frame size is.
max_frame_size = self._protocol.conn.max_outbound_frame_size
# We will send no more than the window size or the remaining file size
# of data in this call, whichever is smaller.
bytes_to_send_size = min(window_size, self.metadata["remaining_content_length"])
# We now need to send a number of data frames.
while bytes_to_send_size > 0:
chunk_size = min(bytes_to_send_size, max_frame_size)
data_chunk_start_id = (
self.metadata["request_content_length"]
- self.metadata["remaining_content_length"]
)
data_chunk = self._request.body[
data_chunk_start_id : data_chunk_start_id + chunk_size
]
self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream=False)
bytes_to_send_size -= chunk_size
self.metadata["remaining_content_length"] -= chunk_size
self.metadata["remaining_content_length"] = max(
0, self.metadata["remaining_content_length"]
)
# End the stream if no more data needs to be send
if self.metadata["remaining_content_length"] == 0:
self._protocol.conn.end_stream(self.stream_id)
# Q. What about the rest of the data?
# Ans: Remaining Data frames will be sent when we get a WindowUpdate frame
def receive_window_update(self) -> None:
"""Flow control window size was changed.
Send data that earlier could not be sent as we were
blocked behind the flow control.
"""
if (
self.metadata["remaining_content_length"]
and not self.metadata["stream_closed_server"]
and self.metadata["request_sent"]
):
self.send_data()
def receive_data(self, data: bytes, flow_controlled_length: int) -> None:
self._response["body"].write(data)
self._response["flow_controlled_size"] += flow_controlled_length
# We check maxsize here in case the Content-Length header was not received
if (
self._download_maxsize
and self._response["flow_controlled_size"] > self._download_maxsize
):
self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED_ACTUAL)
return
if self._log_warnsize:
self.metadata["reached_warnsize"] = True
warning_msg = get_warnsize_msg(
self._response["flow_controlled_size"],
self._download_warnsize,
self._request,
expected=False,
)
logger.warning(warning_msg)
# Acknowledge the data received
self._protocol.conn.acknowledge_received_data(
self._response["flow_controlled_size"], self.stream_id
)
def receive_headers(self, headers: list[HeaderTuple]) -> None:
for name, value in headers:
self._response["headers"].appendlist(name, value)
# Check if we exceed the allowed max data size which can be received
expected_size = int(self._response["headers"].get(b"Content-Length", -1))
if self._download_maxsize and expected_size > self._download_maxsize:
self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
return
if self._log_warnsize:
self.metadata["reached_warnsize"] = True
warning_msg = get_warnsize_msg(
expected_size, self._download_warnsize, self._request, expected=True
)
logger.warning(warning_msg)
def reset_stream(self, reason: StreamCloseReason = StreamCloseReason.RESET) -> None:
"""Close this stream by sending a RST_FRAME to the remote peer"""
if self.metadata["stream_closed_local"]:
raise StreamClosedError(self.stream_id)
# Clear buffer earlier to avoid keeping data in memory for a long time
self._response["body"].truncate(0)
self.metadata["stream_closed_local"] = True
self._protocol.conn.reset_stream(self.stream_id, ErrorCodes.REFUSED_STREAM)
self.close(reason)
def close(
self,
reason: StreamCloseReason,
errors: list[BaseException] | None = None,
from_protocol: bool = False,
) -> None:
"""Based on the reason sent we will handle each case."""
if self.metadata["stream_closed_server"]:
raise StreamClosedError(self.stream_id)
if not isinstance(reason, StreamCloseReason):
raise TypeError(
f"Expected StreamCloseReason, received {reason.__class__.__qualname__}"
)
# Have default value of errors as an empty list as
# some cases can add a list of exceptions
errors = errors or []
if not from_protocol:
self._protocol.pop_stream(self.stream_id)
self.metadata["stream_closed_server"] = True
# We do not check for Content-Length or Transfer-Encoding in response headers
# and add `partial` flag as in HTTP/1.1 as 'A request or response that includes
# a payload body can include a content-length header field' (RFC 7540 - Section 8.1.2.6)
# NOTE: Order of handling the events is important here
# As we immediately cancel the request when maxsize is exceeded while
# receiving DATA_FRAME's when we have received the headers (not
# having Content-Length)
if reason in {
StreamCloseReason.MAXSIZE_EXCEEDED,
StreamCloseReason.MAXSIZE_EXCEEDED_ACTUAL,
}:
expected_size = int(
self._response["headers"].get(
b"Content-Length", self._response["flow_controlled_size"]
)
)
error_msg = get_maxsize_msg(
expected_size,
self._download_maxsize,
self._request,
expected=reason == StreamCloseReason.MAXSIZE_EXCEEDED,
)
logger.error(error_msg)
self._deferred_response.errback(DownloadCancelledError(error_msg))
elif reason is StreamCloseReason.ENDED:
self._fire_response_deferred()
# Stream was abruptly ended here
elif reason is StreamCloseReason.CANCELLED:
# Client has cancelled the request. Remove all the data
# received and fire the response deferred with no flags set
# NOTE: The data is already flushed in Stream.reset_stream() called
# immediately when the stream needs to be cancelled
# There maybe no :status in headers, we make
# HTTP Status Code: 499 - Client Closed Request
self._response["headers"][":status"] = "499"
self._fire_response_deferred()
elif reason is StreamCloseReason.RESET:
self._deferred_response.errback(
ResponseFailed(
[
Failure(
f"Remote peer {self._protocol.metadata['ip_address']} sent RST_STREAM",
ProtocolError,
)
]
)
)
elif reason is StreamCloseReason.CONNECTION_LOST:
self._deferred_response.errback(ResponseFailed(errors))
elif reason is StreamCloseReason.INACTIVE:
errors.insert(0, InactiveStreamClosed(self._request))
self._deferred_response.errback(ResponseFailed(errors))
else:
assert reason is StreamCloseReason.INVALID_HOSTNAME
self._deferred_response.errback(
InvalidHostname(
self._request,
str(self._protocol.metadata["uri"].host, "utf-8"),
f"{self._protocol.metadata['ip_address']}:{self._protocol.metadata['uri'].port}",
)
)
def _fire_response_deferred(self) -> None:
"""Builds response from the self._response dict
and fires the response deferred callback with the
generated response instance"""
response = make_response(
url=self._request.url,
status=int(self._response["headers"][":status"]),
headers=self._response["headers"],
body=self._response["body"].getvalue(),
certificate=self._protocol.metadata["certificate"],
ip_address=self._protocol.metadata["ip_address"],
protocol="h2",
)
self._deferred_response.callback(response) | python | github | https://github.com/scrapy/scrapy | scrapy/core/http2/stream.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Statistical test assertions calibrated for their error rates.
Statistical tests have an inescapable probability of error: a correct
sampler can still fail a test by chance, and an incorrect sampler can
still pass a test by chance. This library is about bounding both of
those error rates. This requires admitting a task-specific notion of
"discrepancy": Correct code will fail rarely, code that misbehaves by
more than the discrepancy will pass rarely, and nothing reliable can
be said about code that misbehaves, but misbehaves by less than the
discrepancy.
# Example
Consider testing that the mean of a scalar probability distribution P
is some expected constant. Suppose the support of P is the interval
`[0, 1]`. Then you might do this:
```python
from tensorflow_probability.python.distributions.internal import statistical_testing
expected_mean = ...
num_samples = 5000
samples = ... draw 5000 samples from P
# Check that the mean looks right
check1 = statistical_testing.assert_true_mean_equal_by_dkwm(
samples, low=0., high=1., expected=expected_mean,
false_fail_rate=1e-6)
# Check that the difference in means detectable with 5000 samples is
# small enough
check2 = tf.assert_less(
statistical_testing.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=1.0,
false_fail_rate=1e-6, false_pass_rate=1e-6),
0.01)
# Be sure to execute both assertion ops
sess.run([check1, check2])
```
The second assertion is an instance of experiment design. It's a
deterministic computation (independent of the code under test) that
checks that `5000` samples is enough to reliably resolve mean
differences of `0.01` or more. Here "reliably" means that if the code
under test is correct, the probability of drawing an unlucky sample
that causes this test to fail is at most 1e-6; and if the code under
test is incorrect enough that its true mean is 0.01 more or less than
expected, then the probability of drawing a "lucky" sample that causes
the test to false-pass is also at most 1e-6.
# Overview
Every function in this library can be characterized in terms of:
- The property being tested, such as the full density of the
distribution under test, or just its true mean, or a single
Bernoulli probability, etc.
- The relation being asserted, e.g., whether the mean is less, more,
or equal to the given expected value.
- The stochastic bound being relied upon, such as the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
or the CDF of the binomial distribution (for assertions about
Bernoulli probabilities).
- The number of sample sets in the statistical test. For example,
testing equality of means has a one-sample variant, where the
expected mean is given exactly, and a two-sample variant, where the
expected mean is itself given by a set of samples (e.g., from an
alternative algorithm).
- What operation(s) of the test are to be performed. Each test has
three of these:
1. `assert` executes the test. Specifically, it creates a TF op that
produces an error if it has enough evidence to prove that the
property under test is violated. These functions depend on the
desired false failure rate, because that determines the sizes of
appropriate confidence intervals, etc.
2. `min_discrepancy` computes the smallest difference reliably
detectable by that test, given the sample count and error rates.
What it's a difference of is test-specific. For example, a test
for equality of means would make detection guarantees about the
     difference of the true means.
3. `min_num_samples` computes the minimum number of samples needed
to reliably detect a given discrepancy with given error rates.
The latter two are for experimental design, and are meant to be
usable either interactively or inline in the overall test method.
This library follows a naming convention, to make room for every
combination of the above. A name mentions the operation first, then
the property, then the relation, then the bound, then, if the test
takes more than one set of samples, a token indicating this. For
example, `assert_true_mean_equal_by_dkwm` (which is implicitly
one-sample). Each name is a grammatically sound noun phrase (or verb
phrase, for the asserts).
# Asymptotic properties
The number of samples needed tends to scale as `O(1/discrepancy**2)` and
as `O(log(1/error_rate))`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
# Public API: DKWM-based assertions about true means, plus the matching
# experiment-design helpers (minimum detectable discrepancy and minimum
# sample count), in one-sample and two-sample variants.
__all__ = [
    "true_mean_confidence_interval_by_dkwm",
    "assert_true_mean_equal_by_dkwm",
    "min_discrepancy_of_true_means_detectable_by_dkwm",
    "min_num_samples_for_dkwm_mean_test",
    "assert_true_mean_in_interval_by_dkwm",
    "assert_true_mean_equal_by_dkwm_two_sample",
    "min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
    "min_num_samples_for_dkwm_mean_two_sample_test",
]
def _batch_sort_vector(x, ascending=True, name=None):
  """Sorts each vector along the last axis of `x`, batchwise.

  Args:
    x: `Tensor` whose innermost dimension is to be sorted; outer
      dimensions are treated as independent batch members.
    ascending: Python `bool`; if `True` sort smallest-first, else
      largest-first.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` of the same shape as `x`, sorted along the last axis.
  """
  with ops.name_scope(name, "_batch_sort_vector", [x]):
    x = ops.convert_to_tensor(x, name="x")
    length = array_ops.shape(x)[-1]
    # `top_k` yields values in descending order; negating the input (and
    # the output) turns that into an ascending sort.
    if ascending:
      negated_sorted, _ = nn_ops.top_k(-x, k=length, sorted=True)
      result = -negated_sorted
    else:
      result, _ = nn_ops.top_k(x, k=length, sorted=True)
    result.set_shape(x.shape)
    return result
def _do_maximum_mean(samples, envelope, high, name=None):
  """Common code between maximum_mean and minimum_mean.

  Computes the largest mean consistent with the empirical CDF of
  `samples` lying within an `envelope`-sized band of the true CDF,
  given that the support is bounded above by `high`.

  Args:
    samples: Floating-point `Tensor` of samples; the 0th dimension
      indexes IID draws, the rest are batch dimensions.
    envelope: Floating-point `Tensor` of CDF envelope sizes,
      broadcast-compatible with the batch shape of `samples`.
    high: Floating-point `Tensor` of upper bounds on the support.
    name: A name for this operation (optional).

  Returns:
    Floating-point `Tensor` of upper bounds on the true means.
  """
  with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
    n = array_ops.rank(samples)
    # Move the batch dimension of `samples` to the rightmost position,
    # where the _batch_sort_vector function wants it.
    perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
    samples = array_ops.transpose(samples, perm)

    samples = _batch_sort_vector(samples)

    # The maximum mean is given by taking `envelope`-worth of
    # probability from the smallest samples and moving it to the
    # maximum value.  This amounts to:
    # - ignoring the smallest k samples, where `k/n < envelope`
    # - taking a `1/n - (envelope - k/n)` part of the index k sample
    # - taking all the other samples
    # - and adding `envelope * high` at the end.
    # The following is a vectorized and batched way of computing this.
    # `max_mean_contrib` is a mask implementing the previous.
    # NOTE: after the transpose above, the last axis is the sample axis,
    # so `batch_size` here is actually the number of samples per batch
    # member, not the batch size.
    batch_size = array_ops.shape(samples)[-1]
    batch_size = math_ops.cast(batch_size, dtype=samples.dtype.base_dtype)
    step = 1. / batch_size
    # cum_steps[i] = (i+1)/n, the empirical CDF value just above sample i.
    cum_steps = step * math_ops.range(
        1, batch_size + 1, dtype=samples.dtype.base_dtype)
    # Each sample contributes its full 1/n of probability mass, except the
    # smallest `envelope`-worth, whose mass is clipped away (and credited
    # to `high` below).
    max_mean_contrib = clip_ops.clip_by_value(
        cum_steps - envelope[..., array_ops.newaxis],
        clip_value_min=0.,
        clip_value_max=step)
    return math_ops.reduce_sum(
        samples * max_mean_contrib, axis=-1) + envelope * high
def _maximum_mean(samples, envelope, high, name=None):
  """Returns a stochastic upper bound on the mean of a scalar distribution.

  If the true CDF stays within an `eps`-envelope of the empirical CDF
  of the samples, and the support is bounded above, then the true mean
  is bounded above as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` indexes independent, identically
  distributed draws; all remaining dimensions are broadcast together
  with `envelope` and `high` and treated as separate batch members.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `high`.
    envelope: Floating-point `Tensor` of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.  `samples <= high`.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point `Tensor` of upper bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be larger than
      the corresponding `high`.
  """
  with ops.name_scope(name, "maximum_mean", [samples, envelope, high]):
    samples = ops.convert_to_tensor(samples, name="samples")
    envelope = ops.convert_to_tensor(envelope, name="envelope")
    high = ops.convert_to_tensor(high, name="high")

    # Guard against callers whose samples violate the stated support bound.
    sample_max = math_ops.reduce_max(samples, axis=[0])
    support_check = check_ops.assert_less_equal(
        sample_max, high,
        message="Given sample maximum value exceeds expectations")
    with ops.control_dependencies([support_check]):
      return array_ops.identity(_do_maximum_mean(samples, envelope, high))
def _minimum_mean(samples, envelope, low, name=None):
  """Returns a stochastic lower bound on the mean of a scalar distribution.

  If the true CDF stays within an `eps`-envelope of the empirical CDF
  of the samples, and the support is bounded below, then the true mean
  is bounded below as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` indexes independent, identically
  distributed draws; all remaining dimensions are broadcast together
  with `envelope` and `low` and treated as separate batch members.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `low`.
    envelope: Floating-point `Tensor` of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.  `samples >= low`.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point `Tensor` of lower bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be smaller than
      the corresponding `low`.
  """
  with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
    samples = ops.convert_to_tensor(samples, name="samples")
    envelope = ops.convert_to_tensor(envelope, name="envelope")
    low = ops.convert_to_tensor(low, name="low")

    # Guard against callers whose samples violate the stated support bound.
    sample_min = math_ops.reduce_min(samples, axis=[0])
    support_check = check_ops.assert_greater_equal(
        sample_min, low,
        message="Given sample minimum value falls below expectations")
    with ops.control_dependencies([support_check]):
      # A lower bound on the mean is the negation of an upper bound on the
      # mean of the negated samples (whose support is bounded above by -low).
      return -_do_maximum_mean(-samples, envelope, -low)
def _dkwm_cdf_envelope(n, error_rate, name=None):
  """Computes the CDF envelope that the DKWM inequality licenses.

  The [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
  gives a stochastic bound on the distance between the true cumulative
  distribution function (CDF) of any distribution and its empirical
  CDF.  To wit, for `n` iid samples from any distribution with CDF F,

  ```none
  P(sup_x |F_n(x) - F(x)| > eps) < 2exp(-2n eps^2)
  ```

  This function computes the envelope size `eps` as a function of the
  number of samples `n` and the desired limit on the left-hand
  probability above.

  Args:
    n: `Tensor` of numbers of samples drawn.
    error_rate: Floating-point `Tensor` of admissible rates of mistakes.
    name: A name for this operation (optional).

  Returns:
    eps: `Tensor` of maximum distances the true CDF can be from the
      empirical CDF.  This scales as `O(sqrt(-log(error_rate)))` and
      as `O(1 / sqrt(n))`.  The shape is the broadcast of `n` and
      `error_rate`.
  """
  with ops.name_scope(name, "dkwm_cdf_envelope", [n, error_rate]):
    n = math_ops.cast(n, dtype=error_rate.dtype)
    # Invert the DKWM tail bound 2*exp(-2*n*eps^2) = error_rate for eps.
    # Use the public `math_ops.log` rather than the private generated-op
    # module `gen_math_ops`, which is an internal implementation detail.
    return math_ops.sqrt(-math_ops.log(error_rate / 2.) / (2. * n))
def _check_shape_dominates(samples, parameters):
"""Check that broadcasting `samples` against `parameters` does not expand it.
Why? Because I want to be very sure that the samples tensor is not
accidentally enlarged by broadcasting against tensors that are
supposed to be describing the distribution(s) sampled from, lest the
sample counts end up inflated.
Args:
samples: A `Tensor` whose shape is to be protected against broadcasting.
parameters: A list of `Tensor`s who are parameters for the statistical test.
Returns:
samples: Return original `samples` with control dependencies attached
to ensure no broadcasting.
"""
def check(t):
samples_batch_shape = array_ops.shape(samples)[1:]
broadcasted_batch_shape = array_ops.broadcast_dynamic_shape(
samples_batch_shape, array_ops.shape(t))
# This rank check ensures that I don't get a wrong answer from the
# _shapes_ broadcasting against each other.
samples_batch_ndims = array_ops.size(samples_batch_shape)
ge = check_ops.assert_greater_equal(
samples_batch_ndims, array_ops.rank(t))
eq = check_ops.assert_equal(samples_batch_shape, broadcasted_batch_shape)
return ge, eq
checks = list(itertools.chain(*[check(t) for t in parameters]))
with ops.control_dependencies(checks):
return array_ops.identity(samples)
def true_mean_confidence_interval_by_dkwm(
samples, low, high, error_rate=1e-6, name=None):
"""Computes a confidence interval for the mean of a scalar distribution.
In batch mode, computes confidence intervals for all distributions
in the batch (which need not be identically distributed).
Relies on the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
The probability (over the randomness of drawing the given samples)
that any true mean is outside the corresponding returned interval is
no more than the given `error_rate`. The size of the intervals
scale as
`O(1 / sqrt(#samples))`, as `O(high - low)`, and as `O(-log(error_rate))`.
Note that `error_rate` is a total error rate for all the confidence
intervals in the batch. As such, if the batch is nontrivial, the
error rate is not broadcast but divided (evenly) among the batch
members.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
error_rate: *Scalar* floating-point `Tensor` admissible total rate
of mistakes.
name: A name for this operation (optional).
Returns:
low: A floating-point `Tensor` of stochastic lower bounds on the
true means.
high: A floating-point `Tensor` of stochastic upper bounds on the
true means.
"""
with ops.name_scope(
name, "true_mean_confidence_interval_by_dkwm",
[samples, low, high, error_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
error_rate = ops.convert_to_tensor(error_rate, name="error_rate")
samples = _check_shape_dominates(samples, [low, high])
check_ops.assert_scalar(error_rate) # Static shape
error_rate = _itemwise_error_rate(error_rate, [low, high], samples)
n = array_ops.shape(samples)[0]
envelope = _dkwm_cdf_envelope(n, error_rate)
min_mean = _minimum_mean(samples, envelope, low)
max_mean = _maximum_mean(samples, envelope, high)
return min_mean, max_mean
def _itemwise_error_rate(
total_error_rate, param_tensors, sample_tensor=None, name=None):
with ops.name_scope(
name, "itemwise_error_rate",
[total_error_rate, param_tensors, sample_tensor]):
result_shape = [1]
for p_tensor in param_tensors:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(p_tensor), result_shape)
if sample_tensor is not None:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(sample_tensor)[1:], result_shape)
num_items = math_ops.reduce_prod(result_shape)
return total_error_rate / math_ops.cast(
num_items, dtype=total_error_rate.dtype)
def assert_true_mean_equal_by_dkwm(
samples, low, high, expected, false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is as expected.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the true mean of some distribution from which the given samples are
drawn is _not_ the given expected mean with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want to
check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
expected: Floating-point `Tensor` of expected true means.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean is
outside the corresponding confidence interval.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm",
[samples, low, high, expected, false_fail_rate]):
return assert_true_mean_in_interval_by_dkwm(
samples, low, high, expected, expected, false_fail_rate)
def min_discrepancy_of_true_means_detectable_by_dkwm(
n, low, high, false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy that a DKWM-based test can detect.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n: `Tensor` of numbers of samples to be drawn from the distributions
of interest.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
discr: `Tensor` of lower bounds on the distances between true
means detectable by a DKWM-based test.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discr[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean (resp. in the expected interval), then `assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will fail with
probability at most `false_fail_rate / K` (which amounts to
`false_fail_rate` if applied to the whole batch at once), and (b) if
the true mean differs from the expected mean (resp. falls outside
the expected interval) by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will pass with
probability at most `false_pass_rate`.
The detectable discrepancy scales as
- `O(high[i] - low[i])`,
- `O(1 / sqrt(n[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm",
[n, low, high, false_fail_rate, false_pass_rate]):
n = ops.convert_to_tensor(n, name="n")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Algorithm: Assume a true CDF F. The DKWM inequality gives a
# stochastic bound on how far the observed empirical CDF F_n can be.
# Then, using the DKWM inequality again gives a stochastic bound on
# the farthest candidate true CDF F' that
# true_mean_confidence_interval_by_dkwm might consider. At worst, these
# errors may go in the same direction, so the distance between F and
# F' is bounded by the sum.
# On batching: false fail rates sum, so I need to reduce
# the input to account for the batching. False pass rates
# max, so I don't.
sampling_envelope = _dkwm_cdf_envelope(n, false_pass_rate)
false_fail_rate = _itemwise_error_rate(false_fail_rate, [n, low, high])
analysis_envelope = _dkwm_cdf_envelope(n, false_fail_rate)
return (high - low) * (sampling_envelope + analysis_envelope)
def min_num_samples_for_dkwm_mean_test(
discrepancy, low, high,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a one-sample DKWM mean test.
To wit, returns an upper bound on the number of samples necessary to
guarantee detecting a mean difference of at least the given
`discrepancy`, with the given `false_fail_rate` and `false_pass_rate`,
using the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
on a scalar distribution supported on `[low, high]`.
Args:
discrepancy: Floating-point `Tensor` of desired upper limits on mean
differences that may go undetected with probability higher than
`1 - false_pass_rate`.
low: `Tensor` of lower bounds on the distributions' support.
high: `Tensor` of upper bounds on the distributions' support.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
n: `Tensor` of numbers of samples to be drawn from the distributions
of interest.
The `discrepancy`, `low`, and `high` tensors must have
broadcast-compatible shapes.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discrepancy[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean (resp. in the expected interval), then `assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will fail with
probability at most `false_fail_rate / K` (which amounts to
`false_fail_rate` if applied to the whole batch at once), and (b) if
the true mean differs from the expected mean (resp. falls outside
the expected interval) by at least `discrepancy[i]`,
`assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will pass with
probability at most `false_pass_rate`.
The required number of samples scales
as `O((high[i] - low[i])**2)`, `O(-log(false_fail_rate/K))`,
`O(-log(false_pass_rate))`, and `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_test",
[low, high, false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(
discrepancy, name="discrepancy")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate envelopes, but this is sound.
envelope1 = discrepancy / (2. * (high - low))
envelope2 = envelope1
false_fail_rate = _itemwise_error_rate(
false_fail_rate, [low, high, discrepancy])
n1 = -math_ops.log(false_fail_rate / 2.) / (2. * envelope1**2)
n2 = -math_ops.log(false_pass_rate / 2.) / (2. * envelope2**2)
return math_ops.maximum(n1, n2)
def assert_true_mean_in_interval_by_dkwm(
samples, low, high, expected_low, expected_high,
false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is in the given interval.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the mean of the distribution from which the given samples are
drawn is _outside_ the given interval with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want
to check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
expected_low: Floating-point `Tensor` of lower bounds on the
expected true means.
expected_high: Floating-point `Tensor` of upper bounds on the
expected true means.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean
interval does not overlap with the corresponding confidence
interval.
"""
with ops.name_scope(
name, "assert_true_mean_in_interval_by_dkwm",
[samples, low, high, expected_low, expected_high, false_fail_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
expected_low = ops.convert_to_tensor(expected_low, name="expected_low")
expected_high = ops.convert_to_tensor(expected_high, name="expected_high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples = _check_shape_dominates(
samples, [low, high, expected_low, expected_high])
min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
samples, low, high, false_fail_rate)
# Assert that the interval [min_mean, max_mean] intersects the
# interval [expected_low, expected_high]. This is true if
# max_mean >= expected_low and min_mean <= expected_high.
# By DeMorgan's law, that's also equivalent to
# not (max_mean < expected_low or min_mean > expected_high),
# which is a way of saying the two intervals are not disjoint.
check_confidence_interval_can_intersect = check_ops.assert_greater_equal(
max_mean, expected_low, message="Confidence interval does not "
"intersect: true mean smaller than expected")
with ops.control_dependencies([check_confidence_interval_can_intersect]):
return check_ops.assert_less_equal(
min_mean, expected_high, message="Confidence interval does not "
"intersect: true mean greater than expected")
def assert_true_mean_equal_by_dkwm_two_sample(
samples1, low1, high1, samples2, low2, high2,
false_fail_rate=1e-6, name=None):
"""Asserts the means of the given distributions are equal.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the means of the distributions from which the given samples are
drawn are _not_ equal with statistical significance `false_fail_rate`
or stronger, otherwise passes. If you also want to check that you
are gathering enough evidence that a pass is not spurious, see
`min_num_samples_for_dkwm_mean_two_sample_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm_two_sample`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples1: Floating-point `Tensor` of samples from the
distribution(s) A. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
The support is bounded: `low1 <= samples1 <= high1`.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
samples2: Floating-point `Tensor` of samples from the
distribution(s) B. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
The support is bounded: `low2 <= samples2 <= high2`.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any pair of confidence
intervals true for corresponding true means do not overlap.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm_two_sample",
[samples1, low1, high1, samples2, low2, high2, false_fail_rate]):
samples1 = ops.convert_to_tensor(samples1, name="samples1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
samples2 = ops.convert_to_tensor(samples2, name="samples2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples1 = _check_shape_dominates(samples1, [low1, high1])
samples2 = _check_shape_dominates(samples2, [low2, high2])
compatible_samples = check_ops.assert_equal(
array_ops.shape(samples1)[1:], array_ops.shape(samples2)[1:])
with ops.control_dependencies([compatible_samples]):
# Could in principle play games with cleverly allocating
# significance instead of the even split below. It may be possible
# to get tighter intervals, in order to obtain a higher power test.
# Any allocation strategy that depends only on the support bounds
# and sample counts should be valid; however, because the intervals
# scale as O(-log(false_fail_rate)), there doesn't seem to be much
# room to win.
min_mean_2, max_mean_2 = true_mean_confidence_interval_by_dkwm(
samples2, low2, high2, false_fail_rate / 2.)
return assert_true_mean_in_interval_by_dkwm(
samples1, low1, high1, min_mean_2, max_mean_2, false_fail_rate / 2.)
def min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
n1, low1, high1, n2, low2, high2,
false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy for a two-sample DKWM-based test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n1: `Tensor` of numbers of samples to be drawn from the distributions A.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
n2: `Tensor` of numbers of samples to be drawn from the distributions B.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
discr: `Tensor` of lower bounds on the distances between true means
detectable by a two-sample DKWM-based test.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discr[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The detectable distribution scales as
- `O(high1[i] - low1[i])`, `O(high2[i] - low2[i])`,
- `O(1 / sqrt(n1[i]))`, `O(1 / sqrt(n2[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
[n1, low1, high1, n2, low2, high2, false_fail_rate, false_pass_rate]):
n1 = ops.convert_to_tensor(n1, name="n1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
n2 = ops.convert_to_tensor(n2, name="n2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
det_disc1 = min_discrepancy_of_true_means_detectable_by_dkwm(
n1, low1, high1, false_fail_rate / 2., false_pass_rate / 2.)
det_disc2 = min_discrepancy_of_true_means_detectable_by_dkwm(
n2, low2, high2, false_fail_rate / 2., false_pass_rate / 2.)
return det_disc1 + det_disc2
def min_num_samples_for_dkwm_mean_two_sample_test(
discrepancy, low1, high1, low2, high2,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a two-sample DKWM mean test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Args:
discrepancy: Floating-point `Tensor` of desired upper limits on mean
differences that may go undetected with probability higher than
`1 - false_pass_rate`.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
n1: `Tensor` of numbers of samples to be drawn from the distributions A.
n2: `Tensor` of numbers of samples to be drawn from the distributions B.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discr[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The required number of samples scales as
- `O((high1[i] - low1[i])**2)`, `O((high2[i] - low2[i])**2)`,
- `O(-log(false_fail_rate/K))`,
- `O(-log(false_pass_rate))`, and
- `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_two_sample_test",
[low1, high1, low2, high2,
false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(discrepancy, name="discrepancy")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate discrepancy tolerances and
# failure probabilities, but this is sound.
n1 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low1, high1,
false_fail_rate / 2., false_pass_rate / 2.)
n2 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low2, high2,
false_fail_rate / 2., false_pass_rate / 2.)
return n1, n2 | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v16.grid_layout_upgrade.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "CPU Usage",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-10": {
"kind": "Panel",
"spec": {
"id": 10,
"title": "System Logs",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "logs",
"version": "",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "logs",
"originalOptions": {
"height": 100
}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "Server Metrics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Memory Usage",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "stat",
"version": "",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "singlestat",
"originalOptions": {}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "Process List",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "table",
"version": "",
"spec": {
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Network I/O",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"height": 200
}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "Disk I/O",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"height": 200
}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "Temperature",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "",
"spec": {
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "Uptime",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "stat",
"version": "",
"spec": {
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "Load Average",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "bargauge",
"version": "",
"spec": {
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "Description Panel",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "text",
"version": "",
"spec": {
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 12,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 12,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Collapsed Row",
"collapse": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 24,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 8,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 8,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Visible Row Title",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 8,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 8,
"y": 0,
"width": 8,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-7"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 16,
"y": 0,
"width": 8,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 16,
"height": 4,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 16,
"y": 0,
"width": 8,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-10"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"repeat": {
"mode": "variable",
"value": "server"
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 24,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "V16 Grid Layout Migration Test Dashboard",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v16.grid_layout_upgrade.v42.v2beta1.json |
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**************************************
pathintegral - nuclear quantum effects
**************************************
- method to automatically run the system including nuclear quantum effects using the Feynman path-integral
!!WARNING: THIS IS STILL AN EXPERIMENTAL FEATURE!!
This method creates, based on the supplied topology of the system, an path-integral representation with P beads.
The path-integral system is a fully classical analog, which has to be run at an effective temperature P*T.
The method needs the following parameters:
* allParticles
particles of the sytem
* props
particle properties
* types
types, e.g. read from the gromacs parser
* system
* exclusions
non-bonded exclusions
* integrator
* langevin
langevin integrator
* rcut
the cutoff used for the rings non-bonded interactions
* P
the Trotter Number (number of imaginary time slices)
* polymerInitR
polymer radius for setting up ring in 2d plane
* hbar
hbar in gromacs units [kJ/mol ps]
* disableVVl
disable Virtual Verlet List (slow but safe). If false, the neighbour search is based on the VirtualParticles extension, which contain
the rings. This speeds up neighbour search significantly.
"""
import copy
import math
import espressopp
from espressopp import Real3D, Int3D
def createPathintegralSystem(allParticles,
                             props,
                             types,
                             system,
                             exclusions,
                             integrator,
                             langevin,
                             rcut,
                             P,
                             polymerInitR=0.01,
                             hbar=0.063507807,
                             disableVVL=False
                             ):
    """Turn the classical system into a path-integral system with P beads.

    Each classical particle is replicated P-1 times (one copy per imaginary
    time slice).  The copies start on a small circle of radius `polymerInitR`
    in the x/y plane around the classical position and are coupled into a
    ring by harmonic springs whose constant follows from the particle mass,
    the Trotter number P, the thermostat temperature and hbar.  Bonded
    interactions, exclusions and non-bonded potentials are expanded to every
    time slice, and finally the Langevin temperature is scaled by P (the
    effective classical temperature of the isomorphic ring-polymer system).

    Returns the per-slice Verlet lists when the VirtualVerletList machinery
    is enabled (disableVVL=False); otherwise returns None.
    """
    numtypes=max(types)+1
    num_cla_part=len(allParticles)
    ## make a dictionary mapping property name -> column index in a particle row
    ##(TODO: better to use esp++ particle ?)
    propDict={}
    for p in props: propDict.update({p:len(propDict)})

    piParticles=[]
    ringids={} #dict with key: classical particle id, value: list of ids in the ring polymer
    vptuples=[]

    if not disableVVL:
        vcl=espressopp.CellList()
        ftpl = espressopp.FixedTupleList(system.storage)
        #vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
        vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
        # create a cell list which will store the virtual particles after domain decomposition
        vvl.setCellList(vcl)

    ## some data structures that will be useful later
    ## ringids has all imaginary time beads belonging to a classical bead pid
    ## allParticlesById is used to access particle properties by pid
    allParticlesById={}
    for p in allParticles:
        pid=p[propDict['id']]
        ringids.update({pid:[]})
        allParticlesById.update({pid:p})

    # replicate every classical particle for imaginary time slices 1..P-1
    for i in xrange(1,P):
        for p in allParticles:
            pid=p[propDict['id']]
            newparticle=copy.deepcopy(p)
            # set type according to imaginary time index: slice i uses
            # type range [numtypes*i, numtypes*(i+1))
            newparticle[propDict['type']]=newparticle[propDict['type']]+numtypes*i
            # set positions: distribute the beads on a small circle so the
            # ring polymer starts compact around the classical position
            newpos=newparticle[propDict['pos']]
            newpos[0]=newpos[0]+polymerInitR*math.cos(i*2*math.pi/P)-polymerInitR
            newpos[1]=newpos[1]+polymerInitR*math.sin(i*2*math.pi/P)
            # new ids are appended after all classical ids (ids are 1-based)
            newid=len(allParticles)+len(piParticles)+1
            newparticle[propDict['id']]=newid
            piParticles.append(newparticle)
            ringids[pid].append(newid)

    if not disableVVL:
        # one VerletList per imaginary time slice; rebuilding is driven by
        # the VirtualVerletList, hence rebuild=False and disconnect()
        iVerletLists={}
        for i in xrange(1,P+1):
            iVerletLists.update({i:espressopp.VerletList(system, 0, rebuild=False)})
            iVerletLists[i].disconnect()
        ## map types to sub-verlet lists using the VirtualVerletList
        ## classical types are in types
        ## type at imaginary time i=t+numtypes*i
        # NOTE(review): every slice is mapped to iVerletLists[1], not
        # iVerletLists[i] (see also the commented print) -- confirm intended
        for i in xrange(1,P+1):
            tt=[]
            for j in xrange(0, numtypes):
                pitype=types[j]+numtypes*(i-1)
                tt.append(pitype)
            #print i, "mapped", tt, " to ", iVerletLists[i]
            vvl.mapTypeToVerletList(tt, iVerletLists[1])

    system.storage.addParticles(piParticles, *props)
    #print "1 PYTHON IMG 1947", system.storage.getParticle(1947).pos, system.storage.getParticle(1947).imageBox
    #print "RINGIDS", ringids

    # store each ring in a FixedTupleList
    if not disableVVL:
        vParticles=[]
        vptype=numtypes*(P+1)+1 # this is the type assigned to virtual particles
        for k, v in ringids.iteritems():
            # NOTE(review): the centre of gravity is accumulated from the
            # classical particle position only (the same value is added once
            # per ring bead), so cog ends up equal to the classical position
            # -- confirm this is intended
            cog=allParticlesById[k][propDict['pos']]
            for pid in v:
                cog=cog+allParticlesById[k][propDict['pos']]
            cog=cog/(len(v)+1)
            #create a virtual particle for each ring
            vpprops = ['id', 'pos', 'v', 'type', 'mass', 'q']
            vpid=len(allParticles)+len(piParticles)+len(vParticles)+1
            part = [vpid ,cog,Real3D(0, 0, 0), vptype, 0, 0]
            vParticles.append(part)
            # first item in tuple is the virtual particle id:
            t=[vpid]
            t.append(k)
            t=t+v
            vptuples.append(t)
            #print "VPARTICLE", part, "TUPLE", t
        system.storage.addParticles(vParticles, *vpprops)
        #always decompose before adding tuples
        system.storage.decompose()
        for t in vptuples:
            ftpl.addTuple(t)
        extVP = espressopp.integrator.ExtVirtualParticles(system, vcl)
        extVP.addVirtualParticleTypes([vptype])
        extVP.setFixedTupleList(ftpl)
        integrator.addExtension(extVP)

    # expand non-bonded potentials: each classical type pair potential is
    # copied to the corresponding type pair of every imaginary time slice
    numInteraction=system.getNumberOfInteractions()
    for n in xrange(numInteraction):
        interaction=system.getInteraction(n)
        ## TODO: in case of VVL: clone interaction, add potential!
        print "expanding interaction", interaction
        if interaction.bondType() == espressopp.interaction.Nonbonded:
            for i in xrange(P):
                for j in xrange(numtypes):
                    for k in xrange(numtypes):
                        pot=interaction.getPotential(j, k)
                        interaction.setPotential(numtypes*i+j, numtypes*i+k, pot)
                        print "Interaction", numtypes*i+j, numtypes*i+k, pot
            if not disableVVL:
                vl=interaction.getVerletList()
                #print "VL has", vl.totalSize(),"disconnecting"
                vl.disconnect()
                interaction.setVerletList(iVerletLists[1])
        if interaction.bondType() == espressopp.interaction.Pair:
            bond_fpl=interaction.getFixedPairList()
            cla_bonds=[]
            # loop over bond lists returned by each cpu
            for l in bond_fpl.getBonds():
                cla_bonds.extend(l)
            #print "CLA BONDS", bond_fpl.size()
            for i in xrange(1, P):
                tmp=0
                for b in cla_bonds:
                    # create additional bonds for this imag time
                    bond_fpl.add(b[0]+num_cla_part*i, b[1]+num_cla_part*i)
                    tmp+=1
                #print "trying to add", tmp, "bonds"
                #print "i=", i, " PI BONDS", bond_fpl.size()
        if interaction.bondType() == espressopp.interaction.Angular:
            angle_ftl=interaction.getFixedTripleList()
            # loop over triple lists returned by each cpu
            cla_angles=[]
            for l in angle_ftl.getTriples():
                cla_angles.extend(l)
            #print "CLA_ANGLES", cla_angles
            for i in xrange(1, P):
                for a in cla_angles:
                    # create additional angles for this imag time
                    angle_ftl.add(a[0]+num_cla_part*i,
                    a[1]+num_cla_part*i, a[2]+num_cla_part*i)
        if interaction.bondType() == espressopp.interaction.Dihedral:
            dihedral_fql=interaction.getFixedQuadrupleList()
            cla_dihedrals=[]
            for l in dihedral_fql.getQuadruples():
                cla_dihedrals.extend(l)
            for i in xrange(1, P):
                for d in cla_dihedrals:
                    # create additional dihedrals for this imag time
                    dihedral_fql.add(d[0]+num_cla_part*i,
                    d[1]+num_cla_part*i, d[2]+num_cla_part*i, d[3]+num_cla_part*i)

    # replicate non-bonded exclusions for every imaginary time slice
    piexcl=[]
    for i in xrange(1, P):
        for e in exclusions:
            # create additional exclusions for this imag time
            piexcl.append((e[0]+num_cla_part*i, e[1]+num_cla_part*i))
    exclusions.extend(piexcl)

    if not disableVVL:
        vvl.exclude(exclusions)

    # now we analyze how many unique different masses are in the system as we have to create an harmonic spring interaction for each of them
    unique_masses=[]
    for p in allParticles:
        mass=p[propDict['mass']]
        if not mass in unique_masses:
            unique_masses.append(mass)
    kineticTermInteractions={} # key: mass value: corresponding harmonic spring interaction
    for m in unique_masses:
        fpl=espressopp.FixedPairList(system.storage)
        # spring constant of the PI kinetic term: k = m * P^2 * (kT)^2 / hbar^2
        k=m*P*P*langevin.temperature*langevin.temperature/(hbar*hbar)
        pot=espressopp.interaction.Harmonic(k,0.0)
        interb = espressopp.interaction.FixedPairListHarmonic(system, fpl, pot)
        system.addInteraction(interb)
        kineticTermInteractions.update({m:interb})

    for idcla, idpi in ringids.iteritems():
        p=allParticlesById[idcla]
        mass=p[propDict['mass']]
        interactionList=kineticTermInteractions[mass].getFixedPairList() #find the appropriate interaction based on the mass
        # harmonic spring between atom at imag-time i and imag-time i+1
        for i in xrange(len(idpi)-1):
            interactionList.add(idpi[i],idpi[i+1])
        #close the ring: classical bead connects to first and last copy
        interactionList.add(idcla,idpi[0])
        interactionList.add(idcla,idpi[len(idpi)-1])

    # instead of scaling the potentials, we scale the temperature!
    langevin.temperature = langevin.temperature*P

    if not disableVVL:
        return iVerletLists
"""
kombu.utils.finalize
====================
Execute cleanup handlers when objects go out of scope.
Taken from :class:`multiprocessing.util.Finalize`.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import weakref
from itertools import count
__all__ = ["Finalize"]
class Finalize(object):
    """Object finalization using weakrefs.

    Registers *callback* to be invoked (with *args*/*kwargs*) when *obj*
    is garbage collected, or earlier by calling the instance directly.
    The callback runs at most once; :meth:`cancel` prevents it from
    running at all.  When ``obj`` is ``None`` an *exitpriority* must be
    supplied and the finalizer only fires when called explicitly.

    Not thread-safe.
    """
    # Shared counter breaking ties between equal exit priorities so that
    # registration order is preserved.  Using the next() builtin instead of
    # the bound ``count().next`` method keeps this working on Python 3 too.
    _count = count()
    # (exitpriority, sequence) -> Finalize instance for every pending
    # finalizer; class-level, shared by all instances.
    _registry = {}

    def __init__(self, obj, callback, args=(), kwargs=None,
                 exitpriority=None):
        if obj is not None:
            # The weakref callback is this instance itself, so the
            # finalizer fires automatically once *obj* is collected.
            self._weakref = weakref.ref(obj, self)
        else:
            # With no object to watch, an explicit priority is required.
            assert exitpriority is not None

        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        self._key = (exitpriority, next(self._count))

        self._registry[self._key] = self

    def __call__(self, wr=None):
        """Run the callback unless it has already been called or
        cancelled."""
        try:
            self._registry.pop(self._key)
        except KeyError:
            pass  # already run or cancelled: do nothing
        else:
            try:
                return self._callback(*self._args, **self._kwargs)
            finally:
                self._reset()

    def _reset(self):
        # Drop every reference so callback/arguments can be collected and
        # the finalizer can never fire twice.
        self._weakref = self._callback = self._args = \
            self._kwargs = self._key = None

    def cancel(self):
        """Cancel finalization of the object."""
        try:
            self._registry.pop(self._key)
        except KeyError:
            pass  # already run or cancelled
        else:
            self._reset()

    def still_active(self):
        """Return True if the finalizer has not run and is not cancelled."""
        # BUG FIX: the membership test was computed but never returned,
        # so this method always returned None (falsy even when active).
        return self._key in self._registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            return "<Finalize: (dead)>"

        if obj is None:
            # BUG FIX: previously returned bare None here, which made
            # repr() raise TypeError once the referent was collected.
            return "<Finalize: (dead)>"

        x = '<Finalize object, callback=%s' % \
            getattr(self._callback, '__name__', self._callback)
        if self._args:
            x += ', args=%r' % (self._args, )
        if self._kwargs:
            x += ', kwargs=%r' % (self._kwargs, )
        if self._key[0] is not None:
            # typo fix: was 'exitprority'
            x += ', exitpriority=%r' % (self._key[0], )
        return x + '>'
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#include <ngx_iocp_module.h>
/* forward declarations of the event-module callbacks implemented below */

static ngx_int_t ngx_iocp_init(ngx_cycle_t *cycle, ngx_msec_t timer);
static ngx_thread_value_t __stdcall ngx_iocp_timer(void *data);
static void ngx_iocp_done(ngx_cycle_t *cycle);
static ngx_int_t ngx_iocp_add_event(ngx_event_t *ev, ngx_int_t event,
    ngx_uint_t key);
static ngx_int_t ngx_iocp_del_connection(ngx_connection_t *c, ngx_uint_t flags);
static ngx_int_t ngx_iocp_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags);
static void *ngx_iocp_create_conf(ngx_cycle_t *cycle);
static char *ngx_iocp_init_conf(ngx_cycle_t *cycle, void *conf);


static ngx_str_t      iocp_name = ngx_string("iocp");

/* configuration directives understood by the iocp event module */

static ngx_command_t  ngx_iocp_commands[] = {

    { ngx_string("iocp_threads"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_num_slot,
      0,
      offsetof(ngx_iocp_conf_t, threads),
      NULL },

    { ngx_string("post_acceptex"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_num_slot,
      0,
      offsetof(ngx_iocp_conf_t, post_acceptex),
      NULL },

    { ngx_string("acceptex_read"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_iocp_conf_t, acceptex_read),
      NULL },

      ngx_null_command
};


static ngx_event_module_t  ngx_iocp_module_ctx = {
    &iocp_name,
    ngx_iocp_create_conf,                  /* create configuration */
    ngx_iocp_init_conf,                    /* init configuration */

    {
        ngx_iocp_add_event,                /* add an event */
        NULL,                              /* delete an event */
        NULL,                              /* enable an event */
        NULL,                              /* disable an event */
        NULL,                              /* add an connection */
        ngx_iocp_del_connection,           /* delete an connection */
        NULL,                              /* trigger a notify */
        ngx_iocp_process_events,           /* process the events */
        ngx_iocp_init,                     /* init the events */
        ngx_iocp_done                      /* done the events */
    }

};

ngx_module_t  ngx_iocp_module = {
    NGX_MODULE_V1,
    &ngx_iocp_module_ctx,                  /* module context */
    ngx_iocp_commands,                     /* module directives */
    NGX_EVENT_MODULE,                      /* module type */
    NULL,                                  /* init master */
    NULL,                                  /* init module */
    NULL,                                  /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};


/* I/O hooks installed by ngx_iocp_init(): overlapped (asynchronous)
 * WSA recv/send operations; unused slots stay NULL */

ngx_os_io_t ngx_iocp_io = {
    ngx_overlapped_wsarecv,
    NULL,
    ngx_udp_overlapped_wsarecv,
    NULL,
    NULL,
    NULL,
    ngx_overlapped_wsasend_chain,
    0
};


/* handle of the single I/O completion port shared by all connections */
static HANDLE      iocp;
/* helper thread that periodically refreshes the cached time */
static ngx_tid_t   timer_thread;
/* timer resolution passed to the helper thread (milliseconds) */
static ngx_msec_t  msec;
/*
 * Create the global I/O completion port, install the iocp I/O and event
 * hooks, and, when a timer resolution is configured, start the helper
 * thread that keeps nginx's cached time up to date.
 */

static ngx_int_t
ngx_iocp_init(ngx_cycle_t *cycle, ngx_msec_t timer)
{
    ngx_iocp_conf_t  *cf;

    cf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);

    /* create the port once; it is shared across reconfigurations */
    if (iocp == NULL) {
        iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0,
                                      cf->threads);
    }

    if (iocp == NULL) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "CreateIoCompletionPort() failed");
        return NGX_ERROR;
    }

    ngx_io = ngx_iocp_io;

    ngx_event_actions = ngx_iocp_module_ctx.actions;

    ngx_event_flags = NGX_USE_IOCP_EVENT;

    if (timer == 0) {
        return NGX_OK;
    }

    /*
     * The waitable timer could not be used, because
     * GetQueuedCompletionStatus() does not set a thread to alertable state
     */

    if (timer_thread == NULL) {
        msec = timer;

        if (ngx_create_thread(&timer_thread, ngx_iocp_timer, &msec, cycle->log)
            != 0)
        {
            return NGX_ERROR;
        }
    }

    ngx_event_flags |= NGX_USE_TIMER_EVENT;

    return NGX_OK;
}
/*
 * Body of the helper thread that periodically refreshes nginx's cached
 * time (see the comment in ngx_iocp_init() about alertable waits).
 * The loop never exits; the trailing return only silences compilers
 * that warn about a missing return value.
 */

static ngx_thread_value_t __stdcall
ngx_iocp_timer(void *data)
{
    ngx_msec_t  timer = *(ngx_msec_t *) data;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ngx_cycle->log, 0,
                   "THREAD %p %p", &msec, data);

    for ( ;; ) {
        Sleep(timer);

        ngx_time_update();
#if 1
        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ngx_cycle->log, 0, "timer");
#endif
    }

#if defined(__WATCOMC__) || defined(__GNUC__)
    return 0;
#endif
}
/*
 * Shut the event module down by closing the completion port.
 */

static void
ngx_iocp_done(ngx_cycle_t *cycle)
{
    /*
     * BUG FIX: CloseHandle() returns a BOOL, which is zero on failure,
     * never -1; the old "== -1" test made the error path unreachable.
     */
    if (CloseHandle(iocp) == 0) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "iocp CloseHandle() failed");
    }

    iocp = NULL;
}
/*
 * Associate a connection's socket with the completion port.  Both the
 * read and the write event are marked active because a single
 * association delivers all completions for the socket; "key" tags the
 * completions (accept/io/connect) so process_events can dispatch them.
 */

static ngx_int_t
ngx_iocp_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t key)
{
    ngx_connection_t  *conn = (ngx_connection_t *) ev->data;

    conn->read->active = 1;
    conn->write->active = 1;

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "iocp add: fd:%d k:%ui ov:%p", conn->fd, key, &ev->ovlp);

    if (CreateIoCompletionPort((HANDLE) conn->fd, iocp, key, 0) == NULL) {
        ngx_log_error(NGX_LOG_ALERT, conn->log, ngx_errno,
                      "CreateIoCompletionPort() failed");

        return NGX_ERROR;
    }

    return NGX_OK;
}
/*
 * Nothing to do on connection removal: a socket cannot be detached from
 * a completion port, and completions for closed sockets are recognised
 * and dropped in ngx_iocp_process_events().  The disabled CancelIo()
 * call is kept for reference.
 */

static ngx_int_t
ngx_iocp_del_connection(ngx_connection_t *c, ngx_uint_t flags)
{
#if 0
    if (flags & NGX_CLOSE_EVENT) {
        return NGX_OK;
    }

    if (CancelIo((HANDLE) c->fd) == 0) {
        ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno, "CancelIo() failed");
        return NGX_ERROR;
    }
#endif

    return NGX_OK;
}
/*
 * Wait for a single I/O completion packet and dispatch it to the owning
 * event's handler.  Returns NGX_OK after handling one completion (or a
 * plain timeout) and NGX_ERROR on hard GetQueuedCompletionStatus()
 * failures.
 */

static ngx_int_t
ngx_iocp_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags)
{
    int                rc;
    u_int              key;
    u_long             bytes;
    ngx_err_t          err;
    ngx_msec_t         delta;
    ngx_event_t       *ev;
    ngx_event_ovlp_t  *ovlp;

    if (timer == NGX_TIMER_INFINITE) {
        timer = INFINITE;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "iocp timer: %M", timer);

    rc = GetQueuedCompletionStatus(iocp, &bytes, (PULONG_PTR) &key,
                                   (LPOVERLAPPED *) &ovlp, (u_long) timer);

    /* capture the error immediately, before any call can clobber it */
    if (rc == 0) {
        err = ngx_errno;
    } else {
        err = 0;
    }

    delta = ngx_current_msec;

    if (flags & NGX_UPDATE_TIME) {
        ngx_time_update();
    }

    ngx_log_debug4(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "iocp: %d b:%d k:%d ov:%p", rc, bytes, key, ovlp);

    /* only log the wait duration for finite timeouts */
    if (timer != INFINITE) {
        delta = ngx_current_msec - delta;

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "iocp timer: %M, delta: %M", timer, delta);
    }

    if (err) {
        /* no overlapped structure means the wait itself failed or timed out */
        if (ovlp == NULL) {
            if (err != WAIT_TIMEOUT) {
                ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
                              "GetQueuedCompletionStatus() failed");

                return NGX_ERROR;
            }

            return NGX_OK;
        }

        /* a completion packet was dequeued, but the operation itself failed */
        ovlp->error = err;
    }

    if (ovlp == NULL) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "GetQueuedCompletionStatus() returned no operation");

        return NGX_ERROR;
    }

    ev = ovlp->event;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, err, "iocp event:%p", ev);

    if (err == ERROR_NETNAME_DELETED /* the socket was closed */
        || err == ERROR_OPERATION_ABORTED /* the operation was canceled */)
    {
        /*
         * the WSA_OPERATION_ABORTED completion notification
         * for a file descriptor that was closed
         */

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, err,
                       "iocp: aborted event %p", ev);

        return NGX_OK;
    }

    if (err) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
                      "GetQueuedCompletionStatus() returned operation error");
    }

    /* mark the event according to the key supplied at association time */
    switch (key) {

    case NGX_IOCP_ACCEPT:
        if (bytes) {
            ev->ready = 1;
        }
        break;

    case NGX_IOCP_IO:
        ev->complete = 1;
        ev->ready = 1;
        break;

    case NGX_IOCP_CONNECT:
        ev->ready = 1;
    }

    ev->available = bytes;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "iocp event handler: %p", ev->handler);

    ev->handler(ev);

    return NGX_OK;
}
/*
 * Allocate the module configuration and mark every field as unset so
 * that ngx_iocp_init_conf() can later apply the defaults.
 */

static void *
ngx_iocp_create_conf(ngx_cycle_t *cycle)
{
    ngx_iocp_conf_t  *iocpcf;

    iocpcf = ngx_palloc(cycle->pool, sizeof(ngx_iocp_conf_t));
    if (iocpcf == NULL) {
        return NULL;
    }

    iocpcf->threads = NGX_CONF_UNSET;
    iocpcf->post_acceptex = NGX_CONF_UNSET;
    iocpcf->acceptex_read = NGX_CONF_UNSET;

    return iocpcf;
}
/*
 * Apply defaults for any directive the configuration did not set:
 * no limit on concurrent threads, 10 posted AcceptEx() operations,
 * and reading the first data chunk together with the accept.
 */

static char *
ngx_iocp_init_conf(ngx_cycle_t *cycle, void *conf)
{
    ngx_iocp_conf_t  *iocpcf = conf;

    ngx_conf_init_value(iocpcf->threads, 0);
    ngx_conf_init_value(iocpcf->post_acceptex, 10);
    ngx_conf_init_value(iocpcf->acceptex_read, 1);

    return NGX_CONF_OK;
}
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the PyGTK mainloop.
In order to use this support, simply do the following::
| from twisted.internet import gtkreactor
| gtkreactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
"""
import sys
# System Imports
try:
    import pygtk
    pygtk.require('1.2')
# BUG FIX: "except ImportError, AttributeError:" caught only ImportError
# and rebound the module-level name AttributeError to the exception
# instance (Python 2 comma syntax); AttributeError itself was never
# caught.  The tuple form catches both, as the comment below intends.
except (ImportError, AttributeError):
    pass # maybe we're using pygtk before this hack existed.
import gtk
from zope.interface import implements
# Twisted Imports
from twisted.python import log, runtime, deprecate, versions
from twisted.internet.interfaces import IReactorFDSet
# Sibling Imports
from twisted.internet import posixbase, selectreactor
# Version stamp and message shared by every deprecation warning emitted by
# this module (gtk 1.x support was deprecated in Twisted 10.1 in favour of
# the gtk2reactor).
deprecatedSince = versions.Version("Twisted", 10, 1, 0)
deprecationMessage = ("All new applications should be written with gtk 2.x, "
                      "which is supported by twisted.internet.gtk2reactor.")
class GtkReactor(posixbase.PosixReactorBase):
    """
    GTK+ event loop reactor.

    @ivar _reads: A dictionary mapping L{FileDescriptor} instances to gtk
        INPUT_READ watch handles.

    @ivar _writes: A dictionary mapping L{FileDescriptor} instances to gtk
        INPUT_WRITE watch handles.

    @ivar _simtag: A gtk timeout handle for the next L{simulate} call.
    """
    implements(IReactorFDSet)

    # Accessing GtkReactor through this module emits a deprecation warning.
    deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                        __name__, "GtkReactor")

    def __init__(self):
        """
        Initialize the file descriptor tracking dictionaries and the base
        class.
        """
        self._simtag = None
        self._reads = {}
        self._writes = {}
        posixbase.PosixReactorBase.__init__(self)

    def addReader(self, reader):
        # Register with gtk at most once per descriptor; gtk calls
        # self.callback whenever the fd becomes readable.
        if reader not in self._reads:
            self._reads[reader] = gtk.input_add(reader, gtk.GDK.INPUT_READ, self.callback)

    def addWriter(self, writer):
        if writer not in self._writes:
            self._writes[writer] = gtk.input_add(writer, gtk.GDK.INPUT_WRITE, self.callback)

    def getReaders(self):
        return self._reads.keys()

    def getWriters(self):
        return self._writes.keys()

    def removeAll(self):
        # Delegate to the base-class helper, which removes and returns all
        # registered selectables.
        return self._removeAll(self._reads, self._writes)

    def removeReader(self, reader):
        if reader in self._reads:
            gtk.input_remove(self._reads[reader])
            del self._reads[reader]

    def removeWriter(self, writer):
        if writer in self._writes:
            gtk.input_remove(self._writes[writer])
            del self._writes[writer]

    # gtk timeout handle armed by doIteration() so a blocking
    # gtk.mainiteration(1) wakes up after `delay` seconds at the latest
    doIterationTimer = None

    def doIterationTimeout(self, *args):
        self.doIterationTimer = None
        return 0 # auto-remove

    def doIteration(self, delay):
        # flush some pending events, return if there was something to do
        # don't use the usual "while gtk.events_pending(): mainiteration()"
        # idiom because lots of IO (in particular test_tcp's
        # ProperlyCloseFilesTestCase) can keep us from ever exiting.
        log.msg(channel='system', event='iteration', reactor=self)
        if gtk.events_pending():
            gtk.mainiteration(0)
            return
        # nothing to do, must delay
        if delay == 0:
            return # shouldn't delay, so just return
        self.doIterationTimer = gtk.timeout_add(int(delay * 1000),
                                                self.doIterationTimeout)
        # This will either wake up from IO or from a timeout.
        gtk.mainiteration(1) # block
        # note: with the .simulate timer below, delays > 0.1 will always be
        # woken up by the .simulate timer
        if self.doIterationTimer:
            # if woken by IO, need to cancel the timer
            gtk.timeout_remove(self.doIterationTimer)
            self.doIterationTimer = None

    def crash(self):
        # Let the base class mark the reactor as stopped, then tear down
        # the gtk main loop.
        posixbase.PosixReactorBase.crash(self)
        gtk.mainquit()

    def run(self, installSignalHandlers=1):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        # Arm the first simulate() call, then hand control to gtk.
        gtk.timeout_add(0, self.simulate)
        gtk.mainloop()

    def _readAndWrite(self, source, condition):
        # note: gtk-1.2's gtk_input_add presents an API in terms of gdk
        # constants like INPUT_READ and INPUT_WRITE. Internally, it will add
        # POLL_HUP and POLL_ERR to the poll() events, but if they happen it
        # will turn them back into INPUT_READ and INPUT_WRITE. gdkevents.c
        # maps IN/HUP/ERR to INPUT_READ, and OUT/ERR to INPUT_WRITE. This
        # means there is no immediate way to detect a disconnected socket.

        # The g_io_add_watch() API is more suited to this task. I don't think
        # pygtk exposes it, though.
        why = None
        didRead = None
        try:
            if condition & gtk.GDK.INPUT_READ:
                why = source.doRead()
                didRead = source.doRead
            if not why and condition & gtk.GDK.INPUT_WRITE:
                # if doRead caused connectionLost, don't call doWrite
                # if doRead is doWrite, don't call it again.
                if not source.disconnected and source.doWrite != didRead:
                    why = source.doWrite()
                    didRead = source.doWrite # if failed it was in write
        except:
            why = sys.exc_info()[1]
            log.msg('Error In %s' % source)
            log.deferr()

        if why:
            self._disconnectSelectable(source, why, didRead == source.doRead)

    def callback(self, source, condition):
        log.callWithLogger(source, self._readAndWrite, source, condition)
        self.simulate() # fire Twisted timers
        return 1 # 1=don't auto-remove the source

    def simulate(self):
        """Run simulation loops and reschedule callbacks.
        """
        if self._simtag is not None:
            gtk.timeout_remove(self._simtag)
        self.runUntilCurrent()
        # self.timeout() may return None; this relies on Python 2 ordering
        # None below numbers in min(), handled by the check just below.
        timeout = min(self.timeout(), 0.1)
        if timeout is None:
            timeout = 0.1
        # Quoth someone other than me, "grumble", yet I know not why. Try to be
        # more specific in your complaints, guys. -exarkun
        self._simtag = gtk.timeout_add(int(timeout * 1010), self.simulate)
class PortableGtkReactor(selectreactor.SelectReactor):
    """Reactor that works on Windows.

    input_add is not supported on GTK+ for Win32, apparently.

    @ivar _simtag: A gtk timeout handle for the next L{simulate} call.
    """
    _simtag = None

    # Accessing PortableGtkReactor through this module emits a
    # deprecation warning.
    deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                        __name__, "PortableGtkReactor")

    def crash(self):
        # Let the base class mark the reactor as stopped, then tear down
        # the gtk main loop.
        selectreactor.SelectReactor.crash(self)
        gtk.mainquit()

    def run(self, installSignalHandlers=1):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self.simulate()
        gtk.mainloop()

    def simulate(self):
        """Run simulation loops and reschedule callbacks.
        """
        if self._simtag is not None:
            gtk.timeout_remove(self._simtag)
        self.iterate()
        # self.timeout() may return None; this relies on Python 2 ordering
        # None below numbers in min(), handled by the check just below.
        timeout = min(self.timeout(), 0.1)
        if timeout is None:
            timeout = 0.1
        # See comment for identical line in GtkReactor.simulate.
        # BUG FIX: gtk.timeout_add() takes an integer millisecond interval;
        # the int() cast was missing here although GtkReactor.simulate has
        # it, so a float slipped through.
        self._simtag = gtk.timeout_add(int(timeout * 1010), self.simulate)
def install():
    """
    Install a L{GtkReactor} as the global Twisted reactor and return it.
    """
    gtkReactor = GtkReactor()
    from twisted.internet.main import installReactor
    installReactor(gtkReactor)
    return gtkReactor
# Accessing install through this module emits a deprecation warning.
deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                    __name__, "install")
def portableInstall():
    """
    Install a L{PortableGtkReactor} as the global Twisted reactor and
    return it.
    """
    portableReactor = PortableGtkReactor()
    from twisted.internet.main import installReactor
    installReactor(portableReactor)
    return portableReactor
# Accessing portableInstall through this module emits a deprecation warning.
deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                    __name__, "portableInstall")

# gtk.input_add is unavailable on non-POSIX platforms (i.e. Windows), so the
# select()-based portable reactor is installed there instead.
if runtime.platform.getType() != 'posix':
    install = portableInstall

__all__ = ['install']
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Docker types.
*/
@NullMarked
package org.springframework.boot.buildpack.platform.docker.type;
import org.jspecify.annotations.NullMarked; | java | github | https://github.com/spring-projects/spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/type/package-info.java |
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox import IsA # noqa
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from openstack_dashboard import api
from openstack_dashboard.api import fwaas
from openstack_dashboard.test import helpers as test
class FirewallTests(test.TestCase):
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
DASHBOARD = 'project'
INDEX_URL = reverse_lazy('horizon:%s:firewalls:index' % DASHBOARD)
ADDRULE_PATH = 'horizon:%s:firewalls:addrule' % DASHBOARD
ADDPOLICY_PATH = 'horizon:%s:firewalls:addpolicy' % DASHBOARD
ADDFIREWALL_PATH = 'horizon:%s:firewalls:addfirewall' % DASHBOARD
RULE_DETAIL_PATH = 'horizon:%s:firewalls:ruledetails' % DASHBOARD
POLICY_DETAIL_PATH = 'horizon:%s:firewalls:policydetails' % DASHBOARD
FIREWALL_DETAIL_PATH = 'horizon:%s:firewalls:firewalldetails' % DASHBOARD
UPDATERULE_PATH = 'horizon:%s:firewalls:updaterule' % DASHBOARD
UPDATEPOLICY_PATH = 'horizon:%s:firewalls:updatepolicy' % DASHBOARD
UPDATEFIREWALL_PATH = 'horizon:%s:firewalls:updatefirewall' % DASHBOARD
INSERTRULE_PATH = 'horizon:%s:firewalls:insertrule' % DASHBOARD
REMOVERULE_PATH = 'horizon:%s:firewalls:removerule' % DASHBOARD
ADDROUTER_PATH = 'horizon:%s:firewalls:addrouter' % DASHBOARD
REMOVEROUTER_PATH = 'horizon:%s:firewalls:removerouter' % DASHBOARD
def set_up_expect(self, fwaas_router_extension=True):
# retrieve rules
tenant_id = self.tenant.id
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'fwaasrouterinsertion'
).AndReturn(fwaas_router_extension)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'fwaasrouterinsertion'
).MultipleTimes().AndReturn(fwaas_router_extension)
api.fwaas.rule_list_for_tenant(
IsA(http.HttpRequest),
tenant_id).AndReturn(self.fw_rules.list())
# retrieves policies
policies = self.fw_policies.list()
api.fwaas.policy_list_for_tenant(
IsA(http.HttpRequest), tenant_id).AndReturn(policies)
# retrieves firewalls
firewalls = self.firewalls.list()
api.fwaas.firewall_list_for_tenant(
IsA(http.HttpRequest), tenant_id).AndReturn(firewalls)
routers = self.routers.list()
api.neutron.router_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(routers)
api.neutron.router_list(
IsA(http.HttpRequest), tenant_id=tenant_id). \
MultipleTimes().AndReturn(routers)
api.fwaas.firewall_list_for_tenant(
IsA(http.HttpRequest), tenant_id='1'). \
MultipleTimes().AndReturn(firewalls)
def set_up_expect_with_exception(self):
    """Record expectations where every listing call raises a neutron error."""
    tenant_id = self.tenant.id
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'fwaasrouterinsertion').AndReturn(True)
    api.fwaas.rule_list_for_tenant(
        IsA(http.HttpRequest),
        tenant_id).AndRaise(self.exceptions.neutron)
    api.fwaas.policy_list_for_tenant(
        IsA(http.HttpRequest),
        tenant_id).AndRaise(self.exceptions.neutron)
    api.fwaas.firewall_list_for_tenant(
        IsA(http.HttpRequest),
        tenant_id).AndRaise(self.exceptions.neutron)
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant'),
                    api.neutron: ('is_extension_supported',
                                  'router_list',), })
def test_index_firewalls(self):
    """Index view renders the firewalls table with one row per firewall."""
    self.set_up_expect()
    self.mox.ReplayAll()
    tenant_id = self.tenant.id
    res = self.client.get(self.INDEX_URL, tenant_id=tenant_id)
    self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['table'].data),
                     len(self.firewalls.list()))
# TODO(absubram): Change test_index_firewalls for with and without
#                 router extensions.
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant'),
                    api.neutron: ('is_extension_supported',
                                  'router_list',), })
def test_index_policies(self):
    """Policies tab lists every firewall policy."""
    self.set_up_expect()
    self.mox.ReplayAll()
    tenant_id = self.tenant.id
    res = self.client.get(self.INDEX_URL + '?tab=fwtabs__policies',
                          tenant_id=tenant_id)
    self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['policiestable_table'].data),
                     len(self.fw_policies.list()))
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant'),
                    api.neutron: ('is_extension_supported',
                                  'router_list',), })
def test_index_rules(self):
    """Rules tab lists every firewall rule."""
    self.set_up_expect()
    self.mox.ReplayAll()
    tenant_id = self.tenant.id
    res = self.client.get(self.INDEX_URL + '?tab=fwtabs__rules',
                          tenant_id=tenant_id)
    self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['rulestable_table'].data),
                     len(self.fw_rules.list()))
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant'),
                    api.neutron: ('is_extension_supported',), })
def test_index_exception_firewalls(self):
    """API errors are handled: firewalls table renders empty, no crash."""
    self.set_up_expect_with_exception()
    self.mox.ReplayAll()
    tenant_id = self.tenant.id
    res = self.client.get(self.INDEX_URL, tenant_id=tenant_id)
    self.assertTemplateUsed(res,
                            '%s/firewalls/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res,
                            'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant'),
                    api.neutron: ('is_extension_supported',), })
def test_index_exception_policies(self):
    """API errors are handled: policies table renders empty, no crash."""
    self.set_up_expect_with_exception()
    self.mox.ReplayAll()
    tenant_id = self.tenant.id
    res = self.client.get(self.INDEX_URL + '?tab=fwtabs__policies',
                          tenant_id=tenant_id)
    self.assertTemplateUsed(res,
                            '%s/firewalls/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res,
                            'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['policiestable_table'].data), 0)
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant'),
                    api.neutron: ('is_extension_supported',), })
def test_index_exception_rules(self):
    """API errors are handled: rules table renders empty, no crash."""
    self.set_up_expect_with_exception()
    self.mox.ReplayAll()
    tenant_id = self.tenant.id
    res = self.client.get(self.INDEX_URL + '?tab=fwtabs__rules',
                          tenant_id=tenant_id)
    self.assertTemplateUsed(res,
                            '%s/firewalls/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res,
                            'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['rulestable_table'].data), 0)
@test.create_stubs({api.fwaas: ('rule_create',), })
def test_add_rule_post(self):
    """Posting a valid add-rule form calls rule_create and redirects."""
    rule1 = self.fw_rules.first()
    form_data = {'name': rule1.name,
                 'description': rule1.description,
                 'protocol': rule1.protocol,
                 'action': rule1.action,
                 'source_ip_address': rule1.source_ip_address,
                 'source_port': rule1.source_port,
                 'destination_ip_address': rule1.destination_ip_address,
                 'destination_port': rule1.destination_port,
                 'shared': rule1.shared,
                 'enabled': rule1.enabled
                 }
    api.fwaas.rule_create(
        IsA(http.HttpRequest), **form_data).AndReturn(rule1)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDRULE_PATH), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
def test_add_rule_post_with_error(self):
    """Invalid protocol/action choices produce two form errors."""
    rule1 = self.fw_rules.first()
    # 'abc' and 'pass' are not valid choices for protocol/action.
    form_data = {'name': rule1.name,
                 'description': rule1.description,
                 'protocol': 'abc',
                 'action': 'pass',
                 'source_ip_address': rule1.source_ip_address,
                 'source_port': rule1.source_port,
                 'destination_ip_address': rule1.destination_ip_address,
                 'destination_port': rule1.destination_port,
                 'shared': rule1.shared,
                 'enabled': rule1.enabled
                 }
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDRULE_PATH), form_data)
    self.assertFormErrors(res, 2)
@test.create_stubs({api.fwaas: ('policy_create',
                                'rule_list_for_tenant'), })
def test_add_policy_post(self):
    """Posting the add-policy workflow calls policy_create and redirects."""
    policy = self.fw_policies.first()
    rules = self.fw_rules.list()
    tenant_id = self.tenant.id
    # form_data is what the API should receive; post_data is what the
    # workflow form posts (the rules field is named 'rule' in the form).
    form_data = {'name': policy.name,
                 'description': policy.description,
                 'firewall_rules': policy.firewall_rules,
                 'shared': policy.shared,
                 'audited': policy.audited
                 }
    post_data = {'name': policy.name,
                 'description': policy.description,
                 'rule': policy.firewall_rules,
                 'shared': policy.shared,
                 'audited': policy.audited
                 }
    # NOTE: SelectRulesAction.populate_rule_choices() lists rule not
    # associated with any policy. We need to ensure that rules specified
    # in policy.firewall_rules in post_data (above) are not associated
    # with any policy. Test data in neutron_data is data in a stable state,
    # so we need to modify here.
    for rule in rules:
        if rule.id in policy.firewall_rules:
            rule.firewall_policy_id = rule.policy = None
    api.fwaas.rule_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(rules)
    api.fwaas.policy_create(
        IsA(http.HttpRequest), **form_data).AndReturn(policy)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDPOLICY_PATH), post_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_create',
                                'rule_list_for_tenant'), })
def test_add_policy_post_with_error(self):
    """A policy form missing the required name yields one form error."""
    policy = self.fw_policies.first()
    rules = self.fw_rules.list()
    tenant_id = self.tenant.id
    # 'name' is deliberately omitted to trigger the validation error.
    form_data = {'description': policy.description,
                 'firewall_rules': None,
                 'shared': policy.shared,
                 'audited': policy.audited
                 }
    api.fwaas.rule_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(rules)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDPOLICY_PATH), form_data)
    self.assertFormErrors(res, 1)
def _test_add_firewall_post(self, router_extension=False):
    """Shared driver for add-firewall tests, with/without router insertion.

    When `router_extension` is True, router selection expectations are
    recorded as well.
    """
    firewall = self.firewalls.first()
    policies = self.fw_policies.list()
    tenant_id = self.tenant.id
    if router_extension:
        routers = self.routers.list()
        firewalls = self.firewalls.list()
    form_data = {'name': firewall.name,
                 'description': firewall.description,
                 'firewall_policy_id': firewall.firewall_policy_id,
                 'shared': firewall.shared,
                 'admin_state_up': firewall.admin_state_up
                 }
    if router_extension:
        form_data['router_ids'] = firewall.router_ids
        api.neutron.router_list(
            IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(routers)
        api.fwaas.firewall_list_for_tenant(
            IsA(http.HttpRequest),
            tenant_id=tenant_id).AndReturn(firewalls)
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest),
        'fwaasrouterinsertion').AndReturn(router_extension)
    api.fwaas.policy_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(policies)
    api.fwaas.firewall_create(
        IsA(http.HttpRequest), **form_data).AndReturn(firewall)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDFIREWALL_PATH), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_create',
                                'policy_list_for_tenant',),
                    api.neutron: ('is_extension_supported',), })
def test_add_firewall_post(self):
    """Add-firewall without the router-insertion extension."""
    self._test_add_firewall_post()
# @test.create_stubs({api.fwaas: ('firewall_create',
#                                 'policy_list_for_tenant',
#                                 'firewall_list_for_tenant',),
#                     api.neutron: ('is_extension_supported',
#                                   'router_list'), })
# def test_add_firewall_post_with_router_extension(self):
#     self._test_add_firewall_post(router_extension=True)
# TODO(absubram): Fix test_add_firewall_post_with_router_extension
#                 It currently fails because views.py is not
#                 initializing the AddRouter workflow?

@test.create_stubs({api.fwaas: ('firewall_create',
                                'policy_list_for_tenant',),
                    api.neutron: ('is_extension_supported',), })
def test_add_firewall_post_with_error(self):
    """A firewall form missing its policy yields one form error."""
    firewall = self.firewalls.first()
    policies = self.fw_policies.list()
    tenant_id = self.tenant.id
    # firewall_policy_id is required; None triggers the validation error.
    form_data = {'name': firewall.name,
                 'description': firewall.description,
                 'firewall_policy_id': None,
                 'shared': firewall.shared,
                 'admin_state_up': firewall.admin_state_up
                 }
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest),
        'fwaasrouterinsertion').AndReturn(False)
    api.fwaas.policy_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(policies)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDFIREWALL_PATH), form_data)
    self.assertFormErrors(res, 1)
@test.create_stubs({api.fwaas: ('rule_get',)})
def test_update_rule_get(self):
    """GET on the update-rule view renders the updaterule template."""
    rule = self.fw_rules.first()
    api.fwaas.rule_get(IsA(http.HttpRequest), rule.id).AndReturn(rule)
    self.mox.ReplayAll()
    res = self.client.get(reverse(self.UPDATERULE_PATH, args=(rule.id,)))
    self.assertTemplateUsed(res, 'project/firewalls/updaterule.html')
@test.create_stubs({api.fwaas: ('rule_get', 'rule_update')})
def test_update_rule_post(self):
    """Posting the update-rule form calls rule_update with cleaned data."""
    rule = self.fw_rules.first()
    api.fwaas.rule_get(IsA(http.HttpRequest), rule.id).AndReturn(rule)
    data = {'name': 'new name',
            'description': 'new desc',
            'protocol': 'ICMP',
            'action': 'ALLOW',
            'shared': False,
            'enabled': True,
            'source_ip_address': rule.source_ip_address,
            'destination_ip_address': None,
            'source_port': None,
            'destination_port': rule.destination_port,
            }
    api.fwaas.rule_update(IsA(http.HttpRequest), rule.id, **data)\
        .AndReturn(rule)
    self.mox.ReplayAll()
    # Blank form fields should be converted to None before the API call.
    form_data = data.copy()
    form_data['destination_ip_address'] = ''
    form_data['source_port'] = ''
    res = self.client.post(
        reverse(self.UPDATERULE_PATH, args=(rule.id,)), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('rule_get', 'rule_update')})
def test_update_protocol_any_rule_post(self):
    """Updating a rule whose protocol is ANY (None) works as usual."""
    # protocol any means protocol == None in neutron context.
    rule = self.fw_rules.get(protocol=None)
    api.fwaas.rule_get(IsA(http.HttpRequest), rule.id).AndReturn(rule)
    data = {'name': 'new name',
            'description': 'new desc',
            'protocol': 'ICMP',
            'action': 'ALLOW',
            'shared': False,
            'enabled': True,
            'source_ip_address': rule.source_ip_address,
            'destination_ip_address': None,
            'source_port': None,
            'destination_port': rule.destination_port,
            }
    api.fwaas.rule_update(IsA(http.HttpRequest), rule.id, **data)\
        .AndReturn(rule)
    self.mox.ReplayAll()
    form_data = data.copy()
    form_data['destination_ip_address'] = ''
    form_data['source_port'] = ''
    res = self.client.post(
        reverse(self.UPDATERULE_PATH, args=(rule.id,)), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('rule_get', 'rule_update')})
def test_update_rule_protocol_to_ANY_post(self):
    """Selecting 'ANY' in the form maps to protocol=None in the API call."""
    rule = self.fw_rules.first()
    api.fwaas.rule_get(IsA(http.HttpRequest), rule.id).AndReturn(rule)
    data = {'name': 'new name',
            'description': 'new desc',
            'protocol': None,
            'action': 'ALLOW',
            'shared': False,
            'enabled': True,
            'source_ip_address': rule.source_ip_address,
            'destination_ip_address': None,
            'source_port': None,
            'destination_port': rule.destination_port,
            }
    api.fwaas.rule_update(IsA(http.HttpRequest), rule.id, **data)\
        .AndReturn(rule)
    self.mox.ReplayAll()
    form_data = data.copy()
    form_data['destination_ip_address'] = ''
    form_data['source_port'] = ''
    form_data['protocol'] = 'ANY'
    res = self.client.post(
        reverse(self.UPDATERULE_PATH, args=(rule.id,)), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get',)})
def test_update_policy_get(self):
    """GET on the update-policy view renders the updatepolicy template."""
    policy = self.fw_policies.first()
    api.fwaas.policy_get(IsA(http.HttpRequest),
                         policy.id).AndReturn(policy)
    self.mox.ReplayAll()
    res = self.client.get(
        reverse(self.UPDATEPOLICY_PATH, args=(policy.id,)))
    self.assertTemplateUsed(res, 'project/firewalls/updatepolicy.html')
@test.create_stubs({api.fwaas: ('policy_get', 'policy_update',
                                'rule_list_for_tenant')})
def test_update_policy_post(self):
    """Posting the update-policy form calls policy_update and redirects."""
    policy = self.fw_policies.first()
    api.fwaas.policy_get(IsA(http.HttpRequest),
                         policy.id).AndReturn(policy)
    data = {'name': 'new name',
            'description': 'new desc',
            'shared': True,
            'audited': False
            }
    api.fwaas.policy_update(IsA(http.HttpRequest), policy.id, **data)\
        .AndReturn(policy)
    self.mox.ReplayAll()
    res = self.client.post(
        reverse(self.UPDATEPOLICY_PATH, args=(policy.id,)), data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_get', 'policy_list_for_tenant')})
def test_update_firewall_get(self):
    """GET on the update-firewall view renders the updatefirewall template."""
    firewall = self.firewalls.first()
    policies = self.fw_policies.list()
    tenant_id = self.tenant.id
    api.fwaas.policy_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(policies)
    api.fwaas.firewall_get(IsA(http.HttpRequest),
                           firewall.id).AndReturn(firewall)
    self.mox.ReplayAll()
    res = self.client.get(
        reverse(self.UPDATEFIREWALL_PATH, args=(firewall.id,)))
    self.assertTemplateUsed(res, 'project/firewalls/updatefirewall.html')
@test.create_stubs({api.fwaas: ('firewall_get', 'policy_list_for_tenant',
                                'firewall_update')})
def test_update_firewall_post(self):
    """Posting the update-firewall form calls firewall_update and redirects."""
    firewall = self.firewalls.first()
    tenant_id = self.tenant.id
    api.fwaas.firewall_get(IsA(http.HttpRequest),
                           firewall.id).AndReturn(firewall)
    data = {'name': 'new name',
            'description': 'new desc',
            'firewall_policy_id': firewall.firewall_policy_id,
            'admin_state_up': False
            }
    policies = self.fw_policies.list()
    api.fwaas.policy_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(policies)
    api.fwaas.firewall_update(IsA(http.HttpRequest), firewall.id, **data)\
        .AndReturn(firewall)
    self.mox.ReplayAll()
    res = self.client.post(
        reverse(self.UPDATEFIREWALL_PATH, args=(firewall.id,)), data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get', 'policy_insert_rule',
                                'rule_list_for_tenant', 'rule_get')})
def test_policy_insert_rule(self):
    """Inserting a rule between two existing rules calls policy_insert_rule."""
    policy = self.fw_policies.first()
    tenant_id = self.tenant.id
    rules = self.fw_rules.list()
    new_rule_id = rules[2].id
    data = {'firewall_rule_id': new_rule_id,
            'insert_before': rules[1].id,
            'insert_after': rules[0].id}
    api.fwaas.policy_get(IsA(http.HttpRequest),
                         policy.id).AndReturn(policy)
    # Simulate the post-insert ordering returned by the API.
    policy.firewall_rules = [rules[0].id,
                             new_rule_id,
                             rules[1].id]
    api.fwaas.rule_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(rules)
    api.fwaas.rule_get(
        IsA(http.HttpRequest), new_rule_id).AndReturn(rules[2])
    api.fwaas.policy_insert_rule(IsA(http.HttpRequest), policy.id, **data)\
        .AndReturn(policy)
    self.mox.ReplayAll()
    res = self.client.post(
        reverse(self.INSERTRULE_PATH, args=(policy.id,)), data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get', 'policy_remove_rule',
                                'rule_list_for_tenant', 'rule_get')})
def test_policy_remove_rule(self):
    """Removing a rule from a policy calls policy_remove_rule."""
    policy = self.fw_policies.first()
    tenant_id = self.tenant.id
    rules = self.fw_rules.list()
    remove_rule_id = policy.firewall_rules[0]
    left_rule_id = policy.firewall_rules[1]
    data = {'firewall_rule_id': remove_rule_id}
    # Hand-built policy object representing the API state after removal.
    after_remove_policy_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                                'tenant_id': '1',
                                'name': 'policy1',
                                'description': 'policy description',
                                'firewall_rules': [left_rule_id],
                                'audited': True,
                                'shared': True}
    after_remove_policy = fwaas.Policy(after_remove_policy_dict)
    api.fwaas.policy_get(IsA(http.HttpRequest),
                         policy.id).AndReturn(policy)
    api.fwaas.rule_list_for_tenant(
        IsA(http.HttpRequest), tenant_id).AndReturn(rules)
    api.fwaas.rule_get(
        IsA(http.HttpRequest), remove_rule_id).AndReturn(rules[0])
    api.fwaas.policy_remove_rule(IsA(http.HttpRequest), policy.id, **data)\
        .AndReturn(after_remove_policy)
    self.mox.ReplayAll()
    res = self.client.post(
        reverse(self.REMOVERULE_PATH, args=(policy.id,)), data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_get',
                                'firewall_list_for_tenant',
                                'firewall_update',
                                'firewall_unassociated_routers_list')})
def test_firewall_add_router(self):
    """Adding a router associates it on top of the existing router list."""
    tenant_id = self.tenant.id
    firewall = self.firewalls.first()
    routers = self.routers.list()
    existing_router_ids = firewall.router_ids
    add_router_ids = [routers[1].id]
    form_data = {'router_ids': add_router_ids}
    post_data = {'router_ids': add_router_ids + existing_router_ids}
    api.fwaas.firewall_get(
        IsA(http.HttpRequest), firewall.id).AndReturn(firewall)
    api.fwaas.firewall_unassociated_routers_list(
        IsA(http.HttpRequest), tenant_id).AndReturn(routers)
    # NOTE(review): this builds a list of two lists, not a flat id list;
    # presumably `add_router_ids + existing_router_ids` was intended
    # (as in post_data above) — confirm against the view under test.
    firewall.router_ids = [add_router_ids, existing_router_ids]
    api.fwaas.firewall_update(
        IsA(http.HttpRequest),
        firewall.id, **post_data).AndReturn(firewall)
    self.mox.ReplayAll()
    res = self.client.post(
        reverse(self.ADDROUTER_PATH, args=(firewall.id,)), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_get',
                                'firewall_update',
                                'firewall_unassociated_routers_list'),
                    api.neutron: ('router_list',), })
def test_firewall_remove_router(self):
    """Removing all routers updates the firewall with an empty router list."""
    firewall = self.firewalls.first()
    tenant_id = self.tenant.id
    routers = self.routers.list()
    existing_router_ids = firewall.router_ids
    form_data = {'router_ids': existing_router_ids}
    api.fwaas.firewall_get(
        IsA(http.HttpRequest), firewall.id).AndReturn(firewall)
    api.neutron.router_list(
        IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(routers)
    firewall.router_ids = []
    api.fwaas.firewall_update(
        IsA(http.HttpRequest),
        firewall.id, **form_data).AndReturn(firewall)
    self.mox.ReplayAll()
    res = self.client.post(
        reverse(self.REMOVEROUTER_PATH, args=(firewall.id,)), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant',
                                'rule_delete'),
                    api.neutron: ('is_extension_supported',
                                  'router_list',), })
def test_delete_rule(self):
    """The rules table delete action invokes rule_delete."""
    self.set_up_expect()
    rule = self.fw_rules.first()
    api.fwaas.rule_delete(IsA(http.HttpRequest), rule.id)
    self.mox.ReplayAll()
    form_data = {"action": "rulestable__deleterule__%s" % rule.id}
    res = self.client.post(self.INDEX_URL, form_data)
    self.assertNoFormErrors(res)
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant',
                                'policy_delete'),
                    api.neutron: ('is_extension_supported',
                                  'router_list',), })
def test_delete_policy(self):
    """The policies table delete action invokes policy_delete."""
    self.set_up_expect()
    policy = self.fw_policies.first()
    api.fwaas.policy_delete(IsA(http.HttpRequest), policy.id)
    self.mox.ReplayAll()
    form_data = {"action": "policiestable__deletepolicy__%s" % policy.id}
    res = self.client.post(self.INDEX_URL, form_data)
    self.assertNoFormErrors(res)
@test.create_stubs({api.fwaas: ('firewall_list_for_tenant',
                                'policy_list_for_tenant',
                                'rule_list_for_tenant',
                                'firewall_delete'),
                    api.neutron: ('is_extension_supported',
                                  'router_list',), })
def test_delete_firewall(self):
    """The firewalls table delete action invokes firewall_delete."""
    self.set_up_expect()
    fwl = self.firewalls.first()
    api.fwaas.firewall_delete(IsA(http.HttpRequest), fwl.id)
    self.mox.ReplayAll()
    form_data = {"action": "firewallstable__deletefirewall__%s" % fwl.id}
    res = self.client.post(self.INDEX_URL, form_data)
    self.assertNoFormErrors(res)
# -*- coding: utf-8 -*-
"""
DNS server framework - intended to simplify creation of custom resolvers.
Comprises the following components:
DNSServer - socketserver wrapper (in most cases you should just
need to pass this an appropriate resolver instance
and start in either foreground/background)
DNSHandler - handler instantiated by DNSServer to handle requests
The 'handle' method deals with the sending/receiving
packets (handling TCP length prefix) and delegates
the protocol handling to 'get_reply'. This decodes
packet, hands off a DNSRecord to the Resolver instance,
and encodes the returned DNSRecord.
In most cases you don't need to change DNSHandler unless
you need to get hold of the raw protocol data in the
Resolver
DNSLogger - The class provides a default set of logging functions for
the various stages of the request handled by a DNSServer
instance which are enabled/disabled by flags in the 'log'
class variable.
Resolver - Instance implementing a 'resolve' method that receives
the decodes request packet and returns a response.
To implement a custom resolver in most cases all you need
is to implement this interface.
Note that there is only a single instance of the Resolver
so need to be careful about thread-safety and blocking
The following examples use the server framework:
fixedresolver.py - Simple resolver which will respond to all
requests with a fixed response
zoneresolver.py - Resolver which will take a standard zone
file input
shellresolver.py - Example of a dynamic resolver
proxy.py - DNS proxy
intercept.py - Intercepting DNS proxy
>>> resolver = BaseResolver()
>>> logger = DNSLogger(prefix=False)
>>> server = DNSServer(resolver,port=8053,address="localhost",logger=logger)
>>> server.start_thread()
>>> q = DNSRecord.question("abc.def")
>>> a = q.send("localhost",8053)
Request: [...] (udp) / 'abc.def.' (A)
Reply: [...] (udp) / 'abc.def.' (A) / RRs:
>>> print(DNSRecord.parse(a))
;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.def. IN A
>>> server.stop()
>>> class TestResolver:
... def resolve(self,request,handler):
... reply = request.reply()
... reply.add_answer(*RR.fromZone("abc.def. 60 A 1.2.3.4"))
... return reply
>>> resolver = TestResolver()
>>> server = DNSServer(resolver,port=8053,address="localhost",logger=logger,tcp=True)
>>> server.start_thread()
>>> a = q.send("localhost",8053,tcp=True)
Request: [...] (tcp) / 'abc.def.' (A)
Reply: [...] (tcp) / 'abc.def.' (A) / RRs: A
>>> print(DNSRecord.parse(a))
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.def. IN A
;; ANSWER SECTION:
abc.def. 60 IN A 1.2.3.4
>>> server.stop()
"""
from __future__ import print_function
import binascii,socket,struct,threading,time
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from dnslib import DNSRecord,DNSError,QTYPE,RCODE,RR
class BaseResolver(object):
    """
    Base resolver implementation. Provides the 'resolve' hook invoked by
    DNSHandler with the decoded request (a DNSRecord) and expected to
    return a DNSRecord reply.

    Custom resolvers usually only need to override 'resolve' (see the
    fixedresolver/zoneresolver/shellresolver examples).

    A single instance is shared across all DNSHandler instances, so
    implementations must consider blocking and thread safety.
    """

    def resolve(self, request, handler):
        """Default behaviour: answer every query with NXDOMAIN."""
        reply = request.reply()
        reply.header.rcode = RCODE.NXDOMAIN
        return reply
class DNSHandler(socketserver.BaseRequestHandler):
    """
    Handler for socketserver. Transparently handles both TCP/UDP requests
    (TCP requests have length prepended) and hands off lookup to resolver
    instance specified in <SocketServer>.resolver
    """

    udplen = 0                  # Max udp packet length (0 = ignore)

    def handle(self):
        # SOCK_STREAM means this handler was spawned by a TCP server;
        # anything else is the UDP datagram path.
        if self.server.socket_type == socket.SOCK_STREAM:
            self.protocol = 'tcp'
            data = self.request.recv(8192)
            # TCP DNS messages carry a 2-byte big-endian length prefix;
            # keep reading until the full message has arrived.
            length = struct.unpack("!H",bytes(data[:2]))[0]
            while len(data) - 2 < length:
                data += self.request.recv(8192)
            data = data[2:]
        else:
            self.protocol = 'udp'
            # For UDP, self.request is a (data, socket) pair.
            data,connection = self.request

        self.server.logger.log_recv(self,data)

        try:
            rdata = self.get_reply(data)
            self.server.logger.log_send(self,rdata)

            if self.protocol == 'tcp':
                # Re-attach the 2-byte length prefix on the way out.
                rdata = struct.pack("!H",len(rdata)) + rdata
                self.request.sendall(rdata)
            else:
                connection.sendto(rdata,self.client_address)
        except DNSError as e:
            # Undecodable packet: log it and send no reply.
            self.server.logger.log_error(self,e)

    def get_reply(self,data):
        """Decode `data`, delegate to the resolver, and encode the reply.

        For UDP, a reply longer than `udplen` (when non-zero) is truncated
        so the client can retry over TCP.
        """
        request = DNSRecord.parse(data)

        self.server.logger.log_request(self,request)

        resolver = self.server.resolver
        reply = resolver.resolve(request,self)
        self.server.logger.log_reply(self,reply)

        if self.protocol == 'udp':
            rdata = reply.pack()
            if self.udplen and len(rdata) > self.udplen:
                truncated_reply = reply.truncate()
                rdata = truncated_reply.pack()
                self.server.logger.log_truncated(self,truncated_reply)
        else:
            rdata = reply.pack()

        return rdata
class DNSLogger:
    """
    The class provides a default set of logging functions for the various
    stages of the request handled by a DNSServer instance which are
    enabled/disabled by flags in the 'log' class variable.

    To customise logging create an object which implements the DNSLogger
    interface and pass instance to DNSServer.

    The methods which the logger instance must implement are:

        log_recv          - Raw packet received
        log_send          - Raw packet sent
        log_request       - DNS Request
        log_reply         - DNS Response
        log_truncated     - Truncated
        log_error         - Decoding error
        log_data          - Dump full request/response
    """

    def __init__(self,log="",prefix=True):
        """
        Selectively enable log hooks depending on log argument
        (comma separated list of hooks to enable/disable)

        - If empty enable default log hooks
        - If entry starts with '+' (eg. +send,+recv) enable hook
        - If entry starts with '-' (eg. -data) disable hook
        - If entry doesn't start with +/- replace defaults

        Prefix argument enables/disables log prefix
        """
        default = ["request","reply","truncated","error"]
        log = log.split(",") if log else []
        # Unprefixed entries replace the default hook set entirely.
        enabled = set([ s for s in log if s[0] not in '+-'] or default)
        [ enabled.add(l[1:]) for l in log if l.startswith('+') ]
        [ enabled.discard(l[1:]) for l in log if l.startswith('-') ]
        # Disable unwanted hooks by shadowing the class method with a
        # no-op on this instance.
        for l in ['log_recv','log_send','log_request','log_reply',
                  'log_truncated','log_error','log_data']:
            if l[4:] not in enabled:
                setattr(self,l,self.log_pass)
        self.prefix = prefix

    def log_pass(self,*args):
        """No-op used in place of any disabled log hook."""
        pass

    def log_prefix(self,handler):
        """Return the 'timestamp [Handler:Resolver] ' prefix (or '')."""
        if self.prefix:
            # BUGFIX: was "%Y-%M-%d %X" -- %M is the minute field; the
            # date portion needs %m (zero-padded month).
            return "%s [%s:%s] " % (time.strftime("%Y-%m-%d %X"),
                               handler.__class__.__name__,
                               handler.server.resolver.__class__.__name__)
        else:
            return ""

    def log_recv(self,handler,data):
        """Log a raw received packet (hex dump)."""
        print("%sReceived: [%s:%d] (%s) <%d> : %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    len(data),
                    binascii.hexlify(data)))

    def log_send(self,handler,data):
        """Log a raw sent packet (hex dump)."""
        print("%sSent: [%s:%d] (%s) <%d> : %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    len(data),
                    binascii.hexlify(data)))

    def log_request(self,handler,request):
        """Log a decoded DNS request (qname/qtype)."""
        print("%sRequest: [%s:%d] (%s) / '%s' (%s)" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    request.q.qname,
                    QTYPE[request.q.qtype]))
        self.log_data(request)

    def log_reply(self,handler,reply):
        """Log a DNS reply including the answer RR types."""
        print("%sReply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    reply.q.qname,
                    QTYPE[reply.q.qtype],
                    ",".join([QTYPE[a.rtype] for a in reply.rr])))
        self.log_data(reply)

    def log_truncated(self,handler,reply):
        """Log a reply that was truncated to fit the UDP size limit."""
        print("%sTruncated Reply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    reply.q.qname,
                    QTYPE[reply.q.qtype],
                    ",".join([QTYPE[a.rtype] for a in reply.rr])))
        self.log_data(reply)

    def log_error(self,handler,e):
        """Log a packet that failed to decode."""
        print("%sInvalid Request: [%s:%d] (%s) :: %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    e))

    def log_data(self,dnsobj):
        """Dump the full request/response in zone-file format."""
        print("\n",dnsobj.toZone("    "),"\n",sep="")
class UDPServer(socketserver.UDPServer):
    """UDP transport with SO_REUSEADDR so the port can be rebound quickly."""
    allow_reuse_address = True
class TCPServer(socketserver.TCPServer):
    """TCP transport with SO_REUSEADDR so the port can be rebound quickly."""
    allow_reuse_address = True
class DNSServer(object):
    """
    Convenience wrapper for socketserver instance allowing
    either UDP/TCP server to be started in blocking mode
    or as a background thread.

    Processing is delegated to custom resolver (instance) and
    optionally custom logger (instance), handler (class), and
    server (class)

    In most cases only a custom resolver instance is required
    (and possibly logger)
    """
    def __init__(self,resolver,
                      address="",
                      port=53,
                      tcp=False,
                      logger=None,
                      handler=DNSHandler,
                      server=None):
        """
            resolver:   resolver instance
            address:    listen address (default: "")
            port:       listen port (default: 53)
            tcp:        UDP (false) / TCP (true) (default: False)
            logger:     logger instance (default: DNSLogger)
            handler:    handler class (default: DNSHandler)
            server:     socketserver class (default: UDPServer/TCPServer)
        """
        if not server:
            if tcp:
                server = TCPServer
            else:
                server = UDPServer
        self.server = server((address,port),handler)
        self.server.resolver = resolver
        self.server.logger = logger or DNSLogger()

    def start(self):
        """Serve requests, blocking the calling thread."""
        self.server.serve_forever()

    def start_thread(self):
        """Serve requests in a daemon background thread."""
        self.thread = threading.Thread(target=self.server.serve_forever)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop a server started with start()/start_thread()."""
        self.server.shutdown()

    def isAlive(self):
        """Return True while the background thread is running.

        (Method name kept for backward compatibility.)
        """
        # BUGFIX: Thread.isAlive() was removed in Python 3.9;
        # is_alive() is available on all supported versions.
        return self.thread.is_alive()
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring; ELLIPSIS lets
    # the examples elide variable details such as transaction ids.
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing for ResNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Default output edge length in pixels (networks consume square images).
IMAGE_SIZE = 224
# Margin used by the eval-time center crop: the crop covers
# image_size / (image_size + CROP_PADDING) of the shorter image edge.
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates cropped_image using one of the bboxes randomly distorted.

  See `tf.image.sample_distorted_bounding_box` for more documentation.

  Args:
    image_bytes: `Tensor` of binary image data.
    bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
        where each coordinate is [0, 1) and the coordinates are arranged
        as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
        image.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
        area of the image must contain at least this fraction of any bounding
        box supplied.
    aspect_ratio_range: An optional list of `float`s. The cropped area of the
        image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `float`s. The cropped area of the image
        must contain a fraction of the supplied image within in this range.
    max_attempts: An optional `int`. Number of attempts at generating a cropped
        region of the image of the specified constraints. After `max_attempts`
        failures, return the entire image.
    scope: Optional `str` for name scope.
  Returns:
    cropped image `Tensor`
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
    # Sample the crop from the JPEG header shape only -- the image itself
    # is not decoded until the crop window is known.
    shape = tf.image.extract_jpeg_shape(image_bytes)
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    bbox_begin, bbox_size, _ = sample_distorted_bounding_box

    # Crop the image to the specified bounding box.
    offset_y, offset_x, _ = tf.unstack(bbox_begin)
    target_height, target_width, _ = tf.unstack(bbox_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    # Fused decode-and-crop avoids decoding the full JPEG.
    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)

    return image
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
lambda: _decode_and_center_crop(image_bytes, image_size),
lambda: tf.image.resize_bicubic([image], # pylint: disable=g-long-lambda
[image_size, image_size])[0])
return image
def _decode_and_center_crop(image_bytes, image_size):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize_bicubic([image], [image_size, image_size])[0]
return image
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_random_crop(image_bytes, image_size)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=IMAGE_SIZE):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
if is_training:
return preprocess_for_train(image_bytes, use_bfloat16, image_size)
else:
return preprocess_for_eval(image_bytes, use_bfloat16, image_size) | unknown | codeparrot/codeparrot-clean | ||
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const { CachedSource, ConcatSource, RawSource } = require("webpack-sources");
const { UsageState } = require("./ExportsInfo");
const Template = require("./Template");
const CssModulesPlugin = require("./css/CssModulesPlugin");
const JavascriptModulesPlugin = require("./javascript/JavascriptModulesPlugin");
/** @typedef {import("webpack-sources").Source} Source */
/** @typedef {import("./Compiler")} Compiler */
/** @typedef {import("./ExportsInfo")} ExportsInfo */
/** @typedef {import("./ExportsInfo").ExportInfo} ExportInfo */
/** @typedef {import("./Module")} Module */
/** @typedef {import("./Module").BuildMeta} BuildMeta */
/** @typedef {import("./ModuleGraph")} ModuleGraph */
/** @typedef {import("./RequestShortener")} RequestShortener */
/**
* @template T
* @param {Iterable<T>} iterable iterable
* @returns {string} joined with comma
*/
const joinIterableWithComma = (iterable) => {
// This is more performant than Array.from().join(", ")
// as it doesn't create an array
let str = "";
let first = true;
for (const item of iterable) {
if (first) {
first = false;
} else {
str += ", ";
}
str += item;
}
return str;
};
/**
* @param {ConcatSource} source output
* @param {string} indent spacing
* @param {ExportsInfo} exportsInfo data
* @param {ModuleGraph} moduleGraph moduleGraph
* @param {RequestShortener} requestShortener requestShortener
* @param {Set<ExportInfo>} alreadyPrinted deduplication set
* @returns {void}
*/
const printExportsInfoToSource = (
source,
indent,
exportsInfo,
moduleGraph,
requestShortener,
alreadyPrinted = new Set()
) => {
const otherExportsInfo = exportsInfo.otherExportsInfo;
let alreadyPrintedExports = 0;
// determine exports to print
/** @type {ExportInfo[]} */
const printedExports = [];
for (const exportInfo of exportsInfo.orderedExports) {
if (!alreadyPrinted.has(exportInfo)) {
alreadyPrinted.add(exportInfo);
printedExports.push(exportInfo);
} else {
alreadyPrintedExports++;
}
}
let showOtherExports = false;
if (!alreadyPrinted.has(otherExportsInfo)) {
alreadyPrinted.add(otherExportsInfo);
showOtherExports = true;
} else {
alreadyPrintedExports++;
}
// print the exports
for (const exportInfo of printedExports) {
const target = exportInfo.getTarget(moduleGraph);
source.add(
`${Template.toComment(
`${indent}export ${JSON.stringify(exportInfo.name).slice(
1,
-1
)} [${exportInfo.getProvidedInfo()}] [${exportInfo.getUsedInfo()}] [${exportInfo.getRenameInfo()}]${
target
? ` -> ${target.module.readableIdentifier(requestShortener)}${
target.export
? ` .${target.export
.map((e) => JSON.stringify(e).slice(1, -1))
.join(".")}`
: ""
}`
: ""
}`
)}\n`
);
if (exportInfo.exportsInfo) {
printExportsInfoToSource(
source,
`${indent} `,
exportInfo.exportsInfo,
moduleGraph,
requestShortener,
alreadyPrinted
);
}
}
if (alreadyPrintedExports) {
source.add(
`${Template.toComment(
`${indent}... (${alreadyPrintedExports} already listed exports)`
)}\n`
);
}
if (showOtherExports) {
const target = otherExportsInfo.getTarget(moduleGraph);
if (
target ||
otherExportsInfo.provided !== false ||
otherExportsInfo.getUsed(undefined) !== UsageState.Unused
) {
const title =
printedExports.length > 0 || alreadyPrintedExports > 0
? "other exports"
: "exports";
source.add(
`${Template.toComment(
`${indent}${title} [${otherExportsInfo.getProvidedInfo()}] [${otherExportsInfo.getUsedInfo()}]${
target
? ` -> ${target.module.readableIdentifier(requestShortener)}`
: ""
}`
)}\n`
);
}
}
};
/** @typedef {{ header: RawSource | undefined, full: WeakMap<Source, CachedSource> }} CacheEntry */
/** @type {WeakMap<RequestShortener, WeakMap<Module, CacheEntry>>} */
const caches = new WeakMap();
const PLUGIN_NAME = "ModuleInfoHeaderPlugin";
class ModuleInfoHeaderPlugin {
/**
* @param {boolean=} verbose add more information like exports, runtime requirements and bailouts
*/
constructor(verbose = true) {
/** @type {boolean} */
this._verbose = verbose;
}
/**
* @param {Compiler} compiler the compiler
* @returns {void}
*/
apply(compiler) {
const { _verbose: verbose } = this;
compiler.hooks.compilation.tap(PLUGIN_NAME, (compilation) => {
const javascriptHooks =
JavascriptModulesPlugin.getCompilationHooks(compilation);
javascriptHooks.renderModulePackage.tap(
PLUGIN_NAME,
(
moduleSource,
module,
{ chunk, chunkGraph, moduleGraph, runtimeTemplate }
) => {
const { requestShortener } = runtimeTemplate;
/** @type {undefined | CacheEntry} */
let cacheEntry;
let cache = caches.get(requestShortener);
if (cache === undefined) {
caches.set(requestShortener, (cache = new WeakMap()));
cache.set(
module,
(cacheEntry = { header: undefined, full: new WeakMap() })
);
} else {
cacheEntry = cache.get(module);
if (cacheEntry === undefined) {
cache.set(
module,
(cacheEntry = { header: undefined, full: new WeakMap() })
);
} else if (!verbose) {
const cachedSource = cacheEntry.full.get(moduleSource);
if (cachedSource !== undefined) return cachedSource;
}
}
const source = new ConcatSource();
let header = cacheEntry.header;
if (header === undefined) {
header = this.generateHeader(module, requestShortener);
cacheEntry.header = header;
}
source.add(header);
if (verbose) {
const exportsType = /** @type {BuildMeta} */ (module.buildMeta)
.exportsType;
source.add(
`${Template.toComment(
exportsType
? `${exportsType} exports`
: "unknown exports (runtime-defined)"
)}\n`
);
if (exportsType) {
const exportsInfo = moduleGraph.getExportsInfo(module);
printExportsInfoToSource(
source,
"",
exportsInfo,
moduleGraph,
requestShortener
);
}
source.add(
`${Template.toComment(
`runtime requirements: ${joinIterableWithComma(
chunkGraph.getModuleRuntimeRequirements(module, chunk.runtime)
)}`
)}\n`
);
const optimizationBailout =
moduleGraph.getOptimizationBailout(module);
if (optimizationBailout) {
for (const text of optimizationBailout) {
const code =
typeof text === "function" ? text(requestShortener) : text;
source.add(`${Template.toComment(`${code}`)}\n`);
}
}
source.add(moduleSource);
return source;
}
source.add(moduleSource);
const cachedSource = new CachedSource(source);
cacheEntry.full.set(moduleSource, cachedSource);
return cachedSource;
}
);
javascriptHooks.chunkHash.tap(PLUGIN_NAME, (_chunk, hash) => {
hash.update(PLUGIN_NAME);
hash.update("1");
});
const cssHooks = CssModulesPlugin.getCompilationHooks(compilation);
cssHooks.renderModulePackage.tap(
PLUGIN_NAME,
(moduleSource, module, { runtimeTemplate }) => {
const { requestShortener } = runtimeTemplate;
/** @type {undefined | CacheEntry} */
let cacheEntry;
let cache = caches.get(requestShortener);
if (cache === undefined) {
caches.set(requestShortener, (cache = new WeakMap()));
cache.set(
module,
(cacheEntry = { header: undefined, full: new WeakMap() })
);
} else {
cacheEntry = cache.get(module);
if (cacheEntry === undefined) {
cache.set(
module,
(cacheEntry = { header: undefined, full: new WeakMap() })
);
} else if (!verbose) {
const cachedSource = cacheEntry.full.get(moduleSource);
if (cachedSource !== undefined) return cachedSource;
}
}
const source = new ConcatSource();
let header = cacheEntry.header;
if (header === undefined) {
header = this.generateHeader(module, requestShortener);
cacheEntry.header = header;
}
source.add(header);
source.add(moduleSource);
const cachedSource = new CachedSource(source);
cacheEntry.full.set(moduleSource, cachedSource);
return cachedSource;
}
);
cssHooks.chunkHash.tap(PLUGIN_NAME, (_chunk, hash) => {
hash.update(PLUGIN_NAME);
hash.update("1");
});
});
}
/**
* @param {Module} module the module
* @param {RequestShortener} requestShortener request shortener
* @returns {RawSource} the header
*/
generateHeader(module, requestShortener) {
const req = module.readableIdentifier(requestShortener);
const reqStr = req.replace(/\*\//g, "*_/");
const reqStrStar = "*".repeat(reqStr.length);
const headerStr = `/*!****${reqStrStar}****!*\\\n !*** ${reqStr} ***!\n \\****${reqStrStar}****/\n`;
return new RawSource(headerStr);
}
}
module.exports = ModuleInfoHeaderPlugin; | javascript | github | https://github.com/webpack/webpack | lib/ModuleInfoHeaderPlugin.js |
from __future__ import unicode_literals
import copy
import inspect
import sys
import warnings
from itertools import chain
from django.apps import apps
from django.apps.config import MODELS_MODULE_NAME
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, ImproperlyConfigured,
MultipleObjectsReturned, ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connections,
router, transaction,
)
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.fields import AutoField
from django.db.models.fields.related import (
ForeignObjectRel, ManyToOneRel, OneToOneField, add_lazy_relation,
)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
def subclass_exception(name, parents, module, attached_to=None):
    """
    Build and return a new exception class called *name*.

    Used by ModelBase below to manufacture per-model ``DoesNotExist`` and
    ``MultipleObjectsReturned`` classes.

    If 'attached_to' is supplied, the exception will be created in a way that
    allows it to be pickled, assuming the returned exception class will be
    added as an attribute to the 'attached_to' class.
    """
    attributes = {'__module__': module}
    if attached_to is not None:
        def __reduce__(self):
            # Exceptions are special - they've got state that isn't
            # in self.__dict__. We assume it is all in self.args.
            return (unpickle_inner_exception, (attached_to, name), self.args)

        def __setstate__(self, args):
            self.args = args

        attributes.update(__reduce__=__reduce__, __setstate__=__setstate__)
    return type(name, parents, attributes)
class ModelBase(type):
    """
    Metaclass for all models.
    """
    def __new__(cls, name, bases, attrs):
        """
        Build a new model class: attach its Options (``_meta``), per-model
        exception classes, inherited fields/managers/virtual fields, and
        finally register the class with the app registry (unless abstract).
        """
        super_new = super(ModelBase, cls).__new__

        # Also ensure initialization is only performed for subclasses of Model
        # (excluding Model class itself).
        parents = [b for b in bases if isinstance(b, ModelBase)]
        if not parents:
            return super_new(cls, name, bases, attrs)

        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            # No inner Meta on this class -- fall back to an inherited one.
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)

        # Look for an application configuration to attach the model to.
        app_config = apps.get_containing_app_config(module)

        if getattr(meta, 'app_label', None) is None:

            if app_config is None:
                # If the model is imported before the configuration for its
                # application is created (#21719), or isn't in an installed
                # application (#21680), use the legacy logic to figure out the
                # app_label by looking one level up from the package or module
                # named 'models'. If no such package or module exists, fall
                # back to looking one level up from the module this model is
                # defined in.

                # For 'django.contrib.sites.models', this would be 'sites'.
                # For 'geo.models.places' this would be 'geo'.

                msg = (
                    "Model class %s.%s doesn't declare an explicit app_label "
                    "and either isn't in an application in INSTALLED_APPS or "
                    "else was imported before its application was loaded. "
                    "This will no longer be supported in Django 1.9." %
                    (module, name))
                if not abstract:
                    warnings.warn(msg, RemovedInDjango19Warning, stacklevel=2)

                model_module = sys.modules[new_class.__module__]
                package_components = model_module.__name__.split('.')
                package_components.reverse()  # find the last occurrence of 'models'
                try:
                    app_label_index = package_components.index(MODELS_MODULE_NAME) + 1
                except ValueError:
                    app_label_index = 1
                try:
                    kwargs = {"app_label": package_components[app_label_index]}
                except IndexError:
                    raise ImproperlyConfigured(
                        'Unable to detect the app label for model "%s." '
                        'Ensure that its module, "%s", is located inside an installed '
                        'app.' % (new_class.__name__, model_module.__name__)
                    )
            else:
                kwargs = {"app_label": app_config.label}

        else:
            kwargs = {}

        new_class.add_to_class('_meta', Options(meta, **kwargs))
        if not abstract:
            # Give each concrete model its own DoesNotExist /
            # MultipleObjectsReturned, inheriting from the parents' versions
            # so `except Parent.DoesNotExist` still catches child errors.
            new_class.add_to_class(
                'DoesNotExist',
                subclass_exception(
                    str('DoesNotExist'),
                    tuple(
                        x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
                    ) or (ObjectDoesNotExist,),
                    module,
                    attached_to=new_class))
            new_class.add_to_class(
                'MultipleObjectsReturned',
                subclass_exception(
                    str('MultipleObjectsReturned'),
                    tuple(
                        x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
                    ) or (MultipleObjectsReturned,),
                    module,
                    attached_to=new_class))
            if base_meta and not base_meta.abstract:
                # Non-abstract child classes inherit some attributes from their
                # non-abstract parent (unless an ABC comes before it in the
                # method resolution order).
                if not hasattr(meta, 'ordering'):
                    new_class._meta.ordering = base_meta.ordering
                if not hasattr(meta, 'get_latest_by'):
                    new_class._meta.get_latest_by = base_meta.get_latest_by

        is_proxy = new_class._meta.proxy

        # If the model is a proxy, ensure that the base class
        # hasn't been swapped out.
        if is_proxy and base_meta and base_meta.swapped:
            raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))

        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)

        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)

        # All the fields of any type declared on this model
        new_fields = chain(
            new_class._meta.local_fields,
            new_class._meta.local_many_to_many,
            new_class._meta.virtual_fields
        )
        field_names = {f.name for f in new_fields}

        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError(
                            "Abstract base class containing model fields not "
                            "permitted for proxy model '%s'." % name
                        )
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            new_class._meta.setup_proxy(base)
            new_class._meta.concrete_model = base._meta.concrete_model
            base._meta.concrete_model._meta.proxied_children.append(new_class._meta)
        else:
            new_class._meta.concrete_model = new_class

        # Collect the parent links for multi-table inheritance.
        parent_links = {}
        for base in reversed([new_class] + parents):
            # Conceptually equivalent to `if base is Model`.
            if not hasattr(base, '_meta'):
                continue
            # Skip concrete parent classes.
            if base != new_class and not base._meta.abstract:
                continue
            # Locate OneToOneField instances.
            for field in base._meta.local_fields:
                if isinstance(field, OneToOneField):
                    parent_links[field.rel.to] = field

        # Do the appropriate setup for any model parents.
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue

            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError(
                        'Local field %r in class %r clashes '
                        'with field of similar name from '
                        'base class %r' % (field.name, name, base.__name__)
                    )
            if not base._meta.abstract:
                # Concrete classes...
                base = base._meta.concrete_model
                if base in parent_links:
                    field = parent_links[base]
                elif not is_proxy:
                    attr_name = '%s_ptr' % base._meta.model_name
                    field = OneToOneField(base, name=attr_name,
                                          auto_created=True, parent_link=True)
                    # Only add the ptr field if it's not already present;
                    # e.g. migrations will already have it specified
                    if not hasattr(new_class, attr_name):
                        new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                # .. and abstract ones.
                for field in parent_fields:
                    new_field = copy.deepcopy(field)
                    new_class.add_to_class(field.name, new_field)

                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base._meta.parents)

            # Inherit managers from the abstract base classes.
            new_class.copy_managers(base._meta.abstract_managers)

            # Proxy models inherit the non-abstract managers from their base,
            # unless they have redefined any of them.
            if is_proxy:
                new_class.copy_managers(original_base._meta.concrete_managers)

            # Inherit virtual fields (like GenericForeignKey) from the parent
            # class
            for field in base._meta.virtual_fields:
                if base._meta.abstract and field.name in field_names:
                    raise FieldError(
                        'Local field %r in class %r clashes '
                        'with field of similar name from '
                        'abstract base class %r' % (field.name, name, base.__name__)
                    )
                new_class.add_to_class(field.name, copy.deepcopy(field))

        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class

        new_class._prepare()
        new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
        return new_class

    def copy_managers(cls, base_managers):
        """
        Copy managers from a parent class onto ``cls``, skipping names the
        child has already overridden with something else.
        """
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:  # NOQA (redefinition of _)
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)

    def add_to_class(cls, name, value):
        """
        Attach ``value`` to the class, letting objects that know how to
        contribute (fields, managers, Options) hook themselves in.
        """
        # We should call the contribute_to_class method only if it's bound
        if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)

    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)

        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)

            # defer creating accessors on the foreign class until we are
            # certain it has been created
            def make_foreign_order_accessors(field, model, cls):
                setattr(
                    field.rel.to,
                    'get_%s_order' % cls.__name__.lower(),
                    curry(method_get_order, cls)
                )
                setattr(
                    field.rel.to,
                    'set_%s_order' % cls.__name__.lower(),
                    curry(method_set_order, cls)
                )
            add_lazy_relation(
                cls,
                opts.order_with_respect_to,
                opts.order_with_respect_to.rel.to,
                make_foreign_order_accessors
            )

        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))

        get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(
            '%s.%s' % (opts.app_label, opts.model_name)
        )
        if get_absolute_url_override:
            setattr(cls, 'get_absolute_url', get_absolute_url_override)

        ensure_default_manager(cls)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """
    Per-instance database state container attached to every model instance.
    """

    def __init__(self, db=None):
        # Alias of the database this instance was loaded from; None means it
        # has not been loaded from (or saved to) any database yet.
        self.db = db
        # If true, uniqueness validation checks will consider this a new,
        # as-yet-unsaved object. Necessary for correct validation of new
        # instances of objects with explicit (non-auto) PKs. This impacts
        # validation only; it has no effect on the actual save.
        self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
    def __init__(self, *args, **kwargs):
        """
        Populate the instance's field attributes from positional values
        (in concrete-field declaration order) and/or keyword arguments.

        Keyword arguments may address a field by name or, for relations, by
        attname (e.g. "user" or "user_id"); leftover kwargs matching plain
        properties on the class are assigned through the property; anything
        still unconsumed raises TypeError. Sends pre_init/post_init signals.
        """
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)

        # Set up the storage for instance state
        self._state = ModelState()

        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.concrete_fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")

        if not kwargs:
            fields_iter = iter(self._meta.concrete_fields)
            # The ordering of the zip calls matter - zip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            fields_iter = iter(self._meta.fields)
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.rel, ManyToOneRel):
                    kwargs.pop(field.attname, None)

        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.

        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                     or field.column is None)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.rel, ForeignObjectRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()

            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)

        if kwargs:
            for prop in list(kwargs):
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if cls._deferred:
new = cls(**dict(zip(field_names, values)))
else:
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
    def __str__(self):
        """
        Default readable form. On Python 2, defers to a user-defined
        __unicode__ (UTF-8 encoded); otherwise falls back to a generic label.
        """
        if six.PY2 and hasattr(self, '__unicode__'):
            return force_text(self).encode('utf-8')
        return '%s object' % self.__class__.__name__
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
    def __reduce__(self):
        """
        Provides pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        # Stamp the pickle with the current Django version so __setstate__
        # can warn on cross-version unpickling.
        data[DJANGO_VERSION_PICKLE_KEY] = get_version()
        if not self._deferred:
            # Ordinary (module-level) class: identify it by app label + name.
            class_id = self._meta.app_label, self._meta.object_name
            return model_unpickle, (class_id, [], simple_class_factory), data
        # Deferred class: record which attnames are still deferred so the
        # dynamic class can be re-created on unpickle.
        defers = []
        for field in self._meta.fields:
            if isinstance(self.__class__.__dict__.get(field.attname),
                          DeferredAttribute):
                defers.append(field.attname)
        # The picklable identity is the underlying (proxied-for) model.
        model = self._meta.proxy_for_model
        class_id = model._meta.app_label, model._meta.object_name
        return (model_unpickle, (class_id, defers, deferred_class_factory), data)
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled model instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)
}
    def refresh_from_db(self, using=None, fields=None, **kwargs):
        """
        Reloads field values from the database.

        By default, the reloading happens from the database this instance was
        loaded from, or by the read router if this instance wasn't loaded from
        any database. The using parameter will override the default.

        Fields can be used to specify which fields to reload. The fields
        should be an iterable of field attnames. If fields is None, then
        all non-deferred fields are reloaded.

        When accessing deferred fields of an instance, the deferred loading
        of the field will call this method.
        """
        if fields is not None:
            if len(fields) == 0:
                # Nothing requested: no query needed.
                return
            if any(LOOKUP_SEP in f for f in fields):
                raise ValueError(
                    'Found "%s" in fields argument. Relations and transforms '
                    'are not allowed in fields.' % LOOKUP_SEP)

        db = using if using is not None else self._state.db
        if self._deferred:
            # Query against the underlying model, not the dynamic deferred class.
            non_deferred_model = self._meta.proxy_for_model
        else:
            non_deferred_model = self.__class__
        db_instance_qs = non_deferred_model._default_manager.using(db).filter(pk=self.pk)

        # Use provided fields, if not set then reload all non-deferred fields.
        if fields is not None:
            fields = list(fields)
            db_instance_qs = db_instance_qs.only(*fields)
        elif self._deferred:
            # Only reload fields that are currently loaded; deferred ones
            # stay deferred.
            deferred_fields = self.get_deferred_fields()
            fields = [f.attname for f in self._meta.concrete_fields
                      if f.attname not in deferred_fields]
            db_instance_qs = db_instance_qs.only(*fields)

        db_instance = db_instance_qs.get()
        non_loaded_fields = db_instance.get_deferred_fields()
        for field in self._meta.concrete_fields:
            if field.attname in non_loaded_fields:
                # This field wasn't refreshed - skip ahead.
                continue
            setattr(self, field.attname, getattr(db_instance, field.attname))
            # Throw away stale foreign key references.
            if field.rel and field.get_cache_name() in self.__dict__:
                rel_instance = getattr(self, field.get_cache_name())
                local_val = getattr(db_instance, field.attname)
                related_val = None if rel_instance is None else getattr(rel_instance, field.related_field.attname)
                if local_val != related_val:
                    del self.__dict__[field.get_cache_name()]
        self._state.db = db_instance._state.db
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.

        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.

        ``using`` picks the database alias to write to (defaults to the write
        router's choice). ``update_fields``, when given, restricts the write
        to the named fields and forces an UPDATE; an empty iterable makes the
        call a no-op.
        """
        # Ensure that a model instance without a PK hasn't been assigned to
        # a ForeignKey or OneToOneField on this model. If the field is
        # nullable, allowing the save() would result in silent data loss.
        for field in self._meta.concrete_fields:
            if field.is_relation:
                # If the related field isn't cached, then an instance hasn't
                # been assigned and there's no need to worry about this check.
                try:
                    getattr(self, field.get_cache_name())
                except AttributeError:
                    continue
                obj = getattr(self, field.name, None)
                # A pk may have been assigned manually to a model instance not
                # saved to the database (or auto-generated in a case like
                # UUIDField), but we allow the save to proceed and rely on the
                # database to raise an IntegrityError if applicable. If
                # constraints aren't supported by the database, there's the
                # unavoidable risk of data corruption.
                if obj and obj.pk is None:
                    raise ValueError(
                        "save() prohibited to prevent data loss due to "
                        "unsaved related object '%s'." % field.name
                    )
        using = using or router.db_for_write(self.__class__, instance=self)
        if force_insert and (force_update or update_fields):
            raise ValueError("Cannot force both insert and updating in model saving.")
        if update_fields is not None:
            # If update_fields is empty, skip the save. We do also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
            if len(update_fields) == 0:
                return
            update_fields = frozenset(update_fields)
            field_names = set()
            # Collect every updatable name (both field.name and its column
            # attname) so update_fields can be validated against them.
            for field in self._meta.fields:
                if not field.primary_key:
                    field_names.add(field.name)
                    if field.name != field.attname:
                        field_names.add(field.attname)
            non_model_fields = update_fields.difference(field_names)
            if non_model_fields:
                raise ValueError("The following fields do not exist in this "
                                 "model or are m2m fields: %s"
                                 % ', '.join(non_model_fields))
        # If saving to the same database, and this model is deferred, then
        # automatically do a "update_fields" save on the loaded fields.
        elif not force_insert and self._deferred and using == self._state.db:
            field_names = set()
            for field in self._meta.concrete_fields:
                if not field.primary_key and not hasattr(field, 'through'):
                    field_names.add(field.attname)
            # Fields still represented by DeferredAttribute were never loaded
            # from the database, so they must not be written back.
            deferred_fields = [
                f.attname for f in self._meta.fields
                if (f.attname not in self.__dict__ and
                    isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
            ]
            loaded_fields = field_names.difference(deferred_fields)
            if loaded_fields:
                update_fields = frozenset(loaded_fields)
        self.save_base(using=using, force_insert=force_insert,
                       force_update=force_update, update_fields=update_fields)
    save.alters_data = True
    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handles the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.

        The 'raw' argument is telling save_base not to save any parent
        models and not to do any changes to the values before save. This
        is used by fixture loading.

        Sends pre_save before writing and post_save afterwards (skipped for
        auto-created models); the parent and own-table writes run inside a
        single atomic block on ``using``.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        # Internal invariants -- public validation already happened in save().
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        if not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)
        with transaction.atomic(using=using, savepoint=False):
            if not raw:
                self._save_parents(cls, using, update_fields)
            updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False
        # Signal that the save is complete
        if not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not updated),
                                   update_fields=update_fields, raw=raw, using=using)
    save_base.alters_data = True
    def _save_parents(self, cls, using, update_fields):
        """
        Saves all the parents of cls using values from self.

        Recursively walks the multi-table-inheritance chain so every
        ancestor table has its row written (and its PK known) before the
        child row that links to it.
        """
        meta = cls._meta
        for parent, field in meta.parents.items():
            # Make sure the link fields are synced between parent and self.
            if (field and getattr(self, parent._meta.pk.attname) is None
                    and getattr(self, field.attname) is not None):
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            self._save_parents(cls=parent, using=using, update_fields=update_fields)
            self._save_table(cls=parent, using=using, update_fields=update_fields)
            # Set the parent's PK value to self.
            if field:
                setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy set
                # attname directly, bypassing the descriptor. Invalidate
                # the related object cache, in case it's been accidentally
                # populated. A fresh instance will be re-built from the
                # database if necessary.
                cache_name = field.get_cache_name()
                if hasattr(self, cache_name):
                    delattr(self, cache_name)
    def _save_table(self, raw=False, cls=None, force_insert=False,
                    force_update=False, using=None, update_fields=None):
        """
        Does the heavy-lifting involved in saving. Updates or inserts the data
        for a single table.

        Returns True when an UPDATE touched an existing row, False when an
        INSERT was performed instead.
        """
        meta = cls._meta
        non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
        if update_fields:
            # Restrict the write to the explicitly requested fields only.
            non_pks = [f for f in non_pks
                       if f.name in update_fields or f.attname in update_fields]
        pk_val = self._get_pk_val(meta)
        if pk_val is None:
            # Give the pk field a chance to supply a value itself (e.g. a
            # UUIDField default) before relying on the database to generate one.
            pk_val = meta.pk.get_pk_value_on_save(self)
            setattr(self, meta.pk.attname, pk_val)
        pk_set = pk_val is not None
        if not pk_set and (force_update or update_fields):
            raise ValueError("Cannot force an update in save() with no primary key.")
        updated = False
        # If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
        if pk_set and not force_insert:
            base_qs = cls._base_manager.using(using)
            # Raw saves (fixtures) bypass pre_save() so values land unmodified.
            values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
                      for f in non_pks]
            forced_update = update_fields or force_update
            updated = self._do_update(base_qs, using, pk_val, values, update_fields,
                                      forced_update)
            if force_update and not updated:
                raise DatabaseError("Forced update did not affect any rows.")
            if update_fields and not updated:
                raise DatabaseError("Save with update_fields did not affect any rows.")
        if not updated:
            if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to
                # autopopulate the _order field
                field = meta.order_with_respect_to
                order_value = cls._base_manager.using(using).filter(
                    **{field.name: getattr(self, field.attname)}).count()
                self._order = order_value
            fields = meta.local_concrete_fields
            if not pk_set:
                # Let the database assign the auto pk.
                fields = [f for f in fields if not isinstance(f, AutoField)]
            update_pk = bool(meta.has_auto_field and not pk_set)
            result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
            if update_pk:
                setattr(self, meta.pk.attname, result)
        return updated
    def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
        """
        This method will try to update the model. If the model was updated (in
        the sense that an update query was done and a matching row was found
        from the DB) the method will return True.

        ``values`` is a list of (field, model, value) triples for the SQL
        UPDATE; when empty there is nothing to write for this table.
        """
        filtered = base_qs.filter(pk=pk_val)
        if not values:
            # We can end up here when saving a model in inheritance chain where
            # update_fields doesn't target any field in current model. In that
            # case we just say the update succeeded. Another case ending up here
            # is a model with just PK - in that case check that the PK still
            # exists.
            return update_fields is not None or filtered.exists()
        if self._meta.select_on_save and not forced_update:
            if filtered.exists():
                # It may happen that the object is deleted from the DB right after
                # this check, causing the subsequent UPDATE to return zero matching
                # rows. The same result can occur in some rare cases when the
                # database returns zero despite the UPDATE being executed
                # successfully (a row is matched and updated). In order to
                # distinguish these two cases, the object's existence in the
                # database is again checked for if the UPDATE query returns 0.
                return filtered._update(values) > 0 or filtered.exists()
            else:
                return False
        return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
    def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
        """
        Return the neighbouring instance when ordering by ``field`` (next if
        ``is_next`` is true, previous otherwise), using the pk as a stable
        tie-breaker for equal field values. Raises DoesNotExist if there is
        no such row; extra ``kwargs`` further filter the candidates.
        """
        if not self.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        op = 'gt' if is_next else 'lt'
        order = '' if is_next else '-'
        param = force_text(getattr(self, field.attname))
        # Rows strictly beyond our field value, OR with the same value but a
        # pk beyond ours (the tie-break).
        q = Q(**{'%s__%s' % (field.name, op): param})
        q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
        qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
            '%s%s' % (order, field.name), '%spk' % order
        )
        try:
            return qs[0]
        except IndexError:
            raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
    def _get_next_or_previous_in_order(self, is_next):
        """
        Return the neighbouring instance by the ``_order`` column for models
        declared with order_with_respect_to. The result is memoized on the
        instance, keyed by the direction.
        """
        cachename = "__%s_order_cache" % is_next
        if not hasattr(self, cachename):
            op = 'gt' if is_next else 'lt'
            order = '_order' if is_next else '-_order'
            order_field = self._meta.order_with_respect_to
            # Restrict to siblings that share our order_with_respect_to value,
            # then pick the row whose _order is immediately beyond ours (the
            # inner values() queryset resolves our own _order in SQL).
            obj = self._default_manager.filter(**{
                order_field.name: getattr(self, order_field.attname)
            }).filter(**{
                '_order__%s' % op: self._default_manager.values('_order').filter(**{
                    self._meta.pk.name: self.pk
                })
            }).order_by(order)[:1].get()
            setattr(self, cachename, obj)
        return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.rel.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
    def _get_unique_checks(self, exclude=None):
        """
        Gather a list of checks to perform. Since validate_unique could be
        called from a ModelForm, some fields may have been excluded; we can't
        perform a unique check on a model that is missing fields involved
        in that check.
        Fields that did not validate should also be excluded, but they need
        to be passed in via the exclude argument.

        Returns a (unique_checks, date_checks) pair: unique_checks is a list
        of (model_class, field_name_tuple) and date_checks is a list of
        (model_class, lookup_type, field_name, unique_for_field_name).
        """
        if exclude is None:
            exclude = []
        unique_checks = []
        unique_togethers = [(self.__class__, self._meta.unique_together)]
        for parent_class in self._meta.get_parent_list():
            if parent_class._meta.unique_together:
                unique_togethers.append((parent_class, parent_class._meta.unique_together))
        for model_class, unique_together in unique_togethers:
            for check in unique_together:
                for name in check:
                    # If this is an excluded field, don't add this check.
                    if name in exclude:
                        break
                # for/else: runs only when no field in the check was excluded.
                else:
                    unique_checks.append((model_class, tuple(check)))
        # These are checks for the unique_for_<date/year/month>.
        date_checks = []
        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))
        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks
    def _perform_unique_checks(self, unique_checks):
        """
        Run each (model_class, field_names) unique check against the database
        and return a dict mapping field name (or NON_FIELD_ERRORS for
        unique_together) to a list of ValidationErrors.
        """
        errors = {}
        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                if lookup_value is None:
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value
            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs):
                continue
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
        return errors
    def _perform_date_checks(self, date_checks):
        """
        Run the unique_for_date/year/month checks against the database and
        return a dict mapping field name to a list of ValidationErrors.
        """
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes it's way in
            date = getattr(self, unique_for)
            if date is None:
                # No date to constrain on -- nothing to check.
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
    def full_clean(self, exclude=None, validate_unique=True):
        """
        Calls clean_fields, clean, and validate_unique, on the model,
        and raises a ``ValidationError`` for any errors that occurred.

        ``exclude`` lists field names to skip; ``validate_unique=False``
        skips the database-backed uniqueness checks entirely.
        """
        errors = {}
        if exclude is None:
            exclude = []
        else:
            # Copy so the caller's list isn't mutated by the appends below.
            exclude = list(exclude)
        try:
            self.clean_fields(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Run unique checks, but only for fields that passed validation.
        if validate_unique:
            for name in errors.keys():
                if name != NON_FIELD_ERRORS and name not in exclude:
                    exclude.append(name)
            try:
                self.validate_unique(exclude=exclude)
            except ValidationError as e:
                errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
hint=None,
obj=None,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
hint=None,
obj=None,
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
hint=None,
obj=None,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for __, manager, __ in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.rel.to, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.rel.through, ModelBase))
for f in fields:
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s.%s'." % (
f.rel.through._meta.app_label,
f.rel.through._meta.object_name
),
hint=None,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields
if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
hint=None,
obj=cls,
id='models.E004',
)
]
else:
return []
    @classmethod
    def _check_field_name_clashes(cls):
        """ Forbid fields shadowing fields from parent models. Ref #17673. """
        errors = []
        used_fields = {}  # name or attname -> field
        # Check that multi-inheritance doesn't cause field name shadowing.
        for parent in cls._meta.get_parent_list():
            for f in parent._meta.local_fields:
                clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
                if clash:
                    errors.append(
                        checks.Error(
                            "The field '%s' from parent model "
                            "'%s' clashes with the field '%s' "
                            "from parent model '%s'." % (
                                clash.name, clash.model._meta,
                                f.name, f.model._meta
                            ),
                            hint=None,
                            obj=cls,
                            id='models.E005',
                        )
                    )
                used_fields[f.name] = f
                used_fields[f.attname] = f
        # Check that fields defined in the model don't clash with fields from
        # parents.
        for f in cls._meta.local_fields:
            clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect clash between user-defined non-unique
            # field "id" and automatically added unique field "id", both
            # defined at the same model. This special case is considered in
            # _check_id_field and here we ignore it.
            id_conflict = (f.name == "id" and
                clash and clash.name == "id" and clash.model == cls)
            if clash and not id_conflict:
                errors.append(
                    checks.Error(
                        "The field '%s' clashes with the field '%s' "
                        "from model '%s'." % (
                            f.name, clash.name, clash.model._meta
                        ),
                        hint=None,
                        obj=f,
                        id='models.E006',
                    )
                )
            # Register both names so later fields are checked against this one.
            used_fields[f.name] = f
            used_fields[f.attname] = f
        return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
hint=None,
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.rel, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
hint=None,
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
("'%s' refers to field '%s' which is not local "
"to model '%s'.") % (
option, field_name, cls._meta.object_name,
),
hint=("This issue may be caused by multi-table "
"inheritance."),
obj=cls,
id='models.E016',
)
)
return errors
    @classmethod
    def _check_ordering(cls):
        """ Check "ordering" option -- is it a list of strings and do all fields
        exist?

        Builds a lazy pipeline of generators that successively strips names
        which are always valid, then compares what remains against the set of
        real field names.
        """
        if not cls._meta.ordering:
            return []
        if not isinstance(cls._meta.ordering, (list, tuple)):
            return [
                checks.Error(
                    ("'ordering' must be a tuple or list "
                     "(even if you want to order by only one field)."),
                    hint=None,
                    obj=cls,
                    id='models.E014',
                )
            ]
        errors = []
        fields = cls._meta.ordering
        # Skip '?' fields.
        fields = (f for f in fields if f != '?')
        # Convert "-field" to "field".
        fields = ((f[1:] if f.startswith('-') else f) for f in fields)
        # '_order' is implicitly valid when order_with_respect_to is set.
        fields = (f for f in fields if
                  f != '_order' or not cls._meta.order_with_respect_to)
        # Skip ordering in the format field1__field2 (FIXME: checking
        # this format would be nice, but it's a little fiddly).
        fields = (f for f in fields if '__' not in f)
        # Skip ordering on pk. This is always a valid order_by field
        # but is an alias and therefore won't be found by opts.get_field.
        fields = {f for f in fields if f != 'pk'}
        # Check for invalid or non-existent fields in ordering.
        invalid_fields = []
        # Any field name that is not present in field_names does not exist.
        # Also, ordering by m2m fields is not allowed.
        opts = cls._meta
        valid_fields = set(chain.from_iterable(
            (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
            for f in chain(opts.fields, opts.related_objects)
        ))
        invalid_fields.extend(fields - valid_fields)
        for invalid_field in invalid_fields:
            errors.append(
                checks.Error(
                    "'ordering' refers to the non-existent field '%s'." % invalid_field,
                    hint=None,
                    obj=cls,
                    id='models.E015',
                )
            )
        return errors
    @classmethod
    def _check_long_column_names(cls):
        """
        Check that any auto-generated column names are shorter than the limits
        for each database in which the model will be created.
        """
        errors = []
        allowed_len = None
        db_alias = None
        # Find the minimum max allowed length among all specified db_aliases.
        for db in settings.DATABASES.keys():
            # skip databases where the model won't be created
            if not router.allow_migrate_model(db, cls):
                continue
            connection = connections[db]
            max_name_length = connection.ops.max_name_length()
            # Backends that truncate names themselves impose no hard limit.
            if max_name_length is None or connection.features.truncates_names:
                continue
            else:
                if allowed_len is None:
                    allowed_len = max_name_length
                    db_alias = db
                elif max_name_length < allowed_len:
                    allowed_len = max_name_length
                    db_alias = db
        if allowed_len is None:
            # No target database constrains name length -- nothing to check.
            return errors
        for f in cls._meta.local_fields:
            _, column_name = f.get_attname_column()
            # Check if auto-generated name for the field is too long
            # for the database.
            if (f.db_column is None and column_name is not None
                    and len(column_name) > allowed_len):
                errors.append(
                    checks.Error(
                        'Autogenerated column name too long for field "%s". '
                        'Maximum length is "%s" for database "%s".'
                        % (column_name, allowed_len, db_alias),
                        hint="Set the column name manually using 'db_column'.",
                        obj=cls,
                        id='models.E018',
                    )
                )
        for f in cls._meta.local_many_to_many:
            # Check if auto-generated name for the M2M field is too long
            # for the database.
            for m2m in f.rel.through._meta.local_fields:
                _, rel_name = m2m.get_attname_column()
                if (m2m.db_column is None and rel_name is not None
                        and len(rel_name) > allowed_len):
                    errors.append(
                        checks.Error(
                            'Autogenerated column name too long for M2M field '
                            '"%s". Maximum length is "%s" for database "%s".'
                            % (rel_name, allowed_len, db_alias),
                            hint=("Use 'through' to create a separate model "
                                  "for M2M and then set column_name using "
                                  "'db_column'."),
                            obj=cls,
                            id='models.E019',
                        )
                    )
        return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """
    Curried onto models using order_with_respect_to: set the ``_order``
    of the related ``ordered_obj`` rows to match the order of ``id_list``.
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    rel_field = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, rel_field.rel.field_name)
    order_name = rel_field.name
    # FIXME: an "update many" operation would avoid one UPDATE per object here.
    with transaction.atomic(using=using, savepoint=False):
        for index, obj_pk in enumerate(id_list):
            ordered_obj.objects.filter(**{'pk': obj_pk, order_name: rel_val}).update(_order=index)
def method_get_order(ordered_obj, self):
    """
    Curried onto models using order_with_respect_to: return the pks of the
    related ``ordered_obj`` rows in their stored ``_order``.
    """
    rel_field = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, rel_field.rel.field_name)
    pk_name = ordered_obj._meta.pk.name
    rows = ordered_obj.objects.filter(**{rel_field.name: rel_val}).values(pk_name)
    return [row[pk_name] for row in rows]
########
# MISC #
########
def simple_class_factory(model, attrs):
    """
    Trivial class factory used when unpickling non-deferred dynamic model
    classes: returns ``model`` unchanged and ignores ``attrs``.
    """
    return model
def model_unpickle(model_id, attrs, factory):
    """
    Recreate an (optionally deferred) Model subclass instance for pickle.

    ``model_id`` is either an (app_label, model_name) pair resolved through
    the app registry, or -- for backwards compatibility -- the model class
    itself. ``factory`` builds the concrete class from the model and
    ``attrs``; the instance is created without running __init__.
    """
    if isinstance(model_id, tuple):
        # Resolve via the app registry, populating it first if needed.
        if not apps.ready:
            apps.populate(settings.INSTALLED_APPS)
        model = apps.get_model(*model_id)
    else:
        # Backwards compat - the model was cached directly in earlier versions.
        model = model_id
    cls = factory(model, attrs)
    return cls.__new__(cls)

model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
    """Recreate an exception class that lives as an attribute of ``klass``.

    Looks the exception class up on its owner and allocates a bare instance
    (``__init__`` is not called; pickle restores the state).
    """
    exc_class = getattr(klass, exception_name)
    return exc_class.__new__(exc_class)
from sklearn.neighbors import KNeighborsClassifier
from .common import Benchmark, Estimator, Predictor
from .datasets import _20newsgroups_lowdim_dataset
from .utils import make_gen_classif_scorers
class KNeighborsClassifierBenchmark(Predictor, Estimator, Benchmark):
    """
    Benchmarks for KNeighborsClassifier.
    """

    param_names = ["algorithm", "dimension", "n_jobs"]
    params = (["brute", "kd_tree", "ball_tree"], ["low", "high"], Benchmark.n_jobs_vals)

    def setup_cache(self):
        # No extra caching beyond the shared base-class behaviour.
        super().setup_cache()

    def make_data(self, params):
        algorithm, dimension, n_jobs = params

        # Dimensionality of the reduced 20newsgroups data depends on both
        # the requested dimension and the global benchmark data size.
        large = Benchmark.data_size == "large"
        low = dimension == "low"
        n_components = (40 if low else 200) if large else (10 if low else 50)

        return _20newsgroups_lowdim_dataset(n_components=n_components)

    def make_estimator(self, params):
        algorithm, dimension, n_jobs = params
        return KNeighborsClassifier(algorithm=algorithm, n_jobs=n_jobs)

    def make_scorers(self):
        make_gen_classif_scorers(self)
# -*- coding: utf-8 -*-
from fuel.datasets import H5PYDataset
from fuel.transformers.defaults import uint8_pixels_to_floatX
from fuel.utils import find_in_data_path
class SVHN(H5PYDataset):
    """The Street View House Numbers (SVHN) dataset.

    SVHN [SVHN] is a real-world digit-recognition dataset built from house
    numbers in Google Street View images. Like MNIST [LBBH] it contains small
    images of digits, but with over 600,000 labeled examples drawn from a
    much harder natural-scene problem.

    Parameters
    ----------
    which_format : {1, 2}
        SVHN format 1 contains the full numbers, whereas SVHN format 2
        contains cropped digits.
    which_sets : tuple of str
        Which split to load. Valid values are 'train', 'test' and 'extra',
        corresponding to the training set (73,257 examples), the test
        set (26,032 examples) and the extra set (531,131 examples).
        Note that SVHN does not have a validation set; usually you will
        create your own training/validation split using the `subset`
        argument.
    """

    # Template filled in with the chosen format number (1 or 2).
    _filename = 'svhn_format_{}.hdf5'
    default_transformers = uint8_pixels_to_floatX(('features',))

    def __init__(self, which_format, which_sets, **kwargs):
        self.which_format = which_format
        data_path = find_in_data_path(self.filename)
        super(SVHN, self).__init__(
            file_or_path=data_path, which_sets=which_sets, **kwargs)

    @property
    def filename(self):
        """HDF5 filename for the selected SVHN format."""
        return self._filename.format(self.which_format)
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.engine
import java.lang.reflect.*
import java.net.*
internal fun ClassLoader.allURLs(): Set<URL> {
    // Collect URLs from the whole parent chain first.
    val inherited = parent?.allURLs() ?: emptySet()
    if (this is URLClassLoader) {
        return inherited + urLs.filterNotNull()
    }
    // Non-URLClassLoader (JDK9+ app loader): fall back to reflective lookup.
    val classPath = urlClassPath() ?: return inherited
    return inherited + classPath
}
/**
* This only works in JDK9+ with VM option `--add-opens java.base/jdk.internal.loader=ALL-UNNAMED`
* This is required since [allURLs] function is unable to lookup url list due to modules and class loaders
* reorganisation in JDK9+. However, if failed, it fallbacks to [urlClassPathByPackagesList] implementation
* that should always work.
*/
private fun ClassLoader.urlClassPath(): List<URL>? {
    try {
        // Reflectively read the private "ucp" (URLClassPath) field of the
        // class loader; requires --add-opens on JDK9+ (see comment above).
        val ucpField = javaClass.findURLClassPathField() ?: return null
        ucpField.isAccessible = true
        val ucpInstance = ucpField.get(this) ?: return null
        val getURLsMethod = ucpInstance.javaClass.getMethod("getURLs") ?: return null
        getURLsMethod.isAccessible = true
        @Suppress("UNCHECKED_CAST")
        val urls = getURLsMethod.invoke(ucpInstance) as Array<URL>?
        return urls?.toList()
    } catch (_: Throwable) {
        // Reflection blocked (module system, security manager, ...):
        // fall back to the packages-list heuristic; null if that fails too.
        return try {
            urlClassPathByPackagesList()
        } catch (_: Throwable) {
            null
        }
    }
}
/**
* Extract classloader's packages list and guess URLs by package segments.
* Unlike the old way, this doesn't require any black magic so works well on all JDKs
* from JDK6 to the latest.
*/
private fun ClassLoader.urlClassPathByPackagesList(): List<URL> {
    // Turn every known package into a resource path, plus every ancestor
    // prefix of that path (e.g. "a/b/c" also yields "a" and "a/b"), so that
    // getResources() can locate the classpath roots that contain them.
    val allPackagePaths = ClassLoaderDelegate(this).packagesList().map { it.replace('.', '/') }
        .flatMapTo(HashSet<String>()) { packageName ->
            val segments = packageName.split('/')
            (1..segments.size).map { segments.subList(0, it).joinToString("/") } + packageName
        }.sortedBy { it.count { character -> character == '/' } } + ""
    // Resolve each path to URLs; deduplicate by the part before "!" so each
    // JAR counts once regardless of which entry inside it matched.
    return allPackagePaths.flatMap { path -> getResources(path)?.toList() ?: emptyList() }
        .distinctBy { it.path.substringBefore('!') }
}
private fun Class<*>.findURLClassPathField(): Field? {
    // Look for the private "ucp" field on this class first, then walk up
    // the superclass chain; null when no class in the chain declares it.
    declaredFields.firstOrNull { it.name == "ucp" && it.type.simpleName == "URLClassPath" }?.let { return it }
    // Note: the original ended with `?: return null`, which is redundant —
    // `superclass?....()` already evaluates to null at the end of the chain.
    return superclass?.findURLClassPathField()
}
/**
* This is auxillary classloader that is not used for loading classes. The purpose is just
* to get access to [getPackages] function that is unfortunately protected.
*/
private class ClassLoaderDelegate(delegate: ClassLoader) : ClassLoader(delegate) {
    // getPackages() is protected, so subclassing is the only way to reach it;
    // this loader is never used to actually load classes.
    fun packagesList(): List<String> {
        return getPackages().map { it.name }
    }
}
#include "jemalloc/internal/jemalloc_preamble.h"
/*
* The hooks are a little bit screwy -- they're not genuinely exported in the
* sense that we want them available to end-users, but we do want them visible
* from outside the generated library, so that we can use them in test code.
*/
/* Test-only hook invoked (when non-NULL) around arena creation; disabled by
 * default. */
JEMALLOC_EXPORT
void (*test_hooks_arena_new_hook)() = NULL;
/* Test-only hook for intercepting libc-related events; disabled by default.
 * NOTE(review): exact call sites live elsewhere in jemalloc — confirm there. */
JEMALLOC_EXPORT
void (*test_hooks_libc_hook)() = NULL;
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
def _streaming_sum(scalar_tensor):
  """Create a sum metric and update op.

  Returns a (metric_variable, update_op) pair where the update op adds
  `scalar_tensor` into a local accumulator variable initialized to 0.
  """
  total = framework.local_variable(constant_op.constant(0.0))
  update_op = total.assign_add(scalar_tensor)
  return total, update_op
class GMM(estimator_lib.Estimator, TransformerMixin):
  """GMM clustering."""
  # Keys of the dicts returned by the predict / evaluate ops below.
  SCORES = 'scores'
  ASSIGNMENTS = 'assignments'
  ALL_SCORES = 'all_scores'

  def __init__(self,
               num_clusters,
               model_dir=None,
               random_seed=0,
               params='wmc',
               initial_clusters='random',
               covariance_type='full',
               batch_size=128,
               steps=10,
               continue_training=False,
               config=None,
               verbose=1):
    """Creates a model for running GMM training and inference.

    Args:
      num_clusters: number of clusters to train.
      model_dir: the directory to save the model results and log files.
      random_seed: Python integer. Seed for PRNG used to initialize centers.
      params: Controls which parameters are updated in the training process.
        Can contain any combination of "w" for weights, "m" for means,
        and "c" for covars.
      initial_clusters: specifies how to initialize the clusters for training.
        See gmm_ops.gmm for the possible values.
      covariance_type: one of "full", "diag".
      batch_size: See Estimator
      steps: See Estimator
      continue_training: See Estimator
      config: See Estimator
      verbose: See Estimator
    """
    super(GMM, self).__init__(model_dir=model_dir, config=config)
    self.batch_size = batch_size
    self.steps = steps
    self.continue_training = continue_training
    self.verbose = verbose
    self._num_clusters = num_clusters
    self._params = params
    self._training_initial_clusters = initial_clusters
    self._covariance_type = covariance_type
    self._training_graph = None
    self._random_seed = random_seed

  def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
    """Trains a GMM clustering on x.

    Note: See Estimator for logic for continuous training and graph
    construction across multiple calls to fit.

    Args:
      x: training input matrix of shape [n_samples, n_features].
      y: labels. Should be None.
      monitors: List of `Monitor` objects to print training progress and
        invoke early stopping.
      logdir: the directory to save the log file that can be used for optional
        visualization.
      steps: number of training steps. If not None, overrides the value passed
        in constructor.

    Returns:
      Returns self.
    """
    if logdir is not None:
      self._model_dir = logdir
    self._data_feeder = data_feeder.setup_train_data_feeder(x, None,
                                                            self._num_clusters,
                                                            self.batch_size)
    # Uses the legacy (pre-model_fn) training path; see helper below.
    _legacy_train_model(  # pylint: disable=protected-access
        self,
        input_fn=self._data_feeder.input_builder,
        feed_fn=self._data_feeder.get_feed_dict_fn(),
        steps=steps or self.steps,
        monitors=monitors,
        init_feed_fn=self._data_feeder.get_feed_dict_fn())
    return self

  def predict(self, x, batch_size=None):
    """Predict cluster id for each element in x.

    Args:
      x: 2-D matrix or iterator.
      batch_size: size to use for batching up x for querying the model.

    Returns:
      Array with same number of rows as x, containing cluster ids.
    """
    return np.array([
        prediction[GMM.ASSIGNMENTS]
        for prediction in super(GMM, self).predict(
            x=x, batch_size=batch_size, as_iterable=True)
    ])

  def score(self, x, batch_size=None):
    """Predict total sum of distances to nearest clusters.

    Args:
      x: 2-D matrix or iterator.
      batch_size: size to use for batching up x for querying the model.

    Returns:
      Total score.
    """
    return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])

  def transform(self, x, batch_size=None):
    """Transforms each element in x to distances to cluster centers.

    Args:
      x: 2-D matrix or iterator.
      batch_size: size to use for batching up x for querying the model.

    Returns:
      Array with same number of rows as x, and num_clusters columns, containing
      distances to the cluster centers.
    """
    return np.array([
        prediction[GMM.ALL_SCORES]
        for prediction in super(GMM, self).predict(
            x=x, batch_size=batch_size, as_iterable=True)
    ])

  def clusters(self):
    """Returns cluster centers."""
    # Reads the centers variable straight from the latest checkpoint.
    clusters = checkpoint_utils.load_variable(
        self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
    return np.squeeze(clusters, 1)

  def covariances(self):
    """Returns the covariances."""
    return checkpoint_utils.load_variable(
        self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)

  def _parse_tensor_or_dict(self, features):
    # Dict-valued features are concatenated column-wise in sorted-key order
    # so the model always sees a single 2-D tensor.
    if isinstance(features, dict):
      return array_ops.concat([features[k] for k in sorted(features.keys())], 1)
    return features

  def _get_train_ops(self, features, _):
    # Legacy Estimator hook: build the GMM graph and return (train_op, loss).
    (_, _, losses, training_op) = gmm_ops.gmm(
        self._parse_tensor_or_dict(features), self._training_initial_clusters,
        self._num_clusters, self._random_seed, self._covariance_type,
        self._params)
    incr_step = state_ops.assign_add(variables.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses)
    # Tie the global-step increment to the training op so both always run.
    training_op = with_dependencies([training_op, incr_step], loss)
    return training_op, loss

  def _get_predict_ops(self, features):
    (all_scores, model_predictions, _, _) = gmm_ops.gmm(
        self._parse_tensor_or_dict(features), self._training_initial_clusters,
        self._num_clusters, self._random_seed, self._covariance_type,
        self._params)
    return {
        GMM.ALL_SCORES: all_scores[0],
        GMM.ASSIGNMENTS: model_predictions[0][0],
    }

  def _get_eval_ops(self, features, _, unused_metrics):
    (_,
     _,
     losses,
     _) = gmm_ops.gmm(
         self._parse_tensor_or_dict(features),
         self._training_initial_clusters,
         self._num_clusters,
         self._random_seed,
         self._covariance_type,
         self._params)
    return {GMM.SCORES: _streaming_sum(math_ops.reduce_sum(losses))}
# TODO(xavigonzalvo): delete this after implementing model-fn based Estimator.
# TODO(xavigonzalvo): delete this after implementing model-fn based Estimator.
def _legacy_train_model(estimator,
                        input_fn,
                        steps,
                        feed_fn=None,
                        init_op=None,
                        init_feed_fn=None,
                        init_fn=None,
                        device_fn=None,
                        monitors=None,
                        log_every_steps=100,
                        fail_on_nan_loss=True,
                        max_steps=None):
  """Legacy train function of Estimator."""
  if hasattr(estimator.config, 'execution_mode'):
    # Skip training entirely unless this process is configured to train.
    if estimator.config.execution_mode not in ('all', 'train'):
      return

    # Stagger startup of worker sessions based on task id.
    sleep_secs = min(
        estimator.config.training_worker_max_startup_secs,
        estimator.config.task_id *
        estimator.config.training_worker_session_startup_stagger_secs)
    if sleep_secs:
      logging.info('Waiting %d secs before starting task %d.', sleep_secs,
                   estimator.config.task_id)
      time.sleep(sleep_secs)

  # Device allocation
  device_fn = device_fn or estimator._device_fn  # pylint: disable=protected-access

  with ops.Graph().as_default() as g, g.device(device_fn):
    random_seed_lib.set_random_seed(estimator.config.tf_random_seed)
    global_step = framework.create_global_step(g)
    features, labels = input_fn()
    estimator._check_inputs(features, labels)  # pylint: disable=protected-access

    # The default return type of _get_train_ops is ModelFnOps. But there are
    # some subclasses of tf.contrib.learn.Estimator which override this
    # method and use the legacy signature, namely _get_train_ops returns a
    # (train_op, loss) tuple. The following else-statement code covers these
    # cases, but will soon be deleted after the subclasses are updated.
    # TODO(b/32664904): Update subclasses and delete the else-statement.
    train_ops = estimator._get_train_ops(features, labels)  # pylint: disable=protected-access
    if isinstance(train_ops, model_fn_lib.ModelFnOps):  # Default signature
      train_op = train_ops.train_op
      loss_op = train_ops.loss
      if estimator.config.is_chief:
        hooks = train_ops.training_chief_hooks + train_ops.training_hooks
      else:
        hooks = train_ops.training_hooks
    else:  # Legacy signature
      if len(train_ops) != 2:
        raise ValueError('Expected a tuple of train_op and loss, got {}'.format(
            train_ops))
      train_op = train_ops[0]
      loss_op = train_ops[1]
      hooks = []

    # Monitors are converted to session hooks for the monitored-train loop.
    hooks += monitor_lib.replace_monitors_with_hooks(monitors, estimator)

    ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
    return graph_actions._monitored_train(  # pylint: disable=protected-access
        graph=g,
        output_dir=estimator.model_dir,
        train_op=train_op,
        loss_op=loss_op,
        global_step_tensor=global_step,
        init_op=init_op,
        init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
        init_fn=init_fn,
        log_every_steps=log_every_steps,
        supervisor_is_chief=estimator.config.is_chief,
        supervisor_master=estimator.config.master,
        supervisor_save_model_secs=estimator.config.save_checkpoints_secs,
        supervisor_save_model_steps=estimator.config.save_checkpoints_steps,
        supervisor_save_summaries_steps=estimator.config.save_summary_steps,
        keep_checkpoint_max=estimator.config.keep_checkpoint_max,
        keep_checkpoint_every_n_hours=(
            estimator.config.keep_checkpoint_every_n_hours),
        feed_fn=feed_fn,
        steps=steps,
        fail_on_nan_loss=fail_on_nan_loss,
        hooks=hooks,
        max_steps=max_steps)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'prince'
DESCRIPTION = 'Statistical factor analysis in Python'
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = 'https://github.com/MaxHalford/prince'
EMAIL = 'maxhalford25@gmail.com'
AUTHOR = 'Max Halford'
REQUIRES_PYTHON = '>=3.4.0'
# Leave VERSION as None to read it from <NAME>/__version__.py below.
VERSION = None

# What packages are required for this module to be executed?
REQUIRED = [
    'matplotlib>=3.0.2',
    'numpy>=1.16.1',
    'pandas>=0.24.0',
    'scipy>=1.1.0',
    'scikit-learn>=0.20.1'
]

# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!

here = os.path.abspath(os.path.dirname(__file__))

# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = '\n' + f.read()

# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    with open(os.path.join(here, NAME, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Custom ``setup.py upload`` command: build, publish to PyPI, push a git tag."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Print *s* in bold using ANSI escape codes."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        # This command takes no options.
        pass

    def finalize_options(self):
        # Nothing to validate.
        pass

    def run(self):
        dist_dir = os.path.join(here, 'dist')
        try:
            self.status('Removing previous builds…')
            rmtree(dist_dir)
        except OSError:
            # No previous build directory — nothing to clean.
            pass

        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))

        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')

        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')

        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=('tests',)),
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],

    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    install_requires=REQUIRED,
    include_package_data=True,
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # $ setup.py publish support.
    # Registers the custom UploadCommand defined above as `setup.py upload`.
    cmdclass={
        'upload': UploadCommand,
    },
)
#! /usr/bin/python2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
# Plot a 2-D lat/lon field from a CSV file and overlay contours of a
# reference field; saves the figure as <input>.png.
first = True
# Contour spacing for eta plots; levels are symmetric around 0, excluding 0.
s = 2e-5
eta_contour_levels = np.append(np.arange(-1e-4, 0, s), np.arange(s, 1e-4, s))
# NOTE(review): the next two assignments are dead — zoom_lat is immediately
# overwritten by the argv-based value on the following line.
zoom_lat = True
zoom_lat = False
zoom_lat = 'eta' in sys.argv[1]
fontsize = 8
figsize = (9, 3)
filename = sys.argv[1]        # field to plot (filled image)
ref_filename = sys.argv[2]    # reference field (dashed contours)
# NOTE(review): the `if True:` / `first` scaffolding looks like the remains of
# a loop over multiple files; with a single file both are always taken.
if True:
    print(filename)
    data = np.loadtxt(filename, skiprows=3)
    # First row holds longitude labels, first column latitude labels.
    labelsx = data[0, 1:]
    labelsy = data[1:, 0]
    data = data[1:, 1:]
    if np.isnan(data).any():
        print("Skipping "+filename+" because of NaN")
        sys.exit(1)
    if zoom_lat:
        # Trim rows outside roughly 10..80 degrees latitude.
        while labelsy[1] < 10:
            labelsy = labelsy[1:]
            data = data[1:]
        while labelsy[-2] > 80:
            labelsy = labelsy[0:-2]
            data = data[0:-2]
    #        while labelsx[1] < 90:
    #            tmplabelsx = labelsx[0]
    #            labelsx[0:-1] = labelsx[1:]
    #            labelsx[-1] = tmplabelsx
    #
    #            tmpdata = data[:,0]
    #            data[:,0:-1] = data[:,1:]
    #            data[:,-1] = tmpdata
    # Reference
    if True:
        refdata = np.loadtxt(ref_filename, skiprows=3)
        refdata = refdata[1:, 1:]
        if zoom_lat:
            # NOTE(review): labelsy has already been trimmed above, so these
            # conditions are false and refdata is never trimmed — likely a
            # latent shape mismatch with `data`; confirm intended behavior.
            while labelsy[1] < 10:
                labelsy = labelsy[1:]
                refdata = refdata[1:]
            while labelsy[-2] > 80:
                labelsy = labelsy[0:-2]
                refdata = refdata[0:-2]
    if first:
        # Reduce tick labels to 7 evenly spaced positions.
        lon_min = labelsx[0]
        lon_max = labelsx[-1]
        lat_min = labelsy[0]
        lat_max = labelsy[-1]
        new_labelsx = np.linspace(lon_min, lon_max, 7)
        new_labelsy = np.linspace(lat_min, lat_max, 7)
        labelsx = np.interp(new_labelsx, labelsx, labelsx)
        labelsy = np.interp(new_labelsy, labelsy, labelsy)
    if first:
        # Color scale fixed from the first file's data range.
        cmin = np.amin(data)
        cmax = np.amax(data)
        if 'eta' in filename:
            cmin *= 1.2
            cmax *= 1.2
    extent = (labelsx[0], labelsx[-1], labelsy[0], labelsy[-1])
    plt.figure(figsize=figsize)
    plt.imshow(data, interpolation='nearest', extent=extent, origin='lower', aspect='auto')
    plt.clim(cmin, cmax)
    cbar = plt.colorbar()
    cbar.ax.tick_params(labelsize=fontsize)
    plt.title(filename, fontsize=fontsize)
    if 'eta' in filename:
        # Solid contours: data; dashed contours: reference.
        plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=eta_contour_levels, linewidths=0.5)
        plt.contour(refdata, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=eta_contour_levels, linewidths=0.5, linestyles='dashed')
    else:
        if cmin != cmax:
            plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, linewidths=0.5)
            plt.contour(refdata, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, linewidths=0.5, linestyles='dashed')
    ax = plt.gca()
    ax.xaxis.set_label_coords(0.5, -0.075)
    plt.xticks(labelsx, fontsize=fontsize)
    plt.xlabel("Longitude", fontsize=fontsize)
    plt.yticks(labelsy, fontsize=fontsize)
    plt.ylabel("Latitude", fontsize=fontsize)
    #plt.show()
    outfilename = filename.replace('.csv', '.png')
    print(outfilename)
    plt.savefig(outfilename, dpi=200)
    plt.close()
    first = False
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,gcc-sm6115.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Global Clock & Reset Controller on SM6115 and SM4250
maintainers:
- Iskren Chernev <iskren.chernev@gmail.com>
description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SM4250/6115.
See also: include/dt-bindings/clock/qcom,gcc-sm6115.h
properties:
compatible:
const: qcom,gcc-sm6115
clocks:
items:
- description: Board XO source
- description: Sleep clock source
clock-names:
items:
- const: bi_tcxo
- const: sleep_clk
required:
- compatible
- clocks
- clock-names
- '#power-domain-cells'
allOf:
- $ref: qcom,gcc.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
clock-controller@1400000 {
compatible = "qcom,gcc-sm6115";
reg = <0x01400000 0x1f0000>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
clock-names = "bi_tcxo", "sleep_clk";
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&sleep_clk>;
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/qcom,gcc-sm6115.yaml |
"""
code from nanophotonics/npmie repository
url: https://github.com/nanophotonics/npmie
code by Alan Sanders
modified to use in MSTM-studio by L.Aavakyan
"""
import numpy as np
from mstm_studio.mstm_spectrum import Material
try:
    from scipy.special import sph_jnyn
except ImportError:
    # sph_jnyn was removed from modern SciPy; rebuild it from the
    # spherical_jn / spherical_yn primitives. Catching only ImportError
    # (instead of a bare `except:`) avoids masking unrelated failures
    # such as KeyboardInterrupt.
    from scipy.special import spherical_yn, spherical_jn

    def sph_jnyn(maxn, z):
        """Return (jn, djn, yn, dyn) arrays of spherical Bessel values and
        their derivatives for orders 0..maxn evaluated at z, matching the
        old scipy.special.sph_jnyn signature."""
        jn = []
        djn = []
        yn = []
        dyn = []
        for n in range(0, maxn + 1):
            jn.append(spherical_jn(n, z))
            djn.append(spherical_jn(n, z, derivative=True))
            yn.append(spherical_yn(n, z))
            dyn.append(spherical_yn(n, z, derivative=True))
        return np.array(jn), np.array(djn), np.array(yn), np.array(dyn)
def sph_hn(n, x):
    """Spherical Hankel function of the first kind and its derivative,
    built as h(n, x) = j(n, x) + i*y(n, x)."""
    jn, djn, yn, dyn = sph_jnyn(n, x)
    return jn + 1j * yn, djn + 1j * dyn
def calculate_mie_coefficients(n_max, x, m):
    """
    Calculates the Mie coefficients a_n, b_n.

    :rtype : object
    :param n_max: highest order retained in the series expansion
    :param x: size parameter
    :param m: relative refractive index (sphere over medium)
    """
    # calculate spherical bessels #
    jn, djn, yn, dyn = sph_jnyn(n_max, x)  # j(n, x), y(n, x)
    jm, djm, ym, dym = sph_jnyn(n_max, m * x)  # j(n, mx), y(n, mx)
    # calculate spherical hankel #
    hn, dhn = sph_hn(n_max, x)  # h(n, x)
    # calculate riccati bessel functions (derivatives via recurrence) #
    # NOTE(review): at n == 0 the jn[n-1] index wraps around to the LAST
    # element; the order-0 coefficients are therefore wrong, but the only
    # caller (calculate_mie_efficiencies) sums from n = 1, so the bad
    # entries are never used — confirm before reusing these arrays.
    dpsi_n = [x * jn[n-1] - n * jn[n] for n in range(0, len(jn))]
    dpsi_m = [m * x * jm[n-1] - n * jm[n] for n in range(0, len(jm))]
    dzeta_n = [x * hn[n-1] - n * hn[n] for n in range(0, len(hn))]

    a_n = (m**2 * jm * dpsi_n - jn * dpsi_m) / (m**2 * jm * dzeta_n - hn * dpsi_m)
    b_n = (jm * dpsi_n - jn * dpsi_m) / (jm * dzeta_n - hn * dpsi_m)
    return a_n, b_n
def calculate_mie_efficiencies(r, wavelength, n_sph, n_med):
    """
    Calculates the mie efficiencies (q_scat, q_abs, q_ext, q_bscat)
    for a sphere in a dielectric medium at a given wavelength.

    :rtype : object
    :param r: radius of the sphere
    :param wavelength: wavelength of illumination
    :param n_sph: complex refractive index of the sphere
    :param n_med: real refractive index of the dielectric medium
    :return: (q_scat, q_bscat, q_ext, q_abs) tuple
    """
    # calculate size parameter #
    x = n_med * (2 * np.pi / wavelength) * r  # x = n_med * kr, size parameter
    m = n_sph / n_med
    # n_max = int(np.ceil(x.real)+1)  # number of terms in series expansion
    # Wiscombe-style truncation criterion for the series length.
    n_max = int(x + 4 * x**(1.0 / 3.0) + 2)  # number of terms in series expansion
    q_scat = 0
    q_bscat = 0
    q_ext = 0
    q_abs = 0
    a_n, b_n = calculate_mie_coefficients(n_max, x, m)
    # NOTE(review): the `a` and `b` accumulators are never read after the
    # loop — they look like dead code.
    a = 0
    b = 0
    # Sum the series from n = 1 (index 0 of a_n/b_n is intentionally unused).
    # NOTE(review): range(1, n_max) excludes the n_max term itself — confirm
    # whether the last order should be included.
    for n in range(1, n_max):
        a += a_n[n]
        b += b_n[n]
        q_scat += (2 * n + 1) * (abs(a_n[n])**2 + abs(b_n[n])**2)
        # Backscatter picks up an alternating sign per order.
        q_bscat += (2 * n + 1) * ((-1)**n) * (abs(a_n[n])**2 + abs(b_n[n])**2)
        q_ext += (2 * n + 1) * (a_n[n] + b_n[n]).real
    q_scat *= 2 / x**2
    q_bscat *= 2 / x**2
    q_ext *= 2 / x**2
    # Absorption follows from energy conservation.
    q_abs = q_ext - q_scat
    return q_scat, q_bscat, q_ext, q_abs
def calculate_mie_spectra(wavelengths, r, material, n_medium=1.):
    """
    Calculates the mie scattering and extinction efficiency of spherical
    nanoparticles with radius r and given material surrounded by a medium
    n_medium for a set of given wavelengths.

    :param wavelengths: array of wavelengths to calculate spectra from
    :param r: radius of the sphere
    :param material: instance of Material class
    :param n_medium: refractive index of the surrounding dielectric medium
    :return: (scattering, backscattering, extinction, absorption) arrays
    """
    scattering, backscattering, extinction, absorption = [], [], [], []
    for wl in wavelengths:
        # Complex refractive index of the sphere at this wavelength.
        n_sph = material.get_n(wl) + 1j * material.get_k(wl)
        q_scat, q_bscat, q_ext, q_abs = calculate_mie_efficiencies(
            r, wl, n_sph, n_medium
        )
        scattering.append(q_scat)
        backscattering.append(q_bscat)
        extinction.append(q_ext)
        absorption.append(q_abs)
    return (np.array(scattering), np.array(backscattering),
            np.array(extinction), np.array(absorption))
if __name__ == '__main__':
    # Demo/test driver: compute and plot the four Mie spectra for a gold
    # sphere in vacuum, and dump them to MIE.dat.
    import matplotlib.pyplot as plt
    #~ diameter_np = raw_input('Enter nanoparticle diameter (nm): ')
    #~ material = raw_input("Enter nanoparticle material: ")
    #~ medium = raw_input("Enter surrounding medium: ")
    diameter_np = material = medium = ''  # test
    # Empty answers fall back to defaults: 140 nm Au sphere in vacuum.
    if diameter_np == '':
        diameter_np = 140.
    else:
        diameter_np = float(diameter_np)
    if material == '':
        material = 'Au'
    if medium == '':
        medium = 1.
    else:
        medium = float(medium)
    mat_dict = {'Au': 'etaGold.txt', 'Ag': 'etaSilver.txt'}
    # NOTE(review): mat_dict/material are currently unused — Material(3)
    # is hard-coded here instead of Material(mat_dict[material]).
    material_object = Material(3)  # Material(mat_dict[material])
    wavelength = np.arange(300, 1000, 0.1)
    mie_scattering, mie_backscattering, mie_extinction, \
        mie_absorption = calculate_mie_spectra(
            wavelength, diameter_np / 2.0, material_object, medium
        )
    # save to file
    data = np.stack([wavelength, mie_scattering, mie_backscattering, \
                     mie_extinction, mie_absorption])
    np.savetxt('MIE.dat', np.transpose(data), header='wl\tscatt\tbscatt\text\tabs')
    fig = plt.figure()
    # wavelength plots: four stacked panels sharing the wavelength axis #
    ax = fig.add_subplot(411)
    ax.plot(wavelength, mie_scattering, 'r', label='scattering')
    ax.set_xticklabels(ax.get_xticklabels(), visible=False)
    ax.set_ylabel('scattering')
    ax = fig.add_subplot(412)
    ax.plot(wavelength, mie_backscattering, 'k', label='back-scattering')
    ax.set_xticklabels(ax.get_xticklabels(), visible=False)
    ax.set_ylabel('back-scattering')
    ax = fig.add_subplot(413)
    ax.plot(wavelength, mie_extinction, 'b', label='extinction')
    ax.set_xticklabels(ax.get_xticklabels(), visible=False)
    ax.set_ylabel('extinction')
    ax = fig.add_subplot(414)
    ax.plot(wavelength, mie_absorption, 'g', label='absorption')
    ax.set_ylabel('absorption')
    ax.set_xlabel('wavelength (nm)')
    plt.tight_layout()
    plt.show()
{
"html": {
"type": "Fragment",
"start": 0,
"end": 15,
"children": [
{
"type": "Element",
"start": 0,
"end": 15,
"name": "!doctype",
"attributes": [
{
"type": "Attribute",
"start": 10,
"end": 14,
"name": "html",
"name_loc": {
"start": {
"line": 1,
"column": 10,
"character": 10
},
"end": {
"line": 1,
"column": 14,
"character": 14
}
},
"value": true
}
],
"children": []
}
]
}
} | json | github | https://github.com/sveltejs/svelte | packages/svelte/tests/parser-legacy/samples/elements/output.json |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
from cms.utils.urlutils import admin_reverse
from django.contrib.sites.models import Site
from cms.api import create_page
from cms.models import Page, Placeholder
from cms.utils import get_cms_setting
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import SettingsOverride
class SiteTestCase(CMSTestCase):
"""Site framework specific test cases.
All stuff which is changing settings.SITE_ID for tests should come here.
"""
    def setUp(self):
        # Sanity check: the test database starts with exactly one Site.
        self.assertEqual(Site.objects.all().count(), 1)
        with SettingsOverride(SITE_ID=1):
            u = self._create_user("test", True, True)

            # setup sites
            self.site2 = Site.objects.create(domain="sample2.com", name="sample2.com", pk=2)
            self.site3 = Site.objects.create(domain="sample3.com", name="sample3.com", pk=3)

        # Stay logged in for the whole test; the context is exited in tearDown.
        self._login_context = self.login_user_context(u)
        self._login_context.__enter__()
    def tearDown(self):
        # Close the login context opened in setUp.
        self._login_context.__exit__(None, None, None)
    def test_site_framework(self):
        #Test the site framework, and test if it's possible to disable it
        with SettingsOverride(SITE_ID=self.site2.pk):
            create_page("page_2a", "nav_playground.html", "de", site=self.site2)

            # Cross-site admin filtering should still work under SITE_ID=2.
            response = self.client.get("/en/admin/cms/page/?site__exact=%s" % self.site3.pk)
            self.assertEqual(response.status_code, 200)
            create_page("page_3b", "nav_playground.html", "de", site=self.site3)

        with SettingsOverride(SITE_ID=self.site3.pk):
            create_page("page_3a", "nav_playground.html", "nl", site=self.site3)

            # with param
            self.assertEqual(Page.objects.on_site(self.site2.pk).count(), 1)
            self.assertEqual(Page.objects.on_site(self.site3.pk).count(), 2)

            # without param, on_site() falls back to the current SITE_ID (3).
            self.assertEqual(Page.objects.drafts().on_site().count(), 2)

        with SettingsOverride(SITE_ID=self.site2.pk):
            # without param
            self.assertEqual(Page.objects.drafts().on_site().count(), 1)
def test_site_preview(self):
page = create_page("page", "nav_playground.html", "de", site=self.site2, published=True)
with self.login_user_context(self.get_superuser()):
response = self.client.get(admin_reverse('cms_page_preview_page', args=[page.pk, 'de']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response._headers['location'][1], 'http://sample2.com/de/?%s&language=de' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
def test_site_publish(self):
self._login_context.__exit__(None, None, None)
pages = {"2": list(range(0, 5)), "3": list(range(0, 5))}
lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
lang_settings[3][1]['public'] = True
with SettingsOverride(CMS_LANGUAGES=lang_settings, LANGUAGE_CODE="de"):
with SettingsOverride(SITE_ID=self.site2.pk):
pages["2"][0] = create_page("page_2", "nav_playground.html", "de",
site=self.site2)
pages["2"][0].publish('de')
pages["2"][1] = create_page("page_2_1", "nav_playground.html", "de",
parent=pages["2"][0], site=self.site2)
pages["2"][2] = create_page("page_2_2", "nav_playground.html", "de",
parent=pages["2"][0], site=self.site2)
pages["2"][3] = create_page("page_2_1_1", "nav_playground.html", "de",
parent=pages["2"][1], site=self.site2)
pages["2"][4] = create_page("page_2_1_2", "nav_playground.html", "de",
parent=pages["2"][1], site=self.site2)
for page in pages["2"]:
page.publish('de')
for page in pages["2"]:
if page.is_home:
page_url = "/de/"
else:
page_url = page.get_absolute_url(language='de')
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
with SettingsOverride(SITE_ID=self.site3.pk):
pages["3"][0] = create_page("page_3", "nav_playground.html", "de",
site=self.site3)
pages["3"][0].publish('de')
pages["3"][1] = create_page("page_3_1", "nav_playground.html", "de",
parent=pages["3"][0], site=self.site3)
pages["3"][2] = create_page("page_3_2", "nav_playground.html", "de",
parent=pages["3"][0], site=self.site3)
pages["3"][3] = create_page("page_3_1_1", "nav_playground.html", "de",
parent=pages["3"][1], site=self.site3)
pages["3"][4] = create_page("page_3_1_2", "nav_playground.html", "de",
parent=pages["3"][1], site=self.site3)
for page in pages["3"]:
page.publish('de')
for page in pages["3"]:
if page.is_home:
page_url = "/de/"
else:
page_url = page.get_absolute_url(language='de')
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
def test_site_delete(self):
with SettingsOverride(SITE_ID=self.site2.pk):
create_page("page_2a", "nav_playground.html", "de", site=self.site2)
self.assertEqual(Placeholder.objects.count(), 2)
self.site2.delete()
self.assertEqual(Placeholder.objects.count(), 0) | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
require "active_support/core_ext/module"
require "action_view/model_naming"
module ActionView
  # = Action View \Record \Identifier
  #
  # RecordIdentifier holds the methods various ActionView helpers use to tie
  # records to DOM elements.
  #
  # For example, given:
  #
  #   <%= form_with(model: post) do |f| %>
  #     <%= f.text_field :body %>
  #   <% end %>
  #
  # a new, unsaved +post+ renders as:
  #
  #   <form class="new_post" id="new_post" action="/posts" accept-charset="UTF-8" method="post">
  #     <input type="text" name="post[body]" id="post_body" />
  #   </form>
  #
  # while a persisted +post+ renders as:
  #
  #   <form class="edit_post" id="edit_post_42" action="/posts/42" accept-charset="UTF-8" method="post">
  #     <input type="text" value="What a wonderful world!" name="post[body]" id="post_body" />
  #   </form>
  #
  # The +id+ and +class+ of the wrapping DOM element follow the conventions
  # implemented here by #dom_id and #dom_class:
  #
  #   dom_id(Post)         # => "new_post"
  #   dom_class(Post)      # => "post"
  #   dom_id(Post.new)     # => "new_post"
  #   dom_class(Post.new)  # => "post"
  #   dom_id(Post.find 42) # => "post_42"
  #   dom_class(Post.find 42) # => "post"
  #
  # +Post+ does not have to inherit from ActiveRecord::Base: any class whose
  # instances respond to +to_key+ and +model_name+ (with +model_name+
  # responding to +param_key+) will do. For instance:
  #
  #   class Post
  #     attr_accessor :to_key
  #
  #     def model_name
  #       OpenStruct.new param_key: 'post'
  #     end
  #
  #     def self.find(id)
  #       new.tap { |post| post.to_key = [id] }
  #     end
  #   end
  module RecordIdentifier
    extend self
    extend ModelNaming
    include ModelNaming

    # Separator used between the prefix, the class name, and the key.
    JOIN = "_"
    # Prefix applied when the record has no key yet (new/unsaved).
    NEW = "new"

    # Returns the singular form of an object or class, optionally prefixed —
    # useful to distinguish multiple instances of the same class in one view.
    #
    #   dom_class(post)         # => "post"
    #   dom_class(Person)       # => "person"
    #   dom_class(post, :edit)  # => "edit_post"
    #   dom_class(Person, :edit) # => "edit_person"
    def dom_class(record_or_class, prefix = nil)
      singular = model_name_from_record_or_class(record_or_class).param_key
      return singular unless prefix
      "#{prefix}#{JOIN}#{singular}"
    end

    # Returns the singular form followed by an underscore and the record's
    # id; records without an id get a "new_" prefix instead.
    #
    #   dom_id(Post.find(45))        # => "post_45"
    #   dom_id(Post)                 # => "new_post"
    #   dom_id(Post.find(45), :edit) # => "edit_post_45"
    #   dom_id(Post, :custom)        # => "custom_post"
    def dom_id(record_or_class, prefix = nil)
      raise ArgumentError, "dom_id must be passed a record_or_class as the first argument, you passed #{record_or_class.inspect}" unless record_or_class

      record_id = record_or_class.is_a?(Class) ? nil : record_key_for_dom_id(record_or_class)
      if record_id
        "#{dom_class(record_or_class, prefix)}#{JOIN}#{record_id}"
      else
        dom_class(record_or_class, prefix || NEW)
      end
    end

    # Joins any number of parameters into one underscore-separated string.
    # Records go through dom_id; strings and symbols are kept as-is.
    #
    #   dom_target(Post.find(45))                 # => "post_45"
    #   dom_target(Post.find(45), :edit)          # => "post_45_edit"
    #   dom_target(Post.find(45), :edit, :special) # => "post_45_edit_special"
    #   dom_target(Post.find(45), Comment.find(1)) # => "post_45_comment_1"
    def dom_target(*objects)
      parts = objects.map do |object|
        case object
        when Symbol, String
          object
        when Class
          dom_class(object)
        else
          dom_id(object)
        end
      end
      parts.join(JOIN)
    end

    private
      # Returns a DOM-id-safe string built from the record's key attributes,
      # or nil when the record has no complete key. Override this to change
      # the representation — and, if you do, also adjust any helper that
      # parses keys back out of dom ids, and make sure the result stays a
      # valid DOM id.
      def record_key_for_dom_id(record) # :doc:
        key = convert_to_model(record).to_key
        return nil unless key && key.all?
        key.join(JOIN)
      end
  end
end
from abc import abstractmethod, ABCMeta
import logging
from typing import Dict, Optional
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.utils.typing import AgentID, EpisodeID, PolicyID, \
TensorType
logger = logging.getLogger(__name__)
class _SampleCollector(metaclass=ABCMeta):
    """Interface for gathering multi-agent env samples across all policies.

    Note: Experimental class, active only while
    `config._use_trajectory_view_api` is True. Once that API becomes the
    config default, this class will supersede the `SampleBatchBuilder` and
    `MultiAgentBatchBuilder` classes.

    RolloutWorker objects drive this API to store everything that
    Environments and Policies/Models produce during rollout and
    postprocessing. Its purposes: a) faster data collection and
    SampleBatch/input_dict construction from that data, b) a single,
    user-customizable path for collecting samples from envs and model
    outputs, and c) support for richer per-policy inputs (e.g. an
    inter-agent communication channel in the multi-agent case).
    """

    @abstractmethod
    def add_init_obs(self, episode_id: EpisodeID, agent_id: AgentID,
                     policy_id: PolicyID, init_obs: TensorType) -> None:
        """Starts a new trajectory with the observation from env.reset().

        The very first observation of an episode arrives without an action
        or reward attached, so it is recorded through this dedicated call.
        For each agent/episode-ID pair, `add_init_obs()` must be called
        once before any `add_action_reward_next_obs()` calls for that same
        pair.

        Args:
            episode_id (EpisodeID): Unique id of the episode being
                recorded.
            agent_id (AgentID): Unique id of the agent being recorded.
            policy_id (PolicyID): Unique id of the policy controlling the
                agent.
            init_obs (TensorType): The initial observation (after
                env.reset()).

        Examples:
            >>> obs = env.reset()
            >>> collector.add_init_obs(12345, 0, "pol0", obs)
            >>> obs, r, done, info = env.step(action)
            >>> collector.add_action_reward_next_obs(12345, 0, "pol0", {
            ...     "action": action, "obs": obs, "reward": r, "done": done
            ... })
        """
        raise NotImplementedError

    @abstractmethod
    def add_action_reward_next_obs(self, episode_id: EpisodeID,
                                   agent_id: AgentID, policy_id: PolicyID,
                                   values: Dict[str, TensorType]) -> None:
        """Records one env step (row of values) for an agent.

        `values` must carry action, reward, done, and next-obs entries and
        may carry arbitrary extra columns. The initial reset observation of
        an agent/episode-ID pair must instead go through `add_init_obs()`.

        Args:
            episode_id (EpisodeID): Unique id of the episode being
                recorded.
            agent_id (AgentID): Unique id of the agent being recorded.
            policy_id (PolicyID): Unique id of the policy controlling the
                agent.
            values (Dict[str, TensorType]): Row of values to add for this
                agent; must contain the keys SampleBatch.ACTION, REWARD,
                NEW_OBS, and DONE.

        Examples:
            >>> obs = env.reset()
            >>> collector.add_init_obs(12345, 0, "pol0", obs)
            >>> obs, r, done, info = env.step(action)
            >>> collector.add_action_reward_next_obs(12345, 0, "pol0", {
            ...     "action": action, "obs": obs, "reward": r, "done": done
            ... })
        """
        raise NotImplementedError

    @abstractmethod
    def total_env_steps(self) -> int:
        """Returns the total env step count, summed over all agents.

        Returns:
            int: The number of steps taken in total in the environment
                over all agents.
        """
        raise NotImplementedError

    @abstractmethod
    def get_inference_input_dict(self, policy_id: PolicyID) -> \
            Dict[str, TensorType]:
        """Builds an input_dict for an inference forward pass.

        The returned dict can be fed to
        `Policy.compute_actions_from_input_dict()` for action computation.

        Args:
            policy_id (PolicyID): The Policy ID to get the input dict for.

        Returns:
            Dict[str, TensorType]: The input_dict to be passed into the
                ModelV2 for inference/training.

        Examples:
            >>> obs, r, done, info = env.step(action)
            >>> collector.add_action_reward_next_obs(12345, 0, "pol0", {
            ...     "action": action, "obs": obs, "reward": r, "done": done
            ... })
            >>> input_dict = collector.get_inference_input_dict(policy.model)
            >>> action = policy.compute_actions_from_input_dict(input_dict)
            >>> # repeat
        """
        raise NotImplementedError

    @abstractmethod
    def has_non_postprocessed_data(self) -> bool:
        """Reports whether any collected data still awaits postprocessing.

        Returns:
            bool: True if at least some data has not been postprocessed
                yet.
        """
        raise NotImplementedError

    @abstractmethod
    def postprocess_trajectories_so_far(
            self, episode: Optional[MultiAgentEpisode] = None) -> None:
        """Postprocesses pending data for one episode (or for all of them).

        Builds single-trajectory SampleBatches for every Policy/Agent and
        runs Policy.postprocess_trajectory on each. Postprocessing may
        happen in-place, i.e. changes to the viewed columns are reflected
        directly in this collector's buffers; newly created columns are
        added to the buffers as well.

        Args:
            episode (Optional[MultiAgentEpisode]): The Episode object whose
                data should be postprocessed; postprocess all episodes when
                None.
        """
        raise NotImplementedError

    @abstractmethod
    def check_missing_dones(self, episode_id: EpisodeID) -> None:
        """Verifies the given episode ended with done=True for all agents.

        Args:
            episode_id (EpisodeID): The episode ID to check for proper
                termination.

        Raises:
            ValueError: If `episode` has no done=True at the end.
        """
        raise NotImplementedError

    @abstractmethod
    def get_multi_agent_batch_and_reset(self):
        """Returns all accumulated per-policy batches and resets the state.

        Any unprocessed rows are first run through the policy
        postprocessor; the builder is then reset for the next batch. This
        is typically called to collect samples for policy training.

        Returns:
            MultiAgentBatch: The accumulated sample batches for each policy
                inside one MultiAgentBatch object.
        """
        raise NotImplementedError
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-30 03:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def 初始狀況表(app, editor):
    """Seed the 語料狀況表 table with the default 狀況 entries (idempotent)."""
    # Look the model up once via the migration app registry, not a direct
    # import, so the historical model state is used.
    語料狀況表 = app.get_model("語料庫", "語料狀況表")
    for 狀況名 in (
        "範例",
        "愛討論",
        "品質:講袂清楚",
        "品質:有人聲雜音",
        "品質:有非人聲雜音",
        "詞:有外語詞",
        "詞:有合音",
        "詞:講法佮辭典無仝",
        "句:無合文法",
    ):
        # get_or_create keeps re-running this migration step harmless.
        語料狀況表.objects.get_or_create(狀況=狀況名)
class Migration(migrations.Migration):
    # Initial migration for the 語料庫 (corpus) app: creates the status,
    # corpus-entry, and audio-file tables, then seeds the status table via
    # 初始狀況表.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # 語料狀況表: lookup table of annotation statuses for corpus rows.
        migrations.CreateModel(
            name='語料狀況表',
            fields=[
                ('id', models.AutoField(
                    auto_created=True, primary_key=True, serialize=False,
                    verbose_name='ID')),
                ('狀況', models.CharField(max_length=30, unique=True)),
            ],
            options={
                'verbose_name': '狀況表',
                'verbose_name_plural': '狀況表',
            },
        ),
        # 語料表: one transcribed utterance (time span, speaker, several
        # transcription variants, proofreading metadata, legacy ids).
        migrations.CreateModel(
            name='語料表',
            fields=[
                ('id', models.AutoField(
                    auto_created=True, primary_key=True, serialize=False,
                    verbose_name='ID')),
                ('聲音開始時間', models.FloatField()),
                ('聲音結束時間', models.FloatField()),
                ('語者', models.CharField(db_index=True, max_length=50)),
                ('漢字', models.TextField(blank=True)),
                ('本調臺羅', models.TextField(blank=True)),
                ('口語調臺羅', models.TextField(blank=True)),
                ('華語', models.TextField(blank=True)),
                ('校對時間', models.DateTimeField(auto_now=True)),
                ('頭一版資料', models.TextField(blank=True)),
                ('頭一版通用', models.TextField(blank=True)),
                ('sing5hong5舊編號', models.CharField(max_length=200, null=True)),
                ('sing5hong5新編號', models.CharField(max_length=200, null=True)),
                ('sing5hong5有揀出來用無', models.BooleanField(default=False)),
                ('校對者', models.ForeignKey(
                    null=True, on_delete=django.db.models.deletion.CASCADE,
                    to=settings.AUTH_USER_MODEL)),
                ('語料狀況', models.ManyToManyField(blank=True, to='語料庫.語料狀況表')),
            ],
            options={
                'verbose_name': '語料表',
                'verbose_name_plural': '語料表',
            },
        ),
        # 音檔表: a source audio file plus its transcript file, unique per
        # (folder, transcript-file) pair.
        migrations.CreateModel(
            name='音檔表',
            fields=[
                ('id', models.AutoField(
                    auto_created=True, primary_key=True, serialize=False,
                    verbose_name='ID')),
                ('類別', models.CharField(choices=[
                    ('戲劇', '戲劇'), ('朗讀', '朗讀'), ('新聞', '新聞'), ('對話', '對話')],
                    db_index=True, max_length=20)),
                ('原始檔', models.FileField(blank=True, upload_to='')),
                ('資料夾名', models.CharField(max_length=50)),
                ('聲音檔名', models.CharField(max_length=200)),
                ('聽拍檔名', models.CharField(max_length=200)),
                ('加入時間', models.DateTimeField(
                    auto_now_add=True, db_index=True)),
            ],
            options={
                'ordering': ['資料夾名', '聲音檔名', '聽拍檔名'],
            },
        ),
        migrations.AlterUniqueTogether(
            name='音檔表',
            unique_together=set([('資料夾名', '聽拍檔名')]),
        ),
        # Link each corpus row back to its audio file.
        migrations.AddField(
            model_name='語料表',
            name='音檔',
            field=models.ForeignKey(
                null=True, on_delete=django.db.models.deletion.CASCADE,
                related_name='資料', to='語料庫.音檔表'),
        ),
        # Reverse is a no-op lambda; migrations.RunPython.noop would express
        # the same intent more idiomatically.
        migrations.RunPython(初始狀況表, lambda *x:x),
    ]
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"testing"
"time"
"github.com/blang/semver/v4"
v1 "k8s.io/api/coordination/v1"
v1beta1 "k8s.io/api/coordination/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestPickBestLeaderOldestEmulationVersion checks that the candidate with the
// oldest emulation version (tie-broken by binary version, then creation
// timestamp) wins. Note the assertion only compares Name and Namespace of the
// expected winner; the want.Spec fields are informational for the reader and
// are kept consistent with the corresponding candidate's spec.
func TestPickBestLeaderOldestEmulationVersion(t *testing.T) {
	tests := []struct {
		name       string
		candidates []*v1beta1.LeaseCandidate
		want       *v1beta1.LeaseCandidate
	}{
		{
			name:       "empty",
			candidates: []*v1beta1.LeaseCandidate{},
			want:       nil,
		},
		{
			name: "single candidate",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
			},
			want: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
		{
			name: "multiple candidates, different emulation versions",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.2.0",
						BinaryVersion:    "0.2.0",
					},
				},
			},
			want: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				// Fixed: was "v1"/"v1", inconsistent with candidate1's spec
				// (harmless only because the assertion ignores Spec).
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
		{
			name: "multiple candidates, same emulation versions, different binary versions",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.2.0",
					},
				},
			},
			want: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
		{
			name: "multiple candidates, same emulation versions, same binary versions, different creation timestamps",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
			},
			want: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := pickBestLeaderOldestEmulationVersion(tt.candidates)
			if got != nil && tt.want != nil {
				// Only identity (Name/Namespace) of the winner is asserted.
				if got.Name != tt.want.Name || got.Namespace != tt.want.Namespace {
					t.Errorf("pickBestLeaderOldestEmulationVersion() = %v, want %v", got, tt.want)
				}
			} else if got != tt.want {
				t.Errorf("pickBestLeaderOldestEmulationVersion() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestValidLeaseCandidateForOldestEmulationVersion verifies that a candidate
// is accepted only when both its emulation and binary versions parse.
func TestValidLeaseCandidateForOldestEmulationVersion(t *testing.T) {
	type testCase struct {
		name      string
		candidate *v1beta1.LeaseCandidate
		want      bool
	}
	cases := []testCase{
		{
			name: "valid emulation and binary versions",
			candidate: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
			want: true,
		},
		{
			name: "invalid emulation version",
			candidate: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "invalid",
					BinaryVersion:    "0.1.0",
				},
			},
			want: false,
		},
		{
			name: "invalid binary version",
			candidate: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "invalid",
				},
			},
			want: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := validLeaseCandidateForOldestEmulationVersion(tc.candidate); got != tc.want {
				t.Errorf("validLeaseCandidateForOldestEmulationVersion() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestGetEmulationVersion checks that a candidate's EmulationVersion string
// is parsed into the expected semver value.
func TestGetEmulationVersion(t *testing.T) {
	type testCase struct {
		name      string
		candidate *v1beta1.LeaseCandidate
		want      semver.Version
	}
	cases := []testCase{
		{
			name: "valid emulation version",
			candidate: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
				},
			},
			want: semver.MustParse("0.1.0"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Compare via FinalizeVersion() so pre-release/build metadata is
			// normalized away before the string comparison.
			if got := getEmulationVersionOrZero(tc.candidate); got.FinalizeVersion() != tc.want.FinalizeVersion() {
				t.Errorf("getEmulationVersion() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestGetBinaryVersion checks that a candidate's BinaryVersion string is
// parsed into the expected semver value.
func TestGetBinaryVersion(t *testing.T) {
	type testCase struct {
		name      string
		candidate *v1beta1.LeaseCandidate
		want      semver.Version
	}
	cases := []testCase{
		{
			name: "valid binary version",
			candidate: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					BinaryVersion: "0.3.0",
				},
			},
			want: semver.MustParse("0.3.0"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Compare via FinalizeVersion() so pre-release/build metadata is
			// normalized away before the string comparison.
			if got := getBinaryVersionOrZero(tc.candidate); got.FinalizeVersion() != tc.want.FinalizeVersion() {
				t.Errorf("getBinaryVersion() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestCompare exercises compare() across: identical versions with a creation
// timestamp tiebreak, missing versions, unparsable versions, plain semver
// ordering, and a semver-vs-lexicographic ordering check (1.2.0 < 1.19.0).
// Result convention in the fixtures: negative means lhs sorts before rhs.
func TestCompare(t *testing.T) {
	nowTime := time.Now()
	cases := []struct {
		name           string
		lhs            *v1beta1.LeaseCandidate
		rhs            *v1beta1.LeaseCandidate
		expectedResult int
	}{
		{
			// Same versions: the earlier-created candidate sorts first.
			name: "identical versions earlier timestamp",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
				ObjectMeta: metav1.ObjectMeta{
					CreationTimestamp: metav1.Time{Time: nowTime.Add(time.Duration(1))},
				},
			},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
				ObjectMeta: metav1.ObjectMeta{
					CreationTimestamp: metav1.Time{Time: nowTime},
				},
			},
			expectedResult: 1,
		},
		{
			// Missing/unparsable versions sort before parsable ones.
			name: "no lhs version",
			lhs:  &v1beta1.LeaseCandidate{},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			expectedResult: -1,
		},
		{
			name: "no rhs version",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			rhs:            &v1beta1.LeaseCandidate{},
			expectedResult: 1,
		},
		{
			name: "invalid lhs version",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "xyz",
					BinaryVersion:    "xyz",
				},
			},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			expectedResult: -1,
		},
		{
			name: "invalid rhs version",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "xyz",
					BinaryVersion:    "xyz",
				},
			},
			expectedResult: 1,
		},
		{
			name: "lhs less than rhs",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.20.0",
				},
			},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.20.0",
				},
			},
			expectedResult: -1,
		},
		{
			name: "rhs less than lhs",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.20.0",
				},
			},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.20.0",
				},
			},
			expectedResult: -1,
		},
		{
			// Guards against string comparison: semver-wise 1.2.0 < 1.19.0,
			// lexicographically "1.2.0" > "1.19.0".
			name: "lhs less than rhs, lexographical order check",
			lhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.2.0",
					BinaryVersion:    "1.20.0",
				},
			},
			rhs: &v1beta1.LeaseCandidate{
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.20.0",
				},
			},
			expectedResult: -1,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result := compare(tc.lhs, tc.rhs)
			if result != tc.expectedResult {
				t.Errorf("Expected comparison result of %d but got %d", tc.expectedResult, result)
			}
		})
	}
}
// TestShouldReelect checks whether a fresh election should be triggered given
// a candidate set and the current leader.
// NOTE(review): every fixture expects false — there is no positive case where
// a strictly better candidate forces reelection; consider adding one.
func TestShouldReelect(t *testing.T) {
	cases := []struct {
		name          string
		candidates    []*v1beta1.LeaseCandidate
		currentLeader *v1beta1.LeaseCandidate
		expectResult  bool
	}{
		{
			// A newer binary version alone does not displace the leader.
			name: "candidate with newer binary version",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-1",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.19.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-2",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.20.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
			},
			currentLeader: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name: "component-identity-1",
				},
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.19.0",
					Strategy:         v1.OldestEmulationVersion,
				},
			},
			expectResult: false,
		},
		{
			name: "no newer candidates",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-1",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.19.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-2",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.19.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
			},
			currentLeader: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name: "component-identity-1",
				},
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.19.0",
					Strategy:         v1.OldestEmulationVersion,
				},
			},
			expectResult: false,
		},
		{
			// With no candidates at all there is no one to reelect.
			name:       "no candidates",
			candidates: []*v1beta1.LeaseCandidate{},
			currentLeader: &v1beta1.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name: "component-identity-1",
				},
				Spec: v1beta1.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.19.0",
					Strategy:         v1.OldestEmulationVersion,
				},
			},
			expectResult: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result := shouldReelect(tc.candidates, tc.currentLeader)
			if tc.expectResult != result {
				t.Errorf("Expected %t but got %t", tc.expectResult, result)
			}
		})
	}
}
// TestPickBestStrategy checks strategy selection across a candidate set:
// agreement succeeds, a higher binary version breaks disagreements, and an
// unresolvable conflict (same binary version, different strategies) errors.
func TestPickBestStrategy(t *testing.T) {
	tests := []struct {
		name         string
		candidates   []*v1beta1.LeaseCandidate
		wantStrategy v1.CoordinatedLeaseStrategy
		wantError    bool
	}{
		{
			name: "single candidate, single preferred strategy",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName: "component-A",
						Strategy:  v1.OldestEmulationVersion,
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// No binary versions set, so the strategy conflict cannot be
			// resolved.
			name: "multiple candidates, different preferred strategies should fail",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName: "component-A",
						Strategy:  v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName: "component-A",
						Strategy:  v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantError: true,
		},
		{
			// The candidate with the higher binary version decides.
			name: "multiple candidates, different preferred strategy different binary version should resolve",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.32.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// Same as above with the slice order reversed: the result must
			// not depend on candidate ordering.
			name: "multiple candidates, different preferred strategy different binary version should resolve, order agnostic",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.32.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// Semver-wise 1.1.10 > 1.1.2 even though "1.1.10" < "1.1.2" as
			// strings — guards against lexicographic comparison.
			name: "multiple candidates, different preferred strategy different binary version string comparison check",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.1.10",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.1.2",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			name: "multiple candidates, same preferred strategy",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// Identical binary versions leave the strategy conflict
			// unresolvable.
			name: "multiple candidates, conflicting preferred strategy",
			candidates: []*v1beta1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1beta1.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantStrategy: "",
			wantError:    true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			gotStrategy, err := pickBestStrategy(tc.candidates)
			gotError := err != nil
			if gotError != tc.wantError {
				// NOTE(review): the "%v,:%v" format string reads oddly;
				// consider cleaning it up.
				t.Errorf("pickBestStrategy() error = %v,:%v want %v", gotError, err, tc.wantError)
			}
			if !gotError && gotStrategy != tc.wantStrategy {
				t.Errorf("pickBestStrategy() = %v, want %v", gotStrategy, tc.wantStrategy)
			}
		})
	}
}
func shouldReelect(candidates []*v1beta1.LeaseCandidate, currentLeader *v1beta1.LeaseCandidate) bool {
pickedLeader := pickBestLeaderOldestEmulationVersion(candidates)
if pickedLeader == nil {
return false
}
return compare(currentLeader, pickedLeader) > 0
} | go | github | https://github.com/kubernetes/kubernetes | pkg/controlplane/controller/leaderelection/election_test.go |
import unittest
from neo4j.v1 import basic_auth
from evolvingdag.neo4j_connector import Neo4jConnector
class Test(unittest.TestCase):
    """Tests Neo4jConnector

    TODO: basic auth is consistent with Travis' setup; use
    https://docs.travis-ci.com/user/environment-variables/#Defining-encrypted-variables-in-.travis.yml
    TODO: use IntegrationTestCase for better isolation
    https://github.com/neo4j/neo4j-python-driver/blob/1.6/test/integration/tools.py
    """

    @classmethod
    def setUpClass(cls):
        """Initializes the Neo4j driver"""
        # NOTE(review): assumes a Neo4j instance reachable over Bolt on
        # localhost with the default neo4j/neo4j credentials -- confirm
        # against the CI configuration.
        cls.conn = Neo4jConnector(
            "bolt://localhost",
            basic_auth("neo4j", "neo4j"))

    @classmethod
    def tearDownClass(cls):
        """Shuts down the Neo4j driver"""
        cls.conn.close()

    def tearDown(self):
        """Deletes all nodes in the current graph"""
        # Runs after every test so each test starts from an empty graph.
        # The LIMIT bounds how many nodes one pass removes.
        with Test.conn._driver.session() as session:
            session.run("MATCH (n) WITH n LIMIT 1000 DETACH DELETE n")

    def test_add_node(self):
        """Tests if Neo4j nodes can be created"""
        properties = {"name": "a", "time": 4}
        # Create the node inside an explicit transaction...
        with Test.conn._driver.session() as session:
            tx = session.begin_transaction()
            Neo4jConnector.add_node(tx, "Node", properties)
            tx.commit()
        # ...then read it back through a fresh session and check the
        # stored properties round-trip.
        with Test.conn._driver.session() as session:
            result = session.run("MATCH (a:Node) WHERE a.time = $val "
                                 "RETURN a.name",
                                 val=properties["time"])
            record = next(iter(result))
            self.assertEqual(properties["name"], record[0])

    def test_add_edge(self):
        """Tests if Neo4j nodes can be created"""
        node_properties = {"name": "a"}
        edge_properties = {"kind_of": "b"}
        # Create two nodes and an "anticipates" edge between them.
        with Test.conn._driver.session() as session:
            tx = session.begin_transaction()
            Neo4jConnector.add_node(tx, "Node1", node_properties)
            Neo4jConnector.add_node(tx, "Node2", node_properties)
            Neo4jConnector.add_edge(
                tx, "Node1", "Node2", "anticipates", edge_properties)
            tx.commit()
        # Verify both the edge property and the relationship type.
        with Test.conn._driver.session() as session:
            result = session.run("MATCH (a:Node1)-[r]->(b:Node2) "
                                 "WHERE a.name = b.name "
                                 "RETURN r.kind_of, type(r)")
            record = next(iter(result))
            self.assertEqual(edge_properties["kind_of"], record[0])
            self.assertEqual("anticipates", record[1])
import unittest
import bob.learn.boosting
import numpy
import bob
import bob.learn.boosting.utils
class TestBoosting(unittest.TestCase):
    """Class to test the LUT trainer """

    def _data(self, digits = [3, 0], count = 20):
        # Load `count` MNIST samples for each requested digit and stack
        # them into a single (inputs, targets) pair.
        # NOTE(review): mutable default argument for `digits`; it is never
        # mutated here, so behavior is unaffected.
        self.database = bob.learn.boosting.utils.MNIST()
        # get the data
        inputs, targets = [], []
        for digit in digits:
            input, target = self.database.data(labels = digit)
            inputs.append(input[:count])
            targets.append(target[:count])
        return numpy.vstack(inputs), numpy.hstack(targets)

    def _align_uni(self, targets):
        # align target data to be used in a uni-variate classification
        # (+1 for the first digit's class, -1 for everything else)
        aligned = numpy.ones(targets.shape)
        aligned[targets != targets[0]] = -1
        return aligned

    def _align_multi(self, targets, digits):
        # One-vs-all encoding: column i is +1 where the sample belongs to
        # digits[i], -1 elsewhere.
        aligned = - numpy.ones((targets.shape[0], len(digits)))
        for i, d in enumerate(digits):
            aligned[targets==d, i] = 1
        return aligned

    def test01_stump_boosting(self):
        # get test input data
        inputs, targets = self._data()
        aligned = self._align_uni(targets)

        # for stump trainers, the exponential loss function is preferred
        loss_function = bob.learn.boosting.ExponentialLoss()
        weak_trainer = bob.learn.boosting.StumpTrainer()
        booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)

        # perform boosting
        machine = booster.train(inputs.astype(numpy.float64), aligned, number_of_rounds=1)
        # check the result
        # NOTE(review): the expected weight/threshold/index values below are
        # regression constants recorded from a known-good run.
        weight = 1.83178082
        self.assertEqual(machine.weights.shape, (1,1))
        self.assertTrue(numpy.allclose(machine.weights, -weight))
        self.assertEqual(len(machine.weak_machines), 1)
        self.assertEqual(machine.indices, [483])
        weak = machine.weak_machines[0]
        self.assertTrue(isinstance(weak, bob.learn.boosting.StumpMachine))
        self.assertEqual(weak.threshold, 15.5)
        self.assertEqual(weak.polarity, 1.)

        # check first training image
        single = machine(inputs[0].astype(numpy.uint16))
        self.assertAlmostEqual(single, weight)

        # check all training images
        scores = numpy.ndarray(aligned.shape)
        labels = numpy.ndarray(aligned.shape)
        machine(inputs.astype(numpy.uint16), scores, labels)
        # assert that 39 (out of 40) labels are correctly classified by a single feature position
        self.assertTrue(numpy.allclose(labels * scores, weight))
        self.assertEqual(numpy.count_nonzero(labels == aligned), 39)

    def test02_lut_boosting(self):
        # get test input data
        inputs, targets = self._data()
        aligned = self._align_uni(targets)

        # for stump trainers, the logit loss function is preferred
        loss_function = bob.learn.boosting.LogitLoss()
        weak_trainer = bob.learn.boosting.LUTTrainer(256)
        booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)

        # perform boosting
        weight = 15.46452387
        machine = booster.train(inputs.astype(numpy.uint16), aligned, number_of_rounds=1)
        self.assertEqual(machine.weights.shape, (1,1))
        self.assertTrue(numpy.allclose(machine.weights, -weight))
        self.assertEqual(len(machine.weak_machines), 1)
        self.assertEqual(machine.indices, [379])
        weak = machine.weak_machines[0]
        self.assertTrue(isinstance(weak, bob.learn.boosting.LUTMachine))
        self.assertEqual(weak.lut.shape, (256,1))

        # check first training image
        single = machine(inputs[0].astype(numpy.uint16))
        self.assertAlmostEqual(single, weight)

        # check all training images
        scores = numpy.ndarray(aligned.shape)
        labels = numpy.ndarray(aligned.shape)
        machine(inputs.astype(numpy.uint16), scores, labels)
        # assert that 40 (out of 40) labels are correctly classified by a single feature position
        self.assertTrue(numpy.allclose(labels * scores, weight))
        self.assertEqual(numpy.count_nonzero(labels == aligned), 40)

    def test03_multi_shared(self):
        # get test input data; four digit classes -> multi-variate targets
        digits = [1, 4, 7, 9]
        inputs, targets = self._data(digits)
        aligned = self._align_multi(targets, digits)

        # for stump trainers, the logit loss function is preferred
        loss_function = bob.learn.boosting.LogitLoss()
        # "shared" selection: all output dimensions use the same feature
        weak_trainer = bob.learn.boosting.LUTTrainer(256, len(digits), "shared")
        booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)

        # perform boosting
        weights = numpy.array([2.5123104, 2.19725677, 2.34455412, 1.94584326])
        machine = booster.train(inputs.astype(numpy.uint16), aligned, number_of_rounds=1)
        self.assertEqual(machine.weights.shape, (1,len(digits)))
        self.assertTrue(numpy.allclose(machine.weights, -weights))
        self.assertEqual(len(machine.weak_machines), 1)
        self.assertEqual(machine.indices, [437])
        weak = machine.weak_machines[0]
        self.assertTrue(isinstance(weak, bob.learn.boosting.LUTMachine))
        self.assertEqual(weak.lut.shape, (256,4))

        # check first training image
        score = numpy.ndarray(4)
        machine(inputs[0].astype(numpy.uint16), score)
        self.assertTrue(numpy.allclose(score, weights * numpy.array([1., -1., -1., -1.])))

        # check all training images
        scores = numpy.ndarray(aligned.shape)
        labels = numpy.ndarray(aligned.shape)
        machine(inputs.astype(numpy.uint16), scores, labels)
        # assert that 286 (out of 360) labels are correctly classified by a single feature position
        self.assertTrue(all([numpy.allclose(numpy.abs(scores[i]), weights) for i in range(labels.shape[0])]))
        self.assertEqual(numpy.count_nonzero(labels == aligned), 286)

    def test04_multi_independent(self):
        # get test input data
        digits = [1, 4, 7, 9]
        inputs, targets = self._data(digits)
        aligned = self._align_multi(targets, digits)

        # for stump trainers, the logit loss function is preferred
        loss_function = bob.learn.boosting.LogitLoss()
        # "independent" selection: each output dimension may pick its own
        # feature (hence four distinct indices asserted below)
        weak_trainer = bob.learn.boosting.LUTTrainer(256, len(digits), "independent")
        booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)

        # perform boosting
        weights = numpy.array([2.94443872, 2.70805517, 2.34454354, 2.94443872])
        machine = booster.train(inputs.astype(numpy.uint16), aligned, number_of_rounds=1)
        self.assertEqual(machine.weights.shape, (1,len(digits)))
        self.assertTrue(numpy.allclose(machine.weights, -weights))
        self.assertEqual(len(machine.weak_machines), 1)
        self.assertTrue(all(machine.indices == [215, 236, 264, 349]))
        weak = machine.weak_machines[0]
        self.assertTrue(isinstance(weak, bob.learn.boosting.LUTMachine))
        self.assertEqual(weak.lut.shape, (256,4))

        # check first training image
        score = numpy.ndarray(4)
        machine(inputs[0].astype(numpy.uint16), score)
        self.assertTrue(numpy.allclose(score, weights * numpy.array([1., -1., -1., -1.])))

        # check all training images
        scores = numpy.ndarray(aligned.shape)
        labels = numpy.ndarray(aligned.shape)
        machine(inputs.astype(numpy.uint16), scores, labels)
        # assert that 294 (out of 360) labels are correctly classified by a single feature position
        self.assertTrue(all([numpy.allclose(numpy.abs(scores[i]), weights) for i in range(labels.shape[0])]))
        self.assertEqual(numpy.count_nonzero(labels == aligned), 294)
"""
This module implements the upload and remove endpoints of the profile image api.
"""
from contextlib import closing
import datetime
import itertools
import logging
from django.utils.translation import ugettext as _
from django.utils.timezone import utc
from rest_framework import permissions, status
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.user_api.errors import UserNotFound
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
from openedx.core.lib.api.parsers import TypedFileUploadParser
from openedx.core.lib.api.permissions import IsUserInUrl
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image
from .exceptions import ImageValidationError
from .images import (
IMAGE_TYPES, validate_uploaded_image, create_profile_images, remove_profile_images
)
log = logging.getLogger(__name__)

# Log message templates; the %(...)s placeholders are filled lazily by
# the logging call with a dict argument (see log.info usages below).
LOG_MESSAGE_CREATE = 'Generated and uploaded images %(image_names)s for user %(user_id)s'
LOG_MESSAGE_DELETE = 'Deleted images %(image_names)s for user %(user_id)s'
def _make_upload_dt():
    """
    Generate a server-side timestamp for the upload. This is in a separate
    function so its behavior can be overridden in tests.
    """
    # Take the naive UTC wall-clock time and attach an explicit UTC tzinfo
    # so the resulting timestamp is timezone-aware.
    now = datetime.datetime.utcnow()
    return now.replace(tzinfo=utc)
class ProfileImageView(DeveloperErrorViewMixin, APIView):
    """
    **Use Cases**

        Add or remove profile images associated with user accounts.

        The requesting user must be signed in.  Users can only add profile
        images to their own account.  Users with staff access can remove
        profile images for other user accounts.  All other users can remove
        only their own profile images.

    **Example Requests**

        POST /api/user/v1/accounts/{username}/image

        DELETE /api/user/v1/accounts/{username}/image

    **Example POST Responses**

        When the requesting user attempts to upload an image for their own
        account, the request returns one of the following responses:

        * If the upload could not be performed, the request returns an HTTP 400
          "Bad Request" response with information about why the request failed.

        * If the upload is successful, the request returns an HTTP 204 "No
          Content" response with no additional content.

        If the requesting user tries to upload an image for a different
        user, the request returns one of the following responses:

        * If no user matches the "username" parameter, the request returns an
          HTTP 404 "Not Found" response.

        * If the user whose profile image is being uploaded exists, but the
          requesting user does not have staff access, the request returns an
          HTTP 404 "Not Found" response.

        * If the specified user exists, and the requesting user has staff
          access, the request returns an HTTP 403 "Forbidden" response.

    **Example DELETE Responses**

        When the requesting user attempts to remove the profile image for
        their own account, the request returns one of the following
        responses:

        * If the image could not be removed, the request returns an HTTP 400
          "Bad Request" response with information about why the request failed.

        * If the request successfully removes the image, the request returns
          an HTTP 204 "No Content" response with no additional content.

        When the requesting user tries to remove the profile image for a
        different user, the view will return one of the following responses:

        * If the requesting user has staff access, and the "username" parameter
          matches a user, the profile image for the specified user is deleted,
          and the request returns an HTTP 204 "No Content" response with no
          additional content.

        * If the requesting user has staff access, but no user is matched by
          the "username" parameter, the request returns an HTTP 404 "Not Found"
          response.

        * If the requesting user does not have staff access, the request
          returns an HTTP 404 "Not Found" response, regardless of whether
          the user exists or not.
    """

    parser_classes = (MultiPartParser, FormParser, TypedFileUploadParser)
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
    permission_classes = (permissions.IsAuthenticated, IsUserInUrl)

    # All MIME types accepted for upload, flattened from the per-image-type
    # lists declared in IMAGE_TYPES.
    upload_media_types = set(itertools.chain(*(image_type.mimetypes for image_type in IMAGE_TYPES.values())))

    def post(self, request, username):
        """
        POST /api/user/v1/accounts/{username}/image
        """

        # validate request:
        # verify that the user's
        # ensure any file was sent
        if 'file' not in request.FILES:
            return Response(
                {
                    "developer_message": u"No file provided for profile image",
                    "user_message": _(u"No file provided for profile image"),
                },
                status=status.HTTP_400_BAD_REQUEST
            )

        # process the upload.
        uploaded_file = request.FILES['file']

        # no matter what happens, delete the temporary file when we're done
        with closing(uploaded_file):

            # image file validation.
            try:
                validate_uploaded_image(uploaded_file)
            except ImageValidationError as error:
                return Response(
                    {"developer_message": error.message, "user_message": error.user_message},
                    status=status.HTTP_400_BAD_REQUEST,
                )

            # generate profile pic and thumbnails and store them
            profile_image_names = get_profile_image_names(username)
            create_profile_images(uploaded_file, profile_image_names)

            # update the user account to reflect that a profile image is available.
            set_has_profile_image(username, True, _make_upload_dt())

            log.info(
                LOG_MESSAGE_CREATE,
                {'image_names': profile_image_names.values(), 'user_id': request.user.id}
            )

        # send client response.
        return Response(status=status.HTTP_204_NO_CONTENT)

    def delete(self, request, username):
        """
        DELETE /api/user/v1/accounts/{username}/image
        """

        try:
            # update the user account to reflect that the images were removed.
            set_has_profile_image(username, False)

            # remove physical files from storage.
            profile_image_names = get_profile_image_names(username)
            remove_profile_images(profile_image_names)

            log.info(
                LOG_MESSAGE_DELETE,
                {'image_names': profile_image_names.values(), 'user_id': request.user.id}
            )
        except UserNotFound:
            # Raised by set_has_profile_image when `username` has no account.
            return Response(status=status.HTTP_404_NOT_FOUND)

        # send client response.
        return Response(status=status.HTTP_204_NO_CONTENT)
class ProfileImageUploadView(APIView):
    """
    **DEPRECATION WARNING**

        /api/profile_images/v1/{username}/upload is deprecated.

        All requests should now be sent to

        /api/user/v1/accounts/{username}/image
    """
    # Thin legacy shim: reuse the canonical view's parsing/auth/permission
    # configuration and delegate the actual work to ProfileImageView.
    parser_classes = ProfileImageView.parser_classes
    authentication_classes = ProfileImageView.authentication_classes
    permission_classes = ProfileImageView.permission_classes

    def post(self, request, username):
        """
        POST /api/profile_images/v1/{username}/upload
        """
        return ProfileImageView().post(request, username)
class ProfileImageRemoveView(APIView):
    """
    **DEPRECATION WARNING**

        /api/profile_images/v1/{username}/remove is deprecated.

        This endpoint's POST is replaced by the DELETE method at

        /api/user/v1/accounts/{username}/image.
    """
    # Thin legacy shim; note the old API used POST for removal while the
    # canonical endpoint uses DELETE, hence the POST -> delete delegation.
    authentication_classes = ProfileImageView.authentication_classes
    permission_classes = ProfileImageView.permission_classes

    def post(self, request, username):
        """
        POST /api/profile_images/v1/{username}/remove
        """
        return ProfileImageView().delete(request, username)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: manageiq_tags
short_description: Management of resource tags in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.5'
author: Daniel Korn (@dkorn)
description:
- The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
options:
state:
description:
- absent - tags should not exist,
- present - tags should exist,
- list - list current tags.
choices: ['absent', 'present', 'list']
default: 'present'
tags:
description:
- tags - list of dictionaries, each includes 'name' and 'category' keys.
- required if state is present or absent.
resource_type:
description:
- the relevant resource type in manageiq
required: true
choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
'data store', 'group', 'resource pool', 'service', 'service template',
'template', 'tenant', 'user']
resource_name:
description:
- the relevant resource name in manageiq
required: true
'''
EXAMPLES = '''
- name: Create new tags for a provider in ManageIQ
manageiq_tags:
resource_name: 'EngLab'
resource_type: 'provider'
tags:
- category: environment
name: prod
- category: owner
name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Remove tags for a provider in ManageIQ
manageiq_tags:
state: absent
resource_name: 'EngLab'
resource_type: 'provider'
tags:
- category: environment
name: prod
- category: owner
name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: List current tags for a provider in ManageIQ
manageiq_tags:
state: list
resource_name: 'EngLab'
resource_type: 'provider'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
def query_resource_id(manageiq, resource_type, resource_name):
    """ Query the resource name in ManageIQ.

    Returns:
        the resource id if it exists in manageiq, Fail otherwise.
    """
    found = manageiq.find_collection_resource_by(resource_type, name=resource_name)
    if found:
        return found["id"]
    # Resource lookup failed: abort the module run with a descriptive error.
    msg = "{resource_name} {resource_type} does not exist in manageiq".format(
        resource_name=resource_name, resource_type=resource_type)
    manageiq.module.fail_json(msg=msg)
class ManageIQTags(object):
    """
    Object to execute tags management operations of manageiq resources.
    """

    def __init__(self, manageiq, resource_type, resource_id):
        # Keep a handle on the shared connection wrapper plus shortcuts to
        # its module/url/client members used throughout this class.
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client

        self.resource_type = resource_type
        self.resource_id = resource_id
        # Base REST URL of the resource, e.g. <api>/providers/42
        self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
            api_url=self.api_url,
            resource_type=resource_type,
            resource_id=resource_id)

    def full_tag_name(self, tag):
        """ Returns the full tag name in manageiq
        """
        # e.g. {'category': 'environment', 'name': 'prod'}
        # -> '/managed/environment/prod'
        return '/managed/{tag_category}/{tag_name}'.format(
            tag_category=tag['category'],
            tag_name=tag['name'])

    def clean_tag_object(self, tag):
        """ Clean a tag object to have human readable form of:
        {
            full_name: STR,
            name: STR,
            display_name: STR,
            category: STR
        }
        """
        full_name = tag.get('name')
        categorization = tag.get('categorization', {})

        return dict(
            full_name=full_name,
            name=categorization.get('name'),
            display_name=categorization.get('display_name'),
            category=categorization.get('category', {}).get('name'))

    def query_resource_tags(self):
        """ Returns a set of the tag objects assigned to the resource
        """
        url = '{resource_url}/tags?expand=resources&attributes=categorization'
        try:
            response = self.client.get(url.format(resource_url=self.resource_url))
        except Exception as e:
            msg = "Failed to query {resource_type} tags: {error}".format(
                resource_type=self.resource_type,
                error=e)
            self.module.fail_json(msg=msg)

        resources = response.get('resources', [])

        # clean the returned rest api tag object to look like:
        # {full_name: STR, name: STR, display_name: STR, category: STR}
        tags = [self.clean_tag_object(tag) for tag in resources]

        return tags

    def tags_to_update(self, tags, action):
        """ Create a list of tags we need to update in ManageIQ.

        Returns:
            Whether or not a change took place and a message describing the
            operation executed.
        """
        tags_to_post = []
        assigned_tags = self.query_resource_tags()

        # make a list of assigned full tag names strings
        # e.g. ['/managed/environment/prod', ...]
        assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])

        for tag in tags:
            assigned = self.full_tag_name(tag) in assigned_tags_set

            # Only tags whose assignment state would actually change count.
            if assigned and action == 'unassign':
                tags_to_post.append(tag)
            elif (not assigned) and action == 'assign':
                tags_to_post.append(tag)

        return tags_to_post

    def assign_or_unassign_tags(self, tags, action):
        """ Perform assign/unassign action
        """
        # get a list of tags needed to be changed
        tags_to_post = self.tags_to_update(tags, action)
        if not tags_to_post:
            return dict(
                changed=False,
                msg="Tags already {action}ed, nothing to do".format(action=action))

        # try to assign or unassign tags to resource
        # NOTE(review): the POST sends the full `tags` list rather than
        # `tags_to_post`; `tags_to_post` only drives the changed flag.
        # Confirm this is intentional (server-side re-assign is a no-op).
        url = '{resource_url}/tags'.format(resource_url=self.resource_url)
        try:
            response = self.client.post(url, action=action, resources=tags)
        except Exception as e:
            msg = "Failed to {action} tag: {error}".format(
                action=action,
                error=e)
            self.module.fail_json(msg=msg)

        # check all entities in result to be successful
        for result in response['results']:
            if not result['success']:
                msg = "Failed to {action}: {message}".format(
                    action=action,
                    message=result['message'])
                self.module.fail_json(msg=msg)

        # successfully changed all needed tags
        return dict(
            changed=True,
            msg="Successfully {action}ed tags".format(action=action))
def main():
    # Map module `state` values onto the ManageIQ REST actions.
    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
    argument_spec = dict(
        tags=dict(type='list'),
        resource_name=dict(required=True, type='str'),
        resource_type=dict(required=True, type='str',
                           choices=manageiq_entities().keys()),
        state=dict(required=False, type='str',
                   choices=['present', 'absent', 'list'], default='present'),
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        # `tags` is mandatory only when we actually assign/unassign.
        required_if=[
            ('state', 'present', ['tags']),
            ('state', 'absent', ['tags'])
        ],
    )

    tags = module.params['tags']
    resource_type_key = module.params['resource_type']
    resource_name = module.params['resource_name']
    state = module.params['state']

    # get the action and resource type
    action = actions[state]
    resource_type = manageiq_entities()[resource_type_key]

    manageiq = ManageIQ(module)

    # query resource id, fail if resource does not exist
    resource_id = query_resource_id(manageiq, resource_type, resource_name)

    manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)

    if action == 'list':
        # return a list of current tags for this object
        current_tags = manageiq_tags.query_resource_tags()
        res_args = dict(changed=False, tags=current_tags)
    else:
        # assign or unassign the tags
        res_args = manageiq_tags.assign_or_unassign_tags(tags, action)

    module.exit_json(**res_args)


if __name__ == "__main__":
    main()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# this example predates sklearn.model_selection -- confirm target versions.
from sklearn import cross_validation
from sklearn import metrics
from tensorflow.contrib import learn

import h5py  # pylint: disable=g-bad-import-order

# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)

# Note that we are saving and load iris data as h5 format as a simple
# demonstration here.
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()

# Re-open the same file read-only and feed the h5py datasets directly to
# the estimator.
# NOTE(review): the read handle is never closed; consider a `with` block.
h5f = h5py.File('test_hdf5.h5', 'r')
x_train = h5f['X_train']
x_test = h5f['X_test']
y_train = h5f['y_train']
y_test = h5f['y_test']

# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
    feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.kms.exceptions import NotFoundException
from tests.compat import unittest
class TestKMS(unittest.TestCase):
    """Integration tests for the boto KMS connection (requires live AWS
    credentials configured for boto)."""

    def setUp(self):
        self.kms = boto.connect_kms()

    def test_list_keys(self):
        # Smoke test: a successful ListKeys response carries a 'Keys' entry.
        response = self.kms.list_keys()
        self.assertIn('Keys', response)

    def test_handle_not_found_exception(self):
        with self.assertRaises(NotFoundException):
            # Describe some key that does not exists
            self.kms.describe_key(
                key_id='nonexistant_key',
            )
#!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'davidbyttow@google.com (David Byttow)'
import logging
import unittest
import urllib2
import opensocial
from opensocial import oauth
class TestOAuth(unittest.TestCase):
    """Integration test for two-legged OAuth against the oauthbox demo
    container (requires network access to oauthbox.appspot.com)."""

    def setUp(self):
        # Consumer key/secret are the public demo credentials for the
        # oauthbox test container.
        self.config = opensocial.ContainerConfig(
            oauth_consumer_key='oauth.org:12345689',
            oauth_consumer_secret='not_a_secret',
            server_rpc_base='http://oauthbox.appspot.com/rpc')
        self.container = opensocial.ContainerContext(self.config)
        self.user_id = '101'

    def test_fetch(self):
        # Fetch a known person record and check one expected field.
        data = self.container.fetch_person(self.user_id)
        self.assertEquals(data.get_field('verified'), 'True')
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class purchase_order_line(orm.Model):

    _inherit = 'purchase.order.line'

    def _invoiced_qty(self, cursor, user, ids, name, arg, context=None):
        """Functional-field getter: total quantity already invoiced for
        each purchase order line, keyed by line id."""
        res = {}
        for line in self.browse(cursor, user, ids, context=context):
            # Sum quantities over all invoice lines linked to this PO line;
            # the explicit 0.0 start keeps the result a float when empty.
            res[line.id] = sum(
                (invoice_line.quantity for invoice_line in line.invoice_lines),
                0.0)
        return res

    _columns = {
        'invoiced_qty': fields.function(_invoiced_qty,
            string='Invoiced quantity', type='float'),
    }
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(miri)))]
use tokio::io::AsyncReadExt;
/// `tokio::io::repeat` reads always complete immediately; this test checks
/// that repeatedly polling it still yields to the scheduler.
///
/// The `biased` select polls the infinite read loop first, so the
/// `yield_now()` branch can only complete if the read loop cooperatively
/// gives up its coop budget; otherwise this test would never finish.
#[tokio::test]
async fn repeat_poll_read_is_cooperative() {
    tokio::select! {
        biased;
        _ = async {
            loop {
                let mut buf = [0u8; 4096];
                tokio::io::repeat(0b101).read_exact(&mut buf).await.unwrap();
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}
/*
* The only function exported here is `PyArray_LegacyCanCastTypeTo`, which
* is currently still in use when first registering a userdtype.
*
* The extremely limited use means that it can probably remain unmaintained
 * until such a time where legacy user dtypes are deprecated and removed
* entirely.
*/
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "scalartypes.h"
#include "_datetime.h"
#include "datetime_strings.h"
#include "can_cast_table.h"
#include "convert_datatype.h"
#include "dtypemeta.h"
#include "legacy_dtype_implementation.h"
/*
* Compare the field dictionaries for two types.
*
* Return 1 if the field types and field names of the two descrs are equal and
* in the same order, 0 if not.
*/
static int
_equivalent_fields(_PyArray_LegacyDescr *type1, _PyArray_LegacyDescr *type2) {
    int val;

    /* Same objects are trivially equal. */
    if (type1->fields == type2->fields && type1->names == type2->names) {
        return 1;
    }
    if (type1->fields == NULL || type2->fields == NULL) {
        return 0;
    }

    /* Rich-compare the fields dicts; any comparison error is swallowed
     * (PyErr_Clear) and treated as "not equal". */
    val = PyObject_RichCompareBool(type1->fields, type2->fields, Py_EQ);
    if (val != 1 || PyErr_Occurred()) {
        PyErr_Clear();
        return 0;
    }

    /* Comparing the names tuples as well enforces identical field order. */
    val = PyObject_RichCompareBool(type1->names, type2->names, Py_EQ);
    if (val != 1 || PyErr_Occurred()) {
        PyErr_Clear();
        return 0;
    }

    return 1;
}
/*
* Compare the subarray data for two types.
* Return 1 if they are the same, 0 if not.
*/
static int
_equivalent_subarrays(PyArray_ArrayDescr *sub1, PyArray_ArrayDescr *sub2)
{
    int val;

    /* Covers the common "both NULL" (no subarray) case as equal. */
    if (sub1 == sub2) {
        return 1;
    }
    if (sub1 == NULL || sub2 == NULL) {
        return 0;
    }

    /* Shapes must compare equal; comparison errors are cleared and
     * reported as "not equivalent". */
    val = PyObject_RichCompareBool(sub1->shape, sub2->shape, Py_EQ);
    if (val != 1 || PyErr_Occurred()) {
        PyErr_Clear();
        return 0;
    }

    /* Recurse on the element dtype of the subarray. */
    return PyArray_EquivTypes(sub1->base, sub2->base);
}
/*
 * Legacy equivalence test between two descriptors.  Requires both to be
 * legacy descrs with equal elsize and matching native/non-native byte
 * order; structured, subarray and datetime dtypes get deeper checks,
 * while plain scalar types only need matching `kind`.
 */
static unsigned char
PyArray_LegacyEquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
{
    int type_num1, type_num2, size1, size2;

    /* Pointer identity is the fast path. */
    if (type1 == type2) {
        return NPY_TRUE;
    }
    /* This legacy path only understands legacy descriptors. */
    if (!PyDataType_ISLEGACY(type1) || !PyDataType_ISLEGACY(type2)) {
        return NPY_FALSE;
    }

    type_num1 = type1->type_num;
    type_num2 = type2->type_num;
    size1 = type1->elsize;
    size2 = type2->elsize;

    if (size1 != size2) {
        return NPY_FALSE;
    }
    /* Both native or both non-native byte order is required. */
    if (PyArray_ISNBO(type1->byteorder) != PyArray_ISNBO(type2->byteorder)) {
        return NPY_FALSE;
    }
    /* Subarray dtypes: same type number plus equivalent shape/base. */
    if (PyDataType_SUBARRAY(type1) || PyDataType_SUBARRAY(type2)) {
        return ((type_num1 == type_num2)
                && _equivalent_subarrays(PyDataType_SUBARRAY(type1), PyDataType_SUBARRAY(type2)));
    }
    /* Structured (void) dtypes: same type number plus equal fields/names. */
    if (type_num1 == NPY_VOID || type_num2 == NPY_VOID) {
        return ((type_num1 == type_num2) && _equivalent_fields(
            (_PyArray_LegacyDescr *)type1, (_PyArray_LegacyDescr *)type2));
    }
    /* Datetime/timedelta: units metadata must also match. */
    if (type_num1 == NPY_DATETIME
            || type_num1 == NPY_TIMEDELTA
            || type_num2 == NPY_DATETIME
            || type_num2 == NPY_TIMEDELTA) {
        return ((type_num1 == type_num2)
                && has_equivalent_datetime_metadata(type1, type2));
    }
    return type1->kind == type2->kind;
}
/*
 * Equivalence test by type number: identical numbers are trivially
 * equivalent; otherwise build the two default descriptors and defer
 * to PyArray_LegacyEquivTypes.
 */
static unsigned char
PyArray_LegacyEquivTypenums(int typenum1, int typenum2)
{
    PyArray_Descr *d1, *d2;
    npy_bool ret;

    if (typenum1 == typenum2) {
        return NPY_SUCCEED;
    }

    d1 = PyArray_DescrFromType(typenum1);
    d2 = PyArray_DescrFromType(typenum2);
    ret = PyArray_LegacyEquivTypes(d1, d2);
    Py_DECREF(d1);
    Py_DECREF(d2);
    return ret;
}
static int
PyArray_LegacyCanCastSafely(int fromtype, int totype)
{
PyArray_Descr *from;
/* Fast table lookup for small type numbers */
if ((unsigned int)fromtype < NPY_NTYPES_LEGACY &&
(unsigned int)totype < NPY_NTYPES_LEGACY) {
return _npy_can_cast_safely_table[fromtype][totype];
}
/* Identity */
if (fromtype == totype) {
return 1;
}
from = PyArray_DescrFromType(fromtype);
/*
* cancastto is a NPY_NOTYPE terminated C-int-array of types that
* the data-type can be cast to safely.
*/
if (PyDataType_GetArrFuncs(from)->cancastto) {
int *curtype = PyDataType_GetArrFuncs(from)->cancastto;
while (*curtype != NPY_NOTYPE) {
if (*curtype++ == totype) {
Py_DECREF(from);
return 1;
}
}
}
Py_DECREF(from);
return 0;
}
/*
 * Legacy NPY_SAFE_CASTING check between two concrete descriptors.
 * Starts from the type-number-level answer of PyArray_LegacyCanCastSafely
 * and then refines it using instance information: string/unicode element
 * sizes and datetime/timedelta unit metadata.
 */
static npy_bool
PyArray_LegacyCanCastTo(PyArray_Descr *from, PyArray_Descr *to)
{
    int from_type_num = from->type_num;
    int to_type_num = to->type_num;
    npy_bool ret;

    ret = (npy_bool) PyArray_LegacyCanCastSafely(from_type_num, to_type_num);
    if (ret) {
        /* Check String and Unicode more closely */
        if (from_type_num == NPY_STRING) {
            if (to_type_num == NPY_STRING) {
                ret = (from->elsize <= to->elsize);
            }
            else if (to_type_num == NPY_UNICODE) {
                /* Unicode stores 4 bytes per character (hence << 2). */
                ret = (from->elsize << 2 <= to->elsize);
            }
        }
        else if (from_type_num == NPY_UNICODE) {
            if (to_type_num == NPY_UNICODE) {
                ret = (from->elsize <= to->elsize);
            }
        }
        /*
         * For datetime/timedelta, only treat casts moving towards
         * more precision as safe.
         */
        else if (from_type_num == NPY_DATETIME && to_type_num == NPY_DATETIME) {
            PyArray_DatetimeMetaData *meta1, *meta2;
            meta1 = get_datetime_metadata_from_dtype(from);
            if (meta1 == NULL) {
                /* Missing metadata counts as "cannot cast"; swallow error. */
                PyErr_Clear();
                return 0;
            }
            meta2 = get_datetime_metadata_from_dtype(to);
            if (meta2 == NULL) {
                PyErr_Clear();
                return 0;
            }

            return can_cast_datetime64_metadata(meta1, meta2,
                                                NPY_SAFE_CASTING);
        }
        else if (from_type_num == NPY_TIMEDELTA &&
                 to_type_num == NPY_TIMEDELTA) {
            PyArray_DatetimeMetaData *meta1, *meta2;
            meta1 = get_datetime_metadata_from_dtype(from);
            if (meta1 == NULL) {
                PyErr_Clear();
                return 0;
            }
            meta2 = get_datetime_metadata_from_dtype(to);
            if (meta2 == NULL) {
                PyErr_Clear();
                return 0;
            }

            return can_cast_timedelta64_metadata(meta1, meta2,
                                                 NPY_SAFE_CASTING);
        }
        /*
         * If to_type_num is STRING or unicode
         * see if the length is long enough to hold the
         * stringified value of the object.
         */
        else if (to_type_num == NPY_STRING || to_type_num == NPY_UNICODE) {
            /*
             * Boolean value cast to string type is 5 characters max
             * for string 'False'.
             */
            int char_size = 1;
            if (to_type_num == NPY_UNICODE) {
                /* Four bytes per unicode character. */
                char_size = 4;
            }

            ret = 0;
            if (PyDataType_ISUNSIZED(to)) {
                /* Unsized (flexible) target can always hold the result. */
                ret = 1;
            }
            /*
             * Need at least 5 characters to convert from boolean
             * to 'True' or 'False'.
             */
            else if (from->kind == 'b' && to->elsize >= 5 * char_size) {
                ret = 1;
            }
            else if (from->kind == 'u') {
                /* Guard against unexpected integer size */
                if (from->elsize > 8 || from->elsize < 0) {
                    ret = 0;
                }
                else if (to->elsize >=
                         REQUIRED_STR_LEN[from->elsize] * char_size) {
                    ret = 1;
                }
            }
            else if (from->kind == 'i') {
                /* Guard against unexpected integer size */
                if (from->elsize > 8 || from->elsize < 0) {
                    ret = 0;
                }
                /* Extra character needed for sign */
                else if (to->elsize >=
                         (REQUIRED_STR_LEN[from->elsize] + 1) * char_size) {
                    ret = 1;
                }
            }
        }
    }
    return ret;
}
/*
* Compare two field dictionaries for castability.
*
* Return 1 if 'field1' can be cast to 'field2' according to the rule
* 'casting', 0 if not.
*
* Castabiliy of field dictionaries is defined recursively: 'field1' and
* 'field2' must have the same field names (possibly in different
* orders), and the corresponding field types must be castable according
* to the given casting rule.
*/
static int
can_cast_fields(PyObject *field1, PyObject *field2, NPY_CASTING casting)
{
    Py_ssize_t ppos;
    PyObject *key;
    PyObject *tuple1, *tuple2;

    /* Same dict object (or both NULL) -> castable. */
    if (field1 == field2) {
        return 1;
    }
    /* Only one side structured -> not castable. */
    if (field1 == NULL || field2 == NULL) {
        return 0;
    }
    /* Different field counts can never match up name-for-name. */
    if (PyDict_Size(field1) != PyDict_Size(field2)) {
        return 0;
    }

    /* Iterate over all the fields and compare for castability */
    ppos = 0;
    while (PyDict_Next(field1, &ppos, &key, &tuple1)) { // noqa: borrowed-ref OK
        /* Same field name must exist on the other side (order may differ). */
        if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { // noqa: borrowed-ref OK
            return 0;
        }
        /* Compare the dtype of the field for castability;
         * each fields value is a tuple whose first item is the dtype. */
        if (!PyArray_CanCastTypeTo(
                        (PyArray_Descr *)PyTuple_GET_ITEM(tuple1, 0),
                        (PyArray_Descr *)PyTuple_GET_ITEM(tuple2, 0),
                        casting)) {
            return 0;
        }
    }

    return 1;
}
/*
 * Legacy implementation of PyArray_CanCastTypeTo, kept only for the
 * user-dtype registration path (see the file header).  Decides whether
 * `from` can be cast to `to` under the given casting rule, handling
 * structured/subarray dtypes, user-defined dtypes, datetimes, and the
 * plain elsize/kind lattice in turn.
 */
NPY_NO_EXPORT npy_bool
PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
                            NPY_CASTING casting)
{
    _PyArray_LegacyDescr *lfrom = (_PyArray_LegacyDescr *)from;
    _PyArray_LegacyDescr *lto = (_PyArray_LegacyDescr *)to;

    /*
     * Fast paths for equality and for basic types.
     */
    if (from == to ||
        ((NPY_LIKELY(PyDataType_ISNUMBER(from)) ||
          PyDataType_ISOBJECT(from)) &&
         NPY_LIKELY(from->type_num == to->type_num) &&
         NPY_LIKELY(from->byteorder == to->byteorder))) {
        return 1;
    }
    /* Only legacy descriptors are handled by this legacy path. */
    if (!PyDataType_ISLEGACY(from) || !PyDataType_ISLEGACY(to)) {
        return 0;
    }
    /*
     * Cases with subarrays and fields need special treatment.
     */
    if (PyDataType_HASFIELDS(from)) {
        /*
         * If from is a structured data type, then it can be cast to a simple
         * non-object one only for unsafe casting *and* if it has a single
         * field; recurse just in case the single field is itself structured.
         */
        if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) {
            if ((casting == NPY_UNSAFE_CASTING || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)) &&
                    PyDict_Size(lfrom->fields) == 1) {
                Py_ssize_t ppos = 0;
                PyObject *tuple;
                PyArray_Descr *field;

                /* Extract the single field's dtype (tuple item 0). */
                PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); // noqa: borrowed-ref OK
                field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0);
                /*
                 * For a subarray, we need to get the underlying type;
                 * since we already are casting unsafely, we can ignore
                 * the shape.
                 */
                if (PyDataType_HASSUBARRAY(field)) {
                    field = PyDataType_SUBARRAY(field)->base;
                }
                return PyArray_LegacyCanCastTypeTo(field, to, casting);
            }
            else {
                return 0;
            }
        }
        /*
         * Casting from one structured data type to another depends on the fields;
         * we pass that case on to the EquivTypenums case below.
         *
         * TODO: move that part up here? Need to check whether equivalent type
         * numbers is an addition constraint that is needed.
         *
         * TODO/FIXME: For now, always allow structured to structured for unsafe
         * casting; this is not correct, but needed since the treatment in can_cast
         * below got out of sync with astype; see gh-13667.
         */
        if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) {
            return 1;
        }
    }
    else if (PyDataType_HASFIELDS(to)) {
        /*
         * If "from" is a simple data type and "to" has fields, then only
         * unsafe casting works (and that works always, even to multiple fields).
         */
        return (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0);
    }
    /*
     * Everything else we consider castable for unsafe for now.
     * FIXME: ensure what we do here is consistent with "astype",
     * i.e., deal more correctly with subarrays and user-defined dtype.
     */
    else if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) {
        return 1;
    }
    /*
     * Equivalent simple types can be cast with any value of 'casting', but
     * we need to be careful about structured to structured.
     */
    if (PyArray_LegacyEquivTypenums(from->type_num, to->type_num)) {
        /* For complicated case, use EquivTypes (for now) */
        if (PyTypeNum_ISUSERDEF(from->type_num) ||
                PyDataType_SUBARRAY(from) != NULL) {
            int ret;

            /* Only NPY_NO_CASTING prevents byte order conversion */
            if ((casting != NPY_NO_CASTING) &&
                    (!PyArray_ISNBO(from->byteorder) ||
                     !PyArray_ISNBO(to->byteorder))) {
                PyArray_Descr *nbo_from, *nbo_to;

                /* Normalize both to native order before comparing. */
                nbo_from = PyArray_DescrNewByteorder(from, NPY_NATIVE);
                nbo_to = PyArray_DescrNewByteorder(to, NPY_NATIVE);
                if (nbo_from == NULL || nbo_to == NULL) {
                    Py_XDECREF(nbo_from);
                    Py_XDECREF(nbo_to);
                    PyErr_Clear();
                    return 0;
                }
                ret = PyArray_LegacyEquivTypes(nbo_from, nbo_to);
                Py_DECREF(nbo_from);
                Py_DECREF(nbo_to);
            }
            else {
                ret = PyArray_LegacyEquivTypes(from, to);
            }
            return ret;
        }

        if (PyDataType_HASFIELDS(from)) {
            switch (casting) {
                case NPY_EQUIV_CASTING:
                case NPY_SAFE_CASTING:
                case NPY_SAME_KIND_CASTING:
                    /*
                     * `from' and `to' must have the same fields, and
                     * corresponding fields must be (recursively) castable.
                     */
                    return can_cast_fields(lfrom->fields, lto->fields, casting);

                case NPY_NO_CASTING:
                default:
                    return PyArray_LegacyEquivTypes(from, to);
            }
        }

        switch (from->type_num) {
            case NPY_DATETIME: {
                PyArray_DatetimeMetaData *meta1, *meta2;
                meta1 = get_datetime_metadata_from_dtype(from);
                if (meta1 == NULL) {
                    PyErr_Clear();
                    return 0;
                }
                meta2 = get_datetime_metadata_from_dtype(to);
                if (meta2 == NULL) {
                    PyErr_Clear();
                    return 0;
                }

                if (casting == NPY_NO_CASTING) {
                    /* NPY_NO_CASTING additionally pins the byte order. */
                    return PyArray_ISNBO(from->byteorder) ==
                                        PyArray_ISNBO(to->byteorder) &&
                            can_cast_datetime64_metadata(meta1, meta2, casting);
                }
                else {
                    return can_cast_datetime64_metadata(meta1, meta2, casting);
                }
            }
            case NPY_TIMEDELTA: {
                PyArray_DatetimeMetaData *meta1, *meta2;
                meta1 = get_datetime_metadata_from_dtype(from);
                if (meta1 == NULL) {
                    PyErr_Clear();
                    return 0;
                }
                meta2 = get_datetime_metadata_from_dtype(to);
                if (meta2 == NULL) {
                    PyErr_Clear();
                    return 0;
                }

                if (casting == NPY_NO_CASTING) {
                    return PyArray_ISNBO(from->byteorder) ==
                                        PyArray_ISNBO(to->byteorder) &&
                            can_cast_timedelta64_metadata(meta1, meta2, casting);
                }
                else {
                    return can_cast_timedelta64_metadata(meta1, meta2, casting);
                }
            }
            default:
                switch (casting) {
                    case NPY_NO_CASTING:
                        return PyArray_LegacyEquivTypes(from, to);
                    case NPY_EQUIV_CASTING:
                        return (from->elsize == to->elsize);
                    case NPY_SAFE_CASTING:
                        return (from->elsize <= to->elsize);
                    default:
                        return 1;
                }
                break;
        }
    }
    /* If safe or same-kind casts are allowed */
    else if (casting == NPY_SAFE_CASTING || casting == NPY_SAME_KIND_CASTING) {
        if (PyArray_LegacyCanCastTo(from, to)) {
            return 1;
        }
        else if(casting == NPY_SAME_KIND_CASTING) {
            /*
             * Also allow casting from lower to higher kinds, according
             * to the ordering provided by dtype_kind_to_ordering.
             * Some kinds, like datetime, don't fit in the hierarchy,
             * and are special cased as -1.
             */
            int from_order, to_order;

            from_order = dtype_kind_to_ordering(from->kind);
            to_order = dtype_kind_to_ordering(to->kind);

            if (to->kind == 'm') {
                /* both types being timedelta is already handled before. */
                int integer_order = dtype_kind_to_ordering('i');
                return (from_order != -1) && (from_order <= integer_order);
            }

            return (from_order != -1) && (from_order <= to_order);
        }
        else {
            return 0;
        }
    }
    /* NPY_NO_CASTING or NPY_EQUIV_CASTING was specified */
    else {
        return 0;
    }
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@nmap.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of this license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@nmap.com for further *
# * information. *
# * *
# * If you have received a written license agreement or contract for *
# * Covered Software stating terms other than these, you may choose to use *
# * and redistribute Covered Software under those terms instead of these. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
"""
higwidgets/higlogindialog.py
a basic login/authentication dialog
"""
__all__ = ['HIGLoginDialog']
import gtk
from higdialogs import HIGDialog
from higlabels import HIGEntryLabel
from higtables import HIGTable
from higentries import HIGTextEntry, HIGPasswordEntry
class HIGLoginDialog(HIGDialog):
    """
    A dialog that asks for basic login information (username / password).

    Lays out two label/entry rows in a 2x2 table inside the dialog's vbox
    and makes ACCEPT the default response so pressing Enter confirms.
    """
    def __init__(self, title='Login',
                 buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                          gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)):
        HIGDialog.__init__(self, title, buttons=buttons)

        # Username row widgets.
        self.username_label = HIGEntryLabel("Username:")
        self.username_entry = HIGTextEntry()
        # Password row widgets.  NOTE(review): HIGPasswordEntry presumably
        # masks the typed text — defined in higentries, confirm there.
        self.password_label = HIGEntryLabel("Password:")
        self.password_entry = HIGPasswordEntry()

        # 2 rows x 2 columns: labels in column 0, entries in column 1.
        self.username_password_table = HIGTable(2, 2)
        self.username_password_table.attach_label(self.username_label,
                                                  0, 1, 0, 1)
        self.username_password_table.attach_entry(self.username_entry,
                                                  1, 2, 0, 1)
        self.username_password_table.attach_label(self.password_label,
                                                  0, 1, 1, 2)
        self.username_password_table.attach_entry(self.password_entry,
                                                  1, 2, 1, 2)
        self.vbox.pack_start(self.username_password_table, False, False)

        # Enter activates the OK/ACCEPT button by default.
        self.set_default_response(gtk.RESPONSE_ACCEPT)

    def run(self):
        """Show all child widgets, then run the dialog; returns the
        gtk response id chosen by the user."""
        self.show_all()
        return HIGDialog.run(self)
if __name__ == '__main__':
    # Manual smoke test (Python 2): show the login dialog and print the
    # symbolic name of the response constant the user picked.
    from gtkutils import gtk_constant_name

    # HIGLoginDialog
    d = HIGLoginDialog()
    response_value = d.run()
    print gtk_constant_name('response', response_value)
    d.destroy()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Balazs Pocze <banyek@gawker.com>
# Certain parts are taken from Mark Theunissen's mysqldb module
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata/documentation constants; consumed by
# ansible-doc and the module loader, not by the code below.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: mysql_variables
short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables
version_added: 1.3
author: "Balazs Pocze (@banyek)"
options:
variable:
description:
- Variable name to operate
required: True
value:
description:
- If set, then sets variable value to this
required: False
extends_documentation_fragment: mysql
'''

EXAMPLES = '''
# Check for sync_binlog setting
- mysql_variables:
variable: sync_binlog
# Set read_only variable to 1
- mysql_variables:
variable: read_only
value: 1
'''
import os
import warnings
from re import match
# Probe for the MySQL-python driver.  NOTE(review): this local
# mysqldb_found flag is shadowed a few lines below by
# `from ansible.module_utils.mysql import mysqldb_found`, so the value
# computed here is effectively unused — confirm before relying on it.
try:
    import MySQLdb
except ImportError:
    mysqldb_found = False
else:
    mysqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, mysql_quote_identifier
from ansible.module_utils.mysql import mysql_connect, mysqldb_found
from ansible.module_utils._text import to_native
def typedvalue(value):
    """
    Convert value to number whenever possible, return same value
    otherwise.

    >>> typedvalue('3')
    3
    >>> typedvalue('3.0')
    3.0
    >>> typedvalue('foobar')
    'foobar'
    """
    # Try the narrower numeric type first so '3' becomes an int rather
    # than a float; fall back to the original value when neither fits.
    for convert in (int, float):
        try:
            return convert(value)
        except ValueError:
            continue
    return value
def getvariable(cursor, mysqlvar):
    """Return the current value of a global MySQL variable.

    Runs a parameterized ``SHOW VARIABLES`` lookup.  Returns the value
    string when exactly one row matches, or ``None`` when the variable
    does not exist (or the name is ambiguous).
    """
    cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
    mysqlvar_val = cursor.fetchall()
    # Use '==' (value equality), not 'is' (identity): comparing ints with
    # 'is' only works by accident of CPython's small-int caching and
    # raises a SyntaxWarning on Python 3.8+.
    if len(mysqlvar_val) == 1:
        return mysqlvar_val[0][1]
    else:
        return None
def setvariable(cursor, mysqlvar, value):
    """ Set a global mysql variable to a given value

    The DB driver will handle quoting of the given value based on its
    type, thus numeric strings like '3.0' or '8' are illegal, they
    should be passed as numeric literals.

    Returns True on success, or the stringified exception message on
    failure (callers check ``result is True``).
    """
    # The variable NAME cannot be bound as a query parameter, so it is
    # quoted/validated separately via mysql_quote_identifier; the VALUE
    # is bound and escaped by the driver.
    query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
    try:
        cursor.execute(query + "%s", (value,))
        cursor.fetchall()
        result = True
    except Exception as e:
        # Deliberate broad catch: the caller reports the message via
        # fail_json instead of crashing the module.
        result = to_native(e)
    return result
def main():
    """Ansible module entry point: query or set one MySQL global variable."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default="localhost"),
            login_port=dict(default=3306, type='int'),
            login_unix_socket=dict(default=None),
            variable=dict(default=None),
            value=dict(default=None),
            ssl_cert=dict(default=None),
            ssl_key=dict(default=None),
            ssl_ca=dict(default=None),
            connect_timeout=dict(default=30, type='int'),
            config_file=dict(default="~/.my.cnf", type="path")
        )
    )
    user = module.params["login_user"]
    password = module.params["login_password"]
    ssl_cert = module.params["ssl_cert"]
    ssl_key = module.params["ssl_key"]
    ssl_ca = module.params["ssl_ca"]
    connect_timeout = module.params['connect_timeout']
    config_file = module.params['config_file']
    db = 'mysql'
    mysqlvar = module.params["variable"]
    value = module.params["value"]
    if mysqlvar is None:
        module.fail_json(msg="Cannot run without variable to operate with")
    # Whitelist the variable name (lowercase alnum + underscore) before it
    # is interpolated into SQL by setvariable().
    if match('^[0-9a-z_]+$', mysqlvar) is None:
        module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
    if not mysqldb_found:
        module.fail_json(msg="The MySQL-python module is required.")
    else:
        # Promote MySQL warnings to errors so they surface as failures.
        warnings.filterwarnings('error', category=MySQLdb.Warning)
    try:
        cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db,
                               connect_timeout=connect_timeout)
    except Exception as e:
        if os.path.exists(config_file):
            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
                                 "Exception message: %s" % (config_file, to_native(e)))
        else:
            module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
    mysqlvar_val = getvariable(cursor, mysqlvar)
    if mysqlvar_val is None:
        module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
    if value is None:
        # Query-only mode: report the current value.
        module.exit_json(msg=mysqlvar_val)
    else:
        # Type values before using them
        value_wanted = typedvalue(value)
        value_actual = typedvalue(mysqlvar_val)
        if value_wanted == value_actual:
            # Idempotent: already at the requested value.
            module.exit_json(msg="Variable already set to requested value", changed=False)
        try:
            result = setvariable(cursor, mysqlvar, value_wanted)
        except SQLParseError as e:
            result = to_native(e)
        if result is True:
            module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
        else:
            module.fail_json(msg=result, changed=False)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
import { render } from "@testing-library/react";
import * as React from "react";
import { Links, Outlet, createRoutesStub } from "../../../index";
describe("<Links>", () => {
  describe("crossOrigin", () => {
    // Each test builds a stub router whose root route declares a stylesheet
    // via `links()`, renders it, and inspects the emitted <link> tag's
    // crossorigin attribute.
    it("renders stylesheet links with crossOrigin attribute when provided", () => {
      let RoutesStub = createRoutesStub([
        {
          id: "root",
          path: "/",
          links: () => [{ rel: "stylesheet", href: "/assets/styles.css" }],
          Component() {
            return (
              <>
                <Links crossOrigin="anonymous" />
                <Outlet />
              </>
            );
          },
          children: [
            { id: "index", index: true, Component: () => <div>Index</div> },
          ],
        },
      ]);
      let { container } = render(<RoutesStub />);
      let stylesheetLink = container.ownerDocument.querySelector(
        'link[rel="stylesheet"][href="/assets/styles.css"]',
      );
      expect(stylesheetLink).toBeTruthy();
      expect(stylesheetLink?.getAttribute("crossorigin")).toBe("anonymous");
    });

    it("renders stylesheet links without crossOrigin when not provided", () => {
      let RoutesStub = createRoutesStub([
        {
          id: "root",
          path: "/",
          links: () => [{ rel: "stylesheet", href: "/assets/styles.css" }],
          Component() {
            return (
              <>
                <Links />
                <Outlet />
              </>
            );
          },
          children: [
            { id: "index", index: true, Component: () => <div>Index</div> },
          ],
        },
      ]);
      let { container } = render(<RoutesStub />);
      let stylesheetLink = container.ownerDocument.querySelector(
        'link[rel="stylesheet"][href="/assets/styles.css"]',
      );
      expect(stylesheetLink).toBeTruthy();
      // No component prop and no descriptor value -> attribute absent.
      expect(stylesheetLink?.hasAttribute("crossorigin")).toBe(false);
    });

    // Precedence: a value on the link descriptor wins over the <Links> prop.
    it("link descriptor crossOrigin overrides the component prop", () => {
      let RoutesStub = createRoutesStub([
        {
          id: "root",
          path: "/",
          links: () => [
            {
              rel: "stylesheet",
              href: "/assets/styles.css",
              crossOrigin: "use-credentials",
            },
          ],
          Component() {
            return (
              <>
                <Links crossOrigin="anonymous" />
                <Outlet />
              </>
            );
          },
          children: [
            { id: "index", index: true, Component: () => <div>Index</div> },
          ],
        },
      ]);
      let { container } = render(<RoutesStub />);
      let stylesheetLink = container.ownerDocument.querySelector(
        'link[rel="stylesheet"][href="/assets/styles.css"]',
      );
      expect(stylesheetLink).toBeTruthy();
      expect(stylesheetLink?.getAttribute("crossorigin")).toBe(
        "use-credentials",
      );
    });

    it("link descriptor crossOrigin works without the component prop", () => {
      let RoutesStub = createRoutesStub([
        {
          id: "root",
          path: "/",
          links: () => [
            {
              rel: "stylesheet",
              href: "/assets/styles.css",
              crossOrigin: "anonymous",
            },
          ],
          Component() {
            return (
              <>
                <Links />
                <Outlet />
              </>
            );
          },
          children: [
            { id: "index", index: true, Component: () => <div>Index</div> },
          ],
        },
      ]);
      let { container } = render(<RoutesStub />);
      let stylesheetLink = container.ownerDocument.querySelector(
        'link[rel="stylesheet"][href="/assets/styles.css"]',
      );
      expect(stylesheetLink).toBeTruthy();
      expect(stylesheetLink?.getAttribute("crossorigin")).toBe("anonymous");
    });

    // An explicit `undefined` on the descriptor must NOT clobber the prop.
    it("link descriptor crossOrigin undefined does not override the component prop", () => {
      let RoutesStub = createRoutesStub([
        {
          id: "root",
          path: "/",
          links: () => [
            {
              rel: "stylesheet",
              href: "/assets/styles.css",
              crossOrigin: undefined,
            },
          ],
          Component() {
            return (
              <>
                <Links crossOrigin="anonymous" />
                <Outlet />
              </>
            );
          },
          children: [
            { id: "index", index: true, Component: () => <div>Index</div> },
          ],
        },
      ]);
      let { container } = render(<RoutesStub />);
      let stylesheetLink = container.ownerDocument.querySelector(
        'link[rel="stylesheet"][href="/assets/styles.css"]',
      );
      expect(stylesheetLink).toBeTruthy();
      expect(stylesheetLink?.getAttribute("crossorigin")).toBe("anonymous");
    });
  });
});
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import abort
import mongoengine
from mongoengine.queryset import MultipleObjectsReturned, DoesNotExist, QuerySet
from mongoengine.base import ValidationError
from .sessions import *
from .pagination import *
def _include_mongoengine(obj):
    """Mirror mongoengine's public API onto *obj* without clobbering
    names the object already defines."""
    for module in (mongoengine, mongoengine.fields):
        for name in module.__all__:
            if not hasattr(obj, name):
                setattr(obj, name, getattr(module, name))
class MongoEngine(object):
    """Flask extension wrapping mongoengine.

    Re-exports the mongoengine / mongoengine.fields public API as
    attributes of this object and exposes the extended `Document` /
    `DynamicDocument` base classes.
    """

    def __init__(self, app=None):
        # Mirror mongoengine's public names onto this object so users can
        # write e.g. ``db.StringField``.
        _include_mongoengine(self)

        self.Document = Document
        self.DynamicDocument = DynamicDocument

        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Open the MongoDB connection from the app's configuration.

        Prefers a ``MONGODB_SETTINGS`` dict; otherwise falls back to the
        individual ``MONGODB_*`` config keys.
        """
        conn_settings = app.config.get('MONGODB_SETTINGS', None)
        if not conn_settings:
            conn_settings = {
                'db': app.config.get('MONGODB_DB', None),
                'username': app.config.get('MONGODB_USERNAME', None),
                'password': app.config.get('MONGODB_PASSWORD', None),
                'host': app.config.get('MONGODB_HOST', None),
                'port': int(app.config.get('MONGODB_PORT', 0)) or None
            }

        # Drop unset/falsy values and lowercase the keys for connect().
        conn_settings = dict([(k.lower(), v) for k, v in conn_settings.items() if v])

        # pymongo expects the camelCase 'replicaSet' spelling.
        if 'replicaset' in conn_settings:
            conn_settings['replicaSet'] = conn_settings['replicaset']
            del conn_settings['replicaset']

        self.connection = mongoengine.connect(**conn_settings)

        # Register ourselves on the app per Flask extension convention.
        app.extensions = getattr(app, 'extensions', {})
        app.extensions['mongoengine'] = self
        self.app = app
class BaseQuerySet(QuerySet):
    """
    A base queryset with handy extras
    """

    def get_or_404(self, *args, **kwargs):
        # Translate "not found" / "ambiguous" / "malformed id" lookups
        # into an HTTP 404 response.
        try:
            return self.get(*args, **kwargs)
        except (MultipleObjectsReturned, DoesNotExist, ValidationError):
            abort(404)

    def first_or_404(self):
        # Like first(), but aborts with HTTP 404 on an empty queryset.
        obj = self.first()
        if obj is None:
            abort(404)
        return obj

    def paginate(self, page, per_page, error_out=True):
        # NOTE(review): error_out is accepted (Flask-SQLAlchemy-compatible
        # signature) but not forwarded to Pagination — confirm intended.
        return Pagination(self, page, per_page)

    def paginate_field(self, field_name, doc_id, page, per_page,
                       total=None):
        # Paginate a list field of a single document.  An optional
        # "<field>_count" attribute is used as a cheap total when present.
        item = self.get(id=doc_id)
        count = getattr(item, field_name + "_count", '')
        total = total or count or len(getattr(item, field_name))
        return ListFieldPagination(self, doc_id, field_name, page, per_page,
                                   total=total)
class Document(mongoengine.Document):
    """Abstract document with extra helpers in the queryset class"""

    # abstract: not a collection itself; queryset_class wires in the
    # get_or_404 / pagination helpers from BaseQuerySet.
    meta = {'abstract': True,
            'queryset_class': BaseQuerySet}

    def paginate_field(self, field_name, page, per_page, total=None):
        # Convenience wrapper around BaseQuerySet.paginate_field for an
        # already-loaded document instance.
        count = getattr(self, field_name + "_count", '')
        total = total or count or len(getattr(self, field_name))
        return ListFieldPagination(self.__class__.objects, self.pk, field_name,
                                   page, per_page, total=total)
class DynamicDocument(mongoengine.DynamicDocument):
    """Abstract Dynamic document with extra helpers in the queryset class"""

    # abstract: not a collection itself; queryset_class wires in the
    # get_or_404 / pagination helpers from BaseQuerySet.
    meta = {'abstract': True,
            'queryset_class': BaseQuerySet}
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unescapeHTML,
)
class TVN24IE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:[^/]+)\.)?tvn24(?:bis)?\.pl/(?:[^/]+/)*(?P<id>[^/]+)\.html'
_TESTS = [{
'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html',
'md5': 'fbdec753d7bc29d96036808275f2130c',
'info_dict': {
'id': '1584444',
'ext': 'mp4',
'title': '"Święta mają być wesołe, dlatego, ludziska, wszyscy pod jemiołę"',
'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości "Szkła kontaktowego".',
'thumbnail': 're:http://.*[.]jpeg',
}
}, {
'url': 'http://fakty.tvn24.pl/ogladaj-online,60/53-konferencja-bezpieczenstwa-w-monachium,716431.html',
'only_matching': True,
}, {
'url': 'http://sport.tvn24.pl/pilka-nozna,105/ligue-1-kamil-glik-rozcial-glowe-monaco-tylko-remisuje-z-bastia,716522.html',
'only_matching': True,
}, {
'url': 'http://tvn24bis.pl/poranek,146,m/gen-koziej-w-tvn24-bis-wracamy-do-czasow-zimnej-wojny,715660.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
def extract_json(attr, name, fatal=True):
return self._parse_json(
self._search_regex(
r'\b%s=(["\'])(?P<json>(?!\1).+?)\1' % attr, webpage,
name, group='json', fatal=fatal) or '{}',
video_id, transform_source=unescapeHTML, fatal=fatal)
quality_data = extract_json('data-quality', 'formats')
formats = []
for format_id, url in quality_data.items():
formats.append({
'url': url,
'format_id': format_id,
'height': int_or_none(format_id.rstrip('p')),
})
self._sort_formats(formats)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(
webpage, default=None) or self._html_search_regex(
r'\bdata-poster=(["\'])(?P<url>(?!\1).+?)\1', webpage,
'thumbnail', group='url')
share_params = extract_json(
'data-share-params', 'share params', fatal=False)
if isinstance(share_params, dict):
video_id = share_params.get('id') or video_id
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats,
} | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG for demonstrating the behavior of the Assets feature in Airflow, including conditional and
asset expression-based scheduling.
Notes on usage:
Turn on all the DAGs.
asset_produces_1 is scheduled to run daily. Once it completes, it triggers several DAGs due to its asset
being updated. asset_consumes_1 is triggered immediately, as it depends solely on the asset produced by
asset_produces_1. consume_1_or_2_with_asset_expressions will also be triggered, as its condition of
either asset_produces_1 or asset_produces_2 being updated is satisfied with asset_produces_1.
asset_consumes_1_and_2 will not be triggered after asset_produces_1 runs because it requires the asset
from asset_produces_2, which has no schedule and must be manually triggered.
After manually triggering asset_produces_2, several DAGs will be affected. asset_consumes_1_and_2 should
run because both its asset dependencies are now met. consume_1_and_2_with_asset_expressions will be
triggered, as it requires both asset_produces_1 and asset_produces_2 assets to be updated.
consume_1_or_2_with_asset_expressions will be triggered again, since it's conditionally set to run when
either asset is updated.
consume_1_or_both_2_and_3_with_asset_expressions demonstrates complex asset dependency logic.
This DAG triggers if asset_produces_1 is updated or if both asset_produces_2 and dag3_asset
are updated. This example highlights the capability to combine updates from multiple assets with logical
expressions for advanced scheduling.
conditional_asset_and_time_based_timetable illustrates the integration of time-based scheduling with
asset dependencies. This DAG is configured to execute either when both asset_produces_1 and
asset_produces_2 assets have been updated or according to a specific cron schedule, showcasing
Airflow's versatility in handling mixed triggers for asset and time-based scheduling.
The DAGs asset_consumes_1_never_scheduled and asset_consumes_unknown_never_scheduled will not run
automatically as they depend on assets that do not get updated or are not produced by any scheduled tasks.
"""
from __future__ import annotations
# [START asset_def]
import pendulum
from airflow.providers.standard.operators.bash import BashOperator
from airflow.sdk import DAG, Asset, AssetOrTimeSchedule, CronTriggerTimetable
dag1_asset = Asset("s3://dag1/output_1.txt", extra={"hi": "bye"})
dag2_asset = Asset("s3://dag2/output_1.txt", extra={"hi": "bye"})
dag3_asset = Asset("s3://dag3/output_3.txt", extra={"hi": "bye"})
with DAG(
dag_id="asset_produces_1",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule="@daily",
tags=["produces", "asset-scheduled"],
) as dag1:
# [START task_outlet]
BashOperator(outlets=[dag1_asset], task_id="producing_task_1", bash_command="sleep 5")
# [END task_outlet]
with DAG(
dag_id="asset_produces_2",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=None,
tags=["produces", "asset-scheduled"],
) as dag2:
BashOperator(outlets=[dag2_asset], task_id="producing_task_2", bash_command="sleep 5")
# [START dag_dep]
with DAG(
dag_id="asset_consumes_1",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[dag1_asset],
tags=["consumes", "asset-scheduled"],
) as dag3:
# [END dag_dep]
BashOperator(
outlets=[Asset("s3://consuming_1_task/asset_other.txt")],
task_id="consuming_1",
bash_command="sleep 5",
)
with DAG(
dag_id="asset_consumes_1_and_2",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[dag1_asset, dag2_asset],
tags=["consumes", "asset-scheduled"],
) as dag4:
BashOperator(
outlets=[Asset("s3://consuming_2_task/asset_other_unknown.txt")],
task_id="consuming_2",
bash_command="sleep 5",
)
with DAG(
dag_id="asset_consumes_1_never_scheduled",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[
dag1_asset,
Asset("s3://unrelated/this-asset-doesnt-get-triggered"),
],
tags=["consumes", "asset-scheduled"],
) as dag5:
BashOperator(
outlets=[Asset("s3://consuming_2_task/asset_other_unknown.txt")],
task_id="consuming_3",
bash_command="sleep 5",
)
with DAG(
dag_id="asset_consumes_unknown_never_scheduled",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[
Asset("s3://unrelated/asset3.txt"),
Asset("s3://unrelated/asset_other_unknown.txt"),
],
tags=["asset-scheduled"],
) as dag6:
BashOperator(
task_id="unrelated_task",
outlets=[Asset("s3://unrelated_task/asset_other_unknown.txt")],
bash_command="sleep 5",
)
with DAG(
dag_id="consume_1_and_2_with_asset_expressions",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=(dag1_asset & dag2_asset),
) as dag5:
BashOperator(
outlets=[Asset("s3://consuming_2_task/asset_other_unknown.txt")],
task_id="consume_1_and_2_with_asset_expressions",
bash_command="sleep 5",
)
with DAG(
dag_id="consume_1_or_2_with_asset_expressions",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=(dag1_asset | dag2_asset),
) as dag6:
BashOperator(
outlets=[Asset("s3://consuming_2_task/asset_other_unknown.txt")],
task_id="consume_1_or_2_with_asset_expressions",
bash_command="sleep 5",
)
with DAG(
dag_id="consume_1_or_both_2_and_3_with_asset_expressions",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=(dag1_asset | (dag2_asset & dag3_asset)),
) as dag7:
BashOperator(
outlets=[Asset("s3://consuming_2_task/asset_other_unknown.txt")],
task_id="consume_1_or_both_2_and_3_with_asset_expressions",
bash_command="sleep 5",
)
with DAG(
dag_id="conditional_asset_and_time_based_timetable",
catchup=False,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=AssetOrTimeSchedule(
timetable=CronTriggerTimetable("0 1 * * 3", timezone="UTC"), assets=(dag1_asset & dag2_asset)
),
tags=["asset-time-based-timetable"],
) as dag8:
BashOperator(
outlets=[Asset("s3://asset_time_based/asset_other_unknown.txt")],
task_id="conditional_asset_and_time_based_timetable",
bash_command="sleep 5",
)
# [END asset_def] | python | github | https://github.com/apache/airflow | airflow-core/src/airflow/example_dags/example_assets.py |
<?php
namespace Illuminate\View\Compilers\Concerns;
trait CompilesIncludes
{
/**
* Compile the each statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileEach($expression)
{
return "<?php echo \$__env->renderEach{$expression}; ?>";
}
/**
* Compile the include statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileInclude($expression)
{
$expression = $this->stripParentheses($expression);
return "<?php echo \$__env->make({$expression}, array_diff_key(get_defined_vars(), ['__data' => 1, '__path' => 1]))->render(); ?>";
}
/**
* Compile the include-if statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileIncludeIf($expression)
{
$expression = $this->stripParentheses($expression);
return "<?php if (\$__env->exists({$expression})) echo \$__env->make({$expression}, array_diff_key(get_defined_vars(), ['__data' => 1, '__path' => 1]))->render(); ?>";
}
/**
* Compile the include-when statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileIncludeWhen($expression)
{
$expression = $this->stripParentheses($expression);
return "<?php echo \$__env->renderWhen($expression, array_diff_key(get_defined_vars(), ['__data' => 1, '__path' => 1])); ?>";
}
/**
* Compile the include-unless statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileIncludeUnless($expression)
{
$expression = $this->stripParentheses($expression);
return "<?php echo \$__env->renderUnless($expression, array_diff_key(get_defined_vars(), ['__data' => 1, '__path' => 1])); ?>";
}
/**
* Compile the include-first statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileIncludeFirst($expression)
{
$expression = $this->stripParentheses($expression);
return "<?php echo \$__env->first({$expression}, array_diff_key(get_defined_vars(), ['__data' => 1, '__path' => 1]))->render(); ?>";
}
/**
* Compile the include-isolated statements into valid PHP.
*
* @param string $expression
* @return string
*/
protected function compileIncludeIsolated($expression)
{
$expression = $this->stripParentheses($expression);
return "<?php echo \$__env->make({$expression})->render(); ?>";
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/View/Compilers/Concerns/CompilesIncludes.php |
ktor:
deployment:
port : 4244 | unknown | github | https://github.com/ktorio/ktor | ktor-server/ktor-server-core/jvm/test-resources/custom.config.yaml |
#
# euc_jp.py: Python Unicode Codec for EUC_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jp')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jp',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
) | unknown | codeparrot/codeparrot-clean | ||
package local
// This package contains the legacy in-proc calls in HCS using the v1 schema
// for Windows runtime purposes.
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim"
containerd "github.com/containerd/containerd/v2/client"
"github.com/containerd/containerd/v2/pkg/cio"
cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/moby/moby/v2/daemon/internal/libcontainerd/queue"
libcontainerdtypes "github.com/moby/moby/v2/daemon/internal/libcontainerd/types"
"github.com/moby/moby/v2/errdefs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
"google.golang.org/protobuf/types/known/timestamppb"
)
// process tracks a single HCS process (the container init process or an
// exec'd process) together with its exit bookkeeping.
type process struct {
	// mu guards the mutable fields of this struct.
	//
	// Always lock mu before ctr's mutex to prevent deadlocks.
	mu         sync.Mutex
	id         string                 // Invariants: immutable
	ctr        *container             // Invariants: immutable, ctr != nil
	hcsProcess hcsshim.Process        // Is set to nil on process exit
	exited     *containerd.ExitStatus // Valid iff waitCh is closed
	waitCh     chan struct{}          // closed once the exit status has been recorded in exited
}
// task represents the container's init process. On Windows the task and the
// init process are the same object, so task simply embeds process.
type task struct {
	process
}
// container tracks a single HCS container and its (at most one) current task.
type container struct {
	// mu guards the mutable fields below. When both are needed, always lock
	// a process's mu before this mutex (see process.mu).
	mu sync.Mutex
	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec
	//
	// A container value with ociSpec == nil represents a container which
	// has been loaded with (*client).LoadContainer, and is ineligible to
	// be Start()ed.
	ociSpec *specs.Spec
	hcsContainer hcsshim.Container // Is set to nil on container delete
	isPaused     bool              // whether the container is currently paused
	client       *client           // owning client; used for logging and event delivery
	id           string            // container ID; immutable
	// NOTE(review): terminateInvoked appears to guard against issuing
	// Terminate more than once — confirm against terminateContainer.
	terminateInvoked bool
	// task is a reference to the current task for the container. As a
	// corollary, when task == nil the container has no current task: the
	// container was never Start()ed or the task was Delete()d.
	task *task
}
// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"
// client is the local (in-process, HCS v1) libcontainerd client for Windows.
type client struct {
	backend libcontainerdtypes.Backend // receives ProcessEvent lifecycle callbacks
	logger  *log.Entry                 // logger scoped to the libcontainerd module
	eventQ  queue.Queue                // serializes event delivery per container ID
}
// NewClient creates a new local executor for windows
func NewClient(ctx context.Context, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{backend: b}
	c.logger = log.G(ctx).WithField("module", "libcontainerd")
	return c, nil
}
// Version is not supported by the local (HCS v1) runtime on Windows.
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	var v containerd.Version
	return v, errors.New("not implemented on Windows")
}
// NewContainer is the entrypoint to create a container from a spec.
// Table below shows the fields required for HCS JSON calling parameters,
// where if not populated, is omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
//	{
//		"SystemType": "Container",
//		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Owner": "docker",
//		"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//		"IgnoreFlushesDuringBoot": true,
//		"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "5e0055c814a6",
//		"MappedDirectories": [],
//		"HvPartition": false,
//		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	}
//
// Isolation=Hyper-V example:
//
//	{
//		"SystemType": "Container",
//		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//		"Owner": "docker",
//		"IgnoreFlushesDuringBoot": true,
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "475c2c58933b",
//		"MappedDirectories": [],
//		"HvPartition": true,
//		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//		"DNSSearchList": "a.com,b.com,c.com",
//		"HvRuntime": {
//			"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//		},
//	}
func (c *client) NewContainer(_ context.Context, id string, spec *specs.Spec, _ string, _ any, _ ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	// The local runtime only handles Windows containers.
	if spec.Linux != nil {
		return nil, errors.New("linux containers are not supported on this platform")
	}

	ctr, err := c.createWindows(id, spec)
	if err != nil {
		return nil, err
	}

	// Deliver the create event asynchronously, serialized per container ID.
	c.eventQ.Append(id, func() {
		fields := log.Fields{
			"container": id,
			"event":     libcontainerdtypes.EventCreate,
		}
		c.logger.WithFields(fields).Info("sending event")
		ei := libcontainerdtypes.EventInfo{ContainerID: id}
		if perr := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei); perr != nil {
			fields["error"] = perr
			c.logger.WithFields(fields).Error("failed to process event")
		}
	})
	return ctr, nil
}
// createWindows translates the OCI spec into an hcsshim.ContainerConfig,
// creates the container through the HCS v1 API, and starts it. If Start
// fails, the partially-created container is terminated before returning.
func (c *client) createWindows(id string, spec *specs.Spec) (*container, error) {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	// CPU and memory limits.
	c.extractResourcesFromSpec(spec, configuration)

	// Storage (disk bandwidth / IOPS) limits.
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return nil, fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO circa RS5, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return nil, errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return nil, err
			}
		}
		if uvmImagePath == "" {
			return nil, errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return nil, errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		// BUGFIX: the match result must be checked, not just the error.
		// regexp.MatchString only returns a non-nil error when the pattern
		// itself is invalid, so the previous error-only check accepted any
		// Root.Path, making this validation a no-op.
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return nil, fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}

	if spec.Root.Readonly {
		return nil, errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return nil, err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return nil, fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
	configuration.MappedDirectories = mds
	configuration.MappedPipes = mps

	if len(spec.Windows.Devices) > 0 {
		// Add any device assignments
		if configuration.HvPartition {
			return nil, errors.New("device assignment is not supported for HyperV containers")
		}
		for _, d := range spec.Windows.Devices {
			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
			// these represent an Interface Class GUID.
			if d.IDType != "class" && d.IDType != "vpci-class-guid" {
				return nil, errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
			}
			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
		}
	}

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return nil, err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		client:       c,
		id:           id,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	logger.Debug("starting container")
	if err := ctr.hcsContainer.Start(); err != nil {
		logger.WithError(err).Error("failed to start container")
		ctr.mu.Lock()
		if err := ctr.terminateContainer(); err != nil {
			logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		ctr.mu.Unlock()
		return nil, err
	}

	logger.Debug("createWindows() completed successfully")
	return ctr, nil
}
// extractResourcesFromSpec copies CPU and memory limits from the OCI spec
// into the HCS container configuration. Storage limits are applied by the
// caller (createWindows).
func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
	res := spec.Windows.Resources
	if res == nil {
		return
	}
	if cpu := res.CPU; cpu != nil {
		if cpu.Count != nil {
			// This check is being done here rather than in adaptContainerSettings
			// because we don't want to update the HostConfig in case this container
			// is moved to a host with more CPUs than this one.
			requested := *cpu.Count
			available := uint64(runtime.NumCPU())
			if requested > available {
				c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", requested, available)
				requested = available
			}
			configuration.ProcessorCount = uint32(requested)
		}
		if cpu.Shares != nil {
			configuration.ProcessorWeight = uint64(*cpu.Shares)
		}
		if cpu.Maximum != nil {
			configuration.ProcessorMaximum = int64(*cpu.Maximum)
		}
	}
	if mem := res.Memory; mem != nil && mem.Limit != nil {
		configuration.MemoryMaximumInMB = int64(*mem.Limit) / 1024 / 1024
	}
}
// NewTask creates and starts the container's init process in HCS, attaches
// its stdio to the caller-supplied streams, and records the new task on the
// container. On Windows the process is actually started here, so the
// returned task's Start() is a no-op. Returns NotImplemented for restored
// (LoadContainer) containers and NotModified if a task already exists.
func (ctr *container) NewTask(_ context.Context, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, retErr error) {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()
	switch {
	case ctr.ociSpec == nil:
		// Containers loaded via LoadContainer carry no spec (see the
		// container struct) and therefore cannot be started.
		return nil, errors.WithStack(errdefs.NotImplemented(errors.New("a restored container cannot be started")))
	case ctr.task != nil:
		// At most one task may exist per container at a time.
		return nil, errors.WithStack(errdefs.NotModified(cerrdefs.ErrAlreadyExists))
	}
	logger := ctr.client.logger.WithField("container", ctr.id)
	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if it we're not -t.
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal
	}
	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		// NOTE(review): ociSpec.Process is dereferenced here and below
		// without the nil guard used above — confirm Process is always
		// non-nil for containers reaching this point.
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: createStdErrPipe,
	}
	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}
	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
	createProcessParms.User = ctr.ociSpec.Process.User.Username
	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		return nil, err
	}
	// On any later failure, kill the just-created process and reap it in
	// the background so no HCS resources are leaked.
	defer func() {
		if retErr != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()
	pid := newProcess.Pid()
	logger.WithField("pid", pid).Debug("init process started")
	// Wire the HCS stdio pipes into a containerd DirectIO and hand it to
	// the caller's attach callback.
	dio, err := newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return nil, err
	}
	_, err = attachStdio(dio)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return nil, err
	}
	t := &task{process{
		id:         ctr.id,
		ctr:        ctr,
		hcsProcess: newProcess,
		waitCh:     make(chan struct{}),
	}}
	// All fallible operations have succeeded so it is now safe to set the
	// container's current task.
	ctr.task = t
	// Spin up a goroutine to notify the backend and clean up resources when
	// the task exits. Defer until after the start event is sent so that the
	// exit event is not sent out-of-order.
	defer func() { go t.reap() }()
	// Generate the associated event
	ctr.client.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   t.id,
			Pid:         uint32(pid),
		}
		ctr.client.logger.WithFields(log.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := ctr.client.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
		if err != nil {
			ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  ei.ContainerID,
				"event":      libcontainerdtypes.EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return t, nil
}
// Start is a no-op on Windows: the init process has already been started by
// the time NewTask returns.
func (*task) Start(context.Context) error {
	return nil
}
// Task returns the container's current task, or a NotFound error when the
// container has no running task.
func (ctr *container) Task(context.Context) (libcontainerdtypes.Task, error) {
	ctr.mu.Lock()
	t := ctr.task
	ctr.mu.Unlock()
	if t != nil {
		return t, nil
	}
	return nil, errdefs.NotFound(cerrdefs.ErrNotFound)
}
// setCommandLineAndArgs configures the HCS ProcessConfig command line from an
// OCI process spec: an explicit CommandLine wins; otherwise Args are escaped
// and joined Windows-style.
func setCommandLineAndArgs(p *specs.Process, cfg *hcsshim.ProcessConfig) {
	cmdline := p.CommandLine
	if cmdline == "" {
		cmdline = escapeArgs(p.Args)
	}
	cfg.CommandLine = cmdline
}
// escapeArgs makes a Windows-style escaped command line from a set of arguments
func escapeArgs(args []string) string {
	escaped := make([]string, 0, len(args))
	for _, arg := range args {
		escaped = append(escaped, windows.EscapeArg(arg))
	}
	return strings.Join(escaped, " ")
}
// newIOFromProcess wraps the HCS process's stdio pipes in a containerd
// DirectIO, auto-closing stdout/stderr readers on EOF.
func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
	stdin, stdout, stderr, err := newProcess.Stdio()
	if err != nil {
		return nil, err
	}
	// Expose the ReadClosers as plain Readers that close themselves.
	wrap := func(rc io.ReadCloser) io.ReadCloser {
		return io.NopCloser(&autoClosingReader{ReadCloser: rc})
	}
	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
	if stdout != nil {
		dio.Stdout = wrap(stdout)
	}
	if stderr != nil {
		dio.Stderr = wrap(stderr)
	}
	return dio, nil
}
// Exec launches a process in a running container.
//
// The processID argument is entirely informational. As there is no mechanism
// (exposed through the libcontainerd interfaces) to enumerate or reference an
// exec'd process by ID, uniqueness is not currently enforced.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Process, retErr error) {
	hcsContainer, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}
	logger := t.ctr.client.logger.WithFields(log.Fields{
		"container": t.ctr.id,
		"exec":      processID,
	})

	// Note we always tell HCS to
	// create stdout as it's required regardless of '-i' or '-t' options, so that
	// docker can always grab the output through logs. We also tell HCS to always
	// create stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if it we're not -t.
	createProcessParms := &hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			// HCS expects (height, width) in that index order.
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(spec, createProcessParms)
	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = spec.User.Username

	// Start the command running in the container.
	newProcess, err := hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return nil, err
	}
	// If any later step fails, kill the process and reap it on a background
	// goroutine so resources are not leaked.
	defer func() {
		if retErr != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()
	dio, err := newIOFromProcess(newProcess, spec.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return nil, err
	}
	// Tell the engine to attach streams back to the client
	_, err = attachStdio(dio)
	if err != nil {
		return nil, err
	}

	p := &process{
		id:         processID,
		ctr:        t.ctr,
		hcsProcess: newProcess,
		waitCh:     make(chan struct{}),
	}

	// Spin up a goroutine to notify the backend and clean up resources when
	// the process exits. Defer until after the start event is sent so that
	// the exit event is not sent out-of-order.
	defer func() { go p.reap() }()

	pid := newProcess.Pid()
	// Notify the backend of the exec's creation and start; eventQ serializes
	// event delivery per container.
	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(pid),
		}
		t.ctr.client.logger.WithFields(log.Fields{
			"container":  t.ctr.id,
			"event":      libcontainerdtypes.EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  t.ctr.id,
				"event":      libcontainerdtypes.EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  t.ctr.id,
				"event":      libcontainerdtypes.EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})

	return p, nil
}
// Pid returns the OS pid of the process, or 0 after the process has been
// reaped (hcsProcess is cleared by reap()).
func (p *process) Pid() uint32 {
	p.mu.Lock()
	proc := p.hcsProcess
	p.mu.Unlock()

	if proc != nil {
		return uint32(proc.Pid())
	}
	return 0
}
// Kill forcibly terminates the process. Windows has no real signal support,
// so the signal argument is ignored. Returns NotFound once the process has
// been reaped.
func (p *process) Kill(_ context.Context, signal syscall.Signal) error {
	p.mu.Lock()
	proc := p.hcsProcess
	p.mu.Unlock()

	if proc != nil {
		return proc.Kill()
	}
	return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
}
// Kill handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (t *task) Kill(_ context.Context, signal syscall.Signal) error {
	hcsContainer, err := t.getHCSContainer()
	if err != nil {
		return err
	}

	logger := t.ctr.client.logger.WithFields(log.Fields{
		"container": t.ctr.id,
		"process":   t.id,
		"pid":       t.Pid(),
		"signal":    signal,
	})
	logger.Debug("Signal()")

	var op string
	if signal == syscall.SIGKILL {
		// Terminate the compute system. Record that terminate was invoked so
		// that shutdownContainer knows to wait for the in-flight terminate
		// instead of issuing a fresh shutdown.
		t.ctr.mu.Lock()
		t.ctr.terminateInvoked = true
		t.ctr.mu.Unlock()
		op, err = "terminate", hcsContainer.Terminate()
	} else {
		// Shut down the container gracefully.
		op, err = "shutdown", hcsContainer.Shutdown()
	}
	// A pending or already-stopped result is success for our purposes; any
	// other failure is logged but deliberately not returned, as the exit is
	// reported to the engine by the reaper goroutine.
	// (Fixed log-message typo: "hccshim" -> "hcsshim".)
	if err != nil && !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
		logger.WithError(err).Errorf("failed to %s hcsshim container", op)
	}

	return nil
}
// Resize handles a CLI event to resize an interactive docker run or docker
// exec window.
//
// Returns NotFound after the process has been reaped (hcsProcess cleared).
func (p *process) Resize(_ context.Context, width, height uint32) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}
	p.ctr.client.logger.WithFields(log.Fields{
		"container": p.ctr.id,
		"process":   p.id,
		"height":    height,
		"width":     width,
		"pid":       hcsProcess.Pid(),
	}).Debug("resizing")
	// HCS takes 16-bit console dimensions; values are truncated.
	return hcsProcess.ResizeConsole(uint16(width), uint16(height))
}
// CloseStdin closes the process's stdin pipe. Returns NotFound once the
// process has been reaped.
func (p *process) CloseStdin(context.Context) error {
	p.mu.Lock()
	proc := p.hcsProcess
	p.mu.Unlock()

	if proc != nil {
		return proc.CloseStdin()
	}
	return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
}
// Pause handles pause requests for containers.
//
// Only Hyper-V isolated containers can be paused; process-isolated containers
// share the host kernel and return NotImplemented.
func (t *task) Pause(_ context.Context) error {
	if t.ctr.ociSpec.Windows.HyperV == nil {
		return errdefs.NotImplemented(errors.WithStack(errors.New("not implemented for containers using process isolation")))
	}

	// Hold the container lock across the state check and the Pause call so we
	// cannot race with task deletion or container teardown.
	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return err
	}
	if t.ctr.hcsContainer == nil {
		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	if err := t.ctr.hcsContainer.Pause(); err != nil {
		return err
	}

	t.ctr.isPaused = true

	// Notify the backend asynchronously; eventQ serializes delivery per container.
	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   t.id,
		})
		t.ctr.client.logger.WithFields(log.Fields{
			"container": t.ctr.id,
			"event":     libcontainerdtypes.EventPaused,
		}).Info("sending event")
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container": t.ctr.id,
				"event":     libcontainerdtypes.EventPaused,
			}).Error("failed to process event")
		}
	})

	return nil
}
// Resume handles resume requests for containers.
//
// Mirrors Pause: only Hyper-V isolated containers support it.
func (t *task) Resume(ctx context.Context) error {
	if t.ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	// Hold the container lock across the state check and the Resume call so
	// we cannot race with task deletion or container teardown.
	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return err
	}
	if t.ctr.hcsContainer == nil {
		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	if err := t.ctr.hcsContainer.Resume(); err != nil {
		return err
	}

	t.ctr.isPaused = false

	// Notify the backend asynchronously; eventQ serializes delivery per container.
	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   t.id,
		})
		t.ctr.client.logger.WithFields(log.Fields{
			"container": t.ctr.id,
			"event":     libcontainerdtypes.EventResumed,
		}).Info("sending event")
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container": t.ctr.id,
				"event":     libcontainerdtypes.EventResumed,
			}).Error("failed to process event")
		}
	})

	return nil
}
// Stats handles stats requests for containers, returning the HCS statistics
// snapshot together with the time at which it was read.
func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {
	container, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}

	read := time.Now()
	stats, err := container.Statistics()
	if err != nil {
		return nil, err
	}
	return &libcontainerdtypes.Stats{
		Read:     read,
		HCSStats: &stats,
	}, nil
}
// LoadContainer is the handler for restoring a container.
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
	c.logger.WithField("container", id).Debug("LoadContainer()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistence, we call in to shoot it regardless if HCS knows about it
	// We explicitly just log a warning if the terminate fails.
	// Then we tell the backend the container exited.
	hc, err := hcsshim.OpenContainer(id)
	if err != nil {
		return nil, errdefs.NotFound(errors.New("container not found"))
	}
	const terminateTimeout = time.Minute * 2
	err = hc.Terminate()

	// Terminate is asynchronous: a pending result means we must wait for it
	// to complete; already-stopped is success for our purposes.
	if hcsshim.IsPending(err) {
		err = hc.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
		return nil, err
	}
	// The returned container has no task; AttachTask always fails so the
	// daemon treats it as exited during restore.
	return &container{
		client:       c,
		hcsContainer: hc,
		id:           id,
	}, nil
}
// AttachTask is only called by the daemon when restoring containers. As
// re-attach isn't possible (see LoadContainer), a NotFound error is
// unconditionally returned to allow restore to make progress.
func (*container) AttachTask(context.Context, libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	return nil, errdefs.NotFound(cerrdefs.ErrNotImplemented)
}
// Pids returns a list of process IDs running in a container. It is not
// implemented on Windows. (docker top uses Summary instead; see below.)
func (t *task) Pids(context.Context) ([]containerd.ProcessInfo, error) {
	return nil, errors.New("not implemented on Windows")
}
// Summary returns a summary of the processes running in a container.
// This is present in Windows to support docker top. In linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (t *task) Summary(_ context.Context) ([]libcontainerdtypes.Summary, error) {
	hc, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}

	procs, err := hc.ProcessList()
	if err != nil {
		return nil, err
	}

	// Translate each hcsshim process entry into the libcontainerd summary type.
	summaries := make([]libcontainerdtypes.Summary, 0, len(procs))
	for _, proc := range procs {
		summaries = append(summaries, libcontainerdtypes.Summary{
			ImageName:                    proc.ImageName,
			CreatedAt:                    timestamppb.New(proc.CreateTimestamp),
			KernelTime_100Ns:             proc.KernelTime100ns,
			MemoryCommitBytes:            proc.MemoryCommitBytes,
			MemoryWorkingSetPrivateBytes: proc.MemoryWorkingSetPrivateBytes,
			MemoryWorkingSetSharedBytes:  proc.MemoryWorkingSetSharedBytes,
			ProcessID:                    proc.ProcessId,
			UserTime_100Ns:               proc.UserTime100ns,
			ExecID:                       "",
		})
	}
	return summaries, nil
}
// Delete returns the exit status of a finished exec'd process. While the
// process is still running it fails with a Conflict error.
func (p *process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	select {
	case <-p.waitCh:
		// Process has exited; p.exited is safe to read (synchronized by waitCh).
		return p.exited, nil
	case <-ctx.Done():
		return nil, errors.WithStack(ctx.Err())
	default:
		return nil, errdefs.Conflict(errors.New("process is running"))
	}
}
// Delete detaches the task from its container and returns its exit status.
// It fails with a Conflict error while the task's process is still running.
func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	select {
	case <-ctx.Done():
		return nil, errors.WithStack(ctx.Err())
	case <-t.waitCh:
	default:
		return nil, errdefs.Conflict(errors.New("container is not stopped"))
	}

	// Clear ctr.task so subsequent operations on this task get NotFound.
	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return nil, err
	}
	t.ctr.task = nil
	// Reads of t.exited are synchronized by waitCh, which is closed by reap.
	return t.exited, nil
}
// ForceDelete deletes the task, killing its process first if it is still
// running. It blocks until the process has exited or ctx is cancelled.
func (t *task) ForceDelete(ctx context.Context) error {
	select {
	case <-t.waitCh: // Task is already stopped.
		_, err := t.Delete(ctx)
		return err
	default:
	}

	if err := t.Kill(ctx, syscall.SIGKILL); err != nil {
		return errors.Wrap(err, "could not force-kill task")
	}

	// Wait for the reaper to observe the exit, then delete.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-t.waitCh:
		_, err := t.Delete(ctx)
		return err
	}
}
// Status reports the task's current state: Stopped (with exit code/time) once
// the process has been reaped, otherwise Running or Paused.
func (t *task) Status(ctx context.Context) (containerd.Status, error) {
	select {
	case <-t.waitCh:
		// Reads of t.exited are synchronized by waitCh (closed by reap).
		return containerd.Status{
			Status:     containerd.Stopped,
			ExitStatus: t.exited.ExitCode(),
			ExitTime:   t.exited.ExitTime(),
		}, nil
	default:
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	s := containerd.Running
	if t.ctr.isPaused {
		s = containerd.Paused
	}
	return containerd.Status{Status: s}, nil
}
// UpdateResources is a no-op on Windows.
// Updating resource isn't supported on Windows
// but we should return nil for enabling updating container
func (*task) UpdateResources(context.Context, *libcontainerdtypes.Resources) error {
	return nil
}
// CreateCheckpoint is unsupported: Windows containers have no checkpoint
// (CRIU-style) facility.
func (*task) CreateCheckpoint(context.Context, string, bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}
// assertIsCurrentTask returns a non-nil error if the task has been deleted.
// The caller must hold whatever synchronization protects t.ctr.task.
func (t *task) assertIsCurrentTask() error {
	if t.ctr.task == t {
		return nil
	}
	return errors.WithStack(errdefs.NotFound(fmt.Errorf("task %q not found", t.id)))
}
// getHCSContainer returns a reference to the hcsshim Container for the task's
// container if neither the task nor container have been deleted.
//
// t.ctr.mu must not be locked by the calling goroutine when calling this
// function (it takes the lock itself).
func (t *task) getHCSContainer() (hcsshim.Container, error) {
	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return nil, err
	}
	hc := t.ctr.hcsContainer
	if hc == nil {
		// hcsContainer is cleared by container.Delete.
		return nil, errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	return hc, nil
}
// shutdownContainer gracefully shuts the container down, falling back to a
// hard terminate if the shutdown fails or times out.
//
// ctr mutex must be held when calling this function.
func (ctr *container) shutdownContainer() error {
	var err error
	const waitTimeout = time.Minute * 5

	// If a terminate is already in flight (see task.Kill) skip the shutdown
	// request and just wait for the terminate to complete.
	if !ctr.terminateInvoked {
		err = ctr.hcsContainer.Shutdown()
	}

	if hcsshim.IsPending(err) || ctr.terminateInvoked {
		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		ctr.client.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		terminateErr := ctr.terminateContainer()
		if terminateErr != nil {
			ctr.client.logger.WithError(terminateErr).WithField("container", ctr.id).
				Error("failed to shutdown container, and subsequent terminate also failed")
			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
		}
		// Terminate succeeded, but surface the original shutdown failure.
		return err
	}

	return nil
}
// terminateContainer forcibly stops the container's compute system, waiting
// for an asynchronous (pending) terminate to complete.
//
// ctr mutex must be held when calling this function.
func (ctr *container) terminateContainer() error {
	const terminateTimeout = time.Minute * 5
	// Record the terminate so shutdownContainer does not issue a redundant
	// shutdown afterwards.
	ctr.terminateInvoked = true
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		ctr.client.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}

	return nil
}
// reap blocks until the process exits, releases its HCS resources, records
// the exit status, closes waitCh, and queues an exit event for the backend.
// It runs on its own goroutine (see Exec / task start).
func (p *process) reap() {
	logger := p.ctr.client.logger.WithFields(log.Fields{
		"container": p.ctr.id,
		"process":   p.id,
	})

	var eventErr error

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		// ERROR_BROKEN_PIPE is expected when the container was killed; don't
		// spam the log for it.
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we tell the
		// docker engine that the process/container has exited to avoid
		// a container being dropped on the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we tell the
		// docker engine that the process/container has exited to avoid
		// a container being dropped on the floor.
	}

	// Detach the hcs process from p so other methods (Pid, Kill, etc.)
	// observe the process as gone, then release its resources.
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.hcsProcess = nil
	p.mu.Unlock()

	if err := hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
		exitCode = -1
		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
	}

	// Explicit locking is not required as reads from exited are
	// synchronized using waitCh.
	p.exited = containerd.NewExitStatus(uint32(exitCode), exitedAt, nil)
	close(p.waitCh)

	// Report the exit to the backend; eventQ serializes delivery per container.
	p.ctr.client.eventQ.Append(p.ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: p.ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(hcsProcess.Pid()),
			ExitCode:    uint32(exitCode),
			ExitedAt:    exitedAt,
			Error:       eventErr,
		}
		p.ctr.client.logger.WithFields(log.Fields{
			"container":  p.ctr.id,
			"event":      libcontainerdtypes.EventExit,
			"event-info": ei,
		}).Info("sending event")
		err := p.ctr.client.backend.ProcessEvent(p.ctr.id, libcontainerdtypes.EventExit, ei)
		if err != nil {
			p.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  p.ctr.id,
				"event":      libcontainerdtypes.EventExit,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
}
// Delete shuts down and releases the container's compute system. It fails
// with Conflict while a task is still running, and with NotFound if the
// container was already deleted.
func (ctr *container) Delete(context.Context) error {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()

	if ctr.hcsContainer == nil {
		return errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", ctr.id)))
	}

	// Check that there is no task currently running.
	if ctr.task != nil {
		select {
		case <-ctr.task.waitCh:
		default:
			return errors.WithStack(errdefs.Conflict(errors.New("container is not stopped")))
		}
	}

	var (
		logger = ctr.client.logger.WithFields(log.Fields{
			"container": ctr.id,
		})
		thisErr error
	)

	// Best-effort shutdown, then always close the handle; the last error
	// encountered wins.
	if err := ctr.shutdownContainer(); err != nil {
		logger.WithError(err).Warn("failed to shutdown container")
		thisErr = errors.Wrap(err, "failed to shutdown container")
	} else {
		logger.Debug("completed container shutdown")
	}

	if err := ctr.hcsContainer.Close(); err != nil {
		logger.WithError(err).Error("failed to clean hcs container resources")
		thisErr = errors.Wrap(err, "failed to terminate container")
	}

	// Mark the container as deleted so later operations get NotFound.
	ctr.hcsContainer = nil
	return thisErr
}
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
# Offer-enrollment state identifier.
ENROLLED = 'ENROLLED'

# Django-style choices: one (stored value, display label) pair per state.
STATES = ((ENROLLED,) * 2,)
/* Article body typography (Tailwind @apply composition). */
.content {
  @apply text-lg leading-relaxed;
}

/* Vertical rhythm for block-level children. */
.content p,
.content ul,
.content ol,
.content blockquote {
  @apply my-6;
}

.content a {
  @apply underline;
}

/* Lists: indent and marker styles, with nested lists tightened. */
.content ul,
.content ol {
  @apply pl-4;
}

.content ul {
  @apply list-disc;
}

.content ol {
  @apply list-decimal;
}

.content ul > li > ul,
.content ol > li > ol {
  @apply my-0 ml-4;
}

.content ul > li > ul {
  list-style: circle;
}

/* Heading scale: h2 > h3 > h4. */
.content h2 {
  @apply text-3xl mt-12 mb-4 leading-snug;
}

.content h3 {
  @apply text-2xl mt-8 mb-4 leading-snug;
}

.content h4 {
  @apply text-xl mt-6 mb-4 leading-snug;
}

/* Code blocks scroll horizontally instead of wrapping. */
.content pre {
  @apply whitespace-pre overflow-x-auto p-4 text-sm leading-tight border border-gray-400 bg-gray-100;
}

.content code {
  @apply text-sm;
}

.content figcaption {
  @apply text-center text-sm;
}

/* Quotes: left rule, tinted background, italic body with upright citations. */
.content blockquote {
  @apply border-l-4 border-gray-500 bg-gray-200 italic ml-0 py-4 px-6;
}

.content blockquote p {
  @apply mt-0;
}

.content blockquote cite {
  @apply not-italic;
}

.content audio {
  @apply w-full;
}
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for einsum op."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.platform import googletest
class EinsumOpTest(xla_test.XLATestCase):
  """Test cases for einsum op."""

  def _testUnary(self, op, inp, expected):
    """Verifies that unary 'op' produces 'expected' when fed input 'inp'.

    Runs `op` through a placeholder inside the XLA test scope so the einsum
    is compiled rather than executed by the regular TF runtime.
    """
    with self.session() as session:
      with self.test_scope():
        pinp = array_ops.placeholder(
            dtypes.as_dtype(inp.dtype), inp.shape, name='a')
        output = op(pinp)
      result = session.run(output, {pinp: inp})
      self.assertEqual(output.dtype, expected.dtype)
      # Loose bfloat16 tolerance: einsum reductions lose precision there.
      self.assertAllCloseAccordingToType(
          expected, result, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)

  def _testBinary(self, op, a, b, expected):
    """Verifies that binary 'op' produces 'expected' when fed 'a' and 'b'."""
    with self.session() as session:
      with self.test_scope():
        pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name='a')
        pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name='b')
        output = op(pa, pb)
      result = session.run(output, {pa: a, pb: b})
      self.assertAllCloseAccordingToType(result, expected, rtol=1e-3)

  def testMatMul(self):
    # 'ij,jk->ik' is plain matrix multiplication.
    for dtype in self.float_types:
      self._testBinary(
          lambda x, y: special_math_ops.einsum('ij,jk->ik', x, y),
          np.array([[-0.25]], dtype=dtype),
          np.array([[8]], dtype=dtype),
          expected=np.array([[-2]], dtype=dtype))

  def testImplicitForm(self):
    # No '->' output spec: repeated indices are summed over (full contraction).
    for dtype in self.float_types:
      self._testBinary(
          lambda x, y: special_math_ops.einsum('ijk,kji', x, y),
          np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
          np.array([[[1], [3], [2]], [[5], [6], [8]]], dtype=dtype),
          expected=np.array(128, dtype=dtype))

  def testReducedIndices(self):
    # All indices reduced away: matrix-vector product collapsed to a scalar.
    for dtype in self.float_types:
      self._testBinary(
          lambda x, y: special_math_ops.einsum('ij,j->', x, y),
          np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
          np.array([3, 2], dtype=dtype),
          expected=np.array(59, dtype=dtype))

  def testUnary(self):
    # 'ijk->kji' is a pure axis permutation (transpose), no reduction.
    for dtype in self.float_types:
      self._testUnary(
          lambda x: special_math_ops.einsum('ijk->kji', x),
          np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
          expected=np.array([[[1], [2], [6]], [[3], [5], [8]]], dtype=dtype))
if __name__ == '__main__':
  # Discover and run all test cases in this module.
  googletest.main()
from tornado import websocket, web, ioloop
import json
import motor
# Currently-connected websocket clients (appended in open, removed in on_close).
cl = []

# Maps direction strings received over the websocket to motor-driver actions.
# NOTE(review): assumes the project `motor` module exposes these callables
# (left/right/forward/reverse/stop/cleanup) — verify against motor.py.
commands = {"left": motor.left,
            "right": motor.right,
            "forward": motor.forward,
            "reverse": motor.reverse,
            "stop": motor.stop,
            "exit": motor.cleanup
            }
class IndexHandler(web.RequestHandler):
    """Serves the controller UI page."""

    def get(self):
        self.render("index.html")
class SocketHandler(websocket.WebSocketHandler):
    """WebSocket endpoint that relays direction messages to the motor driver.

    Messages are JSON objects of the form
    ``{"type": "command" | "direction", "value": <str>}``.
    """

    def check_origin(self, origin):
        # Accept cross-origin connections; the controller UI may be served
        # from a different host/port than this websocket.
        return True

    def open(self):
        # Register the connection so it can be cleaned up on close.
        if self not in cl:
            cl.append(self)
        self.sendMsg("Client Connected")

    def on_close(self):
        if self in cl:
            cl.remove(self)

    def on_message(self, message):
        """Dispatch one incoming message.

        Robustness fix: malformed JSON, a missing 'type' key, or an unknown
        direction used to raise (ValueError/KeyError) and kill the handler;
        such messages are now reported back to the client instead.
        """
        try:
            messageObject = json.loads(message)
            messageType = messageObject['type']
        except (ValueError, KeyError):
            self.sendMsg("Invalid message: %s" % message)
            return
        if messageType == "command":
            print("command")
        elif messageType == "direction":
            action = commands.get(messageObject.get('value'))
            if action is None:
                self.sendMsg("Unknown direction: %s" % messageObject.get('value'))
            else:
                action()

    def sendMsg(self, message):
        """Send a server-tagged message back to this client."""
        data = {"type": "server", "value": message}
        self.write_message(data)
# URL routing: index page at / and the websocket endpoint at /ws.
app = web.Application([
    (r'/', IndexHandler),
    (r'/ws', SocketHandler)
])
if __name__ == '__main__':
    # Listen on port 8888 and block on the IOLoop until interrupted.
    app.listen(8888)
    ioloop.IOLoop.instance().start()
import {Component} from '@angular/core';
import {RouterOutlet} from '@angular/router';
/**
 * Root component: renders a simple navigation bar plus the router outlet
 * where the active routed component is displayed.
 */
@Component({
  selector: 'app-root',
  template: `
    <nav>
      <a href="/">Home</a>
      |
      <a href="/user">User</a>
    </nav>
    <router-outlet />
  `,
  imports: [RouterOutlet],
})
export class App {}
/*
Unobtrusive JavaScript
https://github.com/rails/rails/blob/main/actionview/app/javascript
Released under the MIT license
*/
// CSS selectors identifying the elements rails-ujs attaches its behavior to.

// Links with confirm dialogs, non-GET methods, remote (ajax) behavior, or
// disable-on-click handling.
const linkClickSelector = "a[data-confirm], a[data-method], a[data-remote]:not([disabled]), a[data-disable-with], a[data-disable]";

// Remote/confirm buttons not associated with a form. The {selector, exclude}
// shape is understood by `matches` below.
const buttonClickSelector = {
  selector: "button[data-remote]:not([form]), button[data-confirm]:not([form])",
  exclude: "form button"
};

// Inputs whose value change triggers an ajax request.
const inputChangeSelector = "select[data-remote], input[data-remote], textarea[data-remote]";

// Forms handled by ujs; Turbo-enabled forms are explicitly excluded.
const formSubmitSelector = "form:not([data-turbo=true])";

// Submit-capable controls inside (or associated via [form] with) a non-Turbo form.
const formInputClickSelector = "form:not([data-turbo=true]) input[type=submit], form:not([data-turbo=true]) input[type=image], form:not([data-turbo=true]) button[type=submit], form:not([data-turbo=true]) button:not([type]), input[type=submit][form], input[type=image][form], button[type=submit][form], button[form]:not([type])";

// Controls eligible to be disabled before / re-enabled after a submission.
const formDisableSelector = "input[data-disable-with]:enabled, button[data-disable-with]:enabled, textarea[data-disable-with]:enabled, input[data-disable]:enabled, button[data-disable]:enabled, textarea[data-disable]:enabled";
const formEnableSelector = "input[data-disable-with]:disabled, button[data-disable-with]:disabled, textarea[data-disable-with]:disabled, input[data-disable]:disabled, button[data-disable]:disabled, textarea[data-disable]:disabled";

// Named, enabled file inputs (consumed elsewhere in this library).
const fileInputSelector = "input[name][type=file]:not([disabled])";

// Links and remote buttons supporting disable-with behavior.
const linkDisableSelector = "a[data-disable-with], a[data-disable]";

const buttonDisableSelector = "button[data-remote][data-disable-with], button[data-remote][data-disable]";
// Cached Content-Security-Policy nonce, read lazily from
// <meta name="csp-nonce"> so injected <script> tags can carry it.
let nonce = null;

const loadCSPNonce = () => {
  const metaTag = document.querySelector("meta[name=csp-nonce]");
  nonce = metaTag && metaTag.content;
  return nonce;
};

// Returns the cached nonce, loading it from the document on first use.
const cspNonce = () => {
  if (nonce) {
    return nonce;
  }
  return loadCSPNonce();
};
// Element#matches with legacy vendor-prefixed fallbacks for old browsers.
const m = Element.prototype.matches || Element.prototype.matchesSelector || Element.prototype.mozMatchesSelector || Element.prototype.msMatchesSelector || Element.prototype.oMatchesSelector || Element.prototype.webkitMatchesSelector;

// Tests `element` against `selector`, which is either a CSS selector string
// or an object {selector, exclude}: the element must match `selector` AND
// must not match `exclude` (used by buttonClickSelector above).
const matches = function(element, selector) {
  if (selector.exclude) {
    return m.call(element, selector.selector) && !m.call(element, selector.exclude);
  } else {
    return m.call(element, selector);
  }
};
// Per-element key/value store, kept on the element itself under an
// expando property so no external map is needed.
const EXPANDO = "_ujsData";

// Reads a stored value; undefined when the element has no store or no key.
const getData = (element, key) => {
  const store = element[EXPANDO];
  return store ? store[key] : undefined;
};

// Writes a value, creating the store on first use; returns the value.
const setData = (element, key, value) => {
  const store = element[EXPANDO] || (element[EXPANDO] = {});
  return store[key] = value;
};
// Queries the document for `selector` and returns matches as a real Array.
const $ = selector => Array.prototype.slice.call(document.querySelectorAll(selector));
// Returns true when `element` or any of its ancestors is contenteditable.
// Such elements manage their own enabled/disabled behavior, so ujs leaves
// them alone.
const isContentEditable = function(element) {
  let node = element;
  do {
    if (node.isContentEditable) {
      return true;
    }
    node = node.parentElement;
  } while (node);
  return false;
};
// Reads the CSRF token from <meta name="csrf-token">, if present.
const csrfToken = () => {
  const meta = document.querySelector("meta[name=csrf-token]");
  return meta && meta.content;
};

// Reads the CSRF parameter name from <meta name="csrf-param">, if present.
const csrfParam = () => {
  const meta = document.querySelector("meta[name=csrf-param]");
  return meta && meta.content;
};

// Adds the CSRF token header to an XHR about to be sent same-origin.
const CSRFProtection = xhr => {
  const token = csrfToken();
  if (token) {
    return xhr.setRequestHeader("X-CSRF-Token", token);
  }
};

// Rewrites the hidden CSRF input of every form on the page to the current
// token (needed after the page is cached/restored).
const refreshCSRFTokens = () => {
  const token = csrfToken();
  const param = csrfParam();
  if (token && param) {
    return $('form input[name="' + param + '"]').forEach((input => input.value = token));
  }
};
// Maps an ajax `dataType` option to the Accept header value to send.
const AcceptHeaders = {
  "*": "*/*",
  text: "text/plain",
  html: "text/html",
  xml: "application/xml, text/xml",
  json: "application/json, text/javascript",
  script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"
};
// Minimal jQuery-style ajax helper. Supported options: url, type, data,
// dataType, beforeSend, success, error, complete, crossDomain,
// withCredentials. Returns false (without sending) when beforeSend vetoes.
const ajax = options => {
  options = prepareOptions(options);
  var xhr = createXHR(options, (function() {
    // Parse the body according to its Content-Type, then route to
    // success (2xx) or error, and always finish with complete.
    const response = processResponse(xhr.response != null ? xhr.response : xhr.responseText, xhr.getResponseHeader("Content-Type"));
    if (Math.floor(xhr.status / 100) === 2) {
      if (typeof options.success === "function") {
        options.success(response, xhr.statusText, xhr);
      }
    } else {
      if (typeof options.error === "function") {
        options.error(response, xhr.statusText, xhr);
      }
    }
    return typeof options.complete === "function" ? options.complete(xhr, xhr.statusText) : undefined;
  }));
  // beforeSend may cancel the request by returning false.
  if (options.beforeSend && !options.beforeSend(xhr, options)) {
    return false;
  }
  if (xhr.readyState === XMLHttpRequest.OPENED) {
    return xhr.send(options.data);
  }
};
// Normalizes ajax options in place: defaults the URL, upper-cases the verb,
// folds GET data into the query string, and derives the Accept header from
// dataType (falling back to "*"). Returns the same options object.
var prepareOptions = function(options) {
  if (!options.url) {
    options.url = location.href;
  }
  options.type = options.type.toUpperCase();
  if (options.type === "GET" && options.data) {
    const joiner = options.url.indexOf("?") < 0 ? "?" : "&";
    options.url += joiner + options.data;
  }
  if (!(options.dataType in AcceptHeaders)) {
    options.dataType = "*";
  }
  let accept = AcceptHeaders[options.dataType];
  if (options.dataType !== "*") {
    accept += ", */*; q=0.01";
  }
  options.accept = accept;
  return options;
};
// Builds and opens an XMLHttpRequest from prepared options; `done` is
// invoked with the xhr once the request reaches DONE.
var createXHR = function(options, done) {
  const xhr = new XMLHttpRequest;
  xhr.open(options.type, options.url, true);
  xhr.setRequestHeader("Accept", options.accept);
  // String bodies are sent as form-encoded data.
  if (typeof options.data === "string") {
    xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8");
  }
  // Only same-origin requests get the XHR marker and the CSRF token header.
  if (!options.crossDomain) {
    xhr.setRequestHeader("X-Requested-With", "XMLHttpRequest");
    CSRFProtection(xhr);
  }
  xhr.withCredentials = !!options.withCredentials;
  xhr.onreadystatechange = function() {
    if (xhr.readyState === XMLHttpRequest.DONE) {
      return done(xhr);
    }
  };
  return xhr;
};
// Interprets a response body according to its Content-Type:
//  - json: parsed (parse failures leave the raw string);
//  - java/ecmascript: executed by injecting a nonce-carrying <script>;
//  - xml/html/svg: parsed into a Document (failures leave the raw string).
// Anything else is returned unchanged.
var processResponse = function(response, type) {
  if (typeof response === "string" && typeof type === "string") {
    if (type.match(/\bjson\b/)) {
      try {
        response = JSON.parse(response);
      } catch (error) {}
    } else if (type.match(/\b(?:java|ecma)script\b/)) {
      // Execute the returned script; the CSP nonce lets it pass a
      // nonce-based Content-Security-Policy. The tag is removed immediately
      // after insertion (it has already run).
      const script = document.createElement("script");
      script.setAttribute("nonce", cspNonce());
      script.text = response;
      document.head.appendChild(script).parentNode.removeChild(script);
    } else if (type.match(/\b(xml|html|svg)\b/)) {
      const parser = new DOMParser;
      // Strip any charset suffix ("text/html; charset=utf-8").
      type = type.replace(/;.+/, "");
      try {
        response = parser.parseFromString(response, type);
      } catch (error1) {}
    }
  }
  return response;
};
// Returns an element's href.
const href = element => element.href;

// Determines whether `url` points at a different origin than the current
// page, by letting anchor elements resolve/normalize both URLs and comparing
// protocol + host. Unparsable URLs are conservatively treated as cross-domain.
const isCrossDomain = function(url) {
  const originAnchor = document.createElement("a");
  originAnchor.href = location.href;
  const urlAnchor = document.createElement("a");
  try {
    urlAnchor.href = url;
    // Relative URLs (no protocol/host) are same-origin by definition.
    return !((!urlAnchor.protocol || urlAnchor.protocol === ":") && !urlAnchor.host || originAnchor.protocol + "//" + originAnchor.host === urlAnchor.protocol + "//" + urlAnchor.host);
  } catch (e) {
    return true;
  }
};
let preventDefault;

// CustomEvent polyfill for browsers where it is not a constructor (old IE).
// Built on document.createEvent, with preventDefault patched so that
// `defaultPrevented` reflects cancellation, matching the standard behavior.
let {CustomEvent: CustomEvent} = window;

if (typeof CustomEvent !== "function") {
  CustomEvent = function(event, params) {
    const evt = document.createEvent("CustomEvent");
    evt.initCustomEvent(event, params.bubbles, params.cancelable, params.detail);
    return evt;
  };
  CustomEvent.prototype = window.Event.prototype;
  ({preventDefault: preventDefault} = CustomEvent.prototype);
  CustomEvent.prototype.preventDefault = function() {
    const result = preventDefault.call(this);
    // Force defaultPrevented to true on cancelable events, since the
    // legacy implementation does not update it itself.
    if (this.cancelable && !this.defaultPrevented) {
      Object.defineProperty(this, "defaultPrevented", {
        get() {
          return true;
        }
      });
    }
    return result;
  };
}
// Dispatches a bubbling, cancelable custom event named `name` on `obj` with
// `data` as event.detail. Returns false when a listener called
// preventDefault (i.e. the action should be aborted).
const fire = (obj, name, data) => {
  const event = new CustomEvent(name, {
    bubbles: true,
    cancelable: true,
    detail: data
  });
  obj.dispatchEvent(event);
  return !event.defaultPrevented;
};

// Fully cancels an event — default action, bubbling, and any remaining
// handlers — after announcing it via "ujs:everythingStopped".
const stopEverything = e => {
  fire(e.target, "ujs:everythingStopped");
  e.preventDefault();
  e.stopPropagation();
  e.stopImmediatePropagation();
};
// Event delegation: listens for `eventType` on `element` and invokes
// `handler` with `this` bound to the closest ancestor of the event target
// that matches `selector`. A handler returning false cancels the event.
const delegate = (element, selector, eventType, handler) => element.addEventListener(eventType, (function(e) {
  let {target: target} = e;
  // Walk up from the event target to the first matching element.
  while (!!(target instanceof Element) && !matches(target, selector)) {
    target = target.parentNode;
  }
  if (target instanceof Element && handler.call(target, e) === false) {
    e.preventDefault();
    e.stopPropagation();
  }
}));
// Converts an array-like object (NodeList, HTMLCollection, arguments) into a
// real Array.
const toArray = e => [].slice.call(e);
// Serializes a form (or a single input) into a URL-encoded query string,
// mirroring browser form submission rules: disabled/unnamed controls and
// controls inside a disabled fieldset are skipped, only selected options and
// checked radios/checkboxes contribute. `additionalParam` may be either a
// {name, value} pair to append or a pre-encoded string fragment.
const serializeElement = (element, additionalParam) => {
  let inputs = [ element ];
  if (matches(element, "form")) {
    inputs = toArray(element.elements);
  }
  const params = [];
  inputs.forEach((function(input) {
    if (!input.name || input.disabled) {
      return;
    }
    if (matches(input, "fieldset[disabled] *")) {
      return;
    }
    if (matches(input, "select")) {
      // One name/value pair per selected option (multi-selects included).
      toArray(input.options).forEach((function(option) {
        if (option.selected) {
          params.push({
            name: input.name,
            value: option.value
          });
        }
      }));
    } else if (input.checked || [ "radio", "checkbox", "submit" ].indexOf(input.type) === -1) {
      params.push({
        name: input.name,
        value: input.value
      });
    }
  }));
  if (additionalParam) {
    params.push(additionalParam);
  }
  // {name, value} pairs get encoded; raw string params pass through as-is.
  return params.map((function(param) {
    if (param.name) {
      return `${encodeURIComponent(param.name)}=${encodeURIComponent(param.value)}`;
    } else {
      return param;
    }
  })).join("&");
};
const formElements = (form, selector) => {
if (matches(form, "form")) {
return toArray(form.elements).filter((el => matches(el, selector)));
} else {
return toArray(form.querySelectorAll(selector));
}
};
// Build the data-confirm handler against the Rails namespace object, so that
// an application override of Rails.confirm is picked up at call time.
const handleConfirmWithRails = rails => function(e) {
  if (!allowAction(this, rails)) {
    stopEverything(e);
  }
};
// Default confirmation dialog. `element` is unused here but part of the
// overridable signature.
const confirm = (message, element) => window.confirm(message);
// Run the data-confirm protocol for `element`:
//   1. fire "confirm" (cancelable gate);
//   2. ask rails.confirm(message, element) — a thrown exception counts as no;
//   3. fire "confirm:complete" with the answer (cancelable gate).
// The action proceeds only when the answer is truthy and step 3 was not
// prevented. Elements without data-confirm always proceed.
var allowAction = function(element, rails) {
  let callback;
  const message = element.getAttribute("data-confirm");
  if (!message) {
    return true;
  }
  let answer = false;
  if (fire(element, "confirm")) {
    try {
      answer = rails.confirm(message, element);
    } catch (error) {}
    callback = fire(element, "confirm:complete", [ answer ]);
  }
  return answer && callback;
};
// Swallow events that originate from a disabled element.
const handleDisabledElement = function(e) {
  const element = this;
  if (element.disabled) {
    stopEverything(e);
  }
};
// Re-enable an element previously disabled by disableElement. Accepts either
// the element itself or an event whose target is the element. XHR-redirect
// responses are ignored so controls stay disabled across the redirect.
const enableElement = e => {
  let element;
  if (e instanceof Event) {
    if (isXhrRedirect(e)) {
      return;
    }
    element = e.target;
  } else {
    element = e;
  }
  if (isContentEditable(element)) {
    return;
  }
  // Dispatch on element kind: link, single button/input, or whole form.
  if (matches(element, linkDisableSelector)) {
    return enableLinkElement(element);
  } else if (matches(element, buttonDisableSelector) || matches(element, formEnableSelector)) {
    return enableFormElement(element);
  } else if (matches(element, formSubmitSelector)) {
    return enableFormElements(element);
  }
};
// Disable a link, a single button/input, or every submit control of a form,
// applying any data-disable-with replacement text.
const disableElement = e => {
  const element = e instanceof Event ? e.target : e;
  if (isContentEditable(element)) {
    return;
  }
  if (matches(element, linkDisableSelector)) {
    return disableLinkElement(element);
  } else if (matches(element, buttonDisableSelector) || matches(element, formDisableSelector)) {
    return disableFormElement(element);
  } else if (matches(element, formSubmitSelector)) {
    return disableFormElements(element);
  }
};
// "Disable" a link: swap in the data-disable-with text (if any), block
// further clicks, and stash the original markup under ujs:enable-with.
// Idempotent via the ujs:disabled flag.
var disableLinkElement = function(element) {
  if (getData(element, "ujs:disabled")) {
    return;
  }
  const replacement = element.getAttribute("data-disable-with");
  if (replacement != null) {
    setData(element, "ujs:enable-with", element.innerHTML);
    element.innerHTML = replacement;
  }
  element.addEventListener("click", stopEverything);
  return setData(element, "ujs:disabled", true);
};
// Inverse of disableLinkElement: restore the stashed markup and clicks.
var enableLinkElement = function(element) {
  const originalText = getData(element, "ujs:enable-with");
  if (originalText != null) {
    element.innerHTML = originalText;
    setData(element, "ujs:enable-with", null);
  }
  element.removeEventListener("click", stopEverything);
  return setData(element, "ujs:disabled", null);
};
// Disable every disable-able control inside a form.
var disableFormElements = form => formElements(form, formDisableSelector).forEach(disableFormElement);
// Disable one form control; buttons swap innerHTML, inputs swap value.
var disableFormElement = function(element) {
  if (getData(element, "ujs:disabled")) {
    return;
  }
  const replacement = element.getAttribute("data-disable-with");
  if (replacement != null) {
    if (matches(element, "button")) {
      setData(element, "ujs:enable-with", element.innerHTML);
      element.innerHTML = replacement;
    } else {
      setData(element, "ujs:enable-with", element.value);
      element.value = replacement;
    }
  }
  element.disabled = true;
  return setData(element, "ujs:disabled", true);
};
// Re-enable every enable-able control inside a form.
var enableFormElements = form => formElements(form, formEnableSelector).forEach((element => enableFormElement(element)));
// Inverse of disableFormElement.
var enableFormElement = function(element) {
  const originalText = getData(element, "ujs:enable-with");
  if (originalText != null) {
    if (matches(element, "button")) {
      element.innerHTML = originalText;
    } else {
      element.value = originalText;
    }
    setData(element, "ujs:enable-with", null);
  }
  element.disabled = false;
  return setData(element, "ujs:disabled", null);
};
// True when an ajax:* event's XHR carried an X-Xhr-Redirect response header,
// i.e. the server answered with a script-driven redirect.
var isXhrRedirect = function(event) {
  const xhr = event.detail ? event.detail[0] : undefined;
  return xhr && xhr.getResponseHeader("X-Xhr-Redirect");
};
// Handle data-method links: synthesize a hidden POST form carrying the
// _method override (and the CSRF token for same-origin targets), submit it,
// and cancel the original click.
const handleMethodWithRails = rails => function(e) {
  const link = this;
  const method = link.getAttribute("data-method");
  if (!method) {
    return;
  }
  if (isContentEditable(this)) {
    return;
  }
  const href = rails.href(link);
  const csrfToken$1 = csrfToken();
  const csrfParam$1 = csrfParam();
  const form = document.createElement("form");
  let formContent = `<input name='_method' value='${method}' type='hidden' />`;
  // Only same-origin requests get the CSRF token.
  if (csrfParam$1 && csrfToken$1 && !isCrossDomain(href)) {
    formContent += `<input name='${csrfParam$1}' value='${csrfToken$1}' type='hidden' />`;
  }
  formContent += '<input type="submit" />';
  form.method = "post";
  form.action = href;
  form.target = link.target;
  form.innerHTML = formContent;
  form.style.display = "none";
  document.body.appendChild(form);
  // Click the hidden submit button so normal submit handling still runs.
  form.querySelector('[type="submit"]').click();
  stopEverything(e);
};
// True when the element opts into remote (XHR) handling via a data-remote
// attribute whose value is anything other than "false".
const isRemote = function(element) {
  const value = element.getAttribute("data-remote");
  return value != null && value !== "false";
};
// Handle data-remote elements (forms, buttons, inputs, links): collect the
// request method/url/payload from the element, then issue the request over
// XHR while firing the ajax:* lifecycle events. Built against the Rails
// namespace so rails.href overrides are honoured.
const handleRemoteWithRails = rails => function(e) {
  let data, method, url;
  const element = this;
  if (!isRemote(element)) {
    return true;
  }
  // Either pre-flight gate may veto the request; announce the stop.
  if (!fire(element, "ajax:before")) {
    fire(element, "ajax:stopped");
    return false;
  }
  if (isContentEditable(element)) {
    fire(element, "ajax:stopped");
    return false;
  }
  const withCredentials = element.getAttribute("data-with-credentials");
  const dataType = element.getAttribute("data-type") || "script";
  if (matches(element, formSubmitSelector)) {
    // Form submission: honour the clicked button's formmethod / formaction
    // overrides previously recorded by formSubmitButtonClick.
    const button = getData(element, "ujs:submit-button");
    method = getData(element, "ujs:submit-button-formmethod") || element.getAttribute("method") || "get";
    url = getData(element, "ujs:submit-button-formaction") || element.getAttribute("action") || location.href;
    // GET requests re-serialize all params, so drop any query string.
    if (method.toUpperCase() === "GET") {
      url = url.replace(/\?.*$/, "");
    }
    if (element.enctype === "multipart/form-data") {
      data = new FormData(element);
      if (button != null) {
        data.append(button.name, button.value);
      }
    } else {
      data = serializeElement(element, button);
    }
    // One-shot state: clear the recorded button for the next submission.
    setData(element, "ujs:submit-button", null);
    setData(element, "ujs:submit-button-formmethod", null);
    setData(element, "ujs:submit-button-formaction", null);
  } else if (matches(element, buttonClickSelector) || matches(element, inputChangeSelector)) {
    method = element.getAttribute("data-method");
    url = element.getAttribute("data-url");
    data = serializeElement(element, element.getAttribute("data-params"));
  } else {
    method = element.getAttribute("data-method");
    url = rails.href(element);
    data = element.getAttribute("data-params");
  }
  ajax({
    type: method || "GET",
    url: url,
    data: data,
    dataType: dataType,
    // Returning false from beforeSend cancels the request.
    beforeSend(xhr, options) {
      if (fire(element, "ajax:beforeSend", [ xhr, options ])) {
        return fire(element, "ajax:send", [ xhr ]);
      } else {
        fire(element, "ajax:stopped");
        return false;
      }
    },
    success(...args) {
      return fire(element, "ajax:success", args);
    },
    error(...args) {
      return fire(element, "ajax:error", args);
    },
    complete(...args) {
      return fire(element, "ajax:complete", args);
    },
    crossDomain: isCrossDomain(url),
    withCredentials: withCredentials != null && withCredentials !== "false"
  });
  stopEverything(e);
};
// Record on the owning form which submit button initiated the submission, so
// that the remote handler can serialize it and honour its formaction /
// formmethod / formnovalidate attributes.
const formSubmitButtonClick = function(e) {
  const button = this;
  const {form: form} = button;
  if (!form) {
    return;
  }
  // Only named buttons participate in the serialized payload.
  if (button.name) {
    setData(form, "ujs:submit-button", {
      name: button.name,
      value: button.value
    });
  }
  setData(form, "ujs:formnovalidate-button", button.formNoValidate);
  setData(form, "ujs:submit-button-formaction", button.getAttribute("formaction"));
  return setData(form, "ujs:submit-button-formmethod", button.getAttribute("formmethod"));
};
// Swallow clicks that should fall through to the browser instead of being
// handled by rails-ujs: non-primary mouse buttons, and meta/ctrl-clicks on
// plain GET links with no extra params (so "open in new tab" keeps working).
// Bound with `this` as the clicked element.
const preventInsignificantClick = function(e) {
  const httpMethod = (this.getAttribute("data-method") || "GET").toUpperCase();
  const extraParams = this.getAttribute("data-params");
  // Non-primary button (middle/right click): never ours to handle.
  if (e.button != null && e.button !== 0) {
    e.stopImmediatePropagation();
    return;
  }
  // Meta/ctrl-click on a plain GET link: let the browser open a new tab.
  if ((e.metaKey || e.ctrlKey) && httpMethod === "GET" && !extraParams) {
    e.stopImmediatePropagation();
  }
};
// Public namespace: every helper and selector rails-ujs exposes. Several
// entries (confirm, href, ajax, ...) are looked up through this object at
// call time, so applications may override them after load.
const Rails = {
  $: $,
  ajax: ajax,
  buttonClickSelector: buttonClickSelector,
  buttonDisableSelector: buttonDisableSelector,
  confirm: confirm,
  cspNonce: cspNonce,
  csrfToken: csrfToken,
  csrfParam: csrfParam,
  CSRFProtection: CSRFProtection,
  delegate: delegate,
  disableElement: disableElement,
  enableElement: enableElement,
  fileInputSelector: fileInputSelector,
  fire: fire,
  formElements: formElements,
  formEnableSelector: formEnableSelector,
  formDisableSelector: formDisableSelector,
  formInputClickSelector: formInputClickSelector,
  formSubmitButtonClick: formSubmitButtonClick,
  formSubmitSelector: formSubmitSelector,
  getData: getData,
  handleDisabledElement: handleDisabledElement,
  href: href,
  inputChangeSelector: inputChangeSelector,
  isCrossDomain: isCrossDomain,
  linkClickSelector: linkClickSelector,
  linkDisableSelector: linkDisableSelector,
  loadCSPNonce: loadCSPNonce,
  matches: matches,
  preventInsignificantClick: preventInsignificantClick,
  refreshCSRFTokens: refreshCSRFTokens,
  serializeElement: serializeElement,
  setData: setData,
  stopEverything: stopEverything
};
// The composite handlers close over the Rails object (not the bare
// functions) so that overriding e.g. Rails.confirm takes effect.
const handleConfirm = handleConfirmWithRails(Rails);
Rails.handleConfirm = handleConfirm;
const handleMethod = handleMethodWithRails(Rails);
Rails.handleMethod = handleMethod;
const handleRemote = handleRemoteWithRails(Rails);
Rails.handleRemote = handleRemote;
// Install all rails-ujs behaviour on the document via delegated listeners.
// Handler order per selector/event matters: disabled-check runs before
// confirm, which runs before disable/remote/method. Throws if called twice.
const start = function() {
  if (window._rails_loaded) {
    throw new Error("rails-ujs has already been loaded!");
  }
  // Back/forward-cache restores (pageshow) can resurrect a page whose
  // controls were left disabled by an in-flight submission; re-enable them.
  window.addEventListener("pageshow", (function() {
    $(formEnableSelector).forEach((function(el) {
      if (getData(el, "ujs:disabled")) {
        enableElement(el);
      }
    }));
    $(linkDisableSelector).forEach((function(el) {
      if (getData(el, "ujs:disabled")) {
        enableElement(el);
      }
    }));
  }));
  delegate(document, linkDisableSelector, "ajax:complete", enableElement);
  delegate(document, linkDisableSelector, "ajax:stopped", enableElement);
  delegate(document, buttonDisableSelector, "ajax:complete", enableElement);
  delegate(document, buttonDisableSelector, "ajax:stopped", enableElement);
  delegate(document, linkClickSelector, "click", preventInsignificantClick);
  delegate(document, linkClickSelector, "click", handleDisabledElement);
  delegate(document, linkClickSelector, "click", handleConfirm);
  delegate(document, linkClickSelector, "click", disableElement);
  delegate(document, linkClickSelector, "click", handleRemote);
  delegate(document, linkClickSelector, "click", handleMethod);
  delegate(document, buttonClickSelector, "click", preventInsignificantClick);
  delegate(document, buttonClickSelector, "click", handleDisabledElement);
  delegate(document, buttonClickSelector, "click", handleConfirm);
  delegate(document, buttonClickSelector, "click", disableElement);
  delegate(document, buttonClickSelector, "click", handleRemote);
  delegate(document, inputChangeSelector, "change", handleDisabledElement);
  delegate(document, inputChangeSelector, "change", handleConfirm);
  delegate(document, inputChangeSelector, "change", handleRemote);
  delegate(document, formSubmitSelector, "submit", handleDisabledElement);
  delegate(document, formSubmitSelector, "submit", handleConfirm);
  delegate(document, formSubmitSelector, "submit", handleRemote);
  // Disable the form slightly after submit so other submit handlers (and
  // serialization) still see the controls in their enabled state.
  delegate(document, formSubmitSelector, "submit", (e => setTimeout((() => disableElement(e)), 13)));
  delegate(document, formSubmitSelector, "ajax:send", disableElement);
  delegate(document, formSubmitSelector, "ajax:complete", enableElement);
  delegate(document, formInputClickSelector, "click", preventInsignificantClick);
  delegate(document, formInputClickSelector, "click", handleDisabledElement);
  delegate(document, formInputClickSelector, "click", handleConfirm);
  delegate(document, formInputClickSelector, "click", formSubmitButtonClick);
  document.addEventListener("DOMContentLoaded", refreshCSRFTokens);
  document.addEventListener("DOMContentLoaded", loadCSPNonce);
  return window._rails_loaded = true;
};
Rails.start = start;
// jQuery interop: expose the namespace and add CSRF protection to
// same-origin jQuery requests; refuse to coexist with the legacy jquery_ujs.
if (typeof jQuery !== "undefined" && jQuery && jQuery.ajax) {
  if (jQuery.rails) {
    throw new Error("If you load both jquery_ujs and rails-ujs, use rails-ujs only.");
  }
  jQuery.rails = Rails;
  jQuery.ajaxPrefilter((function(options, originalOptions, xhr) {
    if (!options.crossDomain) {
      return CSRFProtection(xhr);
    }
  }));
}
export { Rails as default };
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_account_analytic_line(osv.osv_memory):
    """Wizard that opens the analytic-entry list filtered by a date range."""
    _name = "project.account.analytic.line"
    _description = "Analytic Entries by line"
    _columns = {
        'from_date': fields.date('From'),
        'to_date': fields.date('To'),
    }

    def action_open_window(self, cr, uid, ids, context=None):
        """Open the account.analytic.line tree/form view restricted to the
        wizard's [from_date, to_date] interval (either bound is optional).

        :return: an ir.actions.act_window dictionary.
        """
        mod_obj = self.pool.get('ir.model.data')
        # Read only the fields this wizard actually uses.
        data = self.read(cr, uid, ids, ['from_date', 'to_date'],
                         context=context)[0]
        from_date = data['from_date']
        to_date = data['to_date']
        # Build the date domain from whichever bounds were supplied.
        domain = []
        if from_date and to_date:
            domain = [('date', '>=', from_date), ('date', '<=', to_date)]
        elif from_date:
            domain = [('date', '>=', from_date)]
        elif to_date:
            domain = [('date', '<=', to_date)]
        result = mod_obj.get_object_reference(
            cr, uid, 'account', 'view_account_analytic_line_filter')
        # get_object_reference() returns a (model, res_id) tuple: the search
        # view id is the integer second element. The previous code subscripted
        # that integer with ['res_id'], which raised a TypeError at runtime.
        search_view_id = result and result[1] or False
        return {
            'name': _('Analytic Entries by line'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.analytic.line',
            'type': 'ir.actions.act_window',
            'domain': domain,
            'search_view_id': search_view_id,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Macroblock analysis
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "src/dec/common_dec.h"
#include "src/dsp/dsp.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/thread_utils.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#include "src/webp/types.h"
#define MAX_ITERS_K_MEANS 6
//------------------------------------------------------------------------------
// Smooth the segment map by replacing isolated block by the majority of its
// neighbours.
// Two passes over the interior macroblocks: pass one writes each block's
// majority segment into a scratch map (so every decision reads the original
// assignments), pass two copies the scratch map back. A block is reassigned
// only when at least 5 of its 8 neighbours agree. Border macroblocks are
// never modified. Silently does nothing if the scratch allocation fails.
static void SmoothSegmentMap(VP8Encoder* const enc) {
  int n, x, y;
  const int w = enc->mb_w;
  const int h = enc->mb_h;
  const int majority_cnt_3_x_3_grid = 5;
  uint8_t* const tmp = (uint8_t*)WebPSafeMalloc(w * h, sizeof(*tmp));
  assert((uint64_t)(w * h) == (uint64_t)w * h);  // no overflow, as per spec
  if (tmp == NULL) return;
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      int cnt[NUM_MB_SEGMENTS] = { 0 };
      const VP8MBInfo* const mb = &enc->mb_info[x + w * y];
      int majority_seg = mb->segment;
      // Check the 8 neighbouring segment values.
      cnt[mb[-w - 1].segment]++;  // top-left
      cnt[mb[-w + 0].segment]++;  // top
      cnt[mb[-w + 1].segment]++;  // top-right
      cnt[mb[   - 1].segment]++;  // left
      cnt[mb[   + 1].segment]++;  // right
      cnt[mb[ w - 1].segment]++;  // bottom-left
      cnt[mb[ w + 0].segment]++;  // bottom
      cnt[mb[ w + 1].segment]++;  // bottom-right
      for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
        if (cnt[n] >= majority_cnt_3_x_3_grid) {
          majority_seg = n;
          break;
        }
      }
      tmp[x + y * w] = majority_seg;
    }
  }
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      VP8MBInfo* const mb = &enc->mb_info[x + w * y];
      mb->segment = tmp[x + y * w];
    }
  }
  WebPSafeFree(tmp);
}
//------------------------------------------------------------------------------
// set segment susceptibility 'alpha' / 'beta'
// Clamp 'v' to the inclusive range [m, M].
static WEBP_INLINE int clip(int v, int m, int M) {
  if (v < m) return m;
  if (v > M) return M;
  return v;
}
// Translate the k-means segment centers into the encoder's per-segment
// susceptibility knobs: 'alpha' is each center relative to the weighted
// mid-point 'mid' (scaled and clipped to [-127, 127]), 'beta' relative to
// the smallest center (scaled and clipped to [0, 255]).
static void SetSegmentAlphas(VP8Encoder* const enc,
                             const int centers[NUM_MB_SEGMENTS],
                             int mid) {
  const int nb = enc->segment_hdr.num_segments;
  int min = centers[0], max = centers[0];
  int n;
  if (nb > 1) {
    for (n = 0; n < nb; ++n) {
      if (min > centers[n]) min = centers[n];
      if (max < centers[n]) max = centers[n];
    }
  }
  if (max == min) max = min + 1;  // avoid a zero denominator below
  assert(mid <= max && mid >= min);
  for (n = 0; n < nb; ++n) {
    const int alpha = 255 * (centers[n] - mid) / (max - min);
    const int beta = 255 * (centers[n] - min) / (max - min);
    enc->dqm[n].alpha = clip(alpha, -127, 127);
    enc->dqm[n].beta = clip(beta, 0, 255);
  }
}
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
#define MAX_ALPHA 255                // 8b of precision for susceptibilities.
#define ALPHA_SCALE (2 * MAX_ALPHA)  // scaling factor for alpha.
#define DEFAULT_ALPHA (-1)
#define IS_BETTER_ALPHA(alpha, best_alpha) ((alpha) > (best_alpha))
// Invert an accumulated susceptibility onto the final [0, MAX_ALPHA] scale
// (a low raw alpha means "easy to compress" and maps to a high final value).
static int FinalAlphaValue(int alpha) {
  alpha = MAX_ALPHA - alpha;
  return clip(alpha, 0, MAX_ALPHA);
}
// Reduce a DCT-coefficient histogram to one susceptibility estimate: the
// ratio of the last non-zero bin index to the peak bin value, scaled by
// ALPHA_SCALE (0 when the histogram is essentially empty).
static int GetAlpha(const VP8Histogram* const histo) {
  // 'alpha' will later be clipped to [0..MAX_ALPHA] range, clamping outer
  // values which happen to be mostly noise. This leaves the maximum precision
  // for handling the useful small values which contribute most.
  const int max_value = histo->max_value;
  const int last_non_zero = histo->last_non_zero;
  const int alpha =
      (max_value > 1) ? ALPHA_SCALE * last_non_zero / max_value : 0;
  return alpha;
}
// Reset a histogram to its neutral starting state before collection.
static void InitHistogram(VP8Histogram* const histo) {
  histo->max_value = 0;
  histo->last_non_zero = 1;
}
//------------------------------------------------------------------------------
// Simplified k-Means, to assign Nb segments based on alpha-histogram
// 'alphas' is a histogram of per-macroblock susceptibilities (one count per
// macroblock). Runs up to MAX_ITERS_K_MEANS rounds of 1-D k-means over the
// non-empty bins, then maps every macroblock onto its centroid's segment,
// optionally smooths the segment map, and derives the per-segment biases.
static void AssignSegments(VP8Encoder* const enc,
                           const int alphas[MAX_ALPHA + 1]) {
  // 'num_segments' is previously validated and <= NUM_MB_SEGMENTS, but an
  // explicit check is needed to avoid spurious warning about 'n + 1' exceeding
  // array bounds of 'centers' with some compilers (noticed with gcc-4.9).
  const int nb = (enc->segment_hdr.num_segments < NUM_MB_SEGMENTS) ?
                 enc->segment_hdr.num_segments : NUM_MB_SEGMENTS;
  int centers[NUM_MB_SEGMENTS];
  int weighted_average = 0;
  int map[MAX_ALPHA + 1];
  int a, n, k;
  int min_a = 0, max_a = MAX_ALPHA, range_a;
  // 'int' type is ok for histo, and won't overflow
  int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];
  assert(nb >= 1);
  assert(nb <= NUM_MB_SEGMENTS);
  // bracket the input
  for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
  min_a = n;
  for (n = MAX_ALPHA; n > min_a && alphas[n] == 0; --n) {}
  max_a = n;
  range_a = max_a - min_a;
  // Spread initial centers evenly
  for (k = 0, n = 1; k < nb; ++k, n += 2) {
    assert(n < 2 * nb);
    centers[k] = min_a + (n * range_a) / (2 * nb);
  }
  for (k = 0; k < MAX_ITERS_K_MEANS; ++k) {  // few iters are enough
    int total_weight;
    int displaced;
    // Reset stats
    for (n = 0; n < nb; ++n) {
      accum[n] = 0;
      dist_accum[n] = 0;
    }
    // Assign nearest center for each 'a'
    n = 0;    // track the nearest center for current 'a'
    // Centers are kept sorted, so the nearest one only moves forward as 'a'
    // increases — a single sweep suffices.
    for (a = min_a; a <= max_a; ++a) {
      if (alphas[a]) {
        while (n + 1 < nb && abs(a - centers[n + 1]) < abs(a - centers[n])) {
          n++;
        }
        map[a] = n;
        // accumulate contribution into best centroid
        dist_accum[n] += a * alphas[a];
        accum[n] += alphas[a];
      }
    }
    // All point are classified. Move the centroids to the
    // center of their respective cloud.
    displaced = 0;
    weighted_average = 0;
    total_weight = 0;
    for (n = 0; n < nb; ++n) {
      if (accum[n]) {
        const int new_center = (dist_accum[n] + accum[n] / 2) / accum[n];
        displaced += abs(centers[n] - new_center);
        centers[n] = new_center;
        weighted_average += new_center * accum[n];
        total_weight += accum[n];
      }
    }
    // NOTE(review): assumes at least one non-empty alphas[] bin so that
    // total_weight > 0 here (holds when every macroblock contributed one
    // sample) — verify before reusing with arbitrary histograms.
    weighted_average = (weighted_average + total_weight / 2) / total_weight;
    if (displaced < 5) break;   // no need to keep on looping...
  }
  // Map each original value to the closest centroid
  for (n = 0; n < enc->mb_w * enc->mb_h; ++n) {
    VP8MBInfo* const mb = &enc->mb_info[n];
    const int alpha = mb->alpha;
    mb->segment = map[alpha];
    mb->alpha = centers[map[alpha]];  // for the record.
  }
  if (nb > 1) {
    const int smooth = (enc->config->preprocessing & 1);
    if (smooth) SmoothSegmentMap(enc);
  }
  SetSegmentAlphas(enc, centers, weighted_average);  // pick some alphas.
}
//------------------------------------------------------------------------------
// Macroblock analysis: collect histogram for each mode, deduce the maximal
// susceptibility and set best modes for this macroblock.
// Segment assignment is done later.
// Number of modes to inspect for 'alpha' evaluation. We don't need to test all
// the possible modes during the analysis phase: we risk falling into a local
// optimum, or be subject to boundary effect
#define MAX_INTRA16_MODE 2
#define MAX_INTRA4_MODE 2
#define MAX_UV_MODE 2
// Try the first MAX_INTRA16_MODE luma prediction modes, keep the one with
// the largest histogram-based susceptibility, set it on the iterator and
// return the winning alpha.
static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
  const int max_mode = MAX_INTRA16_MODE;
  int mode;
  int best_alpha = DEFAULT_ALPHA;
  int best_mode = 0;
  VP8MakeLuma16Preds(it);
  for (mode = 0; mode < max_mode; ++mode) {
    VP8Histogram histo;
    int alpha;
    InitHistogram(&histo);
    VP8CollectHistogram(it->yuv_in + Y_OFF_ENC,
                        it->yuv_p + VP8I16ModeOffsets[mode],
                        0, 16, &histo);
    alpha = GetAlpha(&histo);
    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
      best_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntra16Mode(it, best_mode);
  return best_alpha;
}
// Fast mode decision for low-effort methods: compare the spread of the
// sixteen 4x4 DC means against a quality-dependent threshold. Flat
// macroblocks get intra16 DC prediction, textured ones a uniform intra4 DC
// set. Always returns an alpha of 0.
static int FastMBAnalyze(VP8EncIterator* const it) {
  // Empirical cut-off value, should be around 16 (~=block size). We use the
  // [8-17] range and favor intra4 at high quality, intra16 for low quality.
  const int q = (int)it->enc->config->quality;
  const uint32_t kThreshold = 8 + (17 - 8) * q / 100;
  int k;
  uint32_t dc[16], m, m2;
  for (k = 0; k < 16; k += 4) {
    VP8Mean16x4(it->yuv_in + Y_OFF_ENC + k * BPS, &dc[k]);
  }
  // m / m2: first and second moments of the sixteen DC means.
  for (m = 0, m2 = 0, k = 0; k < 16; ++k) {
    m += dc[k];
    m2 += dc[k] * dc[k];
  }
  if (kThreshold * m2 < m * m) {
    VP8SetIntra16Mode(it, 0);   // DC16
  } else {
    const uint8_t modes[16] = { 0 };   // DC4
    VP8SetIntra4Mode(it, modes);
  }
  return 0;
}
// Try the first MAX_UV_MODE chroma prediction modes. Returns the LARGEST
// alpha seen (for the susceptibility mix), but selects the mode with the
// SMALLEST alpha — the best predictor leaves the least residual energy.
static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
  int best_alpha = DEFAULT_ALPHA;
  int smallest_alpha = 0;
  int best_mode = 0;
  const int max_mode = MAX_UV_MODE;
  int mode;
  VP8MakeChroma8Preds(it);
  for (mode = 0; mode < max_mode; ++mode) {
    VP8Histogram histo;
    int alpha;
    InitHistogram(&histo);
    VP8CollectHistogram(it->yuv_in + U_OFF_ENC,
                        it->yuv_p + VP8UVModeOffsets[mode],
                        16, 16 + 4 + 4, &histo);
    alpha = GetAlpha(&histo);
    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
      best_alpha = alpha;
    }
    // The best prediction mode tends to be the one with the smallest alpha.
    if (mode == 0 || alpha < smallest_alpha) {
      smallest_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntraUVMode(it, best_mode);
  return best_alpha;
}
// Analyze one macroblock: choose provisional intra modes, combine the luma
// and chroma susceptibilities into a single value, record it in the
// alphas[] histogram and in the running *alpha / *uv_alpha accumulators.
static void MBAnalyze(VP8EncIterator* const it,
                      int alphas[MAX_ALPHA + 1],
                      int* const alpha, int* const uv_alpha) {
  const VP8Encoder* const enc = it->enc;
  int best_alpha, best_uv_alpha;
  VP8SetIntra16Mode(it, 0);  // default: Intra16, DC_PRED
  VP8SetSkip(it, 0);         // not skipped
  VP8SetSegment(it, 0);      // default segment, spec-wise.
  if (enc->method <= 1) {
    best_alpha = FastMBAnalyze(it);
  } else {
    best_alpha = MBAnalyzeBestIntra16Mode(it);
  }
  best_uv_alpha = MBAnalyzeBestUVMode(it);
  // Final susceptibility mix
  best_alpha = (3 * best_alpha + best_uv_alpha + 2) >> 2;
  best_alpha = FinalAlphaValue(best_alpha);
  alphas[best_alpha]++;
  it->mb->alpha = best_alpha;   // for later remapping.
  // Accumulate for later complexity analysis.
  *alpha += best_alpha;   // mixed susceptibility (not just luma)
  *uv_alpha += best_uv_alpha;
}
// Reset one macroblock info to its neutral defaults.
static void DefaultMBInfo(VP8MBInfo* const mb) {
  mb->type = 1;     // I16x16
  mb->uv_mode = 0;
  mb->skip = 0;     // not skipped
  mb->segment = 0;  // default segment
  mb->alpha = 0;
}
//------------------------------------------------------------------------------
// Main analysis loop:
// Collect all susceptibilities for each macroblock and record their
// distribution in alphas[]. Segments is assigned a-posteriori, based on
// this histogram.
// We also pick an intra16 prediction mode, which shouldn't be considered
// final except for fast-encode settings. We can also pick some intra4 modes
// and decide intra4/intra16, but that's usually almost always a bad choice at
// this stage.
// Put every macroblock and the segment/encoder susceptibilities back to
// defaults; used when segment analysis is skipped entirely. Reports the
// analysis pass' 20% share of progress in one step.
static void ResetAllMBInfo(VP8Encoder* const enc) {
  int n;
  for (n = 0; n < enc->mb_w * enc->mb_h; ++n) {
    DefaultMBInfo(&enc->mb_info[n]);
  }
  // Default susceptibilities.
  enc->dqm[0].alpha = 0;
  enc->dqm[0].beta = 0;
  // Note: we can't compute this 'alpha' / 'uv_alpha' -> set to default value.
  enc->alpha = 0;
  enc->uv_alpha = 0;
  WebPReportProgress(enc->pic, enc->percent + 20, &enc->percent);
}
// struct used to collect job result
typedef struct {
  WebPWorker worker;           // worker-thread handle
  int alphas[MAX_ALPHA + 1];   // histogram of macroblock susceptibilities
  int alpha, uv_alpha;         // accumulated mixed / chroma susceptibility
  VP8EncIterator it;           // iteration state over this job's rows
  int delta_progress;          // progress units this job is allowed to report
} SegmentJob;
// main work call
// Worker hook: run MBAnalyze on every macroblock left in the job's iterator,
// accumulating the results into the SegmentJob. arg1 is the SegmentJob,
// arg2 its iterator. Returns 0 when the user's progress hook aborts.
static int DoSegmentsJob(void* arg1, void* arg2) {
  SegmentJob* const job = (SegmentJob*)arg1;
  VP8EncIterator* const it = (VP8EncIterator*)arg2;
  int ok = 1;
  if (!VP8IteratorIsDone(it)) {
    // Aligned scratch buffer for the imported source samples.
    uint8_t tmp[32 + WEBP_ALIGN_CST];
    uint8_t* const scratch = (uint8_t*)WEBP_ALIGN(tmp);
    do {
      // Let's pretend we have perfect lossless reconstruction.
      VP8IteratorImport(it, scratch);
      MBAnalyze(it, job->alphas, &job->alpha, &job->uv_alpha);
      ok = VP8IteratorProgress(it, job->delta_progress);
    } while (ok && VP8IteratorNext(it));
  }
  return ok;
}
#ifdef WEBP_USE_THREAD
// Fold the side job's histogram and accumulators into the main job's.
static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) {
  int i;
  for (i = 0; i <= MAX_ALPHA; ++i) dst->alphas[i] += src->alphas[i];
  dst->alpha += src->alpha;
  dst->uv_alpha += src->uv_alpha;
}
#endif
// initialize the job struct with some tasks to perform
// Configure 'job' to analyze macroblock rows [start_row, end_row): hook up
// the worker callbacks, position the iterator and zero the accumulators.
static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job,
                           int start_row, int end_row) {
  WebPGetWorkerInterface()->Init(&job->worker);
  job->worker.data1 = job;
  job->worker.data2 = &job->it;
  job->worker.hook = DoSegmentsJob;
  VP8IteratorInit(enc, &job->it);
  VP8IteratorSetRow(&job->it, start_row);
  VP8IteratorSetCountDown(&job->it, (end_row - start_row) * enc->mb_w);
  memset(job->alphas, 0, sizeof(job->alphas));
  job->alpha = 0;
  job->uv_alpha = 0;
  // only one of both jobs can record the progress, since we don't
  // expect the user's hook to be multi-thread safe
  job->delta_progress = (start_row == 0) ? 20 : 0;
}
// main entry point
// Run the analysis pass. When segmentation is needed (multiple segments,
// jpeg-size emulation, or method <= 1 which requires the prediction modes),
// macroblocks are analyzed — optionally split over two threads — and
// segments assigned; otherwise everything is reset to one default segment.
// Returns 0 (and records VP8_ENC_ERROR_OUT_OF_MEMORY) on failure.
int VP8EncAnalyze(VP8Encoder* const enc) {
  int ok = 1;
  const int do_segments =
      enc->config->emulate_jpeg_size ||   // We need the complexity evaluation.
      (enc->segment_hdr.num_segments > 1) ||
      (enc->method <= 1);   // for method 0 - 1, we need preds[] to be filled.
  if (do_segments) {
    const int last_row = enc->mb_h;
    const int total_mb = last_row * enc->mb_w;
#ifdef WEBP_USE_THREAD
    // We give a little more than a half work to the main thread.
    const int split_row = (9 * last_row + 15) >> 4;
    const int kMinSplitRow = 2;   // minimal rows needed for mt to be worth it
    const int do_mt = (enc->thread_level > 0) && (split_row >= kMinSplitRow);
#else
    const int do_mt = 0;
#endif
    const WebPWorkerInterface* const worker_interface =
        WebPGetWorkerInterface();
    SegmentJob main_job;
    if (do_mt) {
#ifdef WEBP_USE_THREAD
      SegmentJob side_job;
      // Note the use of '&' instead of '&&' because we must call the functions
      // no matter what.
      InitSegmentJob(enc, &main_job, 0, split_row);
      InitSegmentJob(enc, &side_job, split_row, last_row);
      // we don't need to call Reset() on main_job.worker, since we're calling
      // WebPWorkerExecute() on it
      ok &= worker_interface->Reset(&side_job.worker);
      // launch the two jobs in parallel
      if (ok) {
        worker_interface->Launch(&side_job.worker);
        worker_interface->Execute(&main_job.worker);
        ok &= worker_interface->Sync(&side_job.worker);
        ok &= worker_interface->Sync(&main_job.worker);
      }
      worker_interface->End(&side_job.worker);
      if (ok) MergeJobs(&side_job, &main_job);   // merge results together
#endif  // WEBP_USE_THREAD
    } else {
      // Even for single-thread case, we use the generic Worker tools.
      InitSegmentJob(enc, &main_job, 0, last_row);
      worker_interface->Execute(&main_job.worker);
      ok &= worker_interface->Sync(&main_job.worker);
    }
    worker_interface->End(&main_job.worker);
    if (ok) {
      // Average the accumulated susceptibilities over all macroblocks.
      enc->alpha = main_job.alpha / total_mb;
      enc->uv_alpha = main_job.uv_alpha / total_mb;
      AssignSegments(enc, main_job.alphas);
    }
  } else {   // Use only one default segment.
    ResetAllMBInfo(enc);
  }
  if (!ok) {
    return WebPEncodingSetError(enc->pic,
                                VP8_ENC_ERROR_OUT_OF_MEMORY);   // imprecise
  }
  return ok;
}
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None, key=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the right of the rightmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    The optional *key* function, applied to x and to the elements of a,
    customizes the sort order (pass it by keyword).

    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Compare in "key space" when a key function is given.
    x_key = x if key is None else key(x)
    while lo < hi:
        mid = (lo + hi) // 2
        mid_key = a[mid] if key is None else key(a[mid])
        if x_key < mid_key:
            hi = mid
        else:
            lo = mid + 1
    a.insert(lo, x)
insort = insort_right   # backward compatibility
def bisect_right(a, x, lo=0, hi=None, key=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    The optional *key* function is applied to the elements of a but NOT to
    x (x must already be a comparison key); pass it by keyword.

    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        mid_key = a[mid] if key is None else key(a[mid])
        if x < mid_key:
            hi = mid
        else:
            lo = mid + 1
    return lo
bisect = bisect_right   # backward compatibility
def insort_left(a, x, lo=0, hi=None, key=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the left of the leftmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    The optional *key* function, applied to x and to the elements of a,
    customizes the sort order (pass it by keyword).

    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Compare in "key space" when a key function is given.
    x_key = x if key is None else key(x)
    while lo < hi:
        mid = (lo + hi) // 2
        mid_key = a[mid] if key is None else key(a[mid])
        if mid_key < x_key:
            lo = mid + 1
        else:
            hi = mid
    a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None, key=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x.  So if x already appears in the list, a.insert(x) will
    insert just before the leftmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    The optional *key* function is applied to the elements of a but NOT to
    x (x must already be a comparison key); pass it by keyword.

    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        mid_key = a[mid] if key is None else key(a[mid])
        if mid_key < x:
            lo = mid + 1
        else:
            hi = mid
    return lo
# Overwrite above definitions with a fast C implementation
try:
    from _bisect import *
except ImportError:
    # The pure-Python fallbacks defined above remain in effect when the C
    # accelerator module is unavailable (e.g. on alternative interpreters).
    pass
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for django-paypal's IPN app.

    Creates the `paypal_ipn` table, one column per field PayPal may post
    in an Instant Payment Notification, plus local bookkeeping fields
    (`flag*`, `query`, `response`, timestamps, `from_view`).

    NOTE: this is a historical migration snapshot — do not edit field
    definitions; Django replays it verbatim. The b'...' literals reflect
    the Python 2 era it was generated under.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PayPalIPN',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('business', models.CharField(help_text=b'Email where the money was sent.', max_length=127, blank=True)),
                ('charset', models.CharField(max_length=32, blank=True)),
                ('custom', models.CharField(max_length=255, blank=True)),
                ('notify_version', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('parent_txn_id', models.CharField(max_length=19, verbose_name=b'Parent Transaction ID', blank=True)),
                ('receiver_email', models.EmailField(max_length=127, blank=True)),
                ('receiver_id', models.CharField(max_length=127, blank=True)),
                ('residence_country', models.CharField(max_length=2, blank=True)),
                ('test_ipn', models.BooleanField(default=False)),
                ('txn_id', models.CharField(help_text=b'PayPal transaction ID.', max_length=19, verbose_name=b'Transaction ID', db_index=True, blank=True)),
                ('txn_type', models.CharField(help_text=b'PayPal transaction type.', max_length=128, verbose_name=b'Transaction Type', blank=True)),
                ('verify_sign', models.CharField(max_length=255, blank=True)),
                ('address_country', models.CharField(max_length=64, blank=True)),
                ('address_city', models.CharField(max_length=40, blank=True)),
                ('address_country_code', models.CharField(help_text=b'ISO 3166', max_length=64, blank=True)),
                ('address_name', models.CharField(max_length=128, blank=True)),
                ('address_state', models.CharField(max_length=40, blank=True)),
                ('address_status', models.CharField(max_length=11, blank=True)),
                ('address_street', models.CharField(max_length=200, blank=True)),
                ('address_zip', models.CharField(max_length=20, blank=True)),
                ('contact_phone', models.CharField(max_length=20, blank=True)),
                ('first_name', models.CharField(max_length=64, blank=True)),
                ('last_name', models.CharField(max_length=64, blank=True)),
                ('payer_business_name', models.CharField(max_length=127, blank=True)),
                ('payer_email', models.CharField(max_length=127, blank=True)),
                ('payer_id', models.CharField(max_length=13, blank=True)),
                ('auth_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('auth_exp', models.CharField(max_length=28, blank=True)),
                ('auth_id', models.CharField(max_length=19, blank=True)),
                ('auth_status', models.CharField(max_length=9, blank=True)),
                ('exchange_rate', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=16, blank=True)),
                ('invoice', models.CharField(max_length=127, blank=True)),
                ('item_name', models.CharField(max_length=127, blank=True)),
                ('item_number', models.CharField(max_length=127, blank=True)),
                ('mc_currency', models.CharField(default=b'USD', max_length=32, blank=True)),
                ('mc_fee', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('mc_gross', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('mc_handling', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('mc_shipping', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('memo', models.CharField(max_length=255, blank=True)),
                ('num_cart_items', models.IntegerField(default=0, null=True, blank=True)),
                ('option_name1', models.CharField(max_length=64, blank=True)),
                ('option_name2', models.CharField(max_length=64, blank=True)),
                ('payer_status', models.CharField(max_length=10, blank=True)),
                ('payment_date', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('payment_gross', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('payment_status', models.CharField(max_length=17, blank=True)),
                ('payment_type', models.CharField(max_length=7, blank=True)),
                ('pending_reason', models.CharField(max_length=14, blank=True)),
                ('protection_eligibility', models.CharField(max_length=32, blank=True)),
                ('quantity', models.IntegerField(default=1, null=True, blank=True)),
                ('reason_code', models.CharField(max_length=15, blank=True)),
                ('remaining_settle', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('settle_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('settle_currency', models.CharField(max_length=32, blank=True)),
                ('shipping', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('shipping_method', models.CharField(max_length=255, blank=True)),
                ('tax', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('transaction_entity', models.CharField(max_length=7, blank=True)),
                ('auction_buyer_id', models.CharField(max_length=64, blank=True)),
                ('auction_closing_date', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('auction_multi_item', models.IntegerField(default=0, null=True, blank=True)),
                ('for_auction', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('amount_per_cycle', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('initial_payment_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('next_payment_date', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('outstanding_balance', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('payment_cycle', models.CharField(max_length=32, blank=True)),
                ('period_type', models.CharField(max_length=32, blank=True)),
                ('product_name', models.CharField(max_length=128, blank=True)),
                ('product_type', models.CharField(max_length=128, blank=True)),
                ('profile_status', models.CharField(max_length=32, blank=True)),
                ('recurring_payment_id', models.CharField(max_length=128, blank=True)),
                ('rp_invoice_id', models.CharField(max_length=127, blank=True)),
                ('time_created', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('amount1', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('amount2', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('amount3', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('mc_amount1', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('mc_amount2', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('mc_amount3', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('password', models.CharField(max_length=24, blank=True)),
                ('period1', models.CharField(max_length=32, blank=True)),
                ('period2', models.CharField(max_length=32, blank=True)),
                ('period3', models.CharField(max_length=32, blank=True)),
                ('reattempt', models.CharField(max_length=1, blank=True)),
                ('recur_times', models.IntegerField(default=0, null=True, blank=True)),
                ('recurring', models.CharField(max_length=1, blank=True)),
                ('retry_at', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('subscr_date', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('subscr_effective', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('subscr_id', models.CharField(max_length=19, blank=True)),
                ('username', models.CharField(max_length=64, blank=True)),
                ('case_creation_date', models.DateTimeField(help_text=b'HH:MM:SS DD Mmm YY, YYYY PST', null=True, blank=True)),
                ('case_id', models.CharField(max_length=14, blank=True)),
                ('case_type', models.CharField(max_length=24, blank=True)),
                ('receipt_id', models.CharField(max_length=64, blank=True)),
                ('currency_code', models.CharField(default=b'USD', max_length=32, blank=True)),
                ('handling_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
                ('transaction_subject', models.CharField(max_length=255, blank=True)),
                # NOTE(review): IPAddressField was removed in Django 1.9
                # (GenericIPAddressField replaces it) — this migration will not
                # load on modern Django without a compatibility shim; verify
                # against the project's Django version before upgrading.
                ('ipaddress', models.IPAddressField(blank=True)),
                ('flag', models.BooleanField(default=False)),
                ('flag_code', models.CharField(max_length=16, blank=True)),
                ('flag_info', models.TextField(blank=True)),
                ('query', models.TextField(blank=True)),
                ('response', models.TextField(blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('from_view', models.CharField(max_length=6, null=True, blank=True)),
            ],
            options={
                'db_table': 'paypal_ipn',
                'verbose_name': 'PayPal IPN',
            },
            bases=(models.Model,),
        ),
    ]
{
"name": "@tailwindcss/oxide-darwin-x64",
"version": "4.1.18",
"repository": {
"type": "git",
"url": "git+https://github.com/tailwindlabs/tailwindcss.git",
"directory": "crates/node/npm/darwin-x64"
},
"os": [
"darwin"
],
"cpu": [
"x64"
],
"main": "tailwindcss-oxide.darwin-x64.node",
"files": [
"tailwindcss-oxide.darwin-x64.node"
],
"publishConfig": {
"provenance": true,
"access": "public"
},
"license": "MIT",
"engines": {
"node": ">= 20"
}
} | json | github | https://github.com/tailwindlabs/tailwindcss | crates/node/npm/darwin-x64/package.json |
#!/bin/bash
# Dev-container provisioning: install cargo-nextest and the native
# libraries needed to build font-related crates.

# Fail fast: abort on errors, unset variables, and pipeline failures;
# echo each command for build-log visibility.
set -euxo pipefail

# Keep apt non-interactive so the image build never blocks on a prompt.
export DEBIAN_FRONTEND=noninteractive

# Bootstrap cargo-binstall from its official release script (TLS >= 1.2,
# https-only redirects enforced).
curl -L --proto '=https' --tlsv1.2 -sSf \
  https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh \
  | bash

# Fetch a prebuilt cargo-nextest binary (faster than `cargo install`);
# --secure restricts binstall to secure download sources.
cargo binstall cargo-nextest --secure

# Fontconfig development headers, required at crate build time.
apt-get update
apt-get -y install --no-install-recommends libfontconfig1-dev
# oppia/templatetags/display_functions.py
import hashlib
import json
import math
import urllib
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='get_index')
def get_index(start, index):
    """Template filter: offset *start* by *index* (e.g. 1-based loop counters)."""
    shifted = start + index
    return shifted
@register.filter(name='secs_to_duration')
def secs_to_duration(secs):
    """Template filter: format a second count as a coarse duration label.

    Returns "-" for zero, "< 1 min" below one minute, "1 min" below two
    minutes, and "N mins" (whole minutes, floored) otherwise.
    """
    if secs == 0:
        return "-"
    if secs < 60:
        return "< 1 min"
    if secs < 120:
        return "1 min"
    # Floor division replaces the old int(math.floor(secs/60)) round-trip;
    # the dead commented-out minutes'seconds" formatting was removed.
    return str(int(secs // 60)) + " mins"
@register.filter(name='title_lang')
@stringfilter
def title_lang(title, lang):
    """Template filter: pick a translation from a JSON title mapping.

    *title* is expected to be a JSON object mapping language codes to
    strings. Returns the entry for *lang* if present, otherwise an
    arbitrary available translation, otherwise the raw *title* (e.g.
    when it is a plain, non-JSON string).
    """
    try:
        titles = json.loads(title)
        if lang in titles:
            return titles[lang]
        else:
            # Fall back to whichever translation comes first.
            for l in titles:
                return titles[l]
    except (ValueError, TypeError):
        # Not decodable/subscriptable JSON — treat the title as plain text.
        # (The previous bare `except:` also hid programming errors.)
        pass
    return title
@register.filter(name='gravatar')
def gravatar(user, size):
    """Template filter: render an <img> tag for the user's Gravatar.

    *size* is the square pixel size requested from the Gravatar service.
    """
    # urllib.urlencode moved to urllib.parse in Python 3; support both
    # (the original py2-only call raised AttributeError on py3).
    try:
        from urllib.parse import urlencode
    except ImportError:  # Python 2
        from urllib import urlencode
    # Gravatar hashes the trimmed, lower-cased email; md5 needs bytes on py3.
    email = user.email.strip().lower().encode('utf-8')
    gravatar_url = "https://www.gravatar.com/avatar.php?"
    gravatar_url += urlencode({
        'gravatar_id': hashlib.md5(email).hexdigest(),
        'size': str(size)
    })
    return mark_safe(
        '<img src="{0}" alt="gravatar for {1}" class="gravatar" width="{2}" height="{2}"/>'.format(gravatar_url, user, size)
    )
from homeassistant.helpers.entity import Entity
import voluptuous as vol
import homeassistant.components.remote as remote
import homeassistant.helpers.config_validation as cv
from homeassistant.components.remote import (DOMAIN)
# Key under which the broadlink component stores its device in hass.data.
DATA_BROADLINK = 'broadlink'
DEPENDENCIES = ['broadlink']

# Custom service names registered on the remote domain by setup_platform().
SERVICE_LEARN = 'broadlink_learn'
SERVICE_CALL = 'broadlink_call'

# Shared payload schema for both services; only 'device' is required.
SERVICE_SCHEMA = vol.Schema({
    vol.Optional('commandName'): cv.string,
    vol.Required('device'): cv.string,
    vol.Optional('count'): cv.string,
})

# Module-level entity handle, populated by setup_platform() and used by
# the service handlers below.
device = None
def setup_platform(hass, config, add_devices, discovery_info=None):
    # This platform is only set up via discovery from the broadlink component.
    if discovery_info is None:
        return
    # Shared broadlink device object published by the parent component.
    broadlink = hass.data[DATA_BROADLINK]
    # Stash the entity in the module-level `device` so the service
    # handlers (_call_service/_learn_service) can reach it.
    global device
    device = BroadlinkRemote(broadlink)
    add_devices([device], True)
    # Expose the custom call/learn services on the remote domain.
    hass.services.register(DOMAIN, SERVICE_CALL, _call_service,schema=SERVICE_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_LEARN, _learn_service,schema=SERVICE_SCHEMA)
    return True
def _call_service(service):
_device = service.data.get('device')
count = service.data.get('count')
command_name = service.data.get('commandName')
if(count == "None" or count == ""):
count = 1
else:
count = int(count)
if(_device != "None" and _device != ""):
device.call(_device, command_name, count)
def _learn_service(service):
_device = service.data.get('device')
command_name = service.data.get('commandName')
if(_device != "None" and _device != ""):
device.learn(_device, command_name)
class BroadlinkRemote(Entity):
    """Entity wrapper around the shared broadlink component device."""

    def __init__(self, device):
        self.device = device

    @property
    def name(self):
        """Entity name shown in Home Assistant."""
        return 'broadlink'

    def call(self, device, command_name, count):
        """Send a learned command, repeated and paced one second apart.

        The repeat count is clamped into [1, 5] when out of range
        (a negative value becomes 1, anything above five becomes 5).
        """
        import time
        repeats = count
        if repeats < 0:
            repeats = 1
        elif repeats > 5:
            repeats = 5
        for _ in range(repeats):
            self.device.call(device, command_name)
            time.sleep(1)

    def learn(self, device, command_name):
        """Put the underlying device into learn mode for the named command."""
        self.device.learn(device, command_name)
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v4.no-op.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "stat",
"spec": {
"pluginVersion": "",
"options": {
"__angularMigration": {
"autoMigrateFrom": "singlestat",
"originalOptions": {}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "stat",
"spec": {
"pluginVersion": "",
"options": {
"__angularMigration": {
"autoMigrateFrom": "singlestat",
"originalOptions": {}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "V4 No-Op Migration Test",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v4.no-op.v42.v2alpha1.json |
# frozen_string_literal: true

# Test model: exercises a polymorphic +belongs_to+ that maintains a
# counter cache and touches the owner's +wheels_owned_at+ column on save.
class Wheel < ActiveRecord::Base
  belongs_to :wheelable, polymorphic: true, counter_cache: true, touch: :wheels_owned_at
end
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
root.system.readfile = os.path.join(tests_root, 'halt.sh') | unknown | codeparrot/codeparrot-clean | ||
test_kind: fsm_workload_test
selector:
roots:
- jstests/concurrency/fsm_workloads/**/*.js
- src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
exclude_files:
# SERVER-14669 Multi-removes that use $where miscount removed documents
# Disabled due to MongoDB restrictions and/or workload restrictions
# These workloads sometimes trigger 'Could not lock auth data update lock'
# errors because the AuthorizationManager currently waits for only five
# seconds to acquire the lock for authorization documents
# uses >100MB of data, which can overwhelm test hosts
# compact can only be run against a standalone mongod
# can cause OOM kills on test hosts
# cannot createIndex after dropDatabase without sharding first
# The WTWriteConflictException failpoint is not supported on mongos.
# SERVER-20361 Improve the behaviour of multi-update/delete against a sharded collection
- jstests/concurrency/fsm_workloads/query/update/update_where.js
# TODO Undenylist (SERVER-38852).
# serverStatus does not include transaction metrics on mongos.
# Uses the same transaction id across different routers, which is not allowed because when either
# router tries to commit, it may not know the full participant list.
# The test may spuriously fail when run against sharded clusters, due to limitations of the
# infrastructure. See SERVER-77039 for full details.
# This test runs a large number of inserts, which can cause moveCollection to take a long time
# to finish. This can cause the CheckMetadataConsistency hook to hit LockBusy errors.
- jstests/concurrency/fsm_workloads/timeseries/timeseries_insert_idle_bucket_expiration.js
exclude_with_any_tags:
- requires_standalone
- assumes_against_mongod_not_mongos
- assumes_balancer_off
- requires_replication
# mongos has no system.profile collection.
- requires_profiling
- assumes_unsharded_collection
# implicitly_retry_on_migration_in_progress.js alters find/aggregate commands
# so that the whole result set is returned through a single batch
- assumes_no_implicit_cursor_exhaustion
executor:
archive:
hooks:
- CheckReplDBHashInBackground
- CheckReplDBHash
- CheckMetadataConsistencyInBackground
- ValidateCollections
tests: true
config:
shell_options:
eval: >-
await import("jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
await import("jstests/libs/override_methods/implicitly_retry_crud_on_no_progress_made.js");
await import("jstests/libs/override_methods/implicitly_retry_on_migration_in_progress.js");
global_vars:
TestData:
runningWithBalancer: true
implicitlyShardOnCreateCollectionOnly: true
hooks:
- class: CheckIdleCursors
- class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
class: ShardedClusterFixture
mongos_options:
set_parameters:
enableTestCommands: 1
queryAnalysisSamplerConfigurationRefreshSecs: 1
shard_options:
mongod_options:
oplogSize: 1024
mongod_options:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
queryAnalysisWriterIntervalSecs: 1
queryAnalysisSamplerConfigurationRefreshSecs: 1
skipDroppingHashedShardKeyIndex: true
reshardingMinimumOperationDurationMillis: 0
num_rs_nodes_per_shard: 3
num_shards: 2
num_mongos: 2
enable_balancer: true
random_migrations: true | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml |
"""Aggregate_tracefiles module.
This script aggregates several tracefiles into one tracefile.
All but the last argument are input tracefiles or .txt files which list tracefiles.
The last argument is the tracefile to which the output will be written.
"""
import os
import subprocess
import sys
from optparse import OptionParser
def aggregate(inputs, output):
    """Aggregate the tracefiles given in inputs to a tracefile given by output."""
    # Build `lcov -a in1 -a in2 ... -o output`, echo it, then run it.
    cmd = ["lcov"]
    for tracefile in inputs:
        cmd.extend(["-a", tracefile])
    cmd.extend(["-o", output])
    print(" ".join(cmd))
    return subprocess.call(cmd)
def getfilesize(path):
    """Return the size of 'path' in bytes, or 0 if it is not a regular file."""
    if os.path.isfile(path):
        return os.path.getsize(path)
    return 0
def main():
    """Execute Main entry.

    Collects non-empty .info tracefiles from the command line (a .txt
    argument is read as a listing of tracefile paths, one per line) and
    aggregates them into the final argument. Returns an error string on
    bad usage, otherwise the lcov exit status.
    """
    inputs = []
    usage = "usage: %prog input1.info input2.info ... output.info"
    parser = OptionParser(usage=usage)
    (_, args) = parser.parse_args()

    if len(args) < 2:
        return "must supply input files"

    for path in args[:-1]:
        _, ext = os.path.splitext(path)

        if ext == ".info":
            # Skip empty tracefiles; lcov rejects them.
            if getfilesize(path) > 0:
                inputs.append(path)
        elif ext == ".txt":
            # Close the listing file deterministically (the original
            # leaked the handle returned by open()).
            with open(path) as listing:
                inputs += [line.strip() for line in listing if getfilesize(line.strip()) > 0]
        else:
            return "unrecognized file type"

    return aggregate(inputs, args[-1])
if __name__ == "__main__":
sys.exit(main()) | python | github | https://github.com/mongodb/mongo | buildscripts/aggregate_tracefiles.py |
from __future__ import unicode_literals
import warnings
from functools import partial
from operator import attrgetter
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import Q, signals
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.fields import (
BLANK_CHOICE_DASH, AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from django.db.models.fields.related_lookups import (
RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn,
RelatedLessThan, RelatedLessThanOrEqual,
)
from django.db.models.query import QuerySet
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_docs_version
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def resolve_relation(scope_model, relation):
    """
    Normalize a relation reference relative to scope_model.

    Accepted forms and their results:
    * RECURSIVE_RELATIONSHIP_CONSTANT ("self") -> scope_model itself.
    * A bare model name -> "app_label.ModelName" using scope_model's
      app_label.
    * An "app_label.ModelName" string -> returned as-is.
    * A model class -> returned unchanged.
    """
    # "self" means the model that declares the field.
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        relation = scope_model

    # Qualify a bare model-name string with the scope model's app label.
    if isinstance(relation, six.string_types) and "." not in relation:
        relation = "%s.%s" % (scope_model._meta.app_label, relation)

    return relation
def lazy_related_operation(function, model, *related_models, **kwargs):
    """
    Defer `function` until `model` and every entry of `related_models`
    have been imported and registered with the app registry; it is then
    invoked with the loaded model classes as positional arguments plus
    any extra keyword arguments given here.

    `model` must be a model class. Each of `related_models` may be a
    model class or any reference form accepted by resolve_relation(),
    resolved relative to `model`. Thin wrapper over
    Apps.lazy_model_operation on `model._meta.apps`.
    """
    all_models = [model]
    all_models.extend(resolve_relation(model, rel) for rel in related_models)
    model_keys = (make_model_tuple(m) for m in all_models)
    registry = model._meta.apps
    return registry.lazy_model_operation(partial(function, **kwargs), *model_keys)
def add_lazy_relation(cls, field, relation, operation):
    """Deprecated shim: delegate to lazy_related_operation().

    Kept for backward compatibility; emits RemovedInDjango20Warning.
    """
    warnings.warn(
        "add_lazy_relation() has been superseded by lazy_related_operation() "
        "and related methods on the Apps class.",
        RemovedInDjango20Warning, stacklevel=2)

    # Rearrange args for new Apps.lazy_model_operation (a named def
    # instead of the old lambda-assigned-to-a-name idiom).
    def function(local, related, field):
        return operation(field, related, local)

    lazy_related_operation(function, cls, relation, field=field)
class RelatedField(Field):
    """
    Base class that all relational fields inherit from.
    """

    # Cardinality flags: each concrete subclass (ForeignKey,
    # OneToOneField, ManyToManyField, ...) flips the one describing its
    # relation type; all default to False here.
    one_to_many = False
    one_to_one = False
    many_to_many = False
    many_to_one = False
    @cached_property
    def related_model(self):
        # The model this field points at. Can't cache this property until all
        # the models are loaded: remote_field.model may still be a lazy
        # reference before the app registry is fully populated (it is
        # replaced by resolve_related_class in contribute_to_class).
        apps.check_models_ready()
        return self.remote_field.model
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_related_name_is_valid())
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_related_name_is_valid(self):
import re
import keyword
related_name = self.remote_field.related_name
if not related_name:
return []
is_valid_id = True
if keyword.iskeyword(related_name):
is_valid_id = False
if six.PY3:
if not related_name.isidentifier():
is_valid_id = False
else:
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', related_name):
is_valid_id = False
if not (is_valid_id or related_name.endswith('+')):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s" %
(self.remote_field.related_name, self.model._meta.object_name,
self.name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=self,
id='fields.E306',
)
]
return []
def _check_relation_model_exists(self):
rel_is_missing = self.remote_field.model not in apps.get_models()
rel_is_string = isinstance(self.remote_field.model, six.string_types)
model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name
if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.remote_field.model not in apps.get_models() and
not isinstance(self.remote_field.model, six.string_types) and
self.remote_field.model._meta.swapped):
model = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
    def _check_clashes(self):
        """
        Check accessor and reverse query name clashes.

        Emits fields.E302/E303 for clashes between this field's reverse
        accessor / reverse query name and a field on the target model,
        and fields.E304/E305 for clashes with another relation's reverse
        names pointing at the same target.
        """
        from django.db.models.base import ModelBase

        errors = []
        opts = self.model._meta

        # `f.remote_field.model` may be a string instead of a model. Skip if
        # the model name is not resolved.
        if not isinstance(self.remote_field.model, ModelBase):
            return []

        # If the field doesn't install a backward relation on the target model
        # (so `is_hidden` returns True), then there are no clashes to check
        # and we can skip these fields.
        if self.remote_field.is_hidden():
            return []

        # Consider that we are checking field `Model.foreign` and the models
        # are:
        #
        #     class Target(models.Model):
        #         model = models.IntegerField()
        #         model_set = models.IntegerField()
        #
        #     class Model(models.Model):
        #         foreign = models.ForeignKey(Target)
        #         m2m = models.ManyToManyField(Target)

        rel_opts = self.remote_field.model._meta
        # rel_opts.object_name == "Target"
        rel_name = self.remote_field.get_accessor_name()  # i. e. "model_set"
        rel_query_name = self.related_query_name()  # i. e. "model"
        field_name = "%s.%s" % (opts.object_name,
                                self.name)  # i. e. "Model.field"

        # Check clashes between accessor or reverse query name of `field`
        # and any other field name -- i.e. accessor for Model.foreign is
        # model_set and it clashes with Target.model_set.
        potential_clashes = rel_opts.fields + rel_opts.many_to_many
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (rel_opts.object_name,
                                    clash_field.name)  # i. e. "Target.model_set"
            if clash_field.name == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E302',
                    )
                )

            if clash_field.name == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E303',
                    )
                )

        # Check clashes between accessors/reverse query names of `field` and
        # any other field accessor -- i. e. Model.foreign accessor clashes
        # with Model.m2m accessor.
        potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (  # i. e. "Model.m2m"
                clash_field.related_model._meta.object_name,
                clash_field.field.name)
            if clash_field.get_accessor_name() == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E304',
                    )
                )

            if clash_field.get_accessor_name() == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with reverse query name for '%s'."
                        % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E305',
                    )
                )

        return errors
def db_type(self, connection):
# By default related field will not have a column as it relates to
# columns from another table.
return None
    def contribute_to_class(self, cls, name, virtual_only=False):
        """
        Register this field on the model class ``cls`` under ``name``.

        Beyond the base Field bookkeeping, interpolate the
        ``%(class)s`` / ``%(app_label)s`` placeholders in ``related_name``
        for concrete models, and defer ``do_related_class()`` until the
        remote model class is resolved (it may still be a lazy string
        reference at this point).
        """
        super(RelatedField, self).contribute_to_class(cls, name, virtual_only=virtual_only)
        self.opts = cls._meta
        if not cls._meta.abstract:
            if self.remote_field.related_name:
                # Interpolate placeholders so each concrete subclass of an
                # abstract model gets its own distinct accessor name.
                related_name = force_text(self.remote_field.related_name) % {
                    'class': cls.__name__.lower(),
                    'app_label': cls._meta.app_label.lower()
                }
                self.remote_field.related_name = related_name

            def resolve_related_class(model, related, field):
                # Runs once the remote model is available.
                field.remote_field.model = related
                field.do_related_class(related, model)
            lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self)
def get_forward_related_filter(self, obj):
"""
Return the keyword arguments that when supplied to
self.model.object.filter(), would select all instances related through
this field to the remote obj. This is used to build the querysets
returned by related descriptors. obj is an instance of
self.related_field.model.
"""
return {
'%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname)
for _, rh_field in self.related_fields
}
def get_reverse_related_filter(self, obj):
"""
Complement to get_forward_related_filter(). Return the keyword
arguments that when passed to self.related_field.model.object.filter()
select all instances of self.related_field.model related through
this field to obj. obj is an instance of self.model.
"""
base_filter = {
rh_field.attname: getattr(obj, lh_field.attname)
for lh_field, rh_field in self.related_fields
}
base_filter.update(self.get_extra_descriptor_filter(obj) or {})
return base_filter
    @property
    def swappable_setting(self):
        """
        Get the setting that this is powered from for swapping, or None
        if it's not swapped in / marked with swappable=False.
        """
        if self.swappable:
            # Work out string form of "to": either the lazy string reference
            # itself, or the resolved model's "app_label.ModelName" label.
            if isinstance(self.remote_field.model, six.string_types):
                to_string = self.remote_field.model
            else:
                to_string = self.remote_field.model._meta.label
            # Ask the app registry which settings name (if any) swaps in
            # this model.
            return apps.get_swappable_settings_name(to_string)
        return None
    def set_attributes_from_rel(self):
        """
        Fill in attributes derived from the (now resolved) remote model:
        default name and verbose_name, then let remote_field resolve its
        target field name.
        """
        # Default name: "<remote model name>_<remote pk name>".
        self.name = self.name or (self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.remote_field.model._meta.verbose_name
        self.remote_field.set_field_name()
    @property
    def related(self):
        """
        Deprecated alias for ``remote_field``; warns and is scheduled for
        removal in Django 1.10.
        """
        # stacklevel=2 points the warning at the caller's access site.
        warnings.warn(
            "Usage of field.related has been deprecated. Use field.remote_field instead.",
            RemovedInDjango110Warning, 2)
        return self.remote_field
    def do_related_class(self, other, cls):
        """
        Hook run once the remote model class ``other`` is resolved: derive
        attributes from it, then contribute this relation to it.
        """
        self.set_attributes_from_rel()
        self.contribute_to_related_class(other, self.remote_field)
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.remote_field.limit_choices_to):
return self.remote_field.limit_choices_to()
return self.remote_field.limit_choices_to
def formfield(self, **kwargs):
"""
Pass ``limit_choices_to`` to the field being constructed.
Only passes it if there is a type that supports related fields.
This is a similar strategy used to pass the ``queryset`` to the field
being constructed.
"""
defaults = {}
if hasattr(self.remote_field, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.remote_field.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super(RelatedField, self).formfield(**defaults)
def related_query_name(self):
"""
Define the name that can be used to identify this related object in a
table-spanning query.
"""
return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name
@property
def target_field(self):
"""
When filtering against this relation, returns the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"The relation has multiple target fields, but only single target field was asked for")
return target_fields[0]
class SingleRelatedObjectDescriptor(object):
    """
    Accessor to the related object on the reverse side of a one-to-one
    relation.

    In the example::

        class Restaurant(Model):
            place = OneToOneField(Place, related_name='restaurant')

    ``place.restaurant`` is a ``SingleRelatedObjectDescriptor`` instance.
    """
    def __init__(self, related):
        # `related` describes the one-to-one relation; the fetched object is
        # cached on the owning instance under `self.cache_name`.
        self.related = related
        self.cache_name = related.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception isn't created at initialization time for the sake of
        # consistency with `ReverseSingleRelatedObjectDescriptor`.
        # It subclasses both the related model's DoesNotExist and
        # AttributeError so callers can catch either.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.related.related_model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        # Whether the related object has already been fetched onto `instance`.
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        manager = self.related.related_model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.related.related_model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        # Build a single queryset fetching the related objects for all
        # `instances` at once (used by prefetch_related()).
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = {instance_attr(inst): inst for inst in instances}
        query = {'%s__in' % self.related.field.name: instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in queryset:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself; instance access
        # returns the related object, from cache when available.
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                # Unsaved instance: it cannot have a related row yet.
                rel_obj = None
            else:
                filter_args = self.related.field.get_forward_related_filter(instance)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**filter_args)
                except self.related.related_model.DoesNotExist:
                    rel_obj = None
                else:
                    # Seed the forward cache on the fetched object too.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (
                    instance.__class__.__name__,
                    self.related.get_accessor_name()
                )
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' % (
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                )
            )
        elif value is not None and not isinstance(value, self.related.related_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        elif value is not None:
            # Route both objects onto a common database, or refuse the
            # assignment when the router disallows the relation.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
        if None in related_pk:
            raise ValueError(
                'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                (value, instance._meta.object_name)
            )

        # Set the value of the related field to the value of the related object's related field
        for index, field in enumerate(self.related.field.local_related_fields):
            setattr(value, field.attname, related_pk[index])

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
    """
    Accessor to the related object on the forward side of a many-to-one or
    one-to-one relation.

    In the example::

        class Choice(Model):
            poll = ForeignKey(Place, related_name='choices')

    `choice.poll` is a ReverseSingleRelatedObjectDescriptor instance.
    """
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception can't be created at initialization time since the
        # related model might not be resolved yet; `rel.model` might still be
        # a string model reference.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.field.remote_field.model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        # Whether the related object has already been fetched onto `instance`.
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        manager = self.field.remote_field.model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.field.remote_field.model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        # Build a single queryset fetching the related objects for all
        # `instances` at once (used by prefetch_related()).
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}
        related_field = self.field.foreign_related_fields[0]

        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.remote_field.multiple:
            # One-to-one: seed the reverse cache on each fetched object.
            rel_obj_cache_name = self.field.remote_field.get_cache_name()
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself; instance access
        # returns the related object, from cache when available.
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                # Unset foreign key value: there is no related object.
                rel_obj = None
            else:
                qs = self.get_queryset(instance=instance)
                qs = qs.filter(**self.field.get_reverse_related_filter(instance))
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                if not self.field.remote_field.multiple:
                    # One-to-one: seed the reverse cache too.
                    setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name)
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' %
                (instance._meta.object_name, self.field.name)
            )
        elif value is not None and not isinstance(value, self.field.remote_field.model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.field.name,
                    self.field.remote_field.model._meta.object_name,
                )
            )
        elif value is not None:
            # Route both objects onto a common database, or refuse the
            # assignment when the router disallows the relation.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.remote_field.get_cache_name(), None)

            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, None)

        # Set the values of the related field.
        else:
            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        if value is not None and not self.field.remote_field.multiple:
            setattr(value, self.field.remote_field.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel):
    """
    Factory function to create a manager that subclasses another manager
    (generally the default manager of a given model) and adds behaviors
    specific to many-to-one relations.

    The returned class closes over ``rel``, the relation it manages.
    """
    class RelatedManager(superclass):
        def __init__(self, instance):
            # `instance` is the object on the "one" side of the relation;
            # all querysets are filtered down to rows pointing at it.
            super(RelatedManager, self).__init__()

            self.instance = instance
            self.model = rel.related_model
            self.field = rel.field

            self.core_filters = {self.field.name: instance}

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_foreign_related_manager(manager.__class__, rel)
            return manager_class(self.instance)
        do_not_call_in_templates = True

        def get_queryset(self):
            # Serve prefetched results when available; otherwise build a
            # queryset filtered to this instance, with known-related-object
            # seeding so reverse accesses don't hit the database again.
            try:
                return self.instance._prefetched_objects_cache[self.field.related_query_name()]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
                qs = super(RelatedManager, self).get_queryset()
                qs._add_hints(instance=self.instance)
                if self._db:
                    qs = qs.using(self._db)
                qs = qs.filter(**self.core_filters)
                # An unset (or backend-null) FK target value can never match
                # any row; short-circuit to an empty queryset.
                for field in self.field.foreign_related_fields:
                    val = getattr(self.instance, field.attname)
                    if val is None or (val == '' and empty_strings_as_null):
                        return qs.none()
                qs._known_related_objects = {self.field: {self.instance.pk: self.instance}}
                return qs

        def get_prefetch_queryset(self, instances, queryset=None):
            # Build one queryset for all `instances` (prefetch_related()).
            if queryset is None:
                queryset = super(RelatedManager, self).get_queryset()

            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)

            rel_obj_attr = self.field.get_local_related_value
            instance_attr = self.field.get_foreign_related_value
            instances_dict = {instance_attr(inst): inst for inst in instances}
            query = {'%s__in' % self.field.name: instances}
            queryset = queryset.filter(**query)

            # Since we just bypassed this class' get_queryset(), we must manage
            # the reverse relation manually.
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, self.field.name, instance)

            cache_name = self.field.related_query_name()
            return queryset, rel_obj_attr, instance_attr, False, cache_name

        def add(self, *objs, **kwargs):
            # Attach `objs` to this instance. With bulk=True (default) a
            # single UPDATE is issued (objects must already be saved); with
            # bulk=False each object is save()d individually.
            bulk = kwargs.pop('bulk', True)
            objs = list(objs)
            db = router.db_for_write(self.model, instance=self.instance)

            def check_and_update_obj(obj):
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected, got %r" % (
                        self.model._meta.object_name, obj,
                    ))
                setattr(obj, self.field.name, self.instance)

            if bulk:
                pks = []
                for obj in objs:
                    check_and_update_obj(obj)
                    if obj._state.adding or obj._state.db != db:
                        raise ValueError(
                            "%r instance isn't saved. Use bulk=False or save "
                            "the object first." % obj
                        )
                    pks.append(obj.pk)
                self.model._base_manager.using(db).filter(pk__in=pks).update(**{
                    self.field.name: self.instance,
                })
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in objs:
                        check_and_update_obj(obj)
                        obj.save()
        add.alters_data = True

        def create(self, **kwargs):
            # Create a related object with the FK pre-set to this instance.
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
        update_or_create.alters_data = True

        # remove() and clear() are only provided if the ForeignKey can have a value of null.
        if rel.field.null:
            def remove(self, *objs, **kwargs):
                # Detach `objs` (set their FK to NULL); each must currently
                # point at this instance.
                if not objs:
                    return
                bulk = kwargs.pop('bulk', True)
                val = self.field.get_foreign_related_value(self.instance)
                old_ids = set()
                for obj in objs:
                    # Is obj actually part of this descriptor set?
                    if self.field.get_local_related_value(obj) == val:
                        old_ids.add(obj.pk)
                    else:
                        raise self.field.remote_field.model.DoesNotExist("%r is not related to %r." % (obj, self.instance))
                self._clear(self.filter(pk__in=old_ids), bulk)
            remove.alters_data = True

            def clear(self, **kwargs):
                # Detach every related object.
                bulk = kwargs.pop('bulk', True)
                self._clear(self, bulk)
            clear.alters_data = True

            def _clear(self, queryset, bulk):
                # Shared backend for remove()/clear(): NULL out the FK, in
                # one UPDATE (bulk) or per-object save()s in a transaction.
                db = router.db_for_write(self.model, instance=self.instance)
                queryset = queryset.using(db)
                if bulk:
                    # `QuerySet.update()` is intrinsically atomic.
                    queryset.update(**{self.field.name: None})
                else:
                    with transaction.atomic(using=db, savepoint=False):
                        for obj in queryset:
                            setattr(obj, self.field.name, None)
                            obj.save(update_fields=[self.field.name])
            _clear.alters_data = True

        def set(self, objs, **kwargs):
            # Replace the related set with `objs`. For nullable FKs this
            # diffs against the current set (or clears first with clear=True);
            # otherwise it can only add.
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)

            bulk = kwargs.pop('bulk', True)
            clear = kwargs.pop('clear', False)

            if self.field.null:
                db = router.db_for_write(self.model, instance=self.instance)
                with transaction.atomic(using=db, savepoint=False):
                    if clear:
                        self.clear()
                        self.add(*objs, bulk=bulk)
                    else:
                        old_objs = set(self.using(db).all())
                        new_objs = []
                        for obj in objs:
                            if obj in old_objs:
                                old_objs.remove(obj)
                            else:
                                new_objs.append(obj)

                        self.remove(*old_objs, bulk=bulk)
                        self.add(*new_objs, bulk=bulk)
            else:
                self.add(*objs, bulk=bulk)
        set.alters_data = True

    return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
    """
    Accessor to the related objects manager on the reverse side of a
    many-to-one relation.

    In the example::

        class Choice(Model):
            poll = ForeignKey(Place, related_name='choices')

    ``poll.choices`` is a ``ForeignRelatedObjectsDescriptor`` instance.
    """
    def __init__(self, rel):
        self.rel = rel
        self.field = rel.field

    @cached_property
    def related_manager_cls(self):
        # Built lazily (and cached) so the related model's default manager
        # class is fully resolved before being subclassed.
        related_model = self.rel.related_model
        return create_foreign_related_manager(
            related_model._default_manager.__class__,
            self.rel,
        )

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself; instance access
        # returns a related manager bound to that instance.
        if instance is None:
            return self
        return self.related_manager_cls(instance)

    def __set__(self, instance, value):
        # Assignment delegates to the bound manager's set().
        self.__get__(instance).set(value)
def create_many_related_manager(superclass, rel, reverse):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
for lh_field, rh_field in self.source_field.related_fields:
self.core_filters['%s__%s' % (self.query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.source_field_name))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # Remove the given objects from the m2m relation by deleting the
            # matching rows from the intermediate (through) table, wrapped in
            # a transaction and bracketed by pre_remove/post_remove signals.
            #
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove
            if not objs:
                return
            # Check that all the objects are of the right type
            old_ids = set()
            for obj in objs:
                if isinstance(obj, self.model):
                    # Model instance: resolve it to the value stored in the
                    # through table's target column.
                    fk_val = self.target_field.get_foreign_related_value(obj)[0]
                    old_ids.add(fk_val)
                else:
                    # Otherwise assume a raw primary-key value was passed.
                    old_ids.add(obj)
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                # Send a signal to the other end if need be.
                signals.m2m_changed.send(sender=self.through, action="pre_remove",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=old_ids, using=db)
                target_model_qs = super(ManyRelatedManager, self).get_queryset()
                if target_model_qs._has_filters():
                    # The manager's base queryset is filtered; restrict removal
                    # to the objects that the manager can actually see.
                    old_vals = target_model_qs.using(db).filter(**{
                        '%s__in' % self.target_field.target_field.attname: old_ids})
                else:
                    old_vals = old_ids
                filters = self._build_remove_filters(old_vals)
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_remove",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
    """
    Accessor to the related objects manager on either side of a
    many-to-many relation.
    Given::
        class Pizza(Model):
            toppings = ManyToManyField(Topping, related_name='pizzas')
    both ``pizza.toppings`` and ``topping.pizzas`` resolve through an
    instance of this descriptor.
    """
    def __init__(self, rel, reverse=False):
        super(ManyRelatedObjectsDescriptor, self).__init__(rel)
        # True when this descriptor sits on the "related" model rather than
        # the model declaring the ManyToManyField.
        self.reverse = reverse
    @property
    def through(self):
        # Convenience access to the intermediary model
        # (e.g. Book.authors.through) for inlines and bulk operations.
        # Exposed as a property so the fully resolved model is returned.
        return self.rel.through
    @cached_property
    def related_manager_cls(self):
        # Pick the model whose default manager the dynamic related manager
        # should subclass, depending on which side of the relation we're on.
        if self.reverse:
            manager_model = self.rel.related_model
        else:
            manager_model = self.rel.model
        manager_base = manager_model._default_manager.__class__
        return create_many_related_manager(manager_base, self.rel, reverse=self.reverse)
class ForeignObjectRel(object):
    """
    Used by ForeignObject to store information about the relation.
    ``_meta.get_fields()`` returns this class to provide access to the field
    flags for the reverse relation.
    """
    # Field flags
    auto_created = True
    concrete = False
    editable = False
    is_relation = True
    # Reverse relations are always nullable (Django can't enforce that a
    # foreign key on the related model points to this model).
    null = True
    def __init__(self, field, to, related_name=None, related_query_name=None,
                 limit_choices_to=None, parent_link=False, on_delete=None):
        # field: the forward relation field this rel object describes.
        # to: the model the forward field points to.
        self.field = field
        self.model = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.parent_link = parent_link
        self.on_delete = on_delete
        self.symmetrical = False
        self.multiple = True
    # Some of the following cached_properties can't be initialized in
    # __init__ as the field doesn't have its model yet. Calling these methods
    # before field.contribute_to_class() has been called will result in
    # AttributeError
    @property
    def to(self):
        # Deprecated alias for ``self.model``.
        warnings.warn(
            "Usage of ForeignObjectRel.to attribute has been deprecated. "
            "Use the model attribute instead.",
            RemovedInDjango20Warning, 2)
        return self.model
    @cached_property
    def hidden(self):
        # Cached form of is_hidden() (related_name ending in '+').
        return self.is_hidden()
    @cached_property
    def name(self):
        return self.field.related_query_name()
    @property
    def remote_field(self):
        # From the reverse side, the "remote" field is the forward field.
        return self.field
    @property
    def target_field(self):
        """
        When filtering against this relation, returns the field on the remote
        model against which the filtering should happen.
        """
        target_fields = self.get_path_info()[-1].target_fields
        if len(target_fields) > 1:
            raise exceptions.FieldError("Can't use target_field for multicolumn relations.")
        return target_fields[0]
    @cached_property
    def related_model(self):
        if not self.field.model:
            raise AttributeError(
                "This property can't be accessed before self.field.contribute_to_class has been called.")
        return self.field.model
    # Cardinality flags mirror the forward field's, with the
    # many_to_one/one_to_many pair swapped since this object describes
    # the reverse direction of the relation.
    @cached_property
    def many_to_many(self):
        return self.field.many_to_many
    @cached_property
    def many_to_one(self):
        return self.field.one_to_many
    @cached_property
    def one_to_many(self):
        return self.field.many_to_one
    @cached_property
    def one_to_one(self):
        return self.field.one_to_one
    def get_prep_lookup(self, lookup_name, value):
        # Lookup preparation is delegated to the forward field.
        return self.field.get_prep_lookup(lookup_name, value)
    def get_lookup(self, lookup_name):
        return self.field.get_lookup(lookup_name)
    def get_internal_type(self):
        return self.field.get_internal_type()
    @property
    def db_type(self):
        return self.field.db_type
    def __repr__(self):
        return '<%s: %s.%s>' % (
            type(self).__name__,
            self.related_model._meta.app_label,
            self.related_model._meta.model_name,
        )
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
                    limit_to_currently_related=False):
        """
        Return choices with a default blank choices included, for use as
        SelectField choices for this field.
        Analog of django.db.models.fields.Field.get_choices(), provided
        initially for utilization by RelatedFieldListFilter.
        """
        first_choice = blank_choice if include_blank else []
        queryset = self.related_model._default_manager.all()
        if limit_to_currently_related:
            queryset = queryset.complex_filter(
                {'%s__isnull' % self.related_model._meta.model_name: False}
            )
        lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
        return first_choice + lst
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        # Defer to the actual field definition for db prep
        return self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared)
    def is_hidden(self):
        "Should the related object be hidden?"
        return self.related_name is not None and self.related_name[-1] == '+'
    def get_joining_columns(self):
        return self.field.get_reverse_joining_columns()
    def get_extra_restriction(self, where_class, alias, related_alias):
        # Aliases are swapped relative to the forward field because this is
        # the reverse side of the join.
        return self.field.get_extra_restriction(where_class, related_alias, alias)
    def set_field_name(self):
        """
        Sets the related field's name, this is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel()
        """
        # By default foreign object doesn't relate to any remote field (for
        # example custom multicolumn joins currently have no remote field).
        self.field_name = None
    def get_accessor_name(self, model=None):
        # This method encapsulates the logic that decides what name to give an
        # accessor descriptor that retrieves related many-to-one or
        # many-to-many objects. It uses the lower-cased object_name + "_set",
        # but this can be overridden with the "related_name" option.
        # Due to backwards compatibility ModelForms need to be able to provide
        # an alternate model. See BaseInlineFormSet.get_default_prefix().
        opts = model._meta if model else self.related_model._meta
        model = model or self.related_model
        if self.multiple:
            # If this is a symmetrical m2m relation on self, there is no reverse accessor.
            if self.symmetrical and model == self.model:
                return None
        if self.related_name:
            return self.related_name
        if opts.default_related_name:
            return opts.default_related_name % {
                'model_name': opts.model_name.lower(),
                'app_label': opts.app_label.lower(),
            }
        return opts.model_name + ('_set' if self.multiple else '')
    def get_cache_name(self):
        # Name of the instance attribute used to cache the related object(s).
        return "_%s_cache" % self.get_accessor_name()
    def get_path_info(self):
        return self.field.get_reverse_path_info()
class ManyToOneRel(ForeignObjectRel):
    """
    Relation-metadata holder used by ForeignKey.
    ``_meta.get_fields()`` hands instances of this class out so callers can
    inspect field flags on the reverse side of the relation.
    Note: Because we somewhat abuse the Rel objects by using them as reverse
    fields we get the funny situation where
    ``ManyToOneRel.many_to_one == False`` and
    ``ManyToOneRel.one_to_many == True``. This is unfortunate but the actual
    ManyToOneRel class is a private API and there is work underway to turn
    reverse relations into actual fields.
    """
    def __init__(self, field, to, field_name, related_name=None, related_query_name=None,
                 limit_choices_to=None, parent_link=False, on_delete=None):
        super(ManyToOneRel, self).__init__(
            field, to, related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link, on_delete=on_delete)
        # Name of the field on the 'to' model this relation targets.
        self.field_name = field_name
    def __getstate__(self):
        # Drop the cached 'related_model' entry so it is re-resolved after
        # unpickling instead of being serialized.
        state = dict(self.__dict__)
        state.pop('related_model', None)
        return state
    def get_related_field(self):
        """
        Return the Field in the 'to' object to which this relationship is tied.
        """
        target = self.model._meta.get_field(self.field_name)
        if target.concrete:
            return target
        raise FieldDoesNotExist("No related field named '%s'" %
                                self.field_name)
    def set_field_name(self):
        # Fall back to the remote model's primary key once models are loaded.
        if not self.field_name:
            self.field_name = self.model._meta.pk.name
class OneToOneRel(ManyToOneRel):
    """
    Relation-metadata holder used by OneToOneField.
    ``_meta.get_fields()`` hands instances of this class out so callers can
    inspect field flags on the reverse side of the relation.
    """
    def __init__(self, field, to, field_name, related_name=None, related_query_name=None,
                 limit_choices_to=None, parent_link=False, on_delete=None):
        super(OneToOneRel, self).__init__(
            field, to, field_name, related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link, on_delete=on_delete)
        # The reverse accessor yields a single object, not a manager.
        self.multiple = False
class ManyToManyRel(ForeignObjectRel):
    """
    Relation-metadata holder used by ManyToManyField.
    ``_meta.get_fields()`` hands instances of this class out so callers can
    inspect field flags on the reverse side of the relation.
    """
    def __init__(self, field, to, related_name=None, related_query_name=None,
                 limit_choices_to=None, symmetrical=True, through=None, through_fields=None,
                 db_constraint=True):
        super(ManyToManyRel, self).__init__(
            field, to, related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to)
        # An explicit intermediary model and db_constraint=False are
        # mutually exclusive, and through_fields requires through.
        if through and not db_constraint:
            raise ValueError("Can't supply a through model and db_constraint=False")
        self.through = through
        if through_fields and not through:
            raise ValueError("Cannot specify through_fields without a through model")
        self.through_fields = through_fields
        self.symmetrical = symmetrical
        self.db_constraint = db_constraint
    def get_related_field(self):
        """
        Return the field in the 'to' object to which this relationship is tied.
        Provided for symmetry with ManyToOneRel.
        """
        opts = self.through._meta
        if self.through_fields:
            # Explicit through_fields: the first entry names the FK pointing
            # back at the source model.
            field = opts.get_field(self.through_fields[0])
        else:
            # Otherwise scan the through model for the FK targeting our model.
            for field in opts.fields:
                rel = getattr(field, 'remote_field', None)
                if rel and rel.model == self.model:
                    break
        return field.foreign_related_fields[0]
class ForeignObject(RelatedField):
    """
    Abstraction of the ForeignKey relation, supports multi-column relations.
    """
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    requires_unique_target = True
    related_accessor_class = ForeignRelatedObjectsDescriptor
    rel_class = ForeignObjectRel
    def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None,
                 related_query_name=None, limit_choices_to=None, parent_link=False,
                 swappable=True, **kwargs):
        # from_fields/to_fields: parallel lists naming the local and remote
        # fields that make up the (possibly multi-column) join.
        if rel is None:
            rel = self.rel_class(
                self, to,
                related_name=related_name,
                related_query_name=related_query_name,
                limit_choices_to=limit_choices_to,
                parent_link=parent_link,
                on_delete=on_delete,
            )
        super(ForeignObject, self).__init__(rel=rel, **kwargs)
        self.from_fields = from_fields
        self.to_fields = to_fields
        self.swappable = swappable
    def check(self, **kwargs):
        """Run system checks, adding the unique-target validation."""
        errors = super(ForeignObject, self).check(**kwargs)
        errors.extend(self._check_unique_target())
        return errors
    def _check_unique_target(self):
        # The target field(s) must carry a unique constraint so a local row
        # joins at most one remote row (check IDs fields.E310 / fields.E311).
        rel_is_string = isinstance(self.remote_field.model, six.string_types)
        if rel_is_string or not self.requires_unique_target:
            return []
        try:
            self.foreign_related_fields
        except FieldDoesNotExist:
            return []
        has_unique_field = any(rel_field.unique
                               for rel_field in self.foreign_related_fields)
        if not has_unique_field and len(self.foreign_related_fields) > 1:
            field_combination = ', '.join("'%s'" % rel_field.name
                                          for rel_field in self.foreign_related_fields)
            model_name = self.remote_field.model.__name__
            return [
                checks.Error(
                    "None of the fields %s on model '%s' have a unique=True constraint."
                    % (field_combination, model_name),
                    hint=None,
                    obj=self,
                    id='fields.E310',
                )
            ]
        elif not has_unique_field:
            field_name = self.foreign_related_fields[0].name
            model_name = self.remote_field.model.__name__
            return [
                checks.Error(
                    ("'%s.%s' must set unique=True "
                     "because it is referenced by a foreign key.") % (model_name, field_name),
                    hint=None,
                    obj=self,
                    id='fields.E311',
                )
            ]
        else:
            return []
    def deconstruct(self):
        """Return (name, path, args, kwargs) for migration serialization."""
        name, path, args, kwargs = super(ForeignObject, self).deconstruct()
        kwargs['on_delete'] = self.remote_field.on_delete
        kwargs['from_fields'] = self.from_fields
        kwargs['to_fields'] = self.to_fields
        if self.remote_field.related_name is not None:
            kwargs['related_name'] = self.remote_field.related_name
        if self.remote_field.related_query_name is not None:
            kwargs['related_query_name'] = self.remote_field.related_query_name
        if self.remote_field.parent_link:
            kwargs['parent_link'] = self.remote_field.parent_link
        # Work out string form of "to"
        if isinstance(self.remote_field.model, six.string_types):
            kwargs['to'] = self.remote_field.model
        else:
            kwargs['to'] = "%s.%s" % (self.remote_field.model._meta.app_label, self.remote_field.model._meta.object_name)
        # If swappable is True, then see if we're actually pointing to the target
        # of a swap.
        swappable_setting = self.swappable_setting
        if swappable_setting is not None:
            # If it's already a settings reference, error
            if hasattr(kwargs['to'], "setting_name"):
                if kwargs['to'].setting_name != swappable_setting:
                    raise ValueError(
                        "Cannot deconstruct a ForeignKey pointing to a model "
                        "that is swapped in place of more than one model (%s and %s)"
                        % (kwargs['to'].setting_name, swappable_setting)
                    )
            # Set it
            from django.db.migrations.writer import SettingsReference
            kwargs['to'] = SettingsReference(
                kwargs['to'],
                swappable_setting,
            )
        return name, path, args, kwargs
    def resolve_related_fields(self):
        """Pair each local field with its remote target field."""
        if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
            raise ValueError('Foreign Object from and to fields must be the same non-zero length')
        if isinstance(self.remote_field.model, six.string_types):
            raise ValueError('Related model %r cannot be resolved' % self.remote_field.model)
        related_fields = []
        for index in range(len(self.from_fields)):
            from_field_name = self.from_fields[index]
            to_field_name = self.to_fields[index]
            # 'self' names this very field; a to_field of None targets the
            # remote model's primary key.
            from_field = (self if from_field_name == 'self'
                          else self.opts.get_field(from_field_name))
            to_field = (self.remote_field.model._meta.pk if to_field_name is None
                        else self.remote_field.model._meta.get_field(to_field_name))
            related_fields.append((from_field, to_field))
        return related_fields
    @property
    def related_fields(self):
        # Lazily resolved list of (local_field, remote_field) pairs.
        if not hasattr(self, '_related_fields'):
            self._related_fields = self.resolve_related_fields()
        return self._related_fields
    @property
    def reverse_related_fields(self):
        return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
    @property
    def local_related_fields(self):
        return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
    @property
    def foreign_related_fields(self):
        return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
    def get_local_related_value(self, instance):
        return self.get_instance_value_for_fields(instance, self.local_related_fields)
    def get_foreign_related_value(self, instance):
        return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
    @staticmethod
    def get_instance_value_for_fields(instance, fields):
        """Read the values of ``fields`` from ``instance`` as a tuple."""
        ret = []
        opts = instance._meta
        for field in fields:
            # Gotcha: in some cases (like fixture loading) a model can have
            # different values in parent_ptr_id and parent's id. So, use
            # instance.pk (that is, parent_ptr_id) when asked for instance.id.
            if field.primary_key:
                possible_parent_link = opts.get_ancestor_link(field.model)
                if (not possible_parent_link or
                        possible_parent_link.primary_key or
                        possible_parent_link.model._meta.abstract):
                    ret.append(instance.pk)
                    continue
            ret.append(getattr(instance, field.attname))
        return tuple(ret)
    def get_attname_column(self):
        # A ForeignObject has no concrete column of its own; the component
        # fields carry the actual columns.
        attname, column = super(ForeignObject, self).get_attname_column()
        return attname, None
    def get_joining_columns(self, reverse_join=False):
        source = self.reverse_related_fields if reverse_join else self.related_fields
        return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
    def get_reverse_joining_columns(self):
        return self.get_joining_columns(reverse_join=True)
    def get_extra_descriptor_filter(self, instance):
        """
        Return an extra filter condition for related object fetching when
        user does 'instance.fieldname', that is the extra filter is used in
        the descriptor of the field.
        The filter should be either a dict usable in .filter(**kwargs) call or
        a Q-object. The condition will be ANDed together with the relation's
        joining columns.
        A parallel method is get_extra_restriction() which is used in
        JOIN and subquery conditions.
        """
        return {}
    def get_extra_restriction(self, where_class, alias, related_alias):
        """
        Return a pair condition used for joining and subquery pushdown. The
        condition is something that responds to as_sql(compiler, connection)
        method.
        Note that currently referring both the 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.
        A parallel method is get_extra_descriptor_filter() which is used in
        instance.fieldname related object fetching.
        """
        return None
    def get_path_info(self):
        """
        Get path from this field to the related model.
        """
        opts = self.remote_field.model._meta
        from_opts = self.model._meta
        return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
        return pathinfos
    def get_lookup(self, lookup_name):
        # Relational fields only support the comparison lookups below plus
        # 'isnull' (handled by the superclass); anything else is an error.
        if lookup_name == 'in':
            return RelatedIn
        elif lookup_name == 'exact':
            return RelatedExact
        elif lookup_name == 'gt':
            return RelatedGreaterThan
        elif lookup_name == 'gte':
            return RelatedGreaterThanOrEqual
        elif lookup_name == 'lt':
            return RelatedLessThan
        elif lookup_name == 'lte':
            return RelatedLessThanOrEqual
        elif lookup_name != 'isnull':
            raise TypeError('Related Field got invalid lookup: %s' % lookup_name)
        return super(ForeignObject, self).get_lookup(lookup_name)
    def get_transform(self, *args, **kwargs):
        raise NotImplementedError('Relational fields do not support transforms.')
    @property
    def attnames(self):
        return tuple(field.attname for field in self.local_related_fields)
    def get_defaults(self):
        return tuple(field.get_default() for field in self.local_related_fields)
    def contribute_to_class(self, cls, name, virtual_only=False):
        super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
        # Install the forward descriptor so instance.fieldname resolves the
        # related object.
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
    def contribute_to_related_class(self, cls, related):
        # Internal FK's - i.e., those with a related name ending with '+' -
        # and swapped models don't get a related descriptor.
        if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
        # While 'limit_choices_to' might be a callable, simply pass
        # it along for later - this is too early because it's still
        # model load time.
        if self.remote_field.limit_choices_to:
            cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to)
class ForeignKey(ForeignObject):
    """
    Provide a many-to-one relation by adding a column to the local model
    to hold the remote value.
    By default ForeignKey will target the pk of the remote model but this
    behavior can be changed by using the ``to_field`` argument.
    """
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    rel_class = ManyToOneRel
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")
    def __init__(self, to, on_delete=None, related_name=None, related_query_name=None,
                 limit_choices_to=None, parent_link=False, to_field=None,
                 db_constraint=True, **kwargs):
        try:
            to._meta.model_name
        except AttributeError:
            # 'to' is not a model class; it must then be a lazy reference
            # (a model name string or 'self').
            assert isinstance(to, six.string_types), (
                "%s(%r) is invalid. First parameter to ForeignKey must be "
                "either a model, a model name, or the string %r" % (
                    self.__class__.__name__, to,
                    RECURSIVE_RELATIONSHIP_CONSTANT,
                )
            )
        else:
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        if on_delete is None:
            # Deprecation path: on_delete becomes a required argument in
            # Django 2.0; default to CASCADE for now.
            warnings.warn(
                "on_delete will be a required arg for %s in Django 2.0. "
                "Set it to models.CASCADE if you want to maintain the current default behavior. "
                "See https://docs.djangoproject.com/en/%s/ref/models/fields/"
                "#django.db.models.ForeignKey.on_delete" % (
                    self.__class__.__name__,
                    get_docs_version(),
                ),
                RemovedInDjango20Warning, 2)
            on_delete = CASCADE
        elif not callable(on_delete):
            # Legacy positional signature ForeignKey(to, to_field): the second
            # positional arg is actually to_field, so swap the two values.
            warnings.warn(
                "The signature for {0} will change in Django 2.0. "
                "Pass to_field='{1}' as a kwarg instead of as an arg.".format(
                    self.__class__.__name__,
                    on_delete,
                ),
                RemovedInDjango20Warning, 2)
            on_delete, to_field = to_field, on_delete
        kwargs['rel'] = self.rel_class(
            self, to, to_field,
            related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link,
            on_delete=on_delete,
        )
        kwargs['db_index'] = kwargs.get('db_index', True)
        super(ForeignKey, self).__init__(
            to, on_delete, from_fields=['self'], to_fields=[to_field], **kwargs)
        self.db_constraint = db_constraint
    def check(self, **kwargs):
        """Run field checks plus the FK-specific on_delete/unique checks."""
        errors = super(ForeignKey, self).check(**kwargs)
        errors.extend(self._check_on_delete())
        errors.extend(self._check_unique())
        return errors
    def _check_on_delete(self):
        # SET_NULL requires null=True; SET_DEFAULT requires a default value.
        on_delete = getattr(self.remote_field, 'on_delete', None)
        if on_delete == SET_NULL and not self.null:
            return [
                checks.Error(
                    'Field specifies on_delete=SET_NULL, but cannot be null.',
                    hint='Set null=True argument on the field, or change the on_delete rule.',
                    obj=self,
                    id='fields.E320',
                )
            ]
        elif on_delete == SET_DEFAULT and not self.has_default():
            return [
                checks.Error(
                    'Field specifies on_delete=SET_DEFAULT, but has no default value.',
                    hint='Set a default value, or change the on_delete rule.',
                    obj=self,
                    id='fields.E321',
                )
            ]
        else:
            return []
    def _check_unique(self, **kwargs):
        # A unique FK is better expressed as a OneToOneField.
        return [
            checks.Warning(
                'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
                hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
                obj=self,
                id='fields.W342',
            )
        ] if self.unique else []
    def deconstruct(self):
        """Return migration-serializable arguments, trimming FK internals."""
        name, path, args, kwargs = super(ForeignKey, self).deconstruct()
        del kwargs['to_fields']
        del kwargs['from_fields']
        # Handle the simpler arguments
        if self.db_index:
            del kwargs['db_index']
        else:
            kwargs['db_index'] = False
        if self.db_constraint is not True:
            kwargs['db_constraint'] = self.db_constraint
        # Rel needs more work.
        to_meta = getattr(self.remote_field.model, "_meta", None)
        if self.remote_field.field_name and (not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)):
            kwargs['to_field'] = self.remote_field.field_name
        return name, path, args, kwargs
    @property
    def target_field(self):
        # The single remote field this FK points at.
        return self.foreign_related_fields[0]
    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
        return pathinfos
    def validate(self, value, model_instance):
        """Validate that ``value`` references an existing remote row."""
        if self.remote_field.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.remote_field.model._default_manager.using(using).filter(
            **{self.remote_field.field_name: value}
        )
        qs = qs.complex_filter(self.get_limit_choices_to())
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={
                    'model': self.remote_field.model._meta.verbose_name, 'pk': value,
                    'field': self.remote_field.field_name, 'value': value,
                }, # 'pk' is included for backwards compatibility
            )
    def get_attname(self):
        # The raw FK value lives on the "<name>_id" attribute.
        return '%s_id' % self.name
    def get_attname_column(self):
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.remote_field.model):
            return getattr(field_default, self.target_field.attname)
        return field_default
    def get_db_prep_save(self, value, connection):
        # Store NULL for empty strings when the target field can't hold them
        # or the backend treats '' as NULL.
        if value is None or (value == '' and
                             (not self.target_field.empty_strings_allowed or
                              connection.features.interprets_empty_strings_as_nulls)):
            return None
        else:
            return self.target_field.get_db_prep_save(value, connection=connection)
    def get_db_prep_value(self, value, connection, prepared=False):
        return self.target_field.get_db_prep_value(value, connection, prepared)
    def value_to_string(self, obj):
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return super(ForeignKey, self).value_to_string(obj)
    def contribute_to_related_class(self, cls, related):
        super(ForeignKey, self).contribute_to_related_class(cls, related)
        # Now that the related model is resolved, default field_name to its pk.
        if self.remote_field.field_name is None:
            self.remote_field.field_name = cls._meta.pk.name
    def formfield(self, **kwargs):
        """Return a ModelChoiceField querying the related model."""
        db = kwargs.pop('using', None)
        if isinstance(self.remote_field.model, six.string_types):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.remote_field.model))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.remote_field.model._default_manager.using(db),
            'to_field_name': self.remote_field.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)
    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.target_field
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                 isinstance(rel_field, (PositiveIntegerField,
                                        PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
    def db_parameters(self, connection):
        return {"type": self.db_type(connection), "check": []}
    def convert_empty_strings(self, value, expression, connection, context):
        # Backends that store '' as NULL hand '' back; normalize to None.
        if (not value) and isinstance(value, six.string_types):
            return None
        return value
    def get_db_converters(self, connection):
        converters = super(ForeignKey, self).get_db_converters(connection)
        if connection.features.interprets_empty_strings_as_nulls:
            converters += [self.convert_empty_strings]
        return converters
    def get_col(self, alias, output_field=None):
        # Column expression typed like the target field by default.
        return super(ForeignKey, self).get_col(alias, output_field or self.target_field)
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse
    relation always returns the object pointed to (since there will only ever
    be one), rather than returning a list.
    """
    # Field flags
    many_to_many = False
    many_to_one = False
    one_to_many = False
    one_to_one = True
    related_accessor_class = SingleRelatedObjectDescriptor
    rel_class = OneToOneRel
    description = _("One-to-one relationship")
    def __init__(self, to, on_delete=None, to_field=None, **kwargs):
        # The unique constraint is what distinguishes this from a ForeignKey.
        kwargs['unique'] = True
        if on_delete is None:
            # Deprecation path: on_delete becomes a required argument in
            # Django 2.0; default to CASCADE for now.
            warnings.warn(
                "on_delete will be a required arg for %s in Django 2.0. "
                "Set it to models.CASCADE if you want to maintain the current default behavior. "
                "See https://docs.djangoproject.com/en/%s/ref/models/fields/"
                "#django.db.models.ForeignKey.on_delete" % (
                    self.__class__.__name__,
                    get_docs_version(),
                ),
                RemovedInDjango20Warning, 2)
            on_delete = CASCADE
        elif not callable(on_delete):
            # Legacy positional signature OneToOneField(to, to_field): the
            # second positional arg is actually to_field.
            warnings.warn(
                "The signature for {0} will change in Django 2.0. "
                "Pass to_field='{1}' as a kwarg instead of as an arg.".format(
                    self.__class__.__name__,
                    on_delete,
                ),
                RemovedInDjango20Warning, 2)
            to_field = on_delete
            on_delete = CASCADE  # Avoid warning in superclass
        super(OneToOneField, self).__init__(to, on_delete, to_field=to_field, **kwargs)
    def deconstruct(self):
        name, path, args, kwargs = super(OneToOneField, self).deconstruct()
        # 'unique' is implied by the field type; don't serialize it.
        if "unique" in kwargs:
            del kwargs['unique']
        return name, path, args, kwargs
    def formfield(self, **kwargs):
        # Parent links are managed by MTI machinery and get no form field.
        if self.remote_field.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)
    def save_form_data(self, instance, data):
        # Accept either a model instance or a raw pk value from the form.
        if isinstance(data, self.remote_field.model):
            setattr(instance, self.name, data)
        else:
            setattr(instance, self.attname, data)
    def _check_unique(self, **kwargs):
        # Override ForeignKey since check isn't applicable here.
        return []
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the automatic "through" model for a ManyToManyField declared
    without an explicit intermediary model.
    field: the ManyToManyField; klass: the model declaring it.
    """
    from django.db import models
    def set_managed(model, related, through):
        # The through table is managed iff either end of the relation is.
        through._meta.managed = model._meta.managed or related._meta.managed
    to_model = resolve_relation(klass, field.remote_field.model)
    name = '%s_%s' % (klass._meta.object_name, field.name)
    # Deferred until both models are loaded, since 'to_model' may be lazy.
    lazy_related_operation(set_managed, klass, to_model, name)
    to = make_model_tuple(to_model)[1]
    from_ = klass._meta.model_name
    if to == from_:
        # Self-referential m2m: disambiguate the two FK attribute names.
        to = 'to_%s' % to
        from_ = 'from_%s' % from_
    meta = type(str('Meta'), (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
        'apps': field.model._meta.apps,
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(
            klass,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.remote_field.db_constraint,
            on_delete=CASCADE,
        ),
        to: models.ForeignKey(
            to_model,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.remote_field.db_constraint,
            on_delete=CASCADE,
        )
    })
class ManyToManyField(RelatedField):
    """
    Provide a many-to-many relation by using an intermediary model that
    holds two ForeignKey fields pointed at the two sides of the relation.

    Unless a ``through`` model was provided, ManyToManyField will use the
    create_many_to_many_intermediary_model factory to automatically generate
    the intermediary model.
    """

    # Field flags
    many_to_many = True
    many_to_one = False
    one_to_many = False
    one_to_one = False

    rel_class = ManyToManyRel

    description = _("Many-to-many relationship")

    def __init__(self, to, related_name=None, related_query_name=None,
                 limit_choices_to=None, symmetrical=None, through=None,
                 through_fields=None, db_constraint=True, db_table=None,
                 swappable=True, **kwargs):
        # ``to`` may be a model class, a model name string, or "self".
        try:
            to._meta
        except AttributeError:
            assert isinstance(to, six.string_types), (
                "%s(%r) is invalid. First parameter to ManyToManyField must be "
                "either a model, a model name, or the string %r" %
                (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            )
            # Class names must be ASCII in Python 2.x, so we forcibly coerce it
            # here to break early if there's a problem.
            to = str(to)

        # Self-referential relations default to symmetrical.
        if symmetrical is None:
            symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT)

        if through is not None:
            assert db_table is None, (
                "Cannot specify a db_table if an intermediary model is used."
            )

        kwargs['rel'] = self.rel_class(
            self, to,
            related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            symmetrical=symmetrical,
            through=through,
            through_fields=through_fields,
            db_constraint=db_constraint,
        )
        # Remembered so _check_ignored_options() can warn about a useless
        # null=... argument.
        self.has_null_arg = 'null' in kwargs

        super(ManyToManyField, self).__init__(**kwargs)

        self.db_table = db_table
        self.swappable = swappable

    def check(self, **kwargs):
        # Combine the base Field checks with the m2m-specific ones below.
        errors = super(ManyToManyField, self).check(**kwargs)
        errors.extend(self._check_unique(**kwargs))
        errors.extend(self._check_relationship_model(**kwargs))
        errors.extend(self._check_ignored_options(**kwargs))
        return errors

    def _check_unique(self, **kwargs):
        # unique=True makes no sense for a relation stored in a separate table.
        if self.unique:
            return [
                checks.Error(
                    'ManyToManyFields cannot be unique.',
                    hint=None,
                    obj=self,
                    id='fields.E330',
                )
            ]
        return []

    def _check_ignored_options(self, **kwargs):
        # Warn about options that are accepted but silently ignored on m2m.
        warnings = []

        if self.has_null_arg:
            warnings.append(
                checks.Warning(
                    'null has no effect on ManyToManyField.',
                    hint=None,
                    obj=self,
                    id='fields.W340',
                )
            )

        if len(self._validators) > 0:
            warnings.append(
                checks.Warning(
                    'ManyToManyField does not support validators.',
                    hint=None,
                    obj=self,
                    id='fields.W341',
                )
            )

        return warnings

    def _check_relationship_model(self, from_model=None, **kwargs):
        # Validate the intermediary ("through") model: installed, correct
        # number of foreign keys, and valid ``through_fields`` if given.
        if hasattr(self.remote_field.through, '_meta'):
            qualified_model_name = "%s.%s" % (
                self.remote_field.through._meta.app_label, self.remote_field.through.__name__)
        else:
            qualified_model_name = self.remote_field.through

        errors = []

        if self.remote_field.through not in apps.get_models(include_auto_created=True):
            # The relationship model is not installed.
            errors.append(
                checks.Error(
                    ("Field specifies a many-to-many relation through model "
                     "'%s', which has not been installed.") %
                    qualified_model_name,
                    hint=None,
                    obj=self,
                    id='fields.E331',
                )
            )
        else:
            assert from_model is not None, (
                "ManyToManyField with intermediate "
                "tables cannot be checked if you don't pass the model "
                "where the field is attached to."
            )

            # Set some useful local variables
            to_model = resolve_relation(from_model, self.remote_field.model)
            from_model_name = from_model._meta.object_name
            if isinstance(to_model, six.string_types):
                to_model_name = to_model
            else:
                to_model_name = to_model._meta.object_name
            relationship_model_name = self.remote_field.through._meta.object_name
            self_referential = from_model == to_model

            # Check symmetrical attribute.
            if (self_referential and self.remote_field.symmetrical and
                    not self.remote_field.through._meta.auto_created):
                errors.append(
                    checks.Error(
                        'Many-to-many fields with intermediate tables must not be symmetrical.',
                        hint=None,
                        obj=self,
                        id='fields.E332',
                    )
                )

            # Count foreign keys in intermediate model
            if self_referential:
                seen_self = sum(from_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields)

                if seen_self > 2 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than two foreign keys "
                             "to '%s', which is ambiguous. You must specify "
                             "which two foreign keys Django should use via the "
                             "through_fields keyword argument.") % (self, from_model_name),
                            hint=("Use through_fields to specify which two "
                                  "foreign keys Django should use."),
                            obj=self.remote_field.through,
                            id='fields.E333',
                        )
                    )
            else:
                # Count foreign keys in relationship model
                seen_from = sum(from_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields)
                seen_to = sum(to_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields)

                if seen_from > 1 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than one foreign key "
                             "from '%s', which is ambiguous. You must specify "
                             "which foreign key Django should use via the "
                             "through_fields keyword argument.") % (self, from_model_name),
                            hint=('If you want to create a recursive relationship, '
                                  'use ForeignKey("self", symmetrical=False, '
                                  'through="%s").') % relationship_model_name,
                            obj=self,
                            id='fields.E334',
                        )
                    )

                if seen_to > 1 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than one foreign key "
                             "to '%s', which is ambiguous. You must specify "
                             "which foreign key Django should use via the "
                             "through_fields keyword argument.") % (self, to_model_name),
                            hint=('If you want to create a recursive '
                                  'relationship, use ForeignKey("self", '
                                  'symmetrical=False, through="%s").') % relationship_model_name,
                            obj=self,
                            id='fields.E335',
                        )
                    )

                if seen_from == 0 or seen_to == 0:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it does not have a foreign key to '%s' or '%s'.") % (
                                self, from_model_name, to_model_name
                            ),
                            hint=None,
                            obj=self.remote_field.through,
                            id='fields.E336',
                        )
                    )

        # Validate `through_fields`.
        if self.remote_field.through_fields is not None:
            # Validate that we're given an iterable of at least two items
            # and that none of them is "falsy".
            if not (len(self.remote_field.through_fields) >= 2 and
                    self.remote_field.through_fields[0] and self.remote_field.through_fields[1]):
                errors.append(
                    checks.Error(
                        ("Field specifies 'through_fields' but does not "
                         "provide the names of the two link fields that should be "
                         "used for the relation through model "
                         "'%s'.") % qualified_model_name,
                        hint=("Make sure you specify 'through_fields' as "
                              "through_fields=('field1', 'field2')"),
                        obj=self,
                        id='fields.E337',
                    )
                )

            # Validate the given through fields -- they should be actual
            # fields on the through model, and also be foreign keys to the
            # expected models.
            else:
                assert from_model is not None, (
                    "ManyToManyField with intermediate "
                    "tables cannot be checked if you don't pass the model "
                    "where the field is attached to."
                )

                source, through, target = from_model, self.remote_field.through, self.remote_field.model
                source_field_name, target_field_name = self.remote_field.through_fields[:2]

                for field_name, related_model in ((source_field_name, source),
                                                  (target_field_name, target)):

                    # Collect FK names pointing at the expected model so a
                    # helpful "did you mean" hint can be offered on failure.
                    possible_field_names = []
                    for f in through._meta.fields:
                        if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model:
                            possible_field_names.append(f.name)
                    if possible_field_names:
                        hint = ("Did you mean one of the following foreign "
                                "keys to '%s': %s?") % (related_model._meta.object_name,
                                                        ', '.join(possible_field_names))
                    else:
                        hint = None

                    try:
                        field = through._meta.get_field(field_name)
                    except FieldDoesNotExist:
                        errors.append(
                            checks.Error(
                                ("The intermediary model '%s' has no field '%s'.") % (
                                    qualified_model_name, field_name),
                                hint=hint,
                                obj=self,
                                id='fields.E338',
                            )
                        )
                    else:
                        if not (hasattr(field, 'remote_field') and
                                getattr(field.remote_field, 'model', None) == related_model):
                            errors.append(
                                checks.Error(
                                    "'%s.%s' is not a foreign key to '%s'." % (
                                        through._meta.object_name, field_name,
                                        related_model._meta.object_name),
                                    hint=hint,
                                    obj=self,
                                    id='fields.E339',
                                )
                            )

        return errors

    def deconstruct(self):
        # Return (name, path, args, kwargs) so migrations can recreate the
        # field; only non-default options are emitted.
        name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
        # Handle the simpler arguments.
        if self.db_table is not None:
            kwargs['db_table'] = self.db_table
        if self.remote_field.db_constraint is not True:
            kwargs['db_constraint'] = self.remote_field.db_constraint
        if self.remote_field.related_name is not None:
            kwargs['related_name'] = self.remote_field.related_name
        if self.remote_field.related_query_name is not None:
            kwargs['related_query_name'] = self.remote_field.related_query_name
        # Rel needs more work.
        if isinstance(self.remote_field.model, six.string_types):
            kwargs['to'] = self.remote_field.model
        else:
            kwargs['to'] = "%s.%s" % (
                self.remote_field.model._meta.app_label,
                self.remote_field.model._meta.object_name,
            )
        if getattr(self.remote_field, 'through', None) is not None:
            if isinstance(self.remote_field.through, six.string_types):
                kwargs['through'] = self.remote_field.through
            elif not self.remote_field.through._meta.auto_created:
                kwargs['through'] = "%s.%s" % (
                    self.remote_field.through._meta.app_label,
                    self.remote_field.through._meta.object_name,
                )
        # If swappable is True, then see if we're actually pointing to the target
        # of a swap.
        swappable_setting = self.swappable_setting
        if swappable_setting is not None:
            # If it's already a settings reference, error.
            if hasattr(kwargs['to'], "setting_name"):
                if kwargs['to'].setting_name != swappable_setting:
                    raise ValueError(
                        "Cannot deconstruct a ManyToManyField pointing to a "
                        "model that is swapped in place of more than one model "
                        "(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
                    )

            from django.db.migrations.writer import SettingsReference

            kwargs['to'] = SettingsReference(
                kwargs['to'],
                swappable_setting,
            )
        return name, path, args, kwargs

    def _get_path_info(self, direct=False):
        """
        Called by both direct and indirect m2m traversal.

        Joins through the intermediary model: one hop from the source to the
        through table, a second hop from the through table to the target.
        """
        pathinfos = []
        int_model = self.remote_field.through
        linkfield1 = int_model._meta.get_field(self.m2m_field_name())
        linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
        if direct:
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info()
        else:
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info()

        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos

    def get_path_info(self):
        return self._get_path_info(direct=True)

    def get_reverse_path_info(self):
        return self._get_path_info(direct=False)

    def get_choices_default(self):
        # m2m choices never include the blank option.
        return Field.get_choices(self, include_blank=False)

    def _get_m2m_db_table(self, opts):
        """
        Function that can be curried to provide the m2m table name for this
        relation.
        """
        if self.remote_field.through is not None:
            return self.remote_field.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Auto-generate and truncate to the backend's identifier limit.
            return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
                                       connection.ops.max_name_length())

    def _get_m2m_attr(self, related, attr):
        """
        Function that can be curried to provide the source accessor or DB
        column name for the m2m table.
        """
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        if self.remote_field.through_fields is not None:
            link_field_name = self.remote_field.through_fields[0]
        else:
            link_field_name = None
        for f in self.remote_field.through._meta.fields:
            if (f.is_relation and f.remote_field.model == related.related_model and
                    (link_field_name is None or link_field_name == f.name)):
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)

    def _get_m2m_reverse_attr(self, related, attr):
        """
        Function that can be curried to provide the related accessor or DB
        column name for the m2m table.
        """
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        if self.remote_field.through_fields is not None:
            link_field_name = self.remote_field.through_fields[1]
        else:
            link_field_name = None
        for f in self.remote_field.through._meta.fields:
            if f.is_relation and f.remote_field.model == related.model:
                if link_field_name is None and related.related_model == related.model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                elif link_field_name is None or link_field_name == f.name:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)

    def value_to_string(self, obj):
        # Serialize the relation as the list of related primary keys.
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_text(data)

    def contribute_to_class(self, cls, name, **kwargs):
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.remote_field.symmetrical and (
                self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
            self.remote_field.related_name = "%s_rel_+" % name
        elif self.remote_field.is_hidden():
            # If the backwards relation is disabled, replace the original
            # related_name with one generated from the m2m field name. Django
            # still uses backwards relations internally and we need to avoid
            # clashes between multiple m2m fields with related_name == '+'.
            self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)

        super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)

        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        #  3) The class owning the m2m field has been swapped out.
        if not cls._meta.abstract:
            if self.remote_field.through:
                def resolve_through_model(_, model, field):
                    field.remote_field.through = model
                lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self)
            elif not cls._meta.swapped:
                self.remote_field.through = create_many_to_many_intermediary_model(self, cls)

        # Add the descriptor for the m2m relation.
        setattr(cls, self.name, ManyRelatedObjectsDescriptor(self.remote_field, reverse=False))

        # Set up the accessor for the m2m table name for the relation.
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)

    def contribute_to_related_class(self, cls, related):
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(self.remote_field, reverse=True))

        # Set up the accessors for the column names on the m2m table.
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')

        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')

        get_m2m_rel = curry(self._get_m2m_attr, related, 'remote_field')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'remote_field')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name

    def set_attributes_from_rel(self):
        # Intentionally a no-op for m2m; there is no local column to mirror.
        pass

    def value_from_object(self, obj):
        """
        Return the value of this field in the given model instance.
        """
        return getattr(obj, self.attname).all()

    def save_form_data(self, instance, data):
        # Plain attribute assignment; the attribute is the m2m descriptor
        # installed by contribute_to_class().
        setattr(instance, self.attname, data)

    def formfield(self, **kwargs):
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.remote_field.model._default_manager.using(db),
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)

    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None

    def db_parameters(self, connection):
        # No direct column, hence no column type or check constraint.
        return {"type": None, "check": None}
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_scheduling_policy_facts
short_description: Retrieve facts about one or more oVirt scheduling policies
author: "Ondra Machacek (@machacekondra)"
version_added: "2.4"
description:
- "Retrieve facts about one or more oVirt scheduling policies."
notes:
- "This module creates a new top-level C(ovirt_scheduling_policies) fact,
which contains a list of scheduling policies."
options:
id:
description:
- "ID of the scheduling policy."
required: true
name:
description:
- "Name of the scheduling policy, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all scheduling policies with name InClusterUpgrade:
- ovirt_scheduling_policy_facts:
name: InClusterUpgrade
- debug:
var: ovirt_scheduling_policies
'''
RETURN = '''
ovirt_scheduling_policies:
description: "List of dictionaries describing the scheduling policies.
Scheduling policies attributes are mapped to dictionary keys,
all scheduling policies attributes can be found at following
url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Entry point: look up oVirt scheduling policies and return them as facts.

    Selection: `name` filters with a glob expression, `id` fetches one policy,
    otherwise all policies are listed. Exits via module.exit_json/fail_json.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        id=dict(default=None),
        name=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Initialize before the try block so the finally clause can tell whether
    # a connection was actually established. Previously, if create_connection()
    # (or the 'auth' pop) raised, `connection` was unbound and the finally
    # clause raised NameError, masking the original error.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        system_service = connection.system_service()
        sched_policies_service = system_service.scheduling_policies_service()
        if module.params['name']:
            # `name` is treated as a glob pattern; filter client-side.
            sched_policies = [
                e for e in sched_policies_service.list()
                if fnmatch.fnmatch(e.name, module.params['name'])
            ]
        elif module.params['id']:
            sched_policies = [
                sched_policies_service.service(module.params['id']).get()
            ]
        else:
            sched_policies = sched_policies_service.list()
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_scheduling_policies=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in sched_policies
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was successfully opened; log out unless
        # the caller supplied a pre-existing token.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import forms
# (value, label) choices for the geometry type of a newly created layer.
GEOMETRY_TYPES = (
    ('Point', 'Points'),
    ('LineString', 'Lines'),
    ('Polygon', 'Polygons'),
)
class NewLayerForm(forms.Form):
    """Collect the details needed to create an empty layer in PostGIS.

    ``permissions`` and ``attributes`` arrive as hidden inputs populated
    client-side; both are required.
    """

    name = forms.CharField(label='Layer name', max_length=255)
    title = forms.CharField(label='Layer title', max_length=255)
    geometry_type = forms.ChoiceField(choices=GEOMETRY_TYPES)
    permissions = forms.CharField(
        required=True,
        widget=forms.HiddenInput(attrs={'name': 'permissions', 'id': 'permissions'}),
    )
    attributes = forms.CharField(
        required=True,
        widget=forms.HiddenInput(attrs={'name': 'attributes', 'id': 'attributes'}),
    )
# mysql/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect: the mysqldb (MySQL-python) DBAPI implementation.
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
# Public names re-exported by this package. Deduplicated: the original tuple
# listed 'DECIMAL' and 'INTEGER' twice each (duplicates are harmless to
# `import *` but misleading).
__all__ = (
    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
    'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
    'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
    'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
    'YEAR', 'dialect'
)
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local daemon's JSON-RPC port (9332), embedding the
# credentials in the URL only when a password is configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Sapphire address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Sapphire address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | unknown | codeparrot/codeparrot-clean | ||
import os
from functools import wraps
from django.template.engine import Engine
from django.test.utils import override_settings
from django.utils.safestring import mark_safe
# Absolute path of this package, and of its bundled "templates" directory.
ROOT = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIR = os.path.join(ROOT, "templates")
def setup(templates, *args, test_once=False, debug_only=False):
    """
    Runs test method multiple times in the following order:

    debug       cached      string_if_invalid
    -----       ------      -----------------
    False       False
    False       True
    False       False       INVALID
    False       True        INVALID
    True        False
    True        True

    Use test_once=True to test deprecation warnings since the message won't be
    displayed multiple times.
    """
    # Merge any extra template dicts into the primary one.
    for arg in args:
        templates.update(arg)

    # numerous tests make use of an inclusion tag
    # add this in here for simplicity
    templates["inclusion.html"] = "{{ result }}"

    # Wrap the in-memory loader in the cached loader; calling the test twice
    # per engine exercises both the cold and the warm cache.
    loaders = [
        (
            "django.template.loaders.cached.Loader",
            [
                ("django.template.loaders.locmem.Loader", templates),
            ],
        ),
    ]

    def decorator(func):
        # Make Engine.get_default() raise an exception to ensure that tests
        # are properly isolated from Django's global settings.
        @override_settings(TEMPLATES=None)
        @wraps(func)
        def inner(self):
            # Set up custom template tag libraries if specified
            libraries = getattr(self, "libraries", {})

            # Passes 1-2: plain engine (debug only when debug_only is set).
            self.engine = Engine(
                libraries=libraries,
                loaders=loaders,
                debug=debug_only,
            )
            func(self)
            if test_once:
                return
            func(self)

            if debug_only:
                return

            # Passes 3-4: render invalid variables as "INVALID".
            self.engine = Engine(
                libraries=libraries,
                loaders=loaders,
                string_if_invalid="INVALID",
            )
            func(self)
            func(self)

            # Passes 5-6: debug-enabled engine.
            self.engine = Engine(
                debug=True,
                libraries=libraries,
                loaders=loaders,
            )
            func(self)
            func(self)

        return inner

    return decorator
# Helper objects
class SomeException(Exception):
    # silent_variable_failure marks the exception as one the template
    # engine should swallow rather than propagate.
    silent_variable_failure = True
class SomeOtherException(Exception):
    # Deliberately NOT silent: used to verify that ordinary exceptions
    # propagate out of template rendering.
    pass
class ShouldNotExecuteException(Exception):
    # Raised by helpers that a correct test run must never invoke.
    pass
class SomeClass:
    """Helper exposing methods, items, and properties that either succeed,
    fail silently (SomeException), or fail loudly (SomeOtherException /
    builtin exceptions)."""

    def __init__(self):
        self.otherclass = OtherClass()

    def method(self):
        return "SomeClass.method"

    def method2(self, o):
        # Echoes its argument unchanged.
        return o

    def method3(self):
        raise SomeException

    def method4(self):
        raise SomeOtherException

    def method5(self):
        raise TypeError

    def __getitem__(self, key):
        # Item access fails in different ways depending on the key.
        if key == "silent_fail_key":
            raise SomeException
        elif key == "noisy_fail_key":
            raise SomeOtherException
        raise KeyError

    @property
    def silent_fail_attribute(self):
        raise SomeException

    @property
    def noisy_fail_attribute(self):
        raise SomeOtherException

    @property
    def attribute_error_attribute(self):
        raise AttributeError

    @property
    def type_error_attribute(self):
        raise TypeError
class OtherClass:
    """Companion helper whose single method returns a marker string."""

    def method(self):
        return "OtherClass.method"
class TestObj:
    """Truth-valued helper; is_bad must never actually be called."""

    def is_true(self):
        return True

    def is_false(self):
        return False

    def is_bad(self):
        raise ShouldNotExecuteException()
class SilentGetItemClass:
    # Every subscript access raises SomeException, which carries
    # silent_variable_failure and is therefore swallowed by the engine.
    def __getitem__(self, key):
        raise SomeException
class SilentAttrClass:
    """Accessing ``b`` raises SomeException (silenced by the engine)."""

    @property
    def b(self):
        raise SomeException
class UTF8Class:
    """Helper whose __str__ yields non-ASCII text."""

    def __str__(self):
        return "ŠĐĆŽćžšđ"
# These two classes are used to test auto-escaping of string output.
class UnsafeClass:
    """String output contains '&' and must therefore be autoescaped."""

    def __str__(self):
        return "you & me"
class SafeClass:
    # Returns a string already marked safe, so autoescaping must leave the
    # '>' character untouched.
    def __str__(self):
        return mark_safe("you > me")
# This file is part of Androguard.
#
# Copyright (C) 2012, Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from struct import pack, unpack
import androguard.decompiler.dad.util as util
from androguard.decompiler.dad.instruction import (ArrayLengthExpression,
ArrayLoadExpression, ArrayStoreInstruction,
AssignExpression, BaseClass, BinaryCompExpression,
BinaryExpression, BinaryExpression2Addr,
BinaryExpressionLit, CastExpression,
CheckCastExpression, ConditionalExpression,
ConditionalZExpression, Constant,
FillArrayExpression, FilledArrayExpression,
InstanceExpression, InstanceInstruction,
InvokeInstruction, InvokeDirectInstruction,
InvokeRangeInstruction, InvokeStaticInstruction,
MonitorEnterExpression, MonitorExitExpression,
MoveExceptionExpression, MoveExpression,
MoveResultExpression, NewArrayExpression,
NewInstance, NopExpression, ThrowExpression,
Variable, ReturnInstruction, StaticExpression,
StaticInstruction, SwitchExpression, ThisParam,
UnaryExpression)
logger = logging.getLogger('dad.opcode_ins')
class Op(object):
    """Operator mnemonics used when building decompiled expressions."""
    CMP = 'cmp'
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    MOD = '%'
    AND = '&'
    OR = '|'
    XOR = '^'
    EQUAL = '=='
    NEQUAL = '!='
    GREATER = '>'
    LOWER = '<'
    GEQUAL = '>='
    LEQUAL = '<='
    NEG = '-'
    NOT = '~'
    # Shift distances are masked by Dalvik (0x1f for ints, 0x3f for longs);
    # the rendered form keeps the plain operator.
    INTSHL = '<<'   # '(%s << ( %s & 0x1f ))'
    INTSHR = '>>'   # '(%s >> ( %s & 0x1f ))'
    LONGSHL = '<<'  # '(%s << ( %s & 0x3f ))'
    LONGSHR = '>>'  # '(%s >> ( %s & 0x3f ))'
def get_variables(vmap, *variables):
    """Resolve register numbers to Variable objects via ``vmap``.

    A missing register gets a fresh Variable recorded in ``vmap``. A single
    requested register is returned unwrapped; otherwise a list is returned.
    """
    resolved = [vmap.setdefault(reg, Variable(reg)) for reg in variables]
    return resolved[0] if len(resolved) == 1 else resolved
def assign_const(dest_reg, cst, vmap):
    # Build `v<dest_reg> = cst` as an assignment expression.
    return AssignExpression(get_variables(vmap, dest_reg), cst)
def assign_cmp(val_a, val_b, val_c, cmp_type, vmap):
    # vAA = vBB cmp vCC; cmp_type is the operand type ('F', 'D' or 'J'
    # at the call sites in this module).
    reg_a, reg_b, reg_c = get_variables(vmap, val_a, val_b, val_c)
    exp = BinaryCompExpression(Op.CMP, reg_b, reg_c, cmp_type)
    return AssignExpression(reg_a, exp)
def load_array_exp(val_a, val_b, val_c, ar_type, vmap):
    # vAA = vBB[vCC]; ar_type is the element-type hint (None for plain aget).
    reg_a, reg_b, reg_c = get_variables(vmap, val_a, val_b, val_c)
    return AssignExpression(reg_a, ArrayLoadExpression(reg_b, reg_c, ar_type))
def store_array_inst(val_a, val_b, val_c, ar_type, vmap):
    # vBB[vCC] = vAA; ar_type is the element-type hint (None for plain aput).
    reg_a, reg_b, reg_c = get_variables(vmap, val_a, val_b, val_c)
    return ArrayStoreInstruction(reg_a, reg_b, reg_c, ar_type)
def assign_cast_exp(val_a, val_b, val_op, op_type, vmap):
    # vA = (op_type) vB, with val_op naming the conversion opcode.
    reg_a, reg_b = get_variables(vmap, val_a, val_b)
    return AssignExpression(reg_a, CastExpression(val_op, op_type, reg_b))
def assign_binary_exp(ins, val_op, op_type, vmap):
    # Three-address binary form: vAA = vBB <val_op> vCC.
    reg_a, reg_b, reg_c = get_variables(vmap, ins.AA, ins.BB, ins.CC)
    return AssignExpression(reg_a, BinaryExpression(val_op, reg_b,
                                                    reg_c, op_type))
def assign_binary_2addr_exp(ins, val_op, op_type, vmap):
    # Two-address binary form: vA = vA <val_op> vB.
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return AssignExpression(reg_a, BinaryExpression2Addr(val_op, reg_a,
                                                         reg_b, op_type))
def assign_lit(op_type, val_cst, val_a, val_b, vmap):
    # Binary op with an integer literal: vA = vB <op_type> #val_cst.
    cst = Constant(val_cst, 'I')
    var_a, var_b = get_variables(vmap, val_a, val_b)
    return AssignExpression(var_a, BinaryExpressionLit(op_type, var_b, cst))
# nop
def nop(ins, vmap):
return NopExpression()
# move vA, vB ( 4b, 4b )
def move(ins, vmap):
logger.debug('Move %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
return MoveExpression(reg_a, reg_b)
# move/from16 vAA, vBBBB ( 8b, 16b )
def movefrom16(ins, vmap):
logger.debug('MoveFrom16 %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
return MoveExpression(reg_a, reg_b)
# move/16 vAAAA, vBBBB ( 16b, 16b )
def move16(ins, vmap):
logger.debug('Move16 %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
return MoveExpression(reg_a, reg_b)
# move-wide vA, vB ( 4b, 4b )
def movewide(ins, vmap):
logger.debug('MoveWide %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
return MoveExpression(reg_a, reg_b)
# move-wide/from16 vAA, vBBBB ( 8b, 16b )
def movewidefrom16(ins, vmap):
logger.debug('MoveWideFrom16 : %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
return MoveExpression(reg_a, reg_b)
# move-wide/16 vAAAA, vBBBB ( 16b, 16b )
def movewide16(ins, vmap):
logger.debug('MoveWide16 %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
return MoveExpression(reg_a, reg_b)
# move-object vA, vB ( 4b, 4b )
def moveobject(ins, vmap):
logger.debug('MoveObject %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
return MoveExpression(reg_a, reg_b)
# move-object/from16 vAA, vBBBB ( 8b, 16b )
def moveobjectfrom16(ins, vmap):
logger.debug('MoveObjectFrom16 : %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
return MoveExpression(reg_a, reg_b)
# move-object/16 vAAAA, vBBBB ( 16b, 16b )
def moveobject16(ins, vmap):
logger.debug('MoveObject16 : %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
return MoveExpression(reg_a, reg_b)
# move-result vAA ( 8b )
def moveresult(ins, vmap, ret):
logger.debug('MoveResult : %s', ins.get_output())
return MoveResultExpression(get_variables(vmap, ins.AA), ret)
# move-result-wide vAA ( 8b )
def moveresultwide(ins, vmap, ret):
logger.debug('MoveResultWide : %s', ins.get_output())
return MoveResultExpression(get_variables(vmap, ins.AA), ret)
# move-result-object vAA ( 8b )
def moveresultobject(ins, vmap, ret):
logger.debug('MoveResultObject : %s', ins.get_output())
return MoveResultExpression(get_variables(vmap, ins.AA), ret)
# move-exception vAA ( 8b )
def moveexception(ins, vmap, _type):
logger.debug('MoveException : %s', ins.get_output())
return MoveExceptionExpression(get_variables(vmap, ins.AA), _type)
# return-void
def returnvoid(ins, vmap):
logger.debug('ReturnVoid')
return ReturnInstruction(None)
# return vAA ( 8b )
def return_reg(ins, vmap):
logger.debug('Return : %s', ins.get_output())
return ReturnInstruction(get_variables(vmap, ins.AA))
# return-wide vAA ( 8b )
def returnwide(ins, vmap):
logger.debug('ReturnWide : %s', ins.get_output())
return ReturnInstruction(get_variables(vmap, ins.AA))
# return-object vAA ( 8b )
def returnobject(ins, vmap):
logger.debug('ReturnObject : %s', ins.get_output())
return ReturnInstruction(get_variables(vmap, ins.AA))
# const/4 vA, #+B ( 4b, 4b )
def const4(ins, vmap):
logger.debug('Const4 : %s', ins.get_output())
cst = Constant(ins.B, 'I')
return assign_const(ins.A, cst, vmap)
# const/16 vAA, #+BBBB ( 8b, 16b )
def const16(ins, vmap):
logger.debug('Const16 : %s', ins.get_output())
cst = Constant(ins.BBBB, 'I')
return assign_const(ins.AA, cst, vmap)
# const vAA, #+BBBBBBBB ( 8b, 32b )
def const(ins, vmap):
    logger.debug('Const : %s', ins.get_output())
    # Reinterpret the raw 32-bit immediate's bits as a float; the original
    # integer is passed to Constant as a third argument (presumably so
    # integer contexts can still render it — verify in Constant).
    value = unpack("=f", pack("=i", ins.BBBBBBBB))[0]
    cst = Constant(value, 'I', ins.BBBBBBBB)
    return assign_const(ins.AA, cst, vmap)
# const/high16 vAA, #+BBBB0000 ( 8b, 16b )
def consthigh16(ins, vmap):
    """const/high16 vAA, #+BBBB0000: literal placed in the high half-word.

    The 16-bit immediate forms the top bytes of a 32-bit value whose bits
    are then reinterpreted as a float, mirroring const().
    """
    logger.debug('ConstHigh16 : %s', ins.get_output())
    # Fix: use a bytes literal so the concatenation with struct.pack() works
    # on Python 3 too (str '\x00\x00' + bytes raised TypeError there).
    # NOTE(review): '=' uses native byte order — the zero padding assumes a
    # little-endian host; TODO confirm for big-endian platforms.
    value = unpack('=f', b'\x00\x00' + pack('=h', ins.BBBB))[0]
    cst = Constant(value, 'I', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)
# const-wide/16 vAA, #+BBBB ( 8b, 16b )
def constwide16(ins, vmap):
    """const-wide/16 vAA, #+BBBB: 16-bit literal into a wide register pair."""
    logger.debug('ConstWide16 : %s', ins.get_output())
    # The previous pack('=d')/unpack('=d') round-trip was an identity
    # conversion; float() yields exactly the same value for a 16-bit int.
    value = float(ins.BBBB)
    cst = Constant(value, 'J', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)
# const-wide/32 vAA, #+BBBBBBBB ( 8b, 32b )
def constwide32(ins, vmap):
    """const-wide/32 vAA, #+BBBBBBBB: 32-bit literal into a wide pair."""
    logger.debug('ConstWide32 : %s', ins.get_output())
    # The previous pack('=d')/unpack('=d') round-trip was an identity
    # conversion; float() is exact for any 32-bit integer.
    value = float(ins.BBBBBBBB)
    cst = Constant(value, 'J', ins.BBBBBBBB)
    return assign_const(ins.AA, cst, vmap)
# const-wide vAA, #+BBBBBBBBBBBBBBBB ( 8b, 64b )
def constwide(ins, vmap):
    logger.debug('ConstWide : %s', ins.get_output())
    # Reinterpret the 64-bit immediate's bits as a double ('=q' -> '=d');
    # the raw integer is also handed to Constant.
    value = unpack('=d', pack('=q', ins.BBBBBBBBBBBBBBBB))[0]
    cst = Constant(value, 'D', ins.BBBBBBBBBBBBBBBB)
    return assign_const(ins.AA, cst, vmap)
# const-wide/high16 vAA, #+BBBB000000000000 ( 8b, 16b )
def constwidehigh16(ins, vmap):
    """const-wide/high16 vAA, #+BBBB000000000000: high half-word literal.

    The 16-bit immediate forms the top bytes of a 64-bit value whose bits
    are reinterpreted as a double, mirroring constwide().
    """
    logger.debug('ConstWideHigh16 : %s', ins.get_output())
    # Fix: bytes literal keeps the concatenation with struct.pack() valid on
    # Python 3 (str + bytes raised TypeError there).
    # NOTE(review): '=' uses native byte order — padding assumes a
    # little-endian host; TODO confirm for big-endian platforms.
    value = unpack('=d',
                   b'\x00\x00\x00\x00\x00\x00' + pack('=h', ins.BBBB))[0]
    cst = Constant(value, 'D', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)
# const-string vAA ( 8b )
def conststring(ins, vmap):
logger.debug('ConstString : %s', ins.get_output())
cst = Constant(ins.get_raw_string(), 'STR')
return assign_const(ins.AA, cst, vmap)
# const-string/jumbo vAA ( 8b )
def conststringjumbo(ins, vmap):
logger.debug('ConstStringJumbo %s', ins.get_output())
cst = Constant(ins.get_raw_string(), 'STR')
return assign_const(ins.AA, cst, vmap)
# const-class vAA, type@BBBB ( 8b )
def constclass(ins, vmap):
logger.debug('ConstClass : %s', ins.get_output())
cst = Constant(util.get_type(ins.get_string()), 'class')
return assign_const(ins.AA, cst, vmap)
# monitor-enter vAA ( 8b )
def monitorenter(ins, vmap):
logger.debug('MonitorEnter : %s', ins.get_output())
return MonitorEnterExpression(get_variables(vmap, ins.AA))
# monitor-exit vAA ( 8b )
def monitorexit(ins, vmap):
logger.debug('MonitorExit : %s', ins.get_output())
a = get_variables(vmap, ins.AA)
return MonitorExitExpression(a)
# check-cast vAA ( 8b )
def checkcast(ins, vmap):
logger.debug('CheckCast: %s', ins.get_output())
cast_type = util.get_type(ins.get_translated_kind())
cast_var = get_variables(vmap, ins.AA)
return AssignExpression(cast_var, CheckCastExpression(cast_var, cast_type))
# instance-of vA, vB ( 4b, 4b )
def instanceof(ins, vmap):
logger.debug('InstanceOf : %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
reg_c = BaseClass(util.get_type(ins.get_translated_kind()))
exp = BinaryExpression('instanceof', reg_b, reg_c, 'Z')
return AssignExpression(reg_a, exp)
# array-length vA, vB ( 4b, 4b )
def arraylength(ins, vmap):
logger.debug('ArrayLength: %s', ins.get_output())
reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
return AssignExpression(reg_a, ArrayLengthExpression(reg_b))
# new-instance vAA ( 8b )
def newinstance(ins, vmap):
logger.debug('NewInstance : %s', ins.get_output())
reg_a = get_variables(vmap, ins.AA)
ins_type = ins.cm.get_type(ins.BBBB)
return AssignExpression(reg_a, NewInstance(ins_type))
# new-array vA, vB ( 8b, size )
def newarray(ins, vmap):
logger.debug('NewArray : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = NewArrayExpression(b, ins.cm.get_type(ins.CCCC))
return AssignExpression(a, exp)
# filled-new-array {vD, vE, vF, vG, vA} ( 4b each )
def fillednewarray(ins, vmap, ret):
logger.debug('FilledNewArray : %s', ins.get_output())
c, d, e, f, g = get_variables(vmap, ins.C, ins.D,
ins.E, ins.F, ins.G)
array_type = ins.cm.get_type(ins.BBBB)
exp = FilledArrayExpression(ins.A, array_type, [c, d, e, f, g][:ins.A])
return AssignExpression(ret, exp)
# filled-new-array/range {vCCCC..vNNNN} ( 16b )
def fillednewarrayrange(ins, vmap, ret):
logger.debug('FilledNewArrayRange : %s', ins.get_output())
a, c, n = get_variables(vmap, ins.AA, ins.CCCC, ins.NNNN)
array_type = ins.cm.get_type(ins.BBBB)
exp = FilledArrayExpression(a, array_type, [c, n])
return AssignExpression(ret, exp)
# fill-array-data vAA, +BBBBBBBB ( 8b, 32b )
def fillarraydata(ins, vmap, value):
logger.debug('FillArrayData : %s', ins.get_output())
return FillArrayExpression(get_variables(vmap, ins.AA), value)
# fill-array-data-payload vAA, +BBBBBBBB ( 8b, 32b )
def fillarraydatapayload(ins, vmap):
logger.debug('FillArrayDataPayload : %s', ins.get_output())
return FillArrayExpression(None)
# throw vAA ( 8b )
def throw(ins, vmap):
logger.debug('Throw : %s', ins.get_output())
return ThrowExpression(get_variables(vmap, ins.AA))
# goto +AA ( 8b )
def goto(ins, vmap):
return NopExpression()
# goto/16 +AAAA ( 16b )
def goto16(ins, vmap):
return NopExpression()
# goto/32 +AAAAAAAA ( 32b )
def goto32(ins, vmap):
return NopExpression()
# packed-switch vAA, +BBBBBBBB ( reg to test, 32b )
def packedswitch(ins, vmap):
logger.debug('PackedSwitch : %s', ins.get_output())
reg_a = get_variables(vmap, ins.AA)
return SwitchExpression(reg_a, ins.BBBBBBBB)
# sparse-switch vAA, +BBBBBBBB ( reg to test, 32b )
def sparseswitch(ins, vmap):
logger.debug('SparseSwitch : %s', ins.get_output())
reg_a = get_variables(vmap, ins.AA)
return SwitchExpression(reg_a, ins.BBBBBBBB)
# cmpl-float vAA, vBB, vCC ( 8b, 8b, 8b )
def cmplfloat(ins, vmap):
    # cmpl-float vAA, vBB, vCC — float comparison into vAA.
    # Fixed the log label: it previously read 'CmpglFloat'.
    logger.debug('CmplFloat : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'F', vmap)
# cmpg-float vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpgfloat(ins, vmap):
logger.debug('CmpgFloat : %s', ins.get_output())
return assign_cmp(ins.AA, ins.BB, ins.CC, 'F', vmap)
# cmpl-double vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpldouble(ins, vmap):
logger.debug('CmplDouble : %s', ins.get_output())
return assign_cmp(ins.AA, ins.BB, ins.CC, 'D', vmap)
# cmpg-double vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpgdouble(ins, vmap):
logger.debug('CmpgDouble : %s', ins.get_output())
return assign_cmp(ins.AA, ins.BB, ins.CC, 'D', vmap)
# cmp-long vAA, vBB, vCC ( 8b, 8b, 8b )
def cmplong(ins, vmap):
logger.debug('CmpLong : %s', ins.get_output())
return assign_cmp(ins.AA, ins.BB, ins.CC, 'J', vmap)
# if-eq vA, vB, +CCCC ( 4b, 4b, 16b )
def ifeq(ins, vmap):
logger.debug('IfEq : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
return ConditionalExpression(Op.EQUAL, a, b)
# if-ne vA, vB, +CCCC ( 4b, 4b, 16b )
def ifne(ins, vmap):
logger.debug('IfNe : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
return ConditionalExpression(Op.NEQUAL, a, b)
# if-lt vA, vB, +CCCC ( 4b, 4b, 16b )
def iflt(ins, vmap):
logger.debug('IfLt : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
return ConditionalExpression(Op.LOWER, a, b)
# if-ge vA, vB, +CCCC ( 4b, 4b, 16b )
def ifge(ins, vmap):
logger.debug('IfGe : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
return ConditionalExpression(Op.GEQUAL, a, b)
# if-gt vA, vB, +CCCC ( 4b, 4b, 16b )
def ifgt(ins, vmap):
logger.debug('IfGt : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
return ConditionalExpression(Op.GREATER, a, b)
# if-le vA, vB, +CCCC ( 4b, 4b, 16b )
def ifle(ins, vmap):
logger.debug('IfLe : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
return ConditionalExpression(Op.LEQUAL, a, b)
# if-eqz vAA, +BBBB ( 8b, 16b )
def ifeqz(ins, vmap):
logger.debug('IfEqz : %s', ins.get_output())
return ConditionalZExpression(Op.EQUAL, get_variables(vmap, ins.AA))
# if-nez vAA, +BBBB ( 8b, 16b )
def ifnez(ins, vmap):
logger.debug('IfNez : %s', ins.get_output())
return ConditionalZExpression(Op.NEQUAL, get_variables(vmap, ins.AA))
# if-ltz vAA, +BBBB ( 8b, 16b )
def ifltz(ins, vmap):
logger.debug('IfLtz : %s', ins.get_output())
return ConditionalZExpression(Op.LOWER, get_variables(vmap, ins.AA))
# if-gez vAA, +BBBB ( 8b, 16b )
def ifgez(ins, vmap):
logger.debug('IfGez : %s', ins.get_output())
return ConditionalZExpression(Op.GEQUAL, get_variables(vmap, ins.AA))
# if-gtz vAA, +BBBB ( 8b, 16b )
def ifgtz(ins, vmap):
logger.debug('IfGtz : %s', ins.get_output())
return ConditionalZExpression(Op.GREATER, get_variables(vmap, ins.AA))
# if-lez vAA, +BBBB (8b, 16b )
def iflez(ins, vmap):
logger.debug('IfLez : %s', ins.get_output())
return ConditionalZExpression(Op.LEQUAL, get_variables(vmap, ins.AA))
#TODO: check type for all aget
# aget vAA, vBB, vCC ( 8b, 8b, 8b )
def aget(ins, vmap):
logger.debug('AGet : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, None, vmap)
# aget-wide vAA, vBB, vCC ( 8b, 8b, 8b )
def agetwide(ins, vmap):
logger.debug('AGetWide : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, 'W', vmap)
# aget-object vAA, vBB, vCC ( 8b, 8b, 8b )
def agetobject(ins, vmap):
logger.debug('AGetObject : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, 'O', vmap)
# aget-boolean vAA, vBB, vCC ( 8b, 8b, 8b )
def agetboolean(ins, vmap):
logger.debug('AGetBoolean : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, 'Z', vmap)
# aget-byte vAA, vBB, vCC ( 8b, 8b, 8b )
def agetbyte(ins, vmap):
logger.debug('AGetByte : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, 'B', vmap)
# aget-char vAA, vBB, vCC ( 8b, 8b, 8b )
def agetchar(ins, vmap):
logger.debug('AGetChar : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, 'C', vmap)
# aget-short vAA, vBB, vCC ( 8b, 8b, 8b )
def agetshort(ins, vmap):
logger.debug('AGetShort : %s', ins.get_output())
return load_array_exp(ins.AA, ins.BB, ins.CC, 'S', vmap)
# aput vAA, vBB, vCC
def aput(ins, vmap):
logger.debug('APut : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, None, vmap)
# aput-wide vAA, vBB, vCC ( 8b, 8b, 8b )
def aputwide(ins, vmap):
logger.debug('APutWide : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, 'W', vmap)
# aput-object vAA, vBB, vCC ( 8b, 8b, 8b )
def aputobject(ins, vmap):
logger.debug('APutObject : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, 'O', vmap)
# aput-boolean vAA, vBB, vCC ( 8b, 8b, 8b )
def aputboolean(ins, vmap):
logger.debug('APutBoolean : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, 'Z', vmap)
# aput-byte vAA, vBB, vCC ( 8b, 8b, 8b )
def aputbyte(ins, vmap):
logger.debug('APutByte : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, 'B', vmap)
# aput-char vAA, vBB, vCC ( 8b, 8b, 8b )
def aputchar(ins, vmap):
logger.debug('APutChar : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, 'C', vmap)
# aput-short vAA, vBB, vCC ( 8b, 8b, 8b )
def aputshort(ins, vmap):
logger.debug('APutShort : %s', ins.get_output())
return store_array_inst(ins.AA, ins.BB, ins.CC, 'S', vmap)
# iget vA, vB ( 4b, 4b )
def iget(ins, vmap):
logger.debug('IGet : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iget-wide vA, vB ( 4b, 4b )
def igetwide(ins, vmap):
logger.debug('IGetWide : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iget-object vA, vB ( 4b, 4b )
def igetobject(ins, vmap):
logger.debug('IGetObject : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iget-boolean vA, vB ( 4b, 4b )
def igetboolean(ins, vmap):
logger.debug('IGetBoolean : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iget-byte vA, vB ( 4b, 4b )
def igetbyte(ins, vmap):
logger.debug('IGetByte : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iget-char vA, vB ( 4b, 4b )
def igetchar(ins, vmap):
logger.debug('IGetChar : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iget-short vA, vB ( 4b, 4b )
def igetshort(ins, vmap):
logger.debug('IGetShort : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
exp = InstanceExpression(b, klass, ftype, name)
return AssignExpression(a, exp)
# iput vA, vB ( 4b, 4b )
def iput(ins, vmap):
logger.debug('IPut %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# iput-wide vA, vB ( 4b, 4b )
def iputwide(ins, vmap):
logger.debug('IPutWide %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# iput-object vA, vB ( 4b, 4b )
def iputobject(ins, vmap):
logger.debug('IPutObject %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# iput-boolean vA, vB ( 4b, 4b )
def iputboolean(ins, vmap):
logger.debug('IPutBoolean %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# iput-byte vA, vB ( 4b, 4b )
def iputbyte(ins, vmap):
logger.debug('IPutByte %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# iput-char vA, vB ( 4b, 4b )
def iputchar(ins, vmap):
logger.debug('IPutChar %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# iput-short vA, vB ( 4b, 4b )
def iputshort(ins, vmap):
logger.debug('IPutShort %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.CCCC)
klass = util.get_type(klass)
a, b = get_variables(vmap, ins.A, ins.B)
return InstanceInstruction(a, b, klass, atype, name)
# sget vAA ( 8b )
def sget(ins, vmap):
logger.debug('SGet : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sget-wide vAA ( 8b )
def sgetwide(ins, vmap):
logger.debug('SGetWide : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sget-object vAA ( 8b )
def sgetobject(ins, vmap):
logger.debug('SGetObject : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sget-boolean vAA ( 8b )
def sgetboolean(ins, vmap):
logger.debug('SGetBoolean : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sget-byte vAA ( 8b )
def sgetbyte(ins, vmap):
logger.debug('SGetByte : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sget-char vAA ( 8b )
def sgetchar(ins, vmap):
logger.debug('SGetChar : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sget-short vAA ( 8b )
def sgetshort(ins, vmap):
logger.debug('SGetShort : %s', ins.get_output())
klass, atype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
exp = StaticExpression(klass, atype, name)
a = get_variables(vmap, ins.AA)
return AssignExpression(a, exp)
# sput vAA ( 8b )
def sput(ins, vmap):
logger.debug('SPut : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
# sput-wide vAA ( 8b )
def sputwide(ins, vmap):
logger.debug('SPutWide : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
# sput-object vAA ( 8b )
def sputobject(ins, vmap):
logger.debug('SPutObject : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
# sput-boolean vAA ( 8b )
def sputboolean(ins, vmap):
logger.debug('SPutBoolean : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
# sput-byte vAA ( 8b )
def sputbyte(ins, vmap):
logger.debug('SPutByte : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
# sput-char vAA ( 8b )
def sputchar(ins, vmap):
logger.debug('SPutChar : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
# sput-short vAA ( 8b )
def sputshort(ins, vmap):
logger.debug('SPutShort : %s', ins.get_output())
klass, ftype, name = ins.cm.get_field(ins.BBBB)
klass = util.get_type(klass)
a = get_variables(vmap, ins.AA)
return StaticInstruction(a, klass, ftype, name)
def get_args(vmap, param_type, largs):
    """Pick the registers holding the call's arguments from ``largs``.

    The register index advances by util.get_type_size() per parameter
    (presumably 2 for wide J/D types, so the pair's second register is
    skipped — verify in util), recording only the first register of each
    parameter.
    """
    num_param = 0
    args = []
    for type_ in param_type:
        param = largs[num_param]
        args.append(param)
        num_param += util.get_type_size(type_)
    if len(param_type) == 1:
        # get_variables() unwraps a single result; callers expect a list.
        return [get_variables(vmap, *args)]
    return get_variables(vmap, *args)
# invoke-virtual {vD, vE, vF, vG, vA} ( 4b each )
def invokevirtual(ins, vmap, ret):
    # invoke-virtual {vC, vD, vE, vF, vG}: vC is the receiver, the
    # remaining registers carry the arguments per the method prototype.
    logger.debug('InvokeVirtual : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    c = get_variables(vmap, ins.C)
    # Void calls produce no result variable; otherwise allocate a fresh one.
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, c, ret_type,
                            param_type, args)
    return AssignExpression(returned, exp)
# invoke-super {vD, vE, vF, vG, vA} ( 4b each )
def invokesuper(ins, vmap, ret):
logger.debug('InvokeSuper : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = [ins.D, ins.E, ins.F, ins.G]
args = get_args(vmap, param_type, largs)
superclass = BaseClass('super')
returned = None if ret_type == 'V' else ret.new()
exp = InvokeInstruction(cls_name, name, superclass, ret_type,
param_type, args)
return AssignExpression(returned, exp)
# invoke-direct {vD, vE, vF, vG, vA} ( 4b each )
def invokedirect(ins, vmap, ret):
    # invoke-direct {vC, vD, vE, vF, vG}: direct (private/constructor)
    # call with vC as the receiver.
    logger.debug('InvokeDirect : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    base = get_variables(vmap, ins.C)
    if ret_type == 'V':
        if isinstance(base, ThisParam):
            # Direct call on `this` returning void: no result to bind.
            returned = None
        else:
            # Void call on another object (typically a constructor): treat
            # the receiver itself as the produced value and redirect the
            # pending result to it.
            returned = base
            ret.set_to(base)
    else:
        returned = ret.new()
    exp = InvokeDirectInstruction(cls_name, name, base, ret_type,
                                  param_type, args)
    return AssignExpression(returned, exp)
# invoke-static {vD, vE, vF, vG, vA} ( 4b each )
def invokestatic(ins, vmap, ret):
logger.debug('InvokeStatic : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = [ins.C, ins.D, ins.E, ins.F, ins.G]
args = get_args(vmap, param_type, largs)
base = BaseClass(cls_name)
returned = None if ret_type == 'V' else ret.new()
exp = InvokeStaticInstruction(cls_name, name, base, ret_type,
param_type, args)
return AssignExpression(returned, exp)
# invoke-interface {vD, vE, vF, vG, vA} ( 4b each )
def invokeinterface(ins, vmap, ret):
logger.debug('InvokeInterface : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = [ins.D, ins.E, ins.F, ins.G]
args = get_args(vmap, param_type, largs)
c = get_variables(vmap, ins.C)
returned = None if ret_type == 'V' else ret.new()
exp = InvokeInstruction(cls_name, name, c, ret_type,
param_type, args)
return AssignExpression(returned, exp)
# invoke-virtual/range {vCCCC..vNNNN} ( 16b each )
def invokevirtualrange(ins, vmap, ret):
logger.debug('InvokeVirtualRange : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = range(ins.CCCC, ins.NNNN + 1)
this_arg = get_variables(vmap, largs[0])
args = get_args(vmap, param_type, largs[1:])
returned = None if ret_type == 'V' else ret.new()
exp = InvokeRangeInstruction(cls_name, name, ret_type,
param_type, [this_arg] + args)
return AssignExpression(returned, exp)
# invoke-super/range {vCCCC..vNNNN} ( 16b each )
def invokesuperrange(ins, vmap, ret):
logger.debug('InvokeSuperRange : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = range(ins.CCCC, ins.NNNN + 1)
args = get_args(vmap, param_type, largs[1:])
base = get_variables(vmap, ins.CCCC)
if ret_type != 'V':
returned = ret.new()
else:
returned = base
ret.set_to(base)
superclass = BaseClass('super')
exp = InvokeRangeInstruction(cls_name, name, ret_type,
param_type, [superclass] + args)
return AssignExpression(returned, exp)
# invoke-direct/range {vCCCC..vNNNN} ( 16b each )
def invokedirectrange(ins, vmap, ret):
logger.debug('InvokeDirectRange : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = range(ins.CCCC, ins.NNNN + 1)
this_arg = get_variables(vmap, largs[0])
args = get_args(vmap, param_type, largs[1:])
base = get_variables(vmap, ins.CCCC)
if ret_type != 'V':
returned = ret.new()
else:
returned = base
ret.set_to(base)
exp = InvokeRangeInstruction(cls_name, name, ret_type,
param_type, [this_arg] + args)
return AssignExpression(returned, exp)
# invoke-static/range {vCCCC..vNNNN} ( 16b each )
def invokestaticrange(ins, vmap, ret):
logger.debug('InvokeStaticRange : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = range(ins.CCCC, ins.NNNN + 1)
args = get_args(vmap, param_type, largs)
base = BaseClass(cls_name)
returned = None if ret_type == 'V' else ret.new()
exp = InvokeStaticInstruction(cls_name, name, base, ret_type,
param_type, args)
return AssignExpression(returned, exp)
# invoke-interface/range {vCCCC..vNNNN} ( 16b each )
def invokeinterfacerange(ins, vmap, ret):
logger.debug('InvokeInterfaceRange : %s', ins.get_output())
method = ins.cm.get_method_ref(ins.BBBB)
cls_name = util.get_type(method.get_class_name())
name = method.get_name()
param_type, ret_type = method.get_proto()
param_type = util.get_params_type(param_type)
largs = range(ins.CCCC, ins.NNNN + 1)
base_arg = get_variables(vmap, largs[0])
args = get_args(vmap, param_type, largs[1:])
returned = None if ret_type == 'V' else ret.new()
exp = InvokeRangeInstruction(cls_name, name, ret_type,
param_type, [base_arg] + args)
return AssignExpression(returned, exp)
# neg-int vA, vB ( 4b, 4b )
def negint(ins, vmap):
logger.debug('NegInt : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = UnaryExpression(Op.NEG, b, 'I')
return AssignExpression(a, exp)
# not-int vA, vB ( 4b, 4b )
def notint(ins, vmap):
logger.debug('NotInt : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = UnaryExpression(Op.NOT, b, 'I')
return AssignExpression(a, exp)
# neg-long vA, vB ( 4b, 4b )
def neglong(ins, vmap):
logger.debug('NegLong : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = UnaryExpression(Op.NEG, b, 'J')
return AssignExpression(a, exp)
# not-long vA, vB ( 4b, 4b )
def notlong(ins, vmap):
logger.debug('NotLong : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = UnaryExpression(Op.NOT, b, 'J')
return AssignExpression(a, exp)
# neg-float vA, vB ( 4b, 4b )
def negfloat(ins, vmap):
logger.debug('NegFloat : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = UnaryExpression(Op.NEG, b, 'F')
return AssignExpression(a, exp)
# neg-double vA, vB ( 4b, 4b )
def negdouble(ins, vmap):
logger.debug('NegDouble : %s', ins.get_output())
a, b = get_variables(vmap, ins.A, ins.B)
exp = UnaryExpression(Op.NEG, b, 'D')
return AssignExpression(a, exp)
# int-to-long vA, vB ( 4b, 4b )
def inttolong(ins, vmap):
logger.debug('IntToLong : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)
# int-to-float vA, vB ( 4b, 4b )
def inttofloat(ins, vmap):
logger.debug('IntToFloat : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)
# int-to-double vA, vB ( 4b, 4b )
def inttodouble(ins, vmap):
logger.debug('IntToDouble : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)
# long-to-int vA, vB ( 4b, 4b )
def longtoint(ins, vmap):
logger.debug('LongToInt : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)
# long-to-float vA, vB ( 4b, 4b )
def longtofloat(ins, vmap):
logger.debug('LongToFloat : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)
# long-to-double vA, vB ( 4b, 4b )
def longtodouble(ins, vmap):
logger.debug('LongToDouble : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)
# float-to-int vA, vB ( 4b, 4b )
def floattoint(ins, vmap):
logger.debug('FloatToInt : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)
# float-to-long vA, vB ( 4b, 4b )
def floattolong(ins, vmap):
logger.debug('FloatToLong : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)
# float-to-double vA, vB ( 4b, 4b )
def floattodouble(ins, vmap):
logger.debug('FloatToDouble : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)
# double-to-int vA, vB ( 4b, 4b )
def doubletoint(ins, vmap):
logger.debug('DoubleToInt : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)
# double-to-long vA, vB ( 4b, 4b )
def doubletolong(ins, vmap):
logger.debug('DoubleToLong : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)
# double-to-float vA, vB ( 4b, 4b )
def doubletofloat(ins, vmap):
logger.debug('DoubleToFloat : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)
# int-to-byte vA, vB ( 4b, 4b )
def inttobyte(ins, vmap):
logger.debug('IntToByte : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(byte)', 'B', vmap)
# int-to-char vA, vB ( 4b, 4b )
def inttochar(ins, vmap):
logger.debug('IntToChar : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(char)', 'C', vmap)
# int-to-short vA, vB ( 4b, 4b )
def inttoshort(ins, vmap):
logger.debug('IntToShort : %s', ins.get_output())
return assign_cast_exp(ins.A, ins.B, '(short)', 'S', vmap)
# add-int vAA, vBB, vCC ( 8b, 8b, 8b )
def addint(ins, vmap):
logger.debug('AddInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.ADD, 'I', vmap)
# sub-int vAA, vBB, vCC ( 8b, 8b, 8b )
def subint(ins, vmap):
logger.debug('SubInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.SUB, 'I', vmap)
# mul-int vAA, vBB, vCC ( 8b, 8b, 8b )
def mulint(ins, vmap):
logger.debug('MulInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.MUL, 'I', vmap)
# div-int vAA, vBB, vCC ( 8b, 8b, 8b )
def divint(ins, vmap):
logger.debug('DivInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.DIV, 'I', vmap)
# rem-int vAA, vBB, vCC ( 8b, 8b, 8b )
def remint(ins, vmap):
logger.debug('RemInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.MOD, 'I', vmap)
# and-int vAA, vBB, vCC ( 8b, 8b, 8b )
def andint(ins, vmap):
logger.debug('AndInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.AND, 'I', vmap)
# or-int vAA, vBB, vCC ( 8b, 8b, 8b )
def orint(ins, vmap):
logger.debug('OrInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.OR, 'I', vmap)
# xor-int vAA, vBB, vCC ( 8b, 8b, 8b )
def xorint(ins, vmap):
logger.debug('XorInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.XOR, 'I', vmap)
# shl-int vAA, vBB, vCC ( 8b, 8b, 8b )
def shlint(ins, vmap):
logger.debug('ShlInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.INTSHL, 'I', vmap)
# shr-int vAA, vBB, vCC ( 8b, 8b, 8b )
def shrint(ins, vmap):
logger.debug('ShrInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.INTSHR, 'I', vmap)
# ushr-int vAA, vBB, vCC ( 8b, 8b, 8b )
def ushrint(ins, vmap):
logger.debug('UShrInt : %s', ins.get_output())
return assign_binary_exp(ins, Op.INTSHR, 'I', vmap)
# add-long vAA, vBB, vCC ( 8b, 8b, 8b )
def addlong(ins, vmap):
logger.debug('AddLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.ADD, 'J', vmap)
# sub-long vAA, vBB, vCC ( 8b, 8b, 8b )
def sublong(ins, vmap):
logger.debug('SubLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.SUB, 'J', vmap)
# mul-long vAA, vBB, vCC ( 8b, 8b, 8b )
def mullong(ins, vmap):
logger.debug('MulLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.MUL, 'J', vmap)
# div-long vAA, vBB, vCC ( 8b, 8b, 8b )
def divlong(ins, vmap):
logger.debug('DivLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.DIV, 'J', vmap)
# rem-long vAA, vBB, vCC ( 8b, 8b, 8b )
def remlong(ins, vmap):
logger.debug('RemLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.MOD, 'J', vmap)
# and-long vAA, vBB, vCC ( 8b, 8b, 8b )
def andlong(ins, vmap):
logger.debug('AndLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.AND, 'J', vmap)
# or-long vAA, vBB, vCC ( 8b, 8b, 8b )
def orlong(ins, vmap):
logger.debug('OrLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.OR, 'J', vmap)
# xor-long vAA, vBB, vCC ( 8b, 8b, 8b )
def xorlong(ins, vmap):
logger.debug('XorLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.XOR, 'J', vmap)
# shl-long vAA, vBB, vCC ( 8b, 8b, 8b )
def shllong(ins, vmap):
logger.debug('ShlLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.LONGSHL, 'J', vmap)
# shr-long vAA, vBB, vCC ( 8b, 8b, 8b )
def shrlong(ins, vmap):
logger.debug('ShrLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.LONGSHR, 'J', vmap)
# ushr-long vAA, vBB, vCC ( 8b, 8b, 8b )
def ushrlong(ins, vmap):
logger.debug('UShrLong : %s', ins.get_output())
return assign_binary_exp(ins, Op.LONGSHR, 'J', vmap)
# add-float vAA, vBB, vCC ( 8b, 8b, 8b )
def addfloat(ins, vmap):
logger.debug('AddFloat : %s', ins.get_output())
return assign_binary_exp(ins, Op.ADD, 'F', vmap)
# sub-float vAA, vBB, vCC ( 8b, 8b, 8b )
def subfloat(ins, vmap):
logger.debug('SubFloat : %s', ins.get_output())
return assign_binary_exp(ins, Op.SUB, 'F', vmap)
# mul-float vAA, vBB, vCC ( 8b, 8b, 8b )
def mulfloat(ins, vmap):
logger.debug('MulFloat : %s', ins.get_output())
return assign_binary_exp(ins, Op.MUL, 'F', vmap)
# div-float vAA, vBB, vCC ( 8b, 8b, 8b )
def divfloat(ins, vmap):
logger.debug('DivFloat : %s', ins.get_output())
return assign_binary_exp(ins, Op.DIV, 'F', vmap)
# rem-float vAA, vBB, vCC ( 8b, 8b, 8b )
def remfloat(ins, vmap):
logger.debug('RemFloat : %s', ins.get_output())
return assign_binary_exp(ins, Op.MOD, 'F', vmap)
# add-double vAA, vBB, vCC ( 8b, 8b, 8b )
def adddouble(ins, vmap):
logger.debug('AddDouble : %s', ins.get_output())
return assign_binary_exp(ins, Op.ADD, 'D', vmap)
# sub-double vAA, vBB, vCC ( 8b, 8b, 8b )
def subdouble(ins, vmap):
logger.debug('SubDouble : %s', ins.get_output())
return assign_binary_exp(ins, Op.SUB, 'D', vmap)
# mul-double vAA, vBB, vCC ( 8b, 8b, 8b )
def muldouble(ins, vmap):
logger.debug('MulDouble : %s', ins.get_output())
return assign_binary_exp(ins, Op.MUL, 'D', vmap)
# div-double vAA, vBB, vCC ( 8b, 8b, 8b )
def divdouble(ins, vmap):
logger.debug('DivDouble : %s', ins.get_output())
return assign_binary_exp(ins, Op.DIV, 'D', vmap)
# rem-double vAA, vBB, vCC ( 8b, 8b, 8b )
def remdouble(ins, vmap):
logger.debug('RemDouble : %s', ins.get_output())
return assign_binary_exp(ins, Op.MOD, 'D', vmap)
# add-int/2addr vA, vB ( 4b, 4b )
def addint2addr(ins, vmap):
logger.debug('AddInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.ADD, 'I', vmap)
# sub-int/2addr vA, vB ( 4b, 4b )
def subint2addr(ins, vmap):
logger.debug('SubInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.SUB, 'I', vmap)
# mul-int/2addr vA, vB ( 4b, 4b )
def mulint2addr(ins, vmap):
logger.debug('MulInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MUL, 'I', vmap)
# div-int/2addr vA, vB ( 4b, 4b )
def divint2addr(ins, vmap):
logger.debug('DivInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.DIV, 'I', vmap)
# rem-int/2addr vA, vB ( 4b, 4b )
def remint2addr(ins, vmap):
logger.debug('RemInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MOD, 'I', vmap)
# and-int/2addr vA, vB ( 4b, 4b )
def andint2addr(ins, vmap):
logger.debug('AndInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.AND, 'I', vmap)
# or-int/2addr vA, vB ( 4b, 4b )
def orint2addr(ins, vmap):
logger.debug('OrInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.OR, 'I', vmap)
# xor-int/2addr vA, vB ( 4b, 4b )
def xorint2addr(ins, vmap):
logger.debug('XorInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.XOR, 'I', vmap)
# shl-int/2addr vA, vB ( 4b, 4b )
def shlint2addr(ins, vmap):
logger.debug('ShlInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.INTSHL, 'I', vmap)
# shr-int/2addr vA, vB ( 4b, 4b )
def shrint2addr(ins, vmap):
logger.debug('ShrInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.INTSHR, 'I', vmap)
# ushr-int/2addr vA, vB ( 4b, 4b )
def ushrint2addr(ins, vmap):
logger.debug('UShrInt2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.INTSHR, 'I', vmap)
# add-long/2addr vA, vB ( 4b, 4b )
def addlong2addr(ins, vmap):
logger.debug('AddLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.ADD, 'J', vmap)
# sub-long/2addr vA, vB ( 4b, 4b )
def sublong2addr(ins, vmap):
logger.debug('SubLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.SUB, 'J', vmap)
# mul-long/2addr vA, vB ( 4b, 4b )
def mullong2addr(ins, vmap):
logger.debug('MulLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MUL, 'J', vmap)
# div-long/2addr vA, vB ( 4b, 4b )
def divlong2addr(ins, vmap):
logger.debug('DivLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.DIV, 'J', vmap)
# rem-long/2addr vA, vB ( 4b, 4b )
def remlong2addr(ins, vmap):
logger.debug('RemLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MOD, 'J', vmap)
# and-long/2addr vA, vB ( 4b, 4b )
def andlong2addr(ins, vmap):
logger.debug('AndLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.AND, 'J', vmap)
# or-long/2addr vA, vB ( 4b, 4b )
def orlong2addr(ins, vmap):
logger.debug('OrLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.OR, 'J', vmap)
# xor-long/2addr vA, vB ( 4b, 4b )
def xorlong2addr(ins, vmap):
logger.debug('XorLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.XOR, 'J', vmap)
# shl-long/2addr vA, vB ( 4b, 4b )
def shllong2addr(ins, vmap):
logger.debug('ShlLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.LONGSHL, 'J', vmap)
# shr-long/2addr vA, vB ( 4b, 4b )
def shrlong2addr(ins, vmap):
logger.debug('ShrLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.LONGSHR, 'J', vmap)
# ushr-long/2addr vA, vB ( 4b, 4b )
def ushrlong2addr(ins, vmap):
logger.debug('UShrLong2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.LONGSHR, 'J', vmap)
# add-float/2addr vA, vB ( 4b, 4b )
def addfloat2addr(ins, vmap):
logger.debug('AddFloat2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.ADD, 'F', vmap)
# sub-float/2addr vA, vB ( 4b, 4b )
def subfloat2addr(ins, vmap):
logger.debug('SubFloat2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.SUB, 'F', vmap)
# mul-float/2addr vA, vB ( 4b, 4b )
def mulfloat2addr(ins, vmap):
logger.debug('MulFloat2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MUL, 'F', vmap)
# div-float/2addr vA, vB ( 4b, 4b )
def divfloat2addr(ins, vmap):
logger.debug('DivFloat2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.DIV, 'F', vmap)
# rem-float/2addr vA, vB ( 4b, 4b )
def remfloat2addr(ins, vmap):
logger.debug('RemFloat2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MOD, 'F', vmap)
# add-double/2addr vA, vB ( 4b, 4b )
def adddouble2addr(ins, vmap):
logger.debug('AddDouble2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.ADD, 'D', vmap)
# sub-double/2addr vA, vB ( 4b, 4b )
def subdouble2addr(ins, vmap):
logger.debug('subDouble2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.SUB, 'D', vmap)
# mul-double/2addr vA, vB ( 4b, 4b )
def muldouble2addr(ins, vmap):
logger.debug('MulDouble2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MUL, 'D', vmap)
# div-double/2addr vA, vB ( 4b, 4b )
def divdouble2addr(ins, vmap):
logger.debug('DivDouble2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.DIV, 'D', vmap)
# rem-double/2addr vA, vB ( 4b, 4b )
def remdouble2addr(ins, vmap):
logger.debug('RemDouble2Addr : %s', ins.get_output())
return assign_binary_2addr_exp(ins, Op.MOD, 'D', vmap)
# add-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def addintlit16(ins, vmap):
logger.debug('AddIntLit16 : %s', ins.get_output())
return assign_lit(Op.ADD, ins.CCCC, ins.A, ins.B, vmap)
# rsub-int vA, vB, #+CCCC ( 4b, 4b, 16b )
def rsubint(ins, vmap):
logger.debug('RSubInt : %s', ins.get_output())
var_a, var_b = get_variables(vmap, ins.A, ins.B)
cst = Constant(ins.CCCC, 'I')
return AssignExpression(var_a, BinaryExpressionLit(Op.SUB, cst, var_b))
# mul-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def mulintlit16(ins, vmap):
logger.debug('MulIntLit16 : %s', ins.get_output())
return assign_lit(Op.MUL, ins.CCCC, ins.A, ins.B, vmap)
# div-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def divintlit16(ins, vmap):
logger.debug('DivIntLit16 : %s', ins.get_output())
return assign_lit(Op.DIV, ins.CCCC, ins.A, ins.B, vmap)
# rem-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def remintlit16(ins, vmap):
logger.debug('RemIntLit16 : %s', ins.get_output())
return assign_lit(Op.MOD, ins.CCCC, ins.A, ins.B, vmap)
# and-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def andintlit16(ins, vmap):
logger.debug('AndIntLit16 : %s', ins.get_output())
return assign_lit(Op.AND, ins.CCCC, ins.A, ins.B, vmap)
# or-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def orintlit16(ins, vmap):
logger.debug('OrIntLit16 : %s', ins.get_output())
return assign_lit(Op.OR, ins.CCCC, ins.A, ins.B, vmap)
# xor-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def xorintlit16(ins, vmap):
logger.debug('XorIntLit16 : %s', ins.get_output())
return assign_lit(Op.XOR, ins.CCCC, ins.A, ins.B, vmap)
# add-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def addintlit8(ins, vmap):
logger.debug('AddIntLit8 : %s', ins.get_output())
literal, op = [(ins.CC, Op.ADD), (-ins.CC, Op.SUB)][ins.CC < 0]
return assign_lit(op, literal, ins.AA, ins.BB, vmap)
# rsub-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def rsubintlit8(ins, vmap):
logger.debug('RSubIntLit8 : %s', ins.get_output())
var_a, var_b = get_variables(vmap, ins.AA, ins.BB)
cst = Constant(ins.CC, 'I')
return AssignExpression(var_a, BinaryExpressionLit(Op.SUB, cst, var_b))
# mul-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def mulintlit8(ins, vmap):
logger.debug('MulIntLit8 : %s', ins.get_output())
return assign_lit(Op.MUL, ins.CC, ins.AA, ins.BB, vmap)
# div-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def divintlit8(ins, vmap):
logger.debug('DivIntLit8 : %s', ins.get_output())
return assign_lit(Op.DIV, ins.CC, ins.AA, ins.BB, vmap)
# rem-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def remintlit8(ins, vmap):
logger.debug('RemIntLit8 : %s', ins.get_output())
return assign_lit(Op.MOD, ins.CC, ins.AA, ins.BB, vmap)
# and-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def andintlit8(ins, vmap):
logger.debug('AndIntLit8 : %s', ins.get_output())
return assign_lit(Op.AND, ins.CC, ins.AA, ins.BB, vmap)
# or-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def orintlit8(ins, vmap):
logger.debug('OrIntLit8 : %s', ins.get_output())
return assign_lit(Op.OR, ins.CC, ins.AA, ins.BB, vmap)
# xor-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def xorintlit8(ins, vmap):
logger.debug('XorIntLit8 : %s', ins.get_output())
return assign_lit(Op.XOR, ins.CC, ins.AA, ins.BB, vmap)
# shl-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def shlintlit8(ins, vmap):
logger.debug('ShlIntLit8 : %s', ins.get_output())
return assign_lit(Op.INTSHL, ins.CC, ins.AA, ins.BB, vmap)
# shr-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def shrintlit8(ins, vmap):
logger.debug('ShrIntLit8 : %s', ins.get_output())
return assign_lit(Op.INTSHR, ins.CC, ins.AA, ins.BB, vmap)
# ushr-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def ushrintlit8(ins, vmap):
logger.debug('UShrIntLit8 : %s', ins.get_output())
return assign_lit(Op.INTSHR, ins.CC, ins.AA, ins.BB, vmap)
INSTRUCTION_SET = [
# 0x00
nop, # nop
move, # move
movefrom16, # move/from16
move16, # move/16
movewide, # move-wide
movewidefrom16, # move-wide/from16
movewide16, # move-wide/16
moveobject, # move-object
moveobjectfrom16, # move-object/from16
moveobject16, # move-object/16
moveresult, # move-result
moveresultwide, # move-result-wide
moveresultobject, # move-result-object
moveexception, # move-exception
returnvoid, # return-void
return_reg, # return
# 0x10
returnwide, # return-wide
returnobject, # return-object
const4, # const/4
const16, # const/16
const, # const
consthigh16, # const/high16
constwide16, # const-wide/16
constwide32, # const-wide/32
constwide, # const-wide
constwidehigh16, # const-wide/high16
conststring, # const-string
conststringjumbo, # const-string/jumbo
constclass, # const-class
monitorenter, # monitor-enter
monitorexit, # monitor-exit
checkcast, # check-cast
# 0x20
instanceof, # instance-of
arraylength, # array-length
newinstance, # new-instance
newarray, # new-array
fillednewarray, # filled-new-array
fillednewarrayrange, # filled-new-array/range
fillarraydata, # fill-array-data
throw, # throw
goto, # goto
goto16, # goto/16
goto32, # goto/32
packedswitch, # packed-switch
sparseswitch, # sparse-switch
cmplfloat, # cmpl-float
cmpgfloat, # cmpg-float
cmpldouble, # cmpl-double
# 0x30
cmpgdouble, # cmpg-double
cmplong, # cmp-long
ifeq, # if-eq
ifne, # if-ne
iflt, # if-lt
ifge, # if-ge
ifgt, # if-gt
ifle, # if-le
ifeqz, # if-eqz
ifnez, # if-nez
ifltz, # if-ltz
ifgez, # if-gez
ifgtz, # if-gtz
iflez, # if-l
nop, # unused
nop, # unused
# 0x40
nop, # unused
nop, # unused
nop, # unused
nop, # unused
aget, # aget
agetwide, # aget-wide
agetobject, # aget-object
agetboolean, # aget-boolean
agetbyte, # aget-byte
agetchar, # aget-char
agetshort, # aget-short
aput, # aput
aputwide, # aput-wide
aputobject, # aput-object
aputboolean, # aput-boolean
aputbyte, # aput-byte
# 0x50
aputchar, # aput-char
aputshort, # aput-short
iget, # iget
igetwide, # iget-wide
igetobject, # iget-object
igetboolean, # iget-boolean
igetbyte, # iget-byte
igetchar, # iget-char
igetshort, # iget-short
iput, # iput
iputwide, # iput-wide
iputobject, # iput-object
iputboolean, # iput-boolean
iputbyte, # iput-byte
iputchar, # iput-char
iputshort, # iput-short
# 0x60
sget, # sget
sgetwide, # sget-wide
sgetobject, # sget-object
sgetboolean, # sget-boolean
sgetbyte, # sget-byte
sgetchar, # sget-char
sgetshort, # sget-short
sput, # sput
sputwide, # sput-wide
sputobject, # sput-object
sputboolean, # sput-boolean
sputbyte, # sput-byte
sputchar, # sput-char
sputshort, # sput-short
invokevirtual, # invoke-virtual
invokesuper, # invoke-super
# 0x70
invokedirect, # invoke-direct
invokestatic, # invoke-static
invokeinterface, # invoke-interface
nop, # unused
invokevirtualrange, # invoke-virtual/range
invokesuperrange, # invoke-super/range
invokedirectrange, # invoke-direct/range
invokestaticrange, # invoke-static/range
invokeinterfacerange, # invoke-interface/range
nop, # unused
nop, # unused
negint, # neg-int
notint, # not-int
neglong, # neg-long
notlong, # not-long
negfloat, # neg-float
# 0x80
negdouble, # neg-double
inttolong, # int-to-long
inttofloat, # int-to-float
inttodouble, # int-to-double
longtoint, # long-to-int
longtofloat, # long-to-float
longtodouble, # long-to-double
floattoint, # float-to-int
floattolong, # float-to-long
floattodouble, # float-to-double
doubletoint, # double-to-int
doubletolong, # double-to-long
doubletofloat, # double-to-float
inttobyte, # int-to-byte
inttochar, # int-to-char
inttoshort, # int-to-short
# 0x90
addint, # add-int
subint, # sub-int
mulint, # mul-int
divint, # div-int
remint, # rem-int
andint, # and-int
orint, # or-int
xorint, # xor-int
shlint, # shl-int
shrint, # shr-int
ushrint, # ushr-int
addlong, # add-long
sublong, # sub-long
mullong, # mul-long
divlong, # div-long
remlong, # rem-long
# 0xa0
andlong, # and-long
orlong, # or-long
xorlong, # xor-long
shllong, # shl-long
shrlong, # shr-long
ushrlong, # ushr-long
addfloat, # add-float
subfloat, # sub-float
mulfloat, # mul-float
divfloat, # div-float
remfloat, # rem-float
adddouble, # add-double
subdouble, # sub-double
muldouble, # mul-double
divdouble, # div-double
remdouble, # rem-double
# 0xb0
addint2addr, # add-int/2addr
subint2addr, # sub-int/2addr
mulint2addr, # mul-int/2addr
divint2addr, # div-int/2addr
remint2addr, # rem-int/2addr
andint2addr, # and-int/2addr
orint2addr, # or-int/2addr
xorint2addr, # xor-int/2addr
shlint2addr, # shl-int/2addr
shrint2addr, # shr-int/2addr
ushrint2addr, # ushr-int/2addr
addlong2addr, # add-long/2addr
sublong2addr, # sub-long/2addr
mullong2addr, # mul-long/2addr
divlong2addr, # div-long/2addr
remlong2addr, # rem-long/2addr
# 0xc0
andlong2addr, # and-long/2addr
orlong2addr, # or-long/2addr
xorlong2addr, # xor-long/2addr
shllong2addr, # shl-long/2addr
shrlong2addr, # shr-long/2addr
ushrlong2addr, # ushr-long/2addr
addfloat2addr, # add-float/2addr
subfloat2addr, # sub-float/2addr
mulfloat2addr, # mul-float/2addr
divfloat2addr, # div-float/2addr
remfloat2addr, # rem-float/2addr
adddouble2addr, # add-double/2addr
subdouble2addr, # sub-double/2addr
muldouble2addr, # mul-double/2addr
divdouble2addr, # div-double/2addr
remdouble2addr, # rem-double/2addr
# 0xd0
addintlit16, # add-int/lit16
rsubint, # rsub-int
mulintlit16, # mul-int/lit16
divintlit16, # div-int/lit16
remintlit16, # rem-int/lit16
andintlit16, # and-int/lit16
orintlit16, # or-int/lit16
xorintlit16, # xor-int/lit16
addintlit8, # add-int/lit8
rsubintlit8, # rsub-int/lit8
mulintlit8, # mul-int/lit8
divintlit8, # div-int/lit8
remintlit8, # rem-int/lit8
andintlit8, # and-int/lit8
orintlit8, # or-int/lit8
xorintlit8, # xor-int/lit8
# 0xe0
shlintlit8, # shl-int/lit8
shrintlit8, # shr-int/lit8
ushrintlit8, # ushr-int/lit8
] | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
// The version package provides a location to set the release versions for all
// packages to consume, without creating import cycles.
//
// This package should not import any other terraform packages.
package version
import (
_ "embed"
"fmt"
"strings"
version "github.com/hashicorp/go-version"
)
// rawVersion is the current version as a string, as read from the VERSION
// file embedded at compile time. This must be a valid semantic version.
//
// NOTE: the //go:embed directive must immediately precede this declaration;
// do not insert code or blank comment-free lines between them.
//
//go:embed VERSION
var rawVersion string
// dev determines whether the -dev prerelease marker will
// be included in version info. It is expected to be set to "no" using
// linker flags (-ldflags "-X ...dev=no" — confirm exact symbol path in the
// release build scripts) when building binaries for release.
var dev string = "yes"
// The main version number that is being run at the moment, populated from the raw version.
// This is the core version only (no prerelease suffix); see init below.
var Version string
// A pre-release marker for the version, populated using a combination of the raw version
// and the dev flag. Empty when a release build has no prerelease component.
var Prerelease string
// SemVer is an instance of version.Version representing the main version
// without any prerelease information.
var SemVer *version.Version
// init parses the embedded VERSION file and populates the package-level
// version variables. The full parsed version keeps any prerelease suffix;
// SemVer/Version carry only the core (major.minor.patch) part.
func init() {
	full := version.Must(version.NewVersion(strings.TrimSpace(rawVersion)))
	SemVer = full.Core()
	Version = SemVer.String()
	// Default to the "dev" marker; release builds override dev to "no"
	// via linker flags, in which case the VERSION file's own prerelease
	// component (possibly empty) is used instead.
	Prerelease = "dev"
	if dev == "no" {
		Prerelease = full.Prerelease()
	}
}
// Header is the HTTP header name used to send the current terraform version
// in outgoing http requests.
const Header = "Terraform-Version"
// String returns the complete version string, including prerelease
func String() string {
if Prerelease != "" {
return fmt.Sprintf("%s-%s", Version, Prerelease)
}
return Version
} | go | github | https://github.com/hashicorp/terraform | version/version.go |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-25 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.9.8).

    Introduces an experience-point hierarchy for the campaign app:
    a concrete ``Xp`` base model plus several multi-table-inheritance
    children (``Damage``, ``GuardianAssist``, ``Kill``, ``KillAssist``,
    ``MissieonBonus``), each linked back to ``Xp`` via an implicit
    parent-link ``OneToOneField``. Finally ties every ``Xp`` row to a
    ``CampaignMissionParticipant``.

    NOTE(review): ``MissieonBonus`` looks like a typo for ``MissionBonus``,
    but this migration has presumably been applied — renaming must be done
    in a *new* migration (RenameModel), never by editing history here.
    """

    # Must run after the previous campaign migration so the referenced
    # models (ImperialShip, CampaignMissionParticipant) exist.
    dependencies = [
        ('campaign', '0003_auto_20160725_2119'),
    ]

    # Operation order matters: the Xp base table must exist before the
    # child tables that parent-link to it, and before the FK is added.
    operations = [
        # Concrete base model holding the shared XP value.
        migrations.CreateModel(
            name='Xp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('points', models.IntegerField(default=1)),
            ],
        ),
        # Child of Xp (multi-table inheritance); adds no extra columns.
        migrations.CreateModel(
            name='Damage',
            fields=[
                ('xp_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='campaign.Xp')),
            ],
            bases=('campaign.xp',),
        ),
        # Child of Xp; adds no extra columns.
        migrations.CreateModel(
            name='GuardianAssist',
            fields=[
                ('xp_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='campaign.Xp')),
            ],
            bases=('campaign.xp',),
        ),
        # Child of Xp; records what was destroyed and whether it was elite.
        migrations.CreateModel(
            name='Kill',
            fields=[
                ('xp_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='campaign.Xp')),
                ('elite', models.BooleanField(default=False)),
                ('ship_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaign.ImperialShip')),
            ],
            bases=('campaign.xp',),
        ),
        # Child of Xp; adds no extra columns.
        migrations.CreateModel(
            name='KillAssist',
            fields=[
                ('xp_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='campaign.Xp')),
            ],
            bases=('campaign.xp',),
        ),
        # Child of Xp; free-text bonus description (name typo noted above).
        migrations.CreateModel(
            name='MissieonBonus',
            fields=[
                ('xp_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='campaign.Xp')),
                ('description', models.CharField(max_length=20)),
            ],
            bases=('campaign.xp',),
        ),
        # Link each Xp record to the mission participant who earned it.
        migrations.AddField(
            model_name='xp',
            name='pilotmission',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaign.CampaignMissionParticipant'),
        ),
    ]
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1_test
import (
"testing"
"github.com/google/go-cmp/cmp"
v1beta1 "k8s.io/api/admissionregistration/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
_ "k8s.io/kubernetes/pkg/apis/admissionregistration/install"
"k8s.io/utils/ptr"
)
// TestDefaultAdmissionWebhook verifies that running the legacyscheme
// defaulters over v1beta1 webhook configurations fills in exactly the
// expected defaults — Ignore failure policy, Exact match policy, 30s
// timeout, empty label selectors, Unknown side effects, ["v1beta1"]
// review versions, rule scope "*" and service port 443 — and nothing else.
func TestDefaultAdmissionWebhook(t *testing.T) {
	ignore := v1beta1.Ignore
	exact := v1beta1.Exact
	never := v1beta1.NeverReinvocationPolicy
	thirty := int32(30)
	allScopes := v1beta1.AllScopes
	unknown := v1beta1.SideEffectClassUnknown
	tests := []struct {
		name     string
		original runtime.Object
		expected runtime.Object
	}{
		{
			// A bare validating webhook entry receives every top-level default.
			name: "ValidatingWebhookConfiguration",
			original: &v1beta1.ValidatingWebhookConfiguration{
				Webhooks: []v1beta1.ValidatingWebhook{{}},
			},
			expected: &v1beta1.ValidatingWebhookConfiguration{
				Webhooks: []v1beta1.ValidatingWebhook{{
					FailurePolicy:           &ignore,
					MatchPolicy:             &exact,
					TimeoutSeconds:          &thirty,
					NamespaceSelector:       &metav1.LabelSelector{},
					ObjectSelector:          &metav1.LabelSelector{},
					SideEffects:             &unknown,
					AdmissionReviewVersions: []string{"v1beta1"},
				}},
			},
		},
		{
			// Mutating webhooks additionally default ReinvocationPolicy.
			name: "MutatingWebhookConfiguration",
			original: &v1beta1.MutatingWebhookConfiguration{
				Webhooks: []v1beta1.MutatingWebhook{{}},
			},
			expected: &v1beta1.MutatingWebhookConfiguration{
				Webhooks: []v1beta1.MutatingWebhook{{
					FailurePolicy:           &ignore,
					MatchPolicy:             &exact,
					ReinvocationPolicy:      &never,
					TimeoutSeconds:          &thirty,
					NamespaceSelector:       &metav1.LabelSelector{},
					ObjectSelector:          &metav1.LabelSelector{},
					SideEffects:             &unknown,
					AdmissionReviewVersions: []string{"v1beta1"},
				}},
			},
		},
		{
			// An empty Rule gets its Scope defaulted to "*" (AllScopes).
			name: "scope=*",
			original: &v1beta1.MutatingWebhookConfiguration{
				Webhooks: []v1beta1.MutatingWebhook{{
					Rules: []v1beta1.RuleWithOperations{{}},
				}},
			},
			expected: &v1beta1.MutatingWebhookConfiguration{
				Webhooks: []v1beta1.MutatingWebhook{{
					Rules: []v1beta1.RuleWithOperations{{Rule: v1beta1.Rule{
						Scope: &allScopes, // defaulted
					}}},
					FailurePolicy:           &ignore,
					MatchPolicy:             &exact,
					ReinvocationPolicy:      &never,
					TimeoutSeconds:          &thirty,
					NamespaceSelector:       &metav1.LabelSelector{},
					ObjectSelector:          &metav1.LabelSelector{},
					SideEffects:             &unknown,
					AdmissionReviewVersions: []string{"v1beta1"},
				}},
			},
		},
		{
			// A service reference without a port gets the conventional 443.
			name: "port=443",
			original: &v1beta1.MutatingWebhookConfiguration{
				Webhooks: []v1beta1.MutatingWebhook{{
					ClientConfig: v1beta1.WebhookClientConfig{
						Service: &v1beta1.ServiceReference{},
					},
				}},
			},
			expected: &v1beta1.MutatingWebhookConfiguration{
				Webhooks: []v1beta1.MutatingWebhook{{
					ClientConfig: v1beta1.WebhookClientConfig{
						Service: &v1beta1.ServiceReference{
							Port: ptr.To[int32](443), // defaulted
						},
					},
					FailurePolicy:           &ignore,
					MatchPolicy:             &exact,
					ReinvocationPolicy:      &never,
					TimeoutSeconds:          &thirty,
					NamespaceSelector:       &metav1.LabelSelector{},
					ObjectSelector:          &metav1.LabelSelector{},
					SideEffects:             &unknown,
					AdmissionReviewVersions: []string{"v1beta1"},
				}},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			original := test.original
			expected := test.expected
			// Default mutates `original` in place; compare against the
			// fully-specified expectation and print a diff on mismatch.
			legacyscheme.Scheme.Default(original)
			if !apiequality.Semantic.DeepEqual(original, expected) {
				t.Error(cmp.Diff(expected, original))
			}
		})
	}
}
// TestDefaultAdmissionPolicy verifies the legacyscheme defaulting of
// v1beta1 (Validating|Mutating)AdmissionPolicy objects: Fail failure
// policy, Equivalent match policy, empty label selectors on match
// resources, and rule scope "*" — while user-set fields (e.g. an explicit
// ReinvocationPolicy or Mutations list) are left untouched.
func TestDefaultAdmissionPolicy(t *testing.T) {
	fail := v1beta1.Fail
	never := v1beta1.NeverReinvocationPolicy
	equivalent := v1beta1.Equivalent
	allScopes := v1beta1.AllScopes
	tests := []struct {
		name     string
		original runtime.Object
		expected runtime.Object
	}{
		{
			// Policies default both MatchConstraints fields and FailurePolicy.
			name: "ValidatingAdmissionPolicy",
			original: &v1beta1.ValidatingAdmissionPolicy{
				Spec: v1beta1.ValidatingAdmissionPolicySpec{
					MatchConstraints: &v1beta1.MatchResources{},
				},
			},
			expected: &v1beta1.ValidatingAdmissionPolicy{
				Spec: v1beta1.ValidatingAdmissionPolicySpec{
					MatchConstraints: &v1beta1.MatchResources{
						MatchPolicy:       &equivalent,
						NamespaceSelector: &metav1.LabelSelector{},
						ObjectSelector:    &metav1.LabelSelector{},
					},
					FailurePolicy: &fail,
				},
			},
		},
		{
			// Bindings default MatchResources but carry no FailurePolicy.
			name: "ValidatingAdmissionPolicyBinding",
			original: &v1beta1.ValidatingAdmissionPolicyBinding{
				Spec: v1beta1.ValidatingAdmissionPolicyBindingSpec{
					MatchResources: &v1beta1.MatchResources{},
				},
			},
			expected: &v1beta1.ValidatingAdmissionPolicyBinding{
				Spec: v1beta1.ValidatingAdmissionPolicyBindingSpec{
					MatchResources: &v1beta1.MatchResources{
						MatchPolicy:       &equivalent,
						NamespaceSelector: &metav1.LabelSelector{},
						ObjectSelector:    &metav1.LabelSelector{},
					},
				},
			},
		},
		{
			// An empty resource rule gets its Scope defaulted to "*".
			name: "scope=*",
			original: &v1beta1.ValidatingAdmissionPolicy{
				Spec: v1beta1.ValidatingAdmissionPolicySpec{
					MatchConstraints: &v1beta1.MatchResources{
						ResourceRules: []v1beta1.NamedRuleWithOperations{{}},
					},
				},
			},
			expected: &v1beta1.ValidatingAdmissionPolicy{
				Spec: v1beta1.ValidatingAdmissionPolicySpec{
					MatchConstraints: &v1beta1.MatchResources{
						MatchPolicy:       &equivalent,
						NamespaceSelector: &metav1.LabelSelector{},
						ObjectSelector:    &metav1.LabelSelector{},
						ResourceRules: []v1beta1.NamedRuleWithOperations{
							{
								RuleWithOperations: v1beta1.RuleWithOperations{
									Rule: v1beta1.Rule{
										Scope: &allScopes, // defaulted
									},
								},
							},
						},
					},
					FailurePolicy: &fail,
				},
			},
		},
		{
			// Explicitly-set ReinvocationPolicy and Mutations pass through
			// unchanged; only MatchConstraints/FailurePolicy are defaulted.
			name: "MutatingAdmissionPolicy",
			original: &v1beta1.MutatingAdmissionPolicy{
				Spec: v1beta1.MutatingAdmissionPolicySpec{
					MatchConstraints: &v1beta1.MatchResources{},
					ReinvocationPolicy: never,
					Mutations: []v1beta1.Mutation{
						{
							PatchType: v1beta1.PatchTypeApplyConfiguration,
							ApplyConfiguration: &v1beta1.ApplyConfiguration{
								Expression: "fake string",
							},
						},
					},
				},
			},
			expected: &v1beta1.MutatingAdmissionPolicy{
				Spec: v1beta1.MutatingAdmissionPolicySpec{
					MatchConstraints: &v1beta1.MatchResources{
						MatchPolicy:       &equivalent,
						NamespaceSelector: &metav1.LabelSelector{},
						ObjectSelector:    &metav1.LabelSelector{},
					},
					FailurePolicy:      &fail,
					ReinvocationPolicy: never,
					Mutations: []v1beta1.Mutation{
						{
							PatchType: v1beta1.PatchTypeApplyConfiguration,
							ApplyConfiguration: &v1beta1.ApplyConfiguration{
								Expression: "fake string",
							},
						},
					},
				},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			original := test.original
			expected := test.expected
			// Default mutates `original` in place; diff against expectation.
			legacyscheme.Scheme.Default(original)
			if !apiequality.Semantic.DeepEqual(original, expected) {
				t.Error(cmp.Diff(expected, original))
			}
		})
	}
}
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
sanitized_Request,
)
class PrimeShareTVIE(InfoExtractor):
    """Extractor for primeshare.tv download pages.

    The site gates the real video page behind a timed form POST: the
    download page embeds hidden form fields and a JavaScript countdown
    (``cWaitTime``); the same URL must be POSTed after the countdown to
    obtain the page that contains the stream URL.
    """

    _VALID_URL = r'https?://(?:www\.)?primeshare\.tv/download/(?P<id>[\da-zA-Z]+)'
    _TEST = {
        'url': 'http://primeshare.tv/download/238790B611',
        'md5': 'b92d9bf5461137c36228009f31533fbc',
        'info_dict': {
            'id': '238790B611',
            'ext': 'mp4',
            'title': 'Public Domain - 1960s Commercial - Crest Toothpaste-YKsuFona',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The site reports missing videos inside the page body rather than
        # with an HTTP error status.
        if '>File not exist<' in webpage:
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
        # Hidden form inputs must be posted back unchanged.
        fields = self._hidden_inputs(webpage)
        headers = {
            'Referer': url,
            'Content-Type': 'application/x-www-form-urlencoded',
        }
        # Wait out the page's JavaScript countdown before posting; the +1
        # second is presumably a safety margin over the advertised value.
        wait_time = int(self._search_regex(
            r'var\s+cWaitTime\s*=\s*(\d+)',
            webpage, 'wait time', default=7)) + 1
        self._sleep(wait_time, video_id)
        # POST the hidden fields back to the same URL to get the video page.
        req = sanitized_Request(
            url, compat_urllib_parse.urlencode(fields), headers)
        video_page = self._download_webpage(
            req, video_id, 'Downloading video page')
        # The stream URL is embedded in an inline JS player configuration.
        video_url = self._search_regex(
            r"url\s*:\s*'([^']+\.primeshare\.tv(?::443)?/file/[^']+)'",
            video_page, 'video url')
        title = self._html_search_regex(
            r'<h1>Watch\s*(?: )?\s*\((.+?)(?:\s*\[\.\.\.\])?\)\s*(?: )?\s*<strong>',
            video_page, 'title')
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'ext': 'mp4',
        }
"""This is a second, slightly harder problem on the same theme as the first."""
# This is for Python 3
import html as html_converter
# for Python 2 uncomment this line
#import cgi as html_converter
class HtmlPagesConverter:
    """Convert a text file with PAGE_BREAK marker lines into per-page HTML.

    The file is scanned once on construction to record the offset at which
    each page starts, so individual pages can later be read with a seek
    instead of re-scanning the whole file.
    """

    def __init__(self, filename):
        """Read *filename* and note the page-break positions for fast access.

        Bug fixes versus the previous revision:
        * each break position was appended twice, corrupting the page index
          (every odd page came out empty);
        * the end-of-file position was never recorded, so the last page had
          no closing boundary and could not be read.
        """
        self.filename = filename
        # breaks[i] is the file position where page i begins; the final
        # entry is the end-of-file position, closing the last page.
        self.breaks = [0]
        with open(self.filename, "r", encoding="UTF-8") as f:
            while True:
                line = f.readline()
                if not line:
                    break
                if "PAGE_BREAK" in line:
                    # The next page starts right after the marker line.
                    self.breaks.append(f.tell())
            self.breaks.append(f.tell())  # sentinel: end of the last page

    def get_html_page(self, page):
        """Return the HTML for page number *page* (zero indexed).

        Each source line is HTML-escaped and terminated with ``<br />``;
        PAGE_BREAK marker lines are dropped from the output.
        """
        page_start = self.breaks[page]
        page_end = self.breaks[page + 1]
        html = ""
        with open(self.filename, "r", encoding="UTF-8") as f:
            f.seek(page_start)
            while f.tell() != page_end:
                line = f.readline()
                line = line.rstrip()
                if "PAGE_BREAK" in line:
                    continue
                html += html_converter.escape(line, quote=True)
                html += "<br />"
        return html
# -*- coding: utf-8 -*-
#
# SelfTest/Protocol/test_KDF.py: Self-test for key derivation functions
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import unittest
from binascii import unhexlify
from Crypto.Util.py3compat import *
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Hash import SHA1, HMAC, SHA256
from Crypto.Cipher import AES, DES3
from Crypto.Protocol.KDF import PBKDF1, PBKDF2, _S2V, HKDF, scrypt
def t2b(t):
    """Decode a whitespace-padded hex string into bytes; pass None through.

    Test vectors in this module are written with spaces and newlines for
    readability; those are stripped before unhexlifying.
    """
    if t is None:
        return None
    compact = t.replace(" ", "").replace("\n", "")
    return unhexlify(b(compact))
class TestVector(object):
    """Plain attribute container for a single test vector (populated in setUp)."""
    pass
class PBKDF1_Tests(unittest.TestCase):
    """Known-answer test for the (legacy) PBKDF1 key derivation function."""

    # List of tuples with test data.
    # Each tuple is made up by:
    #       Item #0: a pass phrase
    #       Item #1: salt (8 bytes encoded in hex)
    #       Item #2: output key length
    #       Item #3: iterations to use
    #       Item #4: expected result (encoded in hex)
    _testData = (
            # From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
            ("password", "78578E5A5D63CB06", 16, 1000, "DC19847E05C64D2FAF10EBFB4A3D2A20"),
    )

    def test1(self):
        # Derive with SHA-1 and compare against the published vector.
        v = self._testData[0]
        res = PBKDF1(v[0], t2b(v[1]), v[2], v[3], SHA1)
        self.assertEqual(res, t2b(v[4]))
class PBKDF2_Tests(unittest.TestCase):
    """Known-answer tests for PBKDF2 (RFC 6070 and scrypt-draft vectors).

    Fix: the loop in ``test1`` used the Python-2-only ``xrange``, which
    raises NameError on Python 3; plain iteration over the data is both
    portable and clearer.
    """

    # List of tuples with test data.
    # Each tuple is made up by:
    #       Item #0: a pass phrase
    #       Item #1: salt (encoded in hex)
    #       Item #2: output key length
    #       Item #3: iterations to use
    #       Item #4: expected result (encoded in hex)
    _testData = (
            # From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
            ("password", "78578E5A5D63CB06", 24, 2048, "BFDE6BE94DF7E11DD409BCE20A0255EC327CB936FFE93643"),
            # From RFC 6070 (PBKDF2 HMAC-SHA1 test vectors)
            ("password", "73616c74", 20, 1, "0c60c80f961f0e71f3a9b524af6012062fe037a6"),
            ("password", "73616c74", 20, 2, "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957"),
            ("password", "73616c74", 20, 4096, "4b007901b765489abead49d926f721d065a429c1"),
            ("passwordPASSWORDpassword", "73616c7453414c5473616c7453414c5473616c7453414c5473616c7453414c5473616c74",
                25, 4096, "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038"),
            ('pass\x00word', "7361006c74", 16, 4096, "56fa6aa75548099dcc37d7f03425e0c3"),
    )

    def test1(self):
        # Test only for HMAC-SHA1 as PRF; the explicit prf and the default
        # must agree with each other and with the published vectors.
        def prf(p, s):
            return HMAC.new(p, s, SHA1).digest()

        for v in self._testData:
            res = PBKDF2(v[0], t2b(v[1]), v[2], v[3])
            res2 = PBKDF2(v[0], t2b(v[1]), v[2], v[3], prf)
            self.assertEqual(res, t2b(v[4]))
            self.assertEqual(res, res2)

    def test2(self):
        """From draft-josefsson-scrypt-kdf-01, Chapter 10"""
        output_1 = t2b("""
            55 ac 04 6e 56 e3 08 9f ec 16 91 c2 25 44 b6 05
            f9 41 85 21 6d de 04 65 e6 8b 9d 57 c2 0d ac bc
            49 ca 9c cc f1 79 b6 45 99 16 64 b3 9d 77 ef 31
            7c 71 b8 45 b1 e3 0b d5 09 11 20 41 d3 a1 97 83
            """)
        output_2 = t2b("""
            4d dc d8 f6 0b 98 be 21 83 0c ee 5e f2 27 01 f9
            64 1a 44 18 d0 4c 04 14 ae ff 08 87 6b 34 ab 56
            a1 d4 25 a1 22 58 33 54 9a db 84 1b 51 c9 b3 17
            6a 27 2b de bb a1 d0 78 47 8f 62 b3 97 f3 3c 8d
            """)
        prf_hmac_sha256 = lambda p, s: HMAC.new(p, s, SHA256).digest()
        output = PBKDF2(b("passwd"), b("salt"), 64, 1, prf=prf_hmac_sha256)
        self.assertEqual(output, output_1)
        output = PBKDF2(b("Password"), b("NaCl"), 64, 80000, prf=prf_hmac_sha256)
        self.assertEqual(output, output_2)
class S2V_Tests(unittest.TestCase):
    """Tests for the S2V construction used by SIV mode (RFC 5297).

    Fix: ``test2`` used the Python-2-only ``xrange``, which raises
    NameError on Python 3; ``range`` behaves identically here.
    """

    # Sequence of test vectors.
    # Each test vector is made up by:
    #   Item #0: a tuple of strings
    #   Item #1: an AES key
    #   Item #2: the result
    #   Item #3: the cipher module S2V is based on
    # Everything is hex encoded
    _testData = [
            # RFC5297, A.1
            (
                ('101112131415161718191a1b1c1d1e1f2021222324252627',
                 '112233445566778899aabbccddee'),
                'fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0',
                '85632d07c6e8f37f950acd320a2ecc93',
                AES
            ),
            # RFC5297, A.2
            (
                ('00112233445566778899aabbccddeeffdeaddadadeaddadaffeeddcc' +
                 'bbaa99887766554433221100',
                 '102030405060708090a0',
                 '09f911029d74e35bd84156c5635688c0',
                 '7468697320697320736f6d6520706c61' +
                 '696e7465787420746f20656e63727970' +
                 '74207573696e67205349562d414553'),
                '7f7e7d7c7b7a79787776757473727170',
                '7bdb6e3b432667eb06f4d14bff2fbd0f',
                AES
            ),
    ]

    def test1(self):
        """Verify correctness of test vector"""
        for tv in self._testData:
            s2v = _S2V.new(t2b(tv[1]), tv[3])
            for s in tv[0]:
                s2v.update(t2b(s))
            result = s2v.derive()
            self.assertEqual(result, t2b(tv[2]))

    def test2(self):
        """Verify that no more than 127(AES) and 63(TDES)
        components are accepted."""
        key = bchr(0) * 8 + bchr(255) * 8
        for module in (AES, DES3):
            s2v = _S2V.new(key, module)
            # The limit is block_size*8 - 1 components; one more must fail.
            max_comps = module.block_size * 8 - 1
            for _ in range(max_comps):
                s2v.update(b("XX"))
            self.assertRaises(TypeError, s2v.update, b("YY"))
class HKDF_Tests(unittest.TestCase):
    """Known-answer tests for HKDF using the RFC 5869 Appendix A vectors."""

    # Test vectors from RFC5869, Appendix A
    # Each tuple is made up by:
    #       Item #0: hash module
    #       Item #1: secret (IKM, hex encoded)
    #       Item #2: salt (hex encoded; None/"" exercise the optional-salt paths)
    #       Item #3: context ("info", hex encoded; may be None/"")
    #       Item #4: desired output length in bytes
    #       Item #5: expected result (hex encoded)
    _test_vector = (
            (
                SHA256,
                "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
                "000102030405060708090a0b0c",
                "f0f1f2f3f4f5f6f7f8f9",
                42,
                "3cb25f25faacd57a90434f64d0362f2a" +
                "2d2d0a90cf1a5a4c5db02d56ecc4c5bf" +
                "34007208d5b887185865"
            ),
            (
                SHA256,
                "000102030405060708090a0b0c0d0e0f" +
                "101112131415161718191a1b1c1d1e1f" +
                "202122232425262728292a2b2c2d2e2f" +
                "303132333435363738393a3b3c3d3e3f" +
                "404142434445464748494a4b4c4d4e4f",
                "606162636465666768696a6b6c6d6e6f" +
                "707172737475767778797a7b7c7d7e7f" +
                "808182838485868788898a8b8c8d8e8f" +
                "909192939495969798999a9b9c9d9e9f" +
                "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
                "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
                "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
                "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
                "e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
                "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
                82,
                "b11e398dc80327a1c8e7f78c596a4934" +
                "4f012eda2d4efad8a050cc4c19afa97c" +
                "59045a99cac7827271cb41c65e590e09" +
                "da3275600c2f09b8367793a9aca3db71" +
                "cc30c58179ec3e87c14c01d5c1f3434f" +
                "1d87"
            ),
            (
                SHA256,
                "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
                None,
                None,
                42,
                "8da4e775a563c18f715f802a063c5a31" +
                "b8a11f5c5ee1879ec3454e5f3c738d2d" +
                "9d201395faa4b61a96c8"
            ),
            (
                SHA1,
                "0b0b0b0b0b0b0b0b0b0b0b",
                "000102030405060708090a0b0c",
                "f0f1f2f3f4f5f6f7f8f9",
                42,
                "085a01ea1b10f36933068b56efa5ad81" +
                "a4f14b822f5b091568a9cdd4f155fda2" +
                "c22e422478d305f3f896"
            ),
            (
                SHA1,
                "000102030405060708090a0b0c0d0e0f" +
                "101112131415161718191a1b1c1d1e1f" +
                "202122232425262728292a2b2c2d2e2f" +
                "303132333435363738393a3b3c3d3e3f" +
                "404142434445464748494a4b4c4d4e4f",
                "606162636465666768696a6b6c6d6e6f" +
                "707172737475767778797a7b7c7d7e7f" +
                "808182838485868788898a8b8c8d8e8f" +
                "909192939495969798999a9b9c9d9e9f" +
                "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
                "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
                "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
                "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
                "e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
                "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
                82,
                "0bd770a74d1160f7c9f12cd5912a06eb" +
                "ff6adcae899d92191fe4305673ba2ffe" +
                "8fa3f1a4e5ad79f3f334b3b202b2173c" +
                "486ea37ce3d397ed034c7f9dfeb15c5e" +
                "927336d0441f4c4300e2cff0d0900b52" +
                "d3b4"
            ),
            (
                SHA1,
                "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
                "",
                "",
                42,
                "0ac1af7002b3d761d1e55298da9d0506" +
                "b9ae52057220a306e07b6b87e8df21d0" +
                "ea00033de03984d34918"
            ),
            (
                SHA1,
                "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
                None,
                "",
                42,
                "2c91117204d745f3500d636a62f64f0a" +
                "b3bae548aa53d423b0d1f27ebba6f5e5" +
                "673a081d70cce7acfc48"
            )
    )

    def test1(self):
        # Each vector: derive a single key and compare with the expectation.
        for tv in self._test_vector:
            secret, salt, info, exp = [t2b(tv[x]) for x in (1, 2, 3, 5)]
            key_len, hashmod = [tv[x] for x in (4, 0)]
            output = HKDF(secret, key_len, salt, hashmod, 1, info)
            self.assertEqual(output, exp)

    def test2(self):
        # Asking for N keys of length L must yield the same bytes as one
        # key of length N*L, split into N pieces.
        ref = HKDF(b("XXXXXX"), 12, b("YYYY"), SHA1)
        # Same output, but this time split over 2 keys
        key1, key2 = HKDF(b("XXXXXX"), 6, b("YYYY"), SHA1, 2)
        self.assertEqual((ref[:6], ref[6:]), (key1, key2))
        # Same output, but this time split over 3 keys
        key1, key2, key3 = HKDF(b("XXXXXX"), 4, b("YYYY"), SHA1, 3)
        self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
class scrypt_Tests(unittest.TestCase):
    """Known-answer tests for scrypt (draft-josefsson-scrypt-kdf-00)."""

    # Test vectors taken from
    # http://tools.ietf.org/html/draft-josefsson-scrypt-kdf-00
    # Each tuple: passphrase, salt, N (CPU/memory cost), r (block size),
    # p (parallelization), expected output (hex).
    data = (
            (
                "",
                "",
                16,  # 2K
                1,
                1,
                """
                77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97
                f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42
                fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17
                e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06
                """
            ),
            (
                "password",
                "NaCl",
                1024,  # 1M
                8,
                16,
                """
                fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe
                7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62
                2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da
                c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40
                """
            ),
            (
                "pleaseletmein",
                "SodiumChloride",
                16384,  # 16M
                8,
                1,
                """
                70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb
                fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2
                d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9
                e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87
                """
            ),
            (
                "pleaseletmein",
                "SodiumChloride",
                1048576,  # 1G
                8,
                1,
                """
                21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81
                ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47
                8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3
                37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4
                """
            ),
    )

    def setUp(self):
        # Re-pack the raw tuples into attribute objects; dkLen is implied
        # by the length of the expected output.
        new_test_vectors = []
        for tv in self.data:
            new_tv = TestVector()
            new_tv.P = b(tv[0])
            new_tv.S = b(tv[1])
            new_tv.N = tv[2]
            new_tv.r = tv[3]
            new_tv.p = tv[4]
            new_tv.output = t2b(tv[5])
            new_tv.dkLen = len(new_tv.output)
            new_test_vectors.append(new_tv)
        self.data = new_test_vectors

    def _test1(self):
        # NOTE(review): the leading underscore keeps unittest from collecting
        # this scryptROMix check — presumably disabled deliberately; confirm.
        b_input = t2b("""
            f7 ce 0b 65 3d 2d 72 a4 10 8c f5 ab e9 12 ff dd
            77 76 16 db bb 27 a7 0e 82 04 f3 ae 2d 0f 6f ad
            89 f6 8f 48 11 d1 e8 7b cc 3b d7 40 0a 9f fd 29
            09 4f 01 84 63 95 74 f3 9a e5 a1 31 52 17 bc d7
            89 49 91 44 72 13 bb 22 6c 25 b5 4d a8 63 70 fb
            cd 98 43 80 37 46 66 bb 8f fc b5 bf 40 c2 54 b0
            67 d2 7c 51 ce 4a d5 fe d8 29 c9 0b 50 5a 57 1b
            7f 4d 1c ad 6a 52 3c da 77 0e 67 bc ea af 7e 89
            """)
        b_output = t2b("""
            79 cc c1 93 62 9d eb ca 04 7f 0b 70 60 4b f6 b6
            2c e3 dd 4a 96 26 e3 55 fa fc 61 98 e6 ea 2b 46
            d5 84 13 67 3b 99 b0 29 d6 65 c3 57 60 1f b4 26
            a0 b2 f4 bb a2 00 ee 9f 0a 43 d1 9b 57 1a 9c 71
            ef 11 42 e6 5d 5a 26 6f dd ca 83 2c e5 9f aa 7c
            ac 0b 9c f1 be 2b ff ca 30 0d 01 ee 38 76 19 c4
            ae 12 fd 44 38 f2 03 a0 e4 e1 c4 7e c3 14 86 1f
            4e 90 87 cb 33 39 6a 68 73 e8 f9 d2 53 9a 4b 8e
            """)
        from Crypto.Protocol.KDF import _scryptROMix
        output = _scryptROMix(b_input, 16)
        self.assertEqual(output, b_output)

    def test2(self):
        for tv in self.data:
            # TODO: add runtime flag to enable test vectors
            # with humongous memory usage
            if tv.N > 100000:
                continue
            output = scrypt(tv.P, tv.S, tv.dkLen, tv.N, tv.r, tv.p)
            self.assertEqual(output, tv.output)

    def test3(self):
        # Splitting the output over N keys must reproduce the single-key bytes.
        ref = scrypt(b("password"), b("salt"), 12, 16, 1, 1)
        # Same output, but this time split over 2 keys
        key1, key2 = scrypt(b("password"), b("salt"), 6, 16, 1, 1, 2)
        self.assertEqual((ref[:6], ref[6:]), (key1, key2))
        # Same output, but this time split over 3 keys
        key1, key2, key3 = scrypt(b("password"), b("salt"), 4, 16, 1, 1, 3)
        self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
def get_tests(config={}):
    """Collect every test case defined in this module into one flat list."""
    test_classes = (
        PBKDF1_Tests,
        PBKDF2_Tests,
        S2V_Tests,
        HKDF_Tests,
        scrypt_Tests,
    )
    tests = []
    for klass in test_classes:
        tests.extend(list_test_cases(klass))
    return tests
if __name__ == '__main__':
    # Allow running this file directly: expose the suite under the name
    # that unittest.main looks up via defaultTest.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4
#!/bin/sh
#
# Copyright (c) 2008 Deskin Miller
#

test_description='git svn partial-rebuild tests'
. ./lib-git-svn.sh

# Build an SVN repository with the standard trunk/branches/tags layout,
# a branch "a" copied from trunk, and one extra commit on each side,
# then prepare a git-svn clone of it.
test_expect_success 'initialize svnrepo' '
	mkdir import &&
	(
		(cd import &&
		mkdir trunk branches tags &&
		(cd trunk &&
		echo foo > foo
		) &&
		svn_cmd import -m "import for git-svn" . "$svnrepo" >/dev/null &&
		svn_cmd copy "$svnrepo"/trunk "$svnrepo"/branches/a \
			-m "created branch a"
		) &&
		rm -rf import &&
		svn_cmd co "$svnrepo"/trunk trunk &&
		(cd trunk &&
		echo bar >> foo &&
		svn_cmd ci -m "updated trunk"
		) &&
		svn_cmd co "$svnrepo"/branches/a a &&
		(cd a &&
		echo baz >> a &&
		svn_cmd add a &&
		svn_cmd ci -m "updated a"
		) &&
		git svn init --stdlayout "$svnrepo"
	)
	'

# Deliberately fetch only part of the history so a later fetch has
# something to rebuild from.
test_expect_success 'import an early SVN revision into git' '
	git svn fetch -r1:2
	'

# A second, complete clone acts as the up-to-date mirror.
test_expect_success 'make full git mirror of SVN' '
	mkdir mirror &&
	(
		(cd mirror &&
		git init &&
		git svn init --stdlayout "$svnrepo" &&
		git svn fetch
		)
	)
	'

# Pull the mirror refs into the partial clone; git svn fetch must then
# rebuild its metadata from those pre-existing commits.
test_expect_success 'fetch from git mirror and partial-rebuild' '
	git config --add remote.origin.url "file://$PWD/mirror/.git" &&
	git config --add remote.origin.fetch refs/remotes/*:refs/remotes/* &&
	git fetch origin &&
	git svn fetch
	'

test_done
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.webfluxfnhandlerclasses

/**
 * Minimal domain object used by the WebFlux.fn handler-class documentation
 * snippets.
 */
data class Person(val name: String)
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Stub management command used as a fixture by the bash-completion tests."""

    def add_arguments(self, parser):
        # A single optional flag is enough to exercise option completion.
        parser.add_argument("--list", action="store_true", help="Print all options")

    def handle(self, *args, **options):
        # Deliberate no-op: only argument parsing/completion is under test.
        pass
name: 🐛 Bug Report
description: Create a report to help us reproduce and fix the bug
body:
- type: markdown
attributes:
value: >
#### Before submitting, please review the [contribution guide](https://github.com/pytorch/pytorch/wiki/The-Ultimate-Guide-to-PyTorch-Contributions) and [AI-Assisted Development](https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md#ai-assisted-development) policy. Issues that do not follow these practices will be automatically closed and users breaking these rules repeatedly may be banned.
- type: markdown
attributes:
value: >
#### Before submitting a bug, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/pytorch/pytorch/issues?q=is%3Aissue+sort%3Acreated-desc+). Note: Please write your bug report in English to ensure it can be understood and addressed by the development team. If you are filing a bug for torch.compile, please use the [torch.compile issue template](https://github.com/pytorch/pytorch/issues/new?q=sort%3Aupdated-desc+is%3Aissue+is%3Aopen&template=pt2-bug-report.yml).
- type: textarea
attributes:
label: 🐛 Describe the bug
description: |
Please provide a clear and concise description of what the bug is.
If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently.
Your example should be fully self-contained and not rely on any artifact that should be downloaded.
For example:
```python
# All necessary imports at the beginning
import torch
# A succinct reproducing example trimmed down to the essential parts:
t = torch.rand(5, 10) # Note: the bug is here, we should pass requires_grad=True
t.sum().backward()
```
If the code is too long (hopefully, it isn't), feel free to put it in a public gist and link it in the issue: https://gist.github.com.
Please also paste or describe the results you observe instead of the expected results. If you observe an error, please paste the error message including the **full** traceback of the exception. It may be relevant to wrap error messages in ```` ```triple quotes blocks``` ````.
If your issue is related to numerical accuracy or reproducibility, please read the [numerical accuracy](https://docs.pytorch.org/docs/stable/notes/numerical_accuracy.html) and [reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html) notes. If the difference is not expected as described in these documents, please provide appropriate justification on why one result is wrong and the other is correct.
placeholder: |
A clear and concise description of what the bug is.
```python
# Sample code to reproduce the problem
```
```
The error message you got, with the full traceback.
```
validations:
required: true
- type: textarea
attributes:
label: Versions
description: |
Please run the following and paste the output below.
```sh
wget https://raw.githubusercontent.com/pytorch/pytorch/main/torch/utils/collect_env.py
# For security purposes, please check the contents of collect_env.py before running it.
python collect_env.py
```
validations:
required: true
- type: markdown
attributes:
value: >
Thanks for contributing 🎉! | unknown | github | https://github.com/pytorch/pytorch | .github/ISSUE_TEMPLATE/bug-report.yml |
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { Field } from "@chakra-ui/react";
import { useQueryClient } from "@tanstack/react-query";
import { AsyncSelect } from "chakra-react-select";
import type { OptionsOrGroups, GroupBase, SingleValue } from "chakra-react-select";
import React from "react";
import { useTranslation } from "react-i18next";
import { useNavigate } from "react-router-dom";
import { useDebouncedCallback } from "use-debounce";
import { UseDagServiceGetDagsKeyFn } from "openapi/queries";
import { DagService } from "openapi/requests/services.gen";
import type { DAGCollectionResponse, DAGResponse } from "openapi/requests/types.gen";
import type { Option } from "src/utils/option";
import { DropdownIndicator } from "./SearchDagsDropdownIndicator";
/**
 * Search box that looks up Dags by display name and navigates to the
 * selected Dag's page. Queries go through the TanStack query client with a
 * 300 ms debounce so the API is not hit on every keystroke; `setIsOpen`
 * closes the surrounding dialog once a Dag is chosen.
 */
export const SearchDags = ({
  setIsOpen,
}: {
  readonly setIsOpen: React.Dispatch<React.SetStateAction<boolean>>;
}) => {
  const { t: translate } = useTranslation("dags");
  const queryClient = useQueryClient();
  const navigate = useNavigate();

  // Cap the dropdown to a small page of results.
  const SEARCH_LIMIT = 10;

  const onSelect = (selected: SingleValue<Option>) => {
    if (selected) {
      setIsOpen(false);
      void Promise.resolve(navigate(`/dags/${selected.value}`));
    }
  };

  // Debounced loader for AsyncSelect: fetch matching Dags, map them to
  // {label, value} options and hand them to the select via `callback`.
  const searchDagDebounced = useDebouncedCallback(
    (inputValue: string, callback: (options: OptionsOrGroups<Option, GroupBase<Option>>) => void) => {
      void queryClient.fetchQuery({
        queryFn: () =>
          DagService.getDags({
            dagDisplayNamePattern: inputValue,
            limit: SEARCH_LIMIT,
          }).then((data: DAGCollectionResponse) => {
            const options = data.dags.map((dag: DAGResponse) => ({
              label: dag.dag_display_name || dag.dag_id,
              value: dag.dag_id,
            }));

            callback(options);

            return options;
          }),
        queryKey: UseDagServiceGetDagsKeyFn({
          dagDisplayNamePattern: inputValue,
        }),
        // staleTime 0 forces a fresh fetch even when an identical query
        // result is already cached.
        staleTime: 0,
      });
    },
    300,
  );

  return (
    <Field.Root>
      <AsyncSelect
        backspaceRemovesValue={true}
        components={{ DropdownIndicator }}
        defaultOptions
        filterOption={undefined}
        loadOptions={searchDagDebounced}
        menuIsOpen
        onChange={onSelect}
        placeholder={translate("search.dags")}
        // eslint-disable-next-line unicorn/no-null
        value={null} // null is required https://github.com/JedWatson/react-select/issues/3066
      />
    </Field.Root>
  );
};
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.