code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#!/usr/bin/env python2
import curses
import fcntl
import json
import os
import requests
import sys
import termios
import traceback
# Default connection settings for Kodi's JSON-RPC-over-HTTP interface.
# Overridden first by /home/osmc/cli_remote.conf, then by key=value
# command-line arguments (see `keymap` below).
settings = {
    'ip' : '127.0.0.1',
    'port' : '80',
    'user' : '',
    'pssw' : '',
}
# Maps the single-letter command-line keys (i=..., p=..., ...) to the
# corresponding `settings` keys.
keymap = {
    'i' : 'ip',
    'p' : 'port',
    'u' : 'user',
    'w' : 'pssw',
}
# Load saved settings from the config file, if present. Any failure
# (missing file, bad JSON) falls through to printing the usage text;
# the script still continues with defaults / CLI overrides.
try:
    with open('/home/osmc/cli_remote.conf' , 'r') as f:
        lines = f.readlines()
    single = ''.join(lines)
    raw_sets = json.loads(single)
    settings.update(raw_sets)
except:
    print 'USAGE : cli-remote i=Your_ip_address p=your_port u=your_username w=your_password'
    print 'All the settings are optional. The default will be used in their place if you dont specifiy them.'
    print 'Defaults:'
    print '         ip   : 127.0.0.1'
    print '         port : 80'
    print '         user : ""'
    print '         pass : ""'
    print ''
    print 'If you are using this script on the device (via ssh or something) then you dont need to put in the IP address.'
    print 'The default of 127.0.0.1 already points to the local host.'
    print ''
    print 'Alternatively, you can save a file called /home/osmc/cli_remote.conf with this:'
    print '{"ip": "your_ip", "port": "your_port", "user" : "your_user", "pssw": "your_pass"}'
    print 'Or just {"port": "your_port"} if that is all you would like to change.'
    print ''
# Apply command-line overrides of the form key=value; malformed or
# unrecognised arguments are silently skipped.
for arg in sys.argv[1:]:
    try:
        k, v = arg.split('=')
        key = keymap.get(k, None)
        if key is not None:
            settings[key] = v
    except:
        continue
def call(settings, action, params=None):
    """Send a JSON-RPC method call to Kodi over HTTP.

    settings : dict with 'ip', 'port', 'user' and 'pssw' keys.
    action   : JSON-RPC method name, e.g. 'Input.Up'.
    params   : optional dict of parameters for the method.

    Returns the requests.Response object so callers may inspect the
    result (existing callers ignore the return value, so this stays
    backward compatible).
    """
    url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
    headers = {'Content-Type': 'application/json'}
    # `action` is already a string, so no "%s" interpolation is needed.
    command = {"jsonrpc": "2.0", "method": action, "id": 1}
    if params is not None:
        command['params'] = params
    data = json.dumps(command)
    # key_map stores booleans as the strings "true"/"false" (see the
    # debugger entries); convert them to real JSON booleans here.
    data = data.replace('"true"', 'true').replace('"false"', 'false')
    r = requests.post(url, data=data, headers=headers,
                      auth=(settings['user'], settings['pssw']))
    return r
def call_keyboard(settings, text, params=None):
    """Send `text` to Kodi's on-screen keyboard via Input.SendText.

    settings : dict with 'ip', 'port', 'user' and 'pssw' keys.
    text     : string to type into Kodi's keyboard.
    params   : unused; retained so the signature stays compatible
               with existing callers and mirrors call().

    Returns the requests.Response object (callers currently ignore it).
    """
    url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
    headers = {'Content-Type': 'application/json'}
    command = {"jsonrpc": "2.0", "method": "Input.SendText",
               "params": {"text": text}, "id": 1}
    r = requests.post(url, data=json.dumps(command), headers=headers,
                      auth=(settings['user'], settings['pssw']))
    return r
def test(settings):
    """Ping Kodi over JSON-RPC, then pop up a 'Connected!' notification."""
    endpoint = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
    hdrs = {'Content-Type': 'application/json'}
    creds = (settings['user'], settings['pssw'])
    ping = {"jsonrpc": "2.0", "method": "JSONRPC.Ping", "id": 1}
    requests.post(endpoint, data=json.dumps(ping), headers=hdrs, auth=creds)
    notify = {"jsonrpc": "2.0",
              "method": "GUI.ShowNotification",
              "params": {"title": "Kodi CLI Remote", "message": "Connected!"},
              "id": 1}
    requests.post(endpoint, data=json.dumps(notify), headers=hdrs, auth=creds)
def redraw(stdscr):
    """Clear the curses screen and repaint the static help text."""
    stdscr.erase()
    stdscr.refresh()
    help_lines = (
        ">>> 'Arrow Keys' to navigate",
        ">>> 'Enter' to select",
        ">>> 'Backspace' or 'Esc' to navigate back",
        ">>> 'c' for the context menu",
        ">>> 'i' for info",
        ">>> 'o' to toggle the OSD",
        ">>> 's' to show codec info",
        ">>> '[' and ']' volume up and down",
        ">>> 'm' to toggle mute",
        ">>> 'k' to enter keyboard mode (send text to Kodi's keyboard)",
        ">>> 'd' debugger on, 'f' debugger off",
        ">>> 'q' to quit",
    )
    # Help text starts on row 2; row 0 is reserved for status messages.
    for row, text in enumerate(help_lines, start=2):
        stdscr.addstr(row, 0, text)
    stdscr.refresh()
# Maps a curses key code (or raw ASCII code, noted in trailing comments)
# to the Kodi JSON-RPC action it triggers. Boolean params are stored as
# the strings "true"/"false" and converted to JSON booleans by call().
key_map = {
    curses.KEY_UP : {'name' : 'Up', 'action' : 'Input.Up'},
    curses.KEY_DOWN : {'name' : 'Down', 'action' : 'Input.Down'},
    curses.KEY_LEFT : {'name' : 'Left', 'action' : 'Input.Left'},
    curses.KEY_RIGHT : {'name' : 'Right', 'action' : 'Input.Right'},
    curses.KEY_BACKSPACE : {'name' : 'Back', 'action' : 'Input.Back'},
    27 : {'name' : 'Back', 'action' : 'Input.Back'}, # ESC
    99 : {'name' : 'ContextMenu', 'action' : 'Input.ContextMenu'}, # c
    13 : {'name' : 'Select', 'action' : 'Input.Select'}, # ENTER
    105 : {'name' : 'Info', 'action' : 'Input.Info'}, # i
    104 : {'name' : 'Home', 'action' : 'Input.Home'}, # h
    111 : {'name' : 'ShowOSD', 'action' : 'Input.ShowOSD'}, # o
    115 : {'name' : 'ShowCodec', 'action' : 'Input.ShowCodec'}, #s
    91 : {'name' : 'VolDown', 'action' : 'Application.SetVolume', # [
          "params": { "volume": "decrement" }},
    93 : {'name' : 'VolUp', 'action' : 'Application.SetVolume', # ]
          "params": { "volume": "increment" }},
    100 : {'name' : 'Debugger On', 'action' : 'Settings.SetSettingValue', # d
           "params": {"setting":"debug.showloginfo", "value":"true"}},
    102 : {'name' : 'Debugger Off', 'action' : 'Settings.SetSettingValue', # f
           "params": {"setting":"debug.showloginfo", "value":"false"}},
    109 : {'name' : 'Toggle Mute', 'action' : 'Application.SetMute', # m
           "params": {"mute":"toggle"}},
}
# Verify connectivity before taking over the terminal with curses.
try:
    test(settings)
except requests.ConnectionError:
    print 'Failed to connect.'
    print 'Ensure that Kodi is able to be controlled via HTTP'
    print 'Open the Kodi settings, Service, Web Server, and Enable HTTP remote.'
    sys.exit()
# Put the terminal into curses mode: unbuffered keys, no newline
# translation, keypad escape sequences decoded, no local echo.
stdscr = curses.initscr()
curses.cbreak()
curses.nonl()
stdscr.keypad(1)
redraw(stdscr)
curses.noecho()
key = ''
name = ''
# Main event loop: translate each keypress into a JSON-RPC call,
# showing the last action's name on row 0, until 'q' is pressed.
while key != ord('q'):
    redraw(stdscr)
    if name:
        stdscr.addstr(0,0, name)
    key = stdscr.getch()
    stdscr.refresh()
    action = key_map.get(key, {}).get('action', None)
    params = key_map.get(key, {}).get('params', None)
    name = key_map.get(key, {}).get('name' , None)
    if action is not None:
        curses.setsyx(0, 0)
        call(settings, action, params)
        continue
    if key == ord('k'):
        # Keyboard mode: temporarily echo typed characters, read a
        # whole line and send it to Kodi's on-screen keyboard.
        curses.echo()
        redraw(stdscr)
        stdscr.addstr(0,0,"<<< KEYBOARD MODE >>>")
        text = stdscr.getstr(0,23)
        call_keyboard(settings, text)
        curses.noecho()
        redraw(stdscr)
# Restore the terminal to its normal state on exit.
curses.endwin()
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Network-related classes.
*/
@InterfaceAudience.Public
package org.apache.hadoop.net;
import org.apache.hadoop.classification.InterfaceAudience; | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/package-info.java |
// MODULE: context
// FILE: context.kt
fun test() {
<caret_context>call("foo")
}
fun call(text: String) {}
// MODULE: main
// MODULE_KIND: CodeFragment
// CONTEXT_MODULE: context
// FILE: fragment.kt
// CODE_FRAGMENT_KIND: EXPRESSION
// CODE_FRAGMENT_FOREIGN_VALUE: foo_DebugLabel(Ljava/lang/String;)
<caret>foo_DebugLabel | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api/testData/components/compilerFacility/compilation/codeFragments/capturing/foreignValue.kt |
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly (or
identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
import os
import sys
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import re as pre
import random
# Use sys.stdout encoding for output.
# This was only added to Python's doctest in Python 2.6, so we must duplicate
# it here to make utf8 files work in Python 2.5.
pdoctest._encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in `s`, and return the result.
    If the string `s` is Unicode, it is encoded using the stdout
    encoding and the `backslashreplace` error handler.
    """
    if isinstance(s, unicode):
        s = s.encode(pdoctest._encoding, 'backslashreplace')
    # This regexp matches the start of non-blank lines:
    return re.sub('(?m)^(?!$)', indent*' ', s)

# Monkey-patch doctest so its report output goes through the
# unicode-safe indent above (needed for utf8 files on Python 2.5).
pdoctest._indent = _indent
def sys_normcase(f):
    """Lower-case path `f` when the filesystem is case insensitive.

    `sys_case_insensitive` is the module-level flag set by
    get_sympy_dir(); on case-sensitive systems the path is unchanged.
    """
    return f.lower() if sys_case_insensitive else f
def convert_to_native_paths(lst):
    """
    Converts a list of '/' separated paths into a list of
    native (os.sep separated) paths and converts to lowercase
    if the system is case insensitive.
    """
    converted = []
    for path in lst:
        native = os.path.join(*path.split("/"))
        if sys.platform == "win32":
            # on windows the slash after the colon is dropped
            colon = native.find(':')
            if colon != -1 and native[colon+1] != '\\':
                native = native[:colon+1] + '\\' + native[colon+1:]
        converted.append(sys_normcase(native))
    return converted
def get_sympy_dir():
    """
    Return the root sympy directory and set the global flag telling
    whether the filesystem is case sensitive or not.
    """
    global sys_case_insensitive
    here = os.path.abspath(__file__)
    root = os.path.normpath(os.path.join(os.path.dirname(here), "..", ".."))
    # If both case variants of the directory exist, the filesystem
    # ignores case.
    sys_case_insensitive = (os.path.isdir(root) and
                            os.path.isdir(root.lower()) and
                            os.path.isdir(root.upper()))
    return sys_normcase(root)
def isgeneratorfunction(object):
    """
    Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as functions.
    See isfunction.__doc__ for attributes listing.

    Adapted from Python 2.6.
    """
    # CO_GENERATOR flag in the code object marks a generator function.
    CO_GENERATOR = 0x20
    if inspect.isfunction(object) or inspect.ismethod(object):
        # 'func_code' is the Python 2 spelling of the code object;
        # '__code__' works on 2.6+ and 3.x. Check both for portability.
        code = getattr(object, 'func_code', None) or \
               getattr(object, '__code__', None)
        if code is not None and code.co_flags & CO_GENERATOR:
            return True
    return False
def setup_pprint():
    """Configure sympy printing for doctests: plain-ascii, plain str output."""
    from sympy import pprint_use_unicode, init_printing
    # force pprint to be in ascii mode in doctests
    pprint_use_unicode(False)
    # hook our nice, hash-stable strprinter
    init_printing(pretty_print=False)
def test(*paths, **kwargs):
    """
    Run all tests in test_*.py files which match any of the given
    strings in `paths` or all tests if paths=[].

    Notes:
       o if sort=False, tests are run in random order (not default).
       o paths can be entered in native system format or in unix,
         forward-slash format.

    Examples:

    >> import sympy

    Run all tests:
    >> sympy.test()

    Run one file:
    >> sympy.test("sympy/core/tests/test_basic.py")
    >> sympy.test("_basic")

    Run all tests in sympy/functions/ and some particular file:
    >> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions")

    Run all tests in sympy/core and sympy/utilities:
    >> sympy.test("/core", "/util")

    Run specific test from a file:
    >> sympy.test("sympy/core/tests/test_basic.py", kw="test_equality")

    Run the tests with verbose mode on:
    >> sympy.test(verbose=True)

    Don't sort the test output:
    >> sympy.test(sort=False)

    Turn on post-mortem pdb:
    >> sympy.test(pdb=True)

    Turn off colors:
    >> sympy.test(colors=False)

    The traceback verboseness can be set to "short" or "no"
    (default is "short")
    >> sympy.test(tb='no')
    """
    verbose = kwargs.get("verbose", False)
    tb = kwargs.get("tb", "short")
    kw = kwargs.get("kw", "")
    post_mortem = kwargs.get("pdb", False)
    colors = kwargs.get("colors", True)
    sort = kwargs.get("sort", True)
    # A fixed seed makes the random test order reproducible across runs.
    seed = kwargs.get("seed", None)
    if seed is None:
        seed = random.randrange(100000000)
    r = PyTestReporter(verbose, tb, colors)
    t = SymPyTests(r, kw, post_mortem, seed)

    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False

    test_files = t.get_test_files('sympy')
    if len(paths) == 0:
        t._testfiles.extend(test_files)
    else:
        # Select files matching any requested path, either as a
        # substring anywhere in the path or as an fnmatch on the name.
        paths = convert_to_native_paths(paths)
        matched = []
        for f in test_files:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break
        t._testfiles.extend(matched)
    return t.test(sort=sort)
def doctest(*paths, **kwargs):
    """
    Runs doctests in all *py files in the sympy directory which match
    any of the given strings in `paths` or all tests if paths=[].

    Note:
       o paths can be entered in native system format or in unix,
         forward-slash format.
       o files that are on the blacklist can be tested by providing
         their path; they are only excluded if no paths are given.

    Examples:

    >> import sympy

    Run all tests:
    >> sympy.doctest()

    Run one file:
    >> sympy.doctest("sympy/core/basic.py")
    >> sympy.doctest("polynomial.txt")

    Run all tests in sympy/functions/ and some particular file:
    >> sympy.doctest("/functions", "basic.py")

    Run any file having polynomial in its name, doc/src/modules/polynomial.txt,
    sympy\functions\special\polynomials.py, and sympy\polys\polynomial.py:
    >> sympy.doctest("polynomial")
    """
    normal = kwargs.get("normal", False)
    verbose = kwargs.get("verbose", False)
    blacklist = kwargs.get("blacklist", [])
    blacklist.extend([
        "doc/src/modules/mpmath", # needs to be fixed upstream
        "sympy/mpmath", # needs to be fixed upstream
        "doc/src/modules/plotting.txt", # generates live plots
        "sympy/plotting", # generates live plots
        "sympy/utilities/compilef.py", # needs tcc
        "sympy/utilities/autowrap.py", # needs installed compiler
        "sympy/galgebra/GA.py", # needs numpy
        "sympy/galgebra/latex_ex.py", # needs numpy
        "sympy/conftest.py", # needs py.test
        "sympy/utilities/benchmarking.py", # needs py.test
        ])
    blacklist = convert_to_native_paths(blacklist)

    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False

    r = PyTestReporter(verbose)
    t = SymPyDocTests(r, normal)

    test_files = t.get_test_files('sympy')
    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]
    if len(paths) == 0:
        t._testfiles.extend(not_blacklisted)
    else:
        # take only what was requested...but not blacklisted items
        # and allow for partial match anywhere or fnmatch of name
        paths = convert_to_native_paths(paths)
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break
        t._testfiles.extend(matched)

    # run the tests and record the result for this *py portion of the tests
    if t._testfiles:
        failed = not t.test()
    else:
        failed = False

    # test *txt files only if we are running python newer than 2.4
    if sys.version_info[:2] > (2,4):
        # N.B.
        # --------------------------------------------------------------------
        # Here we test *.txt files at or below doc/src. Code from these must
        # be self supporting in terms of imports since there is no importing
        # of necessary modules by doctest.testfile. If you try to pass *.py
        # files through this they might fail because they will lack the needed
        # imports and smarter parsing that can be done with source code.
        #
        test_files = t.get_test_files('doc/src', '*.txt', init_only=False)
        test_files.sort()

        not_blacklisted = [f for f in test_files
                           if not any(b in f for b in blacklist)]

        if len(paths) == 0:
            matched = not_blacklisted
        else:
            # Take only what was requested as long as it's not on the blacklist.
            # Paths were already made native in *py tests so don't repeat here.
            # There's no chance of having a *py file slip through since we
            # only have *txt files in test_files.
            matched = []
            for f in not_blacklisted:
                basename = os.path.basename(f)
                for p in paths:
                    if p in f or fnmatch(basename, p):
                        matched.append(f)
                        break

        setup_pprint()
        first_report = True
        for txt_file in matched:
            if not os.path.isfile(txt_file):
                continue
            old_displayhook = sys.displayhook
            try:
                # out = pdoctest.testfile(txt_file, module_relative=False, encoding='utf-8',
                #    optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE)
                out = sympytestfile(txt_file, module_relative=False, encoding='utf-8',
                    optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE)
            finally:
                # make sure we return to the original displayhook in case some
                # doctest has changed that
                sys.displayhook = old_displayhook

            txtfailed, tested = out
            if tested:
                failed = txtfailed or failed
                if first_report:
                    first_report = False
                    msg = 'txt doctests start'
                    lhead = '='*((80 - len(msg))//2 - 1)
                    rhead = '='*(79 - len(msg) - len(lhead) - 1)
                    print ' '.join([lhead, msg, rhead])
                    print
                # use as the id, everything past the first 'sympy'
                file_id = txt_file[txt_file.find('sympy') + len('sympy') + 1:]
                print file_id, # get at least the name out so it is know who is being tested
                wid = 80 - len(file_id) - 1 #update width
                test_file = '[%s]' % (tested)
                report = '[%s]' % (txtfailed or 'OK')
                print ''.join([test_file,' '*(wid-len(test_file)-len(report)), report])

    # the doctests for *py will have printed this message already if there was
    # a failure, so now only print it if there was intervening reporting by
    # testing the *txt as evidenced by first_report no longer being True.
    # NOTE(review): first_report is only bound inside the version check
    # above, so on Python <= 2.4 this would raise NameError — confirm.
    if not first_report and failed:
        print
        print("DO *NOT* COMMIT!")
    return not failed
# The Python 2.5 doctest runner uses a plain tuple, but in 2.6+ it uses
# a namedtuple (which doesn't exist in 2.5-), so mirror that split here.
if sys.version_info[:2] > (2, 5):
    from collections import namedtuple
    SymPyTestResults = namedtuple('TestResults', 'failed attempted')
else:
    def SymPyTestResults(failed, attempted):
        # Plain-tuple fallback matching the namedtuple's field order.
        return (failed, attempted)
def sympytestfile(filename, module_relative=True, name=None, package=None,
                  globs=None, verbose=None, report=True, optionflags=0,
                  extraglobs=None, raise_on_error=False,
                  parser=pdoctest.DocTestParser(), encoding=None):
    """
    Test examples in the given file. Return (#failures, #tests).

    This mirrors doctest.testfile but runs the examples through
    SymPyDocTestRunner (which does not reset sys.displayhook).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path. By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package. To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path. The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    text, filename = pdoctest._load_testfile(filename, package, module_relative)

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    if raise_on_error:
        runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Keep doctest's module-global result accumulator up to date so
    # doctest.master.summarize() sees these results too.
    if pdoctest.master is None:
        pdoctest.master = runner
    else:
        pdoctest.master.merge(runner)

    return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
    """Discovers and runs sympy's test_*.py files, reporting through
    the given reporter object."""

    def __init__(self, reporter, kw="", post_mortem=False,
                 seed=random.random()):
        # NOTE(review): the default `seed=random.random()` is evaluated
        # once at class-definition time, so every instance created
        # without an explicit seed shares the same value — confirm
        # whether that is intended.
        self._post_mortem = post_mortem
        self._kw = kw                      # keyword filter for test names
        self._count = 0                    # number of files executed so far
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        self._testfiles = []
        self._seed = seed

    def test(self, sort=False):
        """
        Runs the tests returning True if all tests pass, otherwise False.

        If sort=False run tests in random order.
        """
        if sort:
            self._testfiles.sort()
        else:
            from random import shuffle
            random.seed(self._seed)
            shuffle(self._testfiles)
        self._reporter.start(self._seed)
        for f in self._testfiles:
            try:
                self.test_file(f)
            except KeyboardInterrupt:
                print " interrupted by user"
                break
        return self._reporter.finish()

    def test_file(self, filename):
        """Execute one test file and run every selected test function in it."""
        # NOTE(review): the first assignment to `name` is dead code —
        # it is immediately overwritten by the basename below.
        name = "test%d" % self._count
        name = os.path.splitext(os.path.basename(filename))[0]
        self._count += 1
        gl = {'__file__':filename}
        # Re-seed before each file so failures reproduce with the same seed.
        random.seed(self._seed)
        try:
            execfile(filename, gl)
        except (ImportError, SyntaxError):
            self._reporter.import_error(filename, sys.exc_info())
            return
        pytestfile = ""
        if "XFAIL" in gl:
            pytestfile = inspect.getsourcefile(gl["XFAIL"])
        disabled = gl.get("disabled", False)
        if disabled:
            funcs = []
        else:
            # we need to filter only those functions that begin with 'test_'
            # that are defined in the testing file or in the file where
            # is defined the XFAIL decorator
            funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
                     (inspect.isfunction(gl[f])
                      or inspect.ismethod(gl[f])) and
                     (inspect.getsourcefile(gl[f]) == filename or
                      inspect.getsourcefile(gl[f]) == pytestfile)]
            # Sorting of XFAILed functions isn't fixed yet :-(
            funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
            i = 0
            while i < len(funcs):
                if isgeneratorfunction(funcs[i]):
                    # some tests can be generators, that return the actual
                    # test functions. We unpack it below:
                    f = funcs.pop(i)
                    for fg in f():
                        func = fg[0]
                        args = fg[1:]
                        # NOTE(review): this lambda closes over `func` and
                        # `args` late — every fgw from one generator calls
                        # the last (func, args) pair — confirm intended.
                        fgw = lambda: func(*args)
                        funcs.insert(i, fgw)
                        i += 1
                else:
                    i += 1
            # drop functions that are not selected with the keyword expression:
            funcs = [x for x in funcs if self.matches(x)]

        if not funcs:
            return
        self._reporter.entering_filename(filename, len(funcs))
        for f in funcs:
            self._reporter.entering_test(f)
            try:
                f()
            except KeyboardInterrupt:
                raise
            except:
                # Classify the failure by exception type name so the
                # reporter can count fails/skips/xfails separately.
                t, v, tr = sys.exc_info()
                if t is AssertionError:
                    self._reporter.test_fail((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
                elif t.__name__ == "Skipped":
                    self._reporter.test_skip(v)
                elif t.__name__ == "XFail":
                    self._reporter.test_xfail()
                elif t.__name__ == "XPass":
                    self._reporter.test_xpass(v)
                else:
                    self._reporter.test_exception((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()

    def matches(self, x):
        """
        Does the keyword expression self._kw match "x"? Returns True/False.

        Always returns True if self._kw is "".
        """
        if self._kw == "":
            return True
        return x.__name__.find(self._kw) != -1

    def get_test_files(self, dir, pat = 'test_*.py'):
        """
        Returns the list of test_*.py (default) files at or below directory
        `dir` relative to the sympy home directory.
        """
        dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])

        g = []
        for path, folders, files in os.walk(dir):
            g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
        return [sys_normcase(gi) for gi in g]
class SymPyDocTests(object):
    """Discovers and runs doctests in sympy's *.py files, reporting
    through the given reporter object."""

    def __init__(self, reporter, normal):
        self._count = 0
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        # When `normal` is true, doctests keep their module globals;
        # otherwise each test starts with an empty namespace (see test_file).
        self._normal = normal

        self._testfiles = []

    def test(self):
        """
        Runs the tests and returns True if all tests pass, otherwise False.
        """
        self._reporter.start()
        for f in self._testfiles:
            try:
                self.test_file(f)
            except KeyboardInterrupt:
                print " interrupted by user"
                break
        return self._reporter.finish()

    def test_file(self, filename):
        """Find and run all doctests in a single file."""
        import unittest
        from StringIO import StringIO

        # Convert the file path into a dotted module name for import.
        rel_name = filename[len(self._root_dir)+1:]
        module = rel_name.replace(os.sep, '.')[:-3]
        setup_pprint()
        try:
            module = pdoctest._normalize_module(module)
            tests = SymPyDocTestFinder().find(module)
        except:
            self._reporter.import_error(filename, sys.exc_info())
            return

        tests = [test for test in tests if len(test.examples) > 0]
        # By default (except for python 2.4 in which it was broken) tests
        # are sorted by alphabetical order by function name. We sort by line number
        # so one can edit the file sequentially from bottom to top...HOWEVER
        # if there are decorated functions, their line numbers will be too large
        # and for now one must just search for these by text and function name.
        tests.sort(key=lambda x: -x.lineno)

        if not tests:
            return
        self._reporter.entering_filename(filename, len(tests))
        for test in tests:
            assert len(test.examples) != 0
            runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS | \
                pdoctest.NORMALIZE_WHITESPACE)

            # Capture the runner's output so it can be shown only on failure.
            old = sys.stdout
            new = StringIO()
            sys.stdout = new
            # If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # then must run on their own; all imports must be explicit within
            # a function's docstring. Once imported that import will be
            # available to the rest of the tests in a given function's
            # docstring (unless clear_globs=True below).
            if not self._normal:
                test.globs = {}
                # if this is uncommented then all the test would get is what
                # comes by default with a "from sympy import *"
                #exec('from sympy import *') in test.globs
            try:
                f, t = runner.run(test, out=new.write, clear_globs=False)
            finally:
                sys.stdout = old
            if f > 0:
                self._reporter.doctest_fail(test.name, new.getvalue())
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()

    def get_test_files(self, dir, pat='*.py', init_only=True):
        """
        Returns the list of *py files (default) from which docstrings
        will be tested which are at or below directory `dir`. By default,
        only those that have an __init__.py in their parent directory
        and do not start with `test_` will be included.
        """
        def importable(x):
            """
            Checks if given pathname x is an importable module by checking for
            __init__.py file.

            Returns True/False.

            Currently we only test if the __init__.py file exists in the
            directory with the file "x" (in theory we should also test all the
            parent dirs).
            """
            init_py = os.path.join(os.path.dirname(x), "__init__.py")
            return os.path.exists(init_py)

        dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])

        g = []
        for path, folders, files in os.walk(dir):
            g.extend([os.path.join(path, f) for f in files
                      if not f.startswith('test_') and fnmatch(f, pat)])
        if init_only:
            # skip files that are not importable (i.e. missing __init__.py)
            g = [x for x in g if importable(x)]
        return [sys_normcase(gi) for gi in g]
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version by looking harder for code in the
case that it looks like the the code comes from a different module.
In the case of decorated functions (e.g. @vectorize) they appear
to come from a different module (e.g. multidemensional) even though
their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
in_module = self._from_module(module, val)
if not in_module:
# double check in case this function is decorated
# and just appears to come from a different module.
pat = r'\s*(def|class)\s+%s\s*\(' % rawname
PAT = pre.compile(pat)
in_module = any(PAT.match(line) for line in source_lines)
if in_module:
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module, source_lines, globs, seen)
except ValueError, msg:
raise
except:
pass
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
in_module = self._from_module(module, val)
if not in_module:
# "double check" again
pat = r'\s*(def|class)\s+%s\s*\(' % valname
PAT = pre.compile(pat)
in_module = any(PAT.match(line) for line in source_lines)
if in_module:
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
    """
    Return a DocTest for the given object, if it defines a docstring;
    otherwise, return None.
    """
    # Extract the object's docstring. If it doesn't have one,
    # then return None (no test for this object).
    if isinstance(obj, basestring):
        # Entries coming from a __test__ dict may be bare strings; the
        # string itself is then the docstring to scan for examples.
        docstring = obj
    else:
        try:
            if obj.__doc__ is None:
                docstring = ''
            else:
                docstring = obj.__doc__
                if not isinstance(docstring, basestring):
                    # e.g. a property or descriptor returning a non-string;
                    # coerce so the parser always gets text.
                    docstring = str(docstring)
        except (TypeError, AttributeError):
            # Objects without a usable __doc__ attribute get no test.
            docstring = ''
    # Find the docstring's location in the file.
    lineno = self._find_lineno(obj, source_lines)
    if lineno is None:
        # if None, then _find_lineno couldn't find the docstring.
        # But IT IS STILL THERE. Likely it was decorated or something
        # (i.e., @property docstrings have lineno == None)
        # TODO: Write our own _find_lineno that is smarter in this regard
        # Until then, just give it a dummy lineno. This is just used for
        # sorting the tests, so the only bad effect is that they will appear
        # last instead of the order that they really are in the file.
        # lineno is also used to report the offending line of a failing
        # doctest, which is another reason to fix this. See issue 1947.
        lineno = 0
    # Don't bother if the docstring is empty.
    if self._exclude_empty and not docstring:
        return None
    # Return a DocTest for this object.
    if module is None:
        filename = None
    else:
        filename = getattr(module, '__file__', module.__name__)
        # Point at the .py source rather than the compiled .pyc/.pyo,
        # so reported line numbers map onto readable source.
        if filename[-4:] in (".pyc", ".pyo"):
            filename = filename[:-1]
    return self._parser.get_doctest(docstring, globs, name,
                                    filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
    """
    A class used to run DocTest test cases, and accumulate statistics.

    The `run` method is used to process a single DocTest case. It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

    Modified from the doctest version to not reset the sys.displayhook (see
    issue 2041).

    See the docstring of the original DocTestRunner for more information.
    """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `SymPyDocTestRunner.check_output`, and the results are formatted by
        the `SymPyDocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = pdoctest._extract_future_flags(test.globs)
        # Redirect stdout into the runner's capture buffer; the originals
        # are saved so they can be restored in the finally block below.
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = pdoctest.linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Always undo the global patches, even if the test raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
# We have to override the name mangled methods.
# DocTestRunner stores these helpers under double-underscore names, which
# Python mangles to `_DocTestRunner__*`; the subclass therefore cannot reach
# them via its own mangled names (`_SymPyDocTestRunner__*`), so the inherited
# implementations are copied across explicitly.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
    DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
    DocTestRunner._DocTestRunner__record_outcome
class Reporter(object):
    """Abstract base class from which all test reporters derive."""
class PyTestReporter(Reporter):
    """
    Py.test like reporter. Should produce output identical to py.test.
    """

    def __init__(self, verbose=False, tb="short", colors=True):
        # verbose ... announce each test function by name as it runs
        # tb      ... traceback style; "no" suppresses the failure sections
        # colors  ... allow ANSI color escape sequences in the output
        self._verbose = verbose
        self._tb_style = tb
        self._colors = colors
        # Counters and collections accumulated over the whole test run:
        self._xfailed = 0
        self._xpassed = []
        self._failed = []
        self._failed_doctest = []
        self._passed = 0
        self._skipped = 0
        self._exceptions = []
        # this tracks the x-position of the cursor (useful for positioning
        # things on the screen), without the need for any readline library:
        self._write_pos = 0
        self._line_wrap = False

    def root_dir(self, dir):
        """Remember the test root directory; used to shorten file paths."""
        self._root_dir = dir

    def write(self, text, color="", align="left", width=80):
        """
        Prints a text on the screen.

        It uses sys.stdout.write(), so no readline library is necessary.

        color ... choose from the colors below, "" means default color
        align ... left/right, left is a normal print, right is aligned on the
                  right hand side of the screen, filled with " " if necessary
        width ... the screen width
        """
        # Map of human-readable color names to ANSI SGR parameter strings.
        color_templates = (
            ("Black" , "0;30"),
            ("Red" , "0;31"),
            ("Green" , "0;32"),
            ("Brown" , "0;33"),
            ("Blue" , "0;34"),
            ("Purple" , "0;35"),
            ("Cyan" , "0;36"),
            ("LightGray" , "0;37"),
            ("DarkGray" , "1;30"),
            ("LightRed" , "1;31"),
            ("LightGreen" , "1;32"),
            ("Yellow" , "1;33"),
            ("LightBlue" , "1;34"),
            ("LightPurple" , "1;35"),
            ("LightCyan" , "1;36"),
            ("White" , "1;37"), )
        colors = {}
        for name, value in color_templates:
            colors[name] = value
        c_normal = '\033[0m'
        c_color = '\033[%sm'
        if align == "right":
            if self._write_pos+len(text) > width:
                # we don't fit on the current line, create a new line
                self.write("\n")
            # Pad with spaces so `text` ends exactly at column `width`.
            self.write(" "*(width-self._write_pos-len(text)))
        if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
            # the stdout is not a terminal, this for example happens if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
            color = ""
        if sys.platform == "win32":
            # Windows consoles don't support ANSI escape sequences
            color = ""
        if self._line_wrap:
            # The previous write ended exactly at the screen edge; start a
            # fresh line unless the new text begins with one anyway.
            if text[0] != "\n":
                sys.stdout.write("\n")
        if color == "":
            sys.stdout.write(text)
        else:
            sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal))
        sys.stdout.flush()
        # Recompute the cursor column from the last newline in `text`.
        l = text.rfind("\n")
        if l == -1:
            self._write_pos += len(text)
        else:
            self._write_pos = len(text)-l-1
        self._line_wrap = self._write_pos >= width
        self._write_pos %= width

    def write_center(self, text, delim="="):
        """Write `text` centered on an 80-column line padded with `delim`."""
        width = 80
        if text != "":
            text = " %s " % text
        idx = (width-len(text)) // 2
        t = delim*idx + text + delim*(width-idx-len(text))
        self.write(t+"\n")

    def write_exception(self, e, val, tb):
        """Write a formatted traceback for exception type `e`, value `val`."""
        t = traceback.extract_tb(tb)
        # remove the first item, as that is always runtests.py
        t = t[1:]
        t = traceback.format_list(t)
        self.write("".join(t))
        t = traceback.format_exception_only(e, val)
        self.write("".join(t))

    def start(self, seed=None):
        """Print the run header (interpreter, arch, ground types, seed)."""
        self.write_center("test process starts")
        executable = sys.executable
        v = tuple(sys.version_info)
        python_version = "%s.%s.%s-%s-%s" % v
        self.write("executable: %s (%s)\n" % (executable, python_version))
        from .misc import ARCH
        self.write("architecture: %s\n" % ARCH)
        from sympy.polys.domains import GROUND_TYPES
        self.write("ground types: %s\n" % GROUND_TYPES)
        if seed is not None:
            self.write("random seed: %d\n\n" % seed)
        self._t_start = clock()

    def finish(self):
        """
        Print the summary line and all failure/exception sections.

        Returns True if the whole run is clean (no failures, no failed
        doctests, no exceptions), False otherwise.
        """
        self._t_end = clock()
        self.write("\n")
        # NOTE(review): the summary string is built in module-level globals
        # (`text`, `linelen`) so the nested helper can mutate them without
        # Python 3's `nonlocal`; this is Python 2 code.
        global text, linelen
        text = "tests finished: %d passed, " % self._passed
        linelen = len(text)

        def add_text(mytext):
            global text, linelen
            """Break new text if too long."""
            if linelen + len(mytext) > 80:
                text += '\n'
                linelen = 0
            text += mytext
            linelen += len(mytext)

        if len(self._failed) > 0:
            add_text("%d failed, " % len(self._failed))
        if len(self._failed_doctest) > 0:
            add_text("%d failed, " % len(self._failed_doctest))
        if self._skipped > 0:
            add_text("%d skipped, " % self._skipped)
        if self._xfailed > 0:
            add_text("%d expected to fail, " % self._xfailed)
        if len(self._xpassed) > 0:
            add_text("%d expected to fail but passed, " % len(self._xpassed))
        if len(self._exceptions) > 0:
            add_text("%d exceptions, " % len(self._exceptions))
        add_text("in %.2f seconds" % (self._t_end - self._t_start))

        if len(self._xpassed) > 0:
            self.write_center("xpassed tests", "_")
            for e in self._xpassed:
                self.write("%s:%s\n" % (e[0], e[1]))
            self.write("\n")

        if self._tb_style != "no" and len(self._exceptions) > 0:
            #self.write_center("These tests raised an exception", "_")
            for e in self._exceptions:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                # `f` is None for import errors recorded by import_error().
                if f is None:
                    s = "%s" % filename
                else:
                    s = "%s:%s" % (filename, f.__name__)
                self.write_center(s, "_")
                self.write_exception(t, val, tb)
            self.write("\n")

        if self._tb_style != "no" and len(self._failed) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                self.write_center("%s:%s" % (filename, f.__name__), "_")
                self.write_exception(t, val, tb)
            self.write("\n")

        if self._tb_style != "no" and len(self._failed_doctest) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed_doctest:
                filename, msg = e
                self.write_center("", "_")
                self.write_center("%s" % filename, "_")
                self.write(msg)
            self.write("\n")

        self.write_center(text)
        ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
            len(self._failed_doctest) == 0
        if not ok:
            self.write("DO *NOT* COMMIT!\n")
        return ok

    def entering_filename(self, filename, n):
        """Announce a test file; `n` is the number of tests collected in it."""
        rel_name = filename[len(self._root_dir)+1:]
        self._active_file = rel_name
        self._active_file_error = False
        self.write(rel_name)
        self.write("[%d] " % n)

    def leaving_filename(self):
        """Write the right-aligned [OK]/[FAIL] marker for the current file."""
        if self._colors:
            self.write(" ")
        if self._active_file_error:
            self.write("[FAIL]", "Red", align="right")
        else:
            self.write("[OK]", "Green", align="right")
        self.write("\n")
        if self._verbose:
            self.write("\n")

    def entering_test(self, f):
        """Record the test function about to run (named in verbose mode)."""
        self._active_f = f
        if self._verbose:
            self.write("\n"+f.__name__+" ")

    def test_xfail(self):
        # Expected failure: counts toward the summary, prints 'f'.
        self._xfailed += 1
        self.write("f", "Green")

    def test_xpass(self, fname):
        # Unexpected pass of an expected-failure test: prints 'X'.
        self._xpassed.append((self._active_file, fname))
        self.write("X", "Green")

    def test_fail(self, exc_info):
        # Ordinary assertion failure: prints 'F' and flags the file.
        self._failed.append((self._active_file, self._active_f, exc_info))
        self.write("F", "Red")
        self._active_file_error = True

    def doctest_fail(self, name, error_msg):
        # the first line contains "******", remove it:
        error_msg = "\n".join(error_msg.split("\n")[1:])
        self._failed_doctest.append((name, error_msg))
        self.write("F", "Red")
        self._active_file_error = True

    def test_pass(self):
        self._passed += 1
        if self._verbose:
            self.write("ok", "Green")
        else:
            self.write(".", "Green")

    def test_skip(self, v):
        # `v` is the Skipped exception; `.message` is a Python 2 attribute.
        self._skipped += 1
        self.write("s", "Green")
        if self._verbose:
            self.write(" - ", "Green")
            self.write(v.message, "Green")

    def test_exception(self, exc_info):
        # Unexpected exception (not an assertion failure): prints 'E'.
        self._exceptions.append((self._active_file, self._active_f, exc_info))
        self.write("E", "Red")
        self._active_file_error = True

    def import_error(self, filename, exc_info):
        """Record and report a test file that could not be imported."""
        self._exceptions.append((filename, None, exc_info))
        rel_name = filename[len(self._root_dir)+1:]
        self.write(rel_name)
        self.write("[?]   Failed to import", "Red")
        if self._colors:
            self.write(" ")
        self.write("[FAIL]", "Red", align="right")
        self.write("\n")
sympy_dir = get_sympy_dir() | unknown | codeparrot/codeparrot-clean | ||
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop Changelog
## Release 0.19.0 - 2008-11-20
### INCOMPATIBLE CHANGES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-3595](https://issues.apache.org/jira/browse/HADOOP-3595) | Remove deprecated mapred.combine.once functionality | Major | . | Chris Douglas | Chris Douglas |
| [HADOOP-3667](https://issues.apache.org/jira/browse/HADOOP-3667) | Remove deprecated methods in JobConf | Major | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-3652](https://issues.apache.org/jira/browse/HADOOP-3652) | Remove deprecated class OutputFormatBase | Major | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-2325](https://issues.apache.org/jira/browse/HADOOP-2325) | Require Java 6 | Major | build | Doug Cutting | Doug Cutting |
| [HADOOP-1700](https://issues.apache.org/jira/browse/HADOOP-1700) | Append to files in HDFS | Major | . | stack | dhruba borthakur |
| [HADOOP-3796](https://issues.apache.org/jira/browse/HADOOP-3796) | fuse-dfs should take rw,ro,trashon,trashoff,protected=blah mount arguments rather than them being compiled in | Major | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-3837](https://issues.apache.org/jira/browse/HADOOP-3837) | hadoop streaming does not use progress reporting to detect hung tasks | Major | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-3792](https://issues.apache.org/jira/browse/HADOOP-3792) | exit code from "hadoop dfs -test ..." is wrong for Unix shell | Minor | fs | Ben Slusky | Ben Slusky |
| [HADOOP-3889](https://issues.apache.org/jira/browse/HADOOP-3889) | distcp: Better Error Message should be thrown when accessing source files/directory with no read permission | Minor | . | Peeyush Bishnoi | Tsz Wo Nicholas Sze |
| [HADOOP-3062](https://issues.apache.org/jira/browse/HADOOP-3062) | Need to capture the metrics for the network ios generate by dfs reads/writes and map/reduce shuffling and break them down by racks | Major | metrics | Runping Qi | Chris Douglas |
| [HADOOP-3150](https://issues.apache.org/jira/browse/HADOOP-3150) | Move task file promotion into the task | Major | . | Owen O'Malley | Amareshwari Sriramadasu |
| [HADOOP-3963](https://issues.apache.org/jira/browse/HADOOP-3963) | libhdfs should never exit on its own but rather return errors to the calling application | Minor | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-1869](https://issues.apache.org/jira/browse/HADOOP-1869) | access times of HDFS files | Major | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-3981](https://issues.apache.org/jira/browse/HADOOP-3981) | Need a distributed file checksum algorithm for HDFS | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3245](https://issues.apache.org/jira/browse/HADOOP-3245) | Provide ability to persist running jobs (extend HADOOP-1876) | Major | . | Devaraj Das | Amar Kamat |
| [HADOOP-3911](https://issues.apache.org/jira/browse/HADOOP-3911) | ' -blocks ' option not being recognized | Minor | fs, util | Koji Noguchi | Lohit Vijayarenu |
| [HADOOP-3722](https://issues.apache.org/jira/browse/HADOOP-3722) | Provide a unified way to pass jobconf options from bin/hadoop | Minor | conf | Matei Zaharia | Enis Soztutar |
| [HADOOP-2816](https://issues.apache.org/jira/browse/HADOOP-2816) | Cluster summary at name node web has confusing report for space utilization | Major | . | Robert Chansler | Suresh Srinivas |
| [HADOOP-4227](https://issues.apache.org/jira/browse/HADOOP-4227) | Remove the deprecated, unused class ShellCommand. | Minor | fs | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3938](https://issues.apache.org/jira/browse/HADOOP-3938) | Quotas for disk space management | Major | . | Robert Chansler | Raghu Angadi |
| [HADOOP-4116](https://issues.apache.org/jira/browse/HADOOP-4116) | Balancer should provide better resource management | Blocker | . | Raghu Angadi | Hairong Kuang |
| [HADOOP-4190](https://issues.apache.org/jira/browse/HADOOP-4190) | Changes to JobHistory makes it backward incompatible | Blocker | . | Amar Kamat | Amar Kamat |
| [HADOOP-4293](https://issues.apache.org/jira/browse/HADOOP-4293) | Remove WritableJobConf | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-4281](https://issues.apache.org/jira/browse/HADOOP-4281) | Capacity reported in some of the commands is not consistent with the Web UI reported data | Blocker | . | Suresh Srinivas | Suresh Srinivas |
| [HADOOP-4430](https://issues.apache.org/jira/browse/HADOOP-4430) | Namenode Web UI capacity report is inconsistent with Balancer | Blocker | . | Suresh Srinivas | Suresh Srinivas |
### NEW FEATURES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-2664](https://issues.apache.org/jira/browse/HADOOP-2664) | lzop-compatible CompresionCodec | Major | io | Chris Douglas | Chris Douglas |
| [HADOOP-3402](https://issues.apache.org/jira/browse/HADOOP-3402) | Add example code to support run terasort on hadoop | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-3479](https://issues.apache.org/jira/browse/HADOOP-3479) | Implement configuration items useful for Hadoop resource manager (v1) | Major | conf | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-3695](https://issues.apache.org/jira/browse/HADOOP-3695) | [HOD] Have an ability to run multiple slaves per node | Major | contrib/hod | Hemanth Yamijala | Vinod Kumar Vavilapalli |
| [HADOOP-3149](https://issues.apache.org/jira/browse/HADOOP-3149) | supporting multiple outputs for M/R jobs | Major | . | Alejandro Abdelnur | Alejandro Abdelnur |
| [HADOOP-3714](https://issues.apache.org/jira/browse/HADOOP-3714) | Bash tab completion support | Trivial | scripts | Chris Smith | Chris Smith |
| [HADOOP-3730](https://issues.apache.org/jira/browse/HADOOP-3730) | add new JobConf constructor that disables loading default configurations | Major | conf | Alejandro Abdelnur | Alejandro Abdelnur |
| [HADOOP-372](https://issues.apache.org/jira/browse/HADOOP-372) | should allow to specify different inputformat classes for different input dirs for Map/Reduce jobs | Major | . | Runping Qi | Chris Smith |
| [HADOOP-3485](https://issues.apache.org/jira/browse/HADOOP-3485) | fix writes | Minor | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-153](https://issues.apache.org/jira/browse/HADOOP-153) | skip records that fail Task | Major | . | Doug Cutting | Sharad Agarwal |
| [HADOOP-3873](https://issues.apache.org/jira/browse/HADOOP-3873) | DistCp should have an option for limiting the number of files/bytes being copied | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3585](https://issues.apache.org/jira/browse/HADOOP-3585) | Hardware Failure Monitoring in large clusters running Hadoop/HDFS | Minor | metrics | Ioannis Koltsidas | Ioannis Koltsidas |
| [HADOOP-1480](https://issues.apache.org/jira/browse/HADOOP-1480) | pipes should be able to set user counters | Major | . | Owen O'Malley | Arun C Murthy |
| [HADOOP-3854](https://issues.apache.org/jira/browse/HADOOP-3854) | org.apache.hadoop.http.HttpServer should support user configurable filter | Major | util | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3746](https://issues.apache.org/jira/browse/HADOOP-3746) | A fair sharing job scheduler | Minor | . | Matei Zaharia | Matei Zaharia |
| [HADOOP-3754](https://issues.apache.org/jira/browse/HADOOP-3754) | Support a Thrift Interface to access files/directories in HDFS | Major | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-3828](https://issues.apache.org/jira/browse/HADOOP-3828) | Write skipped records' bytes to DFS | Major | . | Sharad Agarwal | Sharad Agarwal |
| [HADOOP-3939](https://issues.apache.org/jira/browse/HADOOP-3939) | DistCp should support an option for deleting non-existing files. | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3698](https://issues.apache.org/jira/browse/HADOOP-3698) | Implement access control for submitting jobs to queues in the JobTracker | Major | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-3941](https://issues.apache.org/jira/browse/HADOOP-3941) | Extend FileSystem API to return file-checksums/file-digests | Major | fs | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3361](https://issues.apache.org/jira/browse/HADOOP-3361) | Implement renames for NativeS3FileSystem | Major | fs/s3 | Tom White | Tom White |
| [HADOOP-3702](https://issues.apache.org/jira/browse/HADOOP-3702) | add support for chaining Maps in a single Map and after a Reduce [M\*/RM\*] | Major | . | Alejandro Abdelnur | Alejandro Abdelnur |
| [HADOOP-3445](https://issues.apache.org/jira/browse/HADOOP-3445) | Implementing core scheduler functionality in Resource Manager (V1) for Hadoop | Major | . | Vivek Ratan | Vivek Ratan |
| [HADOOP-3992](https://issues.apache.org/jira/browse/HADOOP-3992) | Synthetic Load Generator for NameNode testing | Major | . | Robert Chansler | Hairong Kuang |
| [HADOOP-3829](https://issues.apache.org/jira/browse/HADOOP-3829) | Narrow down skipped records based on user acceptable value | Major | . | Sharad Agarwal | Sharad Agarwal |
| [HADOOP-4084](https://issues.apache.org/jira/browse/HADOOP-4084) | Add explain plan capabilities to Hive QL | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4104](https://issues.apache.org/jira/browse/HADOOP-4104) | add time, permission and user attribute support to libhdfs | Major | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-4106](https://issues.apache.org/jira/browse/HADOOP-4106) | add time, permission and user attribute support to fuse-dfs | Major | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-4176](https://issues.apache.org/jira/browse/HADOOP-4176) | Implement getFileChecksum(Path) in HftpFileSystem | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4070](https://issues.apache.org/jira/browse/HADOOP-4070) | [Hive] Provide a mechanism for registering UDFs from the query language | Major | . | Tom White | Tom White |
| [HADOOP-2536](https://issues.apache.org/jira/browse/HADOOP-2536) | MapReduce for MySQL | Minor | . | Fredrik Hedberg | Fredrik Hedberg |
| [HADOOP-3019](https://issues.apache.org/jira/browse/HADOOP-3019) | want input sampler & sorted partitioner | Major | . | Doug Cutting | Chris Douglas |
| [HADOOP-3924](https://issues.apache.org/jira/browse/HADOOP-3924) | Add a 'Killed' job status | Critical | . | Alejandro Abdelnur | Subru Krishnan |
| [HADOOP-4120](https://issues.apache.org/jira/browse/HADOOP-4120) | [Hive] print time taken by query in interactive shell | Minor | . | Raghotham Murthy | Raghotham Murthy |
| [HADOOP-4301](https://issues.apache.org/jira/browse/HADOOP-4301) | Forrest doc for skip bad records feature | Blocker | documentation | Sharad Agarwal | Sharad Agarwal |
| [HADOOP-4260](https://issues.apache.org/jira/browse/HADOOP-4260) | support show partitions in hive | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4095](https://issues.apache.org/jira/browse/HADOOP-4095) | [Hive] enhance describe table & partition | Major | . | Prasad Chakka | Namit Jain |
| [HADOOP-4086](https://issues.apache.org/jira/browse/HADOOP-4086) | Add limit to Hive QL | Major | . | Ashish Thusoo | Namit Jain |
| [HADOOP-2658](https://issues.apache.org/jira/browse/HADOOP-2658) | Design and Implement a Test Plan to support appends to HDFS files | Blocker | test | dhruba borthakur | dhruba borthakur |
| [HADOOP-4406](https://issues.apache.org/jira/browse/HADOOP-4406) | Make TCTLSeparatedProtocol configurable and have DynamicSerDe initialize, initialize the SerDe | Major | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-1823](https://issues.apache.org/jira/browse/HADOOP-1823) | want InputFormat for bzip2 files | Major | . | Doug Cutting | |
### IMPROVEMENTS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-3563](https://issues.apache.org/jira/browse/HADOOP-3563) | Separate out datanode and namenode functionality of generation stamp upgrade process | Major | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-3577](https://issues.apache.org/jira/browse/HADOOP-3577) | Tools to inject blocks into name node and simulated data nodes for testing | Minor | . | Sanjay Radia | Sanjay Radia |
| [HADOOP-3341](https://issues.apache.org/jira/browse/HADOOP-3341) | make key-value separators in hadoop streaming fully configurable | Major | . | Zheng Shao | Zheng Shao |
| [HADOOP-3556](https://issues.apache.org/jira/browse/HADOOP-3556) | Substitute the synchronized code in MD5Hash to avoid lock contention. Use ThreadLocal instead. | Major | io | Iván de Prado | Iván de Prado |
| [HADOOP-3655](https://issues.apache.org/jira/browse/HADOOP-3655) | provide more control options for the junit run | Minor | build | Steve Loughran | Steve Loughran |
| [HADOOP-3660](https://issues.apache.org/jira/browse/HADOOP-3660) | Add replication factor for injecting blocks in the data node cluster | Major | benchmarks | Sanjay Radia | Sanjay Radia |
| [HADOOP-3328](https://issues.apache.org/jira/browse/HADOOP-3328) | DFS write pipeline : only the last datanode needs to verify checksum | Major | . | Raghu Angadi | Raghu Angadi |
| [HADOOP-3684](https://issues.apache.org/jira/browse/HADOOP-3684) | The data\_join should allow the user to implement a customer cloning function | Major | . | Runping Qi | Runping Qi |
| [HADOOP-3721](https://issues.apache.org/jira/browse/HADOOP-3721) | CompositeRecordReader::next is unnecessarily complex | Major | . | Chris Douglas | Chris Douglas |
| [HADOOP-3478](https://issues.apache.org/jira/browse/HADOOP-3478) | The algorithm to decide map re-execution on fetch failures can be improved | Major | . | Jothi Padmanabhan | Jothi Padmanabhan |
| [HADOOP-1627](https://issues.apache.org/jira/browse/HADOOP-1627) | DFSAdmin incorrectly reports cluster data. | Minor | . | Konstantin Shvachko | Raghu Angadi |
| [HADOOP-3617](https://issues.apache.org/jira/browse/HADOOP-3617) | Writes from map serialization include redundant checks for accounting space | Major | . | Chris Douglas | Chris Douglas |
| [HADOOP-3412](https://issues.apache.org/jira/browse/HADOOP-3412) | Refactor the scheduler out of the JobTracker | Minor | . | Brice Arnould | Brice Arnould |
| [HADOOP-3624](https://issues.apache.org/jira/browse/HADOOP-3624) | CreateEditsLog could be improved to create tree directory structure | Minor | test | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-3747](https://issues.apache.org/jira/browse/HADOOP-3747) | Add counter support to MultipleOutputs | Minor | . | Alejandro Abdelnur | Alejandro Abdelnur |
| [HADOOP-3661](https://issues.apache.org/jira/browse/HADOOP-3661) | Normalize fuse-dfs handling of moving things to trash wrt the way hadoop dfs does it (only when non posix trash flag is enabled in compile) | Major | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-3169](https://issues.apache.org/jira/browse/HADOOP-3169) | LeaseChecker daemon should not be started in DFSClient constructor | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3646](https://issues.apache.org/jira/browse/HADOOP-3646) | Providing bzip2 as codec | Major | conf, io | Abdul Qadeer | Abdul Qadeer |
| [HADOOP-3861](https://issues.apache.org/jira/browse/HADOOP-3861) | Make MapFile.Reader and Writer implement java.io.Closeable | Major | io | Tom White | Tom White |
| [HADOOP-3694](https://issues.apache.org/jira/browse/HADOOP-3694) | if MiniDFS startup time could be improved, testing time would be reduced | Major | test | Steve Loughran | Doug Cutting |
| [HADOOP-3620](https://issues.apache.org/jira/browse/HADOOP-3620) | Namenode should synchronously resolve a datanode's network location when the datanode registers | Major | . | Hairong Kuang | Hairong Kuang |
| [HADOOP-3860](https://issues.apache.org/jira/browse/HADOOP-3860) | Compare name-node performance when journaling is performed into local hard-drives or nfs. | Major | benchmarks | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-3805](https://issues.apache.org/jira/browse/HADOOP-3805) | improve fuse-dfs write performance which is 33% slower than hadoop dfs -copyFromLocal | Minor | . | Pete Wyckoff | |
| [HADOOP-2302](https://issues.apache.org/jira/browse/HADOOP-2302) | Streaming should provide an option for numerical sort of keys | Major | . | Lohit Vijayarenu | Devaraj Das |
| [HADOOP-3892](https://issues.apache.org/jira/browse/HADOOP-3892) | Include Unix group name in JobConf | Trivial | conf | Matei Zaharia | Matei Zaharia |
| [HADOOP-3853](https://issues.apache.org/jira/browse/HADOOP-3853) | Move multiple input format extension to library package | Major | . | Tom White | Tom White |
| [HADOOP-657](https://issues.apache.org/jira/browse/HADOOP-657) | Free temporary space should be modelled better | Major | . | Owen O'Malley | Ari Rabkin |
| [HADOOP-3202](https://issues.apache.org/jira/browse/HADOOP-3202) | Deprecate org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem fs, Path dir) | Major | fs | Tsz Wo Nicholas Sze | Amareshwari Sriramadasu |
| [HADOOP-3368](https://issues.apache.org/jira/browse/HADOOP-3368) | Can commons-logging.properties be pulled from hadoop-core? | Major | build | Steve Loughran | Steve Loughran |
| [HADOOP-3780](https://issues.apache.org/jira/browse/HADOOP-3780) | JobTracker should synchronously resolve the tasktracker's network location when the tracker registers | Major | . | Amar Kamat | Amar Kamat |
| [HADOOP-3852](https://issues.apache.org/jira/browse/HADOOP-3852) | If ShellCommandExecutor had a toString() operator that listed the command run, its error messages may be more meaningful | Minor | util | Steve Loughran | Steve Loughran |
| [HADOOP-3664](https://issues.apache.org/jira/browse/HADOOP-3664) | Remove deprecated methods introduced in changes to validating input paths (HADOOP-3095) | Major | . | Tom White | Tom White |
| [HADOOP-3719](https://issues.apache.org/jira/browse/HADOOP-3719) | Chukwa | Major | . | Ari Rabkin | Ari Rabkin |
| [HADOOP-3935](https://issues.apache.org/jira/browse/HADOOP-3935) | Extract classes from DataNode.java | Trivial | . | Johan Oskarsson | Johan Oskarsson |
| [HADOOP-3905](https://issues.apache.org/jira/browse/HADOOP-3905) | Create a generic interface for edits log. | Major | . | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-3549](https://issues.apache.org/jira/browse/HADOOP-3549) | meaningful errno values in libhdfs | Major | . | Ben Slusky | Ben Slusky |
| [HADOOP-2130](https://issues.apache.org/jira/browse/HADOOP-2130) | Pipes submit job should be Non-blocking | Critical | . | Srikanth Kakani | Arun C Murthy |
| [HADOOP-3944](https://issues.apache.org/jira/browse/HADOOP-3944) | TupleWritable listed as public class but cannot be used without methods private to the package | Trivial | documentation | Michael Andrews | Chris Douglas |
| [HADOOP-3759](https://issues.apache.org/jira/browse/HADOOP-3759) | Provide ability to run memory intensive jobs without affecting other running tasks on the nodes | Major | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-3965](https://issues.apache.org/jira/browse/HADOOP-3965) | Make DataBlockScanner package private | Major | . | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-2330](https://issues.apache.org/jira/browse/HADOOP-2330) | Preallocate transaction log to improve namenode transaction logging performance | Major | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-3908](https://issues.apache.org/jira/browse/HADOOP-3908) | Better error message if llibhdfs.so doesn't exist | Minor | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-3342](https://issues.apache.org/jira/browse/HADOOP-3342) | Better safety of killing jobs via web interface | Minor | . | Daniel Naber | Enis Soztutar |
| [HADOOP-3769](https://issues.apache.org/jira/browse/HADOOP-3769) | expose static SampleMapper and SampleReducer classes of GenericMRLoadGenerator class for gridmix reuse | Major | test | Lingyun Yang | Lingyun Yang |
| [HADOOP-3948](https://issues.apache.org/jira/browse/HADOOP-3948) | Separate Namenodes edits and fsimage | Major | . | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-3866](https://issues.apache.org/jira/browse/HADOOP-3866) | Improve Hadoop Jobtracker Admin | Major | scripts | craig weisenfluh | craig weisenfluh |
| [HADOOP-3943](https://issues.apache.org/jira/browse/HADOOP-3943) | NetworkTopology.pseudoSortByDistance does not need to be a synchronized method | Major | . | Hairong Kuang | Hairong Kuang |
| [HADOOP-3498](https://issues.apache.org/jira/browse/HADOOP-3498) | File globbing alternation should be able to span path components | Major | fs | Tom White | Tom White |
| [HADOOP-3581](https://issues.apache.org/jira/browse/HADOOP-3581) | Prevent memory intensive user tasks from taking down nodes | Major | . | Hemanth Yamijala | Vinod Kumar Vavilapalli |
| [HADOOP-3605](https://issues.apache.org/jira/browse/HADOOP-3605) | Added an abort on unset AWS\_ACCOUNT\_ID to luanch-hadoop-master | Minor | contrib/cloud | Al Hoang | Al Hoang |
| [HADOOP-3514](https://issues.apache.org/jira/browse/HADOOP-3514) | Reduce seeks during shuffle, by inline crcs | Major | . | Devaraj Das | Jothi Padmanabhan |
| [HADOOP-4113](https://issues.apache.org/jira/browse/HADOOP-4113) | libhdfs should never exit on its own but rather return errors to the calling application - missing diff files | Minor | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-3446](https://issues.apache.org/jira/browse/HADOOP-3446) | The reduce task should not flush the in memory file system before starting the reducer | Critical | . | Owen O'Malley | Chris Douglas |
| [HADOOP-4094](https://issues.apache.org/jira/browse/HADOOP-4094) | [Hive]implement hive-site.xml similar to hadoop-site.xml | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4151](https://issues.apache.org/jira/browse/HADOOP-4151) | Add a memcmp-compatible interface for key types | Minor | . | Chris Douglas | Chris Douglas |
| [HADOOP-4174](https://issues.apache.org/jira/browse/HADOOP-4174) | Move non-client methods out of ClientProtocol | Major | . | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-4138](https://issues.apache.org/jira/browse/HADOOP-4138) | [Hive] refactor the SerDe library | Major | . | Zheng Shao | Zheng Shao |
| [HADOOP-4075](https://issues.apache.org/jira/browse/HADOOP-4075) | test-patch.sh should output the ant commands that it runs | Major | build | Nigel Daley | Ramya Sunil |
| [HADOOP-4117](https://issues.apache.org/jira/browse/HADOOP-4117) | Improve configurability of Hadoop EC2 instances | Major | contrib/cloud | Tom White | Tom White |
| [HADOOP-2411](https://issues.apache.org/jira/browse/HADOOP-2411) | Add support for larger EC2 instance types | Major | contrib/cloud | Tom White | Chris K Wensel |
| [HADOOP-3930](https://issues.apache.org/jira/browse/HADOOP-3930) | Decide how to integrate scheduler info into CLI and job tracker web page | Major | . | Matei Zaharia | Sreekanth Ramakrishnan |
| [HADOOP-4083](https://issues.apache.org/jira/browse/HADOOP-4083) | change new config attribute queue.name to mapred.job.queue.name | Major | . | Owen O'Malley | Hemanth Yamijala |
| [HADOOP-4194](https://issues.apache.org/jira/browse/HADOOP-4194) | Add JobConf and JobID to job related methods in JobTrackerInstrumentation | Major | . | Mac Yang | Mac Yang |
| [HADOOP-249](https://issues.apache.org/jira/browse/HADOOP-249) | Improving Map -\> Reduce performance and Task JVM reuse | Major | . | Benjamin Reed | Devaraj Das |
| [HADOOP-3638](https://issues.apache.org/jira/browse/HADOOP-3638) | Cache the iFile index files in memory to reduce seeks during map output serving | Major | . | Devaraj Das | Jothi Padmanabhan |
| [HADOOP-3975](https://issues.apache.org/jira/browse/HADOOP-3975) | test-patch can report the modifications found in the workspace along with the error message | Minor | test | Hemanth Yamijala | Ramya Sunil |
| [HADOOP-4124](https://issues.apache.org/jira/browse/HADOOP-4124) | Changing priority of a job should be available in CLI and available on the web UI only along with the Kill Job actions | Major | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-2165](https://issues.apache.org/jira/browse/HADOOP-2165) | Augment JobHistory to store tasks' userlogs | Major | . | Arun C Murthy | Vinod Kumar Vavilapalli |
| [HADOOP-4062](https://issues.apache.org/jira/browse/HADOOP-4062) | IPC client does not need to be synchronized on the output stream when a connection is closed | Major | ipc | Hairong Kuang | Hairong Kuang |
| [HADOOP-4181](https://issues.apache.org/jira/browse/HADOOP-4181) | some minor things to make Hadoop friendlier to git | Major | build | Owen O'Malley | Owen O'Malley |
| [HADOOP-4090](https://issues.apache.org/jira/browse/HADOOP-4090) | The configuration file lists two paths to hadoop directories (bin and conf). Startup should check that these are valid directories and give appropriate messages. | Minor | . | Ashish Thusoo | Raghotham Murthy |
| [HADOOP-4205](https://issues.apache.org/jira/browse/HADOOP-4205) | [Hive] metastore and ql to use the refactored SerDe library | Major | . | Zheng Shao | Zheng Shao |
| [HADOOP-4279](https://issues.apache.org/jira/browse/HADOOP-4279) | write the random number generator seed to log in the append-related tests | Blocker | test | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4053](https://issues.apache.org/jira/browse/HADOOP-4053) | Schedulers need to know when a job has completed | Blocker | . | Vivek Ratan | Amar Kamat |
| [HADOOP-4424](https://issues.apache.org/jira/browse/HADOOP-4424) | menu layout change for Hadoop documentation | Blocker | documentation | Boris Shkolnik | Boris Shkolnik |
| [HADOOP-4230](https://issues.apache.org/jira/browse/HADOOP-4230) | Hive: GroupBy should not pass the whole row from mapper to reducer | Blocker | . | Zheng Shao | Ashish Thusoo |
| [HADOOP-4231](https://issues.apache.org/jira/browse/HADOOP-4231) | Hive: converting complex objects to JSON failed. | Minor | . | Zheng Shao | Zheng Shao |
| [HADOOP-4252](https://issues.apache.org/jira/browse/HADOOP-4252) | Catch Ctrl-C in Hive CLI so that corresponding hadoop jobs can be killed | Minor | . | Prasad Chakka | Pete Wyckoff |
| [HADOOP-4353](https://issues.apache.org/jira/browse/HADOOP-4353) | enable multi-line query from Hive CLI | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4307](https://issues.apache.org/jira/browse/HADOOP-4307) | add an option to describe table to show extended properties of the table such as serialization/deserialization properties | Major | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4345](https://issues.apache.org/jira/browse/HADOOP-4345) | Hive: Check that partitioning predicate is present when hive.partition.pruning = strict | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4431](https://issues.apache.org/jira/browse/HADOOP-4431) | Add versionning/tags to Chukwa Chunk | Major | . | Jerome Boulon | Jerome Boulon |
| [HADOOP-4433](https://issues.apache.org/jira/browse/HADOOP-4433) | Improve data loader for collecting metrics and log files from hadoop and system | Major | . | Eric Yang | Eric Yang |
| [HADOOP-3844](https://issues.apache.org/jira/browse/HADOOP-3844) | include message of local exception in Client call failures | Minor | ipc | Steve Loughran | Steve Loughran |
### BUG FIXES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-3528](https://issues.apache.org/jira/browse/HADOOP-3528) | Metrics FilesCreated and files\_deleted metrics do not match. | Blocker | metrics | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-3542](https://issues.apache.org/jira/browse/HADOOP-3542) | Hadoop archives should not create \_logs file in the final archive directory. | Blocker | . | Mahadev konar | Mahadev konar |
| [HADOOP-3560](https://issues.apache.org/jira/browse/HADOOP-3560) | Archvies sometimes create empty part files. | Blocker | . | Mahadev konar | Mahadev konar |
| [HADOOP-3076](https://issues.apache.org/jira/browse/HADOOP-3076) | [HOD] If a cluster directory is specified as a relative path, an existing script.exitcode file will not be deleted. | Blocker | contrib/hod | Hemanth Yamijala | Vinod Kumar Vavilapalli |
| [HADOOP-3543](https://issues.apache.org/jira/browse/HADOOP-3543) | Need to increment the year field for the copyright notice | Trivial | documentation | Chris Douglas | Chris Douglas |
| [HADOOP-3640](https://issues.apache.org/jira/browse/HADOOP-3640) | NativeS3FsInputStream read() method for reading a single byte is incorrect | Major | fs/s3 | Tom White | Tom White |
| [HADOOP-3711](https://issues.apache.org/jira/browse/HADOOP-3711) | Streaming input is not parsed properly to find the separator | Major | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-3725](https://issues.apache.org/jira/browse/HADOOP-3725) | TestMiniMRMapRedDebugScript loses exception details | Minor | test | Steve Loughran | Steve Loughran |
| [HADOOP-3726](https://issues.apache.org/jira/browse/HADOOP-3726) | TestCLI loses exception details on setup/teardown | Minor | test | Steve Loughran | Steve Loughran |
| [HADOOP-3732](https://issues.apache.org/jira/browse/HADOOP-3732) | Block scanner should read block information during initialization. | Blocker | . | Konstantin Shvachko | Raghu Angadi |
| [HADOOP-3720](https://issues.apache.org/jira/browse/HADOOP-3720) | dfsadmin -refreshNodes should re-read the config file. | Major | . | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-3723](https://issues.apache.org/jira/browse/HADOOP-3723) | libhdfs only accepts O\_WRONLY and O\_RDONLY so does not accept things like O\_WRONLY \| O\_CREAT | Minor | . | Pete Wyckoff | Pi Song |
| [HADOOP-3643](https://issues.apache.org/jira/browse/HADOOP-3643) | jobtasks.jsp when called for running tasks should ignore completed TIPs | Major | . | Amar Kamat | Amar Kamat |
| [HADOOP-3777](https://issues.apache.org/jira/browse/HADOOP-3777) | Failure to load native lzo libraries causes job failure | Major | . | Chris Douglas | Chris Douglas |
| [HADOOP-3728](https://issues.apache.org/jira/browse/HADOOP-3728) | Cannot run more than one instance of examples.SleepJob at the same time. | Minor | . | Brice Arnould | Brice Arnould |
| [HADOOP-3795](https://issues.apache.org/jira/browse/HADOOP-3795) | NameNode does not save image if different dfs.name.dir have different checkpoint stamps | Major | . | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-3778](https://issues.apache.org/jira/browse/HADOOP-3778) | seek(long) in DFSInputStream should catch socket exception for retry later | Minor | . | Luo Ning | Luo Ning |
| [HADOOP-3756](https://issues.apache.org/jira/browse/HADOOP-3756) | dfs.client.buffer.dir isn't used in hdfs, but it's still in conf/hadoop-default.xml | Trivial | . | Michael Bieniosek | Raghu Angadi |
| [HADOOP-3776](https://issues.apache.org/jira/browse/HADOOP-3776) | NPE in NameNode with unknown blocks | Blocker | . | Raghu Angadi | Raghu Angadi |
| [HADOOP-3820](https://issues.apache.org/jira/browse/HADOOP-3820) | gridmix-env has a syntax error, and wrongly defines USE\_REAL\_DATASET by default | Major | benchmarks | Arun C Murthy | Arun C Murthy |
| [HADOOP-3819](https://issues.apache.org/jira/browse/HADOOP-3819) | can not get svn revision # at build time if locale is not english | Minor | build | Rong-En Fan | Rong-En Fan |
| [HADOOP-3848](https://issues.apache.org/jira/browse/HADOOP-3848) | TaskTracker.localizeJob calls getSystemDir for each task rather than caching it | Major | . | Arun C Murthy | Arun C Murthy |
| [HADOOP-3863](https://issues.apache.org/jira/browse/HADOOP-3863) | Use a thread-local rather than static ENCODER/DECODER variables in Text for synchronization | Critical | . | Arun C Murthy | Arun C Murthy |
| [HADOOP-3131](https://issues.apache.org/jira/browse/HADOOP-3131) | enabling BLOCK compression for map outputs breaks the reduce progress counters | Major | . | Colin Evans | Matei Zaharia |
| [HADOOP-3836](https://issues.apache.org/jira/browse/HADOOP-3836) | TestMultipleOutputs will fail if it is ran more than one times | Major | test | Tsz Wo Nicholas Sze | Alejandro Abdelnur |
| [HADOOP-3846](https://issues.apache.org/jira/browse/HADOOP-3846) | CreateEditsLog used for benchmark misses creating parent directories | Minor | benchmarks | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-3904](https://issues.apache.org/jira/browse/HADOOP-3904) | A few tests still using old hdfs package name | Minor | test | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3319](https://issues.apache.org/jira/browse/HADOOP-3319) | [HOD]checknodes prints errors messages on stdout | Major | contrib/hod | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli |
| [HADOOP-3907](https://issues.apache.org/jira/browse/HADOOP-3907) | INodeDirectoryWithQuota should be in its own .java file | Minor | . | Steve Loughran | Tsz Wo Nicholas Sze |
| [HADOOP-3919](https://issues.apache.org/jira/browse/HADOOP-3919) | hadoop conf got slightly mangled by 3772 | Minor | . | Ari Rabkin | Ari Rabkin |
| [HADOOP-3875](https://issues.apache.org/jira/browse/HADOOP-3875) | Fix TaskTracker's heartbeat timer to note the time the hearbeat RPC returned to decide next heartbeat time | Major | . | Arun C Murthy | Arun C Murthy |
| [HADOOP-3864](https://issues.apache.org/jira/browse/HADOOP-3864) | JobTracker lockup due to JobInProgress.initTasks taking significant time for large jobs on large clusters | Critical | . | Arun C Murthy | Arun C Murthy |
| [HADOOP-9](https://issues.apache.org/jira/browse/HADOOP-9) | mapred.local.dir temp dir. space allocation limited by smallest area | Minor | . | Paul Baclace | Ari Rabkin |
| [HADOOP-3851](https://issues.apache.org/jira/browse/HADOOP-3851) | spelling error in FSNamesystemMetrics log message | Trivial | . | Steve Loughran | Steve Loughran |
| [HADOOP-3816](https://issues.apache.org/jira/browse/HADOOP-3816) | KFS changes for faster directory listing | Minor | fs | Sriram Rao | Sriram Rao |
| [HADOOP-3773](https://issues.apache.org/jira/browse/HADOOP-3773) | Setting the conf twice in Pipes Submitter | Trivial | . | Koji Noguchi | Koji Noguchi |
| [HADOOP-3952](https://issues.apache.org/jira/browse/HADOOP-3952) | TestDataJoin references dfs.MiniDFSCluster instead of hdfs.MiniDFSCluster | Major | test | Owen O'Malley | Owen O'Malley |
| [HADOOP-3951](https://issues.apache.org/jira/browse/HADOOP-3951) | The package name used in FSNamesystem is incorrect | Trivial | . | Tsz Wo Nicholas Sze | Chris Douglas |
| [HADOOP-3946](https://issues.apache.org/jira/browse/HADOOP-3946) | TestMapRed fails on trunk | Blocker | test | Amareshwari Sriramadasu | Tom White |
| [HADOOP-3949](https://issues.apache.org/jira/browse/HADOOP-3949) | javadoc warnings: Multiple sources of package comments found for package | Major | build, documentation | Tsz Wo Nicholas Sze | Jerome Boulon |
| [HADOOP-3933](https://issues.apache.org/jira/browse/HADOOP-3933) | DataNode's BlockSender sends more data than necessary | Minor | . | Ning Li | Ning Li |
| [HADOOP-3962](https://issues.apache.org/jira/browse/HADOOP-3962) | Shell command "fs -count" should support paths with different file systsms | Major | fs | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3957](https://issues.apache.org/jira/browse/HADOOP-3957) | Fix javac warnings in DistCp and the corresponding tests | Minor | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3958](https://issues.apache.org/jira/browse/HADOOP-3958) | TestMapRed ignores failures of the test case | Major | test | Owen O'Malley | Owen O'Malley |
| [HADOOP-3658](https://issues.apache.org/jira/browse/HADOOP-3658) | Incorrect destination IP logged for receiving blocks | Minor | . | Koji Noguchi | Chris Douglas |
| [HADOOP-3985](https://issues.apache.org/jira/browse/HADOOP-3985) | TestHDFSServerPorts fails on trunk | Major | . | Amar Kamat | Hairong Kuang |
| [HADOOP-3964](https://issues.apache.org/jira/browse/HADOOP-3964) | javadoc warnings by failmon | Major | build | Tsz Wo Nicholas Sze | dhruba borthakur |
| [HADOOP-3785](https://issues.apache.org/jira/browse/HADOOP-3785) | FileSystem cache should be case-insensitive | Major | fs | Doug Cutting | Bill de hOra |
| [HADOOP-3506](https://issues.apache.org/jira/browse/HADOOP-3506) | Occasional NPE in Jets3tFileSystemStore | Major | fs/s3 | Robert | Tom White |
| [HADOOP-3705](https://issues.apache.org/jira/browse/HADOOP-3705) | CompositeInputFormat is unable to parse InputFormat classes with names containing '\_' or '$' | Major | . | Jingkei Ly | Chris Douglas |
| [HADOOP-4023](https://issues.apache.org/jira/browse/HADOOP-4023) | javadoc warnings: incorrect references | Major | documentation | Tsz Wo Nicholas Sze | Owen O'Malley |
| [HADOOP-4030](https://issues.apache.org/jira/browse/HADOOP-4030) | LzopCodec shouldn't be in the default list of codecs i.e. io.compression.codecs | Major | io | Arun C Murthy | Arun C Murthy |
| [HADOOP-3961](https://issues.apache.org/jira/browse/HADOOP-3961) | resource estimation works badly in some cases | Blocker | . | Ari Rabkin | Ari Rabkin |
| [HADOOP-4036](https://issues.apache.org/jira/browse/HADOOP-4036) | Increment InterTrackerProtocol version number due to changes in HADOOP-3759 | Major | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-2168](https://issues.apache.org/jira/browse/HADOOP-2168) | Pipes with a C++ record reader does not update progress in the map until it is 100% | Major | . | Owen O'Malley | Arun C Murthy |
| [HADOOP-3488](https://issues.apache.org/jira/browse/HADOOP-3488) | the rsync command in hadoop-daemon.sh also rsync the logs folder from the master, what deletes the datanode / tasktracker log files. | Critical | scripts | Stefan Groschupf | Craig Macdonald |
| [HADOOP-3937](https://issues.apache.org/jira/browse/HADOOP-3937) | Job history may get disabled due to overly long job names | Major | . | Matei Zaharia | Matei Zaharia |
| [HADOOP-3950](https://issues.apache.org/jira/browse/HADOOP-3950) | TestMapRed and TestMiniMRDFSSort failed on trunk | Major | test | Tsz Wo Nicholas Sze | Enis Soztutar |
| [HADOOP-3910](https://issues.apache.org/jira/browse/HADOOP-3910) | Are ClusterTestDFSNamespaceLogging and ClusterTestDFS still valid tests? | Minor | test | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-3954](https://issues.apache.org/jira/browse/HADOOP-3954) | Skip records enabled as default. | Critical | . | Koji Noguchi | Sharad Agarwal |
| [HADOOP-4050](https://issues.apache.org/jira/browse/HADOOP-4050) | TestFairScheduler failed on Linux | Major | . | Tsz Wo Nicholas Sze | Matei Zaharia |
| [HADOOP-4078](https://issues.apache.org/jira/browse/HADOOP-4078) | TestKosmosFileSystem fails on trunk | Blocker | fs | Amareshwari Sriramadasu | Lohit Vijayarenu |
| [HADOOP-3968](https://issues.apache.org/jira/browse/HADOOP-3968) | test-libhdfs fails on trunk | Major | . | Lohit Vijayarenu | Pete Wyckoff |
| [HADOOP-4100](https://issues.apache.org/jira/browse/HADOOP-4100) | Scheduler.assignTasks should not be dealing with cleanupTask | Major | . | Devaraj Das | Amareshwari Sriramadasu |
| [HADOOP-3970](https://issues.apache.org/jira/browse/HADOOP-3970) | Counters written to the job history cannot be recovered back | Major | . | Amar Kamat | Amar Kamat |
| [HADOOP-4097](https://issues.apache.org/jira/browse/HADOOP-4097) | Hive interaction with speculative execution is broken | Critical | . | Joydeep Sen Sarma | Joydeep Sen Sarma |
| [HADOOP-4154](https://issues.apache.org/jira/browse/HADOOP-4154) | Fix javac warning in WritableUtils | Minor | io | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4054](https://issues.apache.org/jira/browse/HADOOP-4054) | During edit log loading, an underconstruction file's lease gets removed twice | Major | . | Hairong Kuang | Hairong Kuang |
| [HADOOP-4071](https://issues.apache.org/jira/browse/HADOOP-4071) | FSNameSystem.isReplicationInProgress should add an underReplicated block to the neededReplication queue using method "add" not "update" | Major | . | Hairong Kuang | Hairong Kuang |
| [HADOOP-4147](https://issues.apache.org/jira/browse/HADOOP-4147) | Remove JobWithTaskContext from JobInProgress | Trivial | . | Amar Kamat | Amareshwari Sriramadasu |
| [HADOOP-4133](https://issues.apache.org/jira/browse/HADOOP-4133) | remove derby.log files form repository and also change the location where these files get created | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4112](https://issues.apache.org/jira/browse/HADOOP-4112) | Got ArrayOutOfBound exception while analyzing the job history | Major | . | Amar Kamat | Amareshwari Sriramadasu |
| [HADOOP-3831](https://issues.apache.org/jira/browse/HADOOP-3831) | slow-reading dfs clients do not recover from datanode-write-timeouts | Major | . | Christian Kunz | Raghu Angadi |
| [HADOOP-4155](https://issues.apache.org/jira/browse/HADOOP-4155) | JobHisotry::JOBTRACKER\_START\_TIME is not initialized properly | Major | . | Lohit Vijayarenu | Lohit Vijayarenu |
| [HADOOP-4099](https://issues.apache.org/jira/browse/HADOOP-4099) | HFTP interface compatibility with older releases broken | Blocker | fs | Kan Zhang | dhruba borthakur |
| [HADOOP-3570](https://issues.apache.org/jira/browse/HADOOP-3570) | Including user specified jar files in the client side classpath path in Hadoop 0.17 streaming | Major | . | Suhas Gogate | Sharad Agarwal |
| [HADOOP-4129](https://issues.apache.org/jira/browse/HADOOP-4129) | Memory limits of TaskTracker and Tasks should be in kiloBytes. | Blocker | . | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli |
| [HADOOP-4139](https://issues.apache.org/jira/browse/HADOOP-4139) | [Hive] multi group by statement is not optimized | Major | . | Namit Jain | Namit Jain |
| [HADOOP-3623](https://issues.apache.org/jira/browse/HADOOP-3623) | LeaseManager needs refactoring. | Major | . | Konstantin Shvachko | Tsz Wo Nicholas Sze |
| [HADOOP-4125](https://issues.apache.org/jira/browse/HADOOP-4125) | Reduce cleanup tip web ui is does not show attempts | Major | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-4087](https://issues.apache.org/jira/browse/HADOOP-4087) | Make Hive metastore server to work for PHP & Python clients | Major | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4197](https://issues.apache.org/jira/browse/HADOOP-4197) | Need to update DATA\_TRANSFER\_VERSION | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4195](https://issues.apache.org/jira/browse/HADOOP-4195) | SequenceFile.Writer close() uses compressor after returning it to CodecPool. | Major | io | Hong Tang | Arun C Murthy |
| [HADOOP-3959](https://issues.apache.org/jira/browse/HADOOP-3959) | [HOD] --resource\_manager.options is not passed to qsub | Major | contrib/hod | Craig Macdonald | Vinod Kumar Vavilapalli |
| [HADOOP-3783](https://issues.apache.org/jira/browse/HADOOP-3783) | "deprecated filesystem name" warning on EC2 | Minor | contrib/cloud | Stuart Sierra | Tom White |
| [HADOOP-3991](https://issues.apache.org/jira/browse/HADOOP-3991) | updates to hadoop-ec2-env.sh for 0.18.0 | Minor | contrib/cloud | Karl Anderson | Tom White |
| [HADOOP-2403](https://issues.apache.org/jira/browse/HADOOP-2403) | JobHistory log files contain data that cannot be parsed by org.apache.hadoop.mapred.JobHistory | Critical | . | Runping Qi | Amareshwari Sriramadasu |
| [HADOOP-4200](https://issues.apache.org/jira/browse/HADOOP-4200) | Hadoop-Patch build is failing | Major | build | Ramya Sunil | Ramya Sunil |
| [HADOOP-4121](https://issues.apache.org/jira/browse/HADOOP-4121) | HistoryViewer initialization failure should log exception trace | Trivial | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-4213](https://issues.apache.org/jira/browse/HADOOP-4213) | NPE in TestLimitTasksPerJobTaskScheduler | Major | test | Tsz Wo Nicholas Sze | Sreekanth Ramakrishnan |
| [HADOOP-4077](https://issues.apache.org/jira/browse/HADOOP-4077) | Access permissions for setting access times and modification times for files | Blocker | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-3592](https://issues.apache.org/jira/browse/HADOOP-3592) | org.apache.hadoop.fs.FileUtil.copy() will leak input streams if the destination can't be opened | Minor | fs | Steve Loughran | Bill de hOra |
| [HADOOP-4169](https://issues.apache.org/jira/browse/HADOOP-4169) | 'compressed' keyword in DDL syntax misleading and does not compress | Major | . | Joydeep Sen Sarma | Joydeep Sen Sarma |
| [HADOOP-4175](https://issues.apache.org/jira/browse/HADOOP-4175) | Incorporate metastore server review comments | Major | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4027](https://issues.apache.org/jira/browse/HADOOP-4027) | When streaming utility is run without specifying mapper/reducer/input/output options, it returns 0. | Major | . | Ramya Sunil | |
| [HADOOP-4242](https://issues.apache.org/jira/browse/HADOOP-4242) | Remove an extra ";" in FSDirectory | Blocker | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4248](https://issues.apache.org/jira/browse/HADOOP-4248) | Remove HADOOP-1230 API from 0.19 | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-4249](https://issues.apache.org/jira/browse/HADOOP-4249) | Declare hsqldb.jar in eclipse plugin | Blocker | contrib/eclipse-plugin | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4247](https://issues.apache.org/jira/browse/HADOOP-4247) | hadoop jar throwing exception when running examples | Blocker | . | Hemanth Yamijala | Owen O'Malley |
| [HADOOP-4269](https://issues.apache.org/jira/browse/HADOOP-4269) | LineRecordReader.LineReader should use util.LineReader | Major | util | Chris Douglas | Chris Douglas |
| [HADOOP-4280](https://issues.apache.org/jira/browse/HADOOP-4280) | test-libhdfs consistently fails on trunk | Blocker | . | Raghu Angadi | Pete Wyckoff |
| [HADOOP-4254](https://issues.apache.org/jira/browse/HADOOP-4254) | Cannot setSpaceQuota to 1TB | Blocker | . | Tsz Wo Nicholas Sze | Raghu Angadi |
| [HADOOP-4275](https://issues.apache.org/jira/browse/HADOOP-4275) | New public methods added to the \*ID classes | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-4173](https://issues.apache.org/jira/browse/HADOOP-4173) | TestProcfsBasedProcessTree failing on Windows machine | Major | test, util | Ramya Sunil | Vinod Kumar Vavilapalli |
| [HADOOP-4189](https://issues.apache.org/jira/browse/HADOOP-4189) | HADOOP-3245 is incomplete | Blocker | . | Amar Kamat | Amar Kamat |
| [HADOOP-4274](https://issues.apache.org/jira/browse/HADOOP-4274) | Capacity scheduler's implementation of getJobs modifies the list of running jobs inadvertently | Blocker | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-4309](https://issues.apache.org/jira/browse/HADOOP-4309) | eclipse-plugin no longer compiles on trunk | Blocker | contrib/eclipse-plugin | Chris Douglas | Chris Douglas |
| [HADOOP-4232](https://issues.apache.org/jira/browse/HADOOP-4232) | Race condition in JVM reuse when more than one slot becomes free | Blocker | . | Devaraj Das | Devaraj Das |
| [HADOOP-4135](https://issues.apache.org/jira/browse/HADOOP-4135) | change max length of database columns for metastore to 767 | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4093](https://issues.apache.org/jira/browse/HADOOP-4093) | [Hive]unify Table.getCols() & get\_fields() | Major | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4302](https://issues.apache.org/jira/browse/HADOOP-4302) | TestReduceFetch fails intermittently | Blocker | . | Devaraj Das | Chris Douglas |
| [HADOOP-4319](https://issues.apache.org/jira/browse/HADOOP-4319) | fuse-dfs dfs\_read function may return less than the requested #of bytes even if EOF not reached | Blocker | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-4246](https://issues.apache.org/jira/browse/HADOOP-4246) | Reduce task copy errors may not kill it eventually | Blocker | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-4209](https://issues.apache.org/jira/browse/HADOOP-4209) | The TaskAttemptID should not have the JobTracker start time | Blocker | . | Owen O'Malley | Amar Kamat |
| [HADOOP-4163](https://issues.apache.org/jira/browse/HADOOP-4163) | If a reducer failed at shuffling stage, the task should fail, not just logging an exception | Blocker | . | Runping Qi | Sharad Agarwal |
| [HADOOP-4299](https://issues.apache.org/jira/browse/HADOOP-4299) | Unable to access a file by a different user in the same group when permissions is set to 770 or when permissions is turned OFF | Blocker | . | Ramya Sunil | Hairong Kuang |
| [HADOOP-4261](https://issues.apache.org/jira/browse/HADOOP-4261) | Jobs failing in the init stage will never cleanup | Blocker | . | Amar Kamat | Amareshwari Sriramadasu |
| [HADOOP-4256](https://issues.apache.org/jira/browse/HADOOP-4256) | Remove Completed and Failed Job tables from jobqueue\_details.jsp | Blocker | . | Sreekanth Ramakrishnan | Sreekanth Ramakrishnan |
| [HADOOP-4267](https://issues.apache.org/jira/browse/HADOOP-4267) | TestDBJob failed on Linux | Blocker | . | Raghu Angadi | Enis Soztutar |
| [HADOOP-4225](https://issues.apache.org/jira/browse/HADOOP-4225) | FSEditLog logs modification time instead of access time. | Blocker | . | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-4018](https://issues.apache.org/jira/browse/HADOOP-4018) | limit memory usage in jobtracker | Major | . | dhruba borthakur | dhruba borthakur |
| [HADOOP-4288](https://issues.apache.org/jira/browse/HADOOP-4288) | java.lang.NullPointerException is observed in Jobtracker log while call heartbeat | Blocker | . | Karam Singh | Amar Kamat |
| [HADOOP-4380](https://issues.apache.org/jira/browse/HADOOP-4380) | Make new classes in mapred package private instead of public | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-4014](https://issues.apache.org/jira/browse/HADOOP-4014) | DFS upgrade fails on Windows | Blocker | fs | NOMURA Yoshihide | Konstantin Shvachko |
| [HADOOP-4393](https://issues.apache.org/jira/browse/HADOOP-4393) | Merge AccessControlException and AccessControlIOException into one exception class | Blocker | fs | Owen O'Malley | Owen O'Malley |
| [HADOOP-4287](https://issues.apache.org/jira/browse/HADOOP-4287) | [mapred] jobqueue\_details.jsp shows negative count of running and waiting reduces with CapacityTaskScheduler. | Blocker | . | Vinod Kumar Vavilapalli | Sreekanth Ramakrishnan |
| [HADOOP-4361](https://issues.apache.org/jira/browse/HADOOP-4361) | Corner cases in killJob from command line | Blocker | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-4400](https://issues.apache.org/jira/browse/HADOOP-4400) | Add "hdfs://" to fs.default.name on quickstart.html | Trivial | documentation | Jeff Hammerbacher | Jeff Hammerbacher |
| [HADOOP-4378](https://issues.apache.org/jira/browse/HADOOP-4378) | TestJobQueueInformation fails regularly | Blocker | test | Tsz Wo Nicholas Sze | Sreekanth Ramakrishnan |
| [HADOOP-3814](https://issues.apache.org/jira/browse/HADOOP-3814) | [HOD] Remove dfs.client.buffer.dir generation, as this is removed in Hadoop 0.19. | Blocker | contrib/hod | Hemanth Yamijala | Vinod Kumar Vavilapalli |
| [HADOOP-4376](https://issues.apache.org/jira/browse/HADOOP-4376) | Fix line formatting in hadoop-default.xml for hadoop.http.filter.initializers | Blocker | conf | Enis Soztutar | Enis Soztutar |
| [HADOOP-4410](https://issues.apache.org/jira/browse/HADOOP-4410) | TestMiniMRDebugScript fails on trunk | Blocker | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-4236](https://issues.apache.org/jira/browse/HADOOP-4236) | JobTracker.killJob() fails to kill a job if the job is not yet initialized | Blocker | . | Amar Kamat | Sharad Agarwal |
| [HADOOP-4373](https://issues.apache.org/jira/browse/HADOOP-4373) | Guaranteed Capacity calculation is not calculated correctly | Blocker | . | Karam Singh | Hemanth Yamijala |
| [HADOOP-4335](https://issues.apache.org/jira/browse/HADOOP-4335) | FsShell -ls fails for file systems without owners or groups | Major | scripts | David Phillips | David Phillips |
| [HADOOP-4418](https://issues.apache.org/jira/browse/HADOOP-4418) | Update documentation in forrest for Mapred, streaming and pipes | Blocker | documentation | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-3155](https://issues.apache.org/jira/browse/HADOOP-3155) | reducers stuck at shuffling | Blocker | . | Runping Qi | dhruba borthakur |
| [HADOOP-4425](https://issues.apache.org/jira/browse/HADOOP-4425) | Edits log takes much longer to load | Blocker | . | Chris Douglas | Chris Douglas |
| [HADOOP-4427](https://issues.apache.org/jira/browse/HADOOP-4427) | Add new/missing commands in forrest | Blocker | documentation | Sharad Agarwal | Sreekanth Ramakrishnan |
| [HADOOP-4278](https://issues.apache.org/jira/browse/HADOOP-4278) | TestDatanodeDeath failed occasionally | Blocker | . | Tsz Wo Nicholas Sze | dhruba borthakur |
| [HADOOP-4423](https://issues.apache.org/jira/browse/HADOOP-4423) | FSDataset.getStoredBlock(id) should not return corrupted information | Blocker | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4358](https://issues.apache.org/jira/browse/HADOOP-4358) | NPE from CreateEditsLog | Blocker | test | Chris Douglas | Raghu Angadi |
| [HADOOP-4449](https://issues.apache.org/jira/browse/HADOOP-4449) | Minor formatting changes to quota related commands | Trivial | . | Raghu Angadi | Raghu Angadi |
| [HADOOP-4455](https://issues.apache.org/jira/browse/HADOOP-4455) | Upload the derby.jar and TestSeDe.jar needed for fixes to 0.19 bugs | Blocker | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4457](https://issues.apache.org/jira/browse/HADOOP-4457) | Input split logging in history is broken in 0.19 | Blocker | . | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
| [HADOOP-4321](https://issues.apache.org/jira/browse/HADOOP-4321) | Document the capacity scheduler in Forrest | Blocker | documentation | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-4404](https://issues.apache.org/jira/browse/HADOOP-4404) | saveFSImage() should remove files from a storage directory that do not correspond to its type. | Blocker | . | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-4149](https://issues.apache.org/jira/browse/HADOOP-4149) | JobQueueJobInProgressListener.jobUpdated() might not work as expected | Blocker | . | Amar Kamat | Amar Kamat |
| [HADOOP-4438](https://issues.apache.org/jira/browse/HADOOP-4438) | Add new/missing dfs commands in forrest | Blocker | documentation | Hemanth Yamijala | Suresh Srinivas |
| [HADOOP-4296](https://issues.apache.org/jira/browse/HADOOP-4296) | Spasm of JobClient failures on successful jobs every once in a while | Blocker | . | Joydeep Sen Sarma | dhruba borthakur |
| [HADOOP-4439](https://issues.apache.org/jira/browse/HADOOP-4439) | Cleanup memory related resource management | Blocker | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-1945](https://issues.apache.org/jira/browse/HADOOP-1945) | pipes examples aren't in the release | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-4329](https://issues.apache.org/jira/browse/HADOOP-4329) | Hive: [] operator with maps does not work | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4330](https://issues.apache.org/jira/browse/HADOOP-4330) | Hive: AS clause with subqueries having group bys is not propogated to the outer query block | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4344](https://issues.apache.org/jira/browse/HADOOP-4344) | Hive: Partition pruning causes semantic exception with joins | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4303](https://issues.apache.org/jira/browse/HADOOP-4303) | Hive: trim and rtrim UDFs behaviors are reversed | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4315](https://issues.apache.org/jira/browse/HADOOP-4315) | Hive: Cleanup temporary files once the job is done | Major | . | Ashish Thusoo | Ashish Thusoo |
| [HADOOP-4146](https://issues.apache.org/jira/browse/HADOOP-4146) | [Hive] null pointer exception on a join | Major | . | Namit Jain | Namit Jain |
| [HADOOP-4265](https://issues.apache.org/jira/browse/HADOOP-4265) | [Hive] error when user specifies the delimiter | Major | . | Namit Jain | Namit Jain |
| [HADOOP-4273](https://issues.apache.org/jira/browse/HADOOP-4273) | [Hive] job submission exception if input is null | Major | . | Namit Jain | Namit Jain |
| [HADOOP-4316](https://issues.apache.org/jira/browse/HADOOP-4316) | [Hive] extra new lines at output | Major | . | Namit Jain | Namit Jain |
| [HADOOP-4327](https://issues.apache.org/jira/browse/HADOOP-4327) | Create table hive does not set delimeters | Major | . | Edward Capriolo | Namit Jain |
| [HADOOP-4342](https://issues.apache.org/jira/browse/HADOOP-4342) | [hive] bug in partition pruning | Major | . | Namit Jain | Namit Jain |
| [HADOOP-4356](https://issues.apache.org/jira/browse/HADOOP-4356) | [Hive] for a 2-stage map-reduce job, number of reducers not set correctly | Major | . | Namit Jain | Namit Jain |
| [HADOOP-4241](https://issues.apache.org/jira/browse/HADOOP-4241) | -hiveconf config parameters in hive cli should override all config variables | Major | . | Joydeep Sen Sarma | Joydeep Sen Sarma |
| [HADOOP-4367](https://issues.apache.org/jira/browse/HADOOP-4367) | Hive: UDAF functions cannot handle NULL values | Major | . | Zheng Shao | Zheng Shao |
| [HADOOP-4355](https://issues.apache.org/jira/browse/HADOOP-4355) | hive 2 case sensitivity issues | Major | . | Zheng Shao | |
| [HADOOP-4294](https://issues.apache.org/jira/browse/HADOOP-4294) | Hive: Parser should pass field schema to SerDe | Major | . | Zheng Shao | |
| [HADOOP-4333](https://issues.apache.org/jira/browse/HADOOP-4333) | add ability to drop partitions through DDL | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4183](https://issues.apache.org/jira/browse/HADOOP-4183) | select \* to console issues in Hive | Major | . | Joydeep Sen Sarma | |
| [HADOOP-4266](https://issues.apache.org/jira/browse/HADOOP-4266) | Hive: Support "IS NULL", "IS NOT NULL", and size(x) for map and list | Major | . | Zheng Shao | Zheng Shao |
| [HADOOP-4320](https://issues.apache.org/jira/browse/HADOOP-4320) | [Hive] TCTLSeparatedProtocol implement maps/lists/sets read/writes | Major | . | Pete Wyckoff | |
| [HADOOP-4250](https://issues.apache.org/jira/browse/HADOOP-4250) | Remove short names of serdes from Deserializer, Serializer & SerDe interface and relevant code. | Major | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4405](https://issues.apache.org/jira/browse/HADOOP-4405) | all creation of hadoop dfs queries from with in hive shell | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4366](https://issues.apache.org/jira/browse/HADOOP-4366) | Provide way to replace existing column names for columnSet tables | Major | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4336](https://issues.apache.org/jira/browse/HADOOP-4336) | fix sampling bug in fractional bucket case | Minor | . | Prasad Chakka | Prasad Chakka |
| [HADOOP-4272](https://issues.apache.org/jira/browse/HADOOP-4272) | Hive: metadataTypedColumnsetSerDe should check if SERIALIZATION.LIB is old columnsetSerDe | Major | . | Zheng Shao | Prasad Chakka |
| [HADOOP-4387](https://issues.apache.org/jira/browse/HADOOP-4387) | TestHDFSFileSystemContract fails on windows | Blocker | test | Raghu Angadi | Raghu Angadi |
| [HADOOP-4466](https://issues.apache.org/jira/browse/HADOOP-4466) | SequenceFileOutputFormat is coupled to WritableComparable and Writable | Blocker | io | Chris K Wensel | Chris K Wensel |
| [HADOOP-4510](https://issues.apache.org/jira/browse/HADOOP-4510) | FileOutputFormat protects getTaskOutputPath | Blocker | . | Chris K Wensel | Chris K Wensel |
| [HADOOP-4089](https://issues.apache.org/jira/browse/HADOOP-4089) | Check if the tmp file used in the CLI exists before using it. | Major | . | Ashish Thusoo | |
| [HADOOP-4525](https://issues.apache.org/jira/browse/HADOOP-4525) | config ipc.server.tcpnodelay is no loger being respected | Major | ipc | Clint Morgan | Clint Morgan |
| [HADOOP-4498](https://issues.apache.org/jira/browse/HADOOP-4498) | JobHistory does not escape literal jobName when used in a regex pattern | Blocker | . | Chris K Wensel | Chris K Wensel |
| [HADOOP-4446](https://issues.apache.org/jira/browse/HADOOP-4446) | Update Scheduling Information display in Web UI | Major | . | Karam Singh | Sreekanth Ramakrishnan |
| [HADOOP-4282](https://issues.apache.org/jira/browse/HADOOP-4282) | User configurable filter fails to filter accesses to certain directories | Blocker | . | Kan Zhang | Tsz Wo Nicholas Sze |
| [HADOOP-4595](https://issues.apache.org/jira/browse/HADOOP-4595) | JVM Reuse triggers RuntimeException("Invalid state") | Major | . | Aaron Kimball | Devaraj Das |
| [HADOOP-4552](https://issues.apache.org/jira/browse/HADOOP-4552) | Deadlock in RPC Server | Major | ipc | Raghu Angadi | Raghu Angadi |
| [HADOOP-4471](https://issues.apache.org/jira/browse/HADOOP-4471) | Capacity Scheduler should maintain the right ordering of jobs in its running queue | Blocker | . | Vivek Ratan | Amar Kamat |
| [HADOOP-4500](https://issues.apache.org/jira/browse/HADOOP-4500) | multifilesplit is using job default filesystem incorrectly | Major | . | Joydeep Sen Sarma | Joydeep Sen Sarma |
### TESTS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-3587](https://issues.apache.org/jira/browse/HADOOP-3587) | contrib/data\_join needs unit tests | Major | test | Chris Douglas | Chris Douglas |
| [HADOOP-3966](https://issues.apache.org/jira/browse/HADOOP-3966) | Place the new findbugs warnings introduced by the patch in the /tmp directory when "ant test-patch" is run. | Minor | test | Ramya Sunil | Ramya Sunil |
| [HADOOP-4069](https://issues.apache.org/jira/browse/HADOOP-4069) | TestKosmosFileSystem can fail when run through ant test on systems shared by users | Minor | fs | Hemanth Yamijala | Lohit Vijayarenu |
| [HADOOP-4259](https://issues.apache.org/jira/browse/HADOOP-4259) | findbugs should run over the tools.jar also | Minor | test | Owen O'Malley | Chris Douglas |
| [HADOOP-4237](https://issues.apache.org/jira/browse/HADOOP-4237) | TestStreamingBadRecords.testNarrowDown fails intermittently | Minor | test | Sharad Agarwal | Sharad Agarwal |
| [HADOOP-3790](https://issues.apache.org/jira/browse/HADOOP-3790) | Add more unit tests to test appending to files in HDFS | Blocker | test | dhruba borthakur | Tsz Wo Nicholas Sze |
| [HADOOP-4426](https://issues.apache.org/jira/browse/HADOOP-4426) | TestCapacityScheduler is broken | Blocker | . | Hemanth Yamijala | Hemanth Yamijala |
| [HADOOP-4464](https://issues.apache.org/jira/browse/HADOOP-4464) | Separate testClientTriggeredLeaseRecovery() out from TestFileCreation | Blocker | test | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4390](https://issues.apache.org/jira/browse/HADOOP-4390) | Hive: test for case sensitivity in serde2 thrift serde | Minor | . | Zheng Shao | |
| [HADOOP-4056](https://issues.apache.org/jira/browse/HADOOP-4056) | Unit test for DynamicSerDe | Minor | . | Pete Wyckoff | Pete Wyckoff |
### SUB-TASKS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-3824](https://issues.apache.org/jira/browse/HADOOP-3824) | Refactor org.apache.hadoop.mapred.StatusHttpServer | Major | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4186](https://issues.apache.org/jira/browse/HADOOP-4186) | Move LineRecordReader.LineReader class to util package | Major | . | Tom White | Tom White |
| [HADOOP-4184](https://issues.apache.org/jira/browse/HADOOP-4184) | Fix simple module dependencies between core, hdfs and mapred | Major | . | Tom White | Tom White |
| [HADOOP-4354](https://issues.apache.org/jira/browse/HADOOP-4354) | Separate TestDatanodeDeath.testDatanodeDeath() into 4 tests | Blocker | test | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
### OTHER:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-3601](https://issues.apache.org/jira/browse/HADOOP-3601) | Hive as a contrib project | Minor | . | Joydeep Sen Sarma | Ashish Thusoo |
| [HADOOP-3791](https://issues.apache.org/jira/browse/HADOOP-3791) | Use generics in ReflectionUtils | Trivial | . | Chris Smith | Chris Smith |
| [HADOOP-4076](https://issues.apache.org/jira/browse/HADOOP-4076) | fuse-dfs REAME lists wrong ant flags and is not specific in some place | Major | . | Pete Wyckoff | Pete Wyckoff |
| [HADOOP-3942](https://issues.apache.org/jira/browse/HADOOP-3942) | Update DistCp documentation | Blocker | documentation | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HADOOP-4105](https://issues.apache.org/jira/browse/HADOOP-4105) | libhdfs wiki is very out-of-date and contains mostly broken links | Minor | documentation | Pete Wyckoff | Pete Wyckoff | | unknown | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/CHANGELOG.0.19.0.md |
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage.bugs.varargs;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import org.junit.Test;
import org.mockitoutil.TestBase;
public class VarargsErrorWhenCallingRealMethodTest extends TestBase {

    // Fixture: a concrete class with a varargs method; the real
    // implementation always returns 1.
    class Foo {
        int blah(String a, String b, Object... c) {
            return 1;
        }
    }

    // Regression test (package org.mockitousage.bugs.varargs): stubbing a
    // varargs method with thenCallRealMethod() should invoke the real
    // implementation without raising, even when the call supplies no
    // varargs arguments.
    @Test
    public void shouldNotThrowAnyException() throws Exception {
        Foo foo = mock(Foo.class);
        when(foo.blah(anyString(), anyString())).thenCallRealMethod();
        assertEquals(1, foo.blah("foo", "bar"));
    }
}
#!venv/bin/python
import pycurl, json, requests
BASEURL = 'http://localhost:80'
print '\nStarting Tests @ {}...\n'.format(BASEURL)
c = pycurl.Curl()
c.setopt(c.COOKIEFILE, '') # Without this, login will work but won't be saved for next request
c.setopt(c.HTTPHEADER, ['Content-type: application/json']) # To pass JSON form data
# Signup
print "\n Signing up as test user"
c.setopt(c.URL, BASEURL + "/api/signup")
c.setopt(c.POSTFIELDS, json.dumps({
'username': 'test_1',
'password': 'test_pw'
}))
c.perform()
# Try secure ping
print "\n Secure ping"
c.setopt(c.URL, BASEURL + "/api/secure_ping")
c.setopt(c.POST, 0)
c.perform()
# Logout
print "\n Logging out"
c.setopt(c.URL, BASEURL + '/api/logout')
c.setopt(c.POSTFIELDS, json.dumps({}))
c.perform()
# Try secure ping
print "\n Testing secure ping again (should fail)."
c.setopt(c.URL, BASEURL + "/api/secure_ping")
c.setopt(c.POST, 0)
c.perform()
# Log back in again
print "\n Logging back in again"
c.setopt(c.URL, BASEURL + "/api/login")
c.setopt(c.POSTFIELDS, json.dumps({
'username': 'test_1',
'password': 'test_pw'
}))
c.perform()
# Try secure ping again
print "\n Testing secure ping again"
c.setopt(c.URL, BASEURL + "/api/secure_ping")
c.setopt(c.POST, 0)
c.perform() | unknown | codeparrot/codeparrot-clean | ||
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
# Deep Q-learning Agent
class DQNAgent:
    """Deep Q-learning agent: epsilon-greedy policy over a small Keras MLP
    Q-network, trained from a bounded experience-replay memory."""

    def __init__(self, state_size, action_size,config):
        # cumulative reward of the most recent episode run by learn()
        self.cum_reward = 0
        # maximum number of environment steps per episode
        self.iter = config["n_iter"]
        self.state_size = state_size
        self.action_size = action_size
        # replay buffer; oldest transitions are evicted past 2000 entries
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95    # discount rate
        self.epsilon = 1.0  # exploration rate
        self.epsilon_min = 0.01
        # minibatch size used when replaying at the end of an episode
        self.rep = 128
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        self.model = self._build_model()

    def _build_model(self):
        """Build the Q-network: two 24-unit ReLU layers, one linear output
        per action, trained with MSE loss and Adam."""
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model

    def remember(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition in the replay memory."""
        self.memory.append((state, action, reward, next_state, done))
        #print len(self.memory)

    def act(self, state):
        """Return an action: random with probability epsilon, else the
        action with the highest predicted Q-value."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])  # returns action

    def replay(self, batch_size):
        """Fit the Q-network on a random minibatch of stored transitions and
        decay epsilon once per call (down to epsilon_min)."""
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # bootstrap with the discounted best Q-value of the next state
                target = reward + self.gamma * \
                    np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def learn(self,env, rend = False):
        """Run one episode in *env* (a Gym-style environment), storing every
        transition, then train from the replay memory."""
        s = env.reset()
        s = np.reshape(s, [1, self.state_size])
        self.cum_reward = 0
        for t in range(self.iter):
            action = self.act(s)
            # Advance the game to the next frame based on the action.
            # Reward is 1 for every frame the pole survived
            next_state, reward, done, _ = env.step(action)
            self.cum_reward = self.cum_reward +reward
            next_state = np.reshape(next_state, [1,self.state_size])
            # Remember the previous state, action, reward, and done
            self.remember(s, action, reward, next_state, done)
            # make next_state the new current state for the next frame.
            s = next_state
            if rend:
                env.render()
            if done:
                break
        # train the agent with the experience of the episode
        if (self.rep<len(self.memory)):
            self.replay(self.rep)
        else:
            self.replay(len(self.memory)-1)

    def return_cum_reward(self):
        """Return the cumulative reward accumulated by the last learn() call."""
        return self.cum_reward
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.network.sockets
import io.ktor.network.selector.*
import io.ktor.network.util.*
import io.ktor.utils.io.*
import io.ktor.utils.io.ByteChannel
import kotlinx.coroutines.*
import java.nio.channels.*
// Pumps bytes from [channel] directly into [nioChannel] without an
// intermediate buffer pool, suspending on the selector whenever the NIO
// channel reports a zero-byte write (socket send buffer full).
internal fun CoroutineScope.attachForWritingDirectImpl(
    channel: ByteChannel,
    nioChannel: WritableByteChannel,
    selectable: Selectable,
    selector: SelectorManager,
    socketOptions: SocketOptions.TCPClientSocketOptions? = null
): ReaderJob = reader(Dispatchers.IO + CoroutineName("cio-to-nio-writer"), channel) {
    // Start with WRITE interest cleared; it is only registered on demand below.
    selectable.interestOp(SelectInterest.WRITE, false)
    try {
        // Optional write timeout: closes the channel with SocketTimeoutException
        // if a write stalls longer than the configured socketTimeout.
        val timeout = if (socketOptions?.socketTimeout != null) {
            createTimeout("writing-direct", socketOptions.socketTimeout) {
                channel.close(SocketTimeoutException())
            }
        } else {
            null
        }
        while (!channel.isClosedForRead) {
            if (channel.availableForRead == 0) {
                // Nothing buffered yet; suspend until the writer side produces data.
                channel.awaitContent()
                continue
            }
            var rc = 0
            channel.read { buffer ->
                // Drain the readable region into the NIO channel; rc records the
                // byte count of the last write call.
                while (buffer.hasRemaining()) {
                    timeout.withTimeout {
                        do {
                            rc = nioChannel.write(buffer)
                        } while (buffer.hasRemaining() && rc > 0)
                    }
                }
            }
            if (rc == 0) {
                // Last write made no progress: register WRITE interest and
                // suspend until the selector reports the channel writable.
                selectable.interestOp(SelectInterest.WRITE, true)
                selector.select(selectable, SelectInterest.WRITE)
            }
        }
        timeout?.finish()
    } finally {
        selectable.interestOp(SelectInterest.WRITE, false)
        // Half-close the output side so the peer observes EOF; prefer the
        // Java 7 channel API when available.
        if (nioChannel is SocketChannel) {
            try {
                if (java7NetworkApisAvailable) {
                    nioChannel.shutdownOutput()
                } else {
                    nioChannel.socket().shutdownOutput()
                }
            } catch (ignore: ClosedChannelException) {
            }
        }
    }
}
"""Test the condition helper."""
from unittest.mock import patch
from homeassistant.helpers import condition
from homeassistant.util import dt
from tests.common import get_test_home_assistant
class TestConditionHelper:
    """Test condition helpers."""

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()

    def test_and_condition(self):
        """Test the 'and' condition."""
        # Passes only when state == '100' AND the numeric value is below 110.
        test = condition.from_config({
            'condition': 'and',
            'conditions': [
                {
                    'condition': 'state',
                    'entity_id': 'sensor.temperature',
                    'state': '100',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        self.hass.states.set('sensor.temperature', 120)
        assert not test(self.hass)  # fails both sub-conditions
        self.hass.states.set('sensor.temperature', 105)
        assert not test(self.hass)  # below 110, but state != '100'
        self.hass.states.set('sensor.temperature', 100)
        assert test(self.hass)      # satisfies both

    def test_and_condition_with_template(self):
        """Test the 'and' condition."""
        # Same scenario as above, with the state check expressed as a template.
        test = condition.from_config({
            'condition': 'and',
            'conditions': [
                {
                    'condition': 'template',
                    'value_template':
                        '{{ states.sensor.temperature.state == "100" }}',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        self.hass.states.set('sensor.temperature', 120)
        assert not test(self.hass)
        self.hass.states.set('sensor.temperature', 105)
        assert not test(self.hass)
        self.hass.states.set('sensor.temperature', 100)
        assert test(self.hass)

    def test_or_condition(self):
        """Test the 'or' condition."""
        # Passes when state == '100' OR the numeric value is below 110.
        test = condition.from_config({
            'condition': 'or',
            'conditions': [
                {
                    'condition': 'state',
                    'entity_id': 'sensor.temperature',
                    'state': '100',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        self.hass.states.set('sensor.temperature', 120)
        assert not test(self.hass)  # fails both sub-conditions
        self.hass.states.set('sensor.temperature', 105)
        assert test(self.hass)      # below 110 is enough
        self.hass.states.set('sensor.temperature', 100)
        assert test(self.hass)

    def test_or_condition_with_template(self):
        """Test the 'or' condition."""
        # Same scenario as above, with the state check expressed as a template.
        test = condition.from_config({
            'condition': 'or',
            'conditions': [
                {
                    'condition': 'template',
                    'value_template':
                        '{{ states.sensor.temperature.state == "100" }}',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        self.hass.states.set('sensor.temperature', 120)
        assert not test(self.hass)
        self.hass.states.set('sensor.temperature', 105)
        assert test(self.hass)
        self.hass.states.set('sensor.temperature', 100)
        assert test(self.hass)

    def test_time_window(self):
        """Test time condition windows."""
        sixam = dt.parse_time("06:00:00")
        sixpm = dt.parse_time("18:00:00")
        # condition.time(after, before) is true inside the window; a window
        # with after > before wraps around midnight.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=dt.now().replace(hour=3)):
            assert not condition.time(after=sixam, before=sixpm)
            assert condition.time(after=sixpm, before=sixam)
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=dt.now().replace(hour=9)):
            assert condition.time(after=sixam, before=sixpm)
            assert not condition.time(after=sixpm, before=sixam)
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=dt.now().replace(hour=15)):
            assert condition.time(after=sixam, before=sixpm)
            assert not condition.time(after=sixpm, before=sixam)
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=dt.now().replace(hour=21)):
            assert not condition.time(after=sixam, before=sixpm)
            assert condition.time(after=sixpm, before=sixam)
package snapshotter
import "github.com/containerd/containerd/v2/core/mount"
// isMounted reports whether the given path is a mount point. On Windows
// this check has no equivalent, so it always reports false.
func isMounted(string) bool { return false }
// unmount detaches the filesystem mounted at target, passing no unmount flags.
func unmount(target string) error {
	return mount.Unmount(target, 0)
}
{
"name": "illuminate/conditionable",
"description": "The Illuminate Conditionable package.",
"license": "MIT",
"homepage": "https://laravel.com",
"support": {
"issues": "https://github.com/laravel/framework/issues",
"source": "https://github.com/laravel/framework"
},
"authors": [
{
"name": "Taylor Otwell",
"email": "taylor@laravel.com"
}
],
"require": {
"php": "^8.2"
},
"autoload": {
"psr-4": {
"Illuminate\\Support\\": ""
}
},
"extra": {
"branch-alias": {
"dev-master": "12.x-dev"
}
},
"config": {
"sort-packages": true
},
"minimum-stability": "dev"
} | json | github | https://github.com/laravel/framework | src/Illuminate/Conditionable/composer.json |
// Copyright 2024 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import "unsafe"
// magic is the file signature stored in every valid meta page.
const magic uint32 = 0xED0CDAED

// inBucket is the on-disk representation of a bucket header inside a meta page.
type inBucket struct {
	root     uint64 // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}
// meta mirrors the on-disk meta page layout; the field order and sizes must
// match the database file format exactly, since loadPageMeta reinterprets
// raw page bytes as this struct.
type meta struct {
	magic    uint32 // expected to equal the package-level magic constant
	version  uint32
	pageSize uint32
	flags    uint32
	root     inBucket
	freelist uint64
	pgid     uint64
	txid     uint64
	checksum uint64
}
// loadPageMeta reinterprets the bytes following the page header in buf as a
// meta struct, without copying. buf must hold at least a full page starting
// at the page boundary; pageHeaderSize is defined elsewhere in this package.
// The returned pointer aliases buf, so buf must outlive the result.
func loadPageMeta(buf []byte) *meta {
	return (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
}
from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
    '''test for duplicated data in log, which has been happening on PX4/Pixhawk'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Dupe Log Data"

    def __matchSample(self, sample, sampleStartIndex, logdata):
        '''return the line number where a match is found, otherwise return False

        *sample* is a list of (lineNumber, value) pairs taken from the ATT
        Pitch channel starting at *sampleStartIndex*.
        '''
        # ignore if all data in sample is the same value: a constant run would
        # trivially match elsewhere and produce false positives
        nSame = 0
        for s in sample:
            if s[1] == sample[0][1]:
                nSame += 1
        if nSame == 20:
            return False
        # compare the sample against every other position in the channel data
        data = logdata.channels["ATT"]["Pitch"].listData
        for i in range(sampleStartIndex, len(data)):
            if i == sampleStartIndex:
                continue  # skip matching against ourselves
            j = 0
            while j < 20 and (i + j) < len(data) and data[i + j][1] == sample[j][1]:
                j += 1
            if j == 20:  # all 20 samples matched
                return data[i][0]
        return False

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD
        # this could be made more flexible by not hard-coding to use ATT data,
        # could make it dynamic based on whatever is available as long as it is
        # highly variable
        if "ATT" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No ATT log data"
            return
        # pick 10 sample points within the range of ATT data we have
        sampleStartIndices = []
        attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData) - 1
        # floor division keeps step an int on Python 3 as well; clamp to 1 so
        # range() never gets a zero step on very short logs
        step = max(1, attEndIndex // 11)
        for i in range(step, attEndIndex - step, step):
            sampleStartIndices.append(i)
        if not sampleStartIndices:
            # log too short to pick any sample points
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "Not enough ATT data"
            return
        # get 20 datapoints of pitch from each sample location and check for a
        # match elsewhere
        sampleIndex = 0
        for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
            if i == sampleStartIndices[sampleIndex]:
                sample = logdata.channels["ATT"]["Pitch"].listData[i:i + 20]
                matchedLine = self.__matchSample(sample, i, logdata)
                if matchedLine:
                    self.result.status = TestResult.StatusType.FAIL
                    self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0], matchedLine)
                    return
                sampleIndex += 1
                if sampleIndex >= len(sampleStartIndices):
                    break
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dimension(Model):
    """Dimension of the metric.

    :param name: The name of the dimension.
    :type name: str
    :param display_name: The display name of the dimension.
    :type display_name: str
    :param internal_name: The internal name of the dimension.
    :type internal_name: str
    """

    # Maps Python attribute names onto their serialized (wire) keys.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'internal_name': {'key': 'internalName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Dimension, self).__init__(**kwargs)
        # Every mapped attribute is optional; missing kwargs default to None.
        for attribute in ('name', 'display_name', 'internal_name'):
            setattr(self, attribute, kwargs.get(attribute))
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import forms
from slugify import slugify
from django.utils.translation import ugettext as _
from modeltranslation.forms import TranslationModelForm
from django.contrib.auth import get_user_model
from geonode.groups.models import GroupProfile
class GroupForm(TranslationModelForm):
    """Creation form for GroupProfile; the slug is derived from the title."""

    # Hidden field: populated from the title in clean(), not by the user.
    slug = forms.SlugField(
        max_length=20,
        help_text=_("a short version of the name consisting only of letters, numbers, underscores and hyphens."),
        widget=forms.HiddenInput,
        required=False)

    def clean_slug(self):
        # Reject a slug already used by another group (case-insensitive).
        # NOTE(review): per-field cleaning runs before clean(), so this sees
        # the submitted (typically empty) hidden slug, not the one derived
        # from the title below — confirm the intended validation order.
        if GroupProfile.objects.filter(
                slug__iexact=self.cleaned_data["slug"]).count() > 0:
            raise forms.ValidationError(
                _("A group already exists with that slug."))
        return self.cleaned_data["slug"].lower()

    def clean_title(self):
        # Reject duplicate titles (case-insensitive).
        if GroupProfile.objects.filter(
                title__iexact=self.cleaned_data["title"]).count() > 0:
            raise forms.ValidationError(
                _("A group already exists with that name."))
        return self.cleaned_data["title"]

    def clean(self):
        # Derive the slug from the validated title.
        cleaned_data = self.cleaned_data
        name = cleaned_data.get("title")
        slug = slugify(name)
        cleaned_data["slug"] = slug
        return cleaned_data

    class Meta:
        model = GroupProfile
        exclude = ['group']
class GroupUpdateForm(forms.ModelForm):
    """Edit form for GroupProfile; permits keeping the instance's own name."""

    def clean_name(self):
        # NOTE(review): this method validates the "title" field but is named
        # clean_name (Django only calls clean_<field> for a field of that
        # name) and compares the title against self.instance.name — confirm
        # the field/method naming is intentional.
        if GroupProfile.objects.filter(
                name__iexact=self.cleaned_data["title"]).count() > 0:
            if self.cleaned_data["title"] == self.instance.name:
                pass  # same instance
            else:
                raise forms.ValidationError(
                    _("A group already exists with that name."))
        return self.cleaned_data["title"]

    class Meta:
        model = GroupProfile
        exclude = ['group']
class GroupMemberForm(forms.Form):
    """Add members to a group from a comma-separated list of usernames."""

    # Comma-separated usernames, rendered with a widget for user selection.
    user_identifiers = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'user-select'
            }
        )
    )
    # When checked, the added users are given the manager role.
    manager_role = forms.BooleanField(
        required=False,
        label=_("Assign manager role")
    )

    def clean_user_identifiers(self):
        # Resolve each comma-separated username to a user object; unknown
        # names are collected and reported together in a single error.
        value = self.cleaned_data["user_identifiers"]
        new_members = []
        errors = []
        for name in (v.strip() for v in value.split(",")):
            try:
                new_members.append(get_user_model().objects.get(username=name))
            except get_user_model().DoesNotExist:
                errors.append(name)
        if errors:
            raise forms.ValidationError(
                _("The following are not valid usernames: %(errors)s; "
                  "not added to the group"),
                params={
                    "errors": ", ".join(errors)
                }
            )
        return new_members
# Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
    """Return a regression problem where 10% of the samples are noise outliers."""
    rng = np.random.RandomState(0)
    # Generate data with outliers by replacing 10% of the samples with noise.
    X, y = make_regression(
        n_samples=n_samples, n_features=n_features,
        random_state=0, noise=0.05)
    # Replace 10% of the sample with noise.
    num_noise = int(0.1 * n_samples)
    random_samples = rng.randint(0, n_samples, num_noise)
    X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
    return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon:
    # with epsilon this big no residual is treated as an outlier.
    X, y = make_regression_with_outliers()
    lr = LinearRegression(fit_intercept=True)
    lr.fit(X, y)
    huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
    huber.fit(X, y)
    assert_almost_equal(huber.coef_, lr.coef_, 3)
    assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
    # Test that the gradient calculated by _huber_loss_and_gradient is
    # correct by comparing it against a numerical finite-difference gradient.
    rng = np.random.RandomState(1)
    X, y = make_regression_with_outliers()
    sample_weight = rng.randint(1, 3, (y.shape[0]))

    # PEP 8 (E731): use def instead of assigning lambdas to names.
    def loss_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[0]

    def grad_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[1]

    # Check using optimize.check_grad that the gradients are equal.
    for _ in range(5):
        # Check for both fit_intercept and otherwise.
        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
            w = rng.randn(n_features)
            w[-1] = np.abs(w[-1])  # the scale parameter must be positive
            grad_same = optimize.check_grad(
                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
            assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weight support in HuberRegressor.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber.fit(X, y)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    # Unit weights must be equivalent to no weights at all.
    huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
    assert_array_almost_equal(huber.coef_, huber_coef)
    assert_array_almost_equal(huber.intercept_, huber_intercept)
    # Integer weights must be equivalent to repeating the weighted rows.
    X, y = make_regression_with_outliers(n_samples=5, n_features=20)
    X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
    y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
    huber.fit(X_new, y_new)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    huber.fit(X, y, sample_weight=[1, 3, 1, 2, 1])
    assert_array_almost_equal(huber.coef_, huber_coef, 3)
    assert_array_almost_equal(huber.intercept_, huber_intercept, 3)
    # Test sparse implementation with sample weights.
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber_sparse.fit(X_csr, y, sample_weight=[1, 3, 1, 2, 1])
    assert_array_almost_equal(huber_sparse.coef_, huber_coef, 3)
def test_huber_sparse():
    # Fitting on a CSR matrix must give the same coefficients as dense input.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber.fit(X, y)
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber_sparse.fit(X_csr, y)
    assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
def test_huber_scaling_invariant():
    """Test that outliers filtering is scaling independent."""
    rng = np.random.RandomState(0)
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
                           epsilon=1.35)
    huber.fit(X, y)
    n_outliers_mask_1 = huber.outliers_
    # Scaling y, or both X and y, must flag the same rows as outliers.
    huber.fit(X, 2. * y)
    n_outliers_mask_2 = huber.outliers_
    huber.fit(2. * X, 2. * y)
    n_outliers_mask_3 = huber.outliers_
    assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
    assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
    """Test they should converge to same coefficients for same parameters"""
    X, y = make_regression_with_outliers(n_samples=5, n_features=2)
    # Fit once to find out the scale parameter. Scale down X and y by scale
    # so that the scale parameter is optimized to 1.0
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
                           epsilon=1.35)
    huber.fit(X, y)
    X_scale = X / huber.scale_
    y_scale = y / huber.scale_
    huber.fit(X_scale, y_scale)
    assert_almost_equal(huber.scale_, 1.0, 3)
    # SGD with the huber loss and matching epsilon should land on (roughly)
    # the same coefficients.
    sgdreg = SGDRegressor(
        alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=1000000,
        fit_intercept=False, epsilon=1.35)
    sgdreg.fit(X_scale, y_scale)
    assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
    # Refitting with warm_start=True should converge almost immediately to
    # the same coefficients.
    X, y = make_regression_with_outliers()
    huber_warm = HuberRegressor(
        fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
    huber_warm.fit(X, y)
    huber_warm_coef = huber_warm.coef_.copy()
    huber_warm.fit(X, y)
    # SciPy performs the tol check after doing the coef updates, so
    # these would be almost same but not equal.
    assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
    # No n_iter_ in old SciPy (<=0.9)
    # And as said above, the first iteration seems to be run anyway.
    if huber_warm.n_iter_ is not None:
        assert_equal(1, huber_warm.n_iter_)
def test_huber_better_r2_score():
    # Test that huber scores better than ridge on the non-outlier rows.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
    huber.fit(X, y)
    # Rows whose residual is within epsilon * scale are the inliers.
    linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
    mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
    huber_score = huber.score(X[mask], y[mask])
    huber_outlier_score = huber.score(X[~mask], y[~mask])
    # The Ridge regressor should be influenced by the outliers and hence
    # give a worse score on the non-outliers as compared to the huber
    # regressor.
    ridge = Ridge(fit_intercept=True, alpha=0.01)
    ridge.fit(X, y)
    ridge_score = ridge.score(X[mask], y[mask])
    ridge_outlier_score = ridge.score(X[~mask], y[~mask])
    assert_greater(huber_score, ridge_score)
    # The huber model should also fit poorly on the outliers.
    assert_greater(ridge_outlier_score, huber_outlier_score)
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import subprocess
from charms import layer
from charms.reactive import when, when_any, when_not
from charms.reactive import set_state, remove_state
from charms.reactive import hook
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.contrib.charmsupport import nrpe
from charms.reactive.helpers import data_changed
from charms.layer import nginx
from charms.layer import tls_client
from subprocess import Popen
from subprocess import PIPE
from subprocess import STDOUT
from subprocess import CalledProcessError
# logrotate(8) policy installed for the apilb nginx logs: rotate daily, keep
# 14 rotations, compress after one cycle, and SIGHUP nginx after rotation.
# Written verbatim to /etc/logrotate.d/apilb_nginx (see
# maybe_write_apilb_logrotate_config below).
apilb_nginx = """/var/log/nginx.*.log {
daily
missingok
rotate 14
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
prerotate
if [ -d /etc/logrotate.d/httpd-prerotate ]; then \\
run-parts /etc/logrotate.d/httpd-prerotate; \\
fi \\
endscript
postrotate
invoke-rc.d nginx rotate >/dev/null 2>&1
endscript
}"""
def get_ingress_address(relation):
    """Return the ingress address for *relation*, falling back to the unit's
    private address on Juju versions without network-spaces support."""
    try:
        network_info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        # Older Juju: network_get is not implemented at all.
        network_info = []
    if network_info and 'ingress-addresses' in network_info:
        # just grab the first one for now, maybe be more robust here?
        return network_info['ingress-addresses'][0]
    else:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')
@when('certificates.available', 'website.available')
def request_server_certificates(tls, website):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        get_ingress_address(website),
        socket.gethostname(),
    ]
    # Append any operator-supplied SANs (space-separated config value).
    # A single truthiness test covers both unset and empty-string values.
    extra_sans = hookenv.config('extra_sans')
    if extra_sans:
        sans.extend(extra_sans.split())
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('config.changed.extra_sans', 'certificates.available',
      'website.available')
def update_certificate(tls, website):
    """Re-request the server certificate whenever extra_sans changes."""
    # Using the config.changed.extra_sans flag to catch changes.
    # IP changes will take ~5 minutes or so to propagate, but
    # it will update.
    request_server_certificates(tls, website)
@when('certificates.server.cert.available',
      'nginx.available', 'tls_client.server.certificate.written')
def kick_nginx(tls):
    """Restart nginx when the server certificate content actually changed."""
    # we are just going to sighup it, but still want to avoid kicking it
    # without need
    if data_changed('cert', tls.get_server_cert()):
        # certificate changed, so sighup nginx
        hookenv.log("Certificate information changed, sending SIGHUP to nginx")
        host.service_restart('nginx')
    tls_client.reset_certificate_write_flag('server')
@when('config.changed.port')
def close_old_port():
    """Close the previously configured port after a port config change."""
    config = hookenv.config()
    old_port = config.previous('port')
    if not old_port:
        # First deployment: there is no previous port to close.
        return
    try:
        hookenv.close_port(old_port)
    except CalledProcessError:
        hookenv.log('Port %d already closed, skipping.' % old_port)
def maybe_write_apilb_logrotate_config():
    """Install the logrotate policy for the apilb nginx logs if absent."""
    config_path = '/etc/logrotate.d/apilb_nginx'
    if os.path.exists(config_path):
        # Already installed; never overwrite an existing policy.
        return
    with open(config_path, 'w+') as config_file:
        config_file.write(apilb_nginx)
@when('nginx.available', 'apiserver.available',
      'certificates.server.cert.available')
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    server_key_path = layer_options.get('server_key_path')
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Do both the key and certificate exist? If not, do nothing and wait for
    # a later invocation once the tls layer has written them.
    if cert_exists and key_exists:
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', server_cert_path]
        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', server_key_path]
        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)
        port = hookenv.config('port')
        hookenv.open_port(port)
        services = apiserver.services()
        # Render the apilb vhost proxying to the registered apiserver units.
        nginx.configure_site(
            'apilb',
            'apilb.conf',
            server_name='_',
            services=services,
            port=port,
            server_certificate=server_cert_path,
            server_key=server_key_path,
            proxy_read_timeout=hookenv.config('proxy_read_timeout')
        )
        maybe_write_apilb_logrotate_config()
        hookenv.status_set('active', 'Loadbalancer ready.')
@hook('upgrade-charm')
def upgrade_charm():
    """Ensure the logrotate config exists after a charm upgrade."""
    maybe_write_apilb_logrotate_config()
@when('nginx.available')
def set_nginx_version():
    ''' Surface the currently deployed version of nginx to Juju '''
    cmd = 'nginx -v'
    # nginx prints its version banner on stderr, hence stderr=STDOUT so the
    # banner can be read from stdout below.
    p = Popen(cmd, shell=True,
              stdin=PIPE,
              stdout=PIPE,
              stderr=STDOUT,
              close_fds=True)
    raw = p.stdout.read()
    # The version comes back as:
    # nginx version: nginx/1.10.0 (Ubuntu)
    version = raw.split(b'/')[-1].split(b' ')[0]
    hookenv.application_version_set(version.rstrip())
@when('website.available')
def provide_application_details(website):
    ''' re-use the nginx layer website relation to relay the hostname/port
    to any consuming kubernetes-workers, or other units that require the
    kubernetes API '''
    website.configure(port=hookenv.config('port'))
@when('loadbalancer.available')
def provide_loadbalancing(loadbalancer):
    '''Send the public address and port to the public-address interface, so
    the subordinates can get the public address of this loadbalancer.'''
    loadbalancer.set_address_port(hookenv.unit_get('public-address'),
                                  hookenv.config('port'))
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """Run the NRPE configuration once when the relation first appears."""
    # The state flag guards against re-running this initial pass.
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('nginx.available')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)register the nginx service check with Nagios via NRPE."""
    # List of systemd services to monitor.
    services = ('nginx',)
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Drop our NRPE checks once the nrpe relation has gone away."""
    remove_state('nrpe-external-master.initial-config')
    # Systemd services whose checks should be removed.
    monitored_services = ('nginx',)
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic, so lean on the charm-helpers NRPE wrapper for now.
    checker = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    for service_name in monitored_services:
        checker.remove_check(shortname=service_name)
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { createToaster } from "@chakra-ui/react";
export const toaster = createToaster({
pauseOnPageIdle: true,
placement: "bottom-end",
}); | typescript | github | https://github.com/apache/airflow | airflow-core/src/airflow/ui/src/components/ui/Toaster/createToaster.ts |
(dp0
S'total_runs'
p1
L9688L
sS'Chrome 4'
p2
(dp3
S'summary_display'
p4
S''
p5
sS'total_runs'
p6
L1072L
sS'summary_score'
p7
I100
sS'results'
p8
(dp9
S'score'
p10
(dp11
g10
I100
sS'raw_score'
p12
I100
sS'display'
p13
S'100/100'
p14
ssssS'iPhone 3.1'
p15
(dp16
S'summary_display'
p17
g5
sS'total_runs'
p18
L90L
sS'summary_score'
p19
I100
sS'results'
p20
(dp21
S'score'
p22
(dp23
g22
I100
sS'raw_score'
p24
I100
sS'display'
p25
S'100/100'
p26
ssssS'Chrome 3'
p27
(dp28
S'summary_display'
p29
g5
sS'total_runs'
p30
L641L
sS'summary_score'
p31
I100
sS'results'
p32
(dp33
S'score'
p34
(dp35
g34
I100
sS'raw_score'
p36
I100
sS'display'
p37
S'100/100'
p38
ssssS'Opera 9.64'
p39
(dp40
S'summary_display'
p41
g5
sS'total_runs'
p42
L31L
sS'summary_score'
p43
I85
sS'results'
p44
(dp45
S'score'
p46
(dp47
g46
I85
sS'raw_score'
p48
I85
sS'display'
p49
S'85/100'
p50
ssssS'IE 6'
p51
(dp52
S'summary_display'
p53
g5
sS'total_runs'
p54
L124L
sS'summary_score'
p55
I12
sS'results'
p56
(dp57
S'score'
p58
(dp59
g58
I12
sS'raw_score'
p60
I12
sS'display'
p61
S'12/100'
p62
ssssS'Safari 3.2'
p63
(dp64
S'summary_display'
p65
g5
sS'total_runs'
p66
L21L
sS'summary_score'
p67
I75
sS'results'
p68
(dp69
S'score'
p70
(dp71
g70
I75
sS'raw_score'
p72
I75
sS'display'
p73
S'75/100'
p74
ssssS'Safari 4.0'
p75
(dp76
S'summary_display'
p77
g5
sS'total_runs'
p78
L1049L
sS'summary_score'
p79
I100
sS'results'
p80
(dp81
S'score'
p82
(dp83
g82
I100
sS'raw_score'
p84
I100
sS'display'
p85
S'100/100'
p86
ssssS'IE 8'
p87
(dp88
S'summary_display'
p89
g5
sS'total_runs'
p90
L516L
sS'summary_score'
p91
I20
sS'results'
p92
(dp93
S'score'
p94
(dp95
g94
I20
sS'raw_score'
p96
I20
sS'display'
p97
S'20/100'
p98
ssssS'Opera 10'
p99
(dp100
S'summary_display'
p101
g5
sS'total_runs'
p102
L1322L
sS'summary_score'
p103
I100
sS'results'
p104
(dp105
S'score'
p106
(dp107
g106
I100
sS'raw_score'
p108
I100
sS'display'
p109
S'100/100'
p110
ssssS'iPhone 2.2'
p111
(dp112
S'summary_display'
p113
g5
sS'total_runs'
p114
L9L
sS'summary_score'
p115
I74
sS'results'
p116
(dp117
S'score'
p118
(dp119
g118
I74
sS'raw_score'
p120
I74
sS'display'
p121
S'74/100'
p122
ssssS'Firefox 3.0'
p123
(dp124
S'summary_display'
p125
g5
sS'total_runs'
p126
L612L
sS'summary_score'
p127
I72
sS'results'
p128
(dp129
S'score'
p130
(dp131
g130
I72
sS'raw_score'
p132
I72
sS'display'
p133
S'72/100'
p134
ssssS'Firefox 3.5'
p135
(dp136
S'summary_display'
p137
g5
sS'total_runs'
p138
L4003L
sS'summary_score'
p139
I93
sS'results'
p140
(dp141
S'score'
p142
(dp143
g142
I93
sS'raw_score'
p144
I93
sS'display'
p145
S'93/100'
p146
ssssS'IE 7'
p147
(dp148
S'summary_display'
p149
g5
sS'total_runs'
p150
L198L
sS'summary_score'
p151
I12
sS'results'
p152
(dp153
S'score'
p154
(dp155
g154
I12
sS'raw_score'
p156
I12
sS'display'
p157
S'12/100'
p158
ssss. | unknown | codeparrot/codeparrot-clean | ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .session_update_event import SessionUpdateEvent
from .response_cancel_event import ResponseCancelEvent
from .response_create_event import ResponseCreateEvent
from .conversation_item_create_event import ConversationItemCreateEvent
from .conversation_item_delete_event import ConversationItemDeleteEvent
from .input_audio_buffer_clear_event import InputAudioBufferClearEvent
from .input_audio_buffer_append_event import InputAudioBufferAppendEvent
from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent
from .output_audio_buffer_clear_event import OutputAudioBufferClearEvent
from .conversation_item_retrieve_event import ConversationItemRetrieveEvent
from .conversation_item_truncate_event import ConversationItemTruncateEvent
__all__ = ["RealtimeClientEvent"]
# Discriminated union of every client -> server realtime event.  The
# ``type`` field (declared below via ``PropertyInfo``) is used as the
# discriminator to select the concrete event class on deserialisation.
# NOTE: this file is generated from the OpenAPI spec -- change the spec,
# not this file.
RealtimeClientEvent: TypeAlias = Annotated[
    Union[
        ConversationItemCreateEvent,
        ConversationItemDeleteEvent,
        ConversationItemRetrieveEvent,
        ConversationItemTruncateEvent,
        InputAudioBufferAppendEvent,
        InputAudioBufferClearEvent,
        OutputAudioBufferClearEvent,
        InputAudioBufferCommitEvent,
        ResponseCancelEvent,
        ResponseCreateEvent,
        SessionUpdateEvent,
    ],
    PropertyInfo(discriminator="type"),
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# sadns.py.
# Selectable Asynchronous DNS client written in python with no external dependancies.
# linux, unix only, though a hacky windows client may be possible.
import os, threading, Queue, socket, select, time
# I could subclass queue and make this code more general. I won't, since it all has to be multi-producer multi-consumer safe in that case.
class _lookupThread(threading.Thread):
    """A thread for performing blocking reverse DNS lookups.

    To use, place a string IP address on inqueue.  A tuple of
    (IP, hostname) will be placed on outqueue, or (IP, IP) if it could
    not be resolved.  Results will not always arrive in order.
    """
    def __init__(self):
        super(_lookupThread, self).__init__()
        self.inqueue = Queue.Queue()
        self.outqueue = Queue.Queue()
        # One byte is written to wpipe per result so callers can
        # select() on rpipe to learn that outqueue has data.
        self.rpipe, self.wpipe = os.pipe()
        self.setDaemon(True)

    def run(self):
        while True:  # no way to kill self, except by closing wpipe
            IP = self.inqueue.get()
            try:
                hostname = socket.gethostbyaddr(IP)[0]
            except socket.error:
                # Covers herror/gaierror; fall back to the raw address.
                # (The original bare ``except:`` hid every error.)
                hostname = IP
            time.sleep(0.5)
            self.outqueue.put((IP, hostname))
            if not 1 == os.write(self.wpipe, "d"):
                # Raising a plain string is illegal in modern Pythons
                # (TypeError at raise time); raise a real exception.
                raise IOError("error writing to pipe")
class lookupSel(object):
    """Selectable front-end around a single _lookupThread worker."""
    def __init__(self):
        worker = _lookupThread()
        self.inqueue = worker.inqueue
        self.outqueue = worker.outqueue
        self.rpipe = worker.rpipe  # threadsafe, ints
        worker.start()  # thread runs
        # ``worker`` now goes out of scope, which is good because we
        # don't want to directly interact with a thread.

    def fileno(self):
        """this object can be selected on via the comms pipe."""
        return self.rpipe

    def recv(self):
        """call this whenever select notifies us we have data waiting"""
        os.read(self.rpipe, 1)  # stop the pipe from filling up.
        return self.outqueue.get_nowait()

    def send(self, data):
        """Queue one IP address string for reverse resolution."""
        self.inqueue.put(data)
if __name__ == "__main__":
looker = lookupSel()
looker.write("66.35.250.150")
looker.write("127.0.0.1")
looker.write("256.256.256.256")
looker.write("82.94.237.218")
while True:
i,o,e = select.select([looker], [], [], 10)
if len(i) == 0: print "no data forthcoming"
else:
print "result is:", looker.read() | unknown | codeparrot/codeparrot-clean | ||
# Developing Realm
## Building Realm
There are three ways to build Realm
1. \[Recommended] Using Xcode, open Package.swift. With this approach you can build either against a released Core version or a custom branch.
1. Using Xcode, open Realm.xcodeproj. This will download the version of Core specified in `dependencies.list/REALM_CORE_VERSION` and build the Swift SDK against it.
1. From the command line, run `./build.sh build`. Similarly to 2., this also downloads Core and builds against it.
### Building against a custom branch of Core
To build Realm against a custom Core branch, update `Package.swift` by updating the Realm Core dependency from `exact` to `branch`:
```diff
dependencies: [
- .package(url: "https://github.com/realm/realm-core.git", exact: coreVersion)
+ .package(url: "https://github.com/realm/realm-core.git", branch: "*your-custom-branch*")
],
```
## Testing
### Prerequisites
1. AWS credentials - reach out to your lead or Michael O'Brien. These will need to be added as environment variables to Xcode and/or your shell profile:
```
export AWS_ACCESS_KEY_ID=...
export AWS_SECRET_ACCESS_KEY=...
```
2. Run `sh build.sh setup-baas`. This script will download go, mongodb, and clone BaaS into `.baas` and prepare everything for the test harness. The version of BaaS that will be run is determined by the commit sha in `dependencies.list/STITCH_VERSION`.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
def webob_factory(url):
    """Factory for removing duplicate webob code from tests.

    Returns a helper that builds a webob request for ``base_url + path``.
    """
    base_url = url

    def web_request(url, method=None, body=None):
        req = webob.Request.blank("%s%s" % (base_url, url))
        if method:
            # JSON in, JSON out for non-GET verbs.
            req.content_type = "application/json"
            req.method = method
        if body:
            req.body = jsonutils.dumps(body)
        return req

    return web_request
def compare_links(actual, expected):
    """Compare xml atom links."""
    # Atom links are identified by their rel/href/type attributes only.
    return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))
def compare_media_types(actual, expected):
    """Compare xml media types."""
    # Media types are identified by their base/type attributes only.
    return compare_tree_to_dict(actual, expected, ('base', 'type'))
def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts.

    Elements are compared pairwise; only the attributes named in
    ``keys`` participate in the comparison.
    """
    return all(elem.get(key) == data.get(key)
               for elem, data in zip(actual, expected)
               for key in keys)
"""
Distance and Area objects to allow for sensible and convienient calculation
and conversions. Here are some tests.
"""
from django.contrib.gis.measure import Distance, Area, D, A
from django.utils import unittest
class DistanceTest(unittest.TestCase):
    "Testing the Distance object"
    # NOTE: the old ``except TypeError, e:`` form was Python-2-only
    # syntax; the try/except/else boilerplate is replaced with
    # assertRaises, and deprecated failIf with assertFalse.

    def testInit(self):
        "Testing initialisation from valid units"
        d = Distance(m=100)
        self.assertEqual(d.m, 100)

        d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
        for d in (d1, d2, d3):
            self.assertEqual(d.m, 100)

        d = D(nm=100)
        self.assertEqual(d.m, 185200)

        y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
        for d in (y1, y2, y3):
            self.assertEqual(d.yd, 100)

        mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
        for d in (mm1, mm2):
            self.assertEqual(d.m, 1.0)
            self.assertEqual(d.mm, 1000.0)

    def testInitInvalid(self):
        "Testing initialisation from invalid units"
        self.assertRaises(AttributeError, D, banana=100)

    def testAccess(self):
        "Testing access in different units"
        d = D(m=100)
        self.assertEqual(d.km, 0.1)
        self.assertAlmostEqual(d.ft, 328.084, 3)

    def testAccessInvalid(self):
        "Testing access in invalid units"
        d = D(m=100)
        self.assertFalse(hasattr(d, 'banana'))

    def testAddition(self):
        "Test addition & subtraction"
        d1 = D(m=100)
        d2 = D(m=200)

        d3 = d1 + d2
        self.assertEqual(d3.m, 300)
        d3 += d1
        self.assertEqual(d3.m, 400)

        d4 = d1 - d2
        self.assertEqual(d4.m, -100)
        d4 -= d1
        self.assertEqual(d4.m, -200)

        # Mixing a Distance with a bare number must be rejected.
        with self.assertRaises(TypeError):
            d1 + 1
        with self.assertRaises(TypeError):
            d1 - 1
        with self.assertRaises(TypeError):
            d1 += 1
        with self.assertRaises(TypeError):
            d1 -= 1

    def testMultiplication(self):
        "Test multiplication & division"
        d1 = D(m=100)

        d3 = d1 * 2
        self.assertEqual(d3.m, 200)
        d3 = 2 * d1
        self.assertEqual(d3.m, 200)
        d3 *= 5
        self.assertEqual(d3.m, 1000)

        d4 = d1 / 2
        self.assertEqual(d4.m, 50)
        d4 /= 5
        self.assertEqual(d4.m, 10)

        # Distance * Distance yields an Area.
        a5 = d1 * D(m=10)
        self.assertTrue(isinstance(a5, Area))
        self.assertEqual(a5.sq_m, 100 * 10)

        # In-place multiply and any division by a Distance are invalid.
        with self.assertRaises(TypeError):
            d1 *= D(m=1)
        with self.assertRaises(TypeError):
            d1 / D(m=1)
        with self.assertRaises(TypeError):
            d1 /= D(m=1)

    def testUnitConversions(self):
        "Testing default units during maths"
        d1 = D(m=100)
        d2 = D(km=1)

        # The left operand's unit wins for binary operations.
        d3 = d1 + d2
        self.assertEqual(d3._default_unit, 'm')
        d4 = d2 + d1
        self.assertEqual(d4._default_unit, 'km')
        d5 = d1 * 2
        self.assertEqual(d5._default_unit, 'm')
        d6 = d1 / 2
        self.assertEqual(d6._default_unit, 'm')

    def testComparisons(self):
        "Testing comparisons"
        d1 = D(m=100)
        d2 = D(km=1)
        d3 = D(km=0)

        self.assertTrue(d2 > d1)
        self.assertTrue(d1 == d1)
        self.assertTrue(d1 < d2)
        self.assertFalse(d3)

    def testUnitsStr(self):
        "Testing conversion to strings"
        d1 = D(m=100)
        d2 = D(km=3.5)

        self.assertEqual(str(d1), '100.0 m')
        self.assertEqual(str(d2), '3.5 km')
        self.assertEqual(repr(d1), 'Distance(m=100.0)')
        self.assertEqual(repr(d2), 'Distance(km=3.5)')

    def testUnitAttName(self):
        "Testing the `unit_attname` class method"
        unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'),
                      ('German legal metre', 'german_m'),
                      ('Indian yard', 'indian_yd'),
                      ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
        for nm, att in unit_tuple:
            self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
    "Testing the Area object"
    # NOTE: the old ``except TypeError, e:`` form was Python-2-only
    # syntax; the try/except/else boilerplate is replaced with
    # assertRaises, and deprecated failIf with assertFalse.

    def testInit(self):
        "Testing initialisation from valid units"
        a = Area(sq_m=100)
        self.assertEqual(a.sq_m, 100)

        a = A(sq_m=100)
        self.assertEqual(a.sq_m, 100)

        a = A(sq_mi=100)
        self.assertEqual(a.sq_m, 258998811.0336)

    def testInitInvaliA(self):
        "Testing initialisation from invalid units"
        self.assertRaises(AttributeError, A, banana=100)

    def testAccess(self):
        "Testing access in different units"
        a = A(sq_m=100)
        self.assertEqual(a.sq_km, 0.0001)
        self.assertAlmostEqual(a.sq_ft, 1076.391, 3)

    def testAccessInvaliA(self):
        "Testing access in invalid units"
        a = A(sq_m=100)
        self.assertFalse(hasattr(a, 'banana'))

    def testAddition(self):
        "Test addition & subtraction"
        a1 = A(sq_m=100)
        a2 = A(sq_m=200)

        a3 = a1 + a2
        self.assertEqual(a3.sq_m, 300)
        a3 += a1
        self.assertEqual(a3.sq_m, 400)

        a4 = a1 - a2
        self.assertEqual(a4.sq_m, -100)
        a4 -= a1
        self.assertEqual(a4.sq_m, -200)

        # Mixing an Area with a bare number must be rejected.
        with self.assertRaises(TypeError):
            a1 + 1
        with self.assertRaises(TypeError):
            a1 - 1
        with self.assertRaises(TypeError):
            a1 += 1
        with self.assertRaises(TypeError):
            a1 -= 1

    def testMultiplication(self):
        "Test multiplication & division"
        a1 = A(sq_m=100)

        a3 = a1 * 2
        self.assertEqual(a3.sq_m, 200)
        a3 = 2 * a1
        self.assertEqual(a3.sq_m, 200)
        a3 *= 5
        self.assertEqual(a3.sq_m, 1000)

        a4 = a1 / 2
        self.assertEqual(a4.sq_m, 50)
        a4 /= 5
        self.assertEqual(a4.sq_m, 10)

        # Area * Area and Area / Area are dimensionally invalid.
        with self.assertRaises(TypeError):
            a1 * A(sq_m=1)
        with self.assertRaises(TypeError):
            a1 *= A(sq_m=1)
        with self.assertRaises(TypeError):
            a1 / A(sq_m=1)
        with self.assertRaises(TypeError):
            a1 /= A(sq_m=1)

    def testUnitConversions(self):
        "Testing default units during maths"
        a1 = A(sq_m=100)
        a2 = A(sq_km=1)

        # The left operand's unit wins for binary operations.
        a3 = a1 + a2
        self.assertEqual(a3._default_unit, 'sq_m')
        a4 = a2 + a1
        self.assertEqual(a4._default_unit, 'sq_km')
        a5 = a1 * 2
        self.assertEqual(a5._default_unit, 'sq_m')
        a6 = a1 / 2
        self.assertEqual(a6._default_unit, 'sq_m')

    def testComparisons(self):
        "Testing comparisons"
        a1 = A(sq_m=100)
        a2 = A(sq_km=1)
        a3 = A(sq_km=0)

        self.assertTrue(a2 > a1)
        self.assertTrue(a1 == a1)
        self.assertTrue(a1 < a2)
        self.assertFalse(a3)

    def testUnitsStr(self):
        "Testing conversion to strings"
        a1 = A(sq_m=100)
        a2 = A(sq_km=3.5)

        self.assertEqual(str(a1), '100.0 sq_m')
        self.assertEqual(str(a2), '3.5 sq_km')
        self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
        self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
    """Build a test suite containing every measure test case."""
    s = unittest.TestSuite()
    for case in (DistanceTest, AreaTest):
        s.addTest(unittest.makeSuite(case))
    return s
def run(verbosity=2):
    """Run the measure test suite with a plain-text runner."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())


if __name__ == "__main__":
    run()
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction a_coordonnees_valides."""
from primaires.scripting.fonction import Fonction
class ClasseFonction(Fonction):
    # Scripting primitive for the game engine.  The French docstrings
    # are kept verbatim because the scripting system presumably surfaces
    # them as in-game help text -- confirm before translating.
    """Retourne vrai si la salle a des coordonnées valides."""

    @classmethod
    def init_types(cls):
        # Register the accepted call signature: one "Salle" (room) arg.
        cls.ajouter_types(cls.a_coordonnees_valides, "Salle")

    @staticmethod
    def a_coordonnees_valides(salle):
        """Retourne vrai si la salle a des coordonnées valides."""
        # True when the room's coordinate object reports itself valid.
        return salle.coords.valide
import os
from optparse import make_option
from django.contrib.gis import gdal
from django.core.management.base import LabelCommand, CommandError
def layer_option(option, opt, value, parser):
    """
    Callback for `make_option` for the `ogrinspect` `layer_key`
    keyword option which may be an integer or a string.
    """
    # Prefer an integer layer index; fall back to the raw string name.
    try:
        parsed = int(value)
    except ValueError:
        parsed = value
    setattr(parser.values, option.dest, parsed)
def list_option(option, opt, value, parser):
    """
    Callback for `make_option` for `ogrinspect` keywords that require
    a string list. If the string is 'True'/'true' then the option
    value will be a boolean instead.
    """
    # 'true' (any case) means "apply to all fields"; otherwise split the
    # comma-separated field names into a list.
    if value.lower() == 'true':
        parsed = True
    else:
        parsed = value.split(',')
    setattr(parser.values, option.dest, parsed)
class Command(LabelCommand):
    """Management command: inspect an OGR data source and emit a model."""
    help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
            'a GeoDjango model with the given model name. For example:\n'
            ' ./manage.py ogrinspect zipcode.shp Zipcode')
    args = '[data_source] [model_name]'

    option_list = LabelCommand.option_list + (
        make_option('--blank', dest='blank', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR field names to add '
                    'the `blank=True` option to the field definition. Set with'
                    '`true` to apply to all applicable fields.'),
        make_option('--decimal', dest='decimal', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR float fields to '
                    'generate `DecimalField` instead of the default '
                    '`FloatField`. Set to `true` to apply to all OGR float fields.'),
        make_option('--geom-name', dest='geom_name', type='string', default='geom',
                    help='Specifies the model name for the Geometry Field '
                    '(defaults to `geom`)'),
        make_option('--layer', dest='layer_key', type='string', action='callback',
                    callback=layer_option, default=0,
                    help='The key for specifying which layer in the OGR data '
                    'source to use. Defaults to 0 (the first layer). May be '
                    'an integer or a string identifier for the layer.'),
        make_option('--multi-geom', action='store_true', dest='multi_geom', default=False,
                    help='Treat the geometry in the data source as a geometry collection.'),
        make_option('--name-field', dest='name_field',
                    help='Specifies a field name to return for the `__unicode__` function.'),
        make_option('--no-imports', action='store_false', dest='imports', default=True,
                    help='Do not include `from django.contrib.gis.db import models` '
                    'statement.'),
        make_option('--null', dest='null', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR field names to add '
                    'the `null=True` option to the field definition. Set with'
                    '`true` to apply to all applicable fields.'),
        make_option('--srid', dest='srid',
                    help='The SRID to use for the Geometry Field. If it can be '
                    'determined, the SRID of the data source is used.'),
        make_option('--mapping', action='store_true', dest='mapping',
                    help='Generate mapping dictionary for use with `LayerMapping`.')
    )

    # The target model does not exist yet, so model validation is skipped.
    requires_model_validation = False

    def handle(self, *args, **options):
        """Run the inspection and return the generated model source."""
        try:
            data_source, model_name = args
        except ValueError:
            raise CommandError('Invalid arguments, must provide: %s' % self.args)

        if not gdal.HAS_GDAL:
            raise CommandError('GDAL is required to inspect geospatial data sources.')

        # Removing options with `None` values.
        options = dict([(k, v) for k, v in options.items() if not v is None])

        # Getting the OGR DataSource from the string parameter.
        try:
            ds = gdal.DataSource(data_source)
        except gdal.OGRException as msg:
            raise CommandError(msg)

        # Whether the user wants to generate the LayerMapping dictionary as well.
        show_mapping = options.pop('mapping', False)

        # Getting rid of settings that `_ogrinspect` doesn't like.
        verbosity = options.pop('verbosity', False)
        settings = options.pop('settings', False)

        # Returning the output of ogrinspect with the given arguments
        # and options.
        from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
        output = [s for s in _ogrinspect(ds, model_name, **options)]
        if show_mapping:
            # Constructing the keyword arguments for `mapping`, and
            # calling it on the data source.
            kwargs = {'geom_name' : options['geom_name'],
                      'layer_key' : options['layer_key'],
                      'multi_geom' : options['multi_geom'],
                      }
            mapping_dict = mapping(ds, **kwargs)
            # This extra legwork is so that the dictionary definition comes
            # out in the same order as the fields in the model definition.
            rev_mapping = dict([(v, k) for k, v in mapping_dict.items()])
            output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
                           '%s_mapping = {' % model_name.lower()])
            output.extend([" '%s' : '%s'," % (rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields])
            output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
        return '\n'.join(output) + '\n'
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
    """ Set X and Y appropriately and checks inputs

    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats (or dtype if provided). Finally, the function
    checks that the size of the second dimension of the two arrays is equal, or
    the equivalent check for a precomputed distance matrix.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    precomputed : bool
        True if X is to be treated as precomputed distances to the samples in
        Y.

    dtype : string, type, list of types or None (default=None)
        Data type required for X and Y. If None, the dtype will be an
        appropriate float type selected by _return_float_dtype.

        .. versionadded:: 0.18

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.

    """
    X, Y, dtype_float = _return_float_dtype(X, Y)

    # Only warn about implicit dtype conversion when the caller pinned a
    # dtype explicitly; otherwise conversion to float is expected.
    warn_on_dtype = dtype is not None
    estimator = 'check_pairwise_arrays'
    if dtype is None:
        dtype = dtype_float

    # When Y is X (or None), validate just once so both names share the
    # same validated array object.
    if Y is X or Y is None:
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
                            warn_on_dtype=warn_on_dtype, estimator=estimator)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype,
                        warn_on_dtype=warn_on_dtype, estimator=estimator)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype,
                        warn_on_dtype=warn_on_dtype, estimator=estimator)

    if precomputed:
        # A precomputed matrix must be (n_queries, n_indexed).
        if X.shape[1] != Y.shape[0]:
            raise ValueError("Precomputed metric requires shape "
                             "(n_queries, n_indexed). Got (%d, %d) "
                             "for %d indexed." %
                             (X.shape[0], X.shape[1], Y.shape[0]))
    elif X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))

    return X, Y
def check_paired_arrays(X, Y):
    """ Set X and Y appropriately and checks inputs for paired distances

    All paired distance metrics should use this function first to assert that
    the given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats. Finally, the function checks that the size
    of the dimensions of the two arrays are equal.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.

    """
    X, Y = check_pairwise_arrays(X, Y)
    # Paired metrics require element-wise correspondence, so the shapes
    # must match exactly (not just the number of features).
    if X.shape == Y.shape:
        return X, Y
    raise ValueError("X and Y should be of same shape. They were "
                     "respectively %r and %r long." % (X.shape, Y.shape))
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
                        X_norm_squared=None):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if one argument varies but the other remains unchanged, then
    `dot(x, x)` and/or `dot(y, y)` can be pre-computed.

    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)

    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    X_norm_squared : array-like, shape = [n_samples_1], optional
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``)

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[0., 1.],
           [1., 0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    X, Y = check_pairwise_arrays(X, Y)

    # Validate / compute the per-row squared norms ||x||^2 as a column
    # vector so they broadcast against the (n_samples_1, n_samples_2)
    # cross-product matrix below.
    if X_norm_squared is not None:
        XX = check_array(X_norm_squared)
        if XX.shape == (1, X.shape[0]):
            XX = XX.T
        elif XX.shape != (X.shape[0], 1):
            raise ValueError(
                "Incompatible dimensions for X and X_norm_squared")
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]

    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        YY = XX.T
    elif Y_norm_squared is not None:
        YY = np.atleast_2d(Y_norm_squared)

        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        YY = row_norms(Y, squared=True)[np.newaxis, :]

    # ||x - y||^2 = ||x||^2 - 2 <x, y> + ||y||^2, assembled in place.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Floating point rounding can produce tiny negative values; clamp
    # them to zero before the sqrt.
    np.maximum(distances, 0, out=distances)

    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0

    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances
    are also returned.

    This is mostly equivalent to calling:

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples1, n_features)
        Array containing points.
    Y : {array-like, sparse matrix}, shape (n_samples2, n_features)
        Arrays containing points.
    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.
    metric : string or callable, default 'euclidean'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays as input and return one value
        indicating the distance between them. This works for Scipy's metrics,
        but is less efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")
    X, Y = check_pairwise_arrays(X, Y)
    if metric_kwargs is None:
        metric_kwargs = {}
    if axis == 0:
        # argmin over rows of Y instead of rows of X: just swap the inputs.
        X, Y = Y, X
    # Allocate output arrays.
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    # BUGFIX: use np.inf -- np.infty is a deprecated alias that was removed
    # in NumPy 2.0.
    values.fill(np.inf)
    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 computed
                    # blockwise; clamp at 0 against rounding error. Values
                    # stay *squared* here and are sqrt'ed once at the end.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)
            # Update indices and minimum values using chunk.
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]
            # chunk_x is a slice, so indices[chunk_x] / values[chunk_x] are
            # writable views and the masked assignments update in place.
            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]
    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # Undo the squared accumulation of the euclidean fast path.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """For each row in X, find the index of the closest row of Y.

    This is mostly equivalent to calling:

        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)

    but uses much less memory, and is faster for large arrays.
    This function works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like, shape (n_samples1, n_features)
        Array containing points.
    Y : array-like, shape (n_samples2, n_features)
        Array containing points.
    axis : int, optional, default 1
        Axis along which the argmin is to be computed.
    metric : string or callable
        Metric used for the distance computation; any metric from
        scikit-learn or scipy.spatial.distance is accepted, or a callable
        taking two arrays and returning one distance value. Distance
        matrices are not supported.
    batch_size : integer
        Data are processed in chunks of batch_size rows of X and of Y to
        bound memory usage; larger values trade memory for speed.
    metric_kwargs : dict
        Keyword arguments forwarded to the metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    # Delegate to the argmin+min variant and discard the distances.
    kwargs = {} if metric_kwargs is None else metric_kwargs
    argmin, _ = pairwise_distances_argmin_min(
        X, Y, axis=axis, metric=metric,
        batch_size=batch_size, metric_kwargs=kwargs)
    return argmin
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=None):
    """ Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    size_threshold : int, default=5e8
        Unused parameter. Deprecated in 0.19; passing any non-None value
        only triggers a DeprecationWarning.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
    array([[0.]])
    >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
    array([[1.]])
    >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
    array([[1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[0., 2.],
           [4., 4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[1., 1.],
           [1., 1.]])
    """
    if size_threshold is not None:
        # The parameter is ignored; only warn so old callers keep working.
        warnings.warn('Use of the "size_threshold" is deprecated '
                      'in 0.19 and it will be removed version '
                      '0.21 of scikit-learn', DeprecationWarning)
    X, Y = check_pairwise_arrays(X, Y)
    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)
        # Normalize both operands to CSR so the Cython kernel can walk
        # data/indices/indptr directly; copy=False avoids duplication when
        # the input is already CSR.
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        # _sparse_manhattan (Cython helper) fills D in place.
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D
    if sum_over_features:
        # Dense pairwise case: delegate to scipy's C implementation.
        return distance.cdist(X, Y, 'cityblock')
    # Componentwise case: broadcast to (n_X, n_Y, n_features), take the
    # absolute difference in place, then flatten the first two axes.
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    distances = 1.0 - cosine_similarity(X, Y)
    # Rounding error can push values slightly outside the valid [0, 2] range.
    np.clip(distances, 0, 2, out=distances)
    if Y is None or X is Y:
        # Self-distances must be exactly zero despite floating point noise.
        np.fill_diagonal(distances, 0.0)
    return distances
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the euclidean distance between each pair (X[i], Y[i]).

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # The row-wise L2 norm of the difference is exactly the paired distance.
    difference = X - Y
    return row_norms(difference)
def paired_manhattan_distances(X, Y):
    """Compute the L1 distance between each pair (X[i], Y[i]).

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        return np.abs(delta).sum(axis=-1)
    # Sparse difference: take |.| on the stored values only, then reduce
    # row-wise; the matrix-sum result is squeezed down to a 1-D ndarray.
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the cosine distance between each pair (X[i], Y[i]).

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # cos distance = 0.5 * ||x/|x| - y/|y|||^2 for each paired row.
    normalized_diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(normalized_diff, squared=True)
# Mapping of metric names accepted by paired_distances() to their
# implementations; 'l2' aliases euclidean, 'l1'/'cityblock' alias manhattan.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.
    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. As a string it must be a key of PAIRED_DISTANCES,
        e.g. "euclidean", "manhattan" or "cosine". As a callable it is
        invoked on each pair of rows and must return a single value.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([0., 1.])

    See also
    --------
    pairwise_distances : Computes the distance between every pair of samples
    """
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if not callable(metric):
        raise ValueError('Unknown distance %s' % metric)
    # Callable metric: validate once up front (a named metric usually does
    # this itself), then apply it row by row.
    X, Y = check_paired_arrays(X, Y)
    distances = np.zeros(len(X))
    for i, (x, y) in enumerate(zip(X, Y)):
        distances[i] = metric(x, y)
    return distances
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
    """Compute the linear kernel (plain dot product) between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.20

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gram = safe_sparse_dot(X, Y.T, dense_output=dense_output)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        if None, defaults to 1.0 / n_features
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    dot_products = safe_sparse_dot(X, Y.T, dense_output=True)
    return (gamma * dot_products + coef0) ** degree
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    dot_products = safe_sparse_dot(X, Y.T, dense_output=True)
    return np.tanh(gamma * dot_products + coef0)
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    # Squared distances avoid a useless sqrt that exp would undo anyway.
    sq_dists = euclidean_distances(X, Y, squared=True)
    return np.exp(-gamma * sq_dists)
def laplacian_kernel(X, Y=None, gamma=None):
    """Compute the laplacian kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||_1)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <laplacian_kernel>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    return np.exp(-gamma * manhattan_distances(X, Y))
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.
    Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.
    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.17
           parameter ``dense_output`` for dense output.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    X, Y = check_pairwise_arrays(X, Y)
    X_unit = normalize(X, copy=True)
    # Reuse the normalized X when computing similarities of X with itself.
    Y_unit = X_unit if X is Y else normalize(Y, copy=True)
    return safe_sparse_dot(X_unit, Y_unit.T,
                           dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.
    X and Y have to be non-negative. This kernel is most commonly applied to
    histograms. It is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    and can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    # The Cython helper fills ``out`` in place.
    out = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, out)
    return out
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.
    X and Y have to be non-negative. This kernel is most commonly applied to
    histograms. It is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    and can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    kernel = additive_chi2_kernel(X, Y)
    # Scale and exponentiate in place to avoid an extra allocation.
    kernel *= gamma
    np.exp(kernel, out=kernel)
    return kernel
# Helper functions - distance
# Maps metric names accepted by pairwise_distances() to implementations;
# 'l2'/'euclidean' and 'l1'/'manhattan'/'cityblock' are aliases.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances,
    'precomputed': None,  # HACK: precomputed is always allowed, never called
}
def distance_metrics():
    """Valid metrics for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Read more in the :ref:`User Guide <metrics>`.

    Returns
    -------
    distance_metrics : dict
        Mapping of metric name to the implementing function (the module
        level PAIRWISE_DISTANCE_FUNCTIONS dictionary itself, not a copy).
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel.

    ``func`` is applied to X against column-blocks of Y; the resulting
    blocks are concatenated horizontally to form the full matrix.
    """
    if n_jobs < 0:
        # Negative n_jobs follows the joblib convention:
        # -1 -> all CPUs, -2 -> all but one, etc.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)
    if Y is None:
        Y = X
    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)
    # TODO: in some cases, backend='threading' may be appropriate
    fd = delayed(func)
    # Each worker gets all of X and one even slice of Y's rows.
    ret = Parallel(n_jobs=n_jobs, verbose=0)(
        fd(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
    """Handle the callable-metric case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y)
    n_x, n_y = X.shape[0], Y.shape[0]
    if X is not Y:
        # General case: evaluate the metric on every (i, j) cell.
        out = np.empty((n_x, n_y), dtype='float')
        for i in range(n_x):
            for j in range(n_y):
                out[i, j] = metric(X[i], Y[j], **kwds)
        return out
    # X is Y: evaluate only the strict upper triangle, then mirror it.
    out = np.zeros((n_x, n_y), dtype='float')
    for i, j in itertools.combinations(range(n_x), 2):
        out[i, j] = metric(X[i], Y[j], **kwds)
    # NB: ``out += out.T`` would give wrong results (aliasing); use a copy.
    out = out + out.T
    # Diagonal computed explicitly: nonzero self-distances/self-kernels
    # are legal (e.g. some kernels), so do not assume zeros.
    for i in range(n_x):
        out[i, i] = metric(X[i], X[i], **kwds)
    return out
# All metric names accepted by pairwise_distances(): the scikit-learn
# implementations plus everything scipy.spatial.distance understands.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.

    Valid values for metric are:

    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.

    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.

    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features], optional
        An optional second feature array. Only allowed if
        metric != "precomputed".
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.

    See also
    --------
    paired_distances : Computes the distances between corresponding
                       elements of two arrays
    """
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))
    if metric == "precomputed":
        # X already holds distances; just validate its shape and return it.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        # Remaining names fall through to scipy, which is dense-only.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        # NOTE(review): PAIRWISE_BOOLEAN_FUNCTIONS is defined later in the
        # module; the lookup resolves at call time, so this is fine.
        dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
        X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
        if n_jobs == 1 and X is Y:
            # pdist computes only the condensed upper triangle -- cheaper
            # than cdist for the symmetric X-vs-X case.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distances require boolean arrays, when using scipy.spatial.distance;
# pairwise_distances() casts the inputs to bool for these metric names.
PAIRWISE_BOOLEAN_FUNCTIONS = [
    'dice',
    'jaccard',
    'kulsinski',
    'matching',
    'rogerstanimoto',
    'russellrao',
    'sokalmichener',
    'sokalsneath',
    'yule',
]
# Helper functions - kernels
# Maps kernel names accepted by pairwise_kernels() to implementations.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'laplacian': laplacian_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """ Valid metrics for pairwise_kernels.

    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    ===============   ========================================
    metric            Function
    ===============   ========================================
    'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
    'chi2'            sklearn.pairwise.chi2_kernel
    'linear'          sklearn.pairwise.linear_kernel
    'poly'            sklearn.pairwise.polynomial_kernel
    'polynomial'      sklearn.pairwise.polynomial_kernel
    'rbf'             sklearn.pairwise.rbf_kernel
    'laplacian'       sklearn.pairwise.laplacian_kernel
    'sigmoid'         sklearn.pairwise.sigmoid_kernel
    'cosine'          sklearn.pairwise.cosine_similarity
    ===============   ========================================

    Read more in the :ref:`User Guide <metrics>`.

    Returns
    -------
    kernel_metrics : dict
        Mapping of kernel name to the implementing function (the module
        level PAIRWISE_KERNEL_FUNCTIONS dictionary itself, not a copy).
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword arguments each named kernel understands; used by pairwise_kernels()
# with filter_params=True to drop unsupported kwargs before dispatch.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "laplacian": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.
    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.

    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.
    filter_params : boolean
        Whether to filter invalid parameters or not.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    # import GPKernel locally to prevent circular imports
    from ..gaussian_process.kernels import Kernel as GPKernel
    if metric == "precomputed":
        # X is already a kernel matrix; just validate its shape.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif isinstance(metric, GPKernel):
        # Gaussian-process kernel objects are callable on (X, Y) directly.
        func = metric.__call__
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the kwargs this particular kernel understands.
            kwds = dict((k, kwds[k]) for k in kwds
                        if k in KERNEL_PARAMS[metric])
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.base.Predicates.equalTo;
import static com.google.common.collect.Iterables.all;
import static com.google.common.collect.Iterables.any;
import static com.google.common.collect.Iterables.elementsEqual;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.find;
import static com.google.common.collect.Iterables.frequency;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.Iterables.mergeSorted;
import static com.google.common.collect.Iterables.removeIf;
import static com.google.common.collect.Iterables.skip;
import static com.google.common.collect.Iterables.tryFind;
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.ReflectionFreeAssertThrows.assertThrows;
import static com.google.common.collect.Sets.newHashSet;
import static com.google.common.collect.testing.IteratorFeature.MODIFIABLE;
import static com.google.common.collect.testing.IteratorFeature.UNMODIFIABLE;
import static com.google.common.truth.Truth.assertThat;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptySet;
import static java.util.Collections.nCopies;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.testing.IteratorTester;
import com.google.common.testing.ClassSanityTester;
import com.google.common.testing.NullPointerTester;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.ConcurrentModificationException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.RandomAccess;
import java.util.Set;
import java.util.SortedSet;
import junit.framework.AssertionFailedError;
import junit.framework.TestCase;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
/**
* Unit test for {@code Iterables}.
*
* @author Kevin Bourrillion
* @author Jared Levy
*/
@GwtCompatible
@NullMarked
public class IterablesTest extends TestCase {
// Iterables.size on an empty collection.
public void testSize0() {
Iterable<String> iterable = emptySet();
assertEquals(0, Iterables.size(iterable));
}
// Iterables.size on a one-element Collection.
public void testSize1Collection() {
Iterable<String> iterable = singleton("a");
assertEquals(1, Iterables.size(iterable));
}
// A non-Collection Iterable forces size() to count elements by iterating.
public void testSize2NonCollection() {
Iterable<Integer> iterable =
new Iterable<Integer>() {
@Override
public Iterator<Integer> iterator() {
return asList(0, 1).iterator();
}
};
assertEquals(2, Iterables.size(iterable));
}
// For a Collection input, size() must delegate to Collection.size() and never
// call iterator() -- the anonymous subclass below fails the test if it does.
@SuppressWarnings("serial")
public void testSize_collection_doesntIterate() {
List<Integer> nums = asList(1, 2, 3, 4, 5);
List<Integer> collection =
new ArrayList<Integer>(nums) {
@Override
public Iterator<Integer> iterator() {
throw new AssertionFailedError("Don't iterate me!");
}
};
assertEquals(5, Iterables.size(collection));
}
/**
 * Wraps {@code elements} in a plain (non-Collection) {@code Iterable} that yields a fresh
 * iterator over the same backing list on every call.
 */
private static <T extends @Nullable Object> Iterable<T> iterable(T... elements) {
List<T> backing = asList(elements);
return backing::iterator;
}
// Iterables.contains: Set inputs exercise the Collection.contains fast path,
// while plain Iterables (built via iterable(...)) are scanned linearly.
// Each pairing covers both a null and a non-null needle.
public void test_contains_null_set_yes() {
Iterable<@Nullable String> set = newHashSet("a", null, "b");
assertTrue(Iterables.contains(set, null));
}
public void test_contains_null_set_no() {
Iterable<String> set = newHashSet("a", "b");
assertFalse(Iterables.contains(set, null));
}
public void test_contains_null_iterable_yes() {
Iterable<@Nullable String> set = iterable("a", null, "b");
assertTrue(Iterables.contains(set, null));
}
public void test_contains_null_iterable_no() {
Iterable<String> set = iterable("a", "b");
assertFalse(Iterables.contains(set, null));
}
public void test_contains_nonnull_set_yes() {
Iterable<@Nullable String> set = newHashSet("a", null, "b");
assertTrue(Iterables.contains(set, "b"));
}
public void test_contains_nonnull_set_no() {
Iterable<String> set = newHashSet("a", "b");
assertFalse(Iterables.contains(set, "c"));
}
public void test_contains_nonnull_iterable_yes() {
Iterable<@Nullable String> set = iterable("a", null, "b");
assertTrue(Iterables.contains(set, "b"));
}
public void test_contains_nonnull_iterable_no() {
Iterable<String> set = iterable("a", "b");
assertFalse(Iterables.contains(set, "c"));
}
// getOnlyElement without a default: returns the sole element, throws
// NoSuchElementException when empty and IllegalArgumentException for 2+.
public void testGetOnlyElement_noDefault_valid() {
Iterable<String> iterable = singletonList("foo");
assertEquals("foo", getOnlyElement(iterable));
}
public void testGetOnlyElement_noDefault_empty() {
Iterable<String> iterable = emptyList();
assertThrows(NoSuchElementException.class, () -> getOnlyElement(iterable));
}
public void testGetOnlyElement_noDefault_multiple() {
Iterable<String> iterable = asList("foo", "bar");
assertThrows(IllegalArgumentException.class, () -> getOnlyElement(iterable));
}
// With a default: the default (possibly null) is used only for the empty case;
// multiple elements still throw.
public void testGetOnlyElement_withDefault_singleton() {
Iterable<String> iterable = singletonList("foo");
assertEquals("foo", getOnlyElement(iterable, "bar"));
}
public void testGetOnlyElement_withDefault_empty() {
Iterable<String> iterable = emptyList();
assertEquals("bar", getOnlyElement(iterable, "bar"));
}
public void testGetOnlyElement_withDefault_empty_null() {
Iterable<String> iterable = emptyList();
assertThat(Iterables.<@Nullable String>getOnlyElement(iterable, null)).isNull();
}
public void testGetOnlyElement_withDefault_multiple() {
Iterable<String> iterable = asList("foo", "bar");
assertThrows(IllegalArgumentException.class, () -> getOnlyElement(iterable, "x"));
}
// Iterables.toArray(Iterable, Class) round-trips empty, singleton and
// multi-element inputs into a correctly-typed array.
@GwtIncompatible // Iterables.toArray(Iterable, Class)
public void testToArrayEmpty() {
Iterable<String> iterable = emptyList();
String[] array = Iterables.toArray(iterable, String.class);
assertThat(array).isEmpty();
}
@GwtIncompatible // Iterables.toArray(Iterable, Class)
public void testToArraySingleton() {
Iterable<String> iterable = singletonList("a");
String[] array = Iterables.toArray(iterable, String.class);
assertThat(array).isEqualTo(new String[] {"a"});
}
@GwtIncompatible // Iterables.toArray(Iterable, Class)
public void testToArray() {
String[] sourceArray = new String[] {"a", "b", "c"};
Iterable<String> iterable = asList(sourceArray);
String[] newArray = Iterables.toArray(iterable, String.class);
assertThat(newArray).isEqualTo(sourceArray);
}
// any(): false on an empty iterable, false while nothing matches, true as soon
// as one element does.
public void testAny() {
List<String> strings = new ArrayList<>();
Predicate<String> isPants = equalTo("pants");
assertFalse(any(strings, isPants));
strings.add("cool");
assertFalse(any(strings, isPants));
strings.add("pants");
assertTrue(any(strings, isPants));
}
// all(): vacuously true on an empty iterable, true while every element
// matches, false once any element fails.
public void testAll() {
List<String> strings = new ArrayList<>();
Predicate<String> isCool = equalTo("cool");
assertTrue(all(strings, isCool));
strings.add("cool");
assertTrue(all(strings, isCool));
strings.add("pants");
assertFalse(all(strings, isCool));
}
// find without a default throws NoSuchElementException on no match; the
// trailing assertCanIterateAgain proves find() did not exhaust the source.
public void testFind() {
Iterable<String> list = newArrayList("cool", "pants");
assertEquals("cool", find(list, equalTo("cool")));
assertEquals("pants", find(list, equalTo("pants")));
assertThrows(NoSuchElementException.class, () -> find(list, Predicates.alwaysFalse()));
assertEquals("cool", find(list, Predicates.alwaysTrue()));
assertCanIterateAgain(list);
}
// find with a default returns the default (possibly null) instead of throwing.
public void testFind_withDefault() {
Iterable<String> list = Lists.newArrayList("cool", "pants");
assertEquals("cool", find(list, equalTo("cool"), "woot"));
assertEquals("pants", find(list, equalTo("pants"), "woot"));
assertEquals("woot", find(list, Predicates.alwaysFalse(), "woot"));
assertThat(find(list, Predicates.alwaysFalse(), null)).isNull();
assertEquals("cool", find(list, Predicates.alwaysTrue(), "woot"));
assertCanIterateAgain(list);
}
// tryFind wraps the result in an Optional: present on match, absent otherwise.
public void testTryFind() {
Iterable<String> list = newArrayList("cool", "pants");
assertThat(tryFind(list, equalTo("cool"))).hasValue("cool");
assertThat(tryFind(list, equalTo("pants"))).hasValue("pants");
assertThat(tryFind(list, Predicates.alwaysTrue())).hasValue("cool");
assertThat(tryFind(list, Predicates.alwaysFalse())).isAbsent();
assertCanIterateAgain(list);
}
// Fixture types for filter-by-class: HasBoth is the only TypeA that is also a TypeB.
private static class TypeA {}
private interface TypeB {}
private static class HasBoth extends TypeA implements TypeB {}
// filter(Iterable, Class) keeps exactly the instances assignable to the class.
@GwtIncompatible // Iterables.filter(Iterable, Class)
public void testFilterByType_iterator() throws Exception {
HasBoth hasBoth = new HasBoth();
Iterable<TypeA> alist = Lists.newArrayList(new TypeA(), new TypeA(), hasBoth, new TypeA());
Iterable<TypeB> blist = filter(alist, TypeB.class);
assertThat(blist).containsExactly(hasBoth).inOrder();
}
// transform() is a lazy view: values are converted on iteration, the view can
// be iterated repeatedly, and toString() reflects the transformed elements.
public void testTransform_iterator() {
List<String> input = asList("1", "2", "3");
Iterable<Integer> result = Iterables.transform(input, Integer::valueOf);
List<Integer> actual = newArrayList(result);
List<Integer> expected = asList(1, 2, 3);
assertEquals(expected, actual);
assertCanIterateAgain(result);
assertEquals("[1, 2, 3]", result.toString());
}
// Because the view is lazy, a throwing function only fails when the bad
// element is actually reached.
public void testPoorlyBehavedTransform() {
List<String> input = asList("1", "not a number", "3");
Iterable<Integer> result = Iterables.transform(input, Integer::valueOf);
Iterator<Integer> resultIterator = result.iterator();
resultIterator.next();
assertThrows(NumberFormatException.class, () -> resultIterator.next());
}
// String.valueOf(Object) maps a null element to the string "null" rather than
// throwing, so transform() passes nulls through to the function.
public void testNullFriendlyTransform() {
List<@Nullable Integer> input = asList(1, 2, null, 3);
Iterable<String> result = Iterables.transform(input, String::valueOf);
List<String> actual = newArrayList(result);
List<String> expected = asList("1", "2", "null", "3");
assertEquals(expected, actual);
}
// Far less exhaustive than the tests in IteratorsTest
// cycle() repeats its elements endlessly; the loop breaks out manually after
// six elements because the view itself never ends.
public void testCycle() {
Iterable<String> cycle = Iterables.cycle("a", "b");
int howManyChecked = 0;
for (String string : cycle) {
String expected = (howManyChecked % 2 == 0) ? "a" : "b";
assertEquals(expected, string);
if (howManyChecked++ == 5) {
break;
}
}
// We left the last iterator pointing to "b". But a new iterator should
// always point to "a".
for (String string : cycle) {
assertEquals("a", string);
break;
}
assertEquals("[a, b] (cycled)", cycle.toString());
}
// Again, the exhaustive tests are in IteratorsTest
// concat() is a live view: mutations to the underlying inputs (and to the
// outer list of inputs) are visible through the already-created view.
public void testConcatIterable() {
List<Integer> list1 = newArrayList(1);
List<Integer> list2 = newArrayList(4);
List<List<Integer>> input = newArrayList(list1, list2);
Iterable<Integer> result = Iterables.concat(input);
assertEquals(asList(1, 4), newArrayList(result));
// Now change the inputs and see result dynamically change as well
list1.add(2);
List<Integer> list3 = newArrayList(3);
input.add(1, list3);
assertEquals(asList(1, 2, 3, 4), newArrayList(result));
assertEquals("[1, 2, 3, 4]", result.toString());
}
public void testConcatVarargs() {
List<Integer> list1 = newArrayList(1);
List<Integer> list2 = newArrayList(4);
List<Integer> list3 = newArrayList(7, 8);
List<Integer> list4 = newArrayList(9);
List<Integer> list5 = newArrayList(10);
Iterable<Integer> result = Iterables.concat(list1, list2, list3, list4, list5);
assertEquals(asList(1, 4, 7, 8, 9, 10), newArrayList(result));
assertEquals("[1, 4, 7, 8, 9, 10]", result.toString());
}
// A null input iterable is rejected eagerly at concat() time.
public void testConcatNullPointerException() {
List<Integer> list1 = newArrayList(1);
List<Integer> list2 = newArrayList(4);
assertThrows(NullPointerException.class, () -> Iterables.concat(list1, null, list2));
}
// concat(nCopies(n, it)) is the idiom for repeating an iterable n times.
public void testConcatPeformingFiniteCycle() {
Iterable<Integer> iterable = asList(1, 2, 3);
int n = 4;
Iterable<Integer> repeated = Iterables.concat(nCopies(n, iterable));
assertThat(repeated).containsExactly(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3).inOrder();
}
// partition() rejects non-positive sizes eagerly.
public void testPartition_badSize() {
Iterable<Integer> source = singleton(1);
assertThrows(IllegalArgumentException.class, () -> Iterables.partition(source, 0));
}
public void testPartition_empty() {
Iterable<Integer> source = emptySet();
Iterable<List<Integer>> partitions = Iterables.partition(source, 1);
assertTrue(Iterables.isEmpty(partitions));
}
public void testPartition_singleton1() {
Iterable<Integer> source = singleton(1);
Iterable<List<Integer>> partitions = Iterables.partition(source, 1);
assertEquals(1, Iterables.size(partitions));
assertEquals(singletonList(1), partitions.iterator().next());
}
// Each partition is a snapshot taken when iterator.next() is called: source
// changes made before that call are visible, later changes are not.
public void testPartition_view() {
List<Integer> list = asList(1, 2);
Iterable<List<Integer>> partitions = Iterables.partition(list, 2);
// Changes before the partition is retrieved are reflected
list.set(0, 3);
Iterator<List<Integer>> iterator = partitions.iterator();
// Changes before the partition is retrieved are reflected
list.set(1, 4);
List<Integer> first = iterator.next();
// Changes after are not
list.set(0, 5);
assertEquals(ImmutableList.of(3, 4), first);
}
// The returned partitions always implement RandomAccess, regardless of input.
@J2ktIncompatible // Arrays.asList(...).subList() doesn't implement RandomAccess in J2KT.
@GwtIncompatible // Arrays.asList(...).subList doesn't implement RandomAccess in GWT
public void testPartitionRandomAccessInput() {
Iterable<Integer> source = asList(1, 2, 3);
Iterable<List<Integer>> partitions = Iterables.partition(source, 2);
Iterator<List<Integer>> iterator = partitions.iterator();
assertTrue(iterator.next() instanceof RandomAccess);
assertTrue(iterator.next() instanceof RandomAccess);
}
@J2ktIncompatible // Arrays.asList(...).subList() doesn't implement RandomAccess in J2KT.
@GwtIncompatible // Arrays.asList(...).subList() doesn't implement RandomAccess in GWT
public void testPartitionNonRandomAccessInput() {
Iterable<Integer> source = new LinkedList<>(asList(1, 2, 3));
Iterable<List<Integer>> partitions = Iterables.partition(source, 2);
Iterator<List<Integer>> iterator = partitions.iterator();
// Even though the input list doesn't implement RandomAccess, the output
// lists do.
assertTrue(iterator.next() instanceof RandomAccess);
assertTrue(iterator.next() instanceof RandomAccess);
}
// paddedPartition() pads the final partition with nulls up to the chunk size.
public void testPaddedPartition_basic() {
List<Integer> list = asList(1, 2, 3, 4, 5);
Iterable<List<@Nullable Integer>> partitions = Iterables.paddedPartition(list, 2);
assertEquals(3, Iterables.size(partitions));
assertEquals(Arrays.<@Nullable Integer>asList(5, null), Iterables.getLast(partitions));
}
// As with partition(), the emitted partitions implement RandomAccess.
public void testPaddedPartitionRandomAccessInput() {
Iterable<Integer> source = asList(1, 2, 3);
Iterable<List<Integer>> partitions = Iterables.paddedPartition(source, 2);
Iterator<List<Integer>> iterator = partitions.iterator();
assertTrue(iterator.next() instanceof RandomAccess);
assertTrue(iterator.next() instanceof RandomAccess);
}
public void testPaddedPartitionNonRandomAccessInput() {
Iterable<Integer> source = new LinkedList<>(asList(1, 2, 3));
Iterable<List<Integer>> partitions = Iterables.paddedPartition(source, 2);
Iterator<List<Integer>> iterator = partitions.iterator();
// Even though the input list doesn't implement RandomAccess, the output
// lists do.
assertTrue(iterator.next() instanceof RandomAccess);
assertTrue(iterator.next() instanceof RandomAccess);
}
// More tests in IteratorsTest
// addAll() appends and reports whether the target collection changed.
public void testAddAllToList() {
List<String> alreadyThere = newArrayList("already", "there");
List<String> freshlyAdded = newArrayList("freshly", "added");
boolean changed = Iterables.addAll(alreadyThere, freshlyAdded);
assertThat(alreadyThere).containsExactly("already", "there", "freshly", "added").inOrder();
assertTrue(changed);
}
// Helper: walks the iterable once to prove it is not a spent one-shot view.
private static void assertCanIterateAgain(Iterable<?> iterable) {
for (@SuppressWarnings("unused") Object obj : iterable) {}
}
// All public static Iterables methods must reject nulls per their contracts.
@J2ktIncompatible
@GwtIncompatible // NullPointerTester
public void testNullPointerExceptions() {
NullPointerTester tester = new NullPointerTester();
tester.testAllPublicStaticMethods(Iterables.class);
}
// More exhaustive tests are in IteratorsTest.
// elementsEqual(): order-sensitive, null-aware, length-sensitive comparison.
public void testElementsEqual() throws Exception {
Iterable<?> a;
Iterable<?> b;
// A few elements.
a = asList(4, 8, 15, 16, 23, 42);
b = asList(4, 8, 15, 16, 23, 42);
assertTrue(elementsEqual(a, b));
// An element differs.
a = asList(4, 8, 15, 12, 23, 42);
b = asList(4, 8, 15, 16, 23, 42);
assertFalse(elementsEqual(a, b));
// null versus non-null.
a = Arrays.<@Nullable Integer>asList(4, 8, 15, null, 23, 42);
b = asList(4, 8, 15, 16, 23, 42);
assertFalse(elementsEqual(a, b));
assertFalse(elementsEqual(b, a));
// Different lengths.
a = asList(4, 8, 15, 16, 23);
b = asList(4, 8, 15, 16, 23, 42);
assertFalse(elementsEqual(a, b));
assertFalse(elementsEqual(b, a));
}
// Iterables.toString uses the standard bracketed, comma-separated format.
public void testToString() {
List<String> list = emptyList();
assertEquals("[]", Iterables.toString(list));
list = newArrayList("yam", "bam", "jam", "ham");
assertEquals("[yam, bam, jam, ham]", Iterables.toString(list));
}
// limit() is a re-iterable view of the first n elements.
public void testLimit() {
Iterable<String> iterable = newArrayList("foo", "bar", "baz");
Iterable<String> limited = Iterables.limit(iterable, 2);
List<String> expected = ImmutableList.of("foo", "bar");
List<String> actual = newArrayList(limited);
assertEquals(expected, actual);
assertCanIterateAgain(limited);
assertEquals("[foo, bar]", limited.toString());
}
// A negative limit is rejected eagerly.
public void testLimit_illegalArgument() {
List<String> list = newArrayList("a", "b", "c");
assertThrows(IllegalArgumentException.class, () -> Iterables.limit(list, -1));
}
public void testIsEmpty() {
Iterable<String> emptyList = emptyList();
assertTrue(Iterables.isEmpty(emptyList));
Iterable<String> singletonList = singletonList("foo");
assertFalse(Iterables.isEmpty(singletonList));
}
// skip() tests come in Set/List pairs because skip() has a List fast path
// (subList) distinct from the generic iterate-and-discard path.
public void testSkip_simple() {
Collection<String> set = ImmutableSet.of("a", "b", "c", "d", "e");
assertEquals(newArrayList("c", "d", "e"), newArrayList(skip(set, 2)));
assertEquals("[c, d, e]", skip(set, 2).toString());
}
public void testSkip_simpleList() {
Collection<String> list = newArrayList("a", "b", "c", "d", "e");
assertEquals(newArrayList("c", "d", "e"), newArrayList(skip(list, 2)));
assertEquals("[c, d, e]", skip(list, 2).toString());
}
// Skipping past the end yields an empty view rather than failing.
public void testSkip_pastEnd() {
Collection<String> set = ImmutableSet.of("a", "b");
assertEquals(emptyList(), newArrayList(skip(set, 20)));
}
public void testSkip_pastEndList() {
Collection<String> list = newArrayList("a", "b");
assertEquals(emptyList(), newArrayList(skip(list, 20)));
}
public void testSkip_skipNone() {
Collection<String> set = ImmutableSet.of("a", "b");
assertEquals(newArrayList("a", "b"), newArrayList(skip(set, 0)));
}
public void testSkip_skipNoneList() {
Collection<String> list = newArrayList("a", "b");
assertEquals(newArrayList("a", "b"), newArrayList(skip(list, 0)));
}
public void testSkip_removal() {
Collection<String> set = newHashSet("a", "b");
Iterator<String> iterator = skip(set, 2).iterator();
try {
iterator.next();
} catch (NoSuchElementException suppressed) {
// We want remove() to fail even after a failed call to next().
}
assertThrows(IllegalStateException.class, () -> iterator.remove());
}
// remove() before next() fails even when the whole source was skipped.
public void testSkip_allOfMutableList_modifiable() {
List<String> list = newArrayList("a", "b");
Iterator<String> iterator = skip(list, 2).iterator();
assertThrows(IllegalStateException.class, () -> iterator.remove());
}
public void testSkip_allOfImmutableList_modifiable() {
List<String> list = ImmutableList.of("a", "b");
Iterator<String> iterator = skip(list, 2).iterator();
assertThrows(UnsupportedOperationException.class, () -> iterator.remove());
}
@GwtIncompatible // slow (~35s)
public void testSkip_iterator() {
new IteratorTester<Integer>(
5, MODIFIABLE, newArrayList(2, 3), IteratorTester.KnownOrder.KNOWN_ORDER) {
@Override
protected Iterator<Integer> newTargetIterator() {
return skip(new LinkedHashSet<>(asList(1, 2, 3)), 1).iterator();
}
}.test();
}
@GwtIncompatible // slow (~35s)
public void testSkip_iteratorList() {
new IteratorTester<Integer>(
5, MODIFIABLE, newArrayList(2, 3), IteratorTester.KnownOrder.KNOWN_ORDER) {
@Override
protected Iterator<Integer> newTargetIterator() {
return skip(newArrayList(1, 2, 3), 1).iterator();
}
}.test();
}
// skip() is a live view: element replacement is visible through an
// already-created iterator.
public void testSkip_nonStructurallyModifiedList() throws Exception {
List<String> list = newArrayList("a", "b", "c");
Iterable<String> tail = skip(list, 1);
Iterator<String> tailIterator = tail.iterator();
list.set(2, "C");
assertEquals("b", tailIterator.next());
assertEquals("C", tailIterator.next());
assertFalse(tailIterator.hasNext());
}
// Structural modification of the source between creating the view and
// iterating it is also reflected (the skip count applies to the new content).
public void testSkip_structurallyModifiedSkipSome() throws Exception {
Collection<String> set = new LinkedHashSet<>(asList("a", "b", "c"));
Iterable<String> tail = skip(set, 1);
set.remove("b");
set.addAll(newArrayList("A", "B", "C"));
assertThat(tail).containsExactly("c", "A", "B", "C").inOrder();
}
public void testSkip_structurallyModifiedSkipSomeList() throws Exception {
List<String> list = newArrayList("a", "b", "c");
Iterable<String> tail = skip(list, 1);
list.subList(1, 3).clear();
list.addAll(0, newArrayList("A", "B", "C"));
assertThat(tail).containsExactly("B", "C", "a").inOrder();
}
public void testSkip_structurallyModifiedSkipAll() throws Exception {
Collection<String> set = new LinkedHashSet<>(asList("a", "b", "c"));
Iterable<String> tail = skip(set, 2);
set.remove("a");
set.remove("b");
assertFalse(tail.iterator().hasNext());
}
public void testSkip_structurallyModifiedSkipAllList() throws Exception {
List<String> list = newArrayList("a", "b", "c");
Iterable<String> tail = skip(list, 2);
list.subList(0, 2).clear();
assertTrue(Iterables.isEmpty(tail));
}
// A negative skip count is rejected eagerly.
public void testSkip_illegalArgument() {
List<String> list = newArrayList("a", "b", "c");
assertThrows(IllegalArgumentException.class, () -> skip(list, -1));
}
/**
 * Asserts the Iterables.get contract against an iterable containing exactly "a", "b", "c":
 * in-range indices return the element, while negative and past-the-end indices throw
 * {@link IndexOutOfBoundsException}.
 *
 * <p>Uses {@code assertThrows} for consistency with the rest of this file instead of the
 * legacy try/fail/catch idiom.
 */
private void testGetOnAbc(Iterable<String> iterable) {
assertThrows(IndexOutOfBoundsException.class, () -> Iterables.get(iterable, -1));
assertEquals("a", Iterables.get(iterable, 0));
assertEquals("b", Iterables.get(iterable, 1));
assertEquals("c", Iterables.get(iterable, 2));
assertThrows(IndexOutOfBoundsException.class, () -> Iterables.get(iterable, 3));
assertThrows(IndexOutOfBoundsException.class, () -> Iterables.get(iterable, 4));
}
/**
 * Asserts that Iterables.get on an empty iterable throws {@link IndexOutOfBoundsException}
 * even for index 0.
 */
private void testGetOnEmpty(Iterable<String> iterable) {
assertThrows(IndexOutOfBoundsException.class, () -> Iterables.get(iterable, 0));
}
// Iterables.get is exercised against the three internal paths: List (indexed),
// sorted set, and plain Iterable (linear scan).
public void testGet_list() {
testGetOnAbc(newArrayList("a", "b", "c"));
}
public void testGet_emptyList() {
testGetOnEmpty(Collections.<String>emptyList());
}
public void testGet_sortedSet() {
testGetOnAbc(ImmutableSortedSet.of("b", "c", "a"));
}
public void testGet_emptySortedSet() {
testGetOnEmpty(ImmutableSortedSet.<String>of());
}
public void testGet_iterable() {
testGetOnAbc(ImmutableSet.of("a", "b", "c"));
}
public void testGet_emptyIterable() {
testGetOnEmpty(new HashSet<String>());
}
// With a default: negative positions still throw; positions past the end
// return the default instead.
public void testGet_withDefault_negativePosition() {
assertThrows(
IndexOutOfBoundsException.class, () -> Iterables.get(newArrayList("a", "b", "c"), -1, "d"));
}
public void testGet_withDefault_simple() {
ArrayList<String> list = newArrayList("a", "b", "c");
assertEquals("b", Iterables.get(list, 1, "d"));
}
public void testGet_withDefault_iterable() {
Set<String> set = ImmutableSet.of("a", "b", "c");
assertEquals("b", Iterables.get(set, 1, "d"));
}
public void testGet_withDefault_last() {
ArrayList<String> list = newArrayList("a", "b", "c");
assertEquals("c", Iterables.get(list, 2, "d"));
}
public void testGet_withDefault_lastPlusOne() {
ArrayList<String> list = newArrayList("a", "b", "c");
assertEquals("d", Iterables.get(list, 3, "d"));
}
// On a List, get-with-default must index directly, never iterate.
public void testGet_withDefault_doesntIterate() {
List<String> list = new DiesOnIteratorArrayList();
list.add("a");
assertEquals("a", Iterables.get(list, 0, "b"));
}
// getFirst always takes a default; it is returned (possibly null) only when
// the iterable is empty.
public void testGetFirst_withDefault_singleton() {
Iterable<String> iterable = singletonList("foo");
assertEquals("foo", Iterables.getFirst(iterable, "bar"));
}
public void testGetFirst_withDefault_empty() {
Iterable<String> iterable = emptyList();
assertEquals("bar", Iterables.getFirst(iterable, "bar"));
}
public void testGetFirst_withDefault_empty_null() {
Iterable<String> iterable = emptyList();
assertThat(Iterables.<@Nullable String>getFirst(iterable, null)).isNull();
}
public void testGetFirst_withDefault_multiple() {
Iterable<String> iterable = asList("foo", "bar");
assertEquals("foo", Iterables.getFirst(iterable, "qux"));
}
// getLast without a default throws NoSuchElementException on empty input;
// a SortedSet input exercises the last() fast path.
public void testGetLast_list() {
List<String> list = newArrayList("a", "b", "c");
assertEquals("c", Iterables.getLast(list));
}
public void testGetLast_emptyList() {
List<String> list = emptyList();
assertThrows(NoSuchElementException.class, () -> Iterables.getLast(list));
}
public void testGetLast_sortedSet() {
SortedSet<String> sortedSet = ImmutableSortedSet.of("b", "c", "a");
assertEquals("c", Iterables.getLast(sortedSet));
}
public void testGetLast_withDefault_singleton() {
Iterable<String> iterable = singletonList("foo");
assertEquals("foo", Iterables.getLast(iterable, "bar"));
}
public void testGetLast_withDefault_empty() {
Iterable<String> iterable = emptyList();
assertEquals("bar", Iterables.getLast(iterable, "bar"));
}
public void testGetLast_withDefault_empty_null() {
Iterable<String> iterable = emptyList();
assertThat(Iterables.<@Nullable String>getLast(iterable, null)).isNull();
}
public void testGetLast_withDefault_multiple() {
Iterable<String> iterable = asList("foo", "bar");
assertEquals("bar", Iterables.getLast(iterable, "qux"));
}
/**
 * {@link ArrayList} extension that forbids the use of {@link Collection#iterator} for tests that
 * need to prove that it isn't called.
 */
private static class DiesOnIteratorArrayList extends ArrayList<String> {
/**
 * @throws UnsupportedOperationException all the time
 */
@Override
public Iterator<String> iterator() {
// Any code path that reaches this method fails the test using this fixture.
throw new UnsupportedOperationException();
}
}
// On a non-empty List, getLast must index the last element, never iterate.
public void testGetLast_withDefault_not_empty_list() {
// TODO: verify that this is the best testing strategy.
List<String> diesOnIteratorList = new DiesOnIteratorArrayList();
diesOnIteratorList.add("bar");
assertEquals("bar", Iterables.getLast(diesOnIteratorList, "qux"));
}
// Empty sorted set: no-default overload throws, with-default returns default.
public void testGetLast_emptySortedSet() {
SortedSet<String> sortedSet = ImmutableSortedSet.of();
assertThrows(NoSuchElementException.class, () -> Iterables.getLast(sortedSet));
assertEquals("c", Iterables.getLast(sortedSet, "c"));
}
public void testGetLast_iterable() {
Set<String> set = ImmutableSet.of("a", "b", "c");
assertEquals("c", Iterables.getLast(set));
}
public void testGetLast_emptyIterable() {
Set<String> set = new HashSet<>();
assertThrows(NoSuchElementException.class, () -> Iterables.getLast(set));
}
// The unmodifiable view reads through to the source but rejects remove().
public void testUnmodifiableIterable() {
List<String> list = newArrayList("a", "b", "c");
Iterable<String> iterable = Iterables.unmodifiableIterable(list);
Iterator<String> iterator = iterable.iterator();
iterator.next();
assertThrows(UnsupportedOperationException.class, () -> iterator.remove());
assertEquals("[a, b, c]", iterable.toString());
}
// Already-unmodifiable inputs (a previous wrapper, or an ImmutableList) are
// returned as-is rather than re-wrapped.
@SuppressWarnings({"deprecation", "InlineMeInliner"}) // test of a deprecated method
public void testUnmodifiableIterableShortCircuit() {
List<String> list = newArrayList("a", "b", "c");
Iterable<String> iterable = Iterables.unmodifiableIterable(list);
Iterable<String> iterable2 = Iterables.unmodifiableIterable(iterable);
assertSame(iterable, iterable2);
ImmutableList<String> immutableList = ImmutableList.of("a", "b", "c");
assertSame(immutableList, Iterables.unmodifiableIterable(immutableList));
assertSame(immutableList, Iterables.unmodifiableIterable((List<String>) immutableList));
}
// frequency() has fast paths for Multiset (count) and Set (0/1 membership),
// plus a generic counting path; absent, wrong-typed and null needles yield 0.
public void testFrequency_multiset() {
Multiset<String> multiset = ImmutableMultiset.of("a", "b", "a", "c", "b", "a");
assertEquals(3, frequency(multiset, "a"));
assertEquals(2, frequency(multiset, "b"));
assertEquals(1, frequency(multiset, "c"));
assertEquals(0, frequency(multiset, "d"));
assertEquals(0, frequency(multiset, 4.2));
assertEquals(0, frequency(multiset, null));
}
public void testFrequency_set() {
Set<String> set = newHashSet("a", "b", "c");
assertEquals(1, frequency(set, "a"));
assertEquals(1, frequency(set, "b"));
assertEquals(1, frequency(set, "c"));
assertEquals(0, frequency(set, "d"));
assertEquals(0, frequency(set, 4.2));
assertEquals(0, frequency(set, null));
}
public void testFrequency_list() {
List<String> list = newArrayList("a", "b", "a", "c", "b", "a");
assertEquals(3, frequency(list, "a"));
assertEquals(2, frequency(list, "b"));
assertEquals(1, frequency(list, "c"));
assertEquals(0, frequency(list, "d"));
assertEquals(0, frequency(list, 4.2));
assertEquals(0, frequency(list, null));
}
// removeAll/retainAll return whether the target changed; each is tested on a
// real Collection (delegating path) and a bare Iterable (iterator path).
public void testRemoveAll_collection() {
List<String> list = newArrayList("a", "b", "c", "d", "e");
assertTrue(Iterables.removeAll(list, newArrayList("b", "d", "f")));
assertEquals(newArrayList("a", "c", "e"), list);
assertFalse(Iterables.removeAll(list, newArrayList("x", "y", "z")));
assertEquals(newArrayList("a", "c", "e"), list);
}
public void testRemoveAll_iterable() {
List<String> list = newArrayList("a", "b", "c", "d", "e");
Iterable<String> iterable =
new Iterable<String>() {
@Override
public Iterator<String> iterator() {
return list.iterator();
}
};
assertTrue(Iterables.removeAll(iterable, newArrayList("b", "d", "f")));
assertEquals(newArrayList("a", "c", "e"), list);
assertFalse(Iterables.removeAll(iterable, newArrayList("x", "y", "z")));
assertEquals(newArrayList("a", "c", "e"), list);
}
public void testRetainAll_collection() {
List<String> list = newArrayList("a", "b", "c", "d", "e");
assertTrue(Iterables.retainAll(list, newArrayList("b", "d", "f")));
assertEquals(newArrayList("b", "d"), list);
assertFalse(Iterables.retainAll(list, newArrayList("b", "e", "d")));
assertEquals(newArrayList("b", "d"), list);
}
public void testRetainAll_iterable() {
List<String> list = newArrayList("a", "b", "c", "d", "e");
Iterable<String> iterable =
new Iterable<String>() {
@Override
public Iterator<String> iterator() {
return list.iterator();
}
};
assertTrue(Iterables.retainAll(iterable, newArrayList("b", "d", "f")));
assertEquals(newArrayList("b", "d"), list);
assertFalse(Iterables.retainAll(iterable, newArrayList("b", "e", "d")));
assertEquals(newArrayList("b", "d"), list);
}
// removeIf() returns whether anything was removed; exercised against the
// RandomAccess fast path, a duplicate-rejecting list, a transformed-list view,
// and (in the next test) the sequential LinkedList path.
public void testRemoveIf_randomAccess() {
List<String> list = newArrayList("a", "b", "c", "d", "e");
assertTrue(removeIf(list, s -> s.equals("b") || s.equals("d") || s.equals("f")));
assertEquals(newArrayList("a", "c", "e"), list);
assertFalse(removeIf(list, s -> s.equals("x") || s.equals("y") || s.equals("z")));
assertEquals(newArrayList("a", "c", "e"), list);
}
public void testRemoveIf_randomAccess_notPermittingDuplicates() {
// https://github.com/google/guava/issues/1596
List<String> uniqueList = newArrayList("a", "b", "c", "d", "e");
assertThat(uniqueList).containsNoDuplicates();
assertTrue(uniqueList instanceof RandomAccess);
assertTrue(removeIf(uniqueList, s -> s.equals("b") || s.equals("d") || s.equals("f")));
assertEquals(newArrayList("a", "c", "e"), uniqueList);
assertFalse(removeIf(uniqueList, s -> s.equals("x") || s.equals("y") || s.equals("z")));
assertEquals(newArrayList("a", "c", "e"), uniqueList);
}
// Removals through a Lists.transform view must write through to the source.
public void testRemoveIf_transformedList() {
List<String> list = newArrayList("1", "2", "3", "4", "5");
List<Integer> transformed = Lists.transform(list, Integer::valueOf);
assertTrue(removeIf(transformed, n -> (n & 1) == 0)); // remove the even values
assertEquals(newArrayList("1", "3", "5"), list);
assertFalse(removeIf(transformed, n -> (n & 1) == 0)); // nothing even remains
assertEquals(newArrayList("1", "3", "5"), list);
}
public void testRemoveIf_noRandomAccess() {
List<String> list = new LinkedList<>(asList("a", "b", "c", "d", "e"));
assertTrue(
removeIf(
list,
new Predicate<String>() {
@Override
public boolean apply(String s) {
return s.equals("b") || s.equals("d") || s.equals("f");
}
}));
assertEquals(newArrayList("a", "c", "e"), list);
assertFalse(
removeIf(
list,
new Predicate<String>() {
@Override
public boolean apply(String s) {
return s.equals("x") || s.equals("y") || s.equals("z");
}
}));
assertEquals(newArrayList("a", "c", "e"), list);
}
public void testRemoveIf_iterable() {
List<String> list = new LinkedList<>(asList("a", "b", "c", "d", "e"));
Iterable<String> iterable =
new Iterable<String>() {
@Override
public Iterator<String> iterator() {
return list.iterator();
}
};
assertTrue(
removeIf(
iterable,
new Predicate<String>() {
@Override
public boolean apply(String s) {
return s.equals("b") || s.equals("d") || s.equals("f");
}
}));
assertEquals(newArrayList("a", "c", "e"), list);
assertFalse(
removeIf(
iterable,
new Predicate<String>() {
@Override
public boolean apply(String s) {
return s.equals("x") || s.equals("y") || s.equals("z");
}
}));
assertEquals(newArrayList("a", "c", "e"), list);
}
// The Maps returned by Maps.filterEntries(), Maps.filterKeys(), and
// Maps.filterValues() are not tested with removeIf() since Maps are not
// Iterable. Those returned by Iterators.filter() and Iterables.filter()
// are not tested because they are unmodifiable.
  // consumingIterable over a List removes each element from the backing list
  // as next() returns it; in this test hasNext() alone removes nothing.
  public void testConsumingIterable() {
    // Test data
    List<String> list = new ArrayList<>(asList("a", "b"));
    // Test & Verify
    Iterable<String> consumingIterable = Iterables.consumingIterable(list);
    assertEquals("Iterables.consumingIterable(...)", consumingIterable.toString());
    Iterator<String> consumingIterator = consumingIterable.iterator();
    assertThat(list).containsExactly("a", "b").inOrder();
    // hasNext() leaves the backing list untouched.
    assertTrue(consumingIterator.hasNext());
    assertThat(list).containsExactly("a", "b").inOrder();
    // next() removes the element it returns.
    assertEquals("a", consumingIterator.next());
    assertThat(list).contains("b");
    assertTrue(consumingIterator.hasNext());
    assertEquals("b", consumingIterator.next());
    assertThat(list).isEmpty();
    assertFalse(consumingIterator.hasNext());
  }
@GwtIncompatible // ?
// TODO: Figure out why this is failing in GWT.
public void testConsumingIterable_duelingIterators() {
// Test data
List<String> list = new ArrayList<>(asList("a", "b"));
// Test & Verify
Iterator<String> i1 = Iterables.consumingIterable(list).iterator();
Iterator<String> i2 = Iterables.consumingIterable(list).iterator();
i1.next();
assertThrows(ConcurrentModificationException.class, () -> i2.next());
}
public void testConsumingIterable_queue_iterator() {
List<Integer> items = ImmutableList.of(4, 8, 15, 16, 23, 42);
new IteratorTester<Integer>(3, UNMODIFIABLE, items, IteratorTester.KnownOrder.KNOWN_ORDER) {
@Override
protected Iterator<Integer> newTargetIterator() {
return Iterables.consumingIterable(new LinkedList<>(items)).iterator();
}
}.test();
}
public void testConsumingIterable_queue_removesFromQueue() {
Queue<Integer> queue = new LinkedList<>(asList(5, 14));
Iterator<Integer> consumingIterator = Iterables.consumingIterable(queue).iterator();
assertEquals(5, queue.peek().intValue());
assertEquals(5, consumingIterator.next().intValue());
assertEquals(14, queue.peek().intValue());
assertTrue(consumingIterator.hasNext());
assertTrue(queue.isEmpty());
}
public void testConsumingIterable_noIteratorCall() {
Queue<Integer> queue = new UnIterableQueue<>(new LinkedList<>(asList(5, 14)));
Iterator<Integer> consumingIterator = Iterables.consumingIterable(queue).iterator();
/*
* Make sure that we can get an element off without calling
* UnIterableQueue.iterator().
*/
assertEquals(5, consumingIterator.next().intValue());
}
  /**
   * A {@code ForwardingQueue} whose {@link #iterator()} always throws, so a test
   * can prove that the code under test drains the queue without iterating it.
   */
  private static class UnIterableQueue<T> extends ForwardingQueue<T> {
    private final Queue<T> queue;
    UnIterableQueue(Queue<T> queue) {
      this.queue = queue;
    }
    @Override
    public Iterator<T> iterator() {
      throw new UnsupportedOperationException();
    }
    @Override
    protected Queue<T> delegate() {
      return queue;
    }
  }
public void testIndexOf_empty() {
List<String> list = new ArrayList<>();
assertEquals(-1, Iterables.indexOf(list, equalTo("")));
}
public void testIndexOf_oneElement() {
List<String> list = Lists.newArrayList("bob");
assertEquals(0, Iterables.indexOf(list, equalTo("bob")));
assertEquals(-1, Iterables.indexOf(list, equalTo("jack")));
}
public void testIndexOf_twoElements() {
List<String> list = Lists.newArrayList("mary", "bob");
assertEquals(0, Iterables.indexOf(list, equalTo("mary")));
assertEquals(1, Iterables.indexOf(list, equalTo("bob")));
assertEquals(-1, Iterables.indexOf(list, equalTo("jack")));
}
  // indexOf returns the index of the FIRST matching element even when
  // duplicates follow, and -1 when nothing matches.
  public void testIndexOf_withDuplicates() {
    List<String> list = Lists.newArrayList("mary", "bob", "bob", "bob", "sam");
    assertEquals(0, Iterables.indexOf(list, equalTo("mary")));
    assertEquals(1, Iterables.indexOf(list, equalTo("bob")));
    assertEquals(4, Iterables.indexOf(list, equalTo("sam")));
    assertEquals(-1, Iterables.indexOf(list, equalTo("jack")));
  }
private static final Predicate<CharSequence> STARTSWITH_A =
new Predicate<CharSequence>() {
@Override
public boolean apply(CharSequence input) {
return (input.length() > 0) && (input.charAt(0) == 'a');
}
};
@SuppressWarnings("UnnecessaryStringBuilder") // false positive in a weird case
public void testIndexOf_genericPredicate() {
List<CharSequence> sequences = new ArrayList<>();
sequences.add("bob");
sequences.add(new StringBuilder("charlie"));
sequences.add(new StringBuilder("henry"));
sequences.add(new StringBuilder("apple"));
sequences.add("lemon");
assertEquals(3, Iterables.indexOf(sequences, STARTSWITH_A));
}
public void testIndexOf_genericPredicate2() {
List<String> sequences = Lists.newArrayList("bob", "charlie", "henry", "apple", "lemon");
assertEquals(3, Iterables.indexOf(sequences, STARTSWITH_A));
}
public void testMergeSorted_empty() {
// Setup
Iterable<Iterable<Integer>> elements = ImmutableList.of();
// Test
Iterable<Integer> iterable = mergeSorted(elements, Ordering.natural());
// Verify
Iterator<Integer> iterator = iterable.iterator();
assertFalse(iterator.hasNext());
assertThrows(NoSuchElementException.class, () -> iterator.next());
}
public void testMergeSorted_single_empty() {
// Setup
Iterable<Integer> iterable0 = ImmutableList.of();
Iterable<Iterable<Integer>> iterables = ImmutableList.of(iterable0);
// Test & Verify
verifyMergeSorted(iterables, ImmutableList.<Integer>of());
}
public void testMergeSorted_single() {
// Setup
Iterable<Integer> iterable0 = ImmutableList.of(1, 2, 3);
Iterable<Iterable<Integer>> iterables = ImmutableList.of(iterable0);
// Test & Verify
verifyMergeSorted(iterables, iterable0);
}
public void testMergeSorted_pyramid() {
List<Iterable<Integer>> iterables = new LinkedList<>();
List<Integer> allIntegers = new ArrayList<>();
// Creates iterators like: {{}, {0}, {0, 1}, {0, 1, 2}, ...}
for (int i = 0; i < 10; i++) {
List<Integer> list = new LinkedList<>();
for (int j = 0; j < i; j++) {
list.add(j);
allIntegers.add(j);
}
iterables.add(Ordering.<Integer>natural().sortedCopy(list));
}
verifyMergeSorted(iterables, allIntegers);
}
// Like the pyramid, but creates more unique values, along with repeated ones.
public void testMergeSorted_skipping_pyramid() {
List<Iterable<Integer>> iterables = new LinkedList<>();
List<Integer> allIntegers = new ArrayList<>();
for (int i = 0; i < 20; i++) {
List<Integer> list = new LinkedList<>();
for (int j = 0; j < i; j++) {
list.add(j * i);
allIntegers.add(j * i);
}
iterables.add(Ordering.<Integer>natural().sortedCopy(list));
}
verifyMergeSorted(iterables, allIntegers);
}
@J2ktIncompatible
@GwtIncompatible // reflection
public void testIterables_nullCheck() throws Exception {
new ClassSanityTester()
.forAllPublicStaticMethods(Iterables.class)
.thatReturn(Iterable.class)
.testNulls();
}
  // Shared helper: merging the given (individually sorted) iterables must yield
  // exactly the sorted copy of all expected elements, duplicates included.
  private static void verifyMergeSorted(
      Iterable<Iterable<Integer>> iterables, Iterable<Integer> unsortedExpected) {
    Iterable<Integer> expected = Ordering.<Integer>natural().sortedCopy(unsortedExpected);
    Iterable<Integer> mergedIterator = mergeSorted(iterables, Ordering.natural());
    assertEquals(Lists.newLinkedList(expected), Lists.newLinkedList(mergedIterator));
  }
  // With a comparator that considers all elements equal, mergeSorted must be
  // stable: elements appear in input-iterable order (first fully, then second).
  public void testMergeSorted_stable_allEqual() {
    ImmutableList<Integer> first = ImmutableList.of(0, 2, 4, 6, 8);
    ImmutableList<Integer> second = ImmutableList.of(1, 3, 5, 7, 9);
    Comparator<Object> comparator = Ordering.allEqual();
    Iterable<Integer> merged = Iterables.mergeSorted(ImmutableList.of(first, second), comparator);
    assertThat(merged).containsExactly(0, 2, 4, 6, 8, 1, 3, 5, 7, 9).inOrder();
  }
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/collect/IterablesTest.java |
#ifndef SQL_EXCEPTION_HANDLER_H_INCLUDED
#define SQL_EXCEPTION_HANDLER_H_INCLUDED
/*
Copyright (c) 2017, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
@file
@brief This file declares functions to convert exceptions to MySQL
error messages.
The pattern for use in other functions is:
@code
try
{
something_that_throws();
}
catch (...)
{
handle_foo_exception("function_name");
}
@endcode
There are different handlers for different use cases.
*/
/**
Handle an exception of any type.
Code that could throw exceptions should be wrapped in try/catch, and
the catch block should raise a corresponding MySQL error. If this
function is called from the catch block, it will raise a specialized
error message for many of the std::exception subclasses, or a more
generic error message if it is not a std::exception.
@param funcname the name of the function that caught an exception
@see handle_gis_exception
*/
void handle_std_exception(const char *funcname);
/**
Handle a GIS exception of any type.
This function constitutes the exception handling barrier between
Boost.Geometry and MySQL code. It handles all exceptions thrown in
GIS code and raises the corresponding error in MySQL.
Pattern for use in other functions:
@code
try
{
something_that_throws();
}
catch (...)
{
handle_gis_exception("st_foo");
}
@endcode
Other exception handling code put into the catch block, before or
after the call to handle_gis_exception(), must not throw exceptions.
@param funcname Function name for use in error message
@see handle_std_exception
*/
void handle_gis_exception(const char *funcname);
#endif // SQL_EXCEPTION_HANDLER_H_INCLUDED | c | github | https://github.com/mysql/mysql-server | sql/sql_exception_handler.h |
package image
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/containerd/log"
"github.com/moby/sys/atomicwriter"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// DigestWalkFunc is function called by StoreBackend.Walk
type DigestWalkFunc func(id digest.Digest) error
// StoreBackend provides interface for image.Store persistence
type StoreBackend interface {
Walk(f DigestWalkFunc) error
Get(id digest.Digest) ([]byte, error)
Set(data []byte) (digest.Digest, error)
Delete(id digest.Digest) error
SetMetadata(id digest.Digest, key string, data []byte) error
GetMetadata(id digest.Digest, key string) ([]byte, error)
DeleteMetadata(id digest.Digest, key string) error
}
// fs implements StoreBackend using the filesystem.
type fs struct {
sync.RWMutex
root string
}
const (
contentDirName = "content"
metadataDirName = "metadata"
)
// NewFSStoreBackend returns new filesystem based backend for image.Store
func NewFSStoreBackend(root string) (StoreBackend, error) {
return newFSStore(root)
}
// newFSStore creates the on-disk layout for the store under root.
// Content and metadata live in separate trees; only the canonical digest
// algorithm (sha256) gets a directory. Both trees are private (0o700).
func newFSStore(root string) (*fs, error) {
	s := &fs{
		root: root,
	}
	if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0o700); err != nil {
		return nil, errors.Wrap(err, "failed to create storage backend")
	}
	if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0o700); err != nil {
		return nil, errors.Wrap(err, "failed to create storage backend")
	}
	return s, nil
}
// contentFile returns the path where the content blob for dgst is stored:
// <root>/content/<algorithm>/<encoded digest>.
func (s *fs) contentFile(dgst digest.Digest) string {
	algorithm := string(dgst.Algorithm())
	encoded := dgst.Encoded()
	return filepath.Join(s.root, contentDirName, algorithm, encoded)
}
// metadataDir returns the directory holding all metadata files for dgst:
// <root>/metadata/<algorithm>/<encoded digest>.
func (s *fs) metadataDir(dgst digest.Digest) string {
	algorithm := string(dgst.Algorithm())
	encoded := dgst.Encoded()
	return filepath.Join(s.root, metadataDirName, algorithm, encoded)
}
// Walk calls the supplied callback for each image ID in the storage backend.
// The read lock is held only while listing the directory; the callback runs
// without the lock held. Returning an error from f aborts the walk.
func (s *fs) Walk(f DigestWalkFunc) error {
	// Only Canonical digest (sha256) is currently supported
	s.RLock()
	dir, err := os.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
	s.RUnlock()
	if err != nil {
		return err
	}
	for _, v := range dir {
		dgst := digest.NewDigestFromEncoded(digest.Canonical, v.Name())
		if err := dgst.Validate(); err != nil {
			// Stray files whose names are not valid digests are skipped, not fatal.
			log.G(context.TODO()).Debugf("skipping invalid digest %s: %s", dgst, err)
			continue
		}
		if err := f(dgst); err != nil {
			return err
		}
	}
	return nil
}
// Get returns the content stored under a given digest.
func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
s.RLock()
defer s.RUnlock()
return s.get(dgst)
}
// get reads and returns the content stored for dgst, re-hashing the bytes to
// verify they still match the digest (guards against on-disk corruption).
// Callers hold s.RLock or s.Lock (see Get, SetMetadata, GetMetadata).
func (s *fs) get(dgst digest.Digest) ([]byte, error) {
	content, err := os.ReadFile(s.contentFile(dgst))
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get digest %s", dgst)
	}
	// todo: maybe optional
	if digest.FromBytes(content) != dgst {
		return nil, fmt.Errorf("failed to verify: %v", dgst)
	}
	return content, nil
}
// Set stores content by checksum.
func (s *fs) Set(data []byte) (digest.Digest, error) {
s.Lock()
defer s.Unlock()
if len(data) == 0 {
return "", errors.New("invalid empty data")
}
dgst := digest.FromBytes(data)
if err := atomicwriter.WriteFile(s.contentFile(dgst), data, 0o600); err != nil {
return "", errors.Wrap(err, "failed to write digest data")
}
return dgst, nil
}
// Delete removes content and metadata files associated with the digest.
func (s *fs) Delete(dgst digest.Digest) error {
s.Lock()
defer s.Unlock()
if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
return err
}
return os.Remove(s.contentFile(dgst))
}
// SetMetadata sets metadata for a given ID. It fails if there's no base file.
// The base content is looked up (and its digest re-verified) via s.get before
// anything is written; the value itself is written with atomicwriter so a
// reader never observes a partially written file.
func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
	s.Lock()
	defer s.Unlock()
	if _, err := s.get(dgst); err != nil {
		return err
	}
	baseDir := s.metadataDir(dgst)
	if err := os.MkdirAll(baseDir, 0o700); err != nil {
		return err
	}
	return atomicwriter.WriteFile(filepath.Join(baseDir, key), data, 0o600)
}
// GetMetadata returns metadata for a given digest.
func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
s.RLock()
defer s.RUnlock()
if _, err := s.get(dgst); err != nil {
return nil, err
}
bytes, err := os.ReadFile(filepath.Join(s.metadataDir(dgst), key))
if err != nil {
return nil, errors.Wrap(err, "failed to read metadata")
}
return bytes, nil
}
// DeleteMetadata removes the metadata associated with a digest.
func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
s.Lock()
defer s.Unlock()
return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
} | go | github | https://github.com/moby/moby | daemon/internal/image/fs.go |
#!/usr/bin/env python3
"""
Created on 6 Nov 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_core.csv.csv_writer import CSVWriter
# --------------------------------------------------------------------------------------------------------------------
# Exercise CSVWriter with three JSON documents that share a schema:
# a complete reference row, one with a field missing ("per"), and one
# with an extra field ("xxx"), then write all three and print the writer.
# reference row...
jstr1 = '{"rec": "2016-09-27T13:29:52.947+01:00", "val": {"opc_n2": {"pm1": 1, "pm2p5": 2, "pm10": 3, ' \
        '"per": 4, "bin1": {"a": 5, "b": 6}, "bin2": [7, 8], "mtf1": 9}}}'
print("jstr1: %s" % jstr1)
print("-")
# missing value...
jstr2 = '{"rec": "2016-09-27T13:29:52.947+01:00", "val": {"opc_n2": {"pm1": 1, "pm2p5": 2, "pm10": 3, ' \
        '"bin1": {"a": 5, "b": 6}, "bin2": [7, 8], "mtf1": 9}}}'
print("jstr2: %s" % jstr2)
print("-")
# extra value...
jstr3 = '{"rec": "2016-09-27T13:29:52.947+01:00", "val": {"opc_n2": {"pm1": 1, "pm2p5": 2, "pm10": 3, ' \
        '"per": 4, "xxx": 999, "bin1": {"a": 5, "b": 6}, "bin2": [7, 8], "mtf1": 9}}}'
print("jstr3: %s" % jstr3)
print("=")
# --------------------------------------------------------------------------------------------------------------------
writer = CSVWriter()
print(writer)
print("-")
# Writer state is printed before and after the writes so header/row handling
# for the three shapes can be inspected by eye.
writer.write(jstr1)
writer.write(jstr2)
writer.write(jstr3)
print("-")
print(writer) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group
short_description: create or delete log_group in CloudWatchLogs
notes:
- For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html).
description:
- Create or delete log_group in CloudWatchLogs.
version_added: "2.5"
author:
- Willian Ricardo (@willricardo) <willricardo@gmail.com>
requirements: [ json, botocore, boto3 ]
options:
state:
description:
- Whether the rule is present or absent.
choices: ["present", "absent"]
default: present
required: false
type: str
log_group_name:
description:
- The name of the log group.
required: true
type: str
kms_key_id:
description:
- The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
required: false
type: str
tags:
description:
- The key-value pairs to use for the tags.
required: false
type: dict
retention:
description:
- The number of days to retain the log events in the specified log group.
- "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
- Mutually exclusive with I(purge_retention_policy).
required: false
type: int
purge_retention_policy:
description:
- "Whether to purge the retention policy or not."
- "Mutually exclusive with I(retention) and I(overwrite)."
default: false
required: false
type: bool
version_added: "2.10"
overwrite:
description:
- Whether an existing log group should be overwritten on create.
- Mutually exclusive with I(purge_retention_policy).
default: false
required: false
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group:
log_group_name: test-log-group
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
kms_key_id: arn:aws:kms:region:account-id:key/key-id
- cloudwatchlogs_log_group:
state: absent
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
description: Return the list of complex objects representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: str
creation_time:
description: The creation time of the log group.
returned: always
type: int
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: int
metric_filter_count:
description: The number of metric filters.
returned: always
type: int
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: str
stored_bytes:
description: The number of bytes stored.
returned: always
type: str
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
    """Create a CloudWatchLogs log group and return its description dict.

    Optional KMS key and tags are passed through to the API; a retention
    policy is applied afterwards when requested.  Any API error, or failure
    to find the new group afterwards, fails the module via ``fail_json``.
    """
    request = {'logGroupName': log_group_name}
    if kms_key_id:
        request['kmsKeyId'] = kms_key_id
    if tags:
        request['tags'] = tags
    try:
        client.create_log_group(**request)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
                         exception=traceback.format_exc())
    if retention:
        input_retention_policy(client=client,
                               log_group_name=log_group_name,
                               retention=retention, module=module)
    desc_log_group = describe_log_group(client=client,
                                        log_group_name=log_group_name,
                                        module=module)
    # describe_log_groups matches by prefix, so filter for the exact name.
    if 'logGroups' in desc_log_group:
        for i in desc_log_group['logGroups']:
            if log_group_name == i['logGroupName']:
                return i
    module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
def input_retention_policy(client, log_group_name, retention, module):
    """Apply a retention policy (in days) to a log group.

    Only the retention periods accepted by the CloudWatchLogs
    ``PutRetentionPolicy`` API are permitted.

    NOTE: on an invalid retention value this function deletes the log group
    before failing.  Callers invoke it both right after creating a group and
    when updating an existing one, so an invalid value on an existing group
    removes that group -- preserved here for backward compatibility.
    """
    # Retention periods accepted by the PutRetentionPolicy API.
    permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
    try:
        if retention in permitted_values:
            client.put_retention_policy(logGroupName=log_group_name,
                                        retentionInDays=retention)
        else:
            delete_log_group(client=client, log_group_name=log_group_name, module=module)
            module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def delete_retention_policy(client, log_group_name, module):
    """Remove the retention policy from the log group; API errors fail the module."""
    try:
        client.delete_retention_policy(logGroupName=log_group_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def delete_log_group(client, log_group_name, module):
    """Delete the log group if it exists; a missing group is a silent no-op."""
    desc_log_group = describe_log_group(client=client,
                                        log_group_name=log_group_name,
                                        module=module)
    try:
        # describe_log_groups matches by prefix; delete only on an exact name match.
        if 'logGroups' in desc_log_group:
            for i in desc_log_group['logGroups']:
                if log_group_name == i['logGroupName']:
                    client.delete_log_group(logGroupName=log_group_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def describe_log_group(client, log_group_name, module):
    """Return the raw describe_log_groups() response for the given name prefix.

    API failures are reported through ``module.fail_json`` rather than raised.
    """
    try:
        return client.describe_log_groups(logGroupNamePrefix=log_group_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def main():
    """Module entry point: ensure the log group's presence/absence and settings."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        log_group_name=dict(required=True, type='str'),
        state=dict(choices=['present', 'absent'],
                   default='present'),
        kms_key_id=dict(required=False, type='str'),
        tags=dict(required=False, type='dict'),
        retention=dict(required=False, type='int'),
        purge_retention_policy=dict(required=False, type='bool', default=False),
        overwrite=dict(required=False, type='bool', default=False)
    ))
    mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']]
    module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    state = module.params.get('state')
    changed = False
    # Determine if the log group exists
    desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
    found_log_group = {}
    # describe_log_groups matches by prefix; keep only the exact-name entry.
    for i in desc_log_group.get('logGroups', []):
        if module.params['log_group_name'] == i['logGroupName']:
            found_log_group = i
            break
    if state == 'present':
        if found_log_group:
            if module.params['overwrite'] is True:
                # Recreate from scratch with the requested settings.
                changed = True
                delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
                found_log_group = create_log_group(client=logs,
                                                   log_group_name=module.params['log_group_name'],
                                                   kms_key_id=module.params['kms_key_id'],
                                                   tags=module.params['tags'],
                                                   retention=module.params['retention'],
                                                   module=module)
            elif module.params['purge_retention_policy']:
                # Only a change if there actually was a policy to remove.
                if found_log_group.get('retentionInDays'):
                    changed = True
                    delete_retention_policy(client=logs,
                                            log_group_name=module.params['log_group_name'],
                                            module=module)
            elif module.params['retention'] != found_log_group.get('retentionInDays'):
                if module.params['retention'] is not None:
                    changed = True
                    input_retention_policy(client=logs,
                                           log_group_name=module.params['log_group_name'],
                                           retention=module.params['retention'],
                                           module=module)
                    found_log_group['retentionInDays'] = module.params['retention']
        elif not found_log_group:
            changed = True
            found_log_group = create_log_group(client=logs,
                                               log_group_name=module.params['log_group_name'],
                                               kms_key_id=module.params['kms_key_id'],
                                               tags=module.params['tags'],
                                               retention=module.params['retention'],
                                               module=module)
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group))
    elif state == 'absent':
        if found_log_group:
            changed = True
            delete_log_group(client=logs,
                             log_group_name=module.params['log_group_name'],
                             module=module)
    module.exit_json(changed=changed)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;
import org.apache.kafka.connect.errors.DataException;
import java.util.Calendar;
import java.util.TimeZone;
/**
* <p>
* A time representing a specific point in a day, not tied to any specific date. The corresponding Java type is a
* {@link java.util.Date} where only hours, minutes, seconds, and milliseconds can be non-zero. This effectively makes it a
* point in time during the first day after the Unix epoch. The underlying representation is an integer
* representing the number of milliseconds after midnight.
* </p>
*/
public class Time {
public static final String LOGICAL_NAME = "org.apache.kafka.connect.data.Time";
private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
/**
* Returns a SchemaBuilder for a Time. By returning a SchemaBuilder you can override additional schema settings such
* as required/optional, default value, and documentation.
* @return a SchemaBuilder
*/
public static SchemaBuilder builder() {
return SchemaBuilder.int32()
.name(LOGICAL_NAME)
.version(1);
}
public static final Schema SCHEMA = builder().schema();
    /**
     * Convert a value from its logical format ({@link java.util.Date}) to its encoded format (int).
     * @param schema the schema describing the value; must be the Time logical type
     * @param value the logical value
     * @return the encoded value: milliseconds after midnight, UTC
     */
    public static int fromLogical(Schema schema, java.util.Date value) {
        if (!(LOGICAL_NAME.equals(schema.name())))
            throw new DataException("Requested conversion of Time object but the schema does not match.");
        // Interpret the Date in UTC; its epoch-millis must fall within the
        // first day after the Unix epoch (all date fields zero).
        Calendar calendar = Calendar.getInstance(UTC);
        calendar.setTime(value);
        long unixMillis = calendar.getTimeInMillis();
        if (unixMillis < 0 || unixMillis > MILLIS_PER_DAY) {
            // NOTE(review): unixMillis == MILLIS_PER_DAY (24:00:00.000) passes this
            // check -- confirm whether the upper bound was meant to be exclusive.
            throw new DataException("Kafka Connect Time type should not have any date fields set to non-zero values.");
        }
        return (int) unixMillis;
    }
/**
* Convert a value from its encoded format (int) to its logical format ({@link java.util.Date}).
* @param value the encoded value
* @return the logical value
*/
public static java.util.Date toLogical(Schema schema, int value) {
if (!(LOGICAL_NAME.equals(schema.name())))
throw new DataException("Requested conversion of Date object but the schema does not match.");
if (value < 0 || value > MILLIS_PER_DAY)
throw new DataException("Time values must use number of milliseconds greater than 0 and less than 86400000");
return new java.util.Date(value);
}
} | java | github | https://github.com/apache/kafka | connect/api/src/main/java/org/apache/kafka/connect/data/Time.java |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Serie.py
#
# Copyright (c) 2008 Magnun Leno da Silva
#
# Author: Magnun Leno da Silva <magnun.leno@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
# Contributor: Rodrigo Moreiro Araujo <alf.rodrigo@gmail.com>
#import cairoplot
import doctest
import collections
NUMTYPES = (int, float)
LISTTYPES = (list, tuple)
FILLING_TYPES = ['linear', 'solid', 'gradient']
DEFAULT_COLOR_FILLING = 'solid'
#TODO: Define default color list
DEFAULT_COLOR_LIST = None
class Data(object):
    '''
    Class that models the main data structure.
    It can hold:
     - a number type (int or float)
     - a tuple, which represents a point and can have 2 or 3 items (x,y,z)
     - if a list is passed it will be converted to a tuple.
    '''
    def __init__(self, data=None, name=None, parent=None):
        '''
        Starts main attributes of the Data class.
        @name    - Name for each point;
        @content - The real data, can be an int, float or tuple, which
                   represents a point (x,y) or (x,y,z);
        @parent  - A pointer that gives the data access to its parent.
        Usage:
        >>> d = Data(name='empty'); print d
        empty: ()
        >>> d = Data((1,1),'point a'); print d
        point a: (1, 1)
        >>> d = Data((1,2,3),'point b'); print d
        point b: (1, 2, 3)
        >>> d = Data([2,3],'point c'); print d
        point c: (2, 3)
        >>> d = Data(12, 'simple value'); print d
        simple value: 12
        '''
        # Initial values (the private fields behind the properties below)
        self.__content = None
        self.__name = None
        # Setting passed values; the property setters validate the input
        self.parent = parent
        self.name = name
        self.content = data
    # Name property
    @property
    def name(self):
        doc = '''
            Name is a read/write property that controls the input of name.
             - If passed an invalid value it cleans the name with None
            Usage:
            >>> d = Data(13); d.name = 'name_test'; print d
            name_test: 13
            >>> d.name = 11; print d
            13
            >>> d.name = 'other_name'; print d
            other_name: 13
            >>> d.name = None; print d
            13
            >>> d.name = 'last_name'; print d
            last_name: 13
            >>> d.name = ''; print d
            13
            '''
        return self.__name
    @name.setter
    def name(self, name):
        '''
        Sets the name of the Data.
        Any value that is not a non-empty string resets the name to None.
        '''
        if type(name) == str and len(name) > 0:
            self.__name = name
        else:
            self.__name = None
    # Content property
    @property
    def content(self):
        doc = '''
            Content is a read/write property that validates the data passed
            and returns it.
            Usage:
            >>> d = Data(); d.content = 13; d.content
            13
            >>> d = Data(); d.content = (1,2); d.content
            (1, 2)
            >>> d = Data(); d.content = (1,2,3); d.content
            (1, 2, 3)
            >>> d = Data(); d.content = [1,2,3]; d.content
            (1, 2, 3)
            >>> d = Data(); d.content = [1.5,.2,3.3]; d.content
            (1.5, 0.20000000000000001, 3.2999999999999998)
            '''
        return self.__content
    @content.setter
    def content(self, data):
        '''
        Ensures that data is a valid tuple/list (a 2D/3D point) or a
        number (int or float). Raises TypeError otherwise.
        '''
        # Type: None -- clears the content
        if data is None:
            self.__content = None
            return
        # Type: Int or Float -- stored as-is
        elif type(data) in NUMTYPES:
            self.__content = data
        # Type: List or Tuple -- treated as a point
        elif type(data) in LISTTYPES:
            # Ensures the correct size (a point has 2 or 3 coordinates)
            if len(data) not in (2, 3):
                raise TypeError("Data (as list/tuple) must have 2 or 3 items")
                # NOTE(review): this return is unreachable (after raise)
                return
            # Ensures that every item in the list/tuple is a number;
            # isnum is True for NON-numeric items, so max() is True if any
            # item is not a number.
            isnum = lambda x : type(x) not in NUMTYPES
            if max(list(map(isnum, data))):
                # An item in data isn't an int or a float
                raise TypeError("All content of data must be a number (int or float)")
            # Convert a (mutable) list to an immutable tuple
            # (the comment in the original said the opposite)
            if type(data) is list:
                data = tuple(data)
            # Store a copy of the tuple
            self.__content = data[:]
        # Unknown type!
        else:
            self.__content = None
            raise TypeError("Data must be an int, float or a tuple with two or three items")
            return
    def clear(self):
        '''
        Clear all the Data (content, name and parent).
        '''
        self.content = None
        self.name = None
        self.parent = None
    def copy(self):
        '''
        Returns a copy of the Data structure.
        The parent pointer is intentionally NOT copied.
        '''
        # The copy
        new_data = Data()
        if self.content is not None:
            # If content is a point, copy the tuple
            if type(self.content) is tuple:
                new_data.__content = self.content[:]
            # If content is a number, plain assignment is enough
            else:
                new_data.__content = self.content
        # If it has a name, carry it over
        if self.name is not None:
            new_data.__name = self.name
        return new_data
    def __str__(self):
        '''
        Return a string representation of the Data structure:
        "name: content", or just "content" when unnamed.
        '''
        if self.name is None:
            if self.content is None:
                return ''
            return str(self.content)
        else:
            if self.content is None:
                return self.name+": ()"
            return self.name+": "+str(self.content)
    def __len__(self):
        '''
        Return the length of the Data.
         - If it's a number return 1;
         - If it's a point return its number of coordinates;
         - If it's None return 0.
        '''
        if self.content is None:
            return 0
        elif type(self.content) in NUMTYPES:
            return 1
        return len(self.content)
class Group(object):
    '''
    Class that models a group of data. Every value (int, float, tuple
    or list) passed is converted to a list of Data.
    It can receive:
     - A single number (int, float);
     - A list of numbers;
     - A tuple of numbers;
     - An instance of Data;
     - A list of Data;
    Obs: If a tuple with 2 or 3 items is passed it is converted to a point.
         If a tuple with only 1 item is passed it's converted to a number;
         If a tuple with more than 3 items is passed it's converted to a
         list of numbers.
    '''
    def __init__(self, group=None, name=None, parent=None):
        '''
        Starts main attributes of a Group instance.
        @data_list - a list of Data which forms the group;
        @range     - a range that represents the x axis of possible functions;
        @name      - name of the data group;
        @parent    - the Series parent of this group.
        Usage:
        >>> g = Group(13, 'simple number'); print g
        simple number ['13']
        >>> g = Group((1,2), 'simple point'); print g
        simple point ['(1, 2)']
        >>> g = Group([1,2,3,4], 'list of numbers'); print g
        list of numbers ['1', '2', '3', '4']
        >>> g = Group((1,2,3,4),'int in tuple'); print g
        int in tuple ['1', '2', '3', '4']
        >>> g = Group([(1,2),(2,3),(3,4)], 'list of points'); print g
        list of points ['(1, 2)', '(2, 3)', '(3, 4)']
        >>> g = Group([[1,2,3],[1,2,3]], '2D coordinate lists'); print g
        2D coordinated lists ['(1, 1)', '(2, 2)', '(3, 3)']
        >>> g = Group([[1,2],[1,2],[1,2]], '3D coordinate lists'); print g
        3D coordinated lists ['(1, 1, 1)', '(2, 2, 2)']
        '''
        # Initial values; the property setters below validate the input
        self.__data_list = []
        self.__range = []
        self.__name = None
        self.parent = parent
        self.name = name
        self.data_list = group
    # Name property
    @property
    def name(self):
        '''
        Name is a read/write property that controls the input of name.
         - If passed an invalid value it cleans the name with None
        Usage:
        >>> g = Group(13); g.name = 'name_test'; print g
        name_test ['13']
        >>> g.name = 11; print g
        ['13']
        >>> g.name = 'other_name'; print g
        other_name ['13']
        >>> g.name = None; print g
        ['13']
        >>> g.name = 'last_name'; print g
        last_name ['13']
        >>> g.name = ''; print g
        ['13']
        '''
        return self.__name
    @name.setter
    def name(self, name):
        '''
        Sets the name of the Group; anything that is not a non-empty
        string resets it to None.
        '''
        if type(name) == str and len(name) > 0:
            self.__name = name
        else:
            self.__name = None
    # data_list property
    @property
    def data_list(self):
        '''
        The data_list is a read/write property that can be a list of
        numbers, a list of points or a list of 2 or 3 coordinate lists. This
        property uses mainly the self.add_data method.
        Usage:
        >>> g = Group(); g.data_list = 13; print g
        ['13']
        >>> g.data_list = (1,2); print g
        ['(1, 2)']
        >>> g.data_list = Data((1,2),'point a'); print g
        ['point a: (1, 2)']
        >>> g.data_list = [1,2,3]; print g
        ['1', '2', '3']
        >>> g.data_list = (1,2,3,4); print g
        ['1', '2', '3', '4']
        >>> g.data_list = [(1,2),(2,3),(3,4)]; print g
        ['(1, 2)', '(2, 3)', '(3, 4)']
        >>> g.data_list = [[1,2],[1,2]]; print g
        ['(1, 1)', '(2, 2)']
        >>> g.data_list = [[1,2],[1,2],[1,2]]; print g
        ['(1, 1, 1)', '(2, 2, 2)']
        >>> g.range = (10); g.data_list = lambda x:x**2; print g
        ['(0.0, 0.0)', '(1.0, 1.0)', '(2.0, 4.0)', '(3.0, 9.0)', '(4.0, 16.0)', '(5.0, 25.0)', '(6.0, 36.0)', '(7.0, 49.0)', '(8.0, 64.0)', '(9.0, 81.0)']
        '''
        return self.__data_list
    @data_list.setter
    def data_list(self, group):
        '''
        Validates the passed group and (re)builds the internal Data list.
        '''
        # Type: None -- clears the group
        if group is None:
            self.__data_list = []
        # Int/float or instance of Data -- a single item
        elif type(group) in NUMTYPES or isinstance(group, Data):
            self.__data_list = []
            self.add_data(group)
        # One point (2- or 3-tuple)
        elif type(group) is tuple and len(group) in (2, 3):
            self.__data_list = []
            self.add_data(group)
        # Empty list/tuple simply clears the group.
        # (BUG FIX: the original indexed group[0] below and raised
        # IndexError for an empty sequence.)
        elif type(group) in LISTTYPES and len(group) == 0:
            self.__data_list = []
        # List of items (numbers, points or Data)
        elif type(group) in LISTTYPES and type(group[0]) is not list:
            self.__data_list = []
            for item in group:
                self.add_data(item)
        # A function (e.g. lambda): evaluated over the x range
        elif isinstance(group, collections.Callable):
            # Explicit is better than implicit
            function = group
            # Has its own range
            if len(self.range) != 0:
                self.__data_list = []
                # Generate (x, f(x)) pairs for the function
                for x in self.range:
                    self.add_data((x, function(x)))
            # Only has a range in the parent: borrow a copy of it
            elif self.parent is not None and len(self.parent.range) != 0:
                self.__range = self.parent.range[:]
                self.__data_list = []
                for x in self.range:
                    self.add_data((x, function(x)))
            # No range available anywhere
            else:
                raise Exception("Data argument is valid but to use function type please set x_range first")
        # Coordinate lists: [[x...],[y...]] or [[x...],[y...],[z...]]
        elif type(group) in LISTTYPES and type(group[0]) is list:
            self.__data_list = []
            data = []
            if len(group) == 3:
                data = list(zip(group[0], group[1], group[2]))
            elif len(group) == 2:
                data = list(zip(group[0], group[1]))
            else:
                raise TypeError("Only one list of coordinates was received.")
            for item in data:
                self.add_data(item)
        else:
            raise TypeError("Group type not supported")
    @property
    def range(self):
        '''
        The range is a read/write property that generates a range of values
        for the x axis of the functions. When passed a tuple it almost works
        like the built-in range function:
         - 1 item, represents the end of the range started from 0;
         - 2 items, represent the start and the end, respectively;
         - 3 items, the last one represents the step;
        When passed a list the range function understands it as a valid range.
        Usage:
        >>> g = Group(); g.range = 10; print g.range
        [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
        >>> g = Group(); g.range = (5); print g.range
        [0.0, 1.0, 2.0, 3.0, 4.0]
        >>> g = Group(); g.range = (1,7); print g.range
        [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
        >>> g = Group(); g.range = (0,10,2); print g.range
        [0.0, 2.0, 4.0, 6.0, 8.0]
        >>>
        >>> g = Group(); g.range = [0]; print g.range
        [0.0]
        >>> g = Group(); g.range = [0,10,20]; print g.range
        [0.0, 10.0, 20.0]
        '''
        return self.__range
    @range.setter
    def range(self, x_range):
        '''
        Validates the input type and generates the range (end exclusive).
        '''
        # If passed a plain number, treat it as a 1-tuple (the end)
        if type(x_range) in NUMTYPES:
            x_range = (x_range,)
        # A list: taken verbatim as the range, converted to float
        if type(x_range) is list and len(x_range) > 0:
            x_range = list(map(float, x_range))
            # Remove repeated values and sort ascending
            self.__range = list(set(x_range[:]))
            self.__range.sort()
        # A tuple: check the length and generate the values
        elif type(x_range) is tuple and len(x_range) in (1, 2, 3):
            x_range = list(map(float, x_range))
            # Initial values
            start = 0.0
            step = 1.0
            end = 0.0
            # Only the end; it must be greater than 0.
            # (BUG FIX: the original compared the whole list with an int,
            # ``x_range > 0``, which raises TypeError on Python 3; it also
            # used ``is`` for integer comparison.)
            if len(x_range) == 1 and x_range[0] > 0:
                end = x_range[0]
            # Start and end; start must be less than end
            elif len(x_range) == 2 and x_range[0] < x_range[1]:
                start = x_range[0]
                end = x_range[1]
            # All 3; start must be less than or equal to end.
            # (BUG FIX: guard on the length so a malformed shorter tuple
            # cannot fall through and raise IndexError on x_range[2].)
            elif len(x_range) == 3 and x_range[0] <= x_range[1]:
                start = x_range[0]
                end = x_range[1]
                step = x_range[2]
            # Generate the range; the built-in range() can't be used
            # because it doesn't support float steps.
            self.__range = []
            while start < end:
                self.__range.append(start)
                start += step
        # Incorrect type
        else:
            raise Exception("x_range must be a list with one or more items or a tuple with 2 or 3 items")
    def add_data(self, data, name=None):
        '''
        Append a new data to the data_list.
         - If data is an instance of Data, append a copy of it;
         - If it's an int, float, tuple or list, create a Data and append it.
        Usage:
        >>> g = Group()
        >>> g.add_data(12); print g
        ['12']
        >>> g.add_data(7,'other'); print g
        ['12', 'other: 7']
        >>>
        >>> g = Group()
        >>> g.add_data((1,1),'a'); print g
        ['a: (1, 1)']
        >>> g.add_data((2,2),'b'); print g
        ['a: (1, 1)', 'b: (2, 2)']
        >>>
        >>> g.add_data(Data((1,2),'c')); print g
        ['a: (1, 1)', 'b: (2, 2)', 'c: (1, 2)']
        '''
        if not isinstance(data, Data):
            # Try to convert; Data's setters validate the value
            data = Data(data, name, self)
        # Empty Data (content is None) is silently ignored
        if data.content is not None:
            self.__data_list.append(data.copy())
            self.__data_list[-1].parent = self
    def to_list(self):
        '''
        Returns the group as a list of numbers (int or float) or a
        list of tuples (2D or 3D points).
        Usage:
        >>> g = Group([1,2,3,4],'g1'); g.to_list()
        [1, 2, 3, 4]
        >>> g = Group([(1,2),(2,3),(3,4)],'g2'); g.to_list()
        [(1, 2), (2, 3), (3, 4)]
        >>> g = Group([(1,2,3),(3,4,5)],'g2'); g.to_list()
        [(1, 2, 3), (3, 4, 5)]
        '''
        return [data.content for data in self]
    def copy(self):
        '''
        Returns a copy of this group.
        The parent pointer is intentionally NOT copied.
        '''
        new_group = Group()
        new_group.__name = self.__name
        if self.__range is not None:
            new_group.__range = self.__range[:]
        for data in self:
            new_group.add_data(data.copy())
        return new_group
    def get_names(self):
        '''
        Return a list with the names of all Data in this group; unnamed
        entries get a positional fallback label "Data N".
        '''
        # BUG FIX: Data has no index() method -- the original called
        # data.index() and raised AttributeError for unnamed entries.
        names = []
        for position, data in enumerate(self):
            if data.name is None:
                names.append('Data ' + str(position + 1))
            else:
                names.append(data.name)
        return names
    def __str__(self):
        '''
        Returns a string representing the Group: "name ['data', ...]".
        '''
        ret = ""
        if self.name is not None:
            ret += self.name + " "
        if len(self) > 0:
            list_str = [str(item) for item in self]
            ret += str(list_str)
        else:
            ret += "[]"
        return ret
    def __getitem__(self, key):
        '''
        Makes a Group iterable/indexable, backed by the data_list property.
        '''
        return self.data_list[key]
    def __len__(self):
        '''
        Returns the length of the Group, based on the data_list property.
        '''
        return len(self.data_list)
class Colors(object):
    '''
    Class that models the colors, their labels (names) and their properties:
    RGB values and filling type.
    It can receive:
     - A list where each item is a list/tuple with 3 or 4 items. The
       first 3 items represent the RGB values and the optional last item
       defines the filling type. The list will be converted to a dict
       and each color will receive a name based on its position in the
       list ("Color 1", "Color 2", ...).
     - A dictionary where each key is the color name and each value is a
       list/tuple with 3 or 4 items as above.
    '''
    def __init__(self, color_list=None):
        '''
        Starts the color_list property.
        @color_list - the list or dict containing the color properties.
        '''
        self.__color_list = None
        self.color_list = color_list
    @property
    def color_list(self):
        '''
        Read/write property holding the normalized color dictionary
        (name -> [R, G, B, filling]).
        BUG FIX: the original getter stored its documentation in a local
        variable and had no return statement, so reading the property
        always yielded None.
        >>> c = Colors([[1,1,1],[2,2,2,'linear'],[3,3,3,'gradient']])
        >>> print c.color_list
        {'Color 2': [2, 2, 2, 'linear'], 'Color 3': [3, 3, 3, 'gradient'], 'Color 1': [1, 1, 1, 'solid']}
        >>> c.color_list = [[1,1,1],(2,2,2,'solid'),(3,3,3,'linear')]
        >>> print c.color_list
        {'Color 2': [2, 2, 2, 'solid'], 'Color 3': [3, 3, 3, 'linear'], 'Color 1': [1, 1, 1, 'solid']}
        >>> c.color_list = {'a':[1,1,1],'b':(2,2,2,'solid'),'c':(3,3,3,'linear'), 'd':(4,4,4)}
        >>> print c.color_list
        {'a': [1, 1, 1, 'solid'], 'c': [3, 3, 3, 'linear'], 'b': [2, 2, 2, 'solid'], 'd': [4, 4, 4, 'solid']}
        '''
        return self.__color_list
    @color_list.setter
    def color_list(self, color_list):
        '''
        Normalizes the color list into a dictionary mapping
        name -> [R, G, B, filling]; raises TypeError for invalid formats.
        '''
        if color_list is None:
            self.__color_list = None
            return
        # A sequence of colors: convert to a dict with generated names.
        if type(color_list) in LISTTYPES and type(color_list[0]) in LISTTYPES:
            old_color_list = color_list[:]
            color_list = {}
            for index, color in enumerate(old_color_list):
                # BUG FIX: the original used max(map(type, color)) to test
                # numericness; type objects are not orderable on Python 3,
                # so that expression raised TypeError. all() expresses the
                # actual intent: every channel must be a number.
                if len(color) == 3 and all(type(c) in NUMTYPES for c in color):
                    color_list['Color ' + str(index + 1)] = list(color) + [DEFAULT_COLOR_FILLING]
                elif len(color) == 4 and all(type(c) in NUMTYPES for c in color[:-1]) and color[-1] in FILLING_TYPES:
                    color_list['Color ' + str(index + 1)] = list(color)
                else:
                    raise TypeError("Unsuported color format")
        elif type(color_list) is not dict:
            raise TypeError("Unsuported color format")
        # Validate/normalize every entry of the dict.
        for name, color in list(color_list.items()):
            if len(color) == 3:
                if all(type(c) in NUMTYPES for c in color):
                    # Default filling appended when none is given
                    color_list[name] = list(color) + [DEFAULT_COLOR_FILLING]
                else:
                    raise TypeError("Unsuported color format")
            elif len(color) == 4:
                if all(type(c) in NUMTYPES for c in color[:-1]) and color[-1] in FILLING_TYPES:
                    color_list[name] = list(color)
                else:
                    raise TypeError("Unsuported color format")
        # Store a shallow copy so later external mutation of the argument
        # does not affect this instance.
        self.__color_list = color_list.copy()
class Series:
    '''
    Class that models a Series (group of groups). Every value (int, float,
    tuple or list) passed is converted to a list of Group or Data.
    It can receive:
     - a single number or point, converted to a Group of one Data;
     - a list of numbers, converted to a group of numbers;
     - a list of tuples, converted to a single Group of points;
     - a list of lists of numbers, each 'sublist' converted to a group of
       numbers;
     - a list of lists of tuples, each 'sublist' converted to a group of
       points;
     - a list of lists of lists, the content of the 'sublist' processed as
       coordinate lists and converted to a group of points;
     - a dictionary where each item can be the same as the list cases;
     - an instance of Data;
     - an instance of Group.
    '''
    def __init__(self, series=None, name=None, property=[], colors=None):
        '''
        Starts main attributes of a Series instance.
        @series   - a list/dict of data of which the series is composed;
        @name     - name of the series;
        @property - a list/dict of properties to be used in the plots of
                    this Series. NOTE(review): this parameter is currently
                    unused and shadows the built-in property(); kept only
                    for interface compatibility.
        @colors   - color definitions, see the Colors class.
        Usage:
        >>> print Series([1,2,3,4])
        ["Group 1 ['1', '2', '3', '4']"]
        >>> print Series([[1,2,3],[4,5,6]])
        ["Group 1 ['1', '2', '3']", "Group 2 ['4', '5', '6']"]
        >>> print Series((1,2))
        ["Group 1 ['(1, 2)']"]
        >>> print Series([(1,2),(2,3)])
        ["Group 1 ['(1, 2)', '(2, 3)']"]
        >>> print Series([[(1,2),(2,3)],[(4,5),(5,6)]])
        ["Group 1 ['(1, 2)', '(2, 3)']", "Group 2 ['(4, 5)', '(5, 6)']"]
        >>> print Series([[[1,2,3],[1,2,3],[1,2,3]]])
        ["Group 1 ['(1, 1, 1)', '(2, 2, 2)', '(3, 3, 3)']"]
        >>> print Series({'g1':[1,2,3], 'g2':[4,5,6]})
        ["g1 ['1', '2', '3']", "g2 ['4', '5', '6']"]
        >>> print Series({'g1':[(1,2),(2,3)], 'g2':[(4,5),(5,6)]})
        ["g1 ['(1, 2)', '(2, 3)']", "g2 ['(4, 5)', '(5, 6)']"]
        >>> print Series({'g1':[[1,2],[1,2]], 'g2':[[4,5],[4,5]]})
        ["g1 ['(1, 1)', '(2, 2)']", "g2 ['(4, 4)', '(5, 5)']"]
        >>> print Series(Data(1,'d1'))
        ["Group 1 ['d1: 1']"]
        >>> print Series(Group([(1,2),(2,3)],'g1'))
        ["g1 ['(1, 2)', '(2, 3)']"]
        '''
        # Initial values; the property setters below validate the input
        self.__group_list = []
        self.__name = None
        self.__range = None
        # TODO: Implement colors with filling
        self.__colors = None
        self.name = name
        self.group_list = series
        self.colors = colors
    # Name property
    @property
    def name(self):
        '''
        Name is a read/write property that controls the input of name.
         - If passed an invalid value it cleans the name with None
        Usage:
        >>> s = Series(13); s.name = 'name_test'; print s
        name_test ["Group 1 ['13']"]
        >>> s.name = 11; print s
        ["Group 1 ['13']"]
        >>> s.name = 'other_name'; print s
        other_name ["Group 1 ['13']"]
        >>> s.name = None; print s
        ["Group 1 ['13']"]
        >>> s.name = 'last_name'; print s
        last_name ["Group 1 ['13']"]
        >>> s.name = ''; print s
        ["Group 1 ['13']"]
        '''
        return self.__name
    @name.setter
    def name(self, name):
        '''
        Sets the name of the Series; anything that is not a non-empty
        string resets it to None.
        '''
        if type(name) == str and len(name) > 0:
            self.__name = name
        else:
            self.__name = None
    # Colors property
    @property
    def colors(self):
        '''
        Read/write access to the normalized color dictionary held by the
        internal Colors instance.
        >>> s = Series()
        >>> s.colors = [[1,1,1],[2,2,2,'linear'],[3,3,3,'gradient']]
        >>> print s.colors
        {'Color 2': [2, 2, 2, 'linear'], 'Color 3': [3, 3, 3, 'gradient'], 'Color 1': [1, 1, 1, 'solid']}
        >>> s.colors = [[1,1,1],(2,2,2,'solid'),(3,3,3,'linear')]
        >>> print s.colors
        {'Color 2': [2, 2, 2, 'solid'], 'Color 3': [3, 3, 3, 'linear'], 'Color 1': [1, 1, 1, 'solid']}
        >>> s.colors = {'a':[1,1,1],'b':(2,2,2,'solid'),'c':(3,3,3,'linear'), 'd':(4,4,4)}
        >>> print s.colors
        {'a': [1, 1, 1, 'solid'], 'c': [3, 3, 3, 'linear'], 'b': [2, 2, 2, 'solid'], 'd': [4, 4, 4, 'solid']}
        '''
        return self.__colors.color_list
    @colors.setter
    def colors(self, colors):
        '''
        Delegates normalization/validation to the Colors class.
        '''
        self.__colors = Colors(colors)
    @property
    def range(self):
        '''
        The range is a read/write property that generates a range of values
        for the x axis of the functions. When passed a tuple it almost works
        like the built-in range function:
         - 1 item, represents the end of the range started from 0;
         - 2 items, represent the start and the end, respectively;
         - 3 items, the last one represents the step;
        When passed a list the range function understands it as a valid range.
        Note: unlike Group.range, the end value is INCLUSIVE here.
        Usage:
        >>> s = Series(); s.range = 10; print s.range
        [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
        >>> s = Series(); s.range = (5); print s.range
        [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
        >>> s = Series(); s.range = (1,7); print s.range
        [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
        >>> s = Series(); s.range = (0,10,2); print s.range
        [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]
        >>>
        >>> s = Series(); s.range = [0]; print s.range
        [0.0]
        >>> s = Series(); s.range = [0,10,20]; print s.range
        [0.0, 10.0, 20.0]
        '''
        return self.__range
    @range.setter
    def range(self, x_range):
        '''
        Validates the input type and generates the range (end inclusive).
        '''
        # If passed a plain number, treat it as a 1-tuple (the end)
        if type(x_range) in NUMTYPES:
            x_range = (x_range,)
        # A list: taken verbatim as the range, converted to float
        if type(x_range) is list and len(x_range) > 0:
            x_range = list(map(float, x_range))
            # Remove repeated values and sort ascending
            self.__range = list(set(x_range[:]))
            self.__range.sort()
        # A tuple: check the length and generate the values
        elif type(x_range) is tuple and len(x_range) in (1, 2, 3):
            x_range = list(map(float, x_range))
            # Initial values
            start = 0.0
            step = 1.0
            end = 0.0
            # Only the end; it must be greater than 0.
            # (BUG FIX: the original compared the whole list with an int,
            # ``x_range > 0``, which raises TypeError on Python 3; it also
            # used ``is`` for integer comparison.)
            if len(x_range) == 1 and x_range[0] > 0:
                end = x_range[0]
            # Start and end; start must be less than end
            elif len(x_range) == 2 and x_range[0] < x_range[1]:
                start = x_range[0]
                end = x_range[1]
            # All 3; start must be less than end.
            # (BUG FIX: guard on the length so a malformed shorter tuple
            # cannot fall through and raise IndexError on x_range[2].)
            elif len(x_range) == 3 and x_range[0] < x_range[1]:
                start = x_range[0]
                end = x_range[1]
                step = x_range[2]
            # Generate the range; the built-in range() can't be used
            # because it doesn't support float steps.
            self.__range = []
            while start <= end:
                self.__range.append(start)
                start += step
        # Incorrect type
        else:
            raise Exception("x_range must be a list with one or more item or a tuple with 2 or 3 items")
    @property
    def group_list(self):
        '''
        The group_list is a read/write property used to pre-process the list
        of Groups.
        It can be:
         - a single number, point or lambda, converted to a single Group
           of one Data;
         - a list of numbers, converted to a group of numbers;
         - a list of tuples, converted to a single Group of points;
         - a list of lists of numbers, each 'sublist' converted to a group
           of numbers;
         - a list of lists of tuples, each 'sublist' converted to a group
           of points;
         - a list of lists of lists, the content of the 'sublist' processed
           as coordinate lists and converted to a group of points;
         - a list of lambdas, each lambda representing a Group;
         - a dictionary where each item can be the same as the list cases;
         - an instance of Data;
         - an instance of Group.
        Usage:
        >>> s = Series()
        >>> s.group_list = [1,2,3,4]; print s
        ["Group 1 ['1', '2', '3', '4']"]
        >>> s.group_list = [[1,2,3],[4,5,6]]; print s
        ["Group 1 ['1', '2', '3']", "Group 2 ['4', '5', '6']"]
        >>> s.group_list = (1,2); print s
        ["Group 1 ['(1, 2)']"]
        >>> s.group_list = [(1,2),(2,3)]; print s
        ["Group 1 ['(1, 2)', '(2, 3)']"]
        >>> s.group_list = [[(1,2),(2,3)],[(4,5),(5,6)]]; print s
        ["Group 1 ['(1, 2)', '(2, 3)']", "Group 2 ['(4, 5)', '(5, 6)']"]
        >>> s.group_list = [[[1,2,3],[1,2,3],[1,2,3]]]; print s
        ["Group 1 ['(1, 1, 1)', '(2, 2, 2)', '(3, 3, 3)']"]
        >>> s.group_list = [(0.5,5.5) , [(0,4),(6,8)] , (5.5,7) , (7,9)]; print s
        ["Group 1 ['(0.5, 5.5)']", "Group 2 ['(0, 4)', '(6, 8)']", "Group 3 ['(5.5, 7)']", "Group 4 ['(7, 9)']"]
        >>> s.group_list = {'g1':[1,2,3], 'g2':[4,5,6]}; print s
        ["g1 ['1', '2', '3']", "g2 ['4', '5', '6']"]
        >>> s.group_list = {'g1':[(1,2),(2,3)], 'g2':[(4,5),(5,6)]}; print s
        ["g1 ['(1, 2)', '(2, 3)']", "g2 ['(4, 5)', '(5, 6)']"]
        >>> s.group_list = {'g1':[[1,2],[1,2]], 'g2':[[4,5],[4,5]]}; print s
        ["g1 ['(1, 1)', '(2, 2)']", "g2 ['(4, 4)', '(5, 5)']"]
        >>> s.range = 10
        >>> s.group_list = lambda x:x*2
        >>> s.group_list = [lambda x:x*2, lambda x:x**2, lambda x:x**3]; print s
        ["Group 1 ['(0.0, 0.0)', '(1.0, 2.0)', '(2.0, 4.0)', '(3.0, 6.0)', '(4.0, 8.0)', '(5.0, 10.0)', '(6.0, 12.0)', '(7.0, 14.0)', '(8.0, 16.0)', '(9.0, 18.0)', '(10.0, 20.0)']", "Group 2 ['(0.0, 0.0)', '(1.0, 1.0)', '(2.0, 4.0)', '(3.0, 9.0)', '(4.0, 16.0)', '(5.0, 25.0)', '(6.0, 36.0)', '(7.0, 49.0)', '(8.0, 64.0)', '(9.0, 81.0)', '(10.0, 100.0)']", "Group 3 ['(0.0, 0.0)', '(1.0, 1.0)', '(2.0, 8.0)', '(3.0, 27.0)', '(4.0, 64.0)', '(5.0, 125.0)', '(6.0, 216.0)', '(7.0, 343.0)', '(8.0, 512.0)', '(9.0, 729.0)', '(10.0, 1000.0)']"]
        >>> s.group_list = {'linear':lambda x:x*2, 'square':lambda x:x**2, 'cubic':lambda x:x**3}; print s
        ["cubic ['(0.0, 0.0)', '(1.0, 1.0)', '(2.0, 8.0)', '(3.0, 27.0)', '(4.0, 64.0)', '(5.0, 125.0)', '(6.0, 216.0)', '(7.0, 343.0)', '(8.0, 512.0)', '(9.0, 729.0)', '(10.0, 1000.0)']", "linear ['(0.0, 0.0)', '(1.0, 2.0)', '(2.0, 4.0)', '(3.0, 6.0)', '(4.0, 8.0)', '(5.0, 10.0)', '(6.0, 12.0)', '(7.0, 14.0)', '(8.0, 16.0)', '(9.0, 18.0)', '(10.0, 20.0)']", "square ['(0.0, 0.0)', '(1.0, 1.0)', '(2.0, 4.0)', '(3.0, 9.0)', '(4.0, 16.0)', '(5.0, 25.0)', '(6.0, 36.0)', '(7.0, 49.0)', '(8.0, 64.0)', '(9.0, 81.0)', '(10.0, 100.0)']"]
        >>> s.group_list = Data(1,'d1'); print s
        ["Group 1 ['d1: 1']"]
        >>> s.group_list = Group([(1,2),(2,3)],'g1'); print s
        ["g1 ['(1, 2)', '(2, 3)']"]
        '''
        return self.__group_list
    @group_list.setter
    def group_list(self, series):
        '''
        Validates the input and (re)builds the internal list of Groups.
        '''
        # Type: None -- clears the series
        if series is None:
            self.__group_list = []
        # List or Tuple
        elif type(series) in LISTTYPES:
            self.__group_list = []
            is_function = lambda x: isinstance(x, collections.Callable)
            # Several groups: the list contains sub-lists or callables.
            # (BUG FIX: any() also handles an empty list, where the original
            # max(list(map(...))) raised ValueError.)
            if any(type(item) is list for item in series) or any(is_function(item) for item in series):
                for group in series:
                    self.add_group(group)
            # A single group
            else:
                self.add_group(series)
        # Dict representing a series of named groups, sorted by name
        elif type(series) is dict:
            self.__group_list = []
            names = list(series.keys())
            names.sort()
            for name in names:
                self.add_group(Group(series[name], name, self))
        # A single lambda
        elif isinstance(series, collections.Callable):
            self.__group_list = []
            self.add_group(series)
        # Int/float, instance of Group or Data
        elif type(series) in NUMTYPES or isinstance(series, Group) or isinstance(series, Data):
            self.__group_list = []
            self.add_group(series)
        # Default
        else:
            raise TypeError("Serie type not supported")
    def add_group(self, group, name=None):
        '''
        Append a new group to group_list; empty groups are ignored and
        unnamed groups receive a positional fallback name "Group N".
        '''
        if not isinstance(group, Group):
            # Try to convert; Group's setters validate the value
            group = Group(group, name, self)
        if len(group.data_list) != 0:
            # Auto naming groups
            if group.name is None:
                group.name = "Group " + str(len(self.__group_list) + 1)
            self.__group_list.append(group)
            self.__group_list[-1].parent = self
    def copy(self):
        '''
        Returns a copy of the Series.
        TODO: also copy the colors property.
        '''
        new_series = Series()
        new_series.__name = self.__name
        if self.__range is not None:
            new_series.__range = self.__range[:]
        for group in self:
            new_series.add_group(group.copy())
        return new_series
    def get_names(self):
        '''
        Returns a list of the names of all groups in the Series; unnamed
        groups get a positional fallback label "Group N".
        '''
        # BUG FIX: Group has no index() method -- the original called
        # group.index() and raised AttributeError for unnamed groups.
        names = []
        for position, group in enumerate(self):
            if group.name is None:
                names.append('Group ' + str(position + 1))
            else:
                names.append(group.name)
        return names
    def to_list(self):
        '''
        Returns a flat list with the content of all groups' data: numbers
        are appended directly, points are unpacked into their coordinates.
        '''
        big_list = []
        for group in self:
            for data in group:
                if type(data.content) in NUMTYPES:
                    big_list.append(data.content)
                else:
                    big_list = big_list + list(data.content)
        return big_list
    def __getitem__(self, key):
        '''
        Makes the Series iterable/indexable, backed by the group_list.
        '''
        return self.__group_list[key]
    def __str__(self):
        '''
        Returns a string that represents the Series: "name ['group', ...]".
        '''
        ret = ""
        if self.name is not None:
            ret += self.name + " "
        if len(self) > 0:
            list_str = [str(item) for item in self]
            ret += str(list_str)
        else:
            ret += "[]"
        return ret
    def __len__(self):
        '''
        Returns the length of the Series, based on the group_list property.
        '''
        return len(self.group_list)
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings when executed directly.
    doctest.testmod()
/* Generated file to emulate the ts.refactor.addOrRemoveBracesToArrowFunction namespace. */
export * from "../refactors/convertOverloadListToSingleSignature.js";
export * from "../refactors/addOrRemoveBracesToArrowFunction.js"; | typescript | github | https://github.com/microsoft/TypeScript | src/services/_namespaces/ts.refactor.addOrRemoveBracesToArrowFunction.ts |
'''
Created on Oct 15, 2013
@author: petrychenko
HTTP Application that process HTTP requests to certain URLs.
See URLs in function @route decorators
'''
import bottle
from bottle import route, request
import json
from controllers import AppController
class ObjectEncoder(json.JSONEncoder):
    """A custom JSONEncoder that serializes arbitrary objects by falling
    back to their attribute dictionary (``__dict__``).
    """
    def default(self, obj):
        # json.JSONEncoder calls default() only for values it cannot
        # serialize natively.
        # BUG FIX: the original returned ``{ obj.__dict__ }`` -- a *set*
        # containing a dict -- which always raises TypeError because dicts
        # are unhashable; the intent was clearly the dict itself. Also,
        # ``isinstance(obj, object)`` is always True, so the fallback to
        # the base class was unreachable.
        try:
            return obj.__dict__
        except AttributeError:
            # Objects without __dict__ (e.g. slotted/builtin types): let
            # the base class raise the standard TypeError.
            return json.JSONEncoder.default(self, obj)
# The controller is supposed to be stateless, so it is safe to share one
# singleton instance between all users/requests.
controller = AppController();
def _get_json_body():
    """Read the current bottle request body and parse it as UTF-8 JSON."""
    raw_bytes = request.body.read()
    return json.loads(raw_bytes.decode('UTF-8'))
@route('/users/', method='PUT')
def createUser():
    """ PUT json-encoded user record
    format:
    { "email": "petrichenko@gmail.com", "password": "123", "first_name": "Ivan", "last_name": "Petrychenko" }
    first_name and last_name are optional and can be omitted
    """
    # Parse the request body and delegate persistence to the shared controller.
    controller.createUser( _get_json_body() )
@route('/users/<user_id:int>/events/', method='PUT')
def createEvent(user_id):
    """ PUT json-encoded event record
    format:
    { "name": "birthday's party", "comment": "white tie DC", "start": 1381878005, "end" : 1382310212, "lon" : 37.322325, "lat" : -122.053828}
    comment is optional and can be omitted
    """
    # user_id is extracted from the URL by bottle; body carries the event JSON.
    controller.createEvent( user_id, _get_json_body() )
@route('/users/<user_id:int>/events/<event_id:int>/', method='DELETE')
def deleteEvent(user_id, event_id):
    """Delete a single event by its id; no request body is required."""
    controller.deleteEvent(user_id, event_id)
@route('/users/<user_id:int>/events/queries/delete/', method='POST')
def deleteEvents(user_id):
    """Delete several events at once.

    The body shall contain {"ids": [1,2,3]}.
    """
    event_ids = _get_json_body()["ids"]
    controller.deleteEvents(user_id, event_ids)
@route('/users/<user_id:int>/events/queries/filter/', method='POST')
def filterEvents(user_id):
    """Return a JSON list of events matching the posted set of criteria.

    The body shall contain at most one of the geo-filters, polygon or
    radius; if both are present, radius is ignored.  Time constraints are
    UNIX epoch timestamps.
    {
        "radius": {
            "point" : {"lon" : 37.322325, "lat" : -122.053828},
            "miles" : 10
        },
        "polygon": {
            "points" : [ {"lon" : 37.322325, "lat" : -122.053828}, {"lon" : 37.322325, "lat" : -122.053028}, {"lon" : 37.322025, "lat" : -122.053028}, {"lon" : 37.322025, "lat" : -122.053828} ]
        },
        "timespan" : {
            "start" : 1381805402,
            "end" : 1384573841
        }
    }
    """
    matches = controller.filterEvents(user_id, _get_json_body())
    result = [event.__dict__ for event in matches]
    return json.dumps(result, cls=ObjectEncoder)
# Start the bottle development server when run as a script.
if __name__ == '__main__':
    bottle.run(host="localhost",port=9090, reloader=False, debug=True)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from readthedocs.projects.version_handling import version_windows
class TestVersionWindows(unittest.TestCase):
    """Exercise version_windows() against a fixed list of version strings."""

    def setUp(self):
        # A mix of 0.x, 1.x and 2.x releases plus one junk entry that the
        # windowing code must tolerate.
        self.versions = [
            '0.1.0', '0.2.0', '0.2.1', '0.3.0', '0.3.1',
            '1.1.0', '1.2.0', '1.3.0',
            '2.1.0', '2.2.0', '2.3.0', '2.3.1', '2.3.2', '2.3.3',
            'nonsense-version',
        ]

    def test_major(self):
        self.assertEqual(version_windows(self.versions, major=1), ['2.3.3'])
        self.assertEqual(version_windows(self.versions, major=2),
                         ['1.3.0', '2.3.3'])
        self.assertEqual(version_windows(self.versions, major=3),
                         ['0.3.1', '1.3.0', '2.3.3'])
        # Asking for more major windows than exist returns what is available.
        self.assertEqual(version_windows(self.versions, major=4),
                         ['0.3.1', '1.3.0', '2.3.3'])

    def test_minor(self):
        self.assertEqual(version_windows(self.versions, minor=1), ['2.3.3'])
        self.assertEqual(version_windows(self.versions, minor=2),
                         ['2.2.0', '2.3.3'])
        self.assertEqual(version_windows(self.versions, minor=3),
                         ['2.1.0', '2.2.0', '2.3.3'])
        self.assertEqual(version_windows(self.versions, minor=4),
                         ['2.1.0', '2.2.0', '2.3.3'])

    def test_point(self):
        self.assertEqual(version_windows(self.versions, point=1), ['2.3.3'])
        self.assertEqual(version_windows(self.versions, point=2),
                         ['2.3.2', '2.3.3'])
        self.assertEqual(version_windows(self.versions, point=3),
                         ['2.3.1', '2.3.2', '2.3.3'])
        self.assertEqual(version_windows(self.versions, point=4),
                         ['2.3.0', '2.3.1', '2.3.2', '2.3.3'])
        self.assertEqual(version_windows(self.versions, point=5),
                         ['2.3.0', '2.3.1', '2.3.2', '2.3.3'])

    def test_sort(self):
        windowed = version_windows(self.versions, major=2, minor=2, point=1)
        self.assertEqual(windowed, ['1.2.0', '1.3.0', '2.2.0', '2.3.3'])
        self.assertTrue('2.3.0' not in windowed)
        # There is no 1.x in this list.
        # There are two 2.x versions.
        # There are two point releases if available.
        windowed = version_windows(self.versions, major=1, minor=2, point=2)
        self.assertEqual(windowed, ['2.2.0', '2.3.2', '2.3.3'])
        windowed = version_windows(self.versions, major=1, minor=2, point=3)
        self.assertEqual(windowed, ['2.2.0', '2.3.1', '2.3.2', '2.3.3'])
        # Input order must not affect the result.
        windowed = version_windows(['2.3.2', '2.2.0', '2.3.0', '2.3.3', '2.3.1'],
                                   major=1, minor=2, point=3)
        self.assertEqual(windowed, ['2.2.0', '2.3.1', '2.3.2', '2.3.3'])

    def test_unicode(self):
        # Non-ASCII version strings must not raise, for both byte and
        # unicode inputs.
        version_windows(['release-ç', '1.2.¢'], major=2, minor=2, point=1)
        version_windows([u'release-ç', u'1.2.¢'], major=2, minor=2, point=1)
        self.assertTrue(True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# coding=utf-8
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Helpers for handling compatibility differences
between Python 2 and Python 3.
"""
from __future__ import absolute_import
from __future__ import print_function
from future.utils import text_type
# Select an in-memory stream type that matches the native str type.
if str != bytes:
    # On Python 3 and higher, str and bytes
    # are not equivalent. We must use StringIO for
    # doing io on native strings.
    from io import StringIO as NativeStringIO
else:
    # On Python 2 and older, str and bytes
    # are equivalent. We must use BytesIO for
    # doing io on native strings.
    from io import BytesIO as NativeStringIO
def bytes2NativeString(x, encoding='utf-8'):
    """
    Convert C{bytes} to a native C{str}.

    On Python 3 the bytes are decoded with *encoding*; on Python 2,
    where str and bytes are the same type, the value is returned as-is.

    @param x: a string of type C{bytes}
    @param encoding: an optional codec, default: 'utf-8'
    @return: a string of type C{str}
    """
    needs_decoding = isinstance(x, bytes) and str != bytes
    return x.decode(encoding) if needs_decoding else x
def unicode2bytes(x, encoding='utf-8'):
    """
    Convert a unicode string to C{bytes}.

    @param x: a unicode string, of type C{unicode} on Python 2,
              or C{str} on Python 3.
    @param encoding: an optional codec, default: 'utf-8'
    @return: a string of type C{bytes}
    """
    if not isinstance(x, text_type):
        return x
    return x.encode(encoding)
# Public API of this compatibility module.
__all__ = [
    "NativeStringIO",
    "bytes2NativeString",
    "unicode2bytes"
]
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.op.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation used by classes to make TensorFlow operations conveniently accessible via {@code
* org.tensorflow.op.Ops}.
*
* <p>An annotation processor ({@code org.tensorflow.processor.OperatorProcessor}) builds the
* {@code Ops} class by aggregating all classes annotated as {@code @Operator}s. Each annotated
* class <b>must</b> have at least one public static factory method named {@code create} that
* accepts a {@link org.tensorflow.op.Scope} as its first argument. The processor then adds a
* convenience method in the {@code Ops} class. For example:
*
* <pre>{@code
* @Operator
* public final class MyOp implements Op {
* public static MyOp create(Scope scope, Operand operand) {
* ...
* }
* }
* }</pre>
*
* <p>results in a method in the {@code Ops} class
*
* <pre>{@code
* import org.tensorflow.op.Ops;
* ...
* Ops ops = Ops.create(graph);
* ...
* ops.myOp(operand);
* // and has exactly the same effect as calling
* // MyOp.create(ops.getScope(), operand);
* }</pre>
*/
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.SOURCE)
public @interface Operator {
  // SOURCE retention: consumed only by the annotation processor at compile
  // time; never present in generated bytecode.

  /**
   * Specify an optional group within the {@code Ops} class.
   *
   * <p>By default, an annotation processor will create convenience methods directly in the {@code
   * Ops} class. An annotated operator may optionally choose to place the method within a group. For
   * example:
   *
   * <pre>{@code
   * @Operator(group="math")
   * public final class Add extends PrimitiveOp implements Operand {
   *   ...
   * }
   * }</pre>
   *
   * <p>results in the {@code add} method placed within a {@code math} group within the {@code Ops}
   * class.
   *
   * <pre>{@code
   * ops.math().add(...);
   * }</pre>
   *
   * <p>The group name must be a <a
   * href="https://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html#jls-3.8">valid Java
   * identifier</a>.
   */
  // Default "" means: no group -- the method is generated directly on Ops.
  String group() default "";

  /**
   * Name for the wrapper method used in the {@code Ops} class.
   *
   * <p>By default, a processor derives the method name in the {@code Ops} class from the class name
   * of the operator. This attribute allow you to provide a different name instead. For example:
   *
   * <pre>{@code
   * @Operator(name="myOperation")
   * public final class MyRealOperation implements Operand {
   *   public static MyRealOperation create(...)
   * }
   * }</pre>
   *
   * <p>results in this method added to the {@code Ops} class
   *
   * <pre>{@code
   * ops.myOperation(...);
   * // and is the same as calling
   * // MyRealOperation.create(...)
   * }</pre>
   *
   * <p>The name must be a <a
   * href="https://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html#jls-3.8">valid Java
   * identifier</a>.
   */
  // Default "" means: derive the method name from the operator class name.
  String name() default "";
}
#!/usr/bin/env python
"""
highlightedtextplugin.py
A display text custom widget plugin for Qt Designer.
Copyright (C) 2006 David Boddie <david@boddie.org.uk>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from PyQt4 import QtCore, QtGui, QtDesigner
from highlightedtextedit import HighlightedTextEdit
def Q_TYPEID(class_name):
    """Map a PyQt Designer extension class name to its Qt interface id (IID).

    BUG FIX: the original ignored ``class_name`` and unconditionally returned
    the task-menu IID.  The task-menu IID is kept as the fallback so the one
    existing call site behaves identically, but the argument is now honoured
    for the other Qt 4 Designer extension interfaces.
    """
    iids = {
        "QPyDesignerTaskMenuExtension": "com.trolltech.Qt.Designer.TaskMenu",
        "QPyDesignerContainerExtension": "com.trolltech.Qt.Designer.Container",
        "QPyDesignerMemberSheetExtension": "com.trolltech.Qt.Designer.MemberSheet",
        "QPyDesignerPropertySheetExtension": "com.trolltech.Qt.Designer.PropertySheet",
    }
    return iids.get(class_name, "com.trolltech.Qt.Designer.TaskMenu")
class HighlightedTextEditPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
    """HighlightedTextEditPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin)

    Provides a Python custom plugin for Qt Designer by implementing the
    QDesignerCustomWidgetPlugin via a PyQt-specific custom plugin class.
    """

    # The __init__() method is only used to set up the plugin and define its
    # initialized variable.
    def __init__(self, parent=None):
        super(HighlightedTextEditPlugin, self).__init__(parent)
        self.initialized = False

    # The initialize() and isInitialized() methods allow the plugin to set up
    # any required resources, ensuring that this can only happen once for each
    # plugin.
    def initialize(self, formEditor):
        if self.initialized:
            return
        # We register an extension factory to add a extension to each form's
        # task menu.
        manager = formEditor.extensionManager()
        if manager:
            self.factory = HighlightedTextEditTaskMenuFactory(manager)
            manager.registerExtensions(
                self.factory, Q_TYPEID("QPyDesignerTaskMenuExtension")
            )
        self.initialized = True

    def isInitialized(self):
        return self.initialized

    # This factory method creates new instances of our custom widget with the
    # appropriate parent.
    def createWidget(self, parent):
        widget = HighlightedTextEdit(parent)
        # We install an event filter on the text editor to prevent the
        # contents from being modified outside the custom editor dialog.
        widget.installEventFilter(self)
        return widget

    # Swallow key and focus events that reach the embedded QTextEdit so its
    # contents cannot be edited inline in Designer; all other events pass.
    def eventFilter(self, obj, event):
        if isinstance(obj, QtGui.QTextEdit):
            if isinstance(event, QtGui.QKeyEvent):
                return True
            elif isinstance(event, QtGui.QFocusEvent):
                return True
        return False

    # This method returns the name of the custom widget class that is provided
    # by this plugin.
    def name(self):
        return "HighlightedTextEdit"

    # Returns the name of the group in Qt Designer's widget box that this
    # widget belongs to.
    def group(self):
        return "PyQt Examples"

    # Returns the icon used to represent the custom widget in Qt Designer's
    # widget box.
    def icon(self):
        return QtGui.QIcon(_logo_pixmap)

    # Returns a short description of the custom widget for use in a tool tip.
    def toolTip(self):
        return ""

    # Returns a short description of the custom widget for use in a "What's
    # This?" help message for the widget.
    def whatsThis(self):
        return ""

    # Returns True if the custom widget acts as a container for other widgets;
    # otherwise returns False. Note that plugins for custom containers also
    # need to provide an implementation of the QDesignerContainerExtension
    # interface if they need to add custom editing support to Qt Designer.
    def isContainer(self):
        return False

    # Returns an XML description of a custom widget instance that describes
    # default values for its properties. Each custom widget created by this
    # plugin will be configured using this description.
    def domXml(self):
        return '<widget class="HighlightedTextEdit" name="highlightedTextEdit" />\n'

    # Returns the module containing the custom widget class. It may include
    # a module path.
    def includeFile(self):
        return "highlightedtextedit"
class HighlightedTextEditTaskMenuFactory(QtDesigner.QExtensionFactory):
    """HighlightedTextEditTaskMenuFactory(QtDesigner.QExtensionFactory)

    Provides HighlightedTextEditTaskMenu task-menu extensions for
    HighlightedTextEdit widgets edited in Qt Designer.
    """

    def __init__(self, parent = None):
        QtDesigner.QExtensionFactory.__init__(self, parent)

    # This standard factory function returns an object to represent a task
    # menu entry.
    def createExtension(self, obj, iid, parent):
        # Only the task-menu interface is supported by this factory.
        if iid != Q_TYPEID("QPyDesignerTaskMenuExtension"):
            return None
        # We pass the instance of the custom widget to the object representing
        # the task menu entry so that the contents of the custom widget can be
        # modified.
        if isinstance(obj, HighlightedTextEdit):
            return HighlightedTextEditTaskMenu(obj, parent)
        return None
class HighlightedTextEditTaskMenu(QtDesigner.QPyDesignerTaskMenuExtension):
    """HighlightedTextEditTaskMenu(QtDesigner.QPyDesignerTaskMenuExtension)

    Provides a task menu entry to enable text in the highlighted text
    editor to be edited via a dialog.
    """

    def __init__(self, textEdit, parent):
        super(HighlightedTextEditTaskMenu, self).__init__(parent)
        self.textEdit = textEdit
        # Create the action to be added to the form's existing task menu
        # and connect it to a slot in this class.
        self.editStateAction = QtGui.QAction("Edit Text...", self,
                triggered=self.editText)

    # The action Designer triggers when the widget is double-clicked.
    def preferredEditAction(self):
        return self.editStateAction

    # All actions contributed to the task menu (just the edit action here).
    def taskActions(self):
        return [self.editStateAction]

    # The editText() slot is called when the action that represents our task
    # menu entry is triggered. We open a dialog, passing the custom widget as
    # an argument.
    @QtCore.pyqtSlot()
    def editText(self):
        HighlightedTextEditDialog(self.textEdit).exec_()
class HighlightedTextEditDialog(QtGui.QDialog):
    """HighlightedTextEditDialog(QtGui.QDialog)

    Provides a dialog that is used to edit the contents of the custom widget.
    """

    def __init__(self, editor, parent=None):
        super(HighlightedTextEditDialog, self).__init__(parent)
        self.editor = editor
        # Edit a copy of the widget's code in a second HighlightedTextEdit.
        self.textEdit = HighlightedTextEdit()
        self.textEdit.setCode(editor.getCode())
        self.textEdit.installEventFilter(self)

        okButton = QtGui.QPushButton("&OK")
        okButton.clicked.connect(self.updateText)
        cancelButton = QtGui.QPushButton("&Cancel")
        cancelButton.clicked.connect(self.reject)

        buttonLayout = QtGui.QHBoxLayout()
        buttonLayout.addStretch(1)
        buttonLayout.addWidget(okButton)
        buttonLayout.addWidget(cancelButton)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.textEdit)
        layout.addLayout(buttonLayout)
        self.setLayout(layout)

    # Intercept Ctrl+Return in the editor: on key press, start a new text
    # block whose character format uses half the editor font's point size.
    def eventFilter(self, obj, event):
        if obj == self.textEdit:
            if isinstance(event, QtGui.QKeyEvent):
                if event.key() == QtCore.Qt.Key_Return and \
                   int(event.modifiers() & QtCore.Qt.ControlModifier) == QtCore.Qt.ControlModifier:
                    if event.type() == QtGui.QEvent.KeyPress:
                        cursor = self.textEdit.textCursor()
                        char_format = cursor.charFormat()
                        char_format.setFontPointSize(self.textEdit.font.pointSizeF()/2.0)
                        block_format = cursor.blockFormat()
                        cursor.insertBlock(block_format, char_format)
                        self.textEdit.setTextCursor(cursor)
                    # Consume both press and release of the shortcut.
                    return True
        return False

    # When we update the contents of the custom widget, we access its
    # properties via the QDesignerFormWindowInterface API so that Qt Designer
    # can integrate the changes we make into its undo-redo management.
    def updateText(self):
        formWindow = QtDesigner.QDesignerFormWindowInterface.findFormWindow(self.editor)
        if formWindow:
            formWindow.cursor().setProperty("code", self.textEdit.getCode())
        self.accept()
# Define the image used for the icon.
_logo_16x16_xpm = [
"16 16 6 1",
" c None",
". c #FFFFFF",
"a c #000000",
"b c #FF4040",
"c c #40C040",
"d c #4040FF",
".........b......",
".........b......",
".........b......",
"...aaaa..bbbb...",
"..a...a..b...b..",
"..a...a..b...b..",
"..a...a..b...b..",
"...aaa.a.bbbb...",
".............d..",
".............d..",
".............d..",
"...ccc....dddd..",
"..c...c..d...d..",
"..c......d...d..",
"..c...c..d...d..",
"...ccc....dddd.."]
_logo_pixmap = QtGui.QPixmap(_logo_16x16_xpm) | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
import pytest
from pandas._libs.parsers import (
_maybe_upcast,
na_values,
)
import pandas as pd
from pandas import NA
import pandas._testing as tm
from pandas.core.arrays import (
ArrowStringArray,
BooleanArray,
FloatingArray,
IntegerArray,
)
def test_maybe_upcast(any_real_numpy_dtype):
# GH#36712
dtype = np.dtype(any_real_numpy_dtype)
na_value = na_values[dtype]
arr = np.array([1, 2, na_value], dtype=dtype)
result = _maybe_upcast(arr, use_dtype_backend=True)
expected_mask = np.array([False, False, True])
if issubclass(dtype.type, np.integer):
expected = IntegerArray(arr, mask=expected_mask)
else:
expected = FloatingArray(arr, mask=expected_mask)
tm.assert_extension_array_equal(result, expected)
def test_maybe_upcast_no_na(any_real_numpy_dtype):
# GH#36712
arr = np.array([1, 2, 3], dtype=any_real_numpy_dtype)
result = _maybe_upcast(arr, use_dtype_backend=True)
expected_mask = np.array([False, False, False])
if issubclass(np.dtype(any_real_numpy_dtype).type, np.integer):
expected = IntegerArray(arr, mask=expected_mask)
else:
expected = FloatingArray(arr, mask=expected_mask)
tm.assert_extension_array_equal(result, expected)
def test_maybe_upcaste_bool():
    # GH#36712: the boolean NA sentinel produces a masked BooleanArray entry.
    dtype = np.bool_
    sentinel = na_values[dtype]
    arr = np.array([True, False, sentinel], dtype="uint8").view(dtype)
    result = _maybe_upcast(arr, use_dtype_backend=True)
    expected = BooleanArray(arr, mask=np.array([False, False, True]))
    tm.assert_extension_array_equal(result, expected)
def test_maybe_upcaste_bool_no_nan():
    # GH#36712: all-valid boolean input keeps an all-False mask.
    arr = np.array([True, False, False], dtype="uint8").view(np.bool_)
    result = _maybe_upcast(arr, use_dtype_backend=True)
    expected = BooleanArray(arr, mask=np.array([False, False, False]))
    tm.assert_extension_array_equal(result, expected)
def test_maybe_upcaste_all_nan():
    # GH#36712: every entry is the NA sentinel, so every entry is masked.
    sentinel = na_values[np.int64]
    arr = np.array([sentinel, sentinel], dtype=np.int64)
    result = _maybe_upcast(arr, use_dtype_backend=True)
    expected = IntegerArray(arr, mask=np.array([True, True]))
    tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("val", [na_values[np.object_], "c"])
def test_maybe_upcast_object(val, string_storage):
    # GH#36712: object arrays upcast to the configured string array backend.
    pa = pytest.importorskip("pyarrow")

    with pd.option_context("mode.string_storage", string_storage):
        obj_arr = np.array(["a", "b", val], dtype=np.object_)
        upcast = _maybe_upcast(obj_arr, use_dtype_backend=True)

        missing_as_c = val == "c"
        if string_storage == "python":
            last = "c" if missing_as_c else NA
            expected = pd.array(["a", "b", last], dtype=pd.StringDtype())
        else:
            last = "c" if missing_as_c else None
            expected = ArrowStringArray(pa.array(["a", "b", last]))
        tm.assert_extension_array_equal(upcast, expected)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <kevin.breit@kevinbreit.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata describing the module's maturity and support tier.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_device
short_description: Manage devices in the Meraki cloud
version_added: "2.7"
description:
- Visibility into devices associated to a Meraki environment.
notes:
- This module does not support claiming of devices or licenses into a Meraki organization.
- More information about the Meraki API can be found at U(https://dashboard.meraki.com/api_docs).
- Some of the options are likely only used for developers within Meraki.
options:
state:
description:
- Query an organization.
choices: [absent, present, query]
default: query
org_name:
description:
- Name of organization.
- If C(clone) is specified, C(org_name) is the name of the new organization.
aliases: [ organization ]
org_id:
description:
- ID of organization.
net_name:
description:
- Name of a network.
aliases: [network]
net_id:
description:
- ID of a network.
serial:
description:
- Serial number of a device to query.
hostname:
description:
- Hostname of network device to search for.
aliases: [name]
model:
description:
- Model of network device to search for.
tags:
description:
- Space delimited list of tags to assign to device.
lat:
description:
- Latitude of device's geographic location.
- Use negative number for southern hemisphere.
aliases: [latitude]
lng:
description:
- Longitude of device's geographic location.
- Use negative number for western hemisphere.
aliases: [longitude]
address:
description:
- Postal address of device's location.
move_map_marker:
description:
- Whether or not to set the latitude and longitude of a device based on the new address.
- Only applies when C(lat) and C(lng) are not specified.
type: bool
serial_lldp_cdp:
description:
- Serial number of device to query LLDP/CDP information from.
lldp_cdp_timespan:
description:
- Timespan, in seconds, used to query LLDP and CDP information.
- Must be less than 1 month.
serial_uplink:
description:
- Serial number of device to query uplink information from.
note:
description:
- Informational notes about a device.
- Limited to 255 characters.
version_added: '2.8'
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query all devices in an organization.
meraki_device:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Query all devices in a network.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
state: query
delegate_to: localhost
- name: Query a device by serial number.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: query
delegate_to: localhost
- name: Lookup uplink information about a device.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial_uplink: ABC-123
state: query
delegate_to: localhost
- name: Lookup LLDP and CDP information about devices connected to specified device.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial_lldp_cdp: ABC-123
state: query
delegate_to: localhost
- name: Lookup a device by hostname.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
hostname: main-switch
state: query
delegate_to: localhost
- name: Query all devices of a specific model.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
model: MR26
state: query
delegate_to: localhost
- name: Update information about a device.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: present
serial: '{{serial}}'
name: mr26
address: 1060 W. Addison St., Chicago, IL
lat: 41.948038
lng: -87.65568
tags: recently-added
delegate_to: localhost
- name: Claim a device into a network.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: present
delegate_to: localhost
- name: Remove a device from a network.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: absent
delegate_to: localhost
'''
RETURN = r'''
response:
description: Data returned from Meraki dashboard.
type: dict
returned: info
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def format_tags(tags):
    """Wrap a space-delimited tag string in single leading/trailing spaces.

    BUG FIX: when the ``tags`` module parameter is not supplied it is None,
    and the original implementation formatted it into the literal string
    " None ", which main() would then send to the Meraki API as the device's
    tags.  Preserve None instead so "no tags specified" stays a no-op value.
    """
    if tags is None:
        return None
    return " {tags} ".format(tags=tags)
def is_device_valid(meraki, serial, data):
    """Return True when a device with the given serial appears in data."""
    return any(device['serial'] == serial for device in data)
def get_org_devices(meraki, org_id):
    """Fetch the full device inventory for an organization, failing the
    module if the dashboard does not answer with HTTP 200."""
    inventory_path = meraki.construct_path('get_all_org', org_id=org_id)
    devices = meraki.request(inventory_path, method='GET')
    if meraki.status != 200:
        meraki.fail_json(msg='Failed to query all devices belonging to the organization')
    return devices
def main():
    # define the available arguments/parameters that a user can pass to
    # the module
    argument_spec = meraki_argument_spec()
    argument_spec.update(state=dict(type='str', choices=['absent', 'present', 'query'], default='query'),
                         net_name=dict(type='str', aliases=['network']),
                         net_id=dict(type='str'),
                         serial=dict(type='str'),
                         serial_uplink=dict(type='str'),
                         serial_lldp_cdp=dict(type='str'),
                         lldp_cdp_timespan=dict(type='int'),
                         hostname=dict(type='str', aliases=['name']),
                         model=dict(type='str'),
                         tags=dict(type='str'),
                         lat=dict(type='float', aliases=['latitude']),
                         lng=dict(type='float', aliases=['longitude']),
                         address=dict(type='str'),
                         move_map_marker=dict(type='bool'),
                         note=dict(type='str'),
                         )

    # seed the result dict in the object
    # we primarily care about changed and state
    # change is if this module effectively modified the target
    # state will include any data that you want your module to pass back
    # for consumption, for example, in a subsequent task
    result = dict(
        changed=False,
    )

    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           )
    meraki = MerakiModule(module, function='device')

    # Cross-parameter validation that the argument spec cannot express.
    if meraki.params['serial_lldp_cdp'] and not meraki.params['lldp_cdp_timespan']:
        meraki.fail_json(msg='lldp_cdp_timespan is required when querying LLDP and CDP information')
    if meraki.params['net_name'] and meraki.params['net_id']:
        meraki.fail_json(msg='net_name and net_id are mutually exclusive')

    meraki.params['follow_redirects'] = 'all'

    # Endpoint templates for this module, registered in the shared URL catalog.
    query_urls = {'device': '/networks/{net_id}/devices'}
    query_org_urls = {'device': '/organizations/{org_id}/inventory'}
    query_device_urls = {'device': '/networks/{net_id}/devices/'}
    claim_device_urls = {'device': '/networks/{net_id}/devices/claim'}
    bind_org_urls = {'device': '/organizations/{org_id}/claim'}
    update_device_urls = {'device': '/networks/{net_id}/devices/'}
    delete_device_urls = {'device': '/networks/{net_id}/devices/'}

    meraki.url_catalog['get_all'].update(query_urls)
    meraki.url_catalog['get_all_org'] = query_org_urls
    meraki.url_catalog['get_device'] = query_device_urls
    meraki.url_catalog['create'] = claim_device_urls
    meraki.url_catalog['bind_org'] = bind_org_urls
    meraki.url_catalog['update'] = update_device_urls
    meraki.url_catalog['delete'] = delete_device_urls

    payload = None

    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    # FIXME: Work with Meraki so they can implement a check mode
    if module.check_mode:
        meraki.exit_json(**meraki.result)

    # execute checks for argument completeness

    # manipulate or modify the state as needed (this is going to be the
    # part where your module will do what it needs to do)
    # Resolve org and (optionally) network identifiers from names.
    org_id = meraki.params['org_id']
    if org_id is None:
        org_id = meraki.get_org_id(meraki.params['org_name'])
    nets = meraki.get_nets(org_id=org_id)
    net_id = None
    if meraki.params['net_id'] or meraki.params['net_name']:
        net_id = meraki.params['net_id']
        if net_id is None:
            net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)

    if meraki.params['state'] == 'query':
        # Network-scoped queries when a network was given; otherwise query
        # the whole organization inventory.
        if meraki.params['net_name'] or meraki.params['net_id']:
            device = []
            if meraki.params['serial']:
                path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial']
                request = meraki.request(path, method='GET')
                device.append(request)
                meraki.result['data'] = device
            elif meraki.params['serial_uplink']:
                path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial_uplink'] + '/uplink'
                meraki.result['data'] = (meraki.request(path, method='GET'))
            elif meraki.params['serial_lldp_cdp']:
                if meraki.params['lldp_cdp_timespan'] > 2592000:
                    meraki.fail_json(msg='LLDP/CDP timespan must be less than a month (2592000 seconds)')
                path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial_lldp_cdp'] + '/lldp_cdp'
                path = path + '?timespan=' + str(meraki.params['lldp_cdp_timespan'])
                device.append(meraki.request(path, method='GET'))
                meraki.result['data'] = device
            elif meraki.params['hostname']:
                path = meraki.construct_path('get_all', net_id=net_id)
                devices = meraki.request(path, method='GET')
                for unit in devices:
                    if unit['name'] == meraki.params['hostname']:
                        device.append(unit)
                        meraki.result['data'] = device
            elif meraki.params['model']:
                path = meraki.construct_path('get_all', net_id=net_id)
                devices = meraki.request(path, method='GET')
                device_match = []
                for device in devices:
                    if device['model'] == meraki.params['model']:
                        device_match.append(device)
                meraki.result['data'] = device_match
            else:
                path = meraki.construct_path('get_all', net_id=net_id)
                request = meraki.request(path, method='GET')
                meraki.result['data'] = request
        else:
            path = meraki.construct_path('get_all_org', org_id=org_id)
            devices = meraki.request(path, method='GET')
            if meraki.params['serial']:
                for device in devices:
                    if device['serial'] == meraki.params['serial']:
                        meraki.result['data'] = device
                # NOTE(review): if no inventory entry matches the serial,
                # result['data'] is never set on this path -- confirm intended.
            else:
                meraki.result['data'] = devices
    elif meraki.params['state'] == 'present':
        device = []
        # With a hostname, update an existing claimed device; without one,
        # claim the serial into the org or network as appropriate.
        if meraki.params['hostname']:
            query_path = meraki.construct_path('get_all', net_id=net_id)
            device_list = meraki.request(query_path, method='GET')
            if is_device_valid(meraki, meraki.params['serial'], device_list):
                payload = {'name': meraki.params['hostname'],
                           'tags': format_tags(meraki.params['tags']),
                           'lat': meraki.params['lat'],
                           'lng': meraki.params['lng'],
                           'address': meraki.params['address'],
                           'moveMapMarker': meraki.params['move_map_marker'],
                           'notes': meraki.params['note'],
                           }
                query_path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial']
                device_data = meraki.request(query_path, method='GET')
                # Keys the dashboard reports that we never send back.
                ignore_keys = ['lanIp', 'serial', 'mac', 'model', 'networkId', 'moveMapMarker', 'wan1Ip', 'wan2Ip']
                if meraki.is_update_required(device_data, payload, optional_ignore=ignore_keys):
                    path = meraki.construct_path('update', net_id=net_id) + meraki.params['serial']
                    updated_device = []
                    updated_device.append(meraki.request(path, method='PUT', payload=json.dumps(payload)))
                    meraki.result['data'] = updated_device
                    meraki.result['changed'] = True
        else:
            if net_id is None:
                device_list = get_org_devices(meraki, org_id)
                if is_device_valid(meraki, meraki.params['serial'], device_list) is False:
                    payload = {'serial': meraki.params['serial']}
                    path = meraki.construct_path('bind_org', org_id=org_id)
                    created_device = []
                    created_device.append(meraki.request(path, method='POST', payload=json.dumps(payload)))
                    meraki.result['data'] = created_device
                    meraki.result['changed'] = True
            else:
                query_path = meraki.construct_path('get_all', net_id=net_id)
                device_list = meraki.request(query_path, method='GET')
                if is_device_valid(meraki, meraki.params['serial'], device_list) is False:
                    if net_id:
                        payload = {'serial': meraki.params['serial']}
                        path = meraki.construct_path('create', net_id=net_id)
                        created_device = []
                        created_device.append(meraki.request(path, method='POST', payload=json.dumps(payload)))
                        meraki.result['data'] = created_device
                        meraki.result['changed'] = True
    elif meraki.params['state'] == 'absent':
        device = []
        query_path = meraki.construct_path('get_all', net_id=net_id)
        device_list = meraki.request(query_path, method='GET')
        if is_device_valid(meraki, meraki.params['serial'], device_list) is True:
            path = meraki.construct_path('delete', net_id=net_id)
            path = path + meraki.params['serial'] + '/remove'
            request = meraki.request(path, method='POST')
            meraki.result['changed'] = True

    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    meraki.exit_json(**meraki.result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
# Support for the iTunes format
# Copyright 2010-2020 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..util import FeedParserDict
class Namespace(object):
    """Mixin handling elements from the iTunes podcast namespaces.

    Most iTunes tags mirror standard feed concepts, so the bulk of the
    handlers below simply delegate to the corresponding core handler on
    the mixing class; only owner/keywords/category/image/block/explicit
    need iTunes-specific logic.
    """

    supported_namespaces = {
        # Canonical namespace
        'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
        # Extra namespace
        'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
    }

    # -- Simple delegations to the core handlers ------------------------

    def _start_itunes_author(self, attrs_d):
        self._start_author(attrs_d)

    def _end_itunes_author(self):
        self._end_author()

    def _end_itunes_category(self):
        self._end_category()

    def _start_itunes_name(self, attrs_d):
        self._start_name(attrs_d)

    def _end_itunes_name(self):
        self._end_name()

    def _start_itunes_email(self, attrs_d):
        self._start_email(attrs_d)

    def _end_itunes_email(self):
        self._end_email()

    def _start_itunes_subtitle(self, attrs_d):
        self._start_subtitle(attrs_d)

    def _end_itunes_subtitle(self):
        self._end_subtitle()

    def _start_itunes_summary(self, attrs_d):
        self._start_summary(attrs_d)

    def _end_itunes_summary(self):
        self._end_summary()

    # -- iTunes-specific handlers ---------------------------------------

    def _start_itunes_owner(self, attrs_d):
        # <itunes:owner> maps onto the feed's publisher.
        self.inpublisher = 1
        self.push('publisher', 0)

    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')

    def _end_itunes_keywords(self):
        # Keywords arrive as one comma-separated string; every non-blank
        # entry becomes its own tag.
        for raw_term in self.pop('itunes_keywords').split(','):
            term = raw_term.strip()
            if term:
                self._add_tag(term, 'http://www.itunes.com/', None)

    def _start_itunes_category(self, attrs_d):
        self._add_tag(attrs_d.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)

    def _start_itunes_image(self, attrs_d):
        self.push('itunes_image', 0)
        # Accept either 'href' (standard) or 'url' as the image location.
        href = attrs_d.get('href') or attrs_d.get('url')
        if href:
            self._get_context()['image'] = FeedParserDict({'href': href})

    _start_itunes_link = _start_itunes_image

    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._get_context()['itunes_block'] = 1 if value in ('yes', 'Yes') else 0

    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' -> False, anything else -> None.
        # False and None both evaluate as False, so the difference can be
        # ignored by applications that only need to know if the content is
        # explicit.
        if value == 'yes':
            flag = True
        elif value == 'clean':
            flag = False
        else:
            flag = None
        self._get_context()['itunes_explicit'] = flag
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
def set_subdomain_of_default_realm(apps, schema_editor):
    # type: (StateApps, DatabaseSchemaEditor) -> None
    """Backfill the subdomain "zulip" onto the development default realm.

    A no-op outside DEVELOPMENT or when the "zulip.com" realm does not
    exist.  Uses the historical model via `apps`, as migrations must.
    """
    if not settings.DEVELOPMENT:
        return
    Realm = apps.get_model('zerver', 'Realm')
    try:
        realm = Realm.objects.get(domain="zulip.com")
    except ObjectDoesNotExist:
        return
    realm.subdomain = "zulip"
    realm.save()
class Migration(migrations.Migration):
    # Adds the nullable, unique `subdomain` column to Realm and backfills
    # the development default realm via a data migration.
    dependencies = [
        ('zerver', '0028_userprofile_tos_version'),
    ]
    operations = [
        migrations.AddField(
            model_name='realm',
            name='subdomain',
            # null=True so pre-existing realms remain valid until a
            # subdomain is assigned; unique=True enforces one realm per
            # subdomain once set.
            field=models.CharField(max_length=40, unique=True, null=True),
        ),
        migrations.RunPython(set_subdomain_of_default_realm)
    ]
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Names of the supported low-level keyboard hooking backends.
X_RECORD_INTERFACE = "XRecord"
ATSPI_INTERFACE = "AT-SPI"
INTERFACES = [X_RECORD_INTERFACE, ATSPI_INTERFACE]
# Set to the active interface instance by IoMediator.__init__ at startup.
CURRENT_INTERFACE = None
# Key codes enumeration
class Key:
    """Symbolic names for special keys.

    Each value is the angle-bracketed token used in AutoKey phrase and
    script text (e.g. "<enter>"); plain characters pass through literally.
    Tokens of the form "<codeNNN>" denote raw key codes.
    """
    LEFT = "<left>"
    RIGHT = "<right>"
    UP = "<up>"
    DOWN = "<down>"
    BACKSPACE = "<backspace>"
    TAB = "<tab>"
    ENTER = "<enter>"
    SCROLL_LOCK = "<scroll_lock>"
    PRINT_SCREEN = "<print_screen>"
    PAUSE = "<pause>"
    MENU = "<menu>"
    # Modifier keys
    CONTROL = "<ctrl>"
    ALT = "<alt>"
    ALT_GR = "<alt_gr>"
    SHIFT = "<shift>"
    SUPER = "<super>"
    HYPER = "<hyper>"
    CAPSLOCK = "<capslock>"
    NUMLOCK = "<numlock>"
    META = "<meta>"
    F1 = "<f1>"
    F2 = "<f2>"
    F3 = "<f3>"
    F4 = "<f4>"
    F5 = "<f5>"
    F6 = "<f6>"
    F7 = "<f7>"
    F8 = "<f8>"
    F9 = "<f9>"
    F10 = "<f10>"
    F11 = "<f11>"
    F12 = "<f12>"
    # Other
    ESCAPE = "<escape>"
    INSERT = "<insert>"
    DELETE = "<delete>"
    HOME = "<home>"
    END = "<end>"
    PAGE_UP = "<page_up>"
    PAGE_DOWN = "<page_down>"
    # Numpad
    NP_INSERT = "<np_insert>"
    NP_DELETE = "<np_delete>"
    NP_HOME = "<np_home>"
    NP_END = "<np_end>"
    NP_PAGE_UP = "<np_page_up>"
    NP_PAGE_DOWN = "<np_page_down>"
    NP_LEFT = "<np_left>"
    NP_RIGHT = "<np_right>"
    NP_UP = "<np_up>"
    NP_DOWN = "<np_down>"
    NP_DIVIDE = "<np_divide>"
    NP_MULTIPLY = "<np_multiply>"
    NP_ADD = "<np_add>"
    NP_SUBTRACT = "<np_subtract>"
    NP_5 = "<np_5>"
    @classmethod
    def is_key(klass, keyString):
        """Return True if keyString names a special key defined above
        or is a raw "<code..." token.
        """
        # Key strings must be treated as case insensitive - always convert to lowercase
        # before doing any comparisons
        return keyString.lower() in klass.__dict__.values() or keyString.startswith("<code")
import datetime, time, threading, Queue, re, logging
_logger = logging.getLogger("iomediator")
# All modifier tokens, including the toggling lock keys.
MODIFIERS = [Key.CONTROL, Key.ALT, Key.ALT_GR, Key.SHIFT, Key.SUPER, Key.HYPER, Key.META, Key.CAPSLOCK, Key.NUMLOCK]
# Modifiers that are physically held down (locks excluded).
HELD_MODIFIERS = [Key.CONTROL, Key.ALT, Key.SUPER, Key.SHIFT, Key.HYPER, Key.META]
NAVIGATION_KEYS = [Key.LEFT, Key.RIGHT, Key.UP, Key.DOWN, Key.BACKSPACE, Key.HOME, Key.END, Key.PAGE_UP, Key.PAGE_DOWN]
#KEY_SPLIT_RE = re.compile("(<.+?>\+{0,1})", re.UNICODE)
# Splits a phrase into literal text and "<key>" / "<key>+" sections.
KEY_SPLIT_RE = re.compile("(<[^<>]+>\+?)", re.UNICODE)
SEND_LOCK = threading.Lock()
from interface import *
from configmanager import *
class IoMediator(threading.Thread):
    """
    The IoMediator is responsible for tracking the state of modifier keys and
    interfacing with the various Interface classes to obtain the correct
    characters to pass to the expansion service.
    This class must not store or maintain any configuration details.
    """
    # List of targets interested in receiving keypress, hotkey and mouse events
    listeners = []
    def __init__(self, service):
        # Dedicated consumer thread: key events are queued by the X hook
        # and drained in run(), so the hook callback never blocks.
        threading.Thread.__init__(self, name="KeypressHandler-thread")
        self.queue = Queue.Queue()
        self.listeners.append(service)
        self.interfaceType = ConfigManager.SETTINGS[INTERFACE_TYPE]
        # Modifier tracking
        self.modifiers = {
                          Key.CONTROL : False,
                          Key.ALT : False,
                          Key.ALT_GR: False,
                          Key.SHIFT : False,
                          Key.SUPER : False,
                          Key.HYPER : False,
                          Key.META : False,
                          Key.CAPSLOCK : False,
                          Key.NUMLOCK : False
                          }
        # NOTE(review): X_EVDEV_INTERFACE and the *Interface classes are not
        # defined in this module; presumably they come from the star import
        # of `interface` - confirm before refactoring those imports.
        if self.interfaceType == X_RECORD_INTERFACE:
            self.interface = XRecordInterface(self, service.app)
        elif self.interfaceType == X_EVDEV_INTERFACE:
            self.interface = EvDevInterface(self, service.app)
        else:
            self.interface = AtSpiInterface(self, service.app)
        global CURRENT_INTERFACE
        CURRENT_INTERFACE = self.interface
    def shutdown(self):
        """Stop the hook interface, unblock run() with a sentinel and join."""
        self.interface.cancel()
        self.queue.put_nowait((None, None, None))
        self.join()
    # Callback methods for Interfaces ----
    def set_modifier_state(self, modifier, state):
        """Force the tracked state of a modifier key."""
        _logger.debug("Set modifier %s to %r", modifier, state)
        self.modifiers[modifier] = state
    def handle_modifier_down(self, modifier):
        """
        Updates the state of the given modifier key to 'pressed'
        """
        _logger.debug("%s pressed", modifier)
        # Lock keys toggle on key-down instead of tracking held state.
        if modifier in (Key.CAPSLOCK, Key.NUMLOCK):
            if self.modifiers[modifier]:
                self.modifiers[modifier] = False
            else:
                self.modifiers[modifier] = True
        else:
            self.modifiers[modifier] = True
    def handle_modifier_up(self, modifier):
        """
        Updates the state of the given modifier key to 'released'.
        """
        _logger.debug("%s released", modifier)
        # Caps and num lock are handled on key down only
        if not modifier in (Key.CAPSLOCK, Key.NUMLOCK):
            self.modifiers[modifier] = False
    def handle_keypress(self, keyCode, windowName, windowClass):
        """
        Looks up the character for the given key code, applying any
        modifiers currently in effect, and passes it to the expansion service.
        """
        self.queue.put_nowait((keyCode, windowName, windowClass))
    def run(self):
        """Consume queued key events until the shutdown sentinel arrives."""
        while True:
            keyCode, windowName, windowClass = self.queue.get()
            if keyCode is None and windowName is None:
                break
            numLock = self.modifiers[Key.NUMLOCK]
            modifiers = self.__getModifiersOn()
            # Shift and caps lock cancel each other out (hence XOR).
            shifted = self.modifiers[Key.CAPSLOCK] ^ self.modifiers[Key.SHIFT]
            key = self.interface.lookup_string(keyCode, shifted, numLock, self.modifiers[Key.ALT_GR])
            # rawKey is the unmodified character for hotkey matching.
            rawKey = self.interface.lookup_string(keyCode, False, False, False)
            for target in self.listeners:
                target.handle_keypress(rawKey, modifiers, key, windowName, windowClass)
            self.queue.task_done()
    def handle_mouse_click(self, rootX, rootY, relX, relY, button, windowInfo):
        """Forward a mouse click event to all registered listeners."""
        for target in self.listeners:
            target.handle_mouseclick(rootX, rootY, relX, relY, button, windowInfo)
    # Methods for expansion service ----
    def send_string(self, string):
        """
        Sends the given string for output.

        The string is split into literal text and "<key>"/"<key>+" tokens;
        modifier tokens followed by '+' are applied to the next section.
        """
        if len(string) == 0:
            return
        k = Key()
        string = string.replace('\n', "<enter>")
        string = string.replace('\t', "<tab>")
        _logger.debug("Send via event interface")
        # Physically held modifiers would corrupt the output; lift them
        # first and restore them afterwards.
        self.__clearModifiers()
        modifiers = []
        for section in KEY_SPLIT_RE.split(string):
            if len(section) > 0:
                if k.is_key(section[:-1]) and section[-1] == '+' and section[:-1] in MODIFIERS:
                    # Section is a modifier application (modifier followed by '+')
                    modifiers.append(section[:-1])
                else:
                    if len(modifiers) > 0:
                        # Modifiers ready for application - send modified key
                        if k.is_key(section):
                            self.interface.send_modified_key(section, modifiers)
                            modifiers = []
                        else:
                            # Only the first character takes the modifiers;
                            # the remainder is sent as plain text.
                            self.interface.send_modified_key(section[0], modifiers)
                            if len(section) > 1:
                                self.interface.send_string(section[1:])
                            modifiers = []
                    else:
                        # Normal string/key operation
                        if k.is_key(section):
                            self.interface.send_key(section)
                        else:
                            self.interface.send_string(section)
        self.__reapplyModifiers()
    def paste_string(self, string, pasteCommand):
        """Output `string` via the clipboard using the given paste command."""
        if len(string) > 0:
            _logger.debug("Send via clipboard")
            self.interface.send_string_clipboard(string, pasteCommand)
    def remove_string(self, string):
        """Erase a previously typed string by sending backspaces.

        Each special "<key>" token counts as a single backspace.
        """
        backspaces = -1 # Start from -1 to discount the backspace already pressed by the user
        k = Key()
        for section in KEY_SPLIT_RE.split(string):
            if k.is_key(section):
                backspaces += 1
            else:
                backspaces += len(section)
        self.send_backspace(backspaces)
    def send_key(self, keyName):
        """Send a single (possibly special) key press."""
        keyName = keyName.replace('\n', "<enter>")
        self.interface.send_key(keyName)
    def press_key(self, keyName):
        """Press and hold the given key without releasing it."""
        keyName = keyName.replace('\n', "<enter>")
        self.interface.fake_keydown(keyName)
    def release_key(self, keyName):
        """Release a key previously held via press_key()."""
        keyName = keyName.replace('\n', "<enter>")
        self.interface.fake_keyup(keyName)
    def fake_keypress(self, keyName):
        """Send a key press via the fake-event path of the interface."""
        keyName = keyName.replace('\n', "<enter>")
        self.interface.fake_keypress(keyName)
    def send_left(self, count):
        """
        Sends the given number of left key presses.
        """
        for i in range(count):
            self.interface.send_key(Key.LEFT)
    def send_right(self, count):
        """Sends the given number of right key presses."""
        for i in range(count):
            self.interface.send_key(Key.RIGHT)
    def send_up(self, count):
        """
        Sends the given number of up key presses.
        """
        for i in range(count):
            self.interface.send_key(Key.UP)
    def send_backspace(self, count):
        """
        Sends the given number of backspace key presses.
        """
        for i in range(count):
            self.interface.send_key(Key.BACKSPACE)
    def send_mouse_click(self, x, y, button, relative):
        """Send a mouse click at (x, y); `relative` selects the origin."""
        self.interface.send_mouse_click(x, y, button, relative)
    def send_mouse_click_relative(self, x, y, button):
        """Send a mouse click at (x, y) relative to the active window."""
        self.interface.send_mouse_click_relative(x, y, button)
    def flush(self):
        """Flush any pending events on the interface."""
        self.interface.flush()
    # Utility methods ----
    def __clearModifiers(self):
        # Release currently held (non-lock) modifiers, remembering them so
        # __reapplyModifiers() can restore the user's physical state.
        self.releasedModifiers = []
        for modifier in self.modifiers.keys():
            if self.modifiers[modifier] and not modifier in (Key.CAPSLOCK, Key.NUMLOCK):
                self.releasedModifiers.append(modifier)
                self.interface.release_key(modifier)
    def __reapplyModifiers(self):
        # Re-press the modifiers lifted by __clearModifiers().
        for modifier in self.releasedModifiers:
            self.interface.press_key(modifier)
    def __getModifiersOn(self):
        # Sorted so listener comparisons (e.g. Waiter) are order-independent.
        modifiers = []
        for modifier in HELD_MODIFIERS:
            if self.modifiers[modifier]:
                modifiers.append(modifier)
        modifiers.sort()
        return modifiers
class Waiter:
    """Blocks the calling thread until a matching key press or mouse
    button event is observed by the IoMediator."""

    def __init__(self, rawKey, modifiers, button, timeOut):
        IoMediator.listeners.append(self)
        self.rawKey = rawKey
        self.modifiers = modifiers
        self.button = button
        self.timeOut = timeOut
        self.event = threading.Event()
        # Sort so comparison with IoMediator's sorted modifier list works.
        if modifiers is not None:
            self.modifiers.sort()

    def wait(self):
        """Block until the event fires or timeOut elapses."""
        self.event.wait(self.timeOut)

    def handle_keypress(self, rawKey, modifiers, key, *args):
        if rawKey != self.rawKey or modifiers != self.modifiers:
            return
        IoMediator.listeners.remove(self)
        self.event.set()

    def handle_mouseclick(self, rootX, rootY, relX, relY, button, windowInfo):
        # NOTE(review): unlike handle_keypress, the waiter stays registered
        # as a listener here - presumably intentional; worth confirming.
        if button == self.button:
            self.event.set()
class KeyGrabber:
    """Captures a single hotkey combination on behalf of the hotkey
    settings dialog."""

    def __init__(self, parent):
        self.targetParent = parent

    def start(self):
        # In QT version, sometimes the mouseclick event arrives before we
        # finish initialising - sleep slightly to prevent this
        time.sleep(0.1)
        IoMediator.listeners.append(self)
        CURRENT_INTERFACE.grab_keyboard()

    def handle_keypress(self, rawKey, modifiers, key, *args):
        # Wait for a non-modifier key so the full combination is captured.
        if rawKey in MODIFIERS:
            return
        IoMediator.listeners.remove(self)
        self.targetParent.set_key(rawKey, modifiers)
        CURRENT_INTERFACE.ungrab_keyboard()

    def handle_mouseclick(self, rootX, rootY, relX, relY, button, windowInfo):
        # Any mouse click aborts the grab.
        IoMediator.listeners.remove(self)
        CURRENT_INTERFACE.ungrab_keyboard()
        self.targetParent.cancel_grab()
class Recorder(KeyGrabber):
    """
    Recorder used by the record macro functionality

    Records keystrokes (as keys or hotkeys) and/or mouse clicks into the
    target parent after an optional start delay.
    """
    def __init__(self, parent):
        KeyGrabber.__init__(self, parent)
        self.insideKeys = False
    def start(self, delay):
        """Begin recording; events are ignored until `delay` seconds pass."""
        time.sleep(0.1)
        IoMediator.listeners.append(self)
        self.targetParent.start_record()
        self.startTime = time.time()
        self.delay = delay
        self.delayFinished = False
    def start_withgrab(self):
        """Begin recording immediately (no delay) with the keyboard grabbed."""
        time.sleep(0.1)
        IoMediator.listeners.append(self)
        self.targetParent.start_record()
        self.startTime = time.time()
        self.delay = 0
        self.delayFinished = True
        CURRENT_INTERFACE.grab_keyboard()
    def stop(self):
        """Stop recording, closing any open key sequence."""
        if self in IoMediator.listeners:
            IoMediator.listeners.remove(self)
            if self.insideKeys:
                self.targetParent.end_key_sequence()
            self.insideKeys = False
    def stop_withgrab(self):
        """Stop a grabbed-keyboard recording, releasing the grab."""
        CURRENT_INTERFACE.ungrab_keyboard()
        if self in IoMediator.listeners:
            IoMediator.listeners.remove(self)
            if self.insideKeys:
                self.targetParent.end_key_sequence()
            self.insideKeys = False
    def set_record_keyboard(self, doIt):
        self.recordKeyboard = doIt
    def set_record_mouse(self, doIt):
        self.recordMouse = doIt
    def __delayPassed(self):
        # BUG FIX: the previous implementation converted the elapsed time to
        # a datetime and compared its .second field, which wraps at 60 -
        # delays of a minute or more could never complete. Compare elapsed
        # seconds directly instead.
        if not self.delayFinished:
            elapsed = time.time() - self.startTime
            self.delayFinished = (elapsed > self.delay)
        return self.delayFinished
    def handle_keypress(self, rawKey, modifiers, key, *args):
        if self.recordKeyboard and self.__delayPassed():
            if not self.insideKeys:
                self.insideKeys = True
                self.targetParent.start_key_sequence()
            # Treat modified (non-shift-only) presses as hotkeys; plain
            # characters and shifted printables are appended as keys.
            modifierCount = len(modifiers)
            if modifierCount > 1 or (modifierCount == 1 and Key.SHIFT not in modifiers) or \
                    (Key.SHIFT in modifiers and len(rawKey) > 1):
                self.targetParent.append_hotkey(rawKey, modifiers)
            elif not key in MODIFIERS:
                self.targetParent.append_key(key)
    def handle_mouseclick(self, rootX, rootY, relX, relY, button, windowInfo):
        if self.recordMouse and self.__delayPassed():
            if self.insideKeys:
                self.insideKeys = False
                self.targetParent.end_key_sequence()
            self.targetParent.append_mouseclick(relX, relY, button, windowInfo[0])
class WindowGrabber:
    """Waits for the user to click in a window and reports that window's
    information back to the owning dialog."""

    def __init__(self, dialog):
        self.dialog = dialog

    def start(self):
        # Delay slightly so the click that launched the grab is not captured.
        time.sleep(0.1)
        IoMediator.listeners.append(self)

    def handle_keypress(self, rawKey, modifiers, key, *args):
        # Keyboard input is ignored - only a mouse click selects a window.
        pass

    def handle_mouseclick(self, rootX, rootY, relX, relY, button, windowInfo):
        IoMediator.listeners.remove(self)
        self.dialog.receive_window_info(windowInfo)
'use cache'
// Exported TypeScript nodes should be ignored when validating that all module
// exports are async functions.
export type T = {}
export interface I {}
// NOTE(review): `enum` emits runtime code, unlike the type-only exports
// above - presumably the transform still treats it as ignorable; confirm
// against this fixture's expected output before changing.
export enum E {}
export default interface D {}
export async function Page() {}
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
**Examples**
```esql
ROW a=10
| EVAL j = TO_STRING(a)
```
| a:integer | j:keyword |
| --- | --- |
| 10 | "10" |
It also works fine on multivalued fields:
```esql
ROW a=[10, 9, 8]
| EVAL j = TO_STRING(a)
```
| a:integer | j:keyword |
| --- | --- |
| [10, 9, 8] | ["10", "9", "8"] | | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/esql/_snippets/functions/examples/to_string.md |
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
)
// TestSymbolsTable verifies interning, Reset semantics and label
// round-tripping of the writev2 symbol table.
func TestSymbolsTable(t *testing.T) {
	s := NewSymbolTable()
	// A fresh table reserves reference 0 for the empty string.
	require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
	require.Equal(t, uint32(0), s.Symbolize(""))
	require.Equal(t, []string{""}, s.Symbols())
	// New strings are appended and assigned consecutive references.
	require.Equal(t, uint32(1), s.Symbolize("abc"))
	require.Equal(t, []string{"", "abc"}, s.Symbols())
	require.Equal(t, uint32(2), s.Symbolize("__name__"))
	require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())
	require.Equal(t, uint32(3), s.Symbolize("foo"))
	require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())
	// Reset drops everything except the reserved empty string; previously
	// seen strings receive fresh references afterwards.
	s.Reset()
	require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
	require.Equal(t, uint32(0), s.Symbolize(""))
	require.Equal(t, uint32(1), s.Symbolize("__name__"))
	require.Equal(t, []string{"", "__name__"}, s.Symbols())
	require.Equal(t, uint32(2), s.Symbolize("abc"))
	require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())
	// Symbolizing a label set interns each name and value and returns
	// their references in label order.
	ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
	encoded := s.SymbolizeLabels(ls, nil)
	require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
	// Round-trip: decoding the references reproduces the original labels.
	b := labels.NewScratchBuilder(len(encoded))
	decoded, err := desymbolizeLabels(&b, encoded, s.Symbols())
	require.NoError(t, err)
	require.Equal(t, ls, decoded)
	// Reuse the previous slice as buf: already-interned strings keep their
	// references, the new value gets the next free one.
	ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234")
	encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5})
	require.Equal(t, []uint32{1, 3, 6, 5}, encoded)
}
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
from textwrap import dedent
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
class board_board(osv.osv):
    _name = 'board.board'
    _description = "Board"
    # Not backed by a real table - boards are purely view-driven.
    _auto = False
    _columns = {}
    @tools.cache()
    def list(self, cr, uid, context=None):
        """Return [{'id': menu_id, 'name': menu_name}] for every menu entry
        that opens a board action and is visible to the current user.
        Result is cached; invalidate with _clear_list_cache().
        """
        Actions = self.pool.get('ir.actions.act_window')
        Menus = self.pool.get('ir.ui.menu')
        IrValues = self.pool.get('ir.values')
        act_ids = Actions.search(cr, uid, [('res_model', '=', self._name)], context=context)
        refs = ['%s,%s' % (Actions._name, act_id) for act_id in act_ids]
        # cannot search "action" field on menu (non stored function field without search_fnct)
        irv_ids = IrValues.search(cr, uid, [
            ('model', '=', 'ir.ui.menu'),
            ('key', '=', 'action'),
            ('key2', '=', 'tree_but_open'),
            ('value', 'in', refs),
        ], context=context)
        menu_ids = map(itemgetter('res_id'), IrValues.read(cr, uid, irv_ids, ['res_id'], context=context))
        menu_ids = Menus._filter_visible_menus(cr, uid, menu_ids, context=context)
        menu_names = Menus.name_get(cr, uid, menu_ids, context=context)
        return [dict(id=m[0], name=m[1]) for m in menu_names]
    def _clear_list_cache(self):
        # Invalidate the @tools.cache() memo on list().
        self.list.clear_cache(self)
    def create(self, cr, user, vals, context=None):
        # Boards are never created through the ORM; silently refuse.
        return 0
    def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """
        Overrides orm field_view_get.
        @return: Dictionary of Fields, arch and toolbar.
        """
        res = {}
        res = super(board_board, self).fields_view_get(cr, user, view_id, view_type,
                                                       context, toolbar=toolbar, submenu=submenu)
        CustView = self.pool.get('ir.ui.view.custom')
        vids = CustView.search(cr, user, [('user_id', '=', user), ('ref_id', '=', view_id)], context=context)
        if vids:
            # The user customised this board - serve their custom arch instead.
            view_id = vids[0]
            arch = CustView.browse(cr, user, view_id, context=context)
            res['custom_view_id'] = view_id
            res['arch'] = arch.arch
        res['arch'] = self._arch_preprocessing(cr, user, res['arch'], context=context)
        # Boards expose no print/action/relate toolbar buttons.
        res['toolbar'] = {'print': [], 'action': [], 'relate': []}
        return res
    def _arch_preprocessing(self, cr, user, arch, context=None):
        """Strip <action> nodes flagged invisible (no access rights for the
        user) from the arch and return it pretty-printed."""
        from lxml import etree
        def remove_unauthorized_children(node):
            # Recursively drop invisible action children.
            for child in node.iterchildren():
                if child.tag == 'action' and child.get('invisible'):
                    node.remove(child)
                else:
                    child = remove_unauthorized_children(child)
            return node
        def encode(s):
            # lxml wants bytes; encode unicode archs as UTF-8 (Python 2).
            if isinstance(s, unicode):
                return s.encode('utf8')
            return s
        archnode = etree.fromstring(encode(arch))
        return etree.tostring(remove_unauthorized_children(archnode), pretty_print=True)
class board_create(osv.osv_memory):
    def board_create(self, cr, uid, ids, context=None):
        """Create the view, window action and menu entry for a new empty
        dashboard, then ask the web client to reload onto the new menu."""
        assert len(ids) == 1
        this = self.browse(cr, uid, ids[0], context=context)
        # Empty two-column board layout.
        view_arch = dedent("""<?xml version="1.0"?>
                              <form string="%s" version="7.0">
                              <board style="2-1">
                              <column/>
                              <column/>
                              </board>
                              </form>
                           """.strip() % (this.name,))
        view_id = self.pool.get('ir.ui.view').create(cr, uid, {
            'name': this.name,
            'model': 'board.board',
            'priority': 16,
            'type': 'form',
            'arch': view_arch,
        }, context=context)
        action_id = self.pool.get('ir.actions.act_window').create(cr, uid, {
            'name': this.name,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'board.board',
            'usage': 'menu',
            'view_id': view_id,
            'help': dedent('''<div class="oe_empty_custom_dashboard">
                  <p>
                    <b>This dashboard is empty.</b>
                  </p><p>
                    To add the first report into this dashboard, go to any
                    menu, switch to list or graph view, and click <i>'Add to
                    Dashboard'</i> in the extended search options.
                  </p><p>
                    You can filter and group data before inserting into the
                    dashboard using the search options.
                  </p>
              </div>
            ''')
        }, context=context)
        # Menu is created as SUPERUSER so non-admin users can still create
        # their own dashboards.
        menu_id = self.pool.get('ir.ui.menu').create(cr, SUPERUSER_ID, {
            'name': this.name,
            'parent_id': this.menu_parent_id.id,
            'action': 'ir.actions.act_window,%s' % (action_id,)
        }, context=context)
        self.pool.get('board.board')._clear_list_cache()
        # Instruct the client to reload and open the freshly created menu.
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
            'params': {
                'menu_id': menu_id
            },
        }
    def _default_menu_parent_id(self, cr, uid, context=None):
        # Default parent: the Reporting / Dashboard menu.
        _, menu_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'menu_reporting_dashboard')
        return menu_id
    _name = "board.create"
    _description = "Board Creation"
    _columns = {
        'name': fields.char('Board Name', size=64, required=True),
        'menu_parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
    }
    _defaults = {
        'menu_parent_id': _default_menu_parent_id,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_terminal(object):
    """Auto generated class.

    Builds XML configuration documents for the Brocade terminal
    configuration YANG model and hands each document to the supplied
    callback (typically a function that sends it to a device).

    Fix: the generated source defined each method twice with identical
    bodies; Python silently kept only the last definition, so the
    duplicates have been removed with no behavior change.
    """
    def __init__(self, **kwargs):
        # Callback invoked with the assembled ElementTree "config" element.
        self._callback = kwargs.pop('callback')
    def terminal_cfg_line_sessionid(self, **kwargs):
        """Build a <config>/<terminal-cfg>/<line>/<sessionid> document.

        Required kwargs:
            sessionid: terminal session identifier (string).
        Optional kwargs:
            callback: overrides the instance callback for this call.
        Returns whatever the callback returns.
        """
        config = ET.Element("config")
        terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
        line = ET.SubElement(terminal_cfg, "line")
        sessionid = ET.SubElement(line, "sessionid")
        sessionid.text = kwargs.pop('sessionid')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    def terminal_cfg_line_exec_timeout(self, **kwargs):
        """Build a <config>/<terminal-cfg>/<line> document setting
        <exec-timeout> for the line keyed by <sessionid>.

        Required kwargs:
            sessionid: key identifying the terminal session (string).
            exec_timeout: timeout value to configure (string).
        Optional kwargs:
            callback: overrides the instance callback for this call.
        Returns whatever the callback returns.
        """
        config = ET.Element("config")
        terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
        line = ET.SubElement(terminal_cfg, "line")
        sessionid_key = ET.SubElement(line, "sessionid")
        sessionid_key.text = kwargs.pop('sessionid')
        exec_timeout = ET.SubElement(line, "exec-timeout")
        exec_timeout.text = kwargs.pop('exec_timeout')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An apiproxy stub that calls a remote handler via HTTP.
This allows easy remote access to the App Engine datastore, and potentially any
of the other App Engine APIs, using the same interface you use when accessing
the service locally.
An example Python script:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from myapp import models
import getpass
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteApi(None, '/_ah/remote_api', auth_func,
'my-app.appspot.com')
# Now you can access the remote datastore just as if your code was running on
# App Engine!
houses = models.House.all().fetch(100)
for a_house in q:
a_house.doors += 1
db.put(houses)
---
A few caveats:
- Where possible, avoid iterating over queries. Fetching as many results as you
will need is faster and more efficient. If you don't know how many results
you need, or you need 'all of them', iterating is fine.
- Likewise, it's a good idea to put entities in batches. Instead of calling put
for each individual entity, accumulate them and put them in batches using
db.put(), if you can.
- Requests and responses are still limited to 1MB each, so if you have large
entities or try and fetch or put many of them at once, your requests may fail.
"""
import google
import os
import pickle
import random
import sys
import thread
import threading
import yaml
import hashlib
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
else:
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import appengine_rpc
_REQUEST_ID_HEADER = 'HTTP_X_APPENGINE_REQUEST_ID'
class Error(Exception):
  """Base class for exceptions raised by this remote API stub module."""
class ConfigurationError(Error):
  """Raised when the remote API stub is configured incorrectly."""
class UnknownJavaServerError(Error):
  """Raised when a Java remote_api handler returns an unrecognized error."""
def GetUserAgent():
  """Determines the value of the 'User-agent' header to use for HTTP requests.
  Returns:
    String containing the 'user-agent' header value, which includes the SDK
    version, the platform information, and the version of Python;
    e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
  """
  python_version = ".".join(str(part) for part in sys.version_info)
  tokens = [
      "Google-remote_api/1.0",
      appengine_rpc.GetPlatformToken(),
      "Python/%s" % python_version,
  ]
  return " ".join(tokens)
def GetSourceName():
  """Returns the client source identifier string, "Google-remote_api-1.0"."""
  return "Google-remote_api-1.0"
def HashEntity(entity):
  """Return a very-likely-unique hash of an entity.

  The hash is the SHA-1 digest of the entity's encoded protobuf form.
  """
  encoded = entity.Encode()
  return hashlib.sha1(encoded).digest()
class TransactionData(object):
  """Encapsulates data about an individual transaction."""

  def __init__(self, thread_id, is_xg):
    # Identifier of the thread that started this transaction.
    self.thread_id = thread_id
    # Whether this is a cross-group (XG) transaction.
    self.is_xg = is_xg
    # State accumulated over the transaction's lifetime, keyed per entity.
    self.preconditions = {}
    self.entities = {}
class RemoteStub(object):
  """A stub for calling services on a remote server over HTTP.
  You can use this to stub out any service that the remote server supports.
  """
  # Thread-local storage for the per-thread request id.
  _local = threading.local()
  def __init__(self, server, path, _test_stub_map=None):
    """Constructs a new RemoteStub that communicates with the specified server.
    Args:
      server: An instance of a subclass of
        google.appengine.tools.appengine_rpc.AbstractRpcServer.
      path: The path to the handler this stub should send requests to.
    """
    self._server = server
    self._path = path
    self._test_stub_map = _test_stub_map
  def _PreHookHandler(self, service, call, request, response):
    # Subclass hook invoked before every call; no-op by default.
    pass
  def _PostHookHandler(self, service, call, request, response):
    # Subclass hook invoked after every call, even on error; no-op here.
    pass
  def MakeSyncCall(self, service, call, request, response):
    """Dispatch a call to a local test stub if one is registered for the
    service, otherwise to the remote server."""
    self._PreHookHandler(service, call, request, response)
    try:
      test_stub = self._test_stub_map and self._test_stub_map.GetStub(service)
      if test_stub:
        test_stub.MakeSyncCall(service, call, request, response)
      else:
        self._MakeRealSyncCall(service, call, request, response)
    finally:
      self._PostHookHandler(service, call, request, response)
  @classmethod
  def _GetRequestId(cls):
    """Returns the id of the request associated with the current thread."""
    return cls._local.request_id
  @classmethod
  def _SetRequestId(cls, request_id):
    """Set the id of the request associated with the current thread."""
    cls._local.request_id = request_id
  def _MakeRealSyncCall(self, service, call, request, response):
    """Serialize the call, POST it to the remote handler, decode the reply.

    Raises:
      apiproxy_errors.ApplicationError: the remote API reported an error.
      UnknownJavaServerError: a Java handler raised an unknown exception.
      Whatever exception the Python handler pickled into the response.
    """
    request_pb = remote_api_pb.Request()
    request_pb.set_service_name(service)
    request_pb.set_method(call)
    request_pb.set_request(request.Encode())
    if hasattr(self._local, 'request_id'):
      request_pb.set_request_id(self._local.request_id)
    response_pb = remote_api_pb.Response()
    encoded_request = request_pb.Encode()
    encoded_response = self._server.Send(self._path, encoded_request)
    response_pb.ParseFromString(encoded_response)
    if response_pb.has_application_error():
      error_pb = response_pb.application_error()
      raise apiproxy_errors.ApplicationError(error_pb.code(),
                                             error_pb.detail())
    elif response_pb.has_exception():
      # NOTE(review): unpickling server data is only acceptable because the
      # server is the developer's own trusted app; never point this stub at
      # an untrusted endpoint.
      raise pickle.loads(response_pb.exception())
    elif response_pb.has_java_exception():
      raise UnknownJavaServerError("An unknown error has occured in the "
                                   "Java remote_api handler for this call.")
    else:
      response.ParseFromString(response_pb.response())
  def CreateRPC(self):
    """Create an RPC object that dispatches through this stub."""
    return apiproxy_rpc.RPC(stub=self)
class RemoteDatastoreStub(RemoteStub):
  """A specialised stub for accessing the App Engine datastore remotely.
  A specialised stub is required because there are some datastore operations
  that preserve state between calls. This stub makes queries possible.
  Transactions on the remote datastore are unfortunately still impossible.
  """
  def __init__(self, server, path, default_result_count=20,
               _test_stub_map=None):
    """Constructor.
    Args:
      server: The server name to connect to.
      path: The URI path on the server.
      default_result_count: The number of items to fetch, by default, in a
        datastore Query or Next operation. This affects the batch size of
        query iterators.
      _test_stub_map: Optional stub map forwarded to RemoteStub.
    """
    super(RemoteDatastoreStub, self).__init__(server, path, _test_stub_map)
    self.default_result_count = default_result_count
    # Maps local cursor ids to the query to continue (None once exhausted).
    self.__queries = {}
    # Maps transaction handles to TransactionData.
    self.__transactions = {}
    # Cursor ids are allocated locally; the lock guards the counter.
    self.__next_local_cursor = 1
    self.__local_cursor_lock = threading.Lock()
    # Transaction handles are likewise local and lock-guarded.
    self.__next_local_tx = 1
    self.__local_tx_lock = threading.Lock()
  def MakeSyncCall(self, service, call, request, response):
    # Calls with a local _Dynamic_<Call> handler are intercepted here; the
    # rest fall through to the plain RemoteStub behaviour.
    assert service == 'datastore_v3'
    explanation = []
    assert request.IsInitialized(explanation), explanation
    handler = getattr(self, '_Dynamic_' + call, None)
    if handler:
      handler(request, response)
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
                                                    response)
    assert response.IsInitialized(explanation), explanation
  def _Dynamic_RunQuery(self, query, query_result, cursor_id = None):
    if query.has_transaction():
      # Transactional queries go through the remote_datastore service so the
      # server can also return the entity group for precondition tracking.
      txdata = self.__transactions[query.transaction().handle()]
      tx_result = remote_api_pb.TransactionQueryResult()
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'remote_datastore', 'TransactionQuery', query, tx_result)
      query_result.CopyFrom(tx_result.result())
      eg_key = tx_result.entity_group_key()
      encoded_eg_key = eg_key.Encode()
      eg_hash = None
      if tx_result.has_entity_group():
        eg_hash = HashEntity(tx_result.entity_group())
      old_key, old_hash = txdata.preconditions.get(encoded_eg_key, (None, None))
      if old_key is None:
        txdata.preconditions[encoded_eg_key] = (eg_key, eg_hash)
      elif old_hash != eg_hash:
        # The entity group changed between reads inside the transaction.
        raise apiproxy_errors.ApplicationError(
            datastore_pb.Error.CONCURRENT_TRANSACTION,
            'Transaction precondition failed.')
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'RunQuery', query, query_result)
    if cursor_id is None:
      # First batch of this query: allocate a fresh local cursor id.
      self.__local_cursor_lock.acquire()
      try:
        cursor_id = self.__next_local_cursor
        self.__next_local_cursor += 1
      finally:
        self.__local_cursor_lock.release()
    if query_result.more_results():
      # Advance the stored query so _Dynamic_Next can fetch the next batch.
      query.set_offset(query.offset() + query_result.result_size())
      if query.has_limit():
        query.set_limit(query.limit() - query_result.result_size())
      self.__queries[cursor_id] = query
    else:
      self.__queries[cursor_id] = None
    query_result.mutable_cursor().set_cursor(cursor_id)
  def _Dynamic_Next(self, next_request, query_result):
    assert next_request.offset() == 0
    cursor_id = next_request.cursor().cursor()
    if cursor_id not in self.__queries:
      raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
                                             'Cursor %d not found' % cursor_id)
    query = self.__queries[cursor_id]
    if query is None:
      # The cursor was exhausted by an earlier batch.
      query_result.set_more_results(False)
      return
    else:
      if next_request.has_count():
        query.set_count(next_request.count())
      else:
        query.clear_count()
    # Re-run the stored (already advanced) query under the same cursor id.
    self._Dynamic_RunQuery(query, query_result, cursor_id)
    query_result.set_skipped_results(0)
  def _Dynamic_Get(self, get_request, get_response):
    txid = None
    if get_request.has_transaction():
      # Inside a transaction, only fetch keys not already written locally.
      txid = get_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      keys = [(k, k.Encode()) for k in get_request.key_list()]
      new_request = datastore_pb.GetRequest()
      for key, enckey in keys:
        if enckey not in txdata.entities:
          new_request.add_key().CopyFrom(key)
    else:
      new_request = get_request
    if new_request.key_size() > 0:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Get', new_request, get_response)
    if txid is not None:
      # Record read preconditions and merge locally-buffered entities back
      # into the response in the caller's original key order.
      newkeys = new_request.key_list()
      entities = get_response.entity_list()
      for key, entity in zip(newkeys, entities):
        entity_hash = None
        if entity.has_entity():
          entity_hash = HashEntity(entity.entity())
        txdata.preconditions[key.Encode()] = (key, entity_hash)
      new_response = datastore_pb.GetResponse()
      it = iter(get_response.entity_list())
      for key, enckey in keys:
        if enckey in txdata.entities:
          cached_entity = txdata.entities[enckey][1]
          if cached_entity:
            new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
          else:
            # The entity was deleted inside this transaction.
            new_response.add_entity()
        else:
          new_entity = it.next()
          if new_entity.has_entity():
            assert new_entity.entity().key() == key
            new_response.add_entity().CopyFrom(new_entity)
          else:
            new_response.add_entity()
      get_response.CopyFrom(new_response)
  def _Dynamic_Put(self, put_request, put_response):
    if put_request.has_transaction():
      entities = put_request.entity_list()
      # Entities whose final path element has neither an id nor a name need
      # a server-allocated id before they can be buffered locally.
      requires_id = lambda x: x.id() == 0 and not x.has_name()
      new_ents = [e for e in entities
                  if requires_id(e.key().path().element_list()[-1])]
      id_request = datastore_pb.PutRequest()
      txid = put_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      if new_ents:
        for ent in new_ents:
          e = id_request.add_entity()
          e.mutable_key().CopyFrom(ent.key())
          e.mutable_entity_group()
        id_response = datastore_pb.PutResponse()
        if txdata.is_xg:
          rpc_name = 'GetIDsXG'
        else:
          rpc_name = 'GetIDs'
        super(RemoteDatastoreStub, self).MakeSyncCall(
            'remote_datastore', rpc_name, id_request, id_response)
        assert id_request.entity_size() == id_response.key_size()
        for key, ent in zip(id_response.key_list(), new_ents):
          ent.mutable_key().CopyFrom(key)
          ent.mutable_entity_group().add_element().CopyFrom(
              key.path().element(0))
      for entity in entities:
        # Buffer the write; it is sent to the server at commit time.
        txdata.entities[entity.key().Encode()] = (entity.key(), entity)
        put_response.add_key().CopyFrom(entity.key())
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Put', put_request, put_response)
  def _Dynamic_Delete(self, delete_request, response):
    if delete_request.has_transaction():
      # Buffer deletes (entity=None) until commit.
      txid = delete_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      for key in delete_request.key_list():
        txdata.entities[key.Encode()] = (key, None)
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Delete', delete_request, response)
  def _Dynamic_BeginTransaction(self, request, transaction):
    # Transactions are purely local until commit: allocate a handle and
    # start buffering reads and writes for it.
    self.__local_tx_lock.acquire()
    try:
      txid = self.__next_local_tx
      self.__transactions[txid] = TransactionData(thread.get_ident(),
                                                  request.allow_multiple_eg())
      self.__next_local_tx += 1
    finally:
      self.__local_tx_lock.release()
    transaction.set_handle(txid)
    transaction.set_app(request.app())
  def _Dynamic_Commit(self, transaction, transaction_response):
    txid = transaction.handle()
    if txid not in self.__transactions:
      raise apiproxy_errors.ApplicationError(
          datastore_pb.Error.BAD_REQUEST,
          'Transaction %d not found.' % (txid,))
    txdata = self.__transactions[txid]
    assert (txdata.thread_id ==
            thread.get_ident()), "Transactions are single-threaded."
    del self.__transactions[txid]
    # Ship all buffered preconditions and writes as one atomic request.
    tx = remote_api_pb.TransactionRequest()
    tx.set_allow_multiple_eg(txdata.is_xg)
    for key, hash in txdata.preconditions.values():
      precond = tx.add_precondition()
      precond.mutable_key().CopyFrom(key)
      if hash:
        precond.set_hash(hash)
    puts = tx.mutable_puts()
    deletes = tx.mutable_deletes()
    for key, entity in txdata.entities.values():
      if entity:
        puts.add_entity().CopyFrom(entity)
      else:
        deletes.add_key().CopyFrom(key)
    super(RemoteDatastoreStub, self).MakeSyncCall(
        'remote_datastore', 'Transaction',
        tx, datastore_pb.PutResponse())
  def _Dynamic_Rollback(self, transaction, transaction_response):
    txid = transaction.handle()
    self.__local_tx_lock.acquire()
    try:
      if txid not in self.__transactions:
        raise apiproxy_errors.ApplicationError(
            datastore_pb.Error.BAD_REQUEST,
            'Transaction %d not found.' % (txid,))
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      # Rolling back simply discards the locally buffered state.
      del self.__transactions[txid]
    finally:
      self.__local_tx_lock.release()
  def _Dynamic_CreateIndex(self, index, id_response):
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
  def _Dynamic_UpdateIndex(self, index, void):
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
  def _Dynamic_DeleteIndex(self, index, void):
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
# Names of every service the remote API knows how to stub.
ALL_SERVICES = set(remote_api_services.SERVICE_PB_MAP)
def GetRemoteAppIdFromServer(server, path, remote_token=None):
  """Return the app id from a connection to an existing server.
  Args:
    server: An appengine_rpc.AbstractRpcServer
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    remote_token: Token to validate that the response was to this request.
  Returns:
    App ID as reported by the remote server.
  Raises:
    ConfigurationError: The server returned an invalid response.
  """
  if not remote_token:
    random.seed()
    remote_token = str(random.random())[2:]
  remote_token = str(remote_token)
  urlargs = {'rtok': remote_token}
  response = server.Send(path, payload=None, **urlargs)
  if not response.startswith('{'):
    raise ConfigurationError(
        'Invalid response received from server: %s' % response)
  # Use safe_load: the response is a plain mapping, and yaml.load on a
  # server-supplied document can construct arbitrary Python objects.
  app_info = yaml.safe_load(response)
  if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
    raise ConfigurationError('Error parsing app_id lookup response')
  if str(app_info['rtok']) != remote_token:
    # The token round-trips through the server; a mismatch means this reply
    # belongs to some other request (e.g. a captive portal or a cache).
    raise ConfigurationError('Token validation failed during app_id lookup. '
                             '(sent %s, got %s)' % (repr(remote_token),
                                                    repr(app_info['rtok'])))
  return app_info['app_id']
def ConfigureRemoteApiFromServer(server, path, app_id, services=None,
                                 default_auth_domain=None,
                                 use_remote_datastore=True):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  Args:
    server: An AbstractRpcServer
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    app_id: The app_id of your app, as declared in app.yaml.
    services: A list of services to set up stubs for. If specified, only those
      services are configured; by default all supported services are
      configured.
    default_auth_domain: The authentication domain to use by default.
    use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
      through datastore requests. RemoteDatastoreStub batches transactional
      datastore requests since, in production, datastore requires are scoped to
      a single request.
  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is a error configuring the Remote API.
  """
  if services is None:
    services = set(ALL_SERVICES)
  else:
    services = set(services)
    unsupported = services.difference(ALL_SERVICES)
    if unsupported:
      raise ConfigurationError('Unsupported service(s): %s'
                               % (', '.join(unsupported),))
  # The API stubs read the application id and auth domain from the process
  # environment, so set them for the whole process.
  os.environ['APPLICATION_ID'] = app_id
  os.environ.setdefault('AUTH_DOMAIN', default_auth_domain or 'gmail.com')
  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
  if 'datastore_v3' in services and use_remote_datastore:
    # The datastore gets its own stateful stub (cursors, transactions).
    services.remove('datastore_v3')
    datastore_stub = RemoteDatastoreStub(server, path)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
  # All remaining services share a single pass-through stub.
  stub = RemoteStub(server, path)
  for service in services:
    apiproxy_stub_map.apiproxy.RegisterStub(service, stub)
def GetRemoteAppId(servername,
                   path,
                   auth_func,
                   rpc_server_factory=appengine_rpc.HttpRpcServer,
                   rtok=None,
                   secure=False,
                   save_cookies=False):
  """Get the remote appid as reported at servername/path.
  This will also return an AbstractRpcServer server, which can be used with
  ConfigureRemoteApiFromServer.
  Args:
    servername: The hostname your app is deployed on, typically
      <app_id>.appspot.com.
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    auth_func: A function that takes no arguments and returns a
      (username, password) tuple. This will be called if your application
      requires authentication to access the remote_api handler (it should!)
      and you do not already have a valid auth cookie.
    rpc_server_factory: A factory to construct the rpc server for the datastore.
    rtok: The validation token to sent with app_id lookups. If None, a random
      token is used.
    secure: Use SSL when communicating with the server.
    save_cookies: Forwarded to rpc_server_factory function.
  Returns:
    (app_id, server): The application ID and an AbstractRpcServer.
  """
  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
                              GetSourceName(), save_cookies=save_cookies,
                              debug_data=False, secure=secure)
  app_id = GetRemoteAppIdFromServer(server, path, rtok)
  return app_id, server
# OAuth2 scopes requested when authenticating to the remote API with
# credentials (key file, Compute Engine metadata, or explicit OAuth2).
_OAUTH_SCOPES = [
    'https://www.googleapis.com/auth/appengine.apis',
    'https://www.googleapis.com/auth/userinfo.email',
    ]
def _ConfigureRemoteApiWithKeyFile(servername,
                                   path,
                                   service_account,
                                   key_file_path):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  This function uses OAuth2 with a credential derived from service_account and
  key_file_path to communicate with App Engine APIs.
  Use of this method requires an encryption library to be installed.
  Args:
    servername: The hostname your app is deployed on (typically,
      <app_id>.appspot.com).
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    service_account: The email address of the service account to use for
      making OAuth requests.
    key_file_path: The path to a .p12 file containing the private key for
      service_account.
  Returns:
    server, a server which may be useful for calling the application directly.
  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is a error configuring the Datastore stub.
    ImportError: if the oauth2client module is not available or an appropriate
      encryption library cannot not be found.
    IOError: if key_file_path does not exist or cannot be read.
  """
  try:
    import oauth2client.client
  except ImportError, e:
    raise ImportError('Use of a key file to access the Remote API '
                      'requires the oauth2client module: %s' % e)
  if not oauth2client.client.HAS_CRYPTO:
    # A signed JWT requires a crypto backend (PyOpenSSL or PyCrypto).
    raise ImportError('Use of a key file to access the Remote API '
                      'requires an encryption library. Please install '
                      'either PyOpenSSL or PyCrypto 2.6 or later.')
  with open(key_file_path, 'rb') as key_file:
    key = key_file.read()
    credentials = oauth2client.client.SignedJwtAssertionCredentials(
        service_account,
        key,
        _OAUTH_SCOPES)
  return _ConfigureRemoteApiWithOAuthCredentials(servername,
                                                 path,
                                                 credentials)
def _ConfigureRemoteApiWithComputeEngineCredential(servername,
                                                   path):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  This function uses OAuth2 with a credential from the Compute Engine metadata
  server to communicate with App Engine APIs.
  Args:
    servername: The hostname your app is deployed on (typically,
      <app_id>.appspot.com).
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
  Returns:
    server, a server which may be useful for calling the application directly.
  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is a error configuring the Datastore stub.
    ImportError: if the oauth2client or httplib2 module is not available.
  """
  try:
    import httplib2
    import oauth2client
  except ImportError, e:
    raise ImportError('Use of Compute Engine credentials requires the '
                      'oauth2client and httplib2 modules: %s' % e)
  # NOTE(review): oauth2client.gce is referenced after `import oauth2client`;
  # this relies on the gce submodule already being loaded — confirm.
  credentials = oauth2client.gce.AppAssertionCredentials(_OAUTH_SCOPES)
  http = httplib2.Http()
  credentials.authorize(http)
  # Refresh eagerly so a bad metadata-server credential fails fast here.
  credentials.refresh(http)
  return _ConfigureRemoteApiWithOAuthCredentials(servername,
                                                 path,
                                                 credentials)
def _ConfigureRemoteApiWithOAuthCredentials(servername,
                                            path,
                                            credentials):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  Args:
    servername: The hostname your app is deployed on (typically,
      <app_id>.appspot.com).
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    credentials: An oauth2client.OAuth2Credentials object.
  Returns:
    server, a server which may be useful for calling the application directly.
  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is a error configuring the Datastore stub.
    ImportError: if the appengine_rpc_httplib2 module is not available.
  """
  try:
    from google.appengine.tools import appengine_rpc_httplib2
  except ImportError, e:
    raise ImportError('Use of OAuth credentials requires the '
                      'appengine_rpc_httplib2 module. %s' % e)
  if not servername:
    raise ConfigurationError('servername required')
  # Only the pre-built credentials object is passed; the other OAuth2
  # parameters are unused in this flow.
  oauth2_parameters = (
      appengine_rpc_httplib2.HttpRpcServerOAuth2.OAuth2Parameters(
          access_token=None,
          client_id=None,
          client_secret=None,
          scope=None,
          refresh_token=None,
          credential_file=None,
          credentials=credentials))
  return ConfigureRemoteApi(
      app_id=None,
      path=path,
      auth_func=oauth2_parameters,
      servername=servername,
      rpc_server_factory=appengine_rpc_httplib2.HttpRpcServerOAuth2)
def ConfigureRemoteApi(app_id,
                       path,
                       auth_func,
                       servername=None,
                       rpc_server_factory=appengine_rpc.HttpRpcServer,
                       rtok=None,
                       secure=False,
                       services=None,
                       default_auth_domain=None,
                       save_cookies=False,
                       use_remote_datastore=True):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  Either servername must be provided or app_id must not be None. If app_id
  is None and a servername is provided, this function will send a request
  to the server to retrieve the app_id.
  Note that if the app_id is specified, the internal appid must be used;
  this may include a partition and a domain. It is often easier to let
  remote_api_stub retrieve the app_id automatically.
  Args:
    app_id: The app_id of your app, as declared in app.yaml, or None.
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    auth_func: A function that takes no arguments and returns a
      (username, password) tuple. This will be called if your application
      requires authentication to access the remote_api handler (it should!)
      and you do not already have a valid auth cookie.
    servername: The hostname your app is deployed on. Defaults to
      <app_id>.appspot.com.
    rpc_server_factory: A factory to construct the rpc server for the datastore.
    rtok: The validation token to sent with app_id lookups. If None, a random
      token is used.
    secure: Use SSL when communicating with the server.
    services: A list of services to set up stubs for. If specified, only those
      services are configured; by default all supported services are
      configured.
    default_auth_domain: The authentication domain to use by default.
    save_cookies: Forwarded to rpc_server_factory function.
    use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
      through datastore requests. RemoteDatastoreStub batches transactional
      datastore requests since, in production, datastore requires are scoped to
      a single request.
  Returns:
    server, the server created by rpc_server_factory, which may be useful for
      calling the application directly.
  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is a error configuring the Datastore stub.
  """
  if not servername and not app_id:
    raise ConfigurationError('app_id or servername required')
  if not servername:
    servername = '%s.appspot.com' % (app_id,)
  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
                              GetSourceName(), save_cookies=save_cookies,
                              debug_data=False, secure=secure)
  if not app_id:
    # Ask the server for its own app id (validated with a random token).
    app_id = GetRemoteAppIdFromServer(server, path, rtok)
  ConfigureRemoteApiFromServer(server, path, app_id, services,
                               default_auth_domain, use_remote_datastore)
  return server
def MaybeInvokeAuthentication():
  """Sends an empty request through to the configured end-point.
  If authentication is necessary, this will cause the rpc_server to invoke
  interactive authentication.
  """
  datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
  if not isinstance(datastore_stub, RemoteStub):
    raise ConfigurationError('remote_api is not configured.')
  # An empty payload is enough to trigger the auth handshake if needed.
  datastore_stub._server.Send(datastore_stub._path, payload=None)
ConfigureRemoteDatastore = ConfigureRemoteApi
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PriorityQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import random
import threading
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class PriorityQueueTest(test.TestCase):
  """Tests PriorityQueue ordering and concurrent enqueue/dequeue behaviour."""
  def testRoundTripInsertReadOnceSorts(self):
    # Enqueue one element at a time, dequeue everything at once, and verify
    # the batch comes back sorted by priority with side values preserved.
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-5, 5, size=100).astype(np.int64)
      side_value_0 = np.random.rand(100).astype(bytes)
      side_value_1 = np.random.rand(100).astype(bytes)
      enq_list = [
          q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
          for e, v0, v1 in zip(elem, side_value_0, side_value_1)
      ]
      for enq in enq_list:
        enq.run()
      deq = q.dequeue_many(100)
      deq_elem, deq_value_0, deq_value_1 = sess.run(deq)
      # allowed maps each priority to the side-value pairs enqueued with it;
      # missed tracks pairs that have not yet been seen on dequeue.
      allowed = {}
      missed = set()
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
        missed.add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
        missed.remove((dv0, dv1))
      self.assertEqual(missed, set())
  def testRoundTripInsertMultiThreadedReadOnceSorts(self):
    # Same as above, but each enqueue runs on its own thread while a single
    # blocking dequeue_many(100) waits for all of them.
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-5, 5, size=100).astype(np.int64)
      side_value_0 = np.random.rand(100).astype(bytes)
      side_value_1 = np.random.rand(100).astype(bytes)
      enqueue_ops = [
          q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
          for e, v0, v1 in zip(elem, side_value_0, side_value_1)
      ]
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      dequeue_op = q.dequeue_many(100)
      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]
      for t in enqueue_threads:
        t.start()
      deq_elem, deq_value_0, deq_value_1 = sess.run(dequeue_op)
      for t in enqueue_threads:
        t.join()
      allowed = {}
      missed = set()
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
        missed.add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
        missed.remove((dv0, dv1))
      self.assertEqual(missed, set())
  def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):
    # Small-capacity queue: concurrent producers and consumers must
    # interleave (the queue blocks when full/empty) without losing items.
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))
      num_threads = 40
      enqueue_counts = np.random.randint(10, size=num_threads)
      enqueue_values = [
          np.random.randint(
              5, size=count) for count in enqueue_counts
      ]
      enqueue_ops = [
          q.enqueue_many((values, values)) for values in enqueue_values
      ]
      shuffled_counts = copy.deepcopy(enqueue_counts)
      random.shuffle(shuffled_counts)
      dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
      all_enqueued_values = np.hstack(enqueue_values)
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      dequeued = []
      def dequeue(dequeue_op):
        (dequeue_indices, dequeue_values) = sess.run(dequeue_op)
        self.assertAllEqual(dequeue_indices, dequeue_values)
        dequeued.extend(dequeue_indices)
      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]
      dequeue_threads = [
          self.checkedThread(
              target=dequeue, args=(op,)) for op in dequeue_ops
      ]
      # Dequeue and check
      for t in dequeue_threads:
        t.start()
      for t in enqueue_threads:
        t.start()
      for t in enqueue_threads:
        t.join()
      for t in dequeue_threads:
        t.join()
      self.assertAllEqual(sorted(dequeued), sorted(all_enqueued_values))
  def testRoundTripInsertManyMultiThreadedReadManyMultithreadedSorts(self):
    # All producers finish before any consumer starts; consumers then drain
    # concurrently, so only set-equality (not order) can be asserted.
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
      num_threads = 40
      enqueue_counts = np.random.randint(10, size=num_threads)
      enqueue_values = [
          np.random.randint(
              5, size=count) for count in enqueue_counts
      ]
      enqueue_ops = [
          q.enqueue_many((values, values)) for values in enqueue_values
      ]
      shuffled_counts = copy.deepcopy(enqueue_counts)
      random.shuffle(shuffled_counts)
      dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
      all_enqueued_values = np.hstack(enqueue_values)
      dequeue_wait = threading.Condition()
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      def dequeue(dequeue_op, dequeued):
        (dequeue_indices, dequeue_values) = sess.run(dequeue_op)
        self.assertAllEqual(dequeue_indices, dequeue_values)
        # The condition serialises access to the shared `dequeued` list.
        dequeue_wait.acquire()
        dequeued.extend(dequeue_indices)
        dequeue_wait.release()
      dequeued = []
      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]
      dequeue_threads = [
          self.checkedThread(
              target=dequeue, args=(op, dequeued)) for op in dequeue_ops
      ]
      for t in enqueue_threads:
        t.start()
      for t in enqueue_threads:
        t.join()
      # Dequeue and check
      for t in dequeue_threads:
        t.start()
      for t in dequeue_threads:
        t.join()
      # We can't guarantee full sorting because we can't guarantee
      # that the dequeued.extend() call runs immediately after the
      # sess.run() call. Here we're just happy everything came out.
      self.assertAllEqual(set(dequeued), set(all_enqueued_values))
  def testRoundTripInsertManyMultiThreadedReadOnceSorts(self):
    # Producers enqueue in batches of 5 on separate threads; one
    # dequeue_many(100) must still return a fully sorted batch.
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-5, 5, size=100).astype(np.int64)
      side_value_0 = np.random.rand(100).astype(bytes)
      side_value_1 = np.random.rand(100).astype(bytes)
      batch = 5
      enqueue_ops = [
          q.enqueue_many((elem[i * batch:(i + 1) * batch],
                          side_value_0[i * batch:(i + 1) * batch],
                          side_value_1[i * batch:(i + 1) * batch]))
          for i in range(20)
      ]
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      dequeue_op = q.dequeue_many(100)
      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]
      for t in enqueue_threads:
        t.start()
      deq_elem, deq_value_0, deq_value_1 = sess.run(dequeue_op)
      for t in enqueue_threads:
        t.join()
      allowed = {}
      missed = set()
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
        missed.add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
        missed.remove((dv0, dv1))
      self.assertEqual(missed, set())
  def testRoundTripInsertOnceReadOnceSorts(self):
    # Single enqueue_many / single dequeue_many round trip of 1000 items.
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
      side_value_0 = np.random.rand(1000).astype(bytes)
      side_value_1 = np.random.rand(1000).astype(bytes)
      q.enqueue_many((elem, side_value_0, side_value_1)).run()
      deq = q.dequeue_many(1000)
      deq_elem, deq_value_0, deq_value_1 = sess.run(deq)
      allowed = {}
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
  def testRoundTripInsertOnceReadManySorts(self):
    # Ten successive dequeue_many(100) batches concatenate to a sorted list.
    with self.test_session():
      q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
      elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
      q.enqueue_many((elem, elem)).run()
      # NOTE(review): passing a generator to np.hstack is deprecated in
      # newer NumPy releases — confirm against the pinned version.
      deq_values = np.hstack((q.dequeue_many(100)[0].eval() for _ in range(10)))
      self.assertAllEqual(deq_values, sorted(elem))
  def testRoundTripInsertOnceReadOnceLotsSorts(self):
    # 1000 single-element dequeues also come out in sorted order.
    with self.test_session():
      q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
      elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
      q.enqueue_many((elem, elem)).run()
      dequeue_op = q.dequeue()
      # NOTE(review): generator argument to np.hstack — see note above.
      deq_values = np.hstack(dequeue_op[0].eval() for _ in range(1000))
      self.assertAllEqual(deq_values, sorted(elem))
  def testInsertingNonInt64Fails(self):
    # Priorities must be int64; string priorities are rejected at graph time.
    with self.test_session():
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string), (()))
      with self.assertRaises(TypeError):
        q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run()
  def testInsertingNonScalarFails(self):
    # Priorities must be scalars for enqueue and vectors for enqueue_many;
    # shape mismatches surface as InvalidArgumentError at run time.
    with self.test_session() as sess:
      input_priority = array_ops.placeholder(dtypes.int64)
      input_other = array_ops.placeholder(dtypes.string)
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          r"Shape mismatch in tuple component 0. Expected \[\], got \[2\]"):
        sess.run([q.enqueue((input_priority, input_other))],
                 feed_dict={
                     input_priority: np.array(
                         [0, 2], dtype=np.int64),
                     input_other: np.random.rand(3, 5).astype(bytes)
                 })
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          r"Shape mismatch in tuple component 0. Expected \[2\], got \[2,2\]"):
        sess.run(
            [q.enqueue_many((input_priority, input_other))],
            feed_dict={
                input_priority: np.array(
                    [[0, 2], [3, 4]], dtype=np.int64),
                input_other: np.random.rand(2, 3).astype(bytes)
            })
if __name__ == "__main__":
  test.main()
from functools import wraps
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
from django.views.decorators.http import last_modified as cache_last_modified
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import PermissionDenied
from django.core.cache import caches
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.views.generic.edit import BaseUpdateView
from django.views.generic.detail import BaseDetailView
from .settings import app_settings
from .helpers import user_has_perm
from . import models as mapentity_models
def view_permission_required(login_url=None, raise_exception=None):
    """Class-based-view decorator enforcing the view's MapEntity permission.

    When ``raise_exception`` is not given it defaults to raising
    ``PermissionDenied`` only if no ``login_url`` was provided.
    ``login_url`` may also be a MapEntity entity kind (e.g. ``'detail'``),
    in which case the redirect target is computed from the object (detail /
    update views) or the model under view.
    """
    if raise_exception is None:
        raise_exception = (login_url is None)

    def check_perms(request, user, perm):
        # Check both authenticated and anonymous
        if user_has_perm(user, perm):
            return True

        # Authenticated users without the permission get a hard 403
        # when raise_exception is set.
        if not user.is_anonymous and raise_exception:
            raise PermissionDenied

        # As the last resort, redirects
        msg = _(u'Access to the requested resource is restricted. You have been redirected.')
        messages.warning(request, msg)
        return False

    def decorator(view_func):
        def _wrapped_view(self, request, *args, **kwargs):
            perm = self.get_view_perm()

            redirect_url = login_url
            # When login_url names an entity kind, derive the redirect URL
            # from the current object or model (get_<kind>_url()).
            if login_url in mapentity_models.ENTITY_KINDS:
                is_handle_object = issubclass(self.__class__, (BaseDetailView, BaseUpdateView))
                if is_handle_object:
                    view_subject = self.get_object()
                else:
                    view_subject = self.get_model()
                get_url_method = getattr(view_subject, 'get_{0}_url'.format(login_url))
                redirect_url = get_url_method()

            # Wrap the view in Django's user_passes_test, adapted for CBVs.
            has_perm_decorator = user_passes_test(lambda u: check_perms(request, u, perm),
                                                  login_url=redirect_url,
                                                  redirect_field_name=None)
            cbv_user_has_perm = method_decorator(has_perm_decorator)

            @cbv_user_has_perm
            def decorated(self, request, *args, **kwargs):
                return view_func(self, request, *args, **kwargs)

            return decorated(self, request, *args, **kwargs)
        return _wrapped_view
    return decorator
def view_cache_latest():
    """Return a CBV decorator emitting Last-Modified / Cache-Control headers.

    The Last-Modified timestamp comes from the view model's
    ``latest_updated()``; responses are marked ``max-age=0,
    must-revalidate`` so clients always revalidate against it.
    """
    def decorator(view_func):
        def _wrapped_view(self, request, *args, **kwargs):
            model = self.get_model()
            last_modified = cache_last_modified(lambda req: model.latest_updated())

            @method_decorator(cache_control(max_age=0, must_revalidate=True))
            @method_decorator(last_modified)
            def run_view(self, request, *args, **kwargs):
                return view_func(self, request, *args, **kwargs)

            return run_view(self, request, *args, **kwargs)
        return _wrapped_view
    return decorator
def view_cache_response_content():
    """Cache the rendered response content of GeoJSON layer views.

    The default cache key embeds language, model name and the model's last
    update timestamp, so every save naturally invalidates the entry.
    """
    def decorator(view_func):
        def _wrapped_method(self, *args, **kwargs):
            response_class = self.response_class
            response_kwargs = dict()

            # Do not cache if filters presents
            params = self.request.GET.keys()
            with_filters = all([not p.startswith('_') for p in params])

            # Real filter parameters (no leading underscore) bypass the cache.
            if len(params) > 0 and with_filters:
                return view_func(self, *args, **kwargs)

            # Restore from cache or store view result
            geojson_lookup = None

            if hasattr(self, 'view_cache_key'):
                # The view supplies its own cache key.
                geojson_lookup = self.view_cache_key()
            elif not self.request.GET:  # Do not cache filtered responses
                view_model = self.get_model()
                language = self.request.LANGUAGE_CODE
                latest_saved = view_model.latest_updated()
                if latest_saved:
                    geojson_lookup = '%s_%s_%s_json_layer' % (
                        language,
                        view_model._meta.model_name,
                        latest_saved.strftime('%y%m%d%H%M%S%f')
                    )

            geojson_cache = caches[app_settings['GEOJSON_LAYERS_CACHE_BACKEND']]

            if geojson_lookup:
                content = geojson_cache.get(geojson_lookup)

                if content:
                    # Cache hit: rebuild a response without calling the view.
                    return response_class(content=content, **response_kwargs)

            response = view_func(self, *args, **kwargs)
            if geojson_lookup:
                # NOTE(review): assumes the response is fully rendered when
                # .content is read — confirm render is forced by the caller.
                geojson_cache.set(geojson_lookup, response.content)

            return response
        return _wrapped_method
    return decorator
def save_history():
    """
    A decorator for class-based views, which save navigation history in
    session.
    """
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(self, request, *args, **kwargs):
            response = view_func(self, request, *args, **kwargs)

            # Most-recent-first stack of visited pages.
            visited = request.session.get('history', [])

            # Drop any earlier visit of this same page...
            visited = [entry for entry in visited if entry['path'] != request.path]

            # ...then push the current page and trim the overflow.
            model = self.model if self.model else self.queryset.model
            visited.insert(0, {'title': self.get_title(),
                               'path': request.path,
                               'modelname': model._meta.object_name.lower()})
            if len(visited) > app_settings['HISTORY_ITEMS_MAX']:
                visited.pop()

            request.session['history'] = visited
            return response
        return _wrapped_view
    return decorator
# Django settings for scholarec_web
import os
import sys
from django import VERSION
import mongoengine
sys.path.insert(0, '../..')
CUSTOM_USER_MODEL = bool(int(os.environ.get('CUSTOM_USER_MODEL', '1')))
MODE = os.environ.get('MODE', 'standalone')
BASE_ROOT = os.path.abspath(
os.path.join(os.path.split(__file__)[0]))
TEST_RUNNER = 'scholarec_web.users.tests.NoSQLTestRunner'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
'''
DATABASES = {
'default': {
'ENGINE': '',
},
}
'''
SESSION_ENGINE = 'mongoengine.django.sessions' # optional
_MONGODB_USER = os.environ.get('U_USER')
_MONGODB_PASSWD = os.environ.get('U_PASS')
#open(os.path.join(BASE_ROOT,'key.txt'),'rb').read()
#open('/home/arcolife/temp/A_PERSONAL_projects/recommender/django-scholarec/scholarec_web/scholarec_web/key.txt','rb').readline().strip('\n')
_MONGODB_HOST = 'localhost'
_MONGODB_NAME = os.environ.get('U_DB')
_MONGODB_DATABASE_HOST = \
'mongodb://%s:%s@%s/%s' \
% (_MONGODB_USER, _MONGODB_PASSWD, _MONGODB_HOST, _MONGODB_NAME)
mongoengine.connect(_MONGODB_NAME, host=_MONGODB_DATABASE_HOST)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db_scholarec', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Kolkata'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
#STATICFILES_ROOT = os.path.join(BASE_ROOT, 'static/')
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'scholarec_web.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'scholarec_web.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \
'scholarec_web/templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
#'haystack',
'search',
'social.apps.django_app.default',
'scholarec_web.app',
'django_facebook',
'south',
'open_facebook',
'users',
#'bootstrap_pagination',
#'django_extensions',
)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'arxiv',
},
}
'''
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
'''
#SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'django.core.context_processors.tz',
# django-facebook specific
'django_facebook.context_processors.facebook',
# and add request if you didn't do so already
'django.core.context_processors.request',
)
# python-social-auth backends; the commented entries document the full menu
# of providers available, only the enabled ones are active.
AUTHENTICATION_BACKENDS = (
    #'social.backends.amazon.AmazonOAuth2',
    #'social.backends.angel.AngelOAuth2',
    #'social.backends.aol.AOLOpenId',
    #'social.backends.appsfuel.AppsfuelOAuth2',
    #'social.backends.behance.BehanceOAuth2',
    #'social.backends.belgiumeid.BelgiumEIDOpenId',
    #'social.backends.bitbucket.BitbucketOAuth',
    #'social.backends.box.BoxOAuth2',
    #'social.backends.clef.ClefOAuth2',
    #'social.backends.coinbase.CoinbaseOAuth2',
    #'social.backends.dailymotion.DailymotionOAuth2',
    #'social.backends.disqus.DisqusOAuth2',
    #'social.backends.douban.DoubanOAuth2',
    #'social.backends.dropbox.DropboxOAuth',
    #'social.backends.evernote.EvernoteSandboxOAuth',
    'social.backends.facebook.FacebookAppOAuth2',
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.fedora.FedoraOpenId',
    #'social.backends.fitbit.FitbitOAuth',
    #'social.backends.flickr.FlickrOAuth',
    #'social.backends.foursquare.FoursquareOAuth2',
    'social.backends.github.GithubOAuth2',
    'social.backends.google.GoogleOAuth',
    'social.backends.google.GoogleOAuth2',
    'social.backends.google.GoogleOpenId',
    'social.backends.google.GooglePlusAuth',
    #'social.backends.instagram.InstagramOAuth2',
    #'social.backends.jawbone.JawboneOAuth2',
    'social.backends.linkedin.LinkedinOAuth',
    'social.backends.linkedin.LinkedinOAuth2',
    #'social.backends.live.LiveOAuth2',
    #'social.backends.livejournal.LiveJournalOpenId',
    #'social.backends.mailru.MailruOAuth2',
    #'social.backends.mendeley.MendeleyOAuth',
    #'social.backends.mendeley.MendeleyOAuth2',
    #'social.backends.mixcloud.MixcloudOAuth2',
    #'social.backends.odnoklassniki.OdnoklassnikiOAuth2',
    'social.backends.open_id.OpenIdAuth',
    #'social.backends.openstreetmap.OpenStreetMapOAuth',
    #'social.backends.orkut.OrkutOAuth',
    'social.backends.persona.PersonaAuth',
    #'social.backends.podio.PodioOAuth2',
    #'social.backends.rdio.RdioOAuth1',
    #'social.backends.rdio.RdioOAuth2',
    #'social.backends.readability.ReadabilityOAuth',
    'social.backends.reddit.RedditOAuth2',
    #'social.backends.runkeeper.RunKeeperOAuth2',
    #'social.backends.skyrock.SkyrockOAuth',
    'social.backends.soundcloud.SoundcloudOAuth2',
    'social.backends.stackoverflow.StackoverflowOAuth2',
    #'social.backends.steam.SteamOpenId',
    #'social.backends.stocktwits.StocktwitsOAuth2',
    #'social.backends.stripe.StripeOAuth2',
    #'social.backends.suse.OpenSUSEOpenId',
    #'social.backends.thisismyjam.ThisIsMyJamOAuth1',
    #'social.backends.trello.TrelloOAuth',
    #'social.backends.tripit.TripItOAuth',
    'social.backends.tumblr.TumblrOAuth',
    #'social.backends.twilio.TwilioAuth',
    'social.backends.twitter.TwitterOAuth',
    #'social.backends.vk.VKOAuth2',
    #'social.backends.weibo.WeiboOAuth2',
    #'social.backends.xing.XingOAuth',
    'social.backends.yahoo.YahooOAuth',
    #'social.backends.yahoo.YahooOpenId',
    #'social.backends.yammer.YammerOAuth2',
    #'social.backends.yandex.YandexOAuth2',
    #'social.backends.vimeo.VimeoOAuth1',
    'social.backends.email.EmailAuth',
    'social.backends.username.UsernameAuth',
    'django.contrib.auth.backends.ModelBackend',
    'django_facebook.auth_backends.FacebookBackend',
    'mongoengine.django.auth.MongoEngineBackend',
)

# Dead configuration kept as a string literal for reference; real credentials
# now come from the environment (see SOCIAL_AUTH_* settings below).
'''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
LINKEDIN_CONSUMER_KEY = ''
LINKEDIN_CONSUMER_SECRET = ''
ORKUT_CONSUMER_KEY = ''
ORKUT_CONSUMER_SECRET = ''
GOOGLE_CONSUMER_KEY = ''
GOOGLE_CONSUMER_SECRET = ''
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
FOURSQUARE_CONSUMER_KEY = ''
FOURSQUARE_CONSUMER_SECRET = ''
VK_APP_ID = ''
VK_API_SECRET = ''
LIVE_CLIENT_ID = ''
LIVE_CLIENT_SECRET = ''
SKYROCK_CONSUMER_KEY = ''
SKYROCK_CONSUMER_SECRET = ''
YAHOO_CONSUMER_KEY = ''
YAHOO_CONSUMER_SECRET = ''
READABILITY_CONSUMER_SECRET = ''
READABILITY_CONSUMER_SECRET = ''
'''
# Choose the user model: the django_facebook custom user by default, the
# stock Django user otherwise (controlled by the CUSTOM_USER_MODEL env flag).
if CUSTOM_USER_MODEL:
    AUTH_USER_MODEL = 'django_facebook.FacebookCustomUser'
else:
    AUTH_USER_MODEL = 'auth.User'
#AUTH_USER_MODEL = 'django_facebook.FacebookCustomUser'
# The 'member.UserProfile' assignment that used to live in the else branch
# was dead code: this unconditional assignment always overwrote it.
AUTH_PROFILE_MODULE = 'django_facebook.FacebookProfile'
# --- OAuth provider credentials (all injected via environment) --------------
FACEBOOK_APP_ID = os.environ.get('FB_KEY_L')
FACEBOOK_APP_SECRET = os.environ.get('FB_SECRET_L')
SOCIAL_AUTH_FACEBOOK_APP_ID = os.environ.get('FB_KEY_L')
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FB_SECRET_L')
#SOCIAL_AUTH_FACEBOOK_APP_NAMESPACE = 'scholarec'
#SOCIAL_AUTH_FACEBOOK_EXTENDED_PERMISSIONS = ['email']
SOCIAL_AUTH_TWITTER_KEY = os.environ.get('TWITTER_KEY')
SOCIAL_AUTH_TWITTER_SECRET = os.environ.get('TWITTER_SECRET')
SOCIAL_AUTH_GITHUB_KEY = os.environ.get('GITHUB_KEY')
SOCIAL_AUTH_GITHUB_SECRET = os.environ.get('GITHUB_SECRET')
SOCIAL_AUTH_LINKEDIN_KEY = os.environ.get('LINKEDIN_KEY')
SOCIAL_AUTH_LINKEDIN_SECRET = os.environ.get('LINKEDIN_SECRET')

LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/done/'
URL_PATH = ''
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
EMAIL_FROM = 'archit.py@gmail.com'

# Email / username signup flows for python-social-auth.
SOCIAL_AUTH_EMAIL_FORM_URL = '/signup-email'
SOCIAL_AUTH_EMAIL_FORM_HTML = 'email_signup.html'
SOCIAL_AUTH_EMAIL_VALIDATION_FUNCTION = 'scholarec_web.app.mail.send_validation'
SOCIAL_AUTH_EMAIL_VALIDATION_URL = '/email-sent/'
# SOCIAL_AUTH_USERNAME_FORM_URL = '/signup-username'
SOCIAL_AUTH_USERNAME_FORM_HTML = 'username_signup.html'

SOCIAL_AUTH_GOOGLE_OAUTH_SCOPE = [
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/userinfo.profile'
]

# Custom pipeline: inserts a local require_email step before mail validation.
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'scholarec_web.app.pipeline.require_email',
    'social.pipeline.mail.mail_validation',
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details'
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
FILTERS = {
    'require_debug_false': {
        '()': 'django.utils.log.RequireDebugFalse'
    }
}

MAIL_ADMINS = {
    'level': 'ERROR',
    'class': 'django.utils.log.AdminEmailHandler'
}

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'open_facebook': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django_facebook': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['console'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Attach the admin-email handler. MAIL_ADMINS is mutated in place below, so
# it only needs to be wired in once — the original re-assignment inside the
# version branch was redundant and has been removed.
LOGGING['handlers']['mail_admins'] = MAIL_ADMINS
if VERSION > (1, 4, 0):
    # Django >= 1.4 supports logging filters; suppress emails when DEBUG=True.
    LOGGING['filters'] = FILTERS
    MAIL_ADMINS['filters'] = ['require_debug_false']
# django-extensions graph_models defaults.
GRAPH_MODELS = {
    'all_applications': True,
    'group_models': True,
}

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}

# MODE (from the environment) selects the registration backend; the default
# 'standalone' mode leaves the settings above untouched.
if MODE == 'django_registration':
    FACEBOOK_REGISTRATION_BACKEND = 'facebook_example.registration_backends.DjangoRegistrationDefaultBackend'
    INSTALLED_APPS += (
        'registration',
    )
    ACCOUNT_ACTIVATION_DAYS = 10
elif MODE == 'userena':
    '''
    Settings based on these docs
    http://docs.django-userena.org/en/latest/installation.html#installing-django-userena
    '''
    FACEBOOK_REGISTRATION_BACKEND = 'django_facebook.registration_backends.UserenaBackend'
    # NOTE(review): this replaces (not extends) AUTHENTICATION_BACKENDS,
    # dropping the social/mongoengine backends — confirm intended.
    AUTHENTICATION_BACKENDS = (
        'django_facebook.auth_backends.FacebookBackend',
        'userena.backends.UserenaAuthenticationBackend',
        'django.contrib.auth.backends.ModelBackend',
    )
    EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
    LOGIN_REDIRECT_URL = '/accounts/%(username)s/'
    LOGIN_URL = '/accounts/signin/'
    LOGOUT_URL = '/accounts/signout/'
    ANONYMOUS_USER_ID = 1
    INSTALLED_APPS += (
        'userena',
        'guardian',
    )
from corpustools.gui.views import *
from corpustools.gui.models import CorpusModel
def test_discourse_view(qtbot):
    # Smoke test: the discourse view must construct without a corpus.
    widget = DiscourseView()
    qtbot.addWidget(widget)

def test_lexicon_view(qtbot, unspecified_test_corpus, settings):
    """Exercise search and type-highlighting in the lexicon table.

    Repeated searches with the same text advance through successive
    matches, hence the differing selected-row assertions below.
    """
    widget = LexiconView()
    model = CorpusModel(unspecified_test_corpus, settings)
    qtbot.addWidget(widget)
    # NOTE(review): CorpusModel is a model, not a widget — confirm
    # qtbot.addWidget(model) is intentional.
    qtbot.addWidget(model)
    widget.setModel(model)
    # Empty search text selects nothing.
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 0)
    widget.searchField.setText('ma')
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 1)
    assert(widget.table.selectionModel().selectedRows()[0].row() == 0)
    # Searching again jumps to the next match.
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 1)
    assert(widget.table.selectionModel().selectedRows()[0].row() == 2)
    # No word matches 'matemma', so the selection is cleared.
    widget.searchField.setText('matemma')
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 0)
    # Highlighting a word object selects its row.
    w = model.wordObject(0)
    widget.highlightType(w)
    assert(len(widget.table.selectionModel().selectedRows()) == 1)
    assert(widget.table.selectionModel().selectedRows()[0].row() == 0)

#def test_phono_search_results():
#    widget = PhonoSearchResults()
    #qtbot.addWidget(widget)
def test_tree_widget(qtbot):
    # Smoke test: construction only.
    widget = TreeWidget()
    qtbot.addWidget(widget)

def test_table_widget(qtbot):
    # Smoke test: construction only.
    widget = TableWidget()
    qtbot.addWidget(widget)

def test_text_view(qtbot):
    # Smoke test: construction only.
    widget = TextView()
    qtbot.addWidget(widget)

def test_variant_view(qtbot, unspecified_test_corpus):
    # VariantView takes (parent, word); a None parent is acceptable.
    w = unspecified_test_corpus['atema']
    widget = VariantView(None, w)
    qtbot.addWidget(widget)
import { isCssUrlWithoutSideEffects } from "../vite/styles";
describe("isCssUrlWithoutSideEffects", () => {
it("returns true for query parameters that result in an exported value with no side effects", () => {
let urls = [
"my/file.css?inline",
"my/file.css?inline-css",
"my/file.css?inline&raw",
"my/file.css?raw",
"my/file.css?raw&url",
"my/file.css?url",
"my/file.css?url&something=else",
"my/file.css?something=else&url",
"my/file.css?url&raw",
// other parameters mixed in
"my/file.css?inline&something=else",
"my/file.css?something=else&inline",
"my/file.css?inline&raw&something=else",
"my/file.css?something=else&inline&raw",
"my/file.css?raw&something=else&url",
"my/file.css?something=else&raw&url",
"my/file.css?url&something=else&raw",
"my/file.css?url&raw&something=else",
"my/file.css?something=else&url&raw",
];
for (let url of urls) {
expect(isCssUrlWithoutSideEffects(url)).toBe(true);
}
});
it("returns false for other query parameters or no parameters", () => {
let urls = [
"my/file.css",
"my/file.css?foo",
"my/file.css?foo=bar",
"my/file.css?foo&bar",
"my/file.css?inlinex",
"my/file.css?rawx",
"my/file.css?urlx",
// values other than blank since Vite doesn't match these
"my/file.css?inline=foo",
"my/file.css?inline-css=foo",
"my/file.css?raw=foo",
"my/file.css?url=foo",
// explicitly blank values since Vite doesn't match these
"my/file.css?inline=",
"my/file.css?inline-css=",
"my/file.css?raw=",
"my/file.css?url=",
];
for (let url of urls) {
expect(isCssUrlWithoutSideEffects(url)).toBe(false);
}
});
}); | typescript | github | https://github.com/remix-run/react-router | packages/react-router-dev/__tests__/styles-test.ts |
"""
Matrix Solver
Parses a calibration table and solves the equations for the alpha constants
used in the Hardy's Multi-Quadric method of calibration.
"""
import os, sys, string
from math import sqrt
from xml.dom import *
from xml.dom.minidom import *
import Numeric, LinearAlgebra
# Define useful functions
def length(v):
    """
    Return the magnitude (Euclidean norm) of a three-dimensional vector v.
    """
    x, y, z = v[0], v[1], v[2]
    return sqrt(x * x + y * y + z * z)
def vec_subtract(a, b):
    """
    Component-wise difference of two 3-vectors; returns the tuple a - b.
    """
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    return (dx, dy, dz)
def vec_multiply(a, b):
    """
    Dot product of two 3-vectors; returns a scalar.
    """
    total = 0
    for i in range(3):
        total += a[i] * b[i]
    return total
argc = len(sys.argv)
if argc < 2 or argc > 3:
print "Usage: matrix_solver.py input_file [output_file]"
sys.exit(1)
# XXX: Take out the debug file when ready.
dbg_file = file('debug_output.txt', 'w')
# Open the table file
in_file = file(sys.argv[1], 'r')
doc = parse(in_file)
root_element = doc.documentElement
# Get the offsets from the table
offset_elements = root_element.getElementsByTagName('Offset')
offset_table = {}
# This has to be done since keys and values in Python dictionaries are stored
# in random order.
keys_in_order = []
dbg_file.write('Parsed Offsets\n')
# Build an offset table.
for e in offset_elements:
curr_offset = string.split(e.firstChild.data)
qx = e.attributes['X'].nodeValue
qy = e.attributes['Y'].nodeValue
qz = e.attributes['Z'].nodeValue
q = ( float(qx), float(qy), float(qz) )
px = curr_offset[0]
py = curr_offset[1]
pz = curr_offset[2]
p = ( float(px), float(py), float(pz) )
dbg_file.write('(' + qx + ',' + qy + ',' + qz + '):(' + px + ',' + py + ',' + pz + ')\n')
dbg_file.write(str(q) + ' : ' + str(p) + '\n')
offset_table[q] = p
keys_in_order.append(q)
dbg_file.write('\nOffset Table\n')
dbg_file.write(str(offset_table))
# w[j](p) = sqrt( (p-p[j]) * (p-p[j]) + R^2 )
# s.t. 10 <= pow(R, 2) <= 1000
w_matrix_list = []
r_squared = 0.4
print 'Calculating W Matrix...'
for i in range(0, len(offset_table)):
w_matrix_row = []
p = offset_table[keys_in_order[i]]
for j in range(0, len(offset_table)):
pj = offset_table[keys_in_order[j]]
p_difference = vec_subtract(p, pj)
w = sqrt(vec_multiply(p_difference, p_difference) + r_squared)
w_matrix_row.append(w)
w_matrix_list.append(w_matrix_row)
dbg_file.write('\nW Matrix List\n')
dbg_file.write( str(w_matrix_list) )
w_matrix = Numeric.array(w_matrix_list)
dbg_file.write('\nW Matrix\n')
dbg_file.write( str(w_matrix) )
q_list = []
#for q in offset_table.values():
# q_list.append(list(q))
for k in keys_in_order:
q_list.append( list(k) )
dbg_file.write('\nQ List\n')
dbg_file.write( str(q_list) )
q_vector = Numeric.array(q_list)
print 'Solving for alpha vector...'
alpha_vector = LinearAlgebra.solve_linear_equations(w_matrix, q_vector)
dbg_file.write('\nAlpha Vector\n')
dbg_file.write( str(alpha_vector) )
print 'Alpha Vector found.'
out_file = ''
if argc == '2':
out_file = sys.argv[1]
else:
out_file = sys.argv[2]
in_file.close()
out_file = file(out_file, 'w')
alpha_vector_list = alpha_vector.tolist()
dbg_file.write('\nCheck Solution\n')
solution_check = Numeric.matrixmultiply(w_matrix, alpha_vector)
dbg_file.write( str(solution_check) )
# Add Alpha constants to XML Tree
for i in alpha_vector_list:
element = Element('Alpha')
element.setAttribute('X', str(i[0]))
element.setAttribute('Y', str(i[1]))
element.setAttribute('Z', str(i[2]))
root_element.appendChild(element)
out_file.write(doc.toprettyxml())
out_file.close() | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
//go:build !minimal
package builtinplugins
import (
"maps"
credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud"
credAzure "github.com/hashicorp/vault-plugin-auth-azure"
credCF "github.com/hashicorp/vault-plugin-auth-cf"
credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin"
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase"
dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
dbRedis "github.com/hashicorp/vault-plugin-database-redis"
dbRedisElastiCache "github.com/hashicorp/vault-plugin-database-redis-elasticache"
dbSnowflake "github.com/hashicorp/vault-plugin-database-snowflake"
logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin"
logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud"
logicalAzure "github.com/hashicorp/vault-plugin-secrets-azure"
logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin"
logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms"
logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes"
logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas"
logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap"
logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform"
credAws "github.com/hashicorp/vault/builtin/credential/aws"
credGitHub "github.com/hashicorp/vault/builtin/credential/github"
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
credOkta "github.com/hashicorp/vault/builtin/credential/okta"
credRadius "github.com/hashicorp/vault/builtin/credential/radius"
logicalAws "github.com/hashicorp/vault/builtin/logical/aws"
logicalConsul "github.com/hashicorp/vault/builtin/logical/consul"
logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad"
logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq"
logicalTotp "github.com/hashicorp/vault/builtin/logical/totp"
"github.com/hashicorp/vault/helper/pluginconsts"
dbCass "github.com/hashicorp/vault/plugins/database/cassandra"
dbHana "github.com/hashicorp/vault/plugins/database/hana"
dbInflux "github.com/hashicorp/vault/plugins/database/influxdb"
dbMongo "github.com/hashicorp/vault/plugins/database/mongodb"
dbMssql "github.com/hashicorp/vault/plugins/database/mssql"
dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql"
dbRedshift "github.com/hashicorp/vault/plugins/database/redshift"
"github.com/hashicorp/vault/sdk/helper/consts"
)
// newFullAddonRegistry returns the registry of every builtin "addon" plugin
// compiled into non-minimal builds: auth methods, database plugins and
// secrets engines. Entries marked consts.Removed keep their name reserved
// but fail to mount via removedFactory.
func newFullAddonRegistry() *registry {
	return &registry{
		credentialBackends: map[string]credentialBackend{
			pluginconsts.AuthTypeAliCloud: {Factory: credAliCloud.Factory},
			pluginconsts.AuthTypeAppId: {
				Factory:           removedFactory,
				DeprecationStatus: consts.Removed,
			},
			pluginconsts.AuthTypeAWS:        {Factory: credAws.Factory},
			pluginconsts.AuthTypeAzure:      {Factory: credAzure.Factory},
			pluginconsts.AuthTypeCF:         {Factory: credCF.Factory},
			pluginconsts.AuthTypeGCP:        {Factory: credGcp.Factory},
			pluginconsts.AuthTypeGitHub:     {Factory: credGitHub.Factory},
			pluginconsts.AuthTypeKerberos:   {Factory: credKerb.Factory},
			pluginconsts.AuthTypeKubernetes: {Factory: credKube.Factory},
			pluginconsts.AuthTypeLDAP:       {Factory: credLdap.Factory},
			pluginconsts.AuthTypeOCI:        {Factory: credOCI.Factory},
			pluginconsts.AuthTypeOkta:       {Factory: credOkta.Factory},
			// PCF is the deprecated alias of the CF auth method.
			pluginconsts.AuthTypePCF: {
				Factory:           credCF.Factory,
				DeprecationStatus: consts.Deprecated,
			},
			pluginconsts.AuthTypeRadius: {Factory: credRadius.Factory},
		},
		databasePlugins: map[string]databasePlugin{
			// These four plugins all use the same mysql implementation but with
			// different username settings passed by the constructor.
			"mysql-database-plugin":        {Factory: dbMysql.New(dbMysql.DefaultUserNameTemplate)},
			"mysql-aurora-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)},
			"mysql-rds-database-plugin":    {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)},
			"mysql-legacy-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)},

			"cassandra-database-plugin":         {Factory: dbCass.New},
			"couchbase-database-plugin":         {Factory: dbCouchbase.New},
			"elasticsearch-database-plugin":     {Factory: dbElastic.New},
			"hana-database-plugin":              {Factory: dbHana.New},
			"influxdb-database-plugin":          {Factory: dbInflux.New},
			"mongodb-database-plugin":           {Factory: dbMongo.New},
			"mongodbatlas-database-plugin":      {Factory: dbMongoAtlas.New},
			"mssql-database-plugin":             {Factory: dbMssql.New},
			"postgresql-database-plugin":        {Factory: dbPostgres.New},
			"redshift-database-plugin":          {Factory: dbRedshift.New},
			"redis-database-plugin":             {Factory: dbRedis.New},
			"redis-elasticache-database-plugin": {Factory: dbRedisElastiCache.New},
			"snowflake-database-plugin":         {Factory: dbSnowflake.New},
		},
		logicalBackends: map[string]logicalBackend{
			pluginconsts.SecretEngineAD: {
				Factory:           logicalAd.Factory,
				DeprecationStatus: consts.Deprecated,
			},
			pluginconsts.SecretEngineAlicloud: {Factory: logicalAlicloud.Factory},
			pluginconsts.SecretEngineAWS:      {Factory: logicalAws.Factory},
			pluginconsts.SecretEngineAzure:    {Factory: logicalAzure.Factory},
			pluginconsts.SecretEngineCassandra: {
				Factory:           removedFactory,
				DeprecationStatus: consts.Removed,
			},
			pluginconsts.SecretEngineConsul:     {Factory: logicalConsul.Factory},
			pluginconsts.SecretEngineGCP:        {Factory: logicalGcp.Factory},
			pluginconsts.SecretEngineGCPKMS:     {Factory: logicalGcpKms.Factory},
			pluginconsts.SecretEngineKubernetes: {Factory: logicalKube.Factory},
			pluginconsts.SecretEngineMongoDB: {
				Factory:           removedFactory,
				DeprecationStatus: consts.Removed,
			},
			pluginconsts.SecretEngineMongoDBAtlas: {Factory: logicalMongoAtlas.Factory},
			pluginconsts.SecretEngineMSSQL: {
				Factory:           removedFactory,
				DeprecationStatus: consts.Removed,
			},
			pluginconsts.SecretEngineMySQL: {
				Factory:           removedFactory,
				DeprecationStatus: consts.Removed,
			},
			pluginconsts.SecretEngineNomad: {Factory: logicalNomad.Factory},
			// openldap and ldap share one implementation.
			pluginconsts.SecretEngineOpenLDAP: {Factory: logicalLDAP.Factory},
			pluginconsts.SecretEngineLDAP:     {Factory: logicalLDAP.Factory},
			pluginconsts.SecretEnginePostgresql: {
				Factory:           removedFactory,
				DeprecationStatus: consts.Removed,
			},
			pluginconsts.SecretEngineRabbitMQ:  {Factory: logicalRabbit.Factory},
			pluginconsts.SecretEngineTerraform: {Factory: logicalTerraform.Factory},
			pluginconsts.SecretEngineTOTP:      {Factory: logicalTotp.Factory},
		},
	}
}
func extendAddonPlugins(reg *registry) {
addonReg := newFullAddonRegistry()
maps.Copy(reg.credentialBackends, addonReg.credentialBackends)
maps.Copy(reg.databasePlugins, addonReg.databasePlugins)
maps.Copy(reg.logicalBackends, addonReg.logicalBackends)
} | go | github | https://github.com/hashicorp/vault | helper/builtinplugins/registry_full.go |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\DataCollector;
use Symfony\Bundle\FrameworkBundle\Controller\RedirectController;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpKernel\DataCollector\RouterDataCollector as BaseRouterDataCollector;
/**
* @author Fabien Potencier <fabien@symfony.com>
*
* @final
*/
class RouterDataCollector extends BaseRouterDataCollector
{
    /**
     * Resolves the route name for the given request/controller pair.
     *
     * Redirect controllers reuse the request's already-resolved `_route`
     * attribute; every other controller is delegated to the base collector.
     */
    public function guessRoute(Request $request, mixed $controller): string
    {
        // Array callables are [object, method]; only the object matters here.
        $callable = \is_array($controller) ? $controller[0] : $controller;

        if ($callable instanceof RedirectController && $request->attributes->has('_route')) {
            return $request->attributes->get('_route');
        }

        return parent::guessRoute($request, $callable);
    }
}
# $Id: data.py,v 1.1 2013/07/10 02:25:44 paus Exp $
import FWCore.ParameterSet.Config as cms
# CMSSW process definition: read Run2011A Photon AOD data and fill MIT
# "Bambu" ntuples via the BambuFillAOD sequence.
process = cms.Process('FILEFI')

# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')

# Bookkeeping metadata stored with the produced dataset.
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('Mit_031'),
    annotation = cms.untracked.string('AOD'),
    name = cms.untracked.string('BambuProduction')
)

# -1 means: process every event in the input files.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

# Abort on missing products instead of silently skipping them.
process.options = cms.untracked.PSet(
    Rethrow = cms.untracked.vstring('ProductNotFound'),
    fileMode = cms.untracked.string('NOMERGE'),
)

# input source (commented alternatives are local test files)
process.source = cms.Source("PoolSource",
    # fileNames = cms.untracked.vstring('file:/tmp/FAB36B02-36D4-E111-92D6-0025B32036E2.root')
    # fileNames = cms.untracked.vstring('file:/tmp/F853EAC9-44C8-E111-9778-003048F110BE.root')
    # fileNames = cms.untracked.vstring('file:/tmp/4EA92226-F2C6-E111-A390-001D09F23A20.root')
    # fileNames = cms.untracked.vstring('file:/tmp/1C19C50D-AED9-E111-9DDF-E0CB4E553651.root')
    fileNames = cms.untracked.vstring('root://xrootd.unl.edu//store/data/Run2011A/Photon/AOD/21Jun2013-v1/10000/767F1882-00E0-E211-B32F-001E67396FA9.root')
)

# Drop DQM residue and an HLT trigger map the filler does not need.
process.source.inputCommands = cms.untracked.vstring("keep *",
    "drop *_MEtoEDMConverter_*_*",
    "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")

# other statements: conditions global tag for this re-reco pass
process.GlobalTag.globaltag = 'FT_R_53_LV3::All'

process.add_(cms.Service("ObjectService"))

process.load("MitProd.BAMBUSequences.BambuFillAOD_cfi")

#process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
# Placeholder output name; production tooling substitutes the real dataset name.
process.MitTreeFiller.TreeWriter.fileName = 'bambu-output-file-tmp'

process.bambu_step = cms.Path(process.BambuFillAOD)

# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestPipe:
def test_pipe(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
expected = DataFrame({"A": [1, 4, 9]})
if frame_or_series is Series:
obj = obj["A"]
expected = expected["A"]
f = lambda x, y: x**y
result = obj.pipe(f, 2)
tm.assert_equal(result, expected)
def test_pipe_tuple(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
obj = tm.get_obj(obj, frame_or_series)
f = lambda x, y: y
result = obj.pipe((f, "y"), 0)
tm.assert_equal(result, obj)
def test_pipe_tuple_error(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
obj = tm.get_obj(obj, frame_or_series)
f = lambda x, y: y
msg = "y is both the pipe target and a keyword argument"
with pytest.raises(ValueError, match=msg):
obj.pipe((f, "y"), x=1, y=0) | python | github | https://github.com/pandas-dev/pandas | pandas/tests/frame/methods/test_pipe.py |
{
"apiVersion": "v1",
"groupVersion": "coordination.k8s.io/v1alpha2",
"kind": "APIResourceList",
"resources": [
{
"kind": "LeaseCandidate",
"name": "leasecandidates",
"namespaced": true,
"singularName": "leasecandidate",
"storageVersionHash": "lvME0iHWE20=",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
}
]
} | json | github | https://github.com/kubernetes/kubernetes | api/discovery/apis__coordination.k8s.io__v1alpha2.json |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "UnhandledExceptionAtNewCheck.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
using namespace clang::ast_matchers;
namespace clang::tidy::bugprone {
namespace {

// Matches a CXXTryStmt that has at least one handler able to catch an
// exception type matched by InnerMatcher, or a catch-all handler.
AST_MATCHER_P(CXXTryStmt, hasHandlerFor,
              ast_matchers::internal::Matcher<QualType>, InnerMatcher) {
  const unsigned NH = Node.getNumHandlers();
  for (unsigned I = 0; I < NH; ++I) {
    const CXXCatchStmt *CatchS = Node.getHandler(I);
    // Check for generic catch handler (match anything).
    if (CatchS->getCaughtType().isNull())
      return true;
    // Match into a scratch builder so a failed handler does not pollute the
    // bound nodes; commit only on success.
    ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(CatchS->getCaughtType(), Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}

// Matches a CXXNewExpr whose selected operator new is not declared nothrow,
// i.e. an allocation that may throw on failure.
AST_MATCHER(CXXNewExpr, mayThrow) {
  const FunctionDecl *OperatorNew = Node.getOperatorNew();
  if (!OperatorNew)
    return false;
  return !OperatorNew->getType()->castAs<FunctionProtoType>()->isNothrow();
}

} // namespace
// Standard check constructor; this check reads no custom options.
UnhandledExceptionAtNewCheck::UnhandledExceptionAtNewCheck(
    StringRef Name, ClangTidyContext *Context)
    : ClangTidyCheck(Name, Context) {}
void UnhandledExceptionAtNewCheck::registerMatchers(MatchFinder *Finder) {
  // Handler types considered able to catch an allocation failure:
  // std::bad_alloc or std::exception, caught by value or by reference.
  auto BadAllocType =
      recordType(hasDeclaration(cxxRecordDecl(hasName("::std::bad_alloc"))));
  auto ExceptionType =
      recordType(hasDeclaration(cxxRecordDecl(hasName("::std::exception"))));
  auto BadAllocReferenceType = referenceType(pointee(BadAllocType));
  auto ExceptionReferenceType = referenceType(pointee(ExceptionType));
  auto CatchBadAllocType =
      qualType(hasCanonicalType(anyOf(BadAllocType, BadAllocReferenceType,
                                      ExceptionType, ExceptionReferenceType)));
  auto BadAllocCatchingTryBlock = cxxTryStmt(hasHandlerFor(CatchBadAllocType));
  auto FunctionMayNotThrow = functionDecl(isNoThrow());
  // Flag 'new' expressions that can throw, appear inside a nothrow function,
  // and are not enclosed in a try block that would catch the bad_alloc.
  Finder->addMatcher(cxxNewExpr(mayThrow(),
                                unless(hasAncestor(BadAllocCatchingTryBlock)),
                                hasAncestor(FunctionMayNotThrow))
                         .bind("new-expr"),
                     this);
}
void UnhandledExceptionAtNewCheck::check(
    const MatchFinder::MatchResult &Result) {
  // The matcher already guarantees this allocation can throw in a nothrow
  // context with no suitable handler; just report its location.
  if (const auto *NewExpr = Result.Nodes.getNodeAs<CXXNewExpr>("new-expr"))
    diag(NewExpr->getBeginLoc(),
         "missing exception handler for allocation failure at 'new'");
}
} // namespace clang::tidy::bugprone | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/bugprone/UnhandledExceptionAtNewCheck.cpp |
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleActionFail
from ansible.executor.module_common import _apply_action_arg_defaults
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin that routes generic ``service`` tasks to the module
    matching the target host's service manager (systemd, sysvinit, ...)."""

    TRANSFERS_FILES = False

    # Parameters the generic interface accepts but a specific backend does
    # not understand; they are stripped (with a warning) before dispatch.
    UNUSED_PARAMS = {
        'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
    }

    # HACK: list of unqualified service manager names that are/were built-in, we'll prefix these with `ansible.legacy` to
    # avoid collisions with collections search
    BUILTIN_SVC_MGR_MODULES = set(['openwrt_init', 'service', 'systemd', 'sysvinit'])

    def run(self, tmp=None, task_vars=None):
        """Handler for service operations.

        Determines the concrete service-manager module (from the ``use``
        argument, host facts, or a minimal fact-gathering pass), adapts the
        task arguments for it, and executes it, raising
        AnsibleActionFail when no manager can be detected.
        """
        self._supports_check_mode = True
        self._supports_async = True

        super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # An explicit user choice wins; 'auto' triggers detection below.
        module = self._task.args.get('use', 'auto').lower()

        if module == 'auto':
            try:
                # if we delegate, we should use delegated host's facts
                expr = "hostvars[delegate_to].ansible_facts.service_mgr" if self._task.delegate_to else "ansible_facts.service_mgr"
                module = self._templar.resolve_variable_expression(expr, local_variables=dict(delegate_to=self._task.delegate_to))
            except Exception:
                pass  # could not get it from template!

        try:
            if module == 'auto':
                # Facts were unavailable: run a minimal setup pass to detect
                # the service manager on the target host.
                facts = self._execute_module(
                    module_name='ansible.legacy.setup',
                    module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
                self._display.debug("Facts %s" % facts)
                module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')

            # Fall back to the generic service module when detection failed
            # or the detected plugin is not available.
            if not module or module == 'auto' or not self._shared_loader_obj.module_loader.has_plugin(module):
                module = 'ansible.legacy.service'

            if module != 'auto':
                # run the 'service' module
                new_module_args = self._task.args.copy()
                if 'use' in new_module_args:
                    del new_module_args['use']

                if module in self.UNUSED_PARAMS:
                    for unused in self.UNUSED_PARAMS[module]:
                        if unused in new_module_args:
                            del new_module_args[unused]
                            self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))

                # get defaults for specific module
                context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
                new_module_args = _apply_action_arg_defaults(context.resolved_fqcn, self._task, new_module_args, self._templar)

                # collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
                if module in self.BUILTIN_SVC_MGR_MODULES:
                    module = 'ansible.legacy.' + module

                self._display.vvvv("Running %s" % module)
                return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)
            else:
                raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
        finally:
            # Async tasks clean up their own tmpdir when they complete.
            if not self._task.async_val:
                self._remove_tmp_path(self._connection._shell.tmpdir)
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for Firefox 2.0."""
import pywintypes
import time
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# Default version
version = "2.0.0.6"
DEFAULT_PATH = r"c:\program files\mozilla firefox\firefox.exe"
# TODO(jhaas): the Firefox scraper is a bit rickety at the moment. Known
# issues: 1) won't work if the default profile puts toolbars in different
# locations, 2) uses sleep() statements rather than more robust checks,
# 3) fails badly if an existing Firefox window is open when the scrape
# is invoked. This needs to be fortified at some point.
def GetBrowser(path):
  """Invoke the Firefox browser and return the process and window.

  Args:
    path: full path to browser; falls back to DEFAULT_PATH when empty

  Returns:
    A tuple of (process handle, main window, render pane)
  """
  browser = path or DEFAULT_PATH

  # Launch Firefox and wait for its main window to appear.
  (proc, wnd) = windowing.InvokeAndWait(browser)

  # Drill down to the content pane inside the Mozilla window hierarchy.
  render_pane = windowing.FindChildWindow(
      wnd,
      "MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")

  return (proc, wnd, render_pane)
def InvokeBrowser(path):
  """Invoke the Firefox browser, reusing a running instance when possible.

  Args:
    path: full path to browser

  Returns:
    A tuple of (main window, process handle, render pane); the process
    handle is None when an already-running window was reused.
  """
  # Reuse an existing instance of the browser if we can find one. This
  # may not work correctly, especially if the window is behind other windows.
  existing = windowing.FindChildWindows(0, "MozillaUIWindowClass")
  if existing:
    wnd = existing[0]
    proc = None
  else:
    # No running instance: launch a fresh Firefox.
    (proc, wnd) = windowing.InvokeAndWait(path)

  # Locate the content pane inside the Mozilla window hierarchy.
  render_pane = windowing.FindChildWindow(
      wnd,
      "MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")

  return (wnd, proc, render_pane)
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
  """Invoke a browser, send it to a series of URLs, and save its output.

  Args:
    urls: list of URLs to scrape
    outdir: directory to place output
    size: size of browser window to use
    pos: position of browser window
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args; "path" overrides the browser binary,
      "filename" (string or callable taking the URL) overrides output naming

  Returns:
    None if success, else an error string
  """
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  else: path = DEFAULT_PATH

  (wnd, proc, render_pane) = InvokeBrowser(path)

  # Resize and reposition the frame
  windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)

  # NOTE: fixed sleeps rather than robust readiness checks (known weakness,
  # see module header).
  time.sleep(3)

  # Firefox is a bit of a pain: it doesn't use standard edit controls,
  # and it doesn't display a throbber when there's no tab. Let's make
  # sure there's at least one tab, then select the first one
  mouse.ClickInWindow(wnd)
  keyboard.TypeString("[t]", True)
  mouse.ClickInWindow(wnd, (30, 115))
  time.sleep(2)

  timedout = False

  # Visit each URL we're given (accept a bare string as a one-element list)
  if type(urls) in types.StringTypes: urls = [urls]

  for url in urls:
    # Use keyboard shortcuts
    keyboard.TypeString("{d}", True)
    keyboard.TypeString(url)
    keyboard.TypeString("\n")

    # Wait for the page to finish loading; a negative result means timeout
    load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
    timedout = load_time < 0

    if timedout:
      break

    # Scrape the page
    image = windowing.ScrapeWindow(render_pane)

    # Save to disk
    if "filename" in kwargs:
      if callable(kwargs["filename"]):
        filename = kwargs["filename"](url)
      else:
        filename = kwargs["filename"]
    else:
      filename = windowing.URLtoFilename(url, outdir, ".bmp")
    image.save(filename)

  # Close all the tabs, cheesily
  mouse.ClickInWindow(wnd)

  while len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
    keyboard.TypeString("[w]", True)
    time.sleep(1)

  if timedout:
    return "timeout"
def Time(urls, size, timeout, **kwargs):
  """Measure how long it takes to load each of a series of URLs

  Args:
    urls: list of URLs to time
    size: size of browser window to use
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args; "path" overrides the browser binary

  Returns:
    A list of tuples (url, time). "time" can be "crashed" or "timeout"
  """
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  else: path = DEFAULT_PATH

  proc = None

  # Visit each URL we're given (accept a bare string as a one-element list)
  if type(urls) in types.StringTypes: urls = [urls]

  ret = []
  for url in urls:
    try:
      # Invoke the browser if necessary (it is closed after every URL, so
      # this normally launches once per iteration)
      if not proc:
        (wnd, proc, render_pane) = InvokeBrowser(path)

        # Resize and reposition the frame
        windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
        time.sleep(3)

        # Firefox is a bit of a pain: it doesn't use standard edit controls,
        # and it doesn't display a throbber when there's no tab. Let's make
        # sure there's at least one tab, then select the first one
        mouse.ClickInWindow(wnd)
        keyboard.TypeString("[t]", True)
        mouse.ClickInWindow(wnd, (30, 115))
        time.sleep(2)

      # Use keyboard shortcuts
      keyboard.TypeString("{d}", True)
      keyboard.TypeString(url)
      keyboard.TypeString("\n")

      # Wait for the page to finish loading; a negative result means timeout
      load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
      timedout = load_time < 0

      if timedout:
        load_time = "timeout"

      # Try to close the browser; if this fails it's probably a crash
      mouse.ClickInWindow(wnd)
      count = 0
      while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
             and count < 5):
        keyboard.TypeString("[w]", True)
        time.sleep(1)
        count = count + 1

      # Windows still open after five attempts: kill the process outright
      if len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
        windowing.EndProcess(proc)
        load_time = "crashed"

      proc = None
    except pywintypes.error:
      # Any win32 error mid-measurement is treated as a browser crash
      proc = None
      load_time = "crashed"

    ret.append( (url, load_time) )

  # Best-effort cleanup if a browser instance is still around
  if proc:
    count = 0
    while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
           and count < 5):
      keyboard.TypeString("[w]", True)
      time.sleep(1)
      count = count + 1

  return ret
def main():
  """Run a smoke-test scrape of three sites into a fixed output directory.

  Returns:
    0 on success (used as the process exit code).
  """
  # We're being invoked rather than imported, so run some tests
  path = r"c:\sitecompare\scrapes\Firefox\2.0.0.6"
  windowing.PreparePath(path)

  # Scrape three sites and save the results
  Scrape(
    ["http://www.microsoft.com", "http://www.google.com",
     "http://www.sun.com"],
    path, (1024, 768), (0, 0))

  return 0

if __name__ == "__main__":
  # Bug fix: the original called sys.exit() without ever importing sys,
  # raising a NameError when the script was run directly.
  import sys
  sys.exit(main())
rule_files:
- 'my_rule' # fine
- 'my/*/rule' # bad | unknown | github | https://github.com/prometheus/prometheus | config/testdata/rules.bad.yml |
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- ============================================================= -->
<!-- CLASS: FutureDataInputStreamBuilder -->
<!-- ============================================================= -->
# class `org.apache.hadoop.fs.FutureDataInputStreamBuilder`
<!-- MACRO{toc|fromDepth=1|toDepth=2} -->
An interface offering of the Builder pattern for creating Java `Future`
references to `FSDataInputStream` and its subclasses.
It is used to initiate a (potentially asynchronous) operation to open an existing
file for reading.
## <a name="History"></a> History
### Hadoop 3.3.0: API introduced
[HADOOP-15229](https://issues.apache.org/jira/browse/HADOOP-15229)
_Add FileSystem builder-based openFile() API to match createFile()_
* No `opt(String key, long value)` method was available.
* the `withFileStatus(status)` call required a non-null parameter.
* Sole Filesystem to process options and file status was S3A;
* Only the s3a specific options were the S3 select and `fs.s3a.experimental.input.fadvise`
* S3A Filesystem raised `IllegalArgumentException` if a file status was passed in
and the path of the filestatus did not match the path of the `openFile(path)` call.
This is the baseline implementation. To write code guaranteed to compile against this version,
use the `opt(String, String)` and `must(String, String)` methods, converting numbers to
string explicitly.
```java
fs.open("s3a://bucket/file")
.opt("fs.option.openfile.length", Long.toString(length))
.build().get()
```
### Hadoop 3.3.5: standardization and expansion
[HADOOP-16202](https://issues.apache.org/jira/browse/HADOOP-16202)
_Enhance openFile() for better read performance against object stores_
* `withFileStatus(null)` required to be accepted (and ignored)
* only the filename part of any supplied FileStatus path must match the
filename passed in on `openFile(path)`.
* An `opt(String key, long value)` option was added. *This is now deprecated as
  it caused regressions.*
* Standard `fs.option.openfile` options defined.
* S3A FS to use openfile length option, seek start/end options not _yet_ used.
* Azure ABFS connector takes a supplied `VersionedFileStatus` and omits any
HEAD probe for the object.
### Hadoop 3.3.6: API change to address operator overload bugs.
new `optLong()`, `optDouble()`, `mustLong()` and `mustDouble()` builder methods.
* See [HADOOP-18724](https://issues.apache.org/jira/browse/HADOOP-18724) _Open file fails with NumberFormatException for S3AFileSystem_,
which was somehow caused by the overloaded `opt(long)`.
* Specification updated to declare that unparseable numbers MUST be treated as "unset" and the default
value used instead.
## Invariants
The `FutureDataInputStreamBuilder` interface does not validate parameters or
the state of `FileSystem` until `build()` is invoked and/or during the
asynchronous open operation itself.
Some aspects of the state of the filesystem, MAY be checked in the initial
`openFile()` call, provided they are known to be invariants which will not
change between `openFile()` and the `build().get()` sequence. For example,
path validation.
## <a name="parameters"></a> Implementation-agnostic parameters
### <a name="Builder.bufferSize"></a> `FutureDataInputStreamBuilder bufferSize(int bufSize)`
Set the size of the buffer to be used.
### <a name="Builder.withFileStatus"></a> `FutureDataInputStreamBuilder withFileStatus(FileStatus status)`
A `FileStatus` instance which refers to the file being opened.
This MAY be used by implementations to short-circuit checks for the file,
So potentially saving on remote calls especially to object stores.
Requirements:
* `status != null`
* `status.getPath().getName()` == the name of the file being opened.
The path validation MUST take place if the store uses the `FileStatus` when
it opens files, and MAY be performed otherwise. The validation
SHOULD be postponed until the `build()` operation.
This operation should be considered a hint to the filesystem.
If a filesystem implementation extends the `FileStatus` returned in its
implementation MAY use this information when opening the file.
This is relevant with those stores which return version/etag information,
&mdash; they MAY use this to guarantee that the file they opened
is exactly the one returned in the listing.
The final `status.getPath().getName()` element of the supplied status MUST equal
the name value of the path supplied to the `openFile(path)` call.
Filesystems MUST NOT validate the rest of the path.
This is needed to support viewfs and other mount-point wrapper filesystems
where schemas and paths are different. These often create their own FileStatus results
Preconditions
```python
status == null or status.getPath().getName() == path.getName()
```
Filesystems MUST NOT require the class of `status` to equal
that of any specific subclass their implementation returns in filestatus/list
operations. This is to support wrapper filesystems and serialization/deserialization
of the status.
### <a name="optional"></a> Set optional or mandatory parameters
```java
FutureDataInputStreamBuilder opt(String key, String value)
FutureDataInputStreamBuilder opt(String key, int value)
FutureDataInputStreamBuilder opt(String key, boolean value)
FutureDataInputStreamBuilder optLong(String key, long value)
FutureDataInputStreamBuilder optDouble(String key, double value)
FutureDataInputStreamBuilder must(String key, String value)
FutureDataInputStreamBuilder must(String key, int value)
FutureDataInputStreamBuilder must(String key, boolean value)
FutureDataInputStreamBuilder mustLong(String key, long value)
FutureDataInputStreamBuilder mustDouble(String key, double value)
```
Set optional or mandatory parameters to the builder. Using `opt()` or `must()`,
client can specify FS-specific parameters without inspecting the concrete type
of `FileSystem`.
Example:
```java
out = fs.openFile(path)
.must("fs.option.openfile.read.policy", "random")
.optLong("fs.http.connection.timeout", 30_000L)
.withFileStatus(statusFromListing)
.build()
.get();
```
Here the read policy of `random` has been specified,
with the requirement that the filesystem implementation must understand the option.
An http-specific option has been supplied which may be interpreted by any store;
If the filesystem opening the file does not recognize the option, it can safely be
ignored.
### <a name="usage"></a> When to use `opt` versus `must`
The difference between `opt` versus `must` is how the FileSystem opening
the file must react to an option which it does not recognize.
```python
def must(name, value):
if not name in known_keys:
raise IllegalArgumentException
if not name in supported_keys:
raise UnsupportedException
def opt(name, value):
if not name in known_keys:
# ignore option
```
For any known key, the validation of the `value` argument MUST be the same
irrespective of how the (key, value) pair was declared.
1. For a filesystem-specific option, it is the choice of the implementation
how to validate the entry.
1. For standard options, the specification of what is a valid `value` is
defined in this filesystem specification, validated through contract
tests.
## <a name="implementation"></a> Implementation Notes
Checking for supported options must be performed in the `build()` operation.
1. If a mandatory parameter declared via `must(key, value)` is not recognized,
`IllegalArgumentException` MUST be thrown.
1. If a mandatory parameter declared via `must(key, value)` relies on
a feature which is recognized but not supported in the specific
`FileSystem`/`FileContext` instance `UnsupportedException` MUST be thrown.
Parsing of numeric values SHOULD trim any string and if the value
cannot be parsed as a number, downgrade to any default value supplied.
This is to address [HADOOP-18724](https://issues.apache.org/jira/browse/HADOOP-18724)
_Open file fails with NumberFormatException for S3AFileSystem_, which was caused by the overloaded `opt()`
builder parameter binding to `opt(String, double)` rather than `opt(String, long)` when a long
value was passed in.
The behavior of resolving the conflicts between the parameters set by
builder methods (i.e., `bufferSize()`) and `opt()`/`must()` is as follows:
> The last option specified defines the value and its optional/mandatory state.
If the `FileStatus` option passed in `withFileStatus()` is used, implementations
MUST accept all subclasses of `FileStatus`, including `LocatedFileStatus`,
rather than just any FS-specific subclass implemented by the implementation
(e.g `S3AFileStatus`). They MAY simply ignore those which are not the
custom subclasses.
This is critical to ensure safe use of the feature: directory listing/
status serialization/deserialization can result in the `withFileStatus()`
argument not being the custom subclass returned by the Filesystem instance's
own `getFileStatus()`, `listFiles()`, `listLocatedStatus()` calls, etc.
In such a situation the implementations must:
1. Verify that `status.getPath().getName()` matches the current `path.getName()`
value. The rest of the path MUST NOT be validated.
1. Use any status fields as desired -for example the file length.
Even if no values of the status are used, the presence of the argument
can be interpreted as the caller declaring that they believe the file
to be present and of the given size.
## <a name="builder"></a> Builder interface
### <a name="build"></a> `CompletableFuture<FSDataInputStream> build()`
Return an `CompletableFuture<FSDataInputStream>` which, when successfully
completed, returns an input stream which can read data from the filesystem.
The `build()` operation MAY perform the validation of the file's existence,
its kind, so rejecting attempts to read from a directory or non-existent
file. Alternatively
* file existence/status checks MAY be performed asynchronously within the returned
`CompletableFuture<>`.
* file existence/status checks MAY be postponed until the first byte is read in
any of the read such as `read()` or `PositionedRead`.
That is, the precondition `exists(FS, path)` and `isFile(FS, path)` are
only guaranteed to have been met after the `get()` called on returned future
and an attempt has been made to read the stream.
Thus, if even when file does not exist, or is a directory rather than a file,
the following call MUST succeed, returning a `CompletableFuture` to be evaluated.
```java
Path p = new Path("file://tmp/file-which-does-not-exist");
CompletableFuture<FSDataInputStream> future = p.getFileSystem(conf)
.openFile(p)
.build();
```
The inability to access/read a file MUST raise an `IOException`or subclass
in either the future's `get()` call, or, for late binding operations,
when an operation to read data is invoked.
Therefore the following sequence SHALL fail when invoked on the
`future` returned by the previous example.
```java
future.get().read();
```
Access permission checks have the same visibility requirements: permission failures
MUST be delayed until the `get()` call and MAY be delayed into subsequent operations.
Note: some operations on the input stream, such as `seek()` may not attempt any IO
at all. Such operations MAY NOT raise exceptions when interacting with
nonexistent/unreadable files.
## <a name="options"></a> Standard `openFile()` options since hadoop branch-3.3
These are options which `FileSystem` and `FileContext` implementation
MUST recognise and MAY support by changing the behavior of
their input streams as appropriate.
Hadoop 3.3.0 added the `openFile()` API; these standard options were defined in
a later release. Therefore, although they are "well known", unless confident that
the application will only be executed against releases of Hadoop which knows of
the options -applications SHOULD set the options via `opt()` calls rather than `must()`.
When opening a file through the `openFile()` builder API, callers MAY use
both `.opt(key, value)` and `.must(key, value)` calls to set standard and
filesystem-specific options.
If set as an `opt()` parameter, unsupported "standard" options MUST be ignored,
as MUST unrecognized standard options.
If set as a `must()` parameter, unsupported "standard" options MUST be ignored;
unrecognized standard options MUST be rejected.
The standard `openFile()` options are defined
in `org.apache.hadoop.fs.OpenFileOptions`; they all SHALL start
with `fs.option.openfile.`.
Note that while all `FileSystem`/`FileContext` instances SHALL support these
options to the extent that `must()` declarations SHALL NOT fail, the
implementations MAY support them to the extent of interpreting the values. This
means that it is not a requirement for the stores to actually read the read
policy or file length values and use them when opening files.
Unless otherwise stated, they SHOULD be viewed as hints.
Note: if a standard option is added such that if set but not
supported would be an error, then implementations SHALL reject it. For example,
the S3A filesystem client supports the ability to push down SQL commands. If
something like that were ever standardized, then the use of the option, either
in `opt()` or `must()` argument MUST be rejected for filesystems which don't
support the feature.
### <a name="buffer.size"></a> Option: `fs.option.openfile.buffer.size`
Read buffer size in bytes.
This overrides the default value set in the configuration with the option
`io.file.buffer.size`.
It is supported by all filesystem clients which allow for stream-specific buffer
sizes to be set via `FileSystem.open(path, buffersize)`.
### <a name="read.policy"></a> Option: `fs.option.openfile.read.policy`
Declare the read policy of the input stream. This is a hint as to what the
expected read pattern of an input stream will be. This MAY control readahead,
buffering and other optimizations.
Sequential reads may be optimized with prefetching data and/or reading data in
larger blocks. Some applications (e.g. distCp) perform sequential IO even over
columnar data.
In contrast, random IO reads data in different parts of the file using a
sequence of `seek()/read()`
or via the `PositionedReadable` or `ByteBufferPositionedReadable` APIs.
Random IO performance may be best if little/no prefetching takes place, along
with other possible optimizations
Queries over columnar formats such as Apache ORC and Apache Parquet perform such
random IO; other data formats may be best read with sequential or whole-file
policies.
What is key is that optimizing reads for sequential reads may impair random
performance -and vice versa.
1. The seek policy is a hint; even if declared as a `must()` option, the
filesystem MAY ignore it.
1. The interpretation/implementation of a policy is a filesystem specific
behavior -and it may change with Hadoop releases and/or specific storage
subsystems.
1. If a policy is not recognized, the filesystem client MUST ignore it.
| Policy | Meaning |
|--------------|------------------------------------------------------------------------|
| `adaptive` | Any adaptive policy implemented by the store. |
| `avro` | This is an avro format which will be read sequentially |
| `csv` | This is CSV data which will be read sequentially |
| `default` | The default policy for this store. Generally "adaptive". |
| `columnar` | This is any columnar format other than ORC/parquet. |
| `hbase` | This is an HBase Table |
| `json` | This is a UTF-8 JSON/JSON lines format which will be read sequentially |
| `orc` | This is an ORC file. Optimize for it. |
| `parquet` | This is a Parquet file. Optimize for it. |
| `random` | Optimize for random access. |
| `sequential` | Optimize for sequential access. |
| `vector` | The Vectored IO API is intended to be used. |
| `whole-file` | The whole file will be read. |
Choosing the wrong read policy for an input source may be inefficient but never fatal.
A list of read policies MAY be supplied; the first one recognized/supported by
the filesystem SHALL be the one used. This allows for configurations which are compatible
across versions. A policy `parquet, columnar, vector, random, adaptive` will use the parquet policy for
any filesystem aware of it, falling back to `columnar`, `vector`, `random` and finally `adaptive`.
The S3A connector will recognize the `random` since Hadoop 3.3.5 (i.e. since the `openFile()` API
was added), and `vector` from Hadoop 3.4.0.
The S3A and ABFS input streams both implement
the [IOStatisticsSource](iostatistics.html) API, and can be queried for their IO
Performance.
*Tip:* log the `toString()` value of input streams at `DEBUG`. The S3A and ABFS
Input Streams log read statistics, which can provide insight about whether reads
are being performed efficiently or not.
_Further reading_
* [Linux fadvise()](https://linux.die.net/man/2/fadvise).
* [Windows `CreateFile()`](https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#caching-behavior)
#### <a name="read.policy.adaptive"></a> Read Policy `adaptive`
Try to adapt the seek policy to the read pattern of the application.
The `normal` policy of the S3A client and the sole policy supported by
the `wasb:` client are both adaptive -they assume sequential IO, but once a
backwards seek/positioned read call is made the stream switches to random IO.
Other filesystem implementations may wish to adopt similar strategies, and/or
extend the algorithms to detect forward seeks and/or switch from random to
sequential IO if that is considered more efficient.
Adaptive read policies compensate for the absence of the ability to
declare the seek policy in the `open()` API, so requiring it to be declared, if
configurable, in the cluster/application configuration. However, the switch from
sequential to random seek policies may be expensive.
When applications explicitly set the `fs.option.openfile.read.policy` option, if
they know their read plan, they SHOULD declare which policy is most appropriate.
#### <a name="read.policy.default"></a> Read Policy `default`
The default policy for the filesystem instance.
Implementation/installation-specific.
#### <a name="read.policy.sequential"></a> Read Policy `sequential`
Expect sequential reads from the first byte read to the end of the file/until
the stream is closed.
#### <a name="read.policy.random"></a> Read Policy `random`
Expect `seek()/read()` sequences, or use of `PositionedReadable`
or `ByteBufferPositionedReadable` APIs.
#### <a name="read.policy.vector"></a> Read Policy `vector`
This declares that the caller intends to use the Vectored read API of
[HADOOP-11867](https://issues.apache.org/jira/browse/HADOOP-11867)
_Add a high-performance vectored read API_.
This is a hint: it is not a requirement when using the API.
It does inform the implementations that the stream should be
configured for optimal vectored IO performance, if such a
feature has been implemented.
It is *not* exclusive: the same stream may still be used for
classic `InputStream` and `PositionedRead` API calls.
Implementations SHOULD use the `random` read policy
with these operations.
#### <a name="read.policy.whole-file"></a> Read Policy `whole-file`
This declares that the whole file is to be read end-to-end; the file system client is free to enable
whatever strategies maximise performance for this. In particular, larger ranged reads/GETs can
deliver high bandwidth by reducing socket/TLS setup costs and providing a connection long-lived
enough for TCP flow control to determine the optimal download rate.
Strategies can include:
* Initiate an HTTP GET of the entire file in `openFile()` operation.
* Prefetch data in large blocks, possibly in parallel read operations.
Applications which know that the entire file is to be read from an opened stream SHOULD declare this
read policy.
#### <a name="read.policy.columnar"></a> Read Policy `columnar`
Declare that the data is some (unspecific) columnar format and that read sequences
should be expected to be random IO of whole column stripes/rowgroups, possibly fetching associated
column statistics first, to determine whether a scan of a stripe/rowgroup can
be skipped entirely.
#### <a name="read.policy.fileformat.parquet"></a> File Format Read Policies `parquet`, and `orc`
These are read policies which declare that the file is of a specific columnar format
and that the input stream MAY be optimized for reading from these.
In particular
* File footers may be fetched and cached.
* Vector IO and random IO SHOULD be expected.
These read policies are a Hadoop 3.4.x addition, so applications and
libraries targeting multiple versions, SHOULD list their fallback
policies if these are not recognized, e.g. request a policy such as `parquet, vector, random`.
#### <a name="read.policy.fileformat.sequential"></a> File format Read Policies `avro`, `json` and `csv`
These are read policies which declare that the file is of a specific sequential format
and that the input stream MAY be optimized for reading from these.
These read policies are a Hadoop 3.4.x addition, so applications and
libraries targeting multiple versions, SHOULD list their fallback
policies if these are not recognized, e.g. request a policy such as `avro, sequential`.
#### <a name="read.policy.fileformat.hbase"></a> File Format Read Policy `hbase`
The file is an HBase table.
Use whatever policy is appropriate for these files, where `random` is
what should be used unless there are specific optimizations related to HBase.
### <a name="openfile.length"></a> Option: `fs.option.openfile.length`: `Long`
Declare the length of a file.
This can be used by clients to skip querying a remote store for the size
of/existence of a file when opening it, similar to declaring a file status
through the `withFileStatus()` option.
If supported by a filesystem connector, this option MUST be interpreted as
declaring the minimum length of the file:
1. If the value is negative, the option SHALL be considered unset.
2. It SHALL NOT be an error if the actual length of the file is greater than
this value.
3. `read()`, `seek()` and positioned read calls MAY use a position across/beyond
this length but below the actual length of the file. Implementations MAY
raise `EOFExceptions` in such cases, or they MAY return data.
If this option is used by the FileSystem implementation
*Implementor's Notes*
* A value of `fs.option.openfile.length` < 0 MUST be ignored.
* If a file status is supplied along with a value in `fs.opt.openfile.length`;
the file status values take precedence.
### <a name="split.start"></a> Options: `fs.option.openfile.split.start` and `fs.option.openfile.split.end`: `Long`
Declare the start and end of the split when a file has been split for processing
in pieces.
1. If a value is negative, the option SHALL be considered unset.
1. Filesystems MAY assume that the length of the file is greater than or equal
to the value of `fs.option.openfile.split.end`.
1. And that they MAY raise an exception if the client application reads past the
value set in `fs.option.openfile.split.end`.
1. The pair of options MAY be used to optimise the read plan, such as setting
the content range for GET requests, or using the split end as an implicit
declaration of the guaranteed minimum length of the file.
1. If both options are set, and the split start is declared as greater than the
split end, then the split start SHOULD just be reset to zero, rather than
rejecting the operation.
The split end value can provide a hint as to the end of the input stream. The
split start can be used to optimize any initial read offset for filesystem
clients.
*Note for implementors: applications will read past the end of a split when they
need to read to the end of a record/line which begins before the end of the
split.
Therefore clients MUST be allowed to `seek()`/`read()` past the length
set in `fs.option.openfile.split.end` if the file is actually longer
than that value.
### <a name="footer.cache"></a> Option: `fs.option.openfile.footer.cache`: `Boolean`
Should a footer be cached?
* This is a hint for clients which cache footers.
* If a format with known footers is declared in the read policy, the
default footer cache policy of that file type SHALL be used.
This option allows for that default policy to be overridden.
This is recommended if an application wishes to explicitly declare that Parquet/ORC files
are being read -but does not want or need the filesystem stream to cache any footer
because the application itself does such caching.
Duplicating footer caching is inefficient and if there is memory/memory cache conflict,
potentially counter-efficient.
## <a name="s3a"></a> S3A-specific options
The S3A Connector supports custom options for readahead and seek policy.
| Name | Type | Meaning |
|--------------------------------------|----------|---------------------------------------------------------------------------|
| `fs.s3a.readahead.range` | `long` | readahead range in bytes |
| `fs.s3a.experimental.input.fadvise`  | `String` | seek policy. Superseded by `fs.option.openfile.read.policy`               |
| `fs.s3a.input.async.drain.threshold` | `long` | threshold to switch to asynchronous draining of the stream. (Since 3.3.5) |
If the option set contains a SQL statement in the `fs.s3a.select.sql` statement,
then the file is opened as an S3 Select query.
Consult the S3A documentation for more details.
## <a name="abfs"></a> ABFS-specific options
The ABFS Connector supports custom input stream options.
| Name | Type | Meaning |
|-----------------------------------|-----------|----------------------------------------------------|
| `fs.azure.buffered.pread.disable` | `boolean` | disable caching on the positioned read operations. |
Disables caching on data read through the [PositionedReadable](fsdatainputstream.html#PositionedReadable)
APIs.
Consult the ABFS Documentation for more details.
## <a name="examples"></a> Examples
#### Declaring seek policy and split limits when opening a file.
Here is an example from a proof of
concept `org.apache.parquet.hadoop.util.HadoopInputFile`
reader which uses a (nullable) file status and a split start/end.
The `FileStatus` value is always passed in -but if it is null, then the split
end is used to declare the length of the file.
```java
protected SeekableInputStream newStream(Path path, FileStatus stat,
long splitStart, long splitEnd)
throws IOException {
FutureDataInputStreamBuilder builder = fs.openFile(path)
.opt("fs.option.openfile.read.policy", "vector, random")
.withFileStatus(stat);
builder.optLong("fs.option.openfile.split.start", splitStart);
builder.optLong("fs.option.openfile.split.end", splitEnd);
CompletableFuture<FSDataInputStream> streamF = builder.build();
return HadoopStreams.wrap(FutureIO.awaitFuture(streamF));
}
```
As a result, whether driven directly by a file listing, or when opening a file
from a query plan of `(path, splitStart, splitEnd)`, there is no need to probe
the remote store for the length of the file. When working with remote object
stores, this can save tens to hundreds of milliseconds, even if such a probe is
done asynchronously.
If both the file length and the split end is set, then the file length MUST be
considered "more" authoritative, that is it really SHOULD be defining the file
length. If the split end is set, the caller MAY not read past it.
The `CompressedSplitLineReader` can read past the end of a split if it is
partway through processing a compressed record. That is: it assumes an
incomplete record read means that the file length is greater than the split
length, and that it MUST read the entirety of the partially read record. Other
readers may behave similarly.
Therefore
1. File length as supplied in a `FileStatus` or in `fs.option.openfile.length`
SHALL set the strict upper limit on the length of a file
2. The split end as set in `fs.option.openfile.split.end` MUST be viewed as a
hint, rather than the strict end of the file.
### Opening a file with both standard and non-standard options
Standard and non-standard options MAY be combined in the same `openFile()`
operation.
```java
Future<FSDataInputStream> f = openFile(path)
.must("fs.option.openfile.read.policy", "random, adaptive")
.opt("fs.s3a.readahead.range", 1024 * 1024)
.build();
FSDataInputStream is = f.get();
```
The option set in `must()` MUST be understood, or at least recognized and
ignored by all filesystems. In this example, S3A-specific option MAY be
ignored by all other filesystem clients.
### Opening a file with older releases
Not all hadoop releases recognize the `fs.option.openfile.read.policy` option.
The option can be safely used in application code if it is added via the `opt()`
builder argument, as it will be treated as an unknown optional key which can
then be discarded.
```java
Future<FSDataInputStream> f = openFile(path)
.opt("fs.option.openfile.read.policy", "vector, random, adaptive")
.build();
FSDataInputStream is = f.get();
```
*Note 1* if the option name is set by a reference to a constant in
`org.apache.hadoop.fs.Options.OpenFileOptions`, then the program will not link
against versions of Hadoop without the specific option. Therefore for resilient
linking against older releases -use a copy of the value.
*Note 2* as option validation is performed in the FileSystem connector,
a third-party connector designed to work with multiple hadoop versions
MAY NOT support the option.
### Passing options in to MapReduce
Hadoop MapReduce will automatically read MR Job Options with the prefixes
`mapreduce.job.input.file.option.` and `mapreduce.job.input.file.must.`
prefixes, and apply these values as `.opt()` and `must()` respectively, after
removing the mapreduce-specific prefixes.
This makes passing options in to MR jobs straightforward. For example, to
declare that a job should read its data using random IO:
```java
JobConf jobConf = (JobConf) job.getConfiguration()
jobConf.set(
"mapreduce.job.input.file.option.fs.option.openfile.read.policy",
"random");
```
### MapReduce input format propagating options
An example of a record reader passing in options to the file it opens.
```java
public void initialize(InputSplit genericSplit,
TaskAttemptContext context) throws IOException {
FileSplit split = (FileSplit)genericSplit;
Configuration job = context.getConfiguration();
start = split.getStart();
end = start + split.getLength();
Path file = split.getPath();
// open the file and seek to the start of the split
FutureDataInputStreamBuilder builder =
file.getFileSystem(job).openFile(file);
// the start and end of the split may be used to build
// an input strategy.
builder.optLong("fs.option.openfile.split.start", start);
builder.optLong("fs.option.openfile.split.end", end);
FutureIO.propagateOptions(builder, job,
"mapreduce.job.input.file.option",
"mapreduce.job.input.file.must");
fileIn = FutureIO.awaitFuture(builder.build());
    fileIn.seek(start);
/* Rest of the operation on the opened stream */
}
```
### `FileContext.openFile`
From `org.apache.hadoop.fs.AvroFSInput`; a file is opened with sequential input.
Because the file length has already been probed for, the length is passed down
```java
public AvroFSInput(FileContext fc, Path p) throws IOException {
FileStatus status = fc.getFileStatus(p);
this.len = status.getLen();
this.stream = awaitFuture(fc.openFile(p)
.opt("fs.option.openfile.read.policy",
"sequential")
      .opt("fs.option.openfile.length",
Long.toString(status.getLen()))
.build());
fc.open(p);
}
```
In this example, the length is passed down as a string (via `Long.toString()`)
rather than directly as a long. This is to ensure that the input format will
link against versions of Hadoop which do not have the
`opt(String, long)` and `must(String, long)` builder parameters. Similarly, the
values are passed as optional, so that if unrecognized the application will
still succeed.
### Example: reading a whole file
This is from `org.apache.hadoop.util.JsonSerialization`.
Its `load(FileSystem, Path, FileStatus)` method
* declares the whole file is to be read end to end.
* passes down the file status
```java
public T load(FileSystem fs,
Path path,
              FileStatus status)
throws IOException {
try (FSDataInputStream dataInputStream =
awaitFuture(fs.openFile(path)
.opt("fs.option.openfile.read.policy", "whole-file")
.withFileStatus(status)
.build())) {
return fromJsonStream(dataInputStream);
} catch (JsonProcessingException e) {
throw new PathIOException(path.toString(),
"Failed to read JSON file " + e, e);
}
}
``` | unknown | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md |
from __future__ import division, print_function, absolute_import
from ._ufuncs import _lambertw
def lambertw(z, k=0, tol=1e-8):
    r"""
    lambertw(z, k=0, tol=1e-8)

    Lambert W function.

    The Lambert W function `W(z)` is defined as the inverse function
    of ``w * exp(w)``. In other words, the value of ``W(z)`` is
    such that ``z = W(z) * exp(W(z))`` for any complex number
    ``z``.

    The Lambert W function is a multivalued function with infinitely
    many branches. Each branch gives a separate solution of the
    equation ``z = w exp(w)``. Here, the branches are indexed by the
    integer `k`.

    Parameters
    ----------
    z : array_like
        Input argument.
    k : int, optional
        Branch index.
    tol : float, optional
        Evaluation tolerance.

    Returns
    -------
    w : array
        `w` will have the same shape as `z`.

    Notes
    -----
    All branches are supported by `lambertw`:

    * ``lambertw(z)`` gives the principal solution (branch 0)
    * ``lambertw(z, k)`` gives the solution on branch `k`

    The Lambert W function has two partially real branches: the
    principal branch (`k = 0`) is real for real ``z > -1/e``, and the
    ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except
    ``k = 0`` have a logarithmic singularity at ``z = 0``.

    **Possible issues**

    The evaluation can become inaccurate very close to the branch point
    at ``-1/e``. In some corner cases, `lambertw` might currently
    fail to converge, or can end up on the wrong branch.

    **Algorithm**

    Halley's iteration is used to invert ``w * exp(w)``, using a first-order
    asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate.

    The definition, implementation and choice of branches is based on [2]_.

    See Also
    --------
    wrightomega : the Wright Omega function

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Lambert_W_function
    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
       (1996) 329-359.
       http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf

    Examples
    --------
    The Lambert W function is the inverse of ``w exp(w)``:

    >>> import numpy as np
    >>> from scipy.special import lambertw
    >>> w = lambertw(1)
    >>> w
    (0.56714329040978384+0j)
    >>> w * np.exp(w)
    (1.0+0j)

    Any branch gives a valid inverse:

    >>> w = lambertw(1, k=3)
    >>> w
    (-2.8535817554090377+17.113535539412148j)
    >>> w*np.exp(w)
    (1.0000000000000002+1.609823385706477e-15j)

    **Applications to equation-solving**

    The Lambert W function may be used to solve various kinds of
    equations, such as finding the value of the infinite power
    tower :math:`z^{z^{z^{\ldots}}}`:

    >>> def tower(z, n):
    ...     if n == 0:
    ...         return z
    ...     return z ** tower(z, n-1)
    ...
    >>> tower(0.5, 100)
    0.641185744504986
    >>> -lambertw(-np.log(0.5)) / np.log(0.5)
    (0.64118574450498589+0j)
    """
    # Thin wrapper: all numerics (Halley iteration, branch selection) are
    # implemented in the compiled ufunc `_lambertw`.
    return _lambertw(z, k, tol)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util.functional;
import java.io.Closeable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import static java.util.Objects.requireNonNull;
/**
* A task submitter which is closeable, and whose close() call
* shuts down the pool. This can help manage
* thread pool lifecycles.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class CloseableTaskPoolSubmitter implements TaskPool.Submitter,
Closeable {
/** Executors. */
private ExecutorService pool;
/**
* Constructor.
* @param pool non-null executor.
*/
public CloseableTaskPoolSubmitter(final ExecutorService pool) {
this.pool = requireNonNull(pool);
}
/**
* Get the pool.
* @return the pool.
*/
public ExecutorService getPool() {
return pool;
}
/**
* Shut down the pool.
*/
@Override
public void close() {
if (pool != null) {
pool.shutdown();
pool = null;
}
}
@Override
public Future<?> submit(final Runnable task) {
return pool.submit(task);
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CloseableTaskPoolSubmitter.java |
# Manually-triggered workflow: builds a JFrog release bundle from a named
# commercial build and distributes it to the Broadcom package repository.
name: Distribute
on:
  workflow_dispatch:
    inputs:
      build-number:
        description: 'Number of the build to use to create the bundle'
        required: true
        type: string
      create-bundle:
        description: 'Whether to create the bundle. If unchecked, only the bundle distribution is executed'
        required: true
        type: boolean
        default: true
      version:
        description: 'Version to bundle and distribute'
        required: true
        type: string
# Read-only token: the workflow only calls external APIs, never writes to the repo.
permissions:
  contents: read
jobs:
  distribute-spring-enterprise-release-bundle:
    runs-on: ${{ vars.UBUNTU_SMALL || 'ubuntu-latest' }}
    steps:
      # Create the release bundle from the named build's artifacts.
      # The trailing jq call fails the step unless the API response
      # contains the expected "repository_key" field.
      - name: Create Bundle
        if: ${{ vars.COMMERCIAL && inputs.create-bundle }}
        shell: bash
        run: |
          curl -s -u "${{ secrets.COMMERCIAL_ARTIFACTORY_USERNAME }}:${{ secrets.COMMERCIAL_ARTIFACTORY_PASSWORD }}" \
            -X POST -H "X-JFrog-Signing-Key-Name: packagesKey" -H "Content-Type: application/json" \
            "https://usw1.packages.broadcom.com/lifecycle/api/v2/release_bundle?project=spring" \
            -d '{"release_bundle_name": "TNZ-spring-boot-commercial", "release_bundle_version": "${{ inputs.version }}", "skip_docker_manifest_resolution": true, "source_type": "builds", "source": {"builds": [ {"build_repository": "spring-build-info", "build_name": "spring-boot-commercial-${{ inputs.version }}", "build_number": "${{ inputs.build-number }}", "include_dependencies": false}]}}' | \
            jq -e 'if has("repository_key") then . else halt_error end'
      # Wait before distributing so the freshly-created bundle is available
      # to the distribution API.
      - name: Sleep
        if: ${{ vars.COMMERCIAL && inputs.create-bundle }}
        shell: bash
        run: sleep 30
      # Distribute the bundle to the target site; the mapping rewrites
      # artifact paths from the staging repository layout to the public one.
      # The jq call fails the step unless the response contains an "id".
      - name: Distribute Bundle
        if: ${{ vars.COMMERCIAL }}
        shell: bash
        run: |
          curl -s -u "${{ secrets.COMMERCIAL_ARTIFACTORY_USERNAME }}:${{ secrets.COMMERCIAL_ARTIFACTORY_PASSWORD }}" \
            -X POST -H "Content-Type: application/json" \
            "https://usw1.packages.broadcom.com/lifecycle/api/v2/distribution/distribute/TNZ-spring-boot-commercial/${{ inputs.version }}?project=spring" \
            -d '{"auto_create_missing_repositories": "false", "distribution_rules": [{"site_name": "JP-SaaS"}], "modifications": {"mappings": [{"input": "spring-enterprise-maven-prod-local/(.*)", "output": "spring-enterprise/$1"}]}}' | \
            jq -e 'if has("id") then . else halt_error end'
# Copyright (C) 2009, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""

# EDNS option code for NSID (Name Server Identifier, RFC 5001).
NSID = 3
class Option(object):
    """Abstract base class for all EDNS option types.

    Concrete option classes must implement to_wire(), from_wire() and
    _cmp(); the rich-comparison operators defined here are derived from
    _cmp() and only apply between options of the same option type.
    """

    def __init__(self, otype):
        """Initialize an option.
        @param otype: The rdata type
        @type otype: int
        """
        self.otype = otype

    def to_wire(self, file):
        """Convert an option to wire format, writing it to *file*."""
        raise NotImplementedError

    def from_wire(cls, otype, wire, current, olen):
        """Build an EDNS option object from wire format
        @param otype: The option type
        @type otype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param olen: The length of the wire-format option data
        @type olen: int
        @rtype: dns.edns.Option instance"""
        raise NotImplementedError
    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        """Three-way compare with another option of the same type.
        Return < 0 if self < other, 0 if self == other, and > 0 if
        self > other.
        """
        raise NotImplementedError

    def _comparable(self, other):
        # Options are only comparable to options of the same option type.
        return isinstance(other, Option) and self.otype == other.otype

    def __eq__(self, other):
        return self._comparable(other) and self._cmp(other) == 0

    def __ne__(self, other):
        # Mirrors __eq__: objects of a different type (or option type)
        # are neither equal nor unequal — both operators yield False,
        # preserving the original API's behavior.
        return self._comparable(other) and self._cmp(other) != 0

    def __lt__(self, other):
        if not self._comparable(other):
            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not self._comparable(other):
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not self._comparable(other):
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not self._comparable(other):
            return NotImplemented
        return self._cmp(other) > 0
class GenericOption(Option):
    """Generic EDNS option.

    This class is used for EDNS option types for which we have no better
    implementation.  The option data is kept as an opaque string.
    """

    def __init__(self, otype, data):
        super(GenericOption, self).__init__(otype)
        self.data = data

    def to_wire(self, file):
        """Write the raw option data to *file*."""
        file.write(self.data)

    def from_wire(cls, otype, wire, current, olen):
        """Build a GenericOption from the *olen* bytes of option data
        starting at offset *current* in *wire*."""
        return cls(otype, wire[current : current + olen])
    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Three-way comparison of the raw data.  Written without the
        # Python 2-only cmp() builtin so the expression is also valid on
        # Python 3; on Python 2 it is equivalent to
        # cmp(self.data, other.data).
        return (self.data > other.data) - (self.data < other.data)
# Registry mapping EDNS option codes to their implementing classes;
# option types without an entry fall back to GenericOption.
_type_to_class = {
}
def get_option_class(otype):
    """Return the class registered for EDNS option type *otype*,
    falling back to GenericOption when no specific class is known."""
    cls = _type_to_class.get(otype)
    return GenericOption if cls is None else cls
def option_from_wire(otype, wire, current, olen):
    """Build an EDNS option object from wire format
    @param otype: The option type
    @type otype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param olen: The length of the wire-format option data
    @type olen: int
    @rtype: dns.edns.Option instance"""
    # Dispatch to whichever class implements this option type.
    return get_option_class(otype).from_wire(otype, wire, current, olen)
/* css for clang-doc mustache backend */
@import "https://fonts.googleapis.com/css2?family=Inter:ital,opsz,wght@0,14..32,100..900;1,14..32,100..900&display=swap";
/* Border-box sizing for every element and its pseudo-elements.
   Fixed: the selector list was missing the comma between *::before and
   *::after, turning it into a descendant selector that matches nothing. */
*,*::before,*::after {
    box-sizing:border-box
}
* {
margin:0;
padding:0
}
ol,
ul {
list-style:none
}
img,
picture,
svg,
video {
display:block;
max-width:100%
}
* {
--brand-light:#ce6300;
--text1-light:#000000;
--text2-light:#333333;
--surface1-light:#ffffff;
--surface2-light:#f5f5f5;
--brand-dark:#de9853;
--text1-dark:#ffffff;
--text2-dark:#cccccc;
--surface1-dark:#161212;
--surface2-dark:#272424
}
:root {
color-scheme:light;
--brand:var(--brand-light);
--text1:var(--text1-light);
--text2:var(--text2-light);
--text1-inverse:var(--text1-dark);
--text2-inverse:var(--text2-dark);
--surface1:var(--surface1-light);
--surface2:var(--surface2-light)
}
@media(prefers-color-scheme:dark) {
:root {
color-scheme:dark;
--brand:var(--brand-dark);
--text1:var(--text1-dark);
--text2:var(--text2-dark);
--text1-inverse:var(--text1-light);
--text2-inverse:var(--text2-light);
--surface1:var(--surface1-dark);
--surface2:var(--surface2-dark)
}
}
[color-scheme=light] {
color-scheme:light;
--brand:var(--brand-light);
--text1:var(--text1-light);
--text2:var(--text2-light);
--text1-inverse:var(--text1-dark);
--text2-inverse:var(--text2-dark);
--surface1:var(--surface1-light);
--surface2:var(--surface2-light)
}
[color-scheme=dark] {
color-scheme:dark;
--brand:var(--brand-dark);
--text1:var(--text1-dark);
--text2:var(--text2-dark);
--text1-inverse:var(--text1-light);
--text2-inverse:var(--text2-light);
--surface1:var(--surface1-dark);
--surface2:var(--surface2-dark)
}
html {
    background-color:var(--surface1)
}
/* NOTE(review): the two rules below both target html/body and set
   overlapping margin/padding, plus min-height:100vh here versus
   height:100% further down — consider consolidating. Kept as-is to
   preserve the current cascade. */
html, body {
    min-height: 100vh;
    margin: 0;
    padding: 0;
    width: 100%;
}
.container {
    display: flex;
    box-sizing: border-box;
    align-items: stretch;
}
body, html {
    font-family:Inter,sans-serif;
    margin: 0;
    padding: 0;
    height: 100%;
}
/* Navbar Styles */
header {
background-color: var(--surface2);
display: flex;
flex-direction: column;
}
.navbar {
background-color: var(--surface2);
border-bottom: 1px solid var(--text2);
width: 100%;
color: white;
display: flex;
flex-direction: column;
align-items: stretch;
padding: 0;
box-sizing: border-box;
}
.navbar__container {
display:grid;
grid-template-columns: auto 1fr auto;
align-items:center;
padding:1rem;
color:var(--text1);
max-width:2048px;
margin:auto;
width:100%
}
.navbar__logo {
display:flex;
align-items:center;
height:40px
}
.navbar__logo a {
display:flex;
align-items:center;
text-decoration:none;
height:100%
}
.navbar__logo img {
height:100%;
width:auto
}
.navbar__toggle {
background:0 0;
color:var(--text2);
border:none;
cursor:pointer;
font-size:1.5rem;
width:2.5rem;
height:2.5rem;
margin-left:auto
}
.navbar__toggle:hover {
color:var(--text1)
}
@media(min-width:769px) {
.navbar__toggle {
display:none
}
}
.navbar__menu {
display:flex;
justify-content:space-between;
align-items:center;
list-style:none;
margin:0;
padding:0;
gap:.25rem;
margin-left:0;
grid-column:2;
justify-self:center
}
@media(max-width:768px) {
.navbar__menu {
flex-direction:column;
justify-content:flex-start;
width:100%;
background-color:var(--surface2);
position:fixed;
top:0;
left:0;
right:0;
bottom:0;
padding:1.5rem;
transform:translateX(100%);
transition:transform .5s ease-in-out
}
}
@media(max-width:768px) {
.navbar__menu.active {
transform:translateX(0)
}
}
.navbar__close {
background:0 0;
border:none;
cursor:pointer;
font-size:1.5rem;
color:var(--text2);
margin-left:auto
}
.navbar__close:hover {
color:var(--text1)
}
@media(min-width:769px) {
.navbar__close {
display:none
}
}
.navbar__links {
display:flex;
gap:1rem;
align-items:center;
margin:0;
padding:0;
justify-content:center
}
@media(max-width:768px) {
.navbar__links {
flex-direction:column
}
}
.navbar__item {
list-style-type:none
}
.navbar__link {
color:var(--text2);
text-decoration:none;
padding:.5rem
}
.navbar__link:hover {
color:var(--text1)
}
.navbar__theme-toggle-button {
background:0 0;
color:var(--text2);
border:none;
cursor:pointer;
font-size:1.5rem;
width:2.5rem;
height:2.5rem
}
.navbar__theme-toggle-button:hover {
color:var(--text1)
}
/* Breadcrumb strip below the navbar. Fixed: border-bottom was declared
   twice with the same value; the duplicate is removed. */
.navbar-breadcrumb-container {
    position: static;
    width: 100%;
    background: var(--surface2);
    padding: 0.5rem 1rem;
    display: flex;
    gap: 0.5rem;
    box-sizing: border-box;
    border-top: 1px solid var(--text2);
    border-bottom: 1px solid var(--text2);
}
.navbar-breadcrumb-item {
padding: 0.25rem 0.75rem;
background: var(--surface1);
border: 1px solid var(--text2);
border-radius: 4px;
color: var(--text1);
font-size: 0.9rem;
white-space: nowrap;
}
.navbar-breadcrumb-item:hover {
background: var(--brand);
color: var(--text1-inverse);
border-color: var(--brand);
cursor: pointer;
}
.hero__container {
margin-top:1rem;
display:flex;
justify-content:center;
align-items:center;
gap:2rem
}
.hero__title {
font-size:2.5rem;
margin-bottom:.5rem
}
.hero__title-large {
font-size:3rem
}
@media(max-width:768px) {
.hero__title-large {
font-size:2.5rem
}
}
@media(max-width:480px) {
.hero__title-large {
font-size:2rem
}
}
@media(max-width:768px) {
.hero__title {
font-size:2rem
}
}
@media(max-width:480px) {
.hero__title {
font-size:1.75rem
}
}
.hero__subtitle {
font-size:1.25rem;
font-weight:500
}
@media(max-width:768px) {
.hero__subtitle {
font-size:1rem
}
}
@media(max-width:480px) {
.hero__subtitle {
font-size:.875rem
}
}
.section-container {
max-width: 2048px;
margin-left:auto;
margin-right:auto;
margin-bottom: 1rem;
}
@media(max-width:768px) {
.section-container {
padding:1rem
}
}
.section-container h2 {
font-size:1.5rem;
margin-bottom:1rem;
color:var(--brand);
border-bottom: 1px solid var(--text2);
}
@media(max-width:768px) {
.section-container h2 {
font-size:1.25rem
}
}
.section-container p {
font-size:1rem;
line-height:1.5
}
@media(max-width:768px) {
.section-container p {
font-size:.875rem
}
}
.home__row {
display:grid;
grid-template-columns:repeat(auto-fit,minmax(300px,1fr));
gap:2rem
}
.table-wrapper {
    display:flex;
    flex-direction:column;
    padding:1rem;
    /* border-collapse has no effect on this flex container itself, but it
       is an inherited property, so descendant tables inherit it and render
       without gaps between cells. */
    border-collapse: collapse; /* Ensures there are no gaps between cells */
}
.table-wrapper th, .table-wrapper td {
    padding: 0.5rem 1rem; /* Adds padding inside the cells */
    border:1px solid var(--text1);
    text-align: left;
}
.block-command-command {
font-weight: bold;
}
.code-clang-doc {
font-size: 1.1rem;
}
.delimiter-container {
padding: 0.5rem 1rem;
margin-bottom:1rem;
}
.nested-delimiter-container {
margin-bottom: 1rem;
}
.nested-delimiter-container:last-of-type {
margin-bottom: 0rem;
}
.resizer {
width: 5px;
cursor: col-resize;
background-color: var(--surface1);
}
.resizer:hover {
background-color: var(--text2-inverse);
}
.sidebar {
width: 250px;
position: sticky;
top: 0;
background-color: var(--surface1);
display: flex;
border-left: 1px solid var(--text2);
flex-direction: column;
overflow-y: auto;
scrollbar-width: thin;
flex-shrink: 0;
}
.sidebar h2 {
margin-top: 0;
margin-bottom: 20px;
padding: 10px;
}
.sidebar ul {
width: 100%;
padding: 0;
list-style-type: none;
}
.sidebar ul li {
padding-left: 1rem;
padding-top: 0.25rem;
padding-bottom: 0.25rem;
}
.sidebar-section {
font-size:1.5rem;
font-weight: bold;
padding-left: 1rem;
}
.sidebar-section a {
color: var(--brand)
}
/* Content */
.content {
background-color: var(--text1-inverse);
position: relative;
flex: 1;
min-width: 0;
min-height: 100vh;
padding-left: 1rem;
}
.sidebar-item {
color: var(--text1);
}
.sidebar-item-container:hover {
width: 100%;
background-color: grey;
}
.sidebar-item-container:hover a {
width: 100%;
color: var(--text1-inverse);
}
.class-container {
padding: 0.5rem 1rem;
}
a, a:visited, a:hover, a:active {
text-decoration: none;
color: inherit;
}
.code-block {
white-space: pre-line;
}
.doc-card {
border: 3px solid rgba(0, 0, 0, 0.12);
border-radius: 10px;
background: var(--surface1);
margin: 14px 0;
overflow: hidden;
padding: 10px;
}
'''Wrapper for pulse
Generated with:
tools/genwrappers.py pulseaudio
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('pulse')
# Determine a ctypes integer type with the same width as size_t, used as
# a stand-in for C's ptrdiff_t in the generated signatures below.
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
    # Some builds of ctypes apparently do not have c_int64
    # defined; it's a pretty good bet that these builds do not
    # have 64-bit pointers.
    _int_types += (ctypes.c_int64,)
for t in _int_types:
    if sizeof(t) == sizeof(c_size_t):
        # NOTE(review): no break — the last matching type wins; and if no
        # candidate matches, c_ptrdiff_t is never bound, which would raise
        # NameError at first use. Harmless on common platforms.
        c_ptrdiff_t = t
class c_void(Structure):
    """Stand-in for C ``void`` so ``POINTER(c_void)`` acts as a real pointer."""
    # c_void_p is a buggy return type, converting to int, so
    # POINTER(None) == c_void_p is actually written as
    # POINTER(c_void), so it can be treated as a real pointer.
    _fields_ = [('dummy', c_int)]
# /usr/include/pulse/version.h:40
pa_get_library_version = _lib.pa_get_library_version
pa_get_library_version.restype = c_char_p
pa_get_library_version.argtypes = []
PA_API_VERSION = 12 # /usr/include/pulse/version.h:46
PA_PROTOCOL_VERSION = 30 # /usr/include/pulse/version.h:50
PA_MAJOR = 6 # /usr/include/pulse/version.h:53
PA_MINOR = 0 # /usr/include/pulse/version.h:56
PA_MICRO = 0 # /usr/include/pulse/version.h:59
PA_CHANNELS_MAX = 32 # /usr/include/pulse/sample.h:128
PA_RATE_MAX = 192000 # /usr/include/pulse/sample.h:131
enum_pa_sample_format = c_int
PA_SAMPLE_U8 = 0
PA_SAMPLE_ALAW = 1
PA_SAMPLE_ULAW = 2
PA_SAMPLE_S16LE = 3
PA_SAMPLE_S16BE = 4
PA_SAMPLE_FLOAT32LE = 5
PA_SAMPLE_FLOAT32BE = 6
PA_SAMPLE_S32LE = 7
PA_SAMPLE_S32BE = 8
PA_SAMPLE_S24LE = 9
PA_SAMPLE_S24BE = 10
PA_SAMPLE_S24_32LE = 11
PA_SAMPLE_S24_32BE = 12
PA_SAMPLE_MAX = 13
PA_SAMPLE_INVALID = -1
pa_sample_format_t = enum_pa_sample_format # /usr/include/pulse/sample.h:179
class struct_pa_sample_spec(Structure):
__slots__ = [
'format',
'rate',
'channels',
]
struct_pa_sample_spec._fields_ = [
('format', pa_sample_format_t),
('rate', c_uint32),
('channels', c_uint8),
]
pa_sample_spec = struct_pa_sample_spec # /usr/include/pulse/sample.h:257
pa_usec_t = c_uint64 # /usr/include/pulse/sample.h:260
# /usr/include/pulse/sample.h:263
pa_bytes_per_second = _lib.pa_bytes_per_second
pa_bytes_per_second.restype = c_size_t
pa_bytes_per_second.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:266
pa_frame_size = _lib.pa_frame_size
pa_frame_size.restype = c_size_t
pa_frame_size.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:269
pa_sample_size = _lib.pa_sample_size
pa_sample_size.restype = c_size_t
pa_sample_size.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:273
pa_sample_size_of_format = _lib.pa_sample_size_of_format
pa_sample_size_of_format.restype = c_size_t
pa_sample_size_of_format.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:278
pa_bytes_to_usec = _lib.pa_bytes_to_usec
pa_bytes_to_usec.restype = pa_usec_t
pa_bytes_to_usec.argtypes = [c_uint64, POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:283
pa_usec_to_bytes = _lib.pa_usec_to_bytes
pa_usec_to_bytes.restype = c_size_t
pa_usec_to_bytes.argtypes = [pa_usec_t, POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:288
pa_sample_spec_init = _lib.pa_sample_spec_init
pa_sample_spec_init.restype = POINTER(pa_sample_spec)
pa_sample_spec_init.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:291
pa_sample_format_valid = _lib.pa_sample_format_valid
pa_sample_format_valid.restype = c_int
pa_sample_format_valid.argtypes = [c_uint]
# /usr/include/pulse/sample.h:294
pa_sample_rate_valid = _lib.pa_sample_rate_valid
pa_sample_rate_valid.restype = c_int
pa_sample_rate_valid.argtypes = [c_uint32]
# /usr/include/pulse/sample.h:298
pa_channels_valid = _lib.pa_channels_valid
pa_channels_valid.restype = c_int
pa_channels_valid.argtypes = [c_uint8]
# /usr/include/pulse/sample.h:301
pa_sample_spec_valid = _lib.pa_sample_spec_valid
pa_sample_spec_valid.restype = c_int
pa_sample_spec_valid.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:304
pa_sample_spec_equal = _lib.pa_sample_spec_equal
pa_sample_spec_equal.restype = c_int
pa_sample_spec_equal.argtypes = [POINTER(pa_sample_spec), POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:307
pa_sample_format_to_string = _lib.pa_sample_format_to_string
pa_sample_format_to_string.restype = c_char_p
pa_sample_format_to_string.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:310
pa_parse_sample_format = _lib.pa_parse_sample_format
pa_parse_sample_format.restype = pa_sample_format_t
pa_parse_sample_format.argtypes = [c_char_p]
PA_SAMPLE_SPEC_SNPRINT_MAX = 32 # /usr/include/pulse/sample.h:317
# /usr/include/pulse/sample.h:320
pa_sample_spec_snprint = _lib.pa_sample_spec_snprint
pa_sample_spec_snprint.restype = c_char_p
pa_sample_spec_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_sample_spec)]
PA_BYTES_SNPRINT_MAX = 11 # /usr/include/pulse/sample.h:327
# /usr/include/pulse/sample.h:330
pa_bytes_snprint = _lib.pa_bytes_snprint
pa_bytes_snprint.restype = c_char_p
pa_bytes_snprint.argtypes = [c_char_p, c_size_t, c_uint]
# /usr/include/pulse/sample.h:334
pa_sample_format_is_le = _lib.pa_sample_format_is_le
pa_sample_format_is_le.restype = c_int
pa_sample_format_is_le.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:338
pa_sample_format_is_be = _lib.pa_sample_format_is_be
pa_sample_format_is_be.restype = c_int
pa_sample_format_is_be.argtypes = [pa_sample_format_t]
enum_pa_context_state = c_int
PA_CONTEXT_UNCONNECTED = 0
PA_CONTEXT_CONNECTING = 1
PA_CONTEXT_AUTHORIZING = 2
PA_CONTEXT_SETTING_NAME = 3
PA_CONTEXT_READY = 4
PA_CONTEXT_FAILED = 5
PA_CONTEXT_TERMINATED = 6
pa_context_state_t = enum_pa_context_state # /usr/include/pulse/def.h:45
enum_pa_stream_state = c_int
PA_STREAM_UNCONNECTED = 0
PA_STREAM_CREATING = 1
PA_STREAM_READY = 2
PA_STREAM_FAILED = 3
PA_STREAM_TERMINATED = 4
pa_stream_state_t = enum_pa_stream_state # /usr/include/pulse/def.h:74
enum_pa_operation_state = c_int
PA_OPERATION_RUNNING = 0
PA_OPERATION_DONE = 1
PA_OPERATION_CANCELLED = 2
pa_operation_state_t = enum_pa_operation_state # /usr/include/pulse/def.h:102
enum_pa_context_flags = c_int
PA_CONTEXT_NOFLAGS = 0
PA_CONTEXT_NOAUTOSPAWN = 1
PA_CONTEXT_NOFAIL = 2
pa_context_flags_t = enum_pa_context_flags # /usr/include/pulse/def.h:122
enum_pa_direction = c_int
PA_DIRECTION_OUTPUT = 1
PA_DIRECTION_INPUT = 2
pa_direction_t = enum_pa_direction # /usr/include/pulse/def.h:137
enum_pa_device_type = c_int
PA_DEVICE_TYPE_SINK = 0
PA_DEVICE_TYPE_SOURCE = 1
pa_device_type_t = enum_pa_device_type # /usr/include/pulse/def.h:148
enum_pa_stream_direction = c_int
PA_STREAM_NODIRECTION = 0
PA_STREAM_PLAYBACK = 1
PA_STREAM_RECORD = 2
PA_STREAM_UPLOAD = 3
pa_stream_direction_t = enum_pa_stream_direction # /usr/include/pulse/def.h:161
enum_pa_stream_flags = c_int
PA_STREAM_NOFLAGS = 0
PA_STREAM_START_CORKED = 1
PA_STREAM_INTERPOLATE_TIMING = 2
PA_STREAM_NOT_MONOTONIC = 4
PA_STREAM_AUTO_TIMING_UPDATE = 8
PA_STREAM_NO_REMAP_CHANNELS = 16
PA_STREAM_NO_REMIX_CHANNELS = 32
PA_STREAM_FIX_FORMAT = 64
PA_STREAM_FIX_RATE = 128
PA_STREAM_FIX_CHANNELS = 256
PA_STREAM_DONT_MOVE = 512
PA_STREAM_VARIABLE_RATE = 1024
PA_STREAM_PEAK_DETECT = 2048
PA_STREAM_START_MUTED = 4096
PA_STREAM_ADJUST_LATENCY = 8192
PA_STREAM_EARLY_REQUESTS = 16384
PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND = 32768
PA_STREAM_START_UNMUTED = 65536
PA_STREAM_FAIL_ON_SUSPEND = 131072
PA_STREAM_RELATIVE_VOLUME = 262144
PA_STREAM_PASSTHROUGH = 524288
pa_stream_flags_t = enum_pa_stream_flags # /usr/include/pulse/def.h:355
class struct_pa_buffer_attr(Structure):
__slots__ = [
'maxlength',
'tlength',
'prebuf',
'minreq',
'fragsize',
]
struct_pa_buffer_attr._fields_ = [
('maxlength', c_uint32),
('tlength', c_uint32),
('prebuf', c_uint32),
('minreq', c_uint32),
('fragsize', c_uint32),
]
pa_buffer_attr = struct_pa_buffer_attr # /usr/include/pulse/def.h:452
enum_pa_error_code = c_int
PA_OK = 0
PA_ERR_ACCESS = 1
PA_ERR_COMMAND = 2
PA_ERR_INVALID = 3
PA_ERR_EXIST = 4
PA_ERR_NOENTITY = 5
PA_ERR_CONNECTIONREFUSED = 6
PA_ERR_PROTOCOL = 7
PA_ERR_TIMEOUT = 8
PA_ERR_AUTHKEY = 9
PA_ERR_INTERNAL = 10
PA_ERR_CONNECTIONTERMINATED = 11
PA_ERR_KILLED = 12
PA_ERR_INVALIDSERVER = 13
PA_ERR_MODINITFAILED = 14
PA_ERR_BADSTATE = 15
PA_ERR_NODATA = 16
PA_ERR_VERSION = 17
PA_ERR_TOOLARGE = 18
PA_ERR_NOTSUPPORTED = 19
PA_ERR_UNKNOWN = 20
PA_ERR_NOEXTENSION = 21
PA_ERR_OBSOLETE = 22
PA_ERR_NOTIMPLEMENTED = 23
PA_ERR_FORKED = 24
PA_ERR_IO = 25
PA_ERR_BUSY = 26
PA_ERR_MAX = 27
pa_error_code_t = enum_pa_error_code # /usr/include/pulse/def.h:484
enum_pa_subscription_mask = c_int
PA_SUBSCRIPTION_MASK_NULL = 0
PA_SUBSCRIPTION_MASK_SINK = 1
PA_SUBSCRIPTION_MASK_SOURCE = 2
PA_SUBSCRIPTION_MASK_SINK_INPUT = 4
PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT = 8
PA_SUBSCRIPTION_MASK_MODULE = 16
PA_SUBSCRIPTION_MASK_CLIENT = 32
PA_SUBSCRIPTION_MASK_SAMPLE_CACHE = 64
PA_SUBSCRIPTION_MASK_SERVER = 128
PA_SUBSCRIPTION_MASK_AUTOLOAD = 256
PA_SUBSCRIPTION_MASK_CARD = 512
PA_SUBSCRIPTION_MASK_ALL = 767
pa_subscription_mask_t = enum_pa_subscription_mask # /usr/include/pulse/def.h:554
enum_pa_subscription_event_type = c_int
PA_SUBSCRIPTION_EVENT_SINK = 0
PA_SUBSCRIPTION_EVENT_SOURCE = 1
PA_SUBSCRIPTION_EVENT_SINK_INPUT = 2
PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT = 3
PA_SUBSCRIPTION_EVENT_MODULE = 4
PA_SUBSCRIPTION_EVENT_CLIENT = 5
PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE = 6
PA_SUBSCRIPTION_EVENT_SERVER = 7
PA_SUBSCRIPTION_EVENT_AUTOLOAD = 8
PA_SUBSCRIPTION_EVENT_CARD = 9
PA_SUBSCRIPTION_EVENT_FACILITY_MASK = 15
PA_SUBSCRIPTION_EVENT_NEW = 0
PA_SUBSCRIPTION_EVENT_CHANGE = 16
PA_SUBSCRIPTION_EVENT_REMOVE = 32
PA_SUBSCRIPTION_EVENT_TYPE_MASK = 48
pa_subscription_event_type_t = enum_pa_subscription_event_type # /usr/include/pulse/def.h:605
class struct_pa_timing_info(Structure):
__slots__ = [
'timestamp',
'synchronized_clocks',
'sink_usec',
'source_usec',
'transport_usec',
'playing',
'write_index_corrupt',
'write_index',
'read_index_corrupt',
'read_index',
'configured_sink_usec',
'configured_source_usec',
'since_underrun',
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
struct_pa_timing_info._fields_ = [
('timestamp', struct_timeval),
('synchronized_clocks', c_int),
('sink_usec', pa_usec_t),
('source_usec', pa_usec_t),
('transport_usec', pa_usec_t),
('playing', c_int),
('write_index_corrupt', c_int),
('write_index', c_int64),
('read_index_corrupt', c_int),
('read_index', c_int64),
('configured_sink_usec', pa_usec_t),
('configured_source_usec', pa_usec_t),
('since_underrun', c_int64),
]
pa_timing_info = struct_pa_timing_info # /usr/include/pulse/def.h:725
class struct_pa_spawn_api(Structure):
__slots__ = [
'prefork',
'postfork',
'atfork',
]
struct_pa_spawn_api._fields_ = [
('prefork', POINTER(CFUNCTYPE(None))),
('postfork', POINTER(CFUNCTYPE(None))),
('atfork', POINTER(CFUNCTYPE(None))),
]
pa_spawn_api = struct_pa_spawn_api # /usr/include/pulse/def.h:749
enum_pa_seek_mode = c_int
PA_SEEK_RELATIVE = 0
PA_SEEK_ABSOLUTE = 1
PA_SEEK_RELATIVE_ON_READ = 2
PA_SEEK_RELATIVE_END = 3
pa_seek_mode_t = enum_pa_seek_mode # /usr/include/pulse/def.h:764
enum_pa_sink_flags = c_int
PA_SINK_NOFLAGS = 0
PA_SINK_HW_VOLUME_CTRL = 1
PA_SINK_LATENCY = 2
PA_SINK_HARDWARE = 4
PA_SINK_NETWORK = 8
PA_SINK_HW_MUTE_CTRL = 16
PA_SINK_DECIBEL_VOLUME = 32
PA_SINK_FLAT_VOLUME = 64
PA_SINK_DYNAMIC_LATENCY = 128
PA_SINK_SET_FORMATS = 256
pa_sink_flags_t = enum_pa_sink_flags # /usr/include/pulse/def.h:829
enum_pa_sink_state = c_int
PA_SINK_INVALID_STATE = -1
PA_SINK_RUNNING = 0
PA_SINK_IDLE = 1
PA_SINK_SUSPENDED = 2
PA_SINK_INIT = -2
PA_SINK_UNLINKED = -3
pa_sink_state_t = enum_pa_sink_state # /usr/include/pulse/def.h:875
enum_pa_source_flags = c_int
PA_SOURCE_NOFLAGS = 0
PA_SOURCE_HW_VOLUME_CTRL = 1
PA_SOURCE_LATENCY = 2
PA_SOURCE_HARDWARE = 4
PA_SOURCE_NETWORK = 8
PA_SOURCE_HW_MUTE_CTRL = 16
PA_SOURCE_DECIBEL_VOLUME = 32
PA_SOURCE_DYNAMIC_LATENCY = 64
PA_SOURCE_FLAT_VOLUME = 128
pa_source_flags_t = enum_pa_source_flags # /usr/include/pulse/def.h:946
enum_pa_source_state = c_int
PA_SOURCE_INVALID_STATE = -1
PA_SOURCE_RUNNING = 0
PA_SOURCE_IDLE = 1
PA_SOURCE_SUSPENDED = 2
PA_SOURCE_INIT = -2
PA_SOURCE_UNLINKED = -3
pa_source_state_t = enum_pa_source_state # /usr/include/pulse/def.h:991
pa_free_cb_t = CFUNCTYPE(None, POINTER(None)) # /usr/include/pulse/def.h:1014
enum_pa_port_available = c_int
PA_PORT_AVAILABLE_UNKNOWN = 0
PA_PORT_AVAILABLE_NO = 1
PA_PORT_AVAILABLE_YES = 2
pa_port_available_t = enum_pa_port_available # /usr/include/pulse/def.h:1040
# NOTE(review): the wrapper generator emits this opaque structure
# definition twice; the second class statement rebinds the same name to
# an identical definition, which is redundant but harmless. The same
# duplicate-emission pattern recurs for the other opaque structs below.
class struct_pa_mainloop_api(Structure):
    __slots__ = [
    ]
struct_pa_mainloop_api._fields_ = [
    ('_opaque_struct', c_int)
]
class struct_pa_mainloop_api(Structure):
    __slots__ = [
    ]
struct_pa_mainloop_api._fields_ = [
    ('_opaque_struct', c_int)
]
# Opaque handle for the PulseAudio main-loop API vtable.
pa_mainloop_api = struct_pa_mainloop_api # /usr/include/pulse/mainloop-api.h:47
enum_pa_io_event_flags = c_int
PA_IO_EVENT_NULL = 0
PA_IO_EVENT_INPUT = 1
PA_IO_EVENT_OUTPUT = 2
PA_IO_EVENT_HANGUP = 4
PA_IO_EVENT_ERROR = 8
pa_io_event_flags_t = enum_pa_io_event_flags # /usr/include/pulse/mainloop-api.h:56
class struct_pa_io_event(Structure):
__slots__ = [
]
struct_pa_io_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_io_event(Structure):
__slots__ = [
]
struct_pa_io_event._fields_ = [
('_opaque_struct', c_int)
]
pa_io_event = struct_pa_io_event # /usr/include/pulse/mainloop-api.h:59
pa_io_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_io_event), c_int, pa_io_event_flags_t, POINTER(None)) # /usr/include/pulse/mainloop-api.h:61
pa_io_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_io_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:63
class struct_pa_time_event(Structure):
__slots__ = [
]
struct_pa_time_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_time_event(Structure):
__slots__ = [
]
struct_pa_time_event._fields_ = [
('_opaque_struct', c_int)
]
pa_time_event = struct_pa_time_event # /usr/include/pulse/mainloop-api.h:66
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
pa_time_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_time_event), POINTER(struct_timeval), POINTER(None)) # /usr/include/pulse/mainloop-api.h:68
pa_time_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_time_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:70
class struct_pa_defer_event(Structure):
__slots__ = [
]
struct_pa_defer_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_defer_event(Structure):
__slots__ = [
]
struct_pa_defer_event._fields_ = [
('_opaque_struct', c_int)
]
pa_defer_event = struct_pa_defer_event # /usr/include/pulse/mainloop-api.h:73
pa_defer_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_defer_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:75
pa_defer_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_defer_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:77
# /usr/include/pulse/mainloop-api.h:120
pa_mainloop_api_once = _lib.pa_mainloop_api_once
pa_mainloop_api_once.restype = None
pa_mainloop_api_once.argtypes = [POINTER(pa_mainloop_api), CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(None)), POINTER(None)]
enum_pa_channel_position = c_int
PA_CHANNEL_POSITION_INVALID = -1
PA_CHANNEL_POSITION_MONO = 0
PA_CHANNEL_POSITION_FRONT_LEFT = 1
PA_CHANNEL_POSITION_FRONT_RIGHT = 2
PA_CHANNEL_POSITION_FRONT_CENTER = 3
PA_CHANNEL_POSITION_LEFT = 0
PA_CHANNEL_POSITION_RIGHT = 0
PA_CHANNEL_POSITION_CENTER = 0
PA_CHANNEL_POSITION_REAR_CENTER = 1
PA_CHANNEL_POSITION_REAR_LEFT = 2
PA_CHANNEL_POSITION_REAR_RIGHT = 3
PA_CHANNEL_POSITION_LFE = 4
PA_CHANNEL_POSITION_SUBWOOFER = 0
PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER = 1
PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER = 2
PA_CHANNEL_POSITION_SIDE_LEFT = 3
PA_CHANNEL_POSITION_SIDE_RIGHT = 4
PA_CHANNEL_POSITION_AUX0 = 5
PA_CHANNEL_POSITION_AUX1 = 6
PA_CHANNEL_POSITION_AUX2 = 7
PA_CHANNEL_POSITION_AUX3 = 8
PA_CHANNEL_POSITION_AUX4 = 9
PA_CHANNEL_POSITION_AUX5 = 10
PA_CHANNEL_POSITION_AUX6 = 11
PA_CHANNEL_POSITION_AUX7 = 12
PA_CHANNEL_POSITION_AUX8 = 13
PA_CHANNEL_POSITION_AUX9 = 14
PA_CHANNEL_POSITION_AUX10 = 15
PA_CHANNEL_POSITION_AUX11 = 16
PA_CHANNEL_POSITION_AUX12 = 17
PA_CHANNEL_POSITION_AUX13 = 18
PA_CHANNEL_POSITION_AUX14 = 19
PA_CHANNEL_POSITION_AUX15 = 20
PA_CHANNEL_POSITION_AUX16 = 21
PA_CHANNEL_POSITION_AUX17 = 22
PA_CHANNEL_POSITION_AUX18 = 23
PA_CHANNEL_POSITION_AUX19 = 24
PA_CHANNEL_POSITION_AUX20 = 25
PA_CHANNEL_POSITION_AUX21 = 26
PA_CHANNEL_POSITION_AUX22 = 27
PA_CHANNEL_POSITION_AUX23 = 28
PA_CHANNEL_POSITION_AUX24 = 29
PA_CHANNEL_POSITION_AUX25 = 30
PA_CHANNEL_POSITION_AUX26 = 31
PA_CHANNEL_POSITION_AUX27 = 32
PA_CHANNEL_POSITION_AUX28 = 33
PA_CHANNEL_POSITION_AUX29 = 34
PA_CHANNEL_POSITION_AUX30 = 35
PA_CHANNEL_POSITION_AUX31 = 36
PA_CHANNEL_POSITION_TOP_CENTER = 37
PA_CHANNEL_POSITION_TOP_FRONT_LEFT = 38
PA_CHANNEL_POSITION_TOP_FRONT_RIGHT = 39
PA_CHANNEL_POSITION_TOP_FRONT_CENTER = 40
PA_CHANNEL_POSITION_TOP_REAR_LEFT = 41
PA_CHANNEL_POSITION_TOP_REAR_RIGHT = 42
PA_CHANNEL_POSITION_TOP_REAR_CENTER = 43
PA_CHANNEL_POSITION_MAX = 44
pa_channel_position_t = enum_pa_channel_position # /usr/include/pulse/channelmap.h:147
pa_channel_position_mask_t = c_uint64 # /usr/include/pulse/channelmap.h:210
enum_pa_channel_map_def = c_int
PA_CHANNEL_MAP_AIFF = 0
PA_CHANNEL_MAP_ALSA = 1
PA_CHANNEL_MAP_AUX = 2
PA_CHANNEL_MAP_WAVEEX = 3
PA_CHANNEL_MAP_OSS = 4
PA_CHANNEL_MAP_DEF_MAX = 5
PA_CHANNEL_MAP_DEFAULT = 0
pa_channel_map_def_t = enum_pa_channel_map_def # /usr/include/pulse/channelmap.h:247
class struct_pa_channel_map(Structure):
__slots__ = [
'channels',
'map',
]
struct_pa_channel_map._fields_ = [
('channels', c_uint8),
('map', pa_channel_position_t * 32),
]
pa_channel_map = struct_pa_channel_map # /usr/include/pulse/channelmap.h:268
# /usr/include/pulse/channelmap.h:273
pa_channel_map_init = _lib.pa_channel_map_init
pa_channel_map_init.restype = POINTER(pa_channel_map)
pa_channel_map_init.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:276
pa_channel_map_init_mono = _lib.pa_channel_map_init_mono
pa_channel_map_init_mono.restype = POINTER(pa_channel_map)
pa_channel_map_init_mono.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:279
pa_channel_map_init_stereo = _lib.pa_channel_map_init_stereo
pa_channel_map_init_stereo.restype = POINTER(pa_channel_map)
pa_channel_map_init_stereo.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:285
pa_channel_map_init_auto = _lib.pa_channel_map_init_auto
pa_channel_map_init_auto.restype = POINTER(pa_channel_map)
pa_channel_map_init_auto.argtypes = [POINTER(pa_channel_map), c_uint, pa_channel_map_def_t]
# /usr/include/pulse/channelmap.h:291
pa_channel_map_init_extend = _lib.pa_channel_map_init_extend
pa_channel_map_init_extend.restype = POINTER(pa_channel_map)
pa_channel_map_init_extend.argtypes = [POINTER(pa_channel_map), c_uint, pa_channel_map_def_t]
# /usr/include/pulse/channelmap.h:294
pa_channel_position_to_string = _lib.pa_channel_position_to_string
pa_channel_position_to_string.restype = c_char_p
pa_channel_position_to_string.argtypes = [pa_channel_position_t]
# /usr/include/pulse/channelmap.h:297
pa_channel_position_from_string = _lib.pa_channel_position_from_string
pa_channel_position_from_string.restype = pa_channel_position_t
pa_channel_position_from_string.argtypes = [c_char_p]
# /usr/include/pulse/channelmap.h:300
pa_channel_position_to_pretty_string = _lib.pa_channel_position_to_pretty_string
pa_channel_position_to_pretty_string.restype = c_char_p
pa_channel_position_to_pretty_string.argtypes = [pa_channel_position_t]
PA_CHANNEL_MAP_SNPRINT_MAX = 336 # /usr/include/pulse/channelmap.h:307
# /usr/include/pulse/channelmap.h:310
pa_channel_map_snprint = _lib.pa_channel_map_snprint
pa_channel_map_snprint.restype = c_char_p
pa_channel_map_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:316
pa_channel_map_parse = _lib.pa_channel_map_parse
pa_channel_map_parse.restype = POINTER(pa_channel_map)
pa_channel_map_parse.argtypes = [POINTER(pa_channel_map), c_char_p]
# /usr/include/pulse/channelmap.h:319
pa_channel_map_equal = _lib.pa_channel_map_equal
pa_channel_map_equal.restype = c_int
pa_channel_map_equal.argtypes = [POINTER(pa_channel_map), POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:322
pa_channel_map_valid = _lib.pa_channel_map_valid
pa_channel_map_valid.restype = c_int
pa_channel_map_valid.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:326
pa_channel_map_compatible = _lib.pa_channel_map_compatible
pa_channel_map_compatible.restype = c_int
pa_channel_map_compatible.argtypes = [POINTER(pa_channel_map), POINTER(pa_sample_spec)]
# /usr/include/pulse/channelmap.h:329
pa_channel_map_superset = _lib.pa_channel_map_superset
pa_channel_map_superset.restype = c_int
pa_channel_map_superset.argtypes = [POINTER(pa_channel_map), POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:334
pa_channel_map_can_balance = _lib.pa_channel_map_can_balance
pa_channel_map_can_balance.restype = c_int
pa_channel_map_can_balance.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:339
pa_channel_map_can_fade = _lib.pa_channel_map_can_fade
pa_channel_map_can_fade.restype = c_int
pa_channel_map_can_fade.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:345
pa_channel_map_to_name = _lib.pa_channel_map_to_name
pa_channel_map_to_name.restype = c_char_p
pa_channel_map_to_name.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:350
pa_channel_map_to_pretty_name = _lib.pa_channel_map_to_pretty_name
pa_channel_map_to_pretty_name.restype = c_char_p
pa_channel_map_to_pretty_name.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:354
pa_channel_map_has_position = _lib.pa_channel_map_has_position
pa_channel_map_has_position.restype = c_int
pa_channel_map_has_position.argtypes = [POINTER(pa_channel_map), pa_channel_position_t]
# /usr/include/pulse/channelmap.h:357
pa_channel_map_mask = _lib.pa_channel_map_mask
pa_channel_map_mask.restype = pa_channel_position_mask_t
pa_channel_map_mask.argtypes = [POINTER(pa_channel_map)]
# The binding generator emitted this opaque declaration twice back-to-back;
# the second `class` statement simply discarded the first class object, so a
# single definition is kept here.
class struct_pa_operation(Structure):
    """Opaque handle for an asynchronous libpulse operation.

    The real layout is private to libpulse; Python code only ever holds
    pointers to it, so a single dummy field satisfies ctypes.
    """
    __slots__ = [
    ]
struct_pa_operation._fields_ = [
    ('_opaque_struct', c_int)
]
# --- pulse/operation.h: async operation reference counting and state -------
pa_operation = struct_pa_operation # /usr/include/pulse/operation.h:33
# Callback invoked when an operation's state changes.
pa_operation_notify_cb_t = CFUNCTYPE(None, POINTER(pa_operation), POINTER(None)) # /usr/include/pulse/operation.h:36
# /usr/include/pulse/operation.h:39
pa_operation_ref = _lib.pa_operation_ref
pa_operation_ref.restype = POINTER(pa_operation)
pa_operation_ref.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:42
pa_operation_unref = _lib.pa_operation_unref
pa_operation_unref.restype = None
pa_operation_unref.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:49
pa_operation_cancel = _lib.pa_operation_cancel
pa_operation_cancel.restype = None
pa_operation_cancel.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:52
pa_operation_get_state = _lib.pa_operation_get_state
pa_operation_get_state.restype = pa_operation_state_t
pa_operation_get_state.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:60
pa_operation_set_state_callback = _lib.pa_operation_set_state_callback
pa_operation_set_state_callback.restype = None
pa_operation_set_state_callback.argtypes = [POINTER(pa_operation), pa_operation_notify_cb_t, POINTER(None)]
# The binding generator emitted this opaque declaration twice back-to-back;
# the second `class` statement simply discarded the first class object, so a
# single definition is kept here.
class struct_pa_context(Structure):
    """Opaque handle for a connection context to the PulseAudio server.

    The real layout is private to libpulse; Python code only ever holds
    pointers to it, so a single dummy field satisfies ctypes.
    """
    __slots__ = [
    ]
struct_pa_context._fields_ = [
    ('_opaque_struct', c_int)
]
pa_context = struct_pa_context # /usr/include/pulse/context.h:154
pa_context_notify_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(None)) # /usr/include/pulse/context.h:157
pa_context_success_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_int, POINTER(None)) # /usr/include/pulse/context.h:160
# The binding generator emitted this opaque declaration twice back-to-back;
# the second `class` statement simply discarded the first class object, so a
# single definition is kept here.
class struct_pa_proplist(Structure):
    """Opaque handle for a PulseAudio property list.

    The real layout is private to libpulse; Python code only ever holds
    pointers to it, so a single dummy field satisfies ctypes.
    """
    __slots__ = [
    ]
struct_pa_proplist._fields_ = [
    ('_opaque_struct', c_int)
]
pa_proplist = struct_pa_proplist # /usr/include/pulse/proplist.h:272
# Server-event callback: (context, event name, property list, userdata).
pa_context_event_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_char_p, POINTER(pa_proplist), POINTER(None)) # /usr/include/pulse/context.h:167
# --- pulse/context.h: context lifecycle, connection and queries ------------
# Generated ctypes prototypes bound from `_lib` (defined earlier in this
# file); functions returning POINTER(pa_operation) are asynchronous.
# /usr/include/pulse/context.h:172
pa_context_new = _lib.pa_context_new
pa_context_new.restype = POINTER(pa_context)
pa_context_new.argtypes = [POINTER(pa_mainloop_api), c_char_p]
# /usr/include/pulse/context.h:177
pa_context_new_with_proplist = _lib.pa_context_new_with_proplist
pa_context_new_with_proplist.restype = POINTER(pa_context)
pa_context_new_with_proplist.argtypes = [POINTER(pa_mainloop_api), c_char_p, POINTER(pa_proplist)]
# /usr/include/pulse/context.h:180
pa_context_unref = _lib.pa_context_unref
pa_context_unref.restype = None
pa_context_unref.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:183
pa_context_ref = _lib.pa_context_ref
pa_context_ref.restype = POINTER(pa_context)
pa_context_ref.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:186
pa_context_set_state_callback = _lib.pa_context_set_state_callback
pa_context_set_state_callback.restype = None
pa_context_set_state_callback.argtypes = [POINTER(pa_context), pa_context_notify_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:190
pa_context_set_event_callback = _lib.pa_context_set_event_callback
pa_context_set_event_callback.restype = None
pa_context_set_event_callback.argtypes = [POINTER(pa_context), pa_context_event_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:193
pa_context_errno = _lib.pa_context_errno
pa_context_errno.restype = c_int
pa_context_errno.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:196
pa_context_is_pending = _lib.pa_context_is_pending
pa_context_is_pending.restype = c_int
pa_context_is_pending.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:199
pa_context_get_state = _lib.pa_context_get_state
pa_context_get_state.restype = pa_context_state_t
pa_context_get_state.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:209
pa_context_connect = _lib.pa_context_connect
pa_context_connect.restype = c_int
pa_context_connect.argtypes = [POINTER(pa_context), c_char_p, pa_context_flags_t, POINTER(pa_spawn_api)]
# /usr/include/pulse/context.h:212
pa_context_disconnect = _lib.pa_context_disconnect
pa_context_disconnect.restype = None
pa_context_disconnect.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:215
pa_context_drain = _lib.pa_context_drain
pa_context_drain.restype = POINTER(pa_operation)
pa_context_drain.argtypes = [POINTER(pa_context), pa_context_notify_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:220
pa_context_exit_daemon = _lib.pa_context_exit_daemon
pa_context_exit_daemon.restype = POINTER(pa_operation)
pa_context_exit_daemon.argtypes = [POINTER(pa_context), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:223
pa_context_set_default_sink = _lib.pa_context_set_default_sink
pa_context_set_default_sink.restype = POINTER(pa_operation)
pa_context_set_default_sink.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:226
pa_context_set_default_source = _lib.pa_context_set_default_source
pa_context_set_default_source.restype = POINTER(pa_operation)
pa_context_set_default_source.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:229
pa_context_is_local = _lib.pa_context_is_local
pa_context_is_local.restype = c_int
pa_context_is_local.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:232
pa_context_set_name = _lib.pa_context_set_name
pa_context_set_name.restype = POINTER(pa_operation)
pa_context_set_name.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:235
pa_context_get_server = _lib.pa_context_get_server
pa_context_get_server.restype = c_char_p
pa_context_get_server.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:238
pa_context_get_protocol_version = _lib.pa_context_get_protocol_version
pa_context_get_protocol_version.restype = c_uint32
pa_context_get_protocol_version.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:241
pa_context_get_server_protocol_version = _lib.pa_context_get_server_protocol_version
pa_context_get_server_protocol_version.restype = c_uint32
pa_context_get_server_protocol_version.argtypes = [POINTER(pa_context)]
# C enum pa_update_mode (pulse/proplist.h), represented as a plain c_int
# with its enumerators exposed as module-level integer constants.
enum_pa_update_mode = c_int
PA_UPDATE_SET = 0
PA_UPDATE_MERGE = 1
PA_UPDATE_REPLACE = 2
pa_update_mode_t = enum_pa_update_mode # /usr/include/pulse/proplist.h:337
# --- pulse/context.h: proplist updates, rt timers and misc queries ---------
# /usr/include/pulse/context.h:248
pa_context_proplist_update = _lib.pa_context_proplist_update
pa_context_proplist_update.restype = POINTER(pa_operation)
pa_context_proplist_update.argtypes = [POINTER(pa_context), pa_update_mode_t, POINTER(pa_proplist), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:251
pa_context_proplist_remove = _lib.pa_context_proplist_remove
pa_context_proplist_remove.restype = POINTER(pa_operation)
pa_context_proplist_remove.argtypes = [POINTER(pa_context), POINTER(c_char_p), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:256
pa_context_get_index = _lib.pa_context_get_index
pa_context_get_index.restype = c_uint32
pa_context_get_index.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:260
pa_context_rttime_new = _lib.pa_context_rttime_new
pa_context_rttime_new.restype = POINTER(pa_time_event)
pa_context_rttime_new.argtypes = [POINTER(pa_context), pa_usec_t, pa_time_event_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:264
pa_context_rttime_restart = _lib.pa_context_rttime_restart
pa_context_rttime_restart.restype = None
pa_context_rttime_restart.argtypes = [POINTER(pa_context), POINTER(pa_time_event), pa_usec_t]
# /usr/include/pulse/context.h:279
pa_context_get_tile_size = _lib.pa_context_get_tile_size
pa_context_get_tile_size.restype = c_size_t
pa_context_get_tile_size.argtypes = [POINTER(pa_context), POINTER(pa_sample_spec)]
# /usr/include/pulse/context.h:287
pa_context_load_cookie_from_file = _lib.pa_context_load_cookie_from_file
pa_context_load_cookie_from_file.restype = c_int
pa_context_load_cookie_from_file.argtypes = [POINTER(pa_context), c_char_p]
pa_volume_t = c_uint32  # /usr/include/pulse/volume.h:120

class struct_pa_cvolume(Structure):
    """Per-channel volume structure (pa_cvolume, pulse/volume.h)."""
    __slots__ = ['channels', 'values']
    # Layout mirrors the C struct: a channel count followed by a fixed
    # 32-entry volume array.
    _fields_ = [
        ('channels', c_uint8),
        ('values', pa_volume_t * 32),
    ]

pa_cvolume = struct_pa_cvolume  # /usr/include/pulse/volume.h:151
# --- pulse/volume.h: volume arithmetic, formatting and channel helpers -----
# Generated ctypes prototypes bound from `_lib` (defined earlier in this
# file).  The *_SNPRINT_MAX constants are the buffer sizes the corresponding
# snprint functions expect.
# /usr/include/pulse/volume.h:154
pa_cvolume_equal = _lib.pa_cvolume_equal
pa_cvolume_equal.restype = c_int
pa_cvolume_equal.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:159
pa_cvolume_init = _lib.pa_cvolume_init
pa_cvolume_init.restype = POINTER(pa_cvolume)
pa_cvolume_init.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:168
pa_cvolume_set = _lib.pa_cvolume_set
pa_cvolume_set.restype = POINTER(pa_cvolume)
pa_cvolume_set.argtypes = [POINTER(pa_cvolume), c_uint, pa_volume_t]
PA_CVOLUME_SNPRINT_MAX = 320 # /usr/include/pulse/volume.h:175
# /usr/include/pulse/volume.h:178
pa_cvolume_snprint = _lib.pa_cvolume_snprint
pa_cvolume_snprint.restype = c_char_p
pa_cvolume_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume)]
PA_SW_CVOLUME_SNPRINT_DB_MAX = 448 # /usr/include/pulse/volume.h:185
# /usr/include/pulse/volume.h:188
pa_sw_cvolume_snprint_dB = _lib.pa_sw_cvolume_snprint_dB
pa_sw_cvolume_snprint_dB.restype = c_char_p
pa_sw_cvolume_snprint_dB.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume)]
PA_CVOLUME_SNPRINT_VERBOSE_MAX = 1984 # /usr/include/pulse/volume.h:194
# /usr/include/pulse/volume.h:200
pa_cvolume_snprint_verbose = _lib.pa_cvolume_snprint_verbose
pa_cvolume_snprint_verbose.restype = c_char_p
pa_cvolume_snprint_verbose.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume), POINTER(pa_channel_map), c_int]
PA_VOLUME_SNPRINT_MAX = 10 # /usr/include/pulse/volume.h:207
# /usr/include/pulse/volume.h:210
pa_volume_snprint = _lib.pa_volume_snprint
pa_volume_snprint.restype = c_char_p
pa_volume_snprint.argtypes = [c_char_p, c_size_t, pa_volume_t]
PA_SW_VOLUME_SNPRINT_DB_MAX = 11 # /usr/include/pulse/volume.h:217
# /usr/include/pulse/volume.h:220
pa_sw_volume_snprint_dB = _lib.pa_sw_volume_snprint_dB
pa_sw_volume_snprint_dB.restype = c_char_p
pa_sw_volume_snprint_dB.argtypes = [c_char_p, c_size_t, pa_volume_t]
PA_VOLUME_SNPRINT_VERBOSE_MAX = 35 # /usr/include/pulse/volume.h:226
# /usr/include/pulse/volume.h:231
pa_volume_snprint_verbose = _lib.pa_volume_snprint_verbose
pa_volume_snprint_verbose.restype = c_char_p
pa_volume_snprint_verbose.argtypes = [c_char_p, c_size_t, pa_volume_t, c_int]
# /usr/include/pulse/volume.h:234
pa_cvolume_avg = _lib.pa_cvolume_avg
pa_cvolume_avg.restype = pa_volume_t
pa_cvolume_avg.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:241
pa_cvolume_avg_mask = _lib.pa_cvolume_avg_mask
pa_cvolume_avg_mask.restype = pa_volume_t
pa_cvolume_avg_mask.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:244
pa_cvolume_max = _lib.pa_cvolume_max
pa_cvolume_max.restype = pa_volume_t
pa_cvolume_max.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:251
pa_cvolume_max_mask = _lib.pa_cvolume_max_mask
pa_cvolume_max_mask.restype = pa_volume_t
pa_cvolume_max_mask.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:254
pa_cvolume_min = _lib.pa_cvolume_min
pa_cvolume_min.restype = pa_volume_t
pa_cvolume_min.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:261
pa_cvolume_min_mask = _lib.pa_cvolume_min_mask
pa_cvolume_min_mask.restype = pa_volume_t
pa_cvolume_min_mask.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:264
pa_cvolume_valid = _lib.pa_cvolume_valid
pa_cvolume_valid.restype = c_int
pa_cvolume_valid.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:267
pa_cvolume_channels_equal_to = _lib.pa_cvolume_channels_equal_to
pa_cvolume_channels_equal_to.restype = c_int
pa_cvolume_channels_equal_to.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:278
pa_sw_volume_multiply = _lib.pa_sw_volume_multiply
pa_sw_volume_multiply.restype = pa_volume_t
pa_sw_volume_multiply.argtypes = [pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:283
pa_sw_cvolume_multiply = _lib.pa_sw_cvolume_multiply
pa_sw_cvolume_multiply.restype = POINTER(pa_cvolume)
pa_sw_cvolume_multiply.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:289
pa_sw_cvolume_multiply_scalar = _lib.pa_sw_cvolume_multiply_scalar
pa_sw_cvolume_multiply_scalar.restype = POINTER(pa_cvolume)
pa_sw_cvolume_multiply_scalar.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:295
pa_sw_volume_divide = _lib.pa_sw_volume_divide
pa_sw_volume_divide.restype = pa_volume_t
pa_sw_volume_divide.argtypes = [pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:300
pa_sw_cvolume_divide = _lib.pa_sw_cvolume_divide
pa_sw_cvolume_divide.restype = POINTER(pa_cvolume)
pa_sw_cvolume_divide.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:306
pa_sw_cvolume_divide_scalar = _lib.pa_sw_cvolume_divide_scalar
pa_sw_cvolume_divide_scalar.restype = POINTER(pa_cvolume)
pa_sw_cvolume_divide_scalar.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:309
pa_sw_volume_from_dB = _lib.pa_sw_volume_from_dB
pa_sw_volume_from_dB.restype = pa_volume_t
pa_sw_volume_from_dB.argtypes = [c_double]
# /usr/include/pulse/volume.h:312
pa_sw_volume_to_dB = _lib.pa_sw_volume_to_dB
pa_sw_volume_to_dB.restype = c_double
pa_sw_volume_to_dB.argtypes = [pa_volume_t]
# /usr/include/pulse/volume.h:316
pa_sw_volume_from_linear = _lib.pa_sw_volume_from_linear
pa_sw_volume_from_linear.restype = pa_volume_t
pa_sw_volume_from_linear.argtypes = [c_double]
# /usr/include/pulse/volume.h:319
pa_sw_volume_to_linear = _lib.pa_sw_volume_to_linear
pa_sw_volume_to_linear.restype = c_double
pa_sw_volume_to_linear.argtypes = [pa_volume_t]
# /usr/include/pulse/volume.h:329
pa_cvolume_remap = _lib.pa_cvolume_remap
pa_cvolume_remap.restype = POINTER(pa_cvolume)
pa_cvolume_remap.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:333
pa_cvolume_compatible = _lib.pa_cvolume_compatible
pa_cvolume_compatible.restype = c_int
pa_cvolume_compatible.argtypes = [POINTER(pa_cvolume), POINTER(pa_sample_spec)]
# /usr/include/pulse/volume.h:337
pa_cvolume_compatible_with_channel_map = _lib.pa_cvolume_compatible_with_channel_map
pa_cvolume_compatible_with_channel_map.restype = c_int
pa_cvolume_compatible_with_channel_map.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:344
pa_cvolume_get_balance = _lib.pa_cvolume_get_balance
pa_cvolume_get_balance.restype = c_float
pa_cvolume_get_balance.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:355
pa_cvolume_set_balance = _lib.pa_cvolume_set_balance
pa_cvolume_set_balance.restype = POINTER(pa_cvolume)
pa_cvolume_set_balance.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), c_float]
# /usr/include/pulse/volume.h:362
pa_cvolume_get_fade = _lib.pa_cvolume_get_fade
pa_cvolume_get_fade.restype = c_float
pa_cvolume_get_fade.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:373
pa_cvolume_set_fade = _lib.pa_cvolume_set_fade
pa_cvolume_set_fade.restype = POINTER(pa_cvolume)
pa_cvolume_set_fade.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), c_float]
# /usr/include/pulse/volume.h:378
pa_cvolume_scale = _lib.pa_cvolume_scale
pa_cvolume_scale.restype = POINTER(pa_cvolume)
pa_cvolume_scale.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:384
pa_cvolume_scale_mask = _lib.pa_cvolume_scale_mask
pa_cvolume_scale_mask.restype = POINTER(pa_cvolume)
pa_cvolume_scale_mask.argtypes = [POINTER(pa_cvolume), pa_volume_t, POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:391
pa_cvolume_set_position = _lib.pa_cvolume_set_position
pa_cvolume_set_position.restype = POINTER(pa_cvolume)
pa_cvolume_set_position.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_t, pa_volume_t]
# /usr/include/pulse/volume.h:397
pa_cvolume_get_position = _lib.pa_cvolume_get_position
pa_cvolume_get_position.restype = pa_volume_t
pa_cvolume_get_position.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_t]
# /usr/include/pulse/volume.h:402
pa_cvolume_merge = _lib.pa_cvolume_merge
pa_cvolume_merge.restype = POINTER(pa_cvolume)
pa_cvolume_merge.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:406
pa_cvolume_inc_clamp = _lib.pa_cvolume_inc_clamp
pa_cvolume_inc_clamp.restype = POINTER(pa_cvolume)
pa_cvolume_inc_clamp.argtypes = [POINTER(pa_cvolume), pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:410
pa_cvolume_inc = _lib.pa_cvolume_inc
pa_cvolume_inc.restype = POINTER(pa_cvolume)
pa_cvolume_inc.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:414
pa_cvolume_dec = _lib.pa_cvolume_dec
pa_cvolume_dec.restype = POINTER(pa_cvolume)
pa_cvolume_dec.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# The binding generator emitted this opaque declaration twice back-to-back;
# the second `class` statement simply discarded the first class object, so a
# single definition is kept here.
class struct_pa_stream(Structure):
    """Opaque handle for a playback or record stream.

    The real layout is private to libpulse; Python code only ever holds
    pointers to it, so a single dummy field satisfies ctypes.
    """
    __slots__ = [
    ]
struct_pa_stream._fields_ = [
    ('_opaque_struct', c_int)
]
# --- pulse/stream.h: stream alias, callback signatures and constructors ----
pa_stream = struct_pa_stream # /usr/include/pulse/stream.h:335
pa_stream_success_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_int, POINTER(None)) # /usr/include/pulse/stream.h:338
pa_stream_request_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_size_t, POINTER(None)) # /usr/include/pulse/stream.h:341
pa_stream_notify_cb_t = CFUNCTYPE(None, POINTER(pa_stream), POINTER(None)) # /usr/include/pulse/stream.h:344
pa_stream_event_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_char_p, POINTER(pa_proplist), POINTER(None)) # /usr/include/pulse/stream.h:352
# /usr/include/pulse/stream.h:357
pa_stream_new = _lib.pa_stream_new
pa_stream_new.restype = POINTER(pa_stream)
pa_stream_new.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_sample_spec), POINTER(pa_channel_map)]
# /usr/include/pulse/stream.h:366
pa_stream_new_with_proplist = _lib.pa_stream_new_with_proplist
pa_stream_new_with_proplist.restype = POINTER(pa_stream)
pa_stream_new_with_proplist.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_sample_spec), POINTER(pa_channel_map), POINTER(pa_proplist)]
# --- pulse/format.h: stream format description -----------------------------
# NOTE: declaration order is significant here — the generator opens the
# class, then defines the pa_encoding enum it needs, then assigns _fields_.
class struct_pa_format_info(Structure):
    """Format of a stream: an encoding plus a property list (pa_format_info)."""
    __slots__ = [
        'encoding',
        'plist',
    ]
# C enum pa_encoding, represented as plain ints on a c_int base.
enum_pa_encoding = c_int
PA_ENCODING_ANY = 0
PA_ENCODING_PCM = 1
PA_ENCODING_AC3_IEC61937 = 2
PA_ENCODING_EAC3_IEC61937 = 3
PA_ENCODING_MPEG_IEC61937 = 4
PA_ENCODING_DTS_IEC61937 = 5
PA_ENCODING_MPEG2_AAC_IEC61937 = 6
PA_ENCODING_MAX = 7
PA_ENCODING_INVALID = -1
pa_encoding_t = enum_pa_encoding # /usr/include/pulse/format.h:64
struct_pa_format_info._fields_ = [
    ('encoding', pa_encoding_t),
    ('plist', POINTER(pa_proplist)),
]
pa_format_info = struct_pa_format_info # /usr/include/pulse/format.h:91
# --- pulse/stream.h: stream lifecycle, connection and data transfer --------
# Generated ctypes prototypes bound from `_lib` (defined earlier in this
# file); functions returning POINTER(pa_operation) are asynchronous.
# /usr/include/pulse/stream.h:377
pa_stream_new_extended = _lib.pa_stream_new_extended
pa_stream_new_extended.restype = POINTER(pa_stream)
pa_stream_new_extended.argtypes = [POINTER(pa_context), c_char_p, POINTER(POINTER(pa_format_info)), c_uint, POINTER(pa_proplist)]
# /usr/include/pulse/stream.h:385
pa_stream_unref = _lib.pa_stream_unref
pa_stream_unref.restype = None
pa_stream_unref.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:388
pa_stream_ref = _lib.pa_stream_ref
pa_stream_ref.restype = POINTER(pa_stream)
pa_stream_ref.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:391
pa_stream_get_state = _lib.pa_stream_get_state
pa_stream_get_state.restype = pa_stream_state_t
pa_stream_get_state.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:394
pa_stream_get_context = _lib.pa_stream_get_context
pa_stream_get_context.restype = POINTER(pa_context)
pa_stream_get_context.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:400
pa_stream_get_index = _lib.pa_stream_get_index
pa_stream_get_index.restype = c_uint32
pa_stream_get_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:411
pa_stream_get_device_index = _lib.pa_stream_get_device_index
pa_stream_get_device_index.restype = c_uint32
pa_stream_get_device_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:422
pa_stream_get_device_name = _lib.pa_stream_get_device_name
pa_stream_get_device_name.restype = c_char_p
pa_stream_get_device_name.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:428
pa_stream_is_suspended = _lib.pa_stream_is_suspended
pa_stream_is_suspended.restype = c_int
pa_stream_is_suspended.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:432
pa_stream_is_corked = _lib.pa_stream_is_corked
pa_stream_is_corked.restype = c_int
pa_stream_is_corked.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:458
pa_stream_connect_playback = _lib.pa_stream_connect_playback
pa_stream_connect_playback.restype = c_int
pa_stream_connect_playback.argtypes = [POINTER(pa_stream), c_char_p, POINTER(pa_buffer_attr), pa_stream_flags_t, POINTER(pa_cvolume), POINTER(pa_stream)]
# /usr/include/pulse/stream.h:467
pa_stream_connect_record = _lib.pa_stream_connect_record
pa_stream_connect_record.restype = c_int
pa_stream_connect_record.argtypes = [POINTER(pa_stream), c_char_p, POINTER(pa_buffer_attr), pa_stream_flags_t]
# /usr/include/pulse/stream.h:474
pa_stream_disconnect = _lib.pa_stream_disconnect
pa_stream_disconnect.restype = c_int
pa_stream_disconnect.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:508
pa_stream_begin_write = _lib.pa_stream_begin_write
pa_stream_begin_write.restype = c_int
pa_stream_begin_write.argtypes = [POINTER(pa_stream), POINTER(POINTER(None)), POINTER(c_size_t)]
# /usr/include/pulse/stream.h:522
pa_stream_cancel_write = _lib.pa_stream_cancel_write
pa_stream_cancel_write.restype = c_int
pa_stream_cancel_write.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:547
pa_stream_write = _lib.pa_stream_write
pa_stream_write.restype = c_int
pa_stream_write.argtypes = [POINTER(pa_stream), POINTER(None), c_size_t, pa_free_cb_t, c_int64, pa_seek_mode_t]
# /usr/include/pulse/stream.h:557
pa_stream_write_ext_free = _lib.pa_stream_write_ext_free
pa_stream_write_ext_free.restype = c_int
pa_stream_write_ext_free.argtypes = [POINTER(pa_stream), POINTER(None), c_size_t, pa_free_cb_t, POINTER(None), c_int64, pa_seek_mode_t]
# /usr/include/pulse/stream.h:582
pa_stream_peek = _lib.pa_stream_peek
pa_stream_peek.restype = c_int
pa_stream_peek.argtypes = [POINTER(pa_stream), POINTER(POINTER(None)), POINTER(c_size_t)]
# /usr/include/pulse/stream.h:589
pa_stream_drop = _lib.pa_stream_drop
pa_stream_drop.restype = c_int
pa_stream_drop.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:592
pa_stream_writable_size = _lib.pa_stream_writable_size
pa_stream_writable_size.restype = c_size_t
pa_stream_writable_size.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:595
pa_stream_readable_size = _lib.pa_stream_readable_size
pa_stream_readable_size.restype = c_size_t
pa_stream_readable_size.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:601
pa_stream_drain = _lib.pa_stream_drain
pa_stream_drain.restype = POINTER(pa_operation)
pa_stream_drain.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:607
pa_stream_update_timing_info = _lib.pa_stream_update_timing_info
pa_stream_update_timing_info.restype = POINTER(pa_operation)
pa_stream_update_timing_info.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:610
pa_stream_set_state_callback = _lib.pa_stream_set_state_callback
pa_stream_set_state_callback.restype = None
pa_stream_set_state_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:614
pa_stream_set_write_callback = _lib.pa_stream_set_write_callback
pa_stream_set_write_callback.restype = None
pa_stream_set_write_callback.argtypes = [POINTER(pa_stream), pa_stream_request_cb_t, POINTER(None)]
# --- pulse/stream.h: stream callbacks, control and introspection -----------
# /usr/include/pulse/stream.h:617
pa_stream_set_read_callback = _lib.pa_stream_set_read_callback
pa_stream_set_read_callback.restype = None
pa_stream_set_read_callback.argtypes = [POINTER(pa_stream), pa_stream_request_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:620
pa_stream_set_overflow_callback = _lib.pa_stream_set_overflow_callback
pa_stream_set_overflow_callback.restype = None
pa_stream_set_overflow_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:626
pa_stream_get_underflow_index = _lib.pa_stream_get_underflow_index
pa_stream_get_underflow_index.restype = c_int64
pa_stream_get_underflow_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:629
pa_stream_set_underflow_callback = _lib.pa_stream_set_underflow_callback
pa_stream_set_underflow_callback.restype = None
pa_stream_set_underflow_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:636
pa_stream_set_started_callback = _lib.pa_stream_set_started_callback
pa_stream_set_started_callback.restype = None
pa_stream_set_started_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:641
pa_stream_set_latency_update_callback = _lib.pa_stream_set_latency_update_callback
pa_stream_set_latency_update_callback.restype = None
pa_stream_set_latency_update_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:648
pa_stream_set_moved_callback = _lib.pa_stream_set_moved_callback
pa_stream_set_moved_callback.restype = None
pa_stream_set_moved_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:658
pa_stream_set_suspended_callback = _lib.pa_stream_set_suspended_callback
pa_stream_set_suspended_callback.restype = None
pa_stream_set_suspended_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:662
pa_stream_set_event_callback = _lib.pa_stream_set_event_callback
pa_stream_set_event_callback.restype = None
pa_stream_set_event_callback.argtypes = [POINTER(pa_stream), pa_stream_event_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:669
pa_stream_set_buffer_attr_callback = _lib.pa_stream_set_buffer_attr_callback
pa_stream_set_buffer_attr_callback.restype = None
pa_stream_set_buffer_attr_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:681
pa_stream_cork = _lib.pa_stream_cork
pa_stream_cork.restype = POINTER(pa_operation)
pa_stream_cork.argtypes = [POINTER(pa_stream), c_int, pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:686
pa_stream_flush = _lib.pa_stream_flush
pa_stream_flush.restype = POINTER(pa_operation)
pa_stream_flush.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:690
pa_stream_prebuf = _lib.pa_stream_prebuf
pa_stream_prebuf.restype = POINTER(pa_operation)
pa_stream_prebuf.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:695
pa_stream_trigger = _lib.pa_stream_trigger
pa_stream_trigger.restype = POINTER(pa_operation)
pa_stream_trigger.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:698
pa_stream_set_name = _lib.pa_stream_set_name
pa_stream_set_name.restype = POINTER(pa_operation)
pa_stream_set_name.argtypes = [POINTER(pa_stream), c_char_p, pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:731
pa_stream_get_time = _lib.pa_stream_get_time
pa_stream_get_time.restype = c_int
pa_stream_get_time.argtypes = [POINTER(pa_stream), POINTER(pa_usec_t)]
# /usr/include/pulse/stream.h:745
pa_stream_get_latency = _lib.pa_stream_get_latency
pa_stream_get_latency.restype = c_int
pa_stream_get_latency.argtypes = [POINTER(pa_stream), POINTER(pa_usec_t), POINTER(c_int)]
# /usr/include/pulse/stream.h:761
pa_stream_get_timing_info = _lib.pa_stream_get_timing_info
pa_stream_get_timing_info.restype = POINTER(pa_timing_info)
pa_stream_get_timing_info.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:764
pa_stream_get_sample_spec = _lib.pa_stream_get_sample_spec
pa_stream_get_sample_spec.restype = POINTER(pa_sample_spec)
pa_stream_get_sample_spec.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:767
pa_stream_get_channel_map = _lib.pa_stream_get_channel_map
pa_stream_get_channel_map.restype = POINTER(pa_channel_map)
pa_stream_get_channel_map.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:770
pa_stream_get_format_info = _lib.pa_stream_get_format_info
pa_stream_get_format_info.restype = POINTER(pa_format_info)
pa_stream_get_format_info.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:780
pa_stream_get_buffer_attr = _lib.pa_stream_get_buffer_attr
pa_stream_get_buffer_attr.restype = POINTER(pa_buffer_attr)
pa_stream_get_buffer_attr.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:790
pa_stream_set_buffer_attr = _lib.pa_stream_set_buffer_attr
pa_stream_set_buffer_attr.restype = POINTER(pa_operation)
pa_stream_set_buffer_attr.argtypes = [POINTER(pa_stream), POINTER(pa_buffer_attr), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:797
pa_stream_update_sample_rate = _lib.pa_stream_update_sample_rate
pa_stream_update_sample_rate.restype = POINTER(pa_operation)
pa_stream_update_sample_rate.argtypes = [POINTER(pa_stream), c_uint32, pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:805
pa_stream_proplist_update = _lib.pa_stream_proplist_update
pa_stream_proplist_update.restype = POINTER(pa_operation)
pa_stream_proplist_update.argtypes = [POINTER(pa_stream), pa_update_mode_t, POINTER(pa_proplist), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:809
pa_stream_proplist_remove = _lib.pa_stream_proplist_remove
pa_stream_proplist_remove.restype = POINTER(pa_operation)
pa_stream_proplist_remove.argtypes = [POINTER(pa_stream), POINTER(c_char_p), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:815
pa_stream_set_monitor_stream = _lib.pa_stream_set_monitor_stream
pa_stream_set_monitor_stream.restype = c_int
pa_stream_set_monitor_stream.argtypes = [POINTER(pa_stream), c_uint32]
# /usr/include/pulse/stream.h:820
pa_stream_get_monitor_stream = _lib.pa_stream_get_monitor_stream
pa_stream_get_monitor_stream.restype = c_uint32
pa_stream_get_monitor_stream.argtypes = [POINTER(pa_stream)]
class struct_pa_sink_port_info(Structure):
    """Information about one port of a sink (pa_sink_port_info)."""
    __slots__ = ['name', 'description', 'priority', 'available']
    # Layout mirrors the C struct field-for-field.
    _fields_ = [
        ('name', c_char_p),
        ('description', c_char_p),
        ('priority', c_uint32),
        ('available', c_int),
    ]

pa_sink_port_info = struct_pa_sink_port_info  # /usr/include/pulse/introspect.h:232
class struct_pa_sink_info(Structure):
    """ctypes mirror of C struct pa_sink_info (pulse/introspect.h:262).

    Full description of one sink as reported by the introspection API.
    Field layout must match the C struct exactly; _fields_ is assigned
    after the class body (generator style).
    """
    __slots__ = [
    'name',
    'index',
    'description',
    'sample_spec',
    'channel_map',
    'owner_module',
    'volume',
    'mute',
    'monitor_source',
    'monitor_source_name',
    'latency',
    'driver',
    'flags',
    'proplist',
    'configured_latency',
    'base_volume',
    'state',
    'n_volume_steps',
    'card',
    'n_ports',
    'ports',
    'active_port',
    'n_formats',
    'formats',
    ]
struct_pa_sink_info._fields_ = [
    ('name', c_char_p),
    ('index', c_uint32),
    ('description', c_char_p),
    ('sample_spec', pa_sample_spec),
    ('channel_map', pa_channel_map),
    ('owner_module', c_uint32),
    ('volume', pa_cvolume),
    ('mute', c_int),
    ('monitor_source', c_uint32),
    ('monitor_source_name', c_char_p),
    ('latency', pa_usec_t),
    ('driver', c_char_p),
    ('flags', pa_sink_flags_t),
    ('proplist', POINTER(pa_proplist)),
    ('configured_latency', pa_usec_t),
    ('base_volume', pa_volume_t),
    ('state', pa_sink_state_t),
    ('n_volume_steps', c_uint32),
    ('card', c_uint32),
    ('n_ports', c_uint32),
    ('ports', POINTER(POINTER(pa_sink_port_info))),
    ('active_port', POINTER(pa_sink_port_info)),
    ('n_formats', c_uint8),
    ('formats', POINTER(POINTER(pa_format_info))),
]
pa_sink_info = struct_pa_sink_info # /usr/include/pulse/introspect.h:262
# --- Sink introspection bindings (pulse/introspect.h): query sink info,
# set volume/mute, suspend, and select ports, by index or by name.
pa_sink_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sink_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:265
# /usr/include/pulse/introspect.h:268
pa_context_get_sink_info_by_name = _lib.pa_context_get_sink_info_by_name
pa_context_get_sink_info_by_name.restype = POINTER(pa_operation)
pa_context_get_sink_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:271
pa_context_get_sink_info_by_index = _lib.pa_context_get_sink_info_by_index
pa_context_get_sink_info_by_index.restype = POINTER(pa_operation)
pa_context_get_sink_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:274
pa_context_get_sink_info_list = _lib.pa_context_get_sink_info_list
pa_context_get_sink_info_list.restype = POINTER(pa_operation)
pa_context_get_sink_info_list.argtypes = [POINTER(pa_context), pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:277
pa_context_set_sink_volume_by_index = _lib.pa_context_set_sink_volume_by_index
pa_context_set_sink_volume_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_volume_by_index.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:280
pa_context_set_sink_volume_by_name = _lib.pa_context_set_sink_volume_by_name
pa_context_set_sink_volume_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_volume_by_name.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:283
pa_context_set_sink_mute_by_index = _lib.pa_context_set_sink_mute_by_index
pa_context_set_sink_mute_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_mute_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:286
pa_context_set_sink_mute_by_name = _lib.pa_context_set_sink_mute_by_name
pa_context_set_sink_mute_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_mute_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:289
pa_context_suspend_sink_by_name = _lib.pa_context_suspend_sink_by_name
pa_context_suspend_sink_by_name.restype = POINTER(pa_operation)
pa_context_suspend_sink_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:292
pa_context_suspend_sink_by_index = _lib.pa_context_suspend_sink_by_index
pa_context_suspend_sink_by_index.restype = POINTER(pa_operation)
pa_context_suspend_sink_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:295
pa_context_set_sink_port_by_index = _lib.pa_context_set_sink_port_by_index
pa_context_set_sink_port_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_port_by_index.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:298
pa_context_set_sink_port_by_name = _lib.pa_context_set_sink_port_by_name
pa_context_set_sink_port_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_port_by_name.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_success_cb_t, POINTER(None)]
class struct_pa_source_port_info(Structure):
    """ctypes mirror of C struct pa_source_port_info (pulse/introspect.h:312).

    Describes one selectable port of a source. Field layout must match the
    C struct exactly.
    """
    __slots__ = ['name', 'description', 'priority', 'available']
    _fields_ = [
        ('name', c_char_p),
        ('description', c_char_p),
        ('priority', c_uint32),
        ('available', c_int),
    ]

pa_source_port_info = struct_pa_source_port_info  # alias matching the C typedef
class struct_pa_source_info(Structure):
    """ctypes mirror of C struct pa_source_info (pulse/introspect.h:342).

    Full description of one source as reported by the introspection API.
    Field layout must match the C struct exactly; _fields_ is assigned
    after the class body (generator style).
    """
    __slots__ = [
    'name',
    'index',
    'description',
    'sample_spec',
    'channel_map',
    'owner_module',
    'volume',
    'mute',
    'monitor_of_sink',
    'monitor_of_sink_name',
    'latency',
    'driver',
    'flags',
    'proplist',
    'configured_latency',
    'base_volume',
    'state',
    'n_volume_steps',
    'card',
    'n_ports',
    'ports',
    'active_port',
    'n_formats',
    'formats',
    ]
struct_pa_source_info._fields_ = [
    ('name', c_char_p),
    ('index', c_uint32),
    ('description', c_char_p),
    ('sample_spec', pa_sample_spec),
    ('channel_map', pa_channel_map),
    ('owner_module', c_uint32),
    ('volume', pa_cvolume),
    ('mute', c_int),
    ('monitor_of_sink', c_uint32),
    ('monitor_of_sink_name', c_char_p),
    ('latency', pa_usec_t),
    ('driver', c_char_p),
    ('flags', pa_source_flags_t),
    ('proplist', POINTER(pa_proplist)),
    ('configured_latency', pa_usec_t),
    ('base_volume', pa_volume_t),
    ('state', pa_source_state_t),
    ('n_volume_steps', c_uint32),
    ('card', c_uint32),
    ('n_ports', c_uint32),
    ('ports', POINTER(POINTER(pa_source_port_info))),
    ('active_port', POINTER(pa_source_port_info)),
    ('n_formats', c_uint8),
    ('formats', POINTER(POINTER(pa_format_info))),
]
pa_source_info = struct_pa_source_info # /usr/include/pulse/introspect.h:342
# --- Source introspection bindings (pulse/introspect.h): query source info,
# set volume/mute, suspend, and select ports, by index or by name.
pa_source_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_source_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:345
# /usr/include/pulse/introspect.h:348
pa_context_get_source_info_by_name = _lib.pa_context_get_source_info_by_name
pa_context_get_source_info_by_name.restype = POINTER(pa_operation)
pa_context_get_source_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_source_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:351
pa_context_get_source_info_by_index = _lib.pa_context_get_source_info_by_index
pa_context_get_source_info_by_index.restype = POINTER(pa_operation)
pa_context_get_source_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_source_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:354
pa_context_get_source_info_list = _lib.pa_context_get_source_info_list
pa_context_get_source_info_list.restype = POINTER(pa_operation)
pa_context_get_source_info_list.argtypes = [POINTER(pa_context), pa_source_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:357
pa_context_set_source_volume_by_index = _lib.pa_context_set_source_volume_by_index
pa_context_set_source_volume_by_index.restype = POINTER(pa_operation)
pa_context_set_source_volume_by_index.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:360
pa_context_set_source_volume_by_name = _lib.pa_context_set_source_volume_by_name
pa_context_set_source_volume_by_name.restype = POINTER(pa_operation)
pa_context_set_source_volume_by_name.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:363
pa_context_set_source_mute_by_index = _lib.pa_context_set_source_mute_by_index
pa_context_set_source_mute_by_index.restype = POINTER(pa_operation)
pa_context_set_source_mute_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:366
pa_context_set_source_mute_by_name = _lib.pa_context_set_source_mute_by_name
pa_context_set_source_mute_by_name.restype = POINTER(pa_operation)
pa_context_set_source_mute_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:369
pa_context_suspend_source_by_name = _lib.pa_context_suspend_source_by_name
pa_context_suspend_source_by_name.restype = POINTER(pa_operation)
pa_context_suspend_source_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:372
pa_context_suspend_source_by_index = _lib.pa_context_suspend_source_by_index
pa_context_suspend_source_by_index.restype = POINTER(pa_operation)
pa_context_suspend_source_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:375
pa_context_set_source_port_by_index = _lib.pa_context_set_source_port_by_index
pa_context_set_source_port_by_index.restype = POINTER(pa_operation)
pa_context_set_source_port_by_index.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:378
pa_context_set_source_port_by_name = _lib.pa_context_set_source_port_by_name
pa_context_set_source_port_by_name.restype = POINTER(pa_operation)
pa_context_set_source_port_by_name.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_success_cb_t, POINTER(None)]
class struct_pa_server_info(Structure):
    """ctypes mirror of C struct pa_server_info (pulse/introspect.h:397).

    Server metadata returned by pa_context_get_server_info. Field layout
    must match the C struct exactly.
    """
    __slots__ = [
    'user_name',
    'host_name',
    'server_version',
    'server_name',
    'sample_spec',
    'default_sink_name',
    'default_source_name',
    'cookie',
    'channel_map',
    ]
struct_pa_server_info._fields_ = [
    ('user_name', c_char_p),
    ('host_name', c_char_p),
    ('server_version', c_char_p),
    ('server_name', c_char_p),
    ('sample_spec', pa_sample_spec),
    ('default_sink_name', c_char_p),
    ('default_source_name', c_char_p),
    ('cookie', c_uint32),
    ('channel_map', pa_channel_map),
]
pa_server_info = struct_pa_server_info # /usr/include/pulse/introspect.h:397
# --- Server info binding: one-shot query of global server metadata.
pa_server_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_server_info), POINTER(None)) # /usr/include/pulse/introspect.h:400
# /usr/include/pulse/introspect.h:403
pa_context_get_server_info = _lib.pa_context_get_server_info
pa_context_get_server_info.restype = POINTER(pa_operation)
pa_context_get_server_info.argtypes = [POINTER(pa_context), pa_server_info_cb_t, POINTER(None)]
class struct_pa_module_info(Structure):
    """ctypes mirror of C struct pa_module_info (pulse/introspect.h:421).

    Describes one loaded server module. Field layout must match the
    C struct exactly.
    """
    __slots__ = [
    'index',
    'name',
    'argument',
    'n_used',
    'auto_unload',
    'proplist',
    ]
struct_pa_module_info._fields_ = [
    ('index', c_uint32),
    ('name', c_char_p),
    ('argument', c_char_p),
    ('n_used', c_uint32),
    ('auto_unload', c_int),
    ('proplist', POINTER(pa_proplist)),
]
pa_module_info = struct_pa_module_info # /usr/include/pulse/introspect.h:421
# --- Module bindings: enumerate, load and unload server modules.
pa_module_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_module_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:424
# /usr/include/pulse/introspect.h:427
pa_context_get_module_info = _lib.pa_context_get_module_info
pa_context_get_module_info.restype = POINTER(pa_operation)
pa_context_get_module_info.argtypes = [POINTER(pa_context), c_uint32, pa_module_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:430
pa_context_get_module_info_list = _lib.pa_context_get_module_info_list
pa_context_get_module_info_list.restype = POINTER(pa_operation)
pa_context_get_module_info_list.argtypes = [POINTER(pa_context), pa_module_info_cb_t, POINTER(None)]
pa_context_index_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_uint32, POINTER(None)) # /usr/include/pulse/introspect.h:433
# /usr/include/pulse/introspect.h:436
pa_context_load_module = _lib.pa_context_load_module
pa_context_load_module.restype = POINTER(pa_operation)
pa_context_load_module.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_index_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:439
pa_context_unload_module = _lib.pa_context_unload_module
pa_context_unload_module.restype = POINTER(pa_operation)
pa_context_unload_module.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_client_info(Structure):
    """ctypes mirror of C struct pa_client_info (pulse/introspect.h:454).

    Describes one client connected to the server. Field layout must match
    the C struct exactly.
    """
    __slots__ = ['index', 'name', 'owner_module', 'driver', 'proplist']
    _fields_ = [
        ('index', c_uint32),
        ('name', c_char_p),
        ('owner_module', c_uint32),
        ('driver', c_char_p),
        ('proplist', POINTER(pa_proplist)),
    ]

pa_client_info = struct_pa_client_info  # alias matching the C typedef
# --- Client bindings: enumerate connected clients and forcibly kill one.
pa_client_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_client_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:457
# /usr/include/pulse/introspect.h:460
pa_context_get_client_info = _lib.pa_context_get_client_info
pa_context_get_client_info.restype = POINTER(pa_operation)
pa_context_get_client_info.argtypes = [POINTER(pa_context), c_uint32, pa_client_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:463
pa_context_get_client_info_list = _lib.pa_context_get_client_info_list
pa_context_get_client_info_list.restype = POINTER(pa_operation)
pa_context_get_client_info_list.argtypes = [POINTER(pa_context), pa_client_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:466
pa_context_kill_client = _lib.pa_context_kill_client
pa_context_kill_client.restype = POINTER(pa_operation)
pa_context_kill_client.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_card_profile_info(Structure):
    """ctypes mirror of C struct pa_card_profile_info (pulse/introspect.h:479).

    Legacy card-profile descriptor (superseded by pa_card_profile_info2).
    Field layout must match the C struct exactly.
    """
    __slots__ = ['name', 'description', 'n_sinks', 'n_sources', 'priority']
    _fields_ = [
        ('name', c_char_p),
        ('description', c_char_p),
        ('n_sinks', c_uint32),
        ('n_sources', c_uint32),
        ('priority', c_uint32),
    ]

pa_card_profile_info = struct_pa_card_profile_info  # alias matching the C typedef
class struct_pa_card_profile_info2(Structure):
    """ctypes mirror of C struct pa_card_profile_info2 (pulse/introspect.h:496).

    Extends pa_card_profile_info with an 'available' flag. Field layout
    must match the C struct exactly.
    """
    __slots__ = ['name', 'description', 'n_sinks', 'n_sources', 'priority',
                 'available']
    _fields_ = [
        ('name', c_char_p),
        ('description', c_char_p),
        ('n_sinks', c_uint32),
        ('n_sources', c_uint32),
        ('priority', c_uint32),
        ('available', c_int),
    ]

pa_card_profile_info2 = struct_pa_card_profile_info2  # alias matching the C typedef
class struct_pa_card_port_info(Structure):
    """ctypes mirror of C struct pa_card_port_info (pulse/introspect.h:512).

    Describes one port of a card, including the profiles that can use it.
    Field layout must match the C struct exactly.
    """
    __slots__ = [
    'name',
    'description',
    'priority',
    'available',
    'direction',
    'n_profiles',
    'profiles',
    'proplist',
    'latency_offset',
    'profiles2',
    ]
struct_pa_card_port_info._fields_ = [
    ('name', c_char_p),
    ('description', c_char_p),
    ('priority', c_uint32),
    ('available', c_int),
    ('direction', c_int),
    ('n_profiles', c_uint32),
    ('profiles', POINTER(POINTER(pa_card_profile_info))),
    ('proplist', POINTER(pa_proplist)),
    ('latency_offset', c_int64),
    ('profiles2', POINTER(POINTER(pa_card_profile_info2))),
]
pa_card_port_info = struct_pa_card_port_info # /usr/include/pulse/introspect.h:512
class struct_pa_card_info(Structure):
    """ctypes mirror of C struct pa_card_info (pulse/introspect.h:530).

    Full description of one card. Note 'profiles' is a pointer to an array
    of structs (single POINTER) while 'ports'/'profiles2' are arrays of
    pointers (double POINTER), matching the C declaration.
    """
    __slots__ = [
    'index',
    'name',
    'owner_module',
    'driver',
    'n_profiles',
    'profiles',
    'active_profile',
    'proplist',
    'n_ports',
    'ports',
    'profiles2',
    'active_profile2',
    ]
struct_pa_card_info._fields_ = [
    ('index', c_uint32),
    ('name', c_char_p),
    ('owner_module', c_uint32),
    ('driver', c_char_p),
    ('n_profiles', c_uint32),
    ('profiles', POINTER(pa_card_profile_info)),
    ('active_profile', POINTER(pa_card_profile_info)),
    ('proplist', POINTER(pa_proplist)),
    ('n_ports', c_uint32),
    ('ports', POINTER(POINTER(pa_card_port_info))),
    ('profiles2', POINTER(POINTER(pa_card_profile_info2))),
    ('active_profile2', POINTER(pa_card_profile_info2)),
]
pa_card_info = struct_pa_card_info # /usr/include/pulse/introspect.h:530
# --- Card bindings: enumerate cards, switch profiles, set port latency offset.
pa_card_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_card_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:533
# /usr/include/pulse/introspect.h:536
pa_context_get_card_info_by_index = _lib.pa_context_get_card_info_by_index
pa_context_get_card_info_by_index.restype = POINTER(pa_operation)
pa_context_get_card_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_card_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:539
pa_context_get_card_info_by_name = _lib.pa_context_get_card_info_by_name
pa_context_get_card_info_by_name.restype = POINTER(pa_operation)
pa_context_get_card_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_card_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:542
pa_context_get_card_info_list = _lib.pa_context_get_card_info_list
pa_context_get_card_info_list.restype = POINTER(pa_operation)
pa_context_get_card_info_list.argtypes = [POINTER(pa_context), pa_card_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:545
pa_context_set_card_profile_by_index = _lib.pa_context_set_card_profile_by_index
pa_context_set_card_profile_by_index.restype = POINTER(pa_operation)
pa_context_set_card_profile_by_index.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:548
pa_context_set_card_profile_by_name = _lib.pa_context_set_card_profile_by_name
pa_context_set_card_profile_by_name.restype = POINTER(pa_operation)
pa_context_set_card_profile_by_name.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:551
pa_context_set_port_latency_offset = _lib.pa_context_set_port_latency_offset
pa_context_set_port_latency_offset.restype = POINTER(pa_operation)
pa_context_set_port_latency_offset.argtypes = [POINTER(pa_context), c_char_p, c_char_p, c_int64, pa_context_success_cb_t, POINTER(None)]
class struct_pa_sink_input_info(Structure):
    """ctypes mirror of C struct pa_sink_input_info (pulse/introspect.h:579).

    Describes one playback stream connected to a sink. Field layout must
    match the C struct exactly.
    """
    __slots__ = [
    'index',
    'name',
    'owner_module',
    'client',
    'sink',
    'sample_spec',
    'channel_map',
    'volume',
    'buffer_usec',
    'sink_usec',
    'resample_method',
    'driver',
    'mute',
    'proplist',
    'corked',
    'has_volume',
    'volume_writable',
    'format',
    ]
struct_pa_sink_input_info._fields_ = [
    ('index', c_uint32),
    ('name', c_char_p),
    ('owner_module', c_uint32),
    ('client', c_uint32),
    ('sink', c_uint32),
    ('sample_spec', pa_sample_spec),
    ('channel_map', pa_channel_map),
    ('volume', pa_cvolume),
    ('buffer_usec', pa_usec_t),
    ('sink_usec', pa_usec_t),
    ('resample_method', c_char_p),
    ('driver', c_char_p),
    ('mute', c_int),
    ('proplist', POINTER(pa_proplist)),
    ('corked', c_int),
    ('has_volume', c_int),
    ('volume_writable', c_int),
    ('format', POINTER(pa_format_info)),
]
pa_sink_input_info = struct_pa_sink_input_info # /usr/include/pulse/introspect.h:579
# --- Sink-input bindings: enumerate playback streams, move them between
# sinks, set per-stream volume/mute, or kill them.
pa_sink_input_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sink_input_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:582
# /usr/include/pulse/introspect.h:585
pa_context_get_sink_input_info = _lib.pa_context_get_sink_input_info
pa_context_get_sink_input_info.restype = POINTER(pa_operation)
pa_context_get_sink_input_info.argtypes = [POINTER(pa_context), c_uint32, pa_sink_input_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:588
pa_context_get_sink_input_info_list = _lib.pa_context_get_sink_input_info_list
pa_context_get_sink_input_info_list.restype = POINTER(pa_operation)
pa_context_get_sink_input_info_list.argtypes = [POINTER(pa_context), pa_sink_input_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:591
pa_context_move_sink_input_by_name = _lib.pa_context_move_sink_input_by_name
pa_context_move_sink_input_by_name.restype = POINTER(pa_operation)
pa_context_move_sink_input_by_name.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:594
pa_context_move_sink_input_by_index = _lib.pa_context_move_sink_input_by_index
pa_context_move_sink_input_by_index.restype = POINTER(pa_operation)
pa_context_move_sink_input_by_index.argtypes = [POINTER(pa_context), c_uint32, c_uint32, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:597
pa_context_set_sink_input_volume = _lib.pa_context_set_sink_input_volume
pa_context_set_sink_input_volume.restype = POINTER(pa_operation)
pa_context_set_sink_input_volume.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:600
pa_context_set_sink_input_mute = _lib.pa_context_set_sink_input_mute
pa_context_set_sink_input_mute.restype = POINTER(pa_operation)
pa_context_set_sink_input_mute.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:603
pa_context_kill_sink_input = _lib.pa_context_kill_sink_input
pa_context_kill_sink_input.restype = POINTER(pa_operation)
pa_context_kill_sink_input.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_source_output_info(Structure):
    """ctypes mirror of C struct pa_source_output_info (pulse/introspect.h:631).

    Describes one recording stream connected to a source. Field layout
    must match the C struct exactly (note the field order differs from
    pa_sink_input_info).
    """
    __slots__ = [
    'index',
    'name',
    'owner_module',
    'client',
    'source',
    'sample_spec',
    'channel_map',
    'buffer_usec',
    'source_usec',
    'resample_method',
    'driver',
    'proplist',
    'corked',
    'volume',
    'mute',
    'has_volume',
    'volume_writable',
    'format',
    ]
struct_pa_source_output_info._fields_ = [
    ('index', c_uint32),
    ('name', c_char_p),
    ('owner_module', c_uint32),
    ('client', c_uint32),
    ('source', c_uint32),
    ('sample_spec', pa_sample_spec),
    ('channel_map', pa_channel_map),
    ('buffer_usec', pa_usec_t),
    ('source_usec', pa_usec_t),
    ('resample_method', c_char_p),
    ('driver', c_char_p),
    ('proplist', POINTER(pa_proplist)),
    ('corked', c_int),
    ('volume', pa_cvolume),
    ('mute', c_int),
    ('has_volume', c_int),
    ('volume_writable', c_int),
    ('format', POINTER(pa_format_info)),
]
pa_source_output_info = struct_pa_source_output_info # /usr/include/pulse/introspect.h:631
# --- Source-output bindings: enumerate recording streams, move them between
# sources, set per-stream volume/mute, or kill them.
pa_source_output_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_source_output_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:634
# /usr/include/pulse/introspect.h:637
pa_context_get_source_output_info = _lib.pa_context_get_source_output_info
pa_context_get_source_output_info.restype = POINTER(pa_operation)
pa_context_get_source_output_info.argtypes = [POINTER(pa_context), c_uint32, pa_source_output_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:640
pa_context_get_source_output_info_list = _lib.pa_context_get_source_output_info_list
pa_context_get_source_output_info_list.restype = POINTER(pa_operation)
pa_context_get_source_output_info_list.argtypes = [POINTER(pa_context), pa_source_output_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:643
pa_context_move_source_output_by_name = _lib.pa_context_move_source_output_by_name
pa_context_move_source_output_by_name.restype = POINTER(pa_operation)
pa_context_move_source_output_by_name.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:646
pa_context_move_source_output_by_index = _lib.pa_context_move_source_output_by_index
pa_context_move_source_output_by_index.restype = POINTER(pa_operation)
pa_context_move_source_output_by_index.argtypes = [POINTER(pa_context), c_uint32, c_uint32, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:649
pa_context_set_source_output_volume = _lib.pa_context_set_source_output_volume
pa_context_set_source_output_volume.restype = POINTER(pa_operation)
pa_context_set_source_output_volume.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:652
pa_context_set_source_output_mute = _lib.pa_context_set_source_output_mute
pa_context_set_source_output_mute.restype = POINTER(pa_operation)
pa_context_set_source_output_mute.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:655
pa_context_kill_source_output = _lib.pa_context_kill_source_output
pa_context_kill_source_output.restype = POINTER(pa_operation)
pa_context_kill_source_output.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_stat_info(Structure):
    """ctypes mirror of C struct pa_stat_info (pulse/introspect.h:670).

    Daemon memory-block statistics returned by pa_context_stat. Field
    layout must match the C struct exactly.
    """
    __slots__ = ['memblock_total', 'memblock_total_size',
                 'memblock_allocated', 'memblock_allocated_size',
                 'scache_size']
    _fields_ = [
        ('memblock_total', c_uint32),
        ('memblock_total_size', c_uint32),
        ('memblock_allocated', c_uint32),
        ('memblock_allocated_size', c_uint32),
        ('scache_size', c_uint32),
    ]

pa_stat_info = struct_pa_stat_info  # alias matching the C typedef
# --- Daemon statistics binding.
pa_stat_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_stat_info), POINTER(None)) # /usr/include/pulse/introspect.h:673
# /usr/include/pulse/introspect.h:676
pa_context_stat = _lib.pa_context_stat
pa_context_stat.restype = POINTER(pa_operation)
pa_context_stat.argtypes = [POINTER(pa_context), pa_stat_info_cb_t, POINTER(None)]
class struct_pa_sample_info(Structure):
    """ctypes mirror of C struct pa_sample_info (pulse/introspect.h:696).

    Describes one entry in the sample cache. Field layout must match the
    C struct exactly.
    """
    __slots__ = [
    'index',
    'name',
    'volume',
    'sample_spec',
    'channel_map',
    'duration',
    'bytes',
    'lazy',
    'filename',
    'proplist',
    ]
struct_pa_sample_info._fields_ = [
    ('index', c_uint32),
    ('name', c_char_p),
    ('volume', pa_cvolume),
    ('sample_spec', pa_sample_spec),
    ('channel_map', pa_channel_map),
    ('duration', pa_usec_t),
    ('bytes', c_uint32),
    ('lazy', c_int),
    ('filename', c_char_p),
    ('proplist', POINTER(pa_proplist)),
]
pa_sample_info = struct_pa_sample_info # /usr/include/pulse/introspect.h:696
# --- Sample-cache introspection bindings.
pa_sample_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sample_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:699
# /usr/include/pulse/introspect.h:702
pa_context_get_sample_info_by_name = _lib.pa_context_get_sample_info_by_name
pa_context_get_sample_info_by_name.restype = POINTER(pa_operation)
pa_context_get_sample_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_sample_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:705
pa_context_get_sample_info_by_index = _lib.pa_context_get_sample_info_by_index
pa_context_get_sample_info_by_index.restype = POINTER(pa_operation)
pa_context_get_sample_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_sample_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:708
pa_context_get_sample_info_list = _lib.pa_context_get_sample_info_list
pa_context_get_sample_info_list.restype = POINTER(pa_operation)
pa_context_get_sample_info_list.argtypes = [POINTER(pa_context), pa_sample_info_cb_t, POINTER(None)]
# C enum pa_autoload_type (pulse/introspect.h:720): an autoload entry is
# attached either to a sink or to a source.
enum_pa_autoload_type = c_int
PA_AUTOLOAD_SINK = 0
PA_AUTOLOAD_SOURCE = 1
pa_autoload_type_t = enum_pa_autoload_type


class struct_pa_autoload_info(Structure):
    """ctypes mirror of C struct pa_autoload_info (pulse/introspect.h:731).

    Describes one (deprecated) autoload entry. Field layout must match
    the C struct exactly.
    """
    __slots__ = ['index', 'name', 'type', 'module', 'argument']
    _fields_ = [
        ('index', c_uint32),
        ('name', c_char_p),
        ('type', pa_autoload_type_t),
        ('module', c_char_p),
        ('argument', c_char_p),
    ]

pa_autoload_info = struct_pa_autoload_info  # alias matching the C typedef
# --- Autoload bindings (deprecated PulseAudio API): query, add and remove
# autoload entries by name or index.
pa_autoload_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_autoload_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:734
# /usr/include/pulse/introspect.h:737
pa_context_get_autoload_info_by_name = _lib.pa_context_get_autoload_info_by_name
pa_context_get_autoload_info_by_name.restype = POINTER(pa_operation)
pa_context_get_autoload_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_autoload_type_t, pa_autoload_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:740
pa_context_get_autoload_info_by_index = _lib.pa_context_get_autoload_info_by_index
pa_context_get_autoload_info_by_index.restype = POINTER(pa_operation)
pa_context_get_autoload_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_autoload_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:743
pa_context_get_autoload_info_list = _lib.pa_context_get_autoload_info_list
pa_context_get_autoload_info_list.restype = POINTER(pa_operation)
pa_context_get_autoload_info_list.argtypes = [POINTER(pa_context), pa_autoload_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:746
pa_context_add_autoload = _lib.pa_context_add_autoload
pa_context_add_autoload.restype = POINTER(pa_operation)
pa_context_add_autoload.argtypes = [POINTER(pa_context), c_char_p, pa_autoload_type_t, c_char_p, c_char_p, pa_context_index_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:749
pa_context_remove_autoload_by_name = _lib.pa_context_remove_autoload_by_name
pa_context_remove_autoload_by_name.restype = POINTER(pa_operation)
pa_context_remove_autoload_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_autoload_type_t, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:752
pa_context_remove_autoload_by_index = _lib.pa_context_remove_autoload_by_index
pa_context_remove_autoload_by_index.restype = POINTER(pa_operation)
pa_context_remove_autoload_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
# --- Event-subscription bindings (pulse/subscribe.h): register for server
# change notifications and install the notification callback.
pa_context_subscribe_cb_t = CFUNCTYPE(None, POINTER(pa_context), pa_subscription_event_type_t, c_uint32, POINTER(None)) # /usr/include/pulse/subscribe.h:73
# /usr/include/pulse/subscribe.h:76
pa_context_subscribe = _lib.pa_context_subscribe
pa_context_subscribe.restype = POINTER(pa_operation)
pa_context_subscribe.argtypes = [POINTER(pa_context), pa_subscription_mask_t, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/subscribe.h:79
pa_context_set_subscribe_callback = _lib.pa_context_set_subscribe_callback
pa_context_set_subscribe_callback.restype = None
pa_context_set_subscribe_callback.argtypes = [POINTER(pa_context), pa_context_subscribe_cb_t, POINTER(None)]
# --- Sample-cache bindings (pulse/scache.h): upload, remove and play
# cached samples.
pa_context_play_sample_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_uint32, POINTER(None)) # /usr/include/pulse/scache.h:85
# /usr/include/pulse/scache.h:88
pa_stream_connect_upload = _lib.pa_stream_connect_upload
pa_stream_connect_upload.restype = c_int
pa_stream_connect_upload.argtypes = [POINTER(pa_stream), c_size_t]
# /usr/include/pulse/scache.h:93
pa_stream_finish_upload = _lib.pa_stream_finish_upload
pa_stream_finish_upload.restype = c_int
pa_stream_finish_upload.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/scache.h:96
pa_context_remove_sample = _lib.pa_context_remove_sample
pa_context_remove_sample.restype = POINTER(pa_operation)
pa_context_remove_sample.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/scache.h:101
pa_context_play_sample = _lib.pa_context_play_sample
pa_context_play_sample.restype = POINTER(pa_operation)
pa_context_play_sample.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_volume_t, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/scache.h:113
pa_context_play_sample_with_proplist = _lib.pa_context_play_sample_with_proplist
pa_context_play_sample_with_proplist.restype = POINTER(pa_operation)
pa_context_play_sample_with_proplist.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_volume_t, POINTER(pa_proplist), pa_context_play_sample_cb_t, POINTER(None)]
# --- Error-string binding (pulse/error.h): map a PulseAudio error code to a
# human-readable message.
# /usr/include/pulse/error.h:33
pa_strerror = _lib.pa_strerror
pa_strerror.restype = c_char_p
pa_strerror.argtypes = [c_int]
# --- Memory-management bindings (pulse/xmalloc.h).
# NOTE(review): the generator emitted POINTER(c_void) for the four void*
# return types below, but ctypes defines no `c_void` name; POINTER(None)
# (which ctypes maps to c_void_p) is the correct spelling for C `void *`
# and matches the argtypes declarations used throughout this file.
# /usr/include/pulse/xmalloc.h:39
pa_xmalloc = _lib.pa_xmalloc
pa_xmalloc.restype = POINTER(None)
pa_xmalloc.argtypes = [c_size_t]
# /usr/include/pulse/xmalloc.h:42
pa_xmalloc0 = _lib.pa_xmalloc0
pa_xmalloc0.restype = POINTER(None)
pa_xmalloc0.argtypes = [c_size_t]
# /usr/include/pulse/xmalloc.h:45
pa_xrealloc = _lib.pa_xrealloc
pa_xrealloc.restype = POINTER(None)
pa_xrealloc.argtypes = [POINTER(None), c_size_t]
# /usr/include/pulse/xmalloc.h:48
pa_xfree = _lib.pa_xfree
pa_xfree.restype = None
pa_xfree.argtypes = [POINTER(None)]
# /usr/include/pulse/xmalloc.h:51
pa_xstrdup = _lib.pa_xstrdup
pa_xstrdup.restype = c_char_p
pa_xstrdup.argtypes = [c_char_p]
# /usr/include/pulse/xmalloc.h:54
pa_xstrndup = _lib.pa_xstrndup
pa_xstrndup.restype = c_char_p
pa_xstrndup.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/xmalloc.h:57
pa_xmemdup = _lib.pa_xmemdup
pa_xmemdup.restype = POINTER(None)
pa_xmemdup.argtypes = [POINTER(None), c_size_t]
# --- UTF-8/ASCII validation and locale conversion bindings (pulse/utf8.h).
# /usr/include/pulse/utf8.h:35
pa_utf8_valid = _lib.pa_utf8_valid
pa_utf8_valid.restype = c_char_p
pa_utf8_valid.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:38
pa_ascii_valid = _lib.pa_ascii_valid
pa_ascii_valid.restype = c_char_p
pa_ascii_valid.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:41
pa_utf8_filter = _lib.pa_utf8_filter
pa_utf8_filter.restype = c_char_p
pa_utf8_filter.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:44
pa_ascii_filter = _lib.pa_ascii_filter
pa_ascii_filter.restype = c_char_p
pa_ascii_filter.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:47
pa_utf8_to_locale = _lib.pa_utf8_to_locale
pa_utf8_to_locale.restype = c_char_p
pa_utf8_to_locale.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:50
pa_locale_to_utf8 = _lib.pa_locale_to_utf8
pa_locale_to_utf8.restype = c_char_p
pa_locale_to_utf8.argtypes = [c_char_p]
class struct_pa_threaded_mainloop(Structure):
__slots__ = [
]
struct_pa_threaded_mainloop._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_threaded_mainloop(Structure):
__slots__ = [
]
struct_pa_threaded_mainloop._fields_ = [
('_opaque_struct', c_int)
]
pa_threaded_mainloop = struct_pa_threaded_mainloop # /usr/include/pulse/thread-mainloop.h:246
# /usr/include/pulse/thread-mainloop.h:251
pa_threaded_mainloop_new = _lib.pa_threaded_mainloop_new
pa_threaded_mainloop_new.restype = POINTER(pa_threaded_mainloop)
pa_threaded_mainloop_new.argtypes = []
# /usr/include/pulse/thread-mainloop.h:256
pa_threaded_mainloop_free = _lib.pa_threaded_mainloop_free
pa_threaded_mainloop_free.restype = None
pa_threaded_mainloop_free.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:259
pa_threaded_mainloop_start = _lib.pa_threaded_mainloop_start
pa_threaded_mainloop_start.restype = c_int
pa_threaded_mainloop_start.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:263
pa_threaded_mainloop_stop = _lib.pa_threaded_mainloop_stop
pa_threaded_mainloop_stop.restype = None
pa_threaded_mainloop_stop.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:271
pa_threaded_mainloop_lock = _lib.pa_threaded_mainloop_lock
pa_threaded_mainloop_lock.restype = None
pa_threaded_mainloop_lock.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:274
pa_threaded_mainloop_unlock = _lib.pa_threaded_mainloop_unlock
pa_threaded_mainloop_unlock.restype = None
pa_threaded_mainloop_unlock.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:285
pa_threaded_mainloop_wait = _lib.pa_threaded_mainloop_wait
pa_threaded_mainloop_wait.restype = None
pa_threaded_mainloop_wait.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:292
pa_threaded_mainloop_signal = _lib.pa_threaded_mainloop_signal
pa_threaded_mainloop_signal.restype = None
pa_threaded_mainloop_signal.argtypes = [POINTER(pa_threaded_mainloop), c_int]
# /usr/include/pulse/thread-mainloop.h:298
pa_threaded_mainloop_accept = _lib.pa_threaded_mainloop_accept
pa_threaded_mainloop_accept.restype = None
pa_threaded_mainloop_accept.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:302
pa_threaded_mainloop_get_retval = _lib.pa_threaded_mainloop_get_retval
pa_threaded_mainloop_get_retval.restype = c_int
pa_threaded_mainloop_get_retval.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:307
pa_threaded_mainloop_get_api = _lib.pa_threaded_mainloop_get_api
pa_threaded_mainloop_get_api.restype = POINTER(pa_mainloop_api)
pa_threaded_mainloop_get_api.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:310
pa_threaded_mainloop_in_thread = _lib.pa_threaded_mainloop_in_thread
pa_threaded_mainloop_in_thread.restype = c_int
pa_threaded_mainloop_in_thread.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:313
pa_threaded_mainloop_set_name = _lib.pa_threaded_mainloop_set_name
pa_threaded_mainloop_set_name.restype = None
pa_threaded_mainloop_set_name.argtypes = [POINTER(pa_threaded_mainloop), c_char_p]
class struct_pa_mainloop(Structure):
__slots__ = [
]
struct_pa_mainloop._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_mainloop(Structure):
__slots__ = [
]
struct_pa_mainloop._fields_ = [
('_opaque_struct', c_int)
]
pa_mainloop = struct_pa_mainloop # /usr/include/pulse/mainloop.h:78
# /usr/include/pulse/mainloop.h:81
pa_mainloop_new = _lib.pa_mainloop_new
pa_mainloop_new.restype = POINTER(pa_mainloop)
pa_mainloop_new.argtypes = []
# /usr/include/pulse/mainloop.h:84
pa_mainloop_free = _lib.pa_mainloop_free
pa_mainloop_free.restype = None
pa_mainloop_free.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:89
pa_mainloop_prepare = _lib.pa_mainloop_prepare
pa_mainloop_prepare.restype = c_int
pa_mainloop_prepare.argtypes = [POINTER(pa_mainloop), c_int]
# /usr/include/pulse/mainloop.h:92
pa_mainloop_poll = _lib.pa_mainloop_poll
pa_mainloop_poll.restype = c_int
pa_mainloop_poll.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:96
pa_mainloop_dispatch = _lib.pa_mainloop_dispatch
pa_mainloop_dispatch.restype = c_int
pa_mainloop_dispatch.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:99
pa_mainloop_get_retval = _lib.pa_mainloop_get_retval
pa_mainloop_get_retval.restype = c_int
pa_mainloop_get_retval.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:107
pa_mainloop_iterate = _lib.pa_mainloop_iterate
pa_mainloop_iterate.restype = c_int
pa_mainloop_iterate.argtypes = [POINTER(pa_mainloop), c_int, POINTER(c_int)]
# /usr/include/pulse/mainloop.h:110
pa_mainloop_run = _lib.pa_mainloop_run
pa_mainloop_run.restype = c_int
pa_mainloop_run.argtypes = [POINTER(pa_mainloop), POINTER(c_int)]
# /usr/include/pulse/mainloop.h:115
pa_mainloop_get_api = _lib.pa_mainloop_get_api
pa_mainloop_get_api.restype = POINTER(pa_mainloop_api)
pa_mainloop_get_api.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:118
pa_mainloop_quit = _lib.pa_mainloop_quit
pa_mainloop_quit.restype = None
pa_mainloop_quit.argtypes = [POINTER(pa_mainloop), c_int]
# /usr/include/pulse/mainloop.h:121
pa_mainloop_wakeup = _lib.pa_mainloop_wakeup
pa_mainloop_wakeup.restype = None
pa_mainloop_wakeup.argtypes = [POINTER(pa_mainloop)]
class struct_pollfd(Structure):
__slots__ = [
]
struct_pollfd._fields_ = [
('_opaque_struct', c_int)
]
class struct_pollfd(Structure):
__slots__ = [
]
struct_pollfd._fields_ = [
('_opaque_struct', c_int)
]
pa_poll_func = CFUNCTYPE(c_int, POINTER(struct_pollfd), c_ulong, c_int, POINTER(None)) # /usr/include/pulse/mainloop.h:124
# /usr/include/pulse/mainloop.h:127
pa_mainloop_set_poll_func = _lib.pa_mainloop_set_poll_func
pa_mainloop_set_poll_func.restype = None
pa_mainloop_set_poll_func.argtypes = [POINTER(pa_mainloop), pa_poll_func, POINTER(None)]
class struct_pa_signal_event(Structure):
__slots__ = [
]
struct_pa_signal_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_signal_event(Structure):
__slots__ = [
]
struct_pa_signal_event._fields_ = [
('_opaque_struct', c_int)
]
pa_signal_event = struct_pa_signal_event # /usr/include/pulse/mainloop-signal.h:39
pa_signal_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_signal_event), c_int, POINTER(None)) # /usr/include/pulse/mainloop-signal.h:42
pa_signal_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_signal_event), POINTER(None)) # /usr/include/pulse/mainloop-signal.h:45
# /usr/include/pulse/mainloop-signal.h:48
pa_signal_init = _lib.pa_signal_init
pa_signal_init.restype = c_int
pa_signal_init.argtypes = [POINTER(pa_mainloop_api)]
# /usr/include/pulse/mainloop-signal.h:51
pa_signal_done = _lib.pa_signal_done
pa_signal_done.restype = None
pa_signal_done.argtypes = []
# /usr/include/pulse/mainloop-signal.h:54
pa_signal_new = _lib.pa_signal_new
pa_signal_new.restype = POINTER(pa_signal_event)
pa_signal_new.argtypes = [c_int, pa_signal_cb_t, POINTER(None)]
# /usr/include/pulse/mainloop-signal.h:57
pa_signal_free = _lib.pa_signal_free
pa_signal_free.restype = None
pa_signal_free.argtypes = [POINTER(pa_signal_event)]
# /usr/include/pulse/mainloop-signal.h:60
pa_signal_set_destroy = _lib.pa_signal_set_destroy
pa_signal_set_destroy.restype = None
pa_signal_set_destroy.argtypes = [POINTER(pa_signal_event), pa_signal_destroy_cb_t]
# /usr/include/pulse/util.h:35
pa_get_user_name = _lib.pa_get_user_name
pa_get_user_name.restype = c_char_p
pa_get_user_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:38
pa_get_host_name = _lib.pa_get_host_name
pa_get_host_name.restype = c_char_p
pa_get_host_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:41
pa_get_fqdn = _lib.pa_get_fqdn
pa_get_fqdn.restype = c_char_p
pa_get_fqdn.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:44
pa_get_home_dir = _lib.pa_get_home_dir
pa_get_home_dir.restype = c_char_p
pa_get_home_dir.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:48
pa_get_binary_name = _lib.pa_get_binary_name
pa_get_binary_name.restype = c_char_p
pa_get_binary_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:52
pa_path_get_filename = _lib.pa_path_get_filename
pa_path_get_filename.restype = c_char_p
pa_path_get_filename.argtypes = [c_char_p]
# /usr/include/pulse/util.h:55
pa_msleep = _lib.pa_msleep
pa_msleep.restype = c_int
pa_msleep.argtypes = [c_ulong]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:61
pa_gettimeofday = _lib.pa_gettimeofday
pa_gettimeofday.restype = POINTER(struct_timeval)
pa_gettimeofday.argtypes = [POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:65
pa_timeval_diff = _lib.pa_timeval_diff
pa_timeval_diff.restype = pa_usec_t
pa_timeval_diff.argtypes = [POINTER(struct_timeval), POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:68
pa_timeval_cmp = _lib.pa_timeval_cmp
pa_timeval_cmp.restype = c_int
pa_timeval_cmp.argtypes = [POINTER(struct_timeval), POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:71
pa_timeval_age = _lib.pa_timeval_age
pa_timeval_age.restype = pa_usec_t
pa_timeval_age.argtypes = [POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:74
pa_timeval_add = _lib.pa_timeval_add
pa_timeval_add.restype = POINTER(struct_timeval)
pa_timeval_add.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:77
pa_timeval_sub = _lib.pa_timeval_sub
pa_timeval_sub.restype = POINTER(struct_timeval)
pa_timeval_sub.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:80
pa_timeval_store = _lib.pa_timeval_store
pa_timeval_store.restype = POINTER(struct_timeval)
pa_timeval_store.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:83
pa_timeval_load = _lib.pa_timeval_load
pa_timeval_load.restype = pa_usec_t
pa_timeval_load.argtypes = [POINTER(struct_timeval)]
__all__ = ['pa_get_library_version', 'PA_API_VERSION', 'PA_PROTOCOL_VERSION',
'PA_MAJOR', 'PA_MINOR', 'PA_MICRO', 'PA_CHANNELS_MAX', 'PA_RATE_MAX',
'pa_sample_format_t', 'PA_SAMPLE_U8', 'PA_SAMPLE_ALAW', 'PA_SAMPLE_ULAW',
'PA_SAMPLE_S16LE', 'PA_SAMPLE_S16BE', 'PA_SAMPLE_FLOAT32LE',
'PA_SAMPLE_FLOAT32BE', 'PA_SAMPLE_S32LE', 'PA_SAMPLE_S32BE',
'PA_SAMPLE_S24LE', 'PA_SAMPLE_S24BE', 'PA_SAMPLE_S24_32LE',
'PA_SAMPLE_S24_32BE', 'PA_SAMPLE_MAX', 'PA_SAMPLE_INVALID', 'pa_sample_spec',
'pa_usec_t', 'pa_bytes_per_second', 'pa_frame_size', 'pa_sample_size',
'pa_sample_size_of_format', 'pa_bytes_to_usec', 'pa_usec_to_bytes',
'pa_sample_spec_init', 'pa_sample_format_valid', 'pa_sample_rate_valid',
'pa_channels_valid', 'pa_sample_spec_valid', 'pa_sample_spec_equal',
'pa_sample_format_to_string', 'pa_parse_sample_format',
'PA_SAMPLE_SPEC_SNPRINT_MAX', 'pa_sample_spec_snprint',
'PA_BYTES_SNPRINT_MAX', 'pa_bytes_snprint', 'pa_sample_format_is_le',
'pa_sample_format_is_be', 'pa_context_state_t', 'PA_CONTEXT_UNCONNECTED',
'PA_CONTEXT_CONNECTING', 'PA_CONTEXT_AUTHORIZING', 'PA_CONTEXT_SETTING_NAME',
'PA_CONTEXT_READY', 'PA_CONTEXT_FAILED', 'PA_CONTEXT_TERMINATED',
'pa_stream_state_t', 'PA_STREAM_UNCONNECTED', 'PA_STREAM_CREATING',
'PA_STREAM_READY', 'PA_STREAM_FAILED', 'PA_STREAM_TERMINATED',
'pa_operation_state_t', 'PA_OPERATION_RUNNING', 'PA_OPERATION_DONE',
'PA_OPERATION_CANCELLED', 'pa_context_flags_t', 'PA_CONTEXT_NOFLAGS',
'PA_CONTEXT_NOAUTOSPAWN', 'PA_CONTEXT_NOFAIL', 'pa_direction_t',
'PA_DIRECTION_OUTPUT', 'PA_DIRECTION_INPUT', 'pa_device_type_t',
'PA_DEVICE_TYPE_SINK', 'PA_DEVICE_TYPE_SOURCE', 'pa_stream_direction_t',
'PA_STREAM_NODIRECTION', 'PA_STREAM_PLAYBACK', 'PA_STREAM_RECORD',
'PA_STREAM_UPLOAD', 'pa_stream_flags_t', 'PA_STREAM_NOFLAGS',
'PA_STREAM_START_CORKED', 'PA_STREAM_INTERPOLATE_TIMING',
'PA_STREAM_NOT_MONOTONIC', 'PA_STREAM_AUTO_TIMING_UPDATE',
'PA_STREAM_NO_REMAP_CHANNELS', 'PA_STREAM_NO_REMIX_CHANNELS',
'PA_STREAM_FIX_FORMAT', 'PA_STREAM_FIX_RATE', 'PA_STREAM_FIX_CHANNELS',
'PA_STREAM_DONT_MOVE', 'PA_STREAM_VARIABLE_RATE', 'PA_STREAM_PEAK_DETECT',
'PA_STREAM_START_MUTED', 'PA_STREAM_ADJUST_LATENCY',
'PA_STREAM_EARLY_REQUESTS', 'PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND',
'PA_STREAM_START_UNMUTED', 'PA_STREAM_FAIL_ON_SUSPEND',
'PA_STREAM_RELATIVE_VOLUME', 'PA_STREAM_PASSTHROUGH', 'pa_buffer_attr',
'pa_error_code_t', 'PA_OK', 'PA_ERR_ACCESS', 'PA_ERR_COMMAND',
'PA_ERR_INVALID', 'PA_ERR_EXIST', 'PA_ERR_NOENTITY',
'PA_ERR_CONNECTIONREFUSED', 'PA_ERR_PROTOCOL', 'PA_ERR_TIMEOUT',
'PA_ERR_AUTHKEY', 'PA_ERR_INTERNAL', 'PA_ERR_CONNECTIONTERMINATED',
'PA_ERR_KILLED', 'PA_ERR_INVALIDSERVER', 'PA_ERR_MODINITFAILED',
'PA_ERR_BADSTATE', 'PA_ERR_NODATA', 'PA_ERR_VERSION', 'PA_ERR_TOOLARGE',
'PA_ERR_NOTSUPPORTED', 'PA_ERR_UNKNOWN', 'PA_ERR_NOEXTENSION',
'PA_ERR_OBSOLETE', 'PA_ERR_NOTIMPLEMENTED', 'PA_ERR_FORKED', 'PA_ERR_IO',
'PA_ERR_BUSY', 'PA_ERR_MAX', 'pa_subscription_mask_t',
'PA_SUBSCRIPTION_MASK_NULL', 'PA_SUBSCRIPTION_MASK_SINK',
'PA_SUBSCRIPTION_MASK_SOURCE', 'PA_SUBSCRIPTION_MASK_SINK_INPUT',
'PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT', 'PA_SUBSCRIPTION_MASK_MODULE',
'PA_SUBSCRIPTION_MASK_CLIENT', 'PA_SUBSCRIPTION_MASK_SAMPLE_CACHE',
'PA_SUBSCRIPTION_MASK_SERVER', 'PA_SUBSCRIPTION_MASK_AUTOLOAD',
'PA_SUBSCRIPTION_MASK_CARD', 'PA_SUBSCRIPTION_MASK_ALL',
'pa_subscription_event_type_t', 'PA_SUBSCRIPTION_EVENT_SINK',
'PA_SUBSCRIPTION_EVENT_SOURCE', 'PA_SUBSCRIPTION_EVENT_SINK_INPUT',
'PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT', 'PA_SUBSCRIPTION_EVENT_MODULE',
'PA_SUBSCRIPTION_EVENT_CLIENT', 'PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE',
'PA_SUBSCRIPTION_EVENT_SERVER', 'PA_SUBSCRIPTION_EVENT_AUTOLOAD',
'PA_SUBSCRIPTION_EVENT_CARD', 'PA_SUBSCRIPTION_EVENT_FACILITY_MASK',
'PA_SUBSCRIPTION_EVENT_NEW', 'PA_SUBSCRIPTION_EVENT_CHANGE',
'PA_SUBSCRIPTION_EVENT_REMOVE', 'PA_SUBSCRIPTION_EVENT_TYPE_MASK',
'pa_timing_info', 'pa_spawn_api', 'pa_seek_mode_t', 'PA_SEEK_RELATIVE',
'PA_SEEK_ABSOLUTE', 'PA_SEEK_RELATIVE_ON_READ', 'PA_SEEK_RELATIVE_END',
'pa_sink_flags_t', 'PA_SINK_NOFLAGS', 'PA_SINK_HW_VOLUME_CTRL',
'PA_SINK_LATENCY', 'PA_SINK_HARDWARE', 'PA_SINK_NETWORK',
'PA_SINK_HW_MUTE_CTRL', 'PA_SINK_DECIBEL_VOLUME', 'PA_SINK_FLAT_VOLUME',
'PA_SINK_DYNAMIC_LATENCY', 'PA_SINK_SET_FORMATS', 'pa_sink_state_t',
'PA_SINK_INVALID_STATE', 'PA_SINK_RUNNING', 'PA_SINK_IDLE',
'PA_SINK_SUSPENDED', 'PA_SINK_INIT', 'PA_SINK_UNLINKED', 'pa_source_flags_t',
'PA_SOURCE_NOFLAGS', 'PA_SOURCE_HW_VOLUME_CTRL', 'PA_SOURCE_LATENCY',
'PA_SOURCE_HARDWARE', 'PA_SOURCE_NETWORK', 'PA_SOURCE_HW_MUTE_CTRL',
'PA_SOURCE_DECIBEL_VOLUME', 'PA_SOURCE_DYNAMIC_LATENCY',
'PA_SOURCE_FLAT_VOLUME', 'pa_source_state_t', 'PA_SOURCE_INVALID_STATE',
'PA_SOURCE_RUNNING', 'PA_SOURCE_IDLE', 'PA_SOURCE_SUSPENDED',
'PA_SOURCE_INIT', 'PA_SOURCE_UNLINKED', 'pa_free_cb_t', 'pa_port_available_t',
'PA_PORT_AVAILABLE_UNKNOWN', 'PA_PORT_AVAILABLE_NO', 'PA_PORT_AVAILABLE_YES',
'pa_mainloop_api', 'pa_io_event_flags_t', 'PA_IO_EVENT_NULL',
'PA_IO_EVENT_INPUT', 'PA_IO_EVENT_OUTPUT', 'PA_IO_EVENT_HANGUP',
'PA_IO_EVENT_ERROR', 'pa_io_event', 'pa_io_event_cb_t',
'pa_io_event_destroy_cb_t', 'pa_time_event', 'pa_time_event_cb_t',
'pa_time_event_destroy_cb_t', 'pa_defer_event', 'pa_defer_event_cb_t',
'pa_defer_event_destroy_cb_t', 'pa_mainloop_api_once',
'pa_channel_position_t', 'PA_CHANNEL_POSITION_INVALID',
'PA_CHANNEL_POSITION_MONO', 'PA_CHANNEL_POSITION_FRONT_LEFT',
'PA_CHANNEL_POSITION_FRONT_RIGHT', 'PA_CHANNEL_POSITION_FRONT_CENTER',
'PA_CHANNEL_POSITION_LEFT', 'PA_CHANNEL_POSITION_RIGHT',
'PA_CHANNEL_POSITION_CENTER', 'PA_CHANNEL_POSITION_REAR_CENTER',
'PA_CHANNEL_POSITION_REAR_LEFT', 'PA_CHANNEL_POSITION_REAR_RIGHT',
'PA_CHANNEL_POSITION_LFE', 'PA_CHANNEL_POSITION_SUBWOOFER',
'PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER',
'PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER', 'PA_CHANNEL_POSITION_SIDE_LEFT',
'PA_CHANNEL_POSITION_SIDE_RIGHT', 'PA_CHANNEL_POSITION_AUX0',
'PA_CHANNEL_POSITION_AUX1', 'PA_CHANNEL_POSITION_AUX2',
'PA_CHANNEL_POSITION_AUX3', 'PA_CHANNEL_POSITION_AUX4',
'PA_CHANNEL_POSITION_AUX5', 'PA_CHANNEL_POSITION_AUX6',
'PA_CHANNEL_POSITION_AUX7', 'PA_CHANNEL_POSITION_AUX8',
'PA_CHANNEL_POSITION_AUX9', 'PA_CHANNEL_POSITION_AUX10',
'PA_CHANNEL_POSITION_AUX11', 'PA_CHANNEL_POSITION_AUX12',
'PA_CHANNEL_POSITION_AUX13', 'PA_CHANNEL_POSITION_AUX14',
'PA_CHANNEL_POSITION_AUX15', 'PA_CHANNEL_POSITION_AUX16',
'PA_CHANNEL_POSITION_AUX17', 'PA_CHANNEL_POSITION_AUX18',
'PA_CHANNEL_POSITION_AUX19', 'PA_CHANNEL_POSITION_AUX20',
'PA_CHANNEL_POSITION_AUX21', 'PA_CHANNEL_POSITION_AUX22',
'PA_CHANNEL_POSITION_AUX23', 'PA_CHANNEL_POSITION_AUX24',
'PA_CHANNEL_POSITION_AUX25', 'PA_CHANNEL_POSITION_AUX26',
'PA_CHANNEL_POSITION_AUX27', 'PA_CHANNEL_POSITION_AUX28',
'PA_CHANNEL_POSITION_AUX29', 'PA_CHANNEL_POSITION_AUX30',
'PA_CHANNEL_POSITION_AUX31', 'PA_CHANNEL_POSITION_TOP_CENTER',
'PA_CHANNEL_POSITION_TOP_FRONT_LEFT', 'PA_CHANNEL_POSITION_TOP_FRONT_RIGHT',
'PA_CHANNEL_POSITION_TOP_FRONT_CENTER', 'PA_CHANNEL_POSITION_TOP_REAR_LEFT',
'PA_CHANNEL_POSITION_TOP_REAR_RIGHT', 'PA_CHANNEL_POSITION_TOP_REAR_CENTER',
'PA_CHANNEL_POSITION_MAX', 'pa_channel_position_mask_t',
'pa_channel_map_def_t', 'PA_CHANNEL_MAP_AIFF', 'PA_CHANNEL_MAP_ALSA',
'PA_CHANNEL_MAP_AUX', 'PA_CHANNEL_MAP_WAVEEX', 'PA_CHANNEL_MAP_OSS',
'PA_CHANNEL_MAP_DEF_MAX', 'PA_CHANNEL_MAP_DEFAULT', 'pa_channel_map',
'pa_channel_map_init', 'pa_channel_map_init_mono',
'pa_channel_map_init_stereo', 'pa_channel_map_init_auto',
'pa_channel_map_init_extend', 'pa_channel_position_to_string',
'pa_channel_position_from_string', 'pa_channel_position_to_pretty_string',
'PA_CHANNEL_MAP_SNPRINT_MAX', 'pa_channel_map_snprint',
'pa_channel_map_parse', 'pa_channel_map_equal', 'pa_channel_map_valid',
'pa_channel_map_compatible', 'pa_channel_map_superset',
'pa_channel_map_can_balance', 'pa_channel_map_can_fade',
'pa_channel_map_to_name', 'pa_channel_map_to_pretty_name',
'pa_channel_map_has_position', 'pa_channel_map_mask', 'pa_operation',
'pa_operation_notify_cb_t', 'pa_operation_ref', 'pa_operation_unref',
'pa_operation_cancel', 'pa_operation_get_state',
'pa_operation_set_state_callback', 'pa_context', 'pa_context_notify_cb_t',
'pa_context_success_cb_t', 'pa_context_event_cb_t', 'pa_context_new',
'pa_context_new_with_proplist', 'pa_context_unref', 'pa_context_ref',
'pa_context_set_state_callback', 'pa_context_set_event_callback',
'pa_context_errno', 'pa_context_is_pending', 'pa_context_get_state',
'pa_context_connect', 'pa_context_disconnect', 'pa_context_drain',
'pa_context_exit_daemon', 'pa_context_set_default_sink',
'pa_context_set_default_source', 'pa_context_is_local', 'pa_context_set_name',
'pa_context_get_server', 'pa_context_get_protocol_version',
'pa_context_get_server_protocol_version', 'PA_UPDATE_SET', 'PA_UPDATE_MERGE',
'PA_UPDATE_REPLACE', 'pa_context_proplist_update',
'pa_context_proplist_remove', 'pa_context_get_index', 'pa_context_rttime_new',
'pa_context_rttime_restart', 'pa_context_get_tile_size',
'pa_context_load_cookie_from_file', 'pa_volume_t', 'pa_cvolume',
'pa_cvolume_equal', 'pa_cvolume_init', 'pa_cvolume_set',
'PA_CVOLUME_SNPRINT_MAX', 'pa_cvolume_snprint',
'PA_SW_CVOLUME_SNPRINT_DB_MAX', 'pa_sw_cvolume_snprint_dB',
'PA_CVOLUME_SNPRINT_VERBOSE_MAX', 'pa_cvolume_snprint_verbose',
'PA_VOLUME_SNPRINT_MAX', 'pa_volume_snprint', 'PA_SW_VOLUME_SNPRINT_DB_MAX',
'pa_sw_volume_snprint_dB', 'PA_VOLUME_SNPRINT_VERBOSE_MAX',
'pa_volume_snprint_verbose', 'pa_cvolume_avg', 'pa_cvolume_avg_mask',
'pa_cvolume_max', 'pa_cvolume_max_mask', 'pa_cvolume_min',
'pa_cvolume_min_mask', 'pa_cvolume_valid', 'pa_cvolume_channels_equal_to',
'pa_sw_volume_multiply', 'pa_sw_cvolume_multiply',
'pa_sw_cvolume_multiply_scalar', 'pa_sw_volume_divide',
'pa_sw_cvolume_divide', 'pa_sw_cvolume_divide_scalar', 'pa_sw_volume_from_dB',
'pa_sw_volume_to_dB', 'pa_sw_volume_from_linear', 'pa_sw_volume_to_linear',
'pa_cvolume_remap', 'pa_cvolume_compatible',
'pa_cvolume_compatible_with_channel_map', 'pa_cvolume_get_balance',
'pa_cvolume_set_balance', 'pa_cvolume_get_fade', 'pa_cvolume_set_fade',
'pa_cvolume_scale', 'pa_cvolume_scale_mask', 'pa_cvolume_set_position',
'pa_cvolume_get_position', 'pa_cvolume_merge', 'pa_cvolume_inc_clamp',
'pa_cvolume_inc', 'pa_cvolume_dec', 'pa_stream', 'pa_stream_success_cb_t',
'pa_stream_request_cb_t', 'pa_stream_notify_cb_t', 'pa_stream_event_cb_t',
'pa_stream_new', 'pa_stream_new_with_proplist', 'PA_ENCODING_ANY',
'PA_ENCODING_PCM', 'PA_ENCODING_AC3_IEC61937', 'PA_ENCODING_EAC3_IEC61937',
'PA_ENCODING_MPEG_IEC61937', 'PA_ENCODING_DTS_IEC61937',
'PA_ENCODING_MPEG2_AAC_IEC61937', 'PA_ENCODING_MAX', 'PA_ENCODING_INVALID',
'pa_stream_new_extended', 'pa_stream_unref', 'pa_stream_ref',
'pa_stream_get_state', 'pa_stream_get_context', 'pa_stream_get_index',
'pa_stream_get_device_index', 'pa_stream_get_device_name',
'pa_stream_is_suspended', 'pa_stream_is_corked', 'pa_stream_connect_playback',
'pa_stream_connect_record', 'pa_stream_disconnect', 'pa_stream_begin_write',
'pa_stream_cancel_write', 'pa_stream_write', 'pa_stream_write_ext_free',
'pa_stream_peek', 'pa_stream_drop', 'pa_stream_writable_size',
'pa_stream_readable_size', 'pa_stream_drain', 'pa_stream_update_timing_info',
'pa_stream_set_state_callback', 'pa_stream_set_write_callback',
'pa_stream_set_read_callback', 'pa_stream_set_overflow_callback',
'pa_stream_get_underflow_index', 'pa_stream_set_underflow_callback',
'pa_stream_set_started_callback', 'pa_stream_set_latency_update_callback',
'pa_stream_set_moved_callback', 'pa_stream_set_suspended_callback',
'pa_stream_set_event_callback', 'pa_stream_set_buffer_attr_callback',
'pa_stream_cork', 'pa_stream_flush', 'pa_stream_prebuf', 'pa_stream_trigger',
'pa_stream_set_name', 'pa_stream_get_time', 'pa_stream_get_latency',
'pa_stream_get_timing_info', 'pa_stream_get_sample_spec',
'pa_stream_get_channel_map', 'pa_stream_get_format_info',
'pa_stream_get_buffer_attr', 'pa_stream_set_buffer_attr',
'pa_stream_update_sample_rate', 'pa_stream_proplist_update',
'pa_stream_proplist_remove', 'pa_stream_set_monitor_stream',
'pa_stream_get_monitor_stream', 'pa_sink_port_info', 'pa_sink_info',
'pa_sink_info_cb_t', 'pa_context_get_sink_info_by_name',
'pa_context_get_sink_info_by_index', 'pa_context_get_sink_info_list',
'pa_context_set_sink_volume_by_index', 'pa_context_set_sink_volume_by_name',
'pa_context_set_sink_mute_by_index', 'pa_context_set_sink_mute_by_name',
'pa_context_suspend_sink_by_name', 'pa_context_suspend_sink_by_index',
'pa_context_set_sink_port_by_index', 'pa_context_set_sink_port_by_name',
'pa_source_port_info', 'pa_source_info', 'pa_source_info_cb_t',
'pa_context_get_source_info_by_name', 'pa_context_get_source_info_by_index',
'pa_context_get_source_info_list', 'pa_context_set_source_volume_by_index',
'pa_context_set_source_volume_by_name', 'pa_context_set_source_mute_by_index',
'pa_context_set_source_mute_by_name', 'pa_context_suspend_source_by_name',
'pa_context_suspend_source_by_index', 'pa_context_set_source_port_by_index',
'pa_context_set_source_port_by_name', 'pa_server_info', 'pa_server_info_cb_t',
'pa_context_get_server_info', 'pa_module_info', 'pa_module_info_cb_t',
'pa_context_get_module_info', 'pa_context_get_module_info_list',
'pa_context_index_cb_t', 'pa_context_load_module', 'pa_context_unload_module',
'pa_client_info', 'pa_client_info_cb_t', 'pa_context_get_client_info',
'pa_context_get_client_info_list', 'pa_context_kill_client',
'pa_card_profile_info', 'pa_card_profile_info2', 'pa_card_port_info',
'pa_card_info', 'pa_card_info_cb_t', 'pa_context_get_card_info_by_index',
'pa_context_get_card_info_by_name', 'pa_context_get_card_info_list',
'pa_context_set_card_profile_by_index', 'pa_context_set_card_profile_by_name',
'pa_context_set_port_latency_offset', 'pa_sink_input_info',
'pa_sink_input_info_cb_t', 'pa_context_get_sink_input_info',
'pa_context_get_sink_input_info_list', 'pa_context_move_sink_input_by_name',
'pa_context_move_sink_input_by_index', 'pa_context_set_sink_input_volume',
'pa_context_set_sink_input_mute', 'pa_context_kill_sink_input',
'pa_source_output_info', 'pa_source_output_info_cb_t',
'pa_context_get_source_output_info', 'pa_context_get_source_output_info_list',
'pa_context_move_source_output_by_name',
'pa_context_move_source_output_by_index',
'pa_context_set_source_output_volume', 'pa_context_set_source_output_mute',
'pa_context_kill_source_output', 'pa_stat_info', 'pa_stat_info_cb_t',
'pa_context_stat', 'pa_sample_info', 'pa_sample_info_cb_t',
'pa_context_get_sample_info_by_name', 'pa_context_get_sample_info_by_index',
'pa_context_get_sample_info_list', 'pa_autoload_type_t', 'PA_AUTOLOAD_SINK',
'PA_AUTOLOAD_SOURCE', 'pa_autoload_info', 'pa_autoload_info_cb_t',
'pa_context_get_autoload_info_by_name',
'pa_context_get_autoload_info_by_index', 'pa_context_get_autoload_info_list',
'pa_context_add_autoload', 'pa_context_remove_autoload_by_name',
'pa_context_remove_autoload_by_index', 'pa_context_subscribe_cb_t',
'pa_context_subscribe', 'pa_context_set_subscribe_callback',
'pa_context_play_sample_cb_t', 'pa_stream_connect_upload',
'pa_stream_finish_upload', 'pa_context_remove_sample',
'pa_context_play_sample', 'pa_context_play_sample_with_proplist',
'pa_strerror', 'pa_xmalloc', 'pa_xmalloc0', 'pa_xrealloc', 'pa_xfree',
'pa_xstrdup', 'pa_xstrndup', 'pa_xmemdup', '_pa_xnew_internal',
'_pa_xnew0_internal', '_pa_xnewdup_internal', '_pa_xrenew_internal',
'pa_utf8_valid', 'pa_ascii_valid', 'pa_utf8_filter', 'pa_ascii_filter',
'pa_utf8_to_locale', 'pa_locale_to_utf8', 'pa_threaded_mainloop',
'pa_threaded_mainloop_new', 'pa_threaded_mainloop_free',
'pa_threaded_mainloop_start', 'pa_threaded_mainloop_stop',
'pa_threaded_mainloop_lock', 'pa_threaded_mainloop_unlock',
'pa_threaded_mainloop_wait', 'pa_threaded_mainloop_signal',
'pa_threaded_mainloop_accept', 'pa_threaded_mainloop_get_retval',
'pa_threaded_mainloop_get_api', 'pa_threaded_mainloop_in_thread',
'pa_threaded_mainloop_set_name', 'pa_mainloop', 'pa_mainloop_new',
'pa_mainloop_free', 'pa_mainloop_prepare', 'pa_mainloop_poll',
'pa_mainloop_dispatch', 'pa_mainloop_get_retval', 'pa_mainloop_iterate',
'pa_mainloop_run', 'pa_mainloop_get_api', 'pa_mainloop_quit',
'pa_mainloop_wakeup', 'pa_poll_func', 'pa_mainloop_set_poll_func',
'pa_signal_event', 'pa_signal_cb_t', 'pa_signal_destroy_cb_t',
'pa_signal_init', 'pa_signal_done', 'pa_signal_new', 'pa_signal_free',
'pa_signal_set_destroy', 'pa_get_user_name', 'pa_get_host_name',
'pa_get_fqdn', 'pa_get_home_dir', 'pa_get_binary_name',
'pa_path_get_filename', 'pa_msleep', 'pa_gettimeofday', 'pa_timeval_diff',
'pa_timeval_cmp', 'pa_timeval_age', 'pa_timeval_add', 'pa_timeval_sub',
'pa_timeval_store', 'pa_timeval_load'] | unknown | codeparrot/codeparrot-clean | ||
# mysql/pymysql.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
:url: http://www.pymysql.org/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
"""
from .mysqldb import MySQLDialect_mysqldb
from ...util import py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
    """MySQL dialect for the pure-Python ``pymysql`` DBAPI.

    pymysql targets 100% MySQLdb compatibility, so nearly all behavior
    is inherited from :class:`MySQLDialect_mysqldb`; only the driver
    name, unicode flags, and Py3 error-code extraction differ.
    """
    driver = 'pymysql'
    # pymysql hands back already-decoded description fields, so no
    # extra decoding pass is needed.
    description_encoding = None
    # generally, these two values should be both True
    # or both False. PyMySQL unicode tests pass all the way back
    # to 0.4 either way. See [ticket:3337]
    supports_unicode_statements = True
    supports_unicode_binds = True
    @classmethod
    def dbapi(cls):
        # Import lazily so the dialect can be registered without
        # pymysql installed.
        return __import__('pymysql')
    if py3k:
        def _extract_error_code(self, exception):
            # On Py3, pymysql may wrap the original error as args[0];
            # unwrap one level before reading the numeric code.
            if isinstance(exception.args[0], Exception):
                exception = exception.args[0]
            return exception.args[0]
# Module-level hook SQLAlchemy uses to locate this dialect.
dialect = MySQLDialect_pymysql
#!/usr/bin/python
# encoding: utf-8 -*-
# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: kernel_blacklist
author: "Matthias Vogelgesang (@matze)"
version_added: 1.4
short_description: Blacklist kernel modules
description:
- Add or remove kernel modules from blacklist.
options:
name:
required: true
description:
- Name of kernel module to black- or whitelist.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the module should be present in the blacklist or absent.
blacklist_file:
required: false
description:
- If specified, use this blacklist file instead of
C(/etc/modprobe.d/blacklist-ansible.conf).
default: null
requirements: []
'''
EXAMPLES = '''
# Blacklist the nouveau driver module
- kernel_blacklist:
name: nouveau
state: present
'''
class Blacklist(object):
    """Manage kernel-module blacklist entries in a modprobe.d config file.

    Each blacklisted module is stored as a line of the form
    ``blacklist <name>``.  Comment lines (starting with ``#``) are
    ignored when checking whether an entry already exists.
    """

    def __init__(self, module, filename):
        # `module` is the kernel module name, not an AnsibleModule.
        # Ensure the blacklist file exists so later reads don't fail.
        if not os.path.exists(filename):
            open(filename, 'a').close()
        self.filename = filename
        self.module = module

    def get_pattern(self):
        """Return the regex matching this module's blacklist line."""
        # Raw string avoids the invalid-escape warning for \s, and
        # re.escape protects against regex metacharacters in the module
        # name.  \s* is kept (rather than \s+) for backward
        # compatibility with entries written without whitespace.
        return r'^blacklist\s*' + re.escape(self.module) + '$'

    def readlines(self):
        """Return all lines of the blacklist file."""
        with open(self.filename, 'r') as f:
            return f.readlines()

    def module_listed(self):
        """Return True if the module is already blacklisted."""
        pattern = self.get_pattern()
        for line in self.readlines():
            stripped = line.strip()
            if stripped.startswith('#'):
                continue
            if re.match(pattern, stripped):
                return True
        return False

    def remove_module(self):
        """Rewrite the file without this module's blacklist entry."""
        lines = self.readlines()
        pattern = self.get_pattern()
        with open(self.filename, 'w') as f:
            for line in lines:
                if not re.match(pattern, line.strip()):
                    f.write(line)

    def add_module(self):
        """Append a blacklist entry for this module.

        The original implementation leaked the file handle; the context
        manager guarantees the write is flushed and the file closed.
        """
        with open(self.filename, 'a') as f:
            f.write('blacklist %s\n' % self.module)
def main():
    """Entry point: apply the requested blacklist state and report changes."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'],
                       default='present'),
            blacklist_file=dict(required=False, default=None)
        ),
        supports_check_mode=False,
    )

    result = {
        'changed': False,
        'failed': False,
        'name': module.params['name'],
        'state': module.params['state'],
    }

    # Use the ansible-managed blacklist file unless the caller supplied
    # an explicit path.
    filename = (module.params['blacklist_file']
                or '/etc/modprobe.d/blacklist-ansible.conf')

    blacklist = Blacklist(result['name'], filename)
    already_listed = blacklist.module_listed()

    if already_listed and result['state'] == 'absent':
        blacklist.remove_module()
        result['changed'] = True
    elif not already_listed and result['state'] == 'present':
        blacklist.add_module()
        result['changed'] = True

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2016 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from diktya.layers.core import Swap, Subtensor, SplitAt, InBounds, BatchLoss
import numpy as np
import theano
from keras.models import Sequential, Model
from keras.layers.core import Activation
from keras.layers.convolutional import Convolution2D
from keras.engine.topology import Input
def test_swap():
    """Swap(0, 10) must exchange columns 0 and 10 of its input."""
    shape = (4, 32)
    layer = Swap(0, 10)
    layer.build(shape)
    data = np.random.sample(shape).astype(np.float32)
    result = layer(theano.shared(data)).eval()
    expected = data.copy()
    # Fancy indexing swaps both columns in one assignment.
    expected[:, [0, 10]] = expected[:, [10, 0]]
    assert (result == expected).all()
def test_subtensor():
    """Subtensor(0, 1) must select exactly rows [0:1) of the input."""
    shape = (4, 32)
    layer = Subtensor(0, 1)
    layer.build(shape)
    data = np.random.sample(shape).astype(np.float32)
    result = layer(theano.shared(data)).eval()
    assert (result == data[0:1]).all()
def test_split_at():
    """SplitAt(axis=0) must split the tensor into [:idx] and [idx:]."""
    shape = (4, 32)
    data = np.random.sample(shape).astype(np.float32)
    tensor = theano.shared(data)
    cut = 2
    index = theano.shared(np.cast['int32'](cut))
    layer = SplitAt(axis=0)
    layer.build([shape, (1,)])
    head, tail = (part.eval() for part in layer([index, tensor]))
    assert (head == data[:cut]).all()
    assert (tail == data[cut:]).all()
def test_in_bounds_clip():
    """InBounds(clip=True) passes in-range values through unchanged and
    clips out-of-range values to the nearest bound.

    The original test repeated the in-range assertion twice verbatim and
    never exercised the lower bound; the duplicate is replaced with a
    below-bounds case.
    """
    layer = InBounds(-1, 1, clip=True)
    shape = (1, 1)
    layer.build(shape)
    # In-range value is returned unchanged.
    arr = np.array([[0]], dtype=np.float32)
    output = layer(theano.shared(arr)).eval()
    assert (output == arr).all()
    # Value above the upper bound is clipped to 1.
    arr = np.array([[2]], dtype=np.float32)
    output = layer(theano.shared(arr)).eval()
    assert float(output) == 1.
    # Value below the lower bound is clipped to -1.
    arr = np.array([[-2]], dtype=np.float32)
    output = layer(theano.shared(arr)).eval()
    assert float(output) == -1.
def test_in_bounds_regularizer():
    """InBounds adds a regularization loss that grows the further the
    input lies outside [-1, 1]."""
    model = Sequential()
    model.add(InBounds(-1, 1, clip=True, input_shape=(1,)))
    model.compile('adam', 'mse')
    # The layer registers an extra 'reg' metric next to the loss.
    assert model.metrics_names == ['loss', 'reg']
    loss, reg = model.train_on_batch(np.array([[0]]), np.array([[0]]))
    # In-bounds input with matching target: zero loss.
    assert float(loss) == 0
    loss_on_2, reg = model.train_on_batch(np.array([[2]]), np.array([[1]]))
    assert float(loss_on_2) > 0
    loss_on_100, reg = model.train_on_batch(np.array([[100]]), np.array([[1]]))
    # Penalty must be monotone in the distance from the bounds.
    assert float(loss_on_2) < float(loss_on_100)
def test_batch_loss():
    """After training with BatchLoss(normalize=True), the conv output
    should have roughly zero mean and unit std per channel.

    NOTE(review): this trains 10 epochs on 10k samples — presumably a
    slow integration test by design; confirm before moving it to CI.
    """
    bl = BatchLoss(axis=1, normalize=True, l2=1.)
    shape = (1, 8, 8)
    input = Input(shape=shape)
    conv_out = Convolution2D(4, 3, 3, border_mode='same')(input)
    x = bl(conv_out)
    x = Activation('relu')(x)
    m = Model(input, x)
    m.compile('adam')
    m.fit(np.random.uniform(-1, 1, (10000,) + shape),
          batch_size=128,
          nb_epoch=10)
    # Inspect the raw conv output, bypassing BatchLoss/relu.
    m_conv = Model(input, conv_out)
    data = np.random.uniform(-1, 1, (1000,) + shape)
    conv_out = m_conv.predict(data)
    # Per-channel mean close to 0 and std close to 1.
    assert np.mean(abs(conv_out.mean(axis=(0, 2, 3)))) <= 0.1
    assert np.mean(np.abs(1 - conv_out.std(axis=(0, 2, 3)))) <= 0.1
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Generated by the protocol buffer compiler -- do not hand-edit these
# descriptor definitions; regenerate them from IpcConnectionContext.proto.
DESCRIPTOR = descriptor.FileDescriptor(
  name='IpcConnectionContext.proto',
  package='hadoop.common',
  serialized_pb='\n\x1aIpcConnectionContext.proto\x12\rhadoop.common\"?\n\x14UserInformationProto\x12\x15\n\reffectiveUser\x18\x01 \x01(\t\x12\x10\n\x08realUser\x18\x02 \x01(\t\"d\n\x19IpcConnectionContextProto\x12\x35\n\x08userInfo\x18\x02 \x01(\x0b\x32#.hadoop.common.UserInformationProto\x12\x10\n\x08protocol\x18\x03 \x01(\tB?\n\x1eorg.apache.hadoop.ipc.protobufB\x1aIpcConnectionContextProtos\xa0\x01\x01')

# Descriptor for message hadoop.common.UserInformationProto
# (optional string fields: effectiveUser = 1, realUser = 2).
_USERINFORMATIONPROTO = descriptor.Descriptor(
  name='UserInformationProto',
  full_name='hadoop.common.UserInformationProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='effectiveUser', full_name='hadoop.common.UserInformationProto.effectiveUser', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='realUser', full_name='hadoop.common.UserInformationProto.realUser', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=45,
  serialized_end=108,
)

# Descriptor for message hadoop.common.IpcConnectionContextProto
# (optional fields: userInfo = 2 (message), protocol = 3 (string)).
_IPCCONNECTIONCONTEXTPROTO = descriptor.Descriptor(
  name='IpcConnectionContextProto',
  full_name='hadoop.common.IpcConnectionContextProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='userInfo', full_name='hadoop.common.IpcConnectionContextProto.userInfo', index=0,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='protocol', full_name='hadoop.common.IpcConnectionContextProto.protocol', index=1,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=110,
  serialized_end=210,
)

# Link the message-typed field to its descriptor and register both
# messages with the file descriptor.
_IPCCONNECTIONCONTEXTPROTO.fields_by_name['userInfo'].message_type = _USERINFORMATIONPROTO
DESCRIPTOR.message_types_by_name['UserInformationProto'] = _USERINFORMATIONPROTO
DESCRIPTOR.message_types_by_name['IpcConnectionContextProto'] = _IPCCONNECTIONCONTEXTPROTO
class UserInformationProto(message.Message):
  # Generated message class; all behavior is injected by the metaclass
  # from the descriptor above.
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _USERINFORMATIONPROTO
  # @@protoc_insertion_point(class_scope:hadoop.common.UserInformationProto)
class IpcConnectionContextProto(message.Message):
  # Generated message class; all behavior is injected by the metaclass
  # from the descriptor above.
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _IPCCONNECTIONCONTEXTPROTO
  # @@protoc_insertion_point(class_scope:hadoop.common.IpcConnectionContextProto)
# @@protoc_insertion_point(module_scope) | unknown | codeparrot/codeparrot-clean | ||
from binary_tree_prototype import BinaryTreeNode
import collections
# @include
def is_balanced_binary_tree(tree):
    """Return True iff every node's two subtree heights differ by <= 1."""

    def height_if_balanced(node):
        # Returns the height of `node`, or None as soon as an unbalanced
        # subtree is found (so the recursion short-circuits upward).
        # An empty tree has height -1 by convention.
        if node is None:
            return -1
        left_height = height_if_balanced(node.left)
        if left_height is None:
            return None
        right_height = height_if_balanced(node.right)
        if right_height is None:
            return None
        if abs(left_height - right_height) > 1:
            return None
        return max(left_height, right_height) + 1

    return height_if_balanced(tree) is not None
# @exclude
def main():
    """Smoke-test is_balanced_binary_tree on a balanced tree and a
    left-skewed chain, printing the result of each check."""
    # balanced binary tree test
    #        3
    #      2   5
    #    1    4 6
    balanced = BinaryTreeNode()
    balanced.left = BinaryTreeNode()
    balanced.left.left = BinaryTreeNode()
    balanced.right = BinaryTreeNode()
    balanced.right.left = BinaryTreeNode()
    balanced.right.right = BinaryTreeNode()
    assert is_balanced_binary_tree(balanced)
    print(is_balanced_binary_tree(balanced))
    # Non-balanced test: a chain of three nodes down the left side.
    skewed = BinaryTreeNode()
    skewed.left = BinaryTreeNode()
    skewed.left.left = BinaryTreeNode()
    assert not is_balanced_binary_tree(skewed)
    print(is_balanced_binary_tree(skewed))
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* jcapistd.c
*
* Copyright (C) 1994-1996, Thomas G. Lane.
* Modified 2013 by Guido Vollbeding.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file contains application interface code for the compression half
* of the JPEG library. These are the "standard" API routines that are
* used in the normal full-compression case. They are not used by a
* transcoding-only application. Note that if an application links in
* jpeg_start_compress, it will end up linking in the entire compressor.
* We thus must separate this file from jcapimin.c to avoid linking the
* whole compression library into a transcoder.
*/
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
/*
* Compression initialization.
* Before calling this, all parameters and a data destination must be set up.
*
* We require a write_all_tables parameter as a failsafe check when writing
* multiple datastreams from the same compression object. Since prior runs
* will have left all the tables marked sent_table=TRUE, a subsequent run
* would emit an abbreviated stream (no tables) by default. This may be what
* is wanted, but for safety's sake it should not be the default behavior:
* programmers should have to make a deliberate choice to emit abbreviated
* images. Therefore the documentation and examples should encourage people
* to pass write_all_tables=TRUE; then it will take active thought to do the
* wrong thing.
*/
GLOBAL(void)
jpeg_start_compress (j_compress_ptr cinfo, boolean write_all_tables)
{
  /* Only legal from the initial state; any other state is a usage error. */
  if (cinfo->global_state != CSTATE_START)
    ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);

  if (write_all_tables)
    jpeg_suppress_tables(cinfo, FALSE); /* mark all tables to be written */

  /* (Re)initialize error mgr and destination modules */
  (*cinfo->err->reset_error_mgr) ((j_common_ptr) cinfo);
  (*cinfo->dest->init_destination) (cinfo);
  /* Perform master selection of active modules */
  jinit_compress_master(cinfo);
  /* Set up for the first pass */
  (*cinfo->master->prepare_for_pass) (cinfo);
  /* Ready for application to drive first pass through jpeg_write_scanlines
   * or jpeg_write_raw_data.
   */
  cinfo->next_scanline = 0;
  /* Raw-data mode and scanline mode use distinct states so the write
   * entry points can enforce correct usage. */
  cinfo->global_state = (cinfo->raw_data_in ? CSTATE_RAW_OK : CSTATE_SCANNING);
}
/*
* Write some scanlines of data to the JPEG compressor.
*
* The return value will be the number of lines actually written.
* This should be less than the supplied num_lines only in case that
* the data destination module has requested suspension of the compressor,
* or if more than image_height scanlines are passed in.
*
* Note: we warn about excess calls to jpeg_write_scanlines() since
* this likely signals an application programmer error. However,
* excess scanlines passed in the last valid call are *silently* ignored,
* so that the application need not adjust num_lines for end-of-image
* when using a multiple-scanline buffer.
*/
GLOBAL(JDIMENSION)
jpeg_write_scanlines (j_compress_ptr cinfo, JSAMPARRAY scanlines,
                      JDIMENSION num_lines)
{
  JDIMENSION row_ctr, rows_left;

  /* Must be in scanline mode (see jpeg_start_compress). */
  if (cinfo->global_state != CSTATE_SCANNING)
    ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
  /* Writing past image_height is likely an application bug: warn,
   * but fall through (the excess rows are clipped below). */
  if (cinfo->next_scanline >= cinfo->image_height)
    WARNMS(cinfo, JWRN_TOO_MUCH_DATA);

  /* Call progress monitor hook if present */
  if (cinfo->progress != NULL) {
    cinfo->progress->pass_counter = (long) cinfo->next_scanline;
    cinfo->progress->pass_limit = (long) cinfo->image_height;
    (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo);
  }

  /* Give master control module another chance if this is first call to
   * jpeg_write_scanlines.  This lets output of the frame/scan headers be
   * delayed so that application can write COM, etc, markers between
   * jpeg_start_compress and jpeg_write_scanlines.
   */
  if (cinfo->master->call_pass_startup)
    (*cinfo->master->pass_startup) (cinfo);

  /* Ignore any extra scanlines at bottom of image. */
  rows_left = cinfo->image_height - cinfo->next_scanline;
  if (num_lines > rows_left)
    num_lines = rows_left;

  /* row_ctr returns how many rows were actually consumed; fewer than
   * num_lines means the destination requested suspension. */
  row_ctr = 0;
  (*cinfo->main->process_data) (cinfo, scanlines, &row_ctr, num_lines);
  cinfo->next_scanline += row_ctr;
  return row_ctr;
}
/*
* Alternate entry point to write raw data.
* Processes exactly one iMCU row per call, unless suspended.
*/
GLOBAL(JDIMENSION)
jpeg_write_raw_data (j_compress_ptr cinfo, JSAMPIMAGE data,
                     JDIMENSION num_lines)
{
  JDIMENSION lines_per_iMCU_row;

  /* Must be in raw-data mode (cinfo->raw_data_in set before start). */
  if (cinfo->global_state != CSTATE_RAW_OK)
    ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
  if (cinfo->next_scanline >= cinfo->image_height) {
    WARNMS(cinfo, JWRN_TOO_MUCH_DATA);
    return 0;
  }

  /* Call progress monitor hook if present */
  if (cinfo->progress != NULL) {
    cinfo->progress->pass_counter = (long) cinfo->next_scanline;
    cinfo->progress->pass_limit = (long) cinfo->image_height;
    (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo);
  }

  /* Give master control module another chance if this is first call to
   * jpeg_write_raw_data.  This lets output of the frame/scan headers be
   * delayed so that application can write COM, etc, markers between
   * jpeg_start_compress and jpeg_write_raw_data.
   */
  if (cinfo->master->call_pass_startup)
    (*cinfo->master->pass_startup) (cinfo);

  /* Verify that at least one iMCU row has been passed. */
  lines_per_iMCU_row = cinfo->max_v_samp_factor * cinfo->min_DCT_v_scaled_size;
  if (num_lines < lines_per_iMCU_row)
    ERREXIT(cinfo, JERR_BUFFER_SIZE);

  /* Directly compress the row. */
  if (! (*cinfo->coef->compress_data) (cinfo, data)) {
    /* If compressor did not consume the whole row, suspend processing. */
    return 0;
  }

  /* OK, we processed one iMCU row. */
  cinfo->next_scanline += lines_per_iMCU_row;
  return lines_per_iMCU_row;
}
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class crm_configuration(osv.TransientModel):
    """CRM settings wizard.

    Extends the generic Sales settings screen (sale.config.settings)
    with CRM- and fetchmail-related options, including management of the
    default incoming-mail alias used to create leads.
    """
    _name = 'sale.config.settings'
    _inherit = ['sale.config.settings', 'fetchmail.config.settings']

    _columns = {
        'group_fund_raising': fields.boolean("Manage Fund Raising",
            implied_group='crm.group_fund_raising',
            help="""Allows you to trace and manage your activities for fund raising."""),
        'module_crm_claim': fields.boolean("Manage Customer Claims",
            help='Allows you to track your customers/suppliers claims and grievances.\n'
                 '-This installs the module crm_claim.'),
        'module_crm_helpdesk': fields.boolean("Manage Helpdesk and Support",
            help='Allows you to communicate with Customer, process Customer query, and provide better help and support.\n'
                 '-This installs the module crm_helpdesk.'),
        'alias_prefix': fields.char('Default Alias Name for Leads'),
        'alias_domain' : fields.char('Alias Domain'),
        'group_scheduled_calls': fields.boolean("Schedule calls to manage call center",
            implied_group='crm.group_scheduled_calls',
            help="""This adds the menu 'Scheduled Calls' under 'Sales / Phone Calls'""")
    }
    _defaults = {
        # The alias domain is installation-wide; read it as superuser.
        'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
    }

    def _find_default_lead_alias_id(self, cr, uid, context=None):
        # Prefer the alias declared in XML data; fall back to any generic
        # crm.lead catch-all alias (no forced thread, empty defaults).
        alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'crm.mail_alias_lead_info')
        if not alias_id:
            alias_ids = self.pool['mail.alias'].search(
                cr, uid, [
                    ('alias_model_id.model', '=', 'crm.lead'),
                    ('alias_force_thread_id', '=', False),
                    ('alias_parent_model_id.model', '=', 'crm.case.section'),
                    ('alias_parent_thread_id', '=', False),
                    ('alias_defaults', '=', '{}')
                ], context=context)
            alias_id = alias_ids and alias_ids[0] or False
        return alias_id

    def get_default_alias_prefix(self, cr, uid, ids, context=None):
        """Return the current lead alias name for display in the wizard."""
        alias_name = False
        alias_id = self._find_default_lead_alias_id(cr, uid, context=context)
        if alias_id:
            alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
        return {'alias_prefix': alias_name}

    def set_default_alias_prefix(self, cr, uid, ids, context=None):
        """Create or update the default lead alias from the wizard value."""
        mail_alias = self.pool['mail.alias']
        for record in self.browse(cr, uid, ids, context=context):
            alias_id = self._find_default_lead_alias_id(cr, uid, context=context)
            if not alias_id:
                create_ctx = dict(context, alias_model_name='crm.lead', alias_parent_model_name='crm.case.section')
                alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
            else:
                mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
        return True
import pytest
from pybind11_tests import opaque_types as m
from pybind11_tests import ConstructorStats, UserType
def test_string_list():
    """Exercise the opaque StringList binding: push/pop/back, iteration,
    and use through the stringList property of ClassWithSTLVecProperty."""
    strings = m.StringList()
    strings.push_back("Element 1")
    strings.push_back("Element 2")
    assert m.print_opaque_list(strings) == "Opaque list: [Element 1, Element 2]"
    assert strings.back() == "Element 2"
    for number, element in enumerate(strings, start=1):
        assert element == "Element {}".format(number)
    strings.pop_back()
    assert m.print_opaque_list(strings) == "Opaque list: [Element 1]"
    holder = m.ClassWithSTLVecProperty()
    assert m.print_opaque_list(holder.stringList) == "Opaque list: []"
    holder.stringList = strings
    holder.stringList.push_back("Element 3")
    assert m.print_opaque_list(holder.stringList) == "Opaque list: [Element 1, Element 3]"
def test_pointers(msg):
    """void*/capsule round-trips must not leak or copy-construct objects,
    and passing a non-capsule must raise a TypeError."""
    living_before = ConstructorStats.get(UserType).alive()
    assert m.get_void_ptr_value(m.return_void_ptr()) == 0x1234
    assert m.get_void_ptr_value(UserType())  # Should also work for other C++ types
    # No UserType instance may survive the capsule round-trip.
    assert ConstructorStats.get(UserType).alive() == living_before

    with pytest.raises(TypeError) as excinfo:
        m.get_void_ptr_value([1, 2, 3])  # This should not work
    # NOTE(review): indentation inside this expected-message literal is
    # significant to the comparison (msg() normalizes it) — confirm it
    # matches the upstream pybind11 test verbatim.
    assert msg(excinfo.value) == """
        get_void_ptr_value(): incompatible function arguments. The following argument types are supported:
            1. (arg0: capsule) -> int

        Invoked with: [1, 2, 3]
    """  # noqa: E501 line too long

    assert m.return_null_str() is None
    assert m.get_null_str_value(m.return_null_str()) is not None

    ptr = m.return_unique_ptr()
    assert "StringList" in repr(ptr)
    assert m.print_opaque_list(ptr) == "Opaque list: [some value]"
/* Copyright (c) 2021, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef KEYRING_OPERATIONS_HELPER_INCLUDED
#define KEYRING_OPERATIONS_HELPER_INCLUDED
#include <mysql/components/service.h>
#include <mysql/components/services/keyring_reader_with_status.h>
#include <mysql/service_mysql_alloc.h>
namespace keyring_operations_helper {
/**
Read secret from keyring
  Note: Memory for secret and secret_type must be freed by the caller
@param [in] keyring_reader Handle to keyring_reader_with_status service
@param [in] secret_id Identifier for secret data
@param [in] auth_id Owner of secret data - nullptr for internal keys
@param [out] secret Output buffer for secret fetched from keyring
@param [out] secret_length Length of secret data
@param [out] secret_type Type of data
@param [out] psi_memory_key Memory key to be used to allocate memory for
secret and secret_type
@returns status of reading secret
@retval -1 Keyring error
@retval 0 Key absent
@retval 1 Key present. Check output buffers.
*/
int read_secret(SERVICE_TYPE(keyring_reader_with_status) * keyring_reader,
const char *secret_id, const char *auth_id,
unsigned char **secret, size_t *secret_length,
char **secret_type, PSI_memory_key psi_memory_key);
} // namespace keyring_operations_helper
#endif /* KEYRING_OPERATIONS_HELPER_INCLUDED */ | c | github | https://github.com/mysql/mysql-server | include/keyring_operations_helper.h |
from Screens.Screen import Screen
from Components.Sources.List import List
from Components.ActionMap import NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.config import configfile
from Components.PluginComponent import plugins
from Components.config import config
from Components.SystemInfo import SystemInfo
from Tools.BoundFunction import boundFunction
from Tools.Directories import resolveFilename, SCOPE_SKIN
import xml.etree.cElementTree
from Screens.Setup import Setup, getSetupTitle
mainmenu = _("Main menu")

# Read and parse the menu structure once at import time. The context
# manager guarantees the handle is closed (the original left an open
# handle in a variable named 'file', shadowing the builtin).
with open(resolveFilename(SCOPE_SKIN, 'menu.xml'), 'r') as menu_file:
    mdom = xml.etree.cElementTree.parse(menu_file)
class MenuUpdater:
    """Registry that lets plugins inject extra entries into menus,
    keyed by menu id."""

    def __init__(self):
        self.updatedMenuItems = {}

    def addMenuItem(self, id, pos, text, module, screen, weight):
        """Register an entry for menu `id` at position `pos`."""
        if not self.updatedMenuAvailable(id):
            self.updatedMenuItems[id] = []
        self.updatedMenuItems[id].append([text, pos, module, screen, weight])

    def delMenuItem(self, id, pos, text, module, screen, weight):
        """Remove a previously registered entry (ValueError if absent)."""
        self.updatedMenuItems[id].remove([text, pos, module, screen, weight])

    def updatedMenuAvailable(self, id):
        """Return True if any entries were registered for menu `id`."""
        # dict.has_key() was removed in Python 3; the 'in' operator is the
        # portable and idiomatic membership test.
        return id in self.updatedMenuItems

    def getUpdatedMenu(self, id):
        return self.updatedMenuItems[id]
class MenuSummary(Screen):
    # LCD/summary companion screen for Menu; content comes from the skin.
    pass
class Menu(Screen):
ALLOW_SUSPEND = True
def okbuttonClick(self):
# print "okbuttonClick"
selection = self["menu"].getCurrent()
if selection is not None:
selection[1]()
def execText(self, text):
exec text
def runScreen(self, arg):
# arg[0] is the module (as string)
# arg[1] is Screen inside this module
# plus possible arguments, as
# string (as we want to reference
# stuff which is just imported)
# FIXME. somehow
if arg[0] != "":
exec "from " + arg[0] + " import *"
self.openDialog(*eval(arg[1]))
def nothing(self): #dummy
pass
def openDialog(self, *dialog): # in every layer needed
self.session.openWithCallback(self.menuClosed, *dialog)
def openSetup(self, dialog):
self.session.openWithCallback(self.menuClosed, Setup, dialog)
def addMenu(self, destList, node):
requires = node.get("requires")
if requires:
if requires[0] == '!':
if SystemInfo.get(requires[1:], False):
return
elif not SystemInfo.get(requires, False):
return
MenuTitle = _(node.get("text", "??").encode("UTF-8"))
entryID = node.get("entryID", "undefined")
weight = node.get("weight", 50)
x = node.get("flushConfigOnClose")
if x:
a = boundFunction(self.session.openWithCallback, self.menuClosedWithConfigFlush, Menu, node)
else:
a = boundFunction(self.session.openWithCallback, self.menuClosed, Menu, node)
#TODO add check if !empty(node.childNodes)
destList.append((MenuTitle, a, entryID, weight))
def menuClosedWithConfigFlush(self, *res):
configfile.save()
self.menuClosed(*res)
def menuClosed(self, *res):
if res and res[0]:
self.close(True)
def addItem(self, destList, node):
requires = node.get("requires")
if requires:
if requires[0] == '!':
if SystemInfo.get(requires[1:], False):
return
elif not SystemInfo.get(requires, False):
return
configCondition = node.get("configcondition")
if configCondition and not eval(configCondition + ".value"):
return
item_text = node.get("text", "").encode("UTF-8")
entryID = node.get("entryID", "undefined")
weight = node.get("weight", 50)
for x in node:
if x.tag == 'screen':
module = x.get("module")
screen = x.get("screen")
if screen is None:
screen = module
# print module, screen
if module:
module = "Screens." + module
else:
module = ""
# check for arguments. they will be appended to the
# openDialog call
args = x.text or ""
screen += ", " + args
destList.append((_(item_text or "??"), boundFunction(self.runScreen, (module, screen)), entryID, weight))
return
elif x.tag == 'plugin':
extensions = x.get("extensions")
system = x.get("system")
screen = x.get("screen")
if extensions:
module = extensions
elif system:
module = system
if screen is None:
screen = module
if extensions:
module = "Plugins.Extensions." + extensions + '.plugin'
elif system:
module = "Plugins.SystemPlugins." + system + '.plugin'
else:
module = ""
# check for arguments. they will be appended to the
# openDialog call
args = x.text or ""
screen += ", " + args
destList.append((_(item_text or "??"), boundFunction(self.runScreen, (module, screen)), entryID, weight))
return
elif x.tag == 'code':
destList.append((_(item_text or "??"), boundFunction(self.execText, x.text), entryID, weight))
return
elif x.tag == 'setup':
id = x.get("id")
if item_text == "":
item_text = _(getSetupTitle(id))
else:
item_text = _(item_text)
destList.append((item_text, boundFunction(self.openSetup, id), entryID, weight))
return
destList.append((item_text, self.nothing, entryID, weight))
def __init__(self, session, parent):
Screen.__init__(self, session)
list = []
menuID = None
for x in parent: #walk through the actual nodelist
if not x.tag:
continue
if x.tag == 'item':
item_level = int(x.get("level", 0))
if item_level <= config.usage.setup_level.index:
self.addItem(list, x)
count += 1
elif x.tag == 'menu':
self.addMenu(list, x)
count += 1
elif x.tag == "id":
menuID = x.get("val")
count = 0
if menuID is not None:
# menuupdater?
if menuupdater.updatedMenuAvailable(menuID):
for x in menuupdater.getUpdatedMenu(menuID):
if x[1] == count:
list.append((x[0], boundFunction(self.runScreen, (x[2], x[3] + ", ")), x[4]))
count += 1
if menuID is not None:
# plugins
for l in plugins.getPluginsForMenu(menuID):
# check if a plugin overrides an existing menu
plugin_menuid = l[2]
for x in list:
if x[2] == plugin_menuid:
list.remove(x)
break
if len(l) > 4 and l[4]:
list.append((l[0], boundFunction(l[1], self.session, self.close), l[2], l[3] or 50))
else:
list.append((l[0], boundFunction(l[1], self.session), l[2], l[3] or 50))
# for the skin: first try a menu_<menuID>, then Menu
self.skinName = [ ]
if menuID is not None:
self.skinName.append("menu_" + menuID)
self.skinName.append("Menu")
# Sort by Weight
if config.usage.sort_menus.value:
list.sort()
else:
list.sort(key=lambda x: int(x[3]))
self["menu"] = List(list)
self["actions"] = NumberActionMap(["OkCancelActions", "MenuActions", "NumberActions"],
{
"ok": self.okbuttonClick,
"cancel": self.closeNonRecursive,
"menu": self.closeRecursive,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal
})
a = parent.get("title", "").encode("UTF-8") or None
a = a and _(a)
if a is None:
a = _(parent.get("text", "").encode("UTF-8"))
self["title"] = StaticText(a)
Screen.setTitle(self, a)
self.menu_title = a
def keyNumberGlobal(self, number):
# print "menu keyNumber:", number
# Calculate index
number -= 1
if len(self["menu"].list) > number:
self["menu"].setIndex(number)
self.okbuttonClick()
def closeNonRecursive(self):
self.close(False)
def closeRecursive(self):
self.close(True)
def createSummary(self):
return MenuSummary
class MainMenu(Menu):
    #add file load functions for the xml-file
    def __init__(self, *x):
        # The top-level menu reuses the generic "Menu" skin.
        self.skinName = "Menu"
        Menu.__init__(self, *x)
# coding: utf-8
from hashlib import sha1
import os
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.core.validators import MaxLengthValidator
from django.db import models
from django.db.models import aggregates
from django.db.models.signals import post_save
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django_fsm.db.fields import FSMField, transition, can_proceed
from djangobb_forum.fields import AutoOneToOneField, ExtendedImageField, JSONField
from djangobb_forum.util import smiles, convert_text_to_html
from djangobb_forum import settings as forum_settings
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ['^djangobb_forum\.fields\.AutoOneToOneField',
'^djangobb_forum\.fields\.JSONField',
'^djangobb_forum\.fields\.ExtendedImageField'])
# Time-zone offsets (hours relative to GMT) offered as float choices for
# Profile.time_zone; labels show the offset plus a common zone hint.
TZ_CHOICES = [(float(x[0]), x[1]) for x in (
    (-12, '-12'), (-11, '-11'), (-10, '-10'), (-9.5, '-09.5'), (-9, '-09'),
    (-8.5, '-08.5'), (-8, '-08 PST'), (-7, '-07 MST'), (-6, '-06 CST'),
    (-5, '-05 EST'), (-4, '-04 AST'), (-3.5, '-03.5'), (-3, '-03 ADT'),
    (-2, '-02'), (-1, '-01'), (0, '00 GMT'), (1, '+01 CET'), (2, '+02'),
    (3, '+03'), (3.5, '+03.5'), (4, '+04'), (4.5, '+04.5'), (5, '+05'),
    (5.5, '+05.5'), (6, '+06'), (6.5, '+06.5'), (7, '+07'), (8, '+08'),
    (9, '+09'), (9.5, '+09.5'), (10, '+10'), (10.5, '+10.5'), (11, '+11'),
    (11.5, '+11.5'), (12, '+12'), (13, '+13'), (14, '+14'),
)]
# Sign of a reputation vote (see Reputation.sign).
SIGN_CHOICES = (
    (1, 'PLUS'),
    (-1, 'MINUS'),
)
# E-mail visibility levels for Profile.privacy_permission.
PRIVACY_CHOICES = (
    (0, _(u'Display your e-mail address.')),
    (1, _(u'Hide your e-mail address but allow form e-mail.')),
    (2, _(u'Hide your e-mail address and disallow form e-mail.')),
)
# bbcode is always available; markdown only when the library imports cleanly.
MARKUP_CHOICES = [('bbcode', 'bbcode')]
try:
    import markdown
    MARKUP_CHOICES.append(("markdown", "markdown"))
except ImportError:
    pass
# Discover installed themes from STATIC_ROOT at import time.
path = os.path.join(settings.STATIC_ROOT, 'djangobb_forum', 'themes')
if os.path.exists(path):
    # fix for collectstatic
    THEME_CHOICES = [(theme, theme) for theme in os.listdir(path)
                     if os.path.isdir(os.path.join(path, theme))]
else:
    THEME_CHOICES = []
import logging
logger = logging.getLogger(__name__)
# Module-level Akismet client. Stays None when disabled or when
# initialization fails; spam checks are then skipped
# (see PostStatus._comment_check).
akismet_api = None
from akismet import Akismet, AkismetError
try:
    if getattr(settings, 'AKISMET_ENABLED', True):
        akismet_api = Akismet(key=forum_settings.AKISMET_API_KEY, blog_url=forum_settings.AKISMET_BLOG_URL, agent=forum_settings.AKISMET_AGENT)
except Exception as e:
    logger.error("Error while initializing Akismet", extra={'exception': e})
class Category(models.Model):
    # Top-level grouping of forums; an optional group whitelist restricts
    # who can see the category (see has_access).
    name = models.CharField(_('Name'), max_length=80)
    groups = models.ManyToManyField(Group, blank=True, null=True, verbose_name=_('Groups'), help_text=_('Only users from these groups can see this category'))
    position = models.IntegerField(_('Position'), blank=True, default=0)
    class Meta:
        ordering = ['position']
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
    def __unicode__(self):
        return self.name
    def forum_count(self):
        """Number of forums belonging to this category."""
        return self.forums.all().count()
    @property
    def topics(self):
        """All topics in any forum of this category."""
        return Topic.objects.filter(forum__category__id=self.id).select_related()
    @property
    def posts(self):
        """All posts in any topic of this category."""
        return Post.objects.filter(topic__forum__category__id=self.id).select_related()
    def has_access(self, user):
        """Return True when ``user`` may see this category.

        Superusers always have access. Otherwise, if a group whitelist
        exists, the user must be authenticated and in one of the groups.
        """
        if user.is_superuser:
            return True
        if self.groups.exists():
            if user.is_authenticated():
                if not self.groups.filter(user__pk=user.id).exists():
                    return False
            else:
                return False
        return True
class Forum(models.Model):
    # A forum inside a Category. post_count/topic_count/last_post are
    # denormalised caches maintained via set_counts()/set_last_post().
    category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
    moderator_only = models.BooleanField(_('New topics by moderators only'), default=False)
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    description = models.TextField(_('Description'), blank=True, default='')
    moderators = models.ManyToManyField(User, blank=True, null=True, verbose_name=_('Moderators'))
    updated = models.DateTimeField(_('Updated'), auto_now=True)
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    topic_count = models.IntegerField(_('Topic count'), blank=True, default=0)
    last_post = models.ForeignKey('Post', related_name='last_forum_post', blank=True, null=True)
    class Meta:
        ordering = ['position']
        verbose_name = _('Forum')
        verbose_name_plural = _('Forums')
    def __unicode__(self):
        return self.name
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:forum', [self.id])
    def get_mobile_url(self):
        return reverse('djangobb:mobile_forum', args=[self.id])
    @property
    def posts(self):
        """All posts in any topic of this forum."""
        return Post.objects.filter(topic__forum__id=self.id).select_related()
    def set_last_post(self):
        """Refresh the cached last_post (None when the forum has no topics).

        Does not save; callers are expected to call save() afterwards.
        """
        try:
            self.last_post = Topic.objects.filter(forum=self).latest().last_post
        except Topic.DoesNotExist:
            self.last_post = None
    def set_counts(self):
        """Recount the cached topic and post totals (does not save)."""
        self.topic_count = Topic.objects.filter(forum=self).count()
        self.post_count = Post.objects.filter(topic__forum=self).count()
class Topic(models.Model):
    # A discussion thread inside a Forum; post_count/last_post are
    # denormalised caches.
    forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
    name = models.CharField(_('Subject'), max_length=255)
    created = models.DateTimeField(_('Created'), auto_now_add=True)
    updated = models.DateTimeField(_('Updated'), null=True)
    user = models.ForeignKey(User, verbose_name=_('User'))
    views = models.IntegerField(_('Views count'), blank=True, default=0)
    sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
    closed = models.BooleanField(_('Closed'), blank=True, default=False)
    subscribers = models.ManyToManyField(User, related_name='subscriptions', verbose_name=_('Subscribers'), blank=True)
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    last_post = models.ForeignKey('Post', related_name='last_topic_post', blank=True, null=True)
    class Meta:
        ordering = ['-updated']
        get_latest_by = 'updated'
        verbose_name = _('Topic')
        verbose_name_plural = _('Topics')
        permissions = (
            ('delayed_close', 'Can close topics after a delay'),
        )
    def __unicode__(self):
        return self.name
    def move_to(self, new_forum):
        """
        Move a topic to a new forum, refreshing the caches of the forum
        it leaves.
        """
        self.clear_last_forum_post()
        old_forum = self.forum
        self.forum = new_forum
        self.save()
        # Refresh the denormalised caches on the forum we just left.
        old_forum.set_last_post()
        old_forum.set_counts()
        old_forum.save()
    def delete(self, *args, **kwargs):
        """Delete (or soft-delete) this topic, then repair the forum caches.

        When SOFT_DELETE_TOPICS names a dustbin forum and the topic is not
        already there (or the caller is not staff), the topic is moved to
        the dustbin instead of being removed from the database.
        """
        self.clear_last_forum_post()
        forum = self.forum
        if forum_settings.SOFT_DELETE_TOPICS and (self.forum != get_object_or_404(Forum, pk=forum_settings.SOFT_DELETE_TOPICS) or not kwargs.get('staff', False)):
            self.forum = get_object_or_404(Forum, pk=forum_settings.SOFT_DELETE_TOPICS)
            self.save()
        else:
            super(Topic, self).delete()
        forum.set_last_post()
        forum.set_counts()
        forum.save()
    @property
    def head(self):
        """The first (oldest) post of the topic, or None when empty."""
        try:
            return self.posts.select_related().order_by('created')[0]
        except IndexError:
            return None
    @property
    def reply_count(self):
        """Number of posts beyond the head post."""
        return self.post_count - 1
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:topic', [self.id])
    def get_mobile_url(self):
        return reverse('djangobb:mobile_topic', args=[self.id])
    def update_read(self, user):
        """Record that ``user`` has read this topic up to its last post."""
        tracking = user.posttracking
        #if last_read > last_read - don't check topics
        if tracking.last_read and (tracking.last_read > self.last_post.created):
            return
        if isinstance(tracking.topics, dict):
            #clear topics if len > 5Kb and set last_read to current time
            if len(tracking.topics) > 5120:
                tracking.topics = None
                tracking.last_read = timezone.now()
                tracking.save()
            #update topics if exist new post or does't exist in dict
            elif self.last_post_id > tracking.topics.get(str(self.id), 0):
                tracking.topics[str(self.id)] = self.last_post_id
                tracking.save()
        else:
            #initialize topic tracking dict
            tracking.topics = {self.id: self.last_post_id}
            tracking.save()
    def clear_last_forum_post(self):
        """
        Prep for moving/deleting: detach this topic's latest post from any
        Forum.last_post references. No-op when the topic has no posts.
        """
        try:
            last_post = self.posts.latest()
        except Post.DoesNotExist:
            pass
        else:
            # BUG FIX: previously .clear() was invoked twice (once inside
            # the try body and again here); clearing once is sufficient.
            last_post.last_forum_post.clear()
class Post(models.Model):
    # A single message in a Topic; the rendered HTML is cached in body_html
    # on every save.
    topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
    user = models.ForeignKey(User, related_name='posts', verbose_name=_('User'))
    created = models.DateTimeField(_('Created'), auto_now_add=True)
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    updated_by = models.ForeignKey(User, verbose_name=_('Updated by'), blank=True, null=True)
    markup = models.CharField(_('Markup'), max_length=15, default=forum_settings.DEFAULT_MARKUP, choices=MARKUP_CHOICES)
    body = models.TextField(_('Message'), validators=[MaxLengthValidator(forum_settings.POST_MAX_LENGTH)])
    body_html = models.TextField(_('HTML version'))
    user_ip = models.IPAddressField(_('User IP'), blank=True, null=True)
    class Meta:
        ordering = ['created']
        get_latest_by = 'created'
        verbose_name = _('Post')
        verbose_name_plural = _('Posts')
        permissions = (
            ('fast_post', 'Can add posts without a time limit'),
            ('med_post', 'Can add posts at medium speed'),
            ('post_external_links', 'Can post external links'),
            ('delayed_delete', 'Can delete posts after a delay'),
        )
    def save(self, *args, **kwargs):
        # Re-render body -> body_html (optionally with smilies) before saving.
        self.body_html = convert_text_to_html(self.body, self.user.forum_profile)
        if forum_settings.SMILES_SUPPORT and self.user.forum_profile.show_smilies:
            self.body_html = smiles(self.body_html)
        super(Post, self).save(*args, **kwargs)
    def move_to(self, to_topic, delete_topic=True):
        """Move this post into ``to_topic``.

        When this was the only post of its topic and ``delete_topic`` is
        True, the emptied topic is deleted; in all cases the old forum's
        cached last_post/counters are refreshed.
        """
        delete_topic = (self.topic.posts.count() == 1) and delete_topic
        prev_topic = self.topic
        self.topic = to_topic
        self.save()
        self.set_counts()
        if delete_topic:
            prev_topic.delete()
        prev_topic.forum.set_last_post()
        prev_topic.forum.set_counts()
        prev_topic.forum.save()
    def delete(self, *args, **kwargs):
        """Delete (or soft-delete) this post and repair cached references.

        Deleting the head post deletes the whole topic. Otherwise, when
        SOFT_DELETE_POSTS is configured and applicable, the post is moved
        to the dustbin topic instead of being removed.
        """
        self_id = self.id
        head_post_id = self.topic.posts.order_by('created')[0].id
        forum = self.topic.forum
        topic = self.topic
        profile = self.user.forum_profile
        # Drop any Topic.last_post / Forum.last_post references to this post.
        self.last_topic_post.clear()
        self.last_forum_post.clear()
        # If we actually delete the post, we lose any reports that my have come from it. Also, there is no recovery (but I don't care about that as much right now)
        if self_id == head_post_id:
            topic.delete(*args, **kwargs)
        else:
            if forum_settings.SOFT_DELETE_POSTS and (self.topic != get_object_or_404(Topic, pk=forum_settings.SOFT_DELETE_POSTS) or not kwargs.get('staff', False)):
                self.topic = get_object_or_404(Topic, pk=forum_settings.SOFT_DELETE_POSTS)
                self.save()
            else:
                super(Post, self).delete()
            #if post was last in topic - remove topic
            try:
                topic.last_post = Post.objects.filter(topic__id=topic.id).latest()
            except Post.DoesNotExist:
                topic.last_post = None
            topic.post_count = Post.objects.filter(topic__id=topic.id).count()
            topic.save()
        forum.set_last_post()
        forum.save()
        self.set_counts()
    def set_counts(self):
        """
        Recounts this post's forum and topic post counts.
        """
        forum = self.topic.forum
        profile = self.user.forum_profile
        #TODO: for speedup - save/update only changed fields
        forum.set_counts()
        forum.save()
        profile.set_counts()
        profile.save()
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:post', [self.id])
    def get_mobile_url(self):
        return reverse('djangobb:mobile_post', args=[self.id])
    def summary(self):
        """First 50 characters of the body, with '...' when truncated."""
        LIMIT = 50
        tail = len(self.body) > LIMIT and '...' or ''
        return self.body[:LIMIT] + tail
    __unicode__ = summary
class Reputation(models.Model):
    # A single +1/-1 vote on a post; a given user may vote once per post
    # (enforced by unique_together below).
    from_user = models.ForeignKey(User, related_name='reputations_from', verbose_name=_('From'))
    to_user = models.ForeignKey(User, related_name='reputations_to', verbose_name=_('To'))
    post = models.ForeignKey(Post, related_name='post', verbose_name=_('Post'))
    time = models.DateTimeField(_('Time'), auto_now_add=True)
    # NOTE(review): default=0 is not one of SIGN_CHOICES (1 / -1) — confirm
    # whether an explicit sign is always supplied by callers.
    sign = models.IntegerField(_('Sign'), choices=SIGN_CHOICES, default=0)
    reason = models.TextField(_('Reason'), max_length=1000)
    class Meta:
        verbose_name = _('Reputation')
        verbose_name_plural = _('Reputations')
        unique_together = (('from_user', 'post'),)
    def __unicode__(self):
        return u'T[%d], FU[%d], TU[%d]: %s' % (self.post.id, self.from_user.id, self.to_user.id, unicode(self.time))
class ProfileManager(models.Manager):
    use_for_related_fields = True
    def get_query_set(self):
        """Return profiles, annotated with reputation aggregates when enabled.

        When REPUTATION_SUPPORT is on, adds ``reply_total``,
        ``reply_count_minus`` and ``reply_count_plus`` extra-select columns
        computed from the djangobb_forum_reputation table.
        """
        qs = super(ProfileManager, self).get_query_set()
        if forum_settings.REPUTATION_SUPPORT:
            qs = qs.extra(select={
                'reply_total': 'SELECT SUM(sign) FROM djangobb_forum_reputation WHERE to_user_id = djangobb_forum_profile.user_id GROUP BY to_user_id',
                'reply_count_minus': "SELECT SUM(sign) FROM djangobb_forum_reputation WHERE to_user_id = djangobb_forum_profile.user_id AND sign = '-1' GROUP BY to_user_id",
                'reply_count_plus': "SELECT SUM(sign) FROM djangobb_forum_reputation WHERE to_user_id = djangobb_forum_profile.user_id AND sign = '1' GROUP BY to_user_id",
            })
        return qs
class Profile(models.Model):
    # Per-user forum settings and state; created lazily on first access
    # through AutoOneToOneField.
    user = AutoOneToOneField(User, related_name='forum_profile', verbose_name=_('User'))
    status = models.CharField(_('Status'), max_length=30, blank=True)
    site = models.URLField(_('Site'), verify_exists=False, blank=True)
    jabber = models.CharField(_('Jabber'), max_length=80, blank=True)
    icq = models.CharField(_('ICQ'), max_length=12, blank=True)
    msn = models.CharField(_('MSN'), max_length=80, blank=True)
    aim = models.CharField(_('AIM'), max_length=80, blank=True)
    yahoo = models.CharField(_('Yahoo'), max_length=80, blank=True)
    location = models.CharField(_('Location'), max_length=30, blank=True)
    signature = models.TextField(_('Signature'), blank=True, default='', max_length=forum_settings.SIGNATURE_MAX_LENGTH)
    signature_html = models.TextField(_('Signature'), blank=True, default='', max_length=forum_settings.SIGNATURE_MAX_LENGTH)
    time_zone = models.FloatField(_('Time zone'), choices=TZ_CHOICES, default=float(forum_settings.DEFAULT_TIME_ZONE))
    language = models.CharField(_('Language'), max_length=5, default='', choices=settings.LANGUAGES)
    avatar = ExtendedImageField(_('Avatar'), blank=True, default='', upload_to=forum_settings.AVATARS_UPLOAD_TO, width=forum_settings.AVATAR_WIDTH, height=forum_settings.AVATAR_HEIGHT)
    theme = models.CharField(_('Theme'), choices=THEME_CHOICES, max_length=80, default='default')
    show_avatar = models.BooleanField(_('Show avatar'), blank=True, default=True)
    show_signatures = models.BooleanField(_('Show signatures'), blank=True, default=True)
    show_smilies = models.BooleanField(_('Show smilies'), blank=True, default=True)
    privacy_permission = models.IntegerField(_('Privacy permission'), choices=PRIVACY_CHOICES, default=1)
    auto_subscribe = models.BooleanField(_('Auto subscribe'), help_text=_("Auto subscribe all topics you have created or reply."), blank=True, default=False)
    markup = models.CharField(_('Default markup'), max_length=15, default=forum_settings.DEFAULT_MARKUP, choices=MARKUP_CHOICES)
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    objects = ProfileManager()
    class Meta:
        verbose_name = _('Profile')
        verbose_name_plural = _('Profiles')
    def last_post(self):
        """Datetime of this user's most recent post, or None."""
        posts = Post.objects.filter(user__id=self.user_id).order_by('-created')
        if posts:
            return posts[0].created
        else:
            return None
    def set_counts(self):
        """Recount the cached post total (does not save)."""
        self.post_count = Post.objects.filter(user=self.user).count()
class PostTracking(models.Model):
    """
    Model for tracking read/unread posts.
    In topics stored ids of topics and last_posts as dict.
    """
    user = AutoOneToOneField(User)
    # Mapping of topic id -> last seen post id (maintained by
    # Topic.update_read); None until first used.
    topics = JSONField(null=True, blank=True)
    # Everything posted before this timestamp counts as read.
    last_read = models.DateTimeField(null=True, blank=True)
    class Meta:
        verbose_name = _('Post tracking')
        verbose_name_plural = _('Post tracking')
    def __unicode__(self):
        return self.user.username
class Report(models.Model):
    # A user report flagging a post for moderator attention; "zapped"
    # means a moderator has handled it.
    reported_by = models.ForeignKey(User, related_name='reported_by', verbose_name=_('Reported by'))
    post = models.ForeignKey(Post, verbose_name=_('Post'))
    zapped = models.BooleanField(_('Zapped'), blank=True, default=False)
    zapped_by = models.ForeignKey(User, related_name='zapped_by', blank=True, null=True, verbose_name=_('Zapped by'))
    created = models.DateTimeField(_('Created'), blank=True)
    # BUG FIX: max_length must be an integer, not the string '1000'.
    reason = models.TextField(_('Reason'), blank=True, default='', max_length=1000)
    class Meta:
        verbose_name = _('Report')
        verbose_name_plural = _('Reports')
    def __unicode__(self):
        return u'%s %s' % (self.reported_by, self.zapped)
class Ban(models.Model):
    # A ban record. Saving a Ban deactivates the user's account; deleting
    # it reactivates the account.
    user = models.OneToOneField(User, verbose_name=_('Banned user'), related_name='ban_users')
    ban_start = models.DateTimeField(_('Ban start'), default=timezone.now)
    ban_end = models.DateTimeField(_('Ban end'), blank=True, null=True)
    reason = models.TextField(_('Reason'))
    class Meta:
        verbose_name = _('Ban')
        verbose_name_plural = _('Bans')
    def __unicode__(self):
        return self.user.username
    def save(self, *args, **kwargs):
        # Side effect: disables the banned user's account.
        self.user.is_active = False
        self.user.save()
        super(Ban, self).save(*args, **kwargs)
    def delete(self, *args, **kwargs):
        # Side effect: re-enables the account when the ban is lifted.
        self.user.is_active = True
        self.user.save()
        super(Ban, self).delete(*args, **kwargs)
class Attachment(models.Model):
    # A file attached to a post. ``hash`` is a download token derived from
    # the primary key and SECRET_KEY, filled in on first save.
    post = models.ForeignKey(Post, verbose_name=_('Post'), related_name='attachments')
    size = models.IntegerField(_('Size'))
    content_type = models.CharField(_('Content type'), max_length=255)
    path = models.CharField(_('Path'), max_length=255)
    name = models.TextField(_('Name'))
    hash = models.CharField(_('Hash'), max_length=40, blank=True, default='', db_index=True)
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # First save assigns the primary key the hash depends on, hence
        # the second save when the hash is still empty.
        super(Attachment, self).save(*args, **kwargs)
        if not self.hash:
            self.hash = sha1(str(self.id) + settings.SECRET_KEY).hexdigest()
            super(Attachment, self).save(*args, **kwargs)
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:forum_attachment', [self.hash])
    def get_absolute_path(self):
        """Absolute filesystem path of the stored file."""
        return os.path.join(settings.MEDIA_ROOT, forum_settings.ATTACHMENT_UPLOAD_TO,
                            self.path)
#------------------------------------------------------------------------------
class Poll(models.Model):
    # An optional poll attached to a topic; ``users`` records who voted.
    topic = models.ForeignKey(Topic)
    question = models.CharField(max_length=200)
    choice_count = models.PositiveSmallIntegerField(default=1,
        help_text=_("How many choices are allowed simultaneously."),
    )
    active = models.BooleanField(default=True,
        help_text=_("Can users vote to this poll or just see the result?"),
    )
    deactivate_date = models.DateTimeField(null=True, blank=True,
        help_text=_("Point of time after this poll would be automatic deactivated"),
    )
    users = models.ManyToManyField(User, blank=True, null=True,
        help_text=_("Users who has voted this poll."),
    )
    def auto_deactivate(self):
        """Deactivate (and save) the poll once deactivate_date has passed."""
        if self.active and self.deactivate_date:
            now = timezone.now()
            if now > self.deactivate_date:
                self.active = False
                self.save()
    def __unicode__(self):
        return self.question
class PollChoice(models.Model):
    # One selectable answer of a Poll; ``votes`` is a running tally.
    poll = models.ForeignKey(Poll, related_name="choices")
    choice = models.CharField(max_length=200)
    votes = models.IntegerField(default=0, editable=False)
    def percent(self):
        """This choice's share of all votes in its poll, as a percentage."""
        if not self.votes:
            return 0.0
        result = PollChoice.objects.filter(poll=self.poll).aggregate(aggregates.Sum("votes"))
        votes_sum = result["votes__sum"]
        return float(self.votes) / votes_sum * 100
    def __unicode__(self):
        return self.choice
#------------------------------------------------------------------------------
class PostStatusManager(models.Manager):
    """Manager helpers for creating and batch-processing PostStatus rows."""
    def create_for_post(self, post, **kwargs):
        """Create a PostStatus snapshot for ``post``.

        Request metadata may be supplied via ``HTTP_USER_AGENT``,
        ``HTTP_REFERER`` and ``permalink`` keyword arguments; anything
        missing is stored as None.
        """
        meta = {
            "user_agent": kwargs.get("HTTP_USER_AGENT", None),
            "referrer": kwargs.get("HTTP_REFERER", None),
            "permalink": kwargs.get("permalink", None),
        }
        return self.create(
            post=post, topic=post.topic, forum=post.topic.forum, **meta)
    def _status_for(self, post):
        # Fetch the post's existing status, or lazily create one.
        try:
            return post.poststatus
        except PostStatus.DoesNotExist:
            return self.create_for_post(post)
    def review_posts(self, posts, certainly_spam=False):
        """Run the spam-review workflow over every post in ``posts``."""
        for post in posts:
            self._status_for(post).review(certainly_spam=certainly_spam)
    def delete_user_posts(self, posts):
        """Mark each post as user-deleted (moves it to the spam dustbin)."""
        for post in posts:
            self._status_for(post).filter_user_deleted()
    def undelete_user_posts(self, posts):
        """Restore previously user-deleted posts."""
        for post in posts:
            self._status_for(post).filter_user_undeleted()
    def review_new_posts(self):
        """Review every still-unreviewed status and return that queryset."""
        unreviewed = self.filter(state=PostStatus.UNREVIEWED)
        for post_status in unreviewed:
            post_status.review()
        return unreviewed
class PostStatus(models.Model):
    """
    Keeps track of the status of posts for moderation purposes.

    ``state`` is a django-fsm state machine; the ``@transition``-decorated
    methods below are the only valid state changes. Posts judged to be spam
    are moved into a dedicated dustbin topic/forum rather than deleted.
    """
    # Valid values of ``state``.
    UNREVIEWED = 'unreviewed'
    USER_DELETED = 'user_deleted'
    FILTERED_SPAM = 'filtered_spam'
    FILTERED_HAM = 'filtered_ham'
    MARKED_SPAM = 'marked_spam'
    MARKED_HAM = 'marked_ham'
    # Maximum body size (bytes) submitted to Akismet (see to_akismet_content).
    AKISMET_MAX_SIZE = 1024*250
    post = models.OneToOneField(Post, db_index=True)
    state = FSMField(default=UNREVIEWED, db_index=True)
    topic = models.ForeignKey(Topic) # Original topic
    forum = models.ForeignKey(Forum) # Original forum
    user_agent = models.CharField(max_length=200, blank=True, null=True)
    referrer = models.CharField(max_length=200, blank=True, null=True)
    permalink = models.CharField(max_length=200, blank=True, null=True)
    objects = PostStatusManager()
    # Lazily-filled caches for the spam dustbin objects.
    spam_category = None
    spam_forum = None
    spam_topic = None
    def _get_spam_dustbin(self):
        # Resolve (creating if necessary) the category/forum/topic that
        # receive spam. NOTE(review): the throwaway local `_` shadows the
        # ugettext alias, but only inside this method.
        if self.spam_category is None:
            self.spam_category, _ = Category.objects.get_or_create(
                name=forum_settings.SPAM_CATEGORY_NAME)
        if self.spam_forum is None:
            self.spam_forum, _ = Forum.objects.get_or_create(
                category=self.spam_category,
                name=forum_settings.SPAM_FORUM_NAME)
        if self.spam_topic is None:
            filterbot = User.objects.get_by_natural_key("filterbot")
            self.spam_topic, _ = Topic.objects.get_or_create(
                forum=self.spam_forum, name=forum_settings.SPAM_TOPIC_NAME,
                user=filterbot)
        return (self.spam_topic, self.spam_forum)
    def _undelete_post(self):
        """
        If the post is in the spam dustbin, move it back to its original location.
        """
        spam_topic, spam_forum = self._get_spam_dustbin()
        post = self.post
        topic = self.topic
        head = post.topic.head
        if post == head:
            # Move the original topic back to the original forum (either from
            # the dustbin, or from the spam dustbin)
            topic.move_to(self.forum)
        if topic != post.topic:
            # If the post was moved from its original topic, put it back now that
            # the topic is in place.
            post.move_to(topic, delete_topic=False)
    def _delete_post(self):
        """
        Move the post to the spam dustbin.
        """
        spam_topic, spam_forum = self._get_spam_dustbin()
        post = self.post
        topic = self.topic
        head = topic.head
        if post == head:
            # Head post: relocate the whole topic into the dustbin forum.
            topic.move_to(spam_forum)
        else:
            # Reply: move just this post into the dustbin topic.
            post.move_to(spam_topic)
    def to_akismet_data(self):
        """Build the metadata dict expected by the Akismet API calls."""
        post = self.post
        topic = self.topic
        user = post.user
        user_ip = post.user_ip
        comment_author = user.username
        user_agent = self.user_agent
        referrer = self.referrer
        permalink = self.permalink
        comment_date_gmt = post.created.isoformat(' ')
        comment_post_modified_gmt = topic.created.isoformat(' ')
        return {
            'user_ip': user_ip,
            'user_agent': user_agent,
            'comment_author': comment_author,
            'referrer': referrer,
            'permalink': permalink,
            'comment_type': 'comment',
            'comment_date_gmt': comment_date_gmt,
            'comment_post_modified_gmt': comment_post_modified_gmt
        }
    def to_akismet_content(self):
        """
        Truncate the post body to the largest allowed string size. Use size, not
        length, since the Akismet server checks size, not length.
        """
        return self.post.body.encode('utf-8')[:self.AKISMET_MAX_SIZE].decode('utf-8', 'ignore')
    def _comment_check(self):
        """
        Pass the associated post through Akismet if it's available. If it's not
        available return None. Otherwise return True or False.
        """
        if akismet_api is None:
            logger.warning("Skipping akismet check. No api.")
            return None
        data = self.to_akismet_data()
        content = self.to_akismet_content()
        is_spam = None
        try:
            is_spam = akismet_api.comment_check(content, data)
        except AkismetError as e:
            try:
                # try again, in case of timeout
                is_spam = akismet_api.comment_check(content, data)
            except Exception as e:
                logger.error(
                    "Error while checking Akismet", exc_info=True, extra={
                        "post": self.post, "post_id": self.post.id,
                        "content_length": len(content)})
                is_spam = None
        except Exception as e:
            logger.error(
                "Error while checking Akismet", exc_info=True, extra={
                    "post": self.post, "post_id": self.post.id,
                    "content_length": len(content)})
            is_spam = None
        return is_spam
    def _submit_comment(self, report_type):
        """
        Report this post to Akismet as spam or ham. Raises an exception if it
        fails. report_type is 'spam' or 'ham'. Used by report_spam/report_ham.
        """
        if akismet_api is None:
            logger.error("Can't submit to Akismet. No API.")
            return None
        data = self.to_akismet_data()
        content = self.to_akismet_content()
        if report_type == "spam":
            akismet_api.submit_spam(content, data)
        elif report_type == "ham":
            akismet_api.submit_ham(content, data)
        else:
            raise NotImplementedError(
                "You're trying to report an unsupported comment type.")
    def _submit_spam(self):
        """
        Report this post to Akismet as spam.
        """
        self._submit_comment("spam")
    def _submit_ham(self):
        """
        Report this post to Akismet as ham.
        """
        self._submit_comment("ham")
    def is_spam(self):
        """
        Condition used by the FSM. Return True if the Akismet API is available
        and returns a positive. Otherwise return False or None.
        """
        is_spam = self._comment_check()
        if is_spam is None:
            return False
        else:
            return is_spam
    def is_ham(self):
        """
        Inverse of is_spam.
        """
        is_spam = self._comment_check()
        if is_spam is None:
            return False
        else:
            return not is_spam
    @transition(
        field=state, source=UNREVIEWED, target=FILTERED_SPAM,
        save=True, conditions=[is_spam])
    def filter_spam(self):
        """
        Akismet detected this post is spam, move it to the dustbin and report it.
        """
        self._delete_post()
    @transition(
        field=state, source=UNREVIEWED, target=FILTERED_HAM,
        save=True, conditions=[is_ham])
    def filter_ham(self):
        """
        Akismet detected this post as ham. Don't do anything (except change state).
        """
        pass
    @transition(
        field=state, source=[UNREVIEWED, FILTERED_HAM, MARKED_HAM], target=USER_DELETED,
        save=True)
    def filter_user_deleted(self):
        """
        Post is not marked spam by akismet, but user has been globally deleted,
        putting this into the spam dusbin.
        """
        self._delete_post()
    @transition(
        field=state, source=[FILTERED_SPAM, MARKED_SPAM], target=MARKED_HAM,
        save=True)
    def mark_ham(self):
        """
        Either Akismet returned a false positive, or a moderator accidentally
        marked this as spam. Tell Akismet that this is ham, undelete it.
        """
        self._submit_ham()
        self._undelete_post()
    @transition(
        field=state, source=[FILTERED_HAM, MARKED_HAM], target=MARKED_SPAM,
        save=True)
    def mark_spam(self):
        """
        Akismet missed this, or a moderator accidentally marked it as ham. Tell
        Akismet that this is spam.
        """
        self._submit_spam()
        self._delete_post()
    @transition(
        field=state, source=USER_DELETED, target=UNREVIEWED,
        save=True)
    def filter_user_undeleted(self):
        """
        Post is not marked spam by akismet, but user has been globally deleted,
        putting this into the spam dusbin.
        """
        self._undelete_post()
    def review(self, certainly_spam=False):
        """
        Process this post, used by the manager and the spam-hammer. The
        ``certainly_spam`` argument is used to force mark as spam/delete the
        post, no matter what status Akismet returns.
        """
        if can_proceed(self.filter_spam):
            self.filter_spam()
        elif can_proceed(self.filter_ham):
            self.filter_ham()
            if certainly_spam:
                self.mark_spam()
        else:
            # Neither transition could run (e.g. already reviewed).
            if certainly_spam:
                self._delete_post()
            logger.warn(
                "Couldn't filter post.", exc_info=True, extra={
                    'post_id': self.post.id, 'content_length': len(self.post.body)})
# Keep denormalised counters/caches up to date whenever a Post or Topic is
# saved; the handlers live in djangobb_forum.signals.
from .signals import post_saved, topic_saved
post_save.connect(post_saved, sender=Post, dispatch_uid='djangobb_post_save')
post_save.connect(topic_saved, sender=Topic, dispatch_uid='djangobb_topic_save') | unknown | codeparrot/codeparrot-clean | ||
---
# Second play of the fact-caching integration test: run after the play that
# called set_fact with cacheable=yes, and verify which facts did (and did
# not) survive in the fact cache, for both short-form and FQCN set_fact.
- name: A second playbook run with fact caching enabled
hosts: localhost
tasks:
- name: show ansible_foobar fact
debug:
var: ansible_foobar
- name: assert ansible_foobar is correct value when read from cache
assert:
that:
- ansible_foobar == 'foobar_from_set_fact_cacheable'
- name: show ansible_foobar_not_cached fact
debug:
var: ansible_foobar_not_cached
- name: assert ansible_foobar_not_cached is not cached
assert:
that:
- ansible_foobar_not_cached is undefined
- name: show fact_not_cached fact
debug:
var: fact_not_cached
- name: assert fact_not_cached is not cached
assert:
that:
- fact_not_cached is undefined
- name: show ansible_foobar_fqcn fact (FQCN)
debug:
var: ansible_foobar_fqcn
- name: assert ansible_foobar_fqcn is correct value when read from cache (FQCN)
assert:
that:
- ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable'
- name: show ansible_foobar_fqcn_not_cached fact (FQCN)
debug:
var: ansible_foobar_fqcn_not_cached
- name: assert ansible_foobar_fqcn_not_cached is not cached (FQCN)
assert:
that:
- ansible_foobar_fqcn_not_cached is undefined
- name: show fact_not_cached_fqcn fact (FQCN)
debug:
var: fact_not_cached_fqcn
- name: assert fact_not_cached_fqcn is not cached (FQCN)
assert:
that:
- fact_not_cached_fqcn is undefined | unknown | github | https://github.com/ansible/ansible | test/integration/targets/set_fact/set_fact_cached_2.yml |
# -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft® Word® documents.

    *New in Pygments 0.6.*

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `fontface`
        The used font family, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']
    unicodeoutput = False
    def __init__(self, **options):
        r"""
        Additional options accepted:

        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...
        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''
    def _escape(self, text):
        # Backslash and braces are RTF control characters; escape them.
        return text.replace('\\', '\\\\') \
                   .replace('{', '\\{') \
                   .replace('}', '\\}')
    def _escape_text(self, text):
        # empty strings, should give a small performance improvment
        if not text:
            return ''
        # escape text
        text = self._escape(text)
        # Pick a single-byte fallback encoding for the ANSI representation
        # of non-ASCII characters (multi-byte encodings can't serve here).
        if self.encoding in ('utf-8', 'utf-16', 'utf-32'):
            encoding = 'iso-8859-15'
        else:
            encoding = self.encoding or 'iso-8859-15'
        buf = []
        for c in text:
            if ord(c) > 128:
                # Emit \u<decimal> with an ANSI fallback character.
                ansic = c.encode(encoding, 'ignore') or '?'
                if ord(ansic) > 128:
                    ansic = '\\\'%x' % ord(ansic)
                else:
                    ansic = c
                buf.append(r'\ud{\u%d%s}' % (ord(c), ansic))
            else:
                buf.append(str(c))
        return ''.join(buf).replace('\n', '\\par\n')
    def format_unencoded(self, tokensource, outfile):
        # rtf 1.8 header
        outfile.write(r'{\rtf1\ansi\deff0'
                      r'{\fonttbl{\f0\fmodern\fprq1\fcharset0%s;}}'
                      r'{\colortbl;' % (self.fontface and
                                        ' ' + self._escape(self.fontface) or
                                        ''))
        # convert colors and save them in a mapping to access them later.
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    outfile.write(r'\red%d\green%d\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write(r'}\f0')
        # highlight stream
        for ttype, value in tokensource:
            # fall back to the nearest ancestor token type that has a style
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append(r'\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append(r'\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append(r'\b')
            if style['italic']:
                buf.append(r'\i')
            if style['underline']:
                buf.append(r'\ul')
            if style['border']:
                buf.append(r'\chbrdr\chcfpat%d' %
                           color_mapping[style['border']])
            start = ''.join(buf)
            if start:
                outfile.write('{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write('}')
        outfile.write('}')
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the relevant methods to generate a
time-frequency coherence spectrogram from a pair of time-series.
"""
from multiprocessing import (Process, Queue as ProcessQueue)
from math import ceil
from numpy import zeros
from .spectrogram import (Spectrogram, SpectrogramList)
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
def _from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
                     window=None, **kwargs):
    """Build a time-frequency coherence `Spectrogram` from two `TimeSeries`.

    One coherence spectrum is computed per ``stride`` seconds of data and
    the resulting spectra are stacked as the rows of the output
    `Spectrogram` (single-process implementation).
    """
    # harmonise sampling rates by downsampling the faster series
    if ts1.sample_rate.to('Hertz') != ts2.sample_rate.to('Hertz'):
        sampling = min(ts1.sample_rate.value, ts2.sample_rate.value)
        if ts1.sample_rate.value == sampling:
            ts2 = ts2.resample(sampling)
        else:
            ts1 = ts1.resample(sampling)
    else:
        sampling = ts1.sample_rate.value
    # fall back to one FFT per stride and zero overlap
    if fftlength is None:
        fftlength = stride
    if overlap is None:
        overlap = 0
    samples_per_step = int(stride * sampling)
    # dimensions of the output array
    nsteps = int(ts1.size // samples_per_step)
    nfreqs = int(fftlength * sampling // 2 + 1)
    out = Spectrogram(zeros((nsteps, nfreqs)), epoch=ts1.epoch, dt=stride,
                      f0=0, df=1/fftlength, copy=True, unit='coherence')
    if not nsteps:
        return out
    # fill each row with the coherence of the matching strides
    for step in range(nsteps):
        start = samples_per_step * step
        stop = start + samples_per_step
        step_coherence = ts1[start:stop].coherence(
            ts2[start:stop], fftlength=fftlength, overlap=overlap,
            window=window, **kwargs)
        out.value[step] = step_coherence.value
    return out
def from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
                    window=None, nproc=1, **kwargs):
    """Calculate the coherence `Spectrogram` between two `TimeSeries`.

    Parameters
    ----------
    ts1, ts2 : :class:`~gwpy.timeseries.TimeSeries`
        input time-series pair to process.
    stride : `float`
        number of seconds in single PSD (column of spectrogram).
    fftlength : `float`
        number of seconds in single FFT.
    overlap : `int`, optional, default: fftlength
        number of seconds of overlap between FFTs, defaults to no overlap
    window : `timeseries.window.Window`, optional, default: `None`
        window function to apply to timeseries prior to FFT.
    nproc : `int`, default: ``1``
        maximum number of independent frame reading processes, default
        is set to single-process file reading.

    Returns
    -------
    spectrogram : :class:`~gwpy.spectrogram.Spectrogram`
        time-frequency coherence spectrogram as generated from the
        input time-series.
    """
    # format FFT parameters
    if fftlength is None:
        fftlength = stride / 2.
    # get size of spectrogram
    nsteps = int(ts1.size // (stride * ts1.sample_rate.value))
    nproc = min(nsteps, nproc)
    # single-process return
    if nsteps == 0 or nproc == 1:
        return _from_timeseries(ts1, ts2, stride, fftlength=fftlength,
                                overlap=overlap, window=window, **kwargs)

    # wrap spectrogram generator for use in a child process; exceptions
    # are passed back through the queue so the parent can re-raise them
    def _specgram(queue_, tsa, tsb):
        try:
            queue_.put(_from_timeseries(tsa, tsb, stride, fftlength=fftlength,
                                        overlap=overlap, window=window,
                                        **kwargs))
        except Exception as exc:  # pylint: disable=broad-except
            queue_.put(exc)

    # otherwise build process list
    # use float division so the per-process step count rounds *up*;
    # Python 2 integer division would floor first, and ceil() of an
    # already-floored int could drop trailing strides from the result
    stepperproc = int(ceil(nsteps / float(nproc)))
    # number of samples handed to each process; cast to int so the values
    # are valid slice indices (sample_rate.value is a float)
    nsamp = [int(stepperproc * ts.sample_rate.value * stride)
             for ts in (ts1, ts2)]
    queue = ProcessQueue(nproc)
    processlist = []
    for i in range(nproc):
        process = Process(target=_specgram,
                          args=(queue, ts1[i * nsamp[0]:(i + 1) * nsamp[0]],
                                ts2[i * nsamp[1]:(i + 1) * nsamp[1]]))
        process.daemon = True
        processlist.append(process)
        process.start()
        # stop spawning once the input is fully covered
        if ((i + 1) * nsamp[0]) >= ts1.size:
            break
    # collect results, re-raising any exception from a child process
    data = []
    for process in processlist:
        result = queue.get()
        if isinstance(result, Exception):
            raise result
        else:
            data.append(result)
    # and block until all children have exited
    for process in processlist:
        process.join()
    # format and return, sorted into time order before joining
    out = SpectrogramList(*data)
    out.sort(key=lambda spec: spec.epoch.gps)
    return out.join()
from collections import namedtuple
from nose.tools import assert_equals, assert_true
from numpy import arange, array_equal, expand_dims, isclose
from test_utils import LocalTestCase
from thunder.rdds.imgblocks.strategy import PaddedBlockingStrategy, SimpleBlockingStrategy
MockImage = namedtuple("MockImage", "dims nrecords dtype")
class TestSimpleSplitCalculation(LocalTestCase):
    """Tests for SimpleBlockingStrategy split and block-size calculations.

    NOTE(review): this file appears to target Python 2 (builtin ``reduce``
    and integer ``/`` are used elsewhere in the module) — confirm before
    running under Python 3.
    """
    @staticmethod
    def _run_tst_splitCalc(blockSize, image, expectedSplits, expectedSize, testIdx=-1):
        # Build a strategy for the requested block size, then check both
        # the per-dimension split counts and the average block size.
        strategy = SimpleBlockingStrategy.generateFromBlockSize(image, blockSize)
        splits = strategy.splitsPerDim
        avgSize = strategy.calcAverageBlockSize()
        assert_equals(tuple(expectedSplits), tuple(splits),
                      msg="Failure in test %i, expected splits %s != actual splits %s" %
                          (testIdx, tuple(expectedSplits), tuple(splits)))
        # block sizes are approximate, so compare with a relative tolerance
        assert_true(isclose(expectedSize, avgSize, rtol=0.001),
                    msg="Failure in test %i, expected avg size %g not close to actual size %g" %
                        (testIdx, expectedSize, avgSize))
    def test_splitCalc(self):
        # Each tuple: (blockSize, image, expected splitsPerDim, expected avg size).
        # Block sizes may be given in bytes or as a human-readable string.
        PARAMS = [
            (1, MockImage((2, 2, 2), 1, "uint8"), (2, 2, 2), 1),
            (2, MockImage((2, 2, 2), 2, "uint8"), (2, 2, 2), 2),
            (2, MockImage((2, 2, 2), 1, "uint16"), (2, 2, 2), 2),
            (800000, MockImage((200, 200, 30), 5, "uint32"), (1, 1, 30), 800000),
            ("150MB", MockImage((2048, 1060, 36), 1000, "uint8"), (1, 14, 36), 1.55e+08)]
        for testIdx, params in enumerate(PARAMS):
            TestSimpleSplitCalculation._run_tst_splitCalc(*params, testIdx=testIdx)
    def test_splitsAndPix(self):
        # Expected slices when blocking by a fixed number of pixels per block.
        def genSlicesForPix(pix, size):
            slices = []
            st = 0
            while st < size:
                en = min(st + pix, size)
                slices.append(slice(st, en, 1))
                st = en
            return slices
        # Expected slices when blocking by a fixed number of splits;
        # the remainder is spread one extra pixel at a time over the
        # leading blocks.
        def genSlicesForSplits(splits, size):
            slices = []
            blocksize = size / splits # integer division
            blockrem = size % splits
            st = 0
            while st < size:
                en = st + blocksize
                if blockrem > 0:
                    en += 1
                    blockrem -= 1
                en = min(en, size)
                slices.append(slice(st, en, 1))
                st = en
            return slices
        # units is "pix" (pixels per block) or "s" (splits per dimension);
        # exactly one of expPix / expSplits is set per case.
        Params = namedtuple("Params", "unitsPerDim units image padding expPix expSplits expSlices0")
        PARAMS = \
            [Params((5, 5, 1), "pix", MockImage((15, 15, 3), 2, "uint8"), 0, (5, 5, 1), None, genSlicesForPix(5, 15)),
             Params((5, 5, 2), "pix", MockImage((7, 7, 3), 2, "uint8"), 0, (5, 5, 2), None, genSlicesForPix(5, 7)),
             Params((2, 2, 2), "s", MockImage((7, 7, 3), 2, "uint8"), 0, None, (2, 2, 2), genSlicesForSplits(2, 7)),
             Params((5, 5, 1), "pix", MockImage((15, 15, 3), 2, "uint8"), 2, (5, 5, 1), None, genSlicesForPix(5, 15)),
             Params((5, 5, 2), "pix", MockImage((7, 7, 3), 2, "uint8"), 2, (5, 5, 2), None, genSlicesForPix(5, 7)),
             Params((2, 2, 2), "s", MockImage((7, 7, 3), 2, "uint8"), 2, None, (2, 2, 2), genSlicesForSplits(2, 7))]
        for params in PARAMS:
            # nonzero padding selects the padded strategy variant
            if params.padding:
                strat = PaddedBlockingStrategy(params.unitsPerDim, units=params.units, padding=params.padding)
            else:
                strat = SimpleBlockingStrategy(params.unitsPerDim, params.units)
            strat.setSource(params.image)
            if params.expPix:
                assert_equals(tuple(params.expPix), tuple(strat._pixPerDim))
            else:
                assert_true(strat._pixPerDim is None)
            if params.expSplits:
                assert_equals(tuple(params.expSplits), tuple(strat._splitsPerDim))
            else:
                assert_true(strat._splitsPerDim is None)
            # only the first dimension's slices are checked here
            assert_equals(params.expSlices0, strat._slices[0])
class TestBlockExtraction(LocalTestCase):
    """Tests for extracting (key, block) pairs from single image arrays."""
    # aryshape: full image shape; blockslices: per-dim slices of the block;
    # timepoint/ntimepoints: position within the time series; padding: extra
    # pixels around the block (scalar, applied to every dimension).
    ExtractParams = namedtuple("ExtractParams", "aryshape blockslices timepoint ntimepoints padding")
    PARAMS = [ExtractParams((2, 2), (slice(None), slice(0, 1)), 5, 10, 0),
              ExtractParams((12, 12), (slice(3, 6, 1), slice(6, 9, 1)), 5, 10, 0),
              ExtractParams((12, 12), (slice(3, 6, 1), slice(6, 9, 1)), 5, 10, 2)]
    def test_simpleBlockExtraction(self):
        for params in TestBlockExtraction.PARAMS:
            strategy = SimpleBlockingStrategy([1]*len(params.aryshape))  # dummy splits; not used here
            # total element count, used to build a ramp array for the image
            n = reduce(lambda x, y: x*y, params.aryshape)
            ary = arange(n, dtype='int16').reshape(params.aryshape)
            key, val = strategy.extractBlockFromImage(ary, params.blockslices, params.timepoint, params.ntimepoints)
            # expected key slices gain a leading length-1 time dimension
            expectedSlices = [slice(params.timepoint, params.timepoint+1, 1)] + list(params.blockslices)
            expectedAry = expand_dims(ary[params.blockslices], axis=0)
            assert_equals(params.timepoint, key.temporalKey)
            assert_equals(params.ntimepoints, key.origShape[0])
            assert_equals(tuple(params.aryshape), tuple(key.origShape[1:]))
            assert_equals(tuple(expectedSlices), tuple(key.imgSlices))
            assert_true(array_equal(expectedAry, val))
    def test_paddedBlockExtraction(self):
        for params in TestBlockExtraction.PARAMS:
            strategy = PaddedBlockingStrategy([1]*len(params.aryshape), params.padding)  # dummy splits; not used here
            n = reduce(lambda x, y: x*y, params.aryshape)
            ary = arange(n, dtype='int16').reshape(params.aryshape)
            key, val = strategy.extractBlockFromImage(ary, params.blockslices, params.timepoint, params.ntimepoints)
            expectedSlices = [slice(params.timepoint, params.timepoint+1, 1)] + list(params.blockslices)
            assert_equals(params.timepoint, key.temporalKey)
            assert_equals(params.ntimepoints, key.origShape[0])
            assert_equals(tuple(params.aryshape), tuple(key.origShape[1:]))
            assert_equals(tuple(expectedSlices), tuple(key.imgSlices))
            # padding may be a scalar or a per-dimension sequence; normalize
            # to a per-dimension list (len() raises TypeError for scalars)
            try:
                _ = len(params.padding)
                padding = list(params.padding)
            except TypeError:
                padding = [params.padding] * ary.ndim
            # Recompute the padded slices by hand: extend each slice by the
            # padding, clamped to the array bounds, and record where the
            # unpadded block sits inside the padded value.
            expectedPaddedSlices = []
            expectedValSlices = []
            for slise, pad, l in zip(params.blockslices, padding, ary.shape):
                paddedStart = max(0, slise.start - pad) if not (slise.start is None) else 0
                paddedEnd = min(l, slise.stop + pad) if not (slise.stop is None) else l
                actualPadStart = slise.start - paddedStart if not (slise.start is None) else 0
                actualPadEnd = paddedEnd - slise.stop if not (slise.stop is None) else 0
                expectedPaddedSlices.append(slice(paddedStart, paddedEnd, 1))
                expectedValSlices.append(slice(actualPadStart, (paddedEnd-paddedStart)-actualPadEnd, 1))
            expectedAry = expand_dims(ary[expectedPaddedSlices], axis=0)
            # both slice lists gain the leading length-1 time dimension
            expectedPaddedSlices = [slice(params.timepoint, params.timepoint+1, 1)] + expectedPaddedSlices
            expectedValSlices = [slice(0, 1, 1)] + expectedValSlices
            assert_equals(tuple(expectedPaddedSlices), tuple(key.padImgSlices))
            assert_equals(tuple(expectedValSlices), tuple(key.valSlices))
            assert_equals(tuple(expectedAry.shape), tuple(val.shape))
            assert_true(array_equal(expectedAry, val))
{
"bundles": [],
"resources": {
"includes": [
{
"condition": {
"typeReachable": "org.jline.terminal.TerminalBuilder"
},
"pattern": "\\QMETA-INF/services/org.jline.terminal.spi.JansiSupport\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.TerminalBuilder"
},
"pattern": "\\QMETA-INF/services/org.jline.terminal.spi.JnaSupport\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.jansi.JansiNativePty"
},
"pattern": "\\Qorg/fusesource/jansi/internal/native/Linux/x86_64/libjansi.so\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.jansi.JansiNativePty"
},
"pattern": "\\QMETA-INF/native/windows64/jansi.dll\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.jansi.JansiNativePty"
},
"pattern": "\\Qorg/fusesource/jansi/jansi.properties\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.jansi.JansiSupportImpl"
},
"pattern": "\\Qorg/fusesource/jansi/jansi.properties\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/capabilities.txt\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.Colors"
},
"pattern": "\\Qorg/jline/utils/colors.txt\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/ansi.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.DumbTerminal"
},
"pattern": "\\Qorg/jline/utils/dumb.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.DumbTerminal"
},
"pattern": "\\Qorg/jline/utils/dumb-color.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/rxvt.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/rxvt-basic.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/rxvt-unicode.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/rxvt-unicode-256color.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/screen.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/screen-256color.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.AbstractWindowsTerminal"
},
"pattern": "\\Qorg/jline/utils/windows.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.AbstractWindowsTerminal"
},
"pattern": "\\Qorg/jline/utils/windows-256color.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.terminal.impl.AbstractWindowsTerminal"
},
"pattern": "\\Qorg/jline/utils/windows-conemu.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/xterm.caps\\E"
},
{
"condition": {
"typeReachable": "org.jline.utils.InfoCmp"
},
"pattern": "\\Qorg/jline/utils/xterm-256color.caps\\E"
}
]
}
}
import unittest
from test import support
import time
resource = support.import_module('resource')
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
    """Targeted checks for historically problematic spots in ``resource``."""
    # NOTE(review): several tests rebind the builtin name ``max`` locally
    # (from getrlimit's (cur, max) tuple); harmless here but worth renaming.
    def test_args(self):
        # getrlimit takes exactly one argument, setrlimit exactly two.
        self.assertRaises(TypeError, resource.getrlimit)
        self.assertRaises(TypeError, resource.getrlimit, 42, 42)
        self.assertRaises(TypeError, resource.setrlimit)
        self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)
    def test_fsize_ismax(self):
        """RLIMIT_FSIZE round-trips through get/setrlimit without error."""
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            # RLIMIT_FSIZE not available on this platform
            pass
        else:
            # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
            # number on a platform with large file support. On these platforms,
            # we need to test that the get/setrlimit functions properly convert
            # the number to a C long long and that the conversion doesn't raise
            # an error.
            self.assertEqual(resource.RLIM_INFINITY, max)
            resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
    def test_fsize_enforced(self):
        """Exceeding a small RLIMIT_FSIZE raises IOError, not SIGXFSZ."""
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            pass
        else:
            # Check to see what happens when the RLIMIT_FSIZE is small.  Some
            # versions of Python were terminated by an uncaught SIGXFSZ, but
            # pythonrun.c has been fixed to ignore that exception.  If so, the
            # write() should return EFBIG when the limit is exceeded.
            # At least one platform has an unlimited RLIMIT_FSIZE and attempts
            # to change it raise ValueError instead.
            try:
                try:
                    resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
                    limit_set = True
                except ValueError:
                    limit_set = False
                f = open(support.TESTFN, "wb")
                try:
                    # fill the file right up to the 1024-byte limit ...
                    f.write(b"X" * 1024)
                    try:
                        # ... then one byte past it
                        f.write(b"Y")
                        f.flush()
                        # On some systems (e.g., Ubuntu on hppa) the flush()
                        # doesn't always cause the exception, but the close()
                        # does eventually.  Try flushing several times in
                        # an attempt to ensure the file is really synced and
                        # the exception raised.
                        for i in range(5):
                            time.sleep(.1)
                            f.flush()
                    except IOError:
                        # expected when the limit was actually set
                        if not limit_set:
                            raise
                    if limit_set:
                        # Close will attempt to flush the byte we wrote
                        # Restore limit first to avoid getting a spurious error
                        resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
                finally:
                    f.close()
            finally:
                # always restore the limit and remove the scratch file
                if limit_set:
                    resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
                support.unlink(support.TESTFN)
    def test_fsize_toobig(self):
        # Be sure that setrlimit is checking for really large values
        too_big = 10**50
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            pass
        else:
            try:
                resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
            except (OverflowError, ValueError):
                pass
            try:
                resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
            except (OverflowError, ValueError):
                pass
    def test_getrusage(self):
        """getrusage takes exactly one arg and accepts the RUSAGE_* constants."""
        self.assertRaises(TypeError, resource.getrusage)
        self.assertRaises(TypeError, resource.getrusage, 42, 42)
        usageself = resource.getrusage(resource.RUSAGE_SELF)
        usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
        # May not be available on all systems.
        try:
            usageboth = resource.getrusage(resource.RUSAGE_BOTH)
        except (ValueError, AttributeError):
            pass
        try:
            usage_thread = resource.getrusage(resource.RUSAGE_THREAD)
        except (ValueError, AttributeError):
            pass
    # Issue 6083: Reference counting bug
    def test_setrusage_refcount(self):
        try:
            limits = resource.getrlimit(resource.RLIMIT_CPU)
        except AttributeError:
            pass
        else:
            # A sequence whose __getitem__ allocates heavily, to expose
            # refcount bugs in setrlimit's argument conversion.
            class BadSequence:
                def __len__(self):
                    return 2
                def __getitem__(self, key):
                    if key in (0, 1):
                        return len(tuple(range(1000000)))
                    raise IndexError
            resource.setrlimit(resource.RLIMIT_CPU, BadSequence())
    def test_pagesize(self):
        """getpagesize returns a non-negative int."""
        pagesize = resource.getpagesize()
        self.assertIsInstance(pagesize, int)
        self.assertGreaterEqual(pagesize, 0)
def test_main(verbose=None):
    """Entry point used by regrtest: run the ResourceTest suite."""
    support.run_unittest(ResourceTest)
if __name__ == "__main__":
    test_main()
# -*- coding: utf-8 -*-
#
# Nova Release Notes documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 5 11:50:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nova Release Notes'
copyright = u'2015, Nova developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version strings come from the nova package itself so the built docs
# always match the code release (this import must live here because
# Sphinx execfile()s this configuration module — see the file header).
from nova.version import version_info as nova_version
# The short X.Y version.
version = nova_version.canonical_version_string()
# The full version, including alpha/beta/rc tags.
release = nova_version.version_string_with_vcs()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NovaReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NovaReleaseNotes.tex', u'Nova Release Notes Documentation',
u'Nova developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'novareleasenotes', u'Nova Release Notes Documentation',
[u'Nova developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NovaReleaseNotes', u'Nova Release Notes Documentation',
u'Nova developers', 'NovaReleaseNotes', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
from __future__ import unicode_literals
from future.builtins import range
from mezzanine.conf import settings
from mezzanine.core.managers import DisplayableManager
from mezzanine.utils.urls import home_slug
class PageManager(DisplayableManager):
    """Manager for Page models, adding login-required filtering and
    efficient ascendant-chain lookup by slug."""
    def published(self, for_user=None, include_login_required=False):
        """
        Override ``DisplayableManager.published`` to exclude
        pages with ``login_required`` set to ``True``. if the
        user is unauthenticated and the setting
        ``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED`` is ``False``.
        The extra ``include_login_required`` arg allows callers to
        override the ``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED``
        behaviour in special cases where they want to deal with the
        ``login_required`` field manually, such as the case in
        ``PageMiddleware``.
        """
        published = super(PageManager, self).published(for_user=for_user)
        # NOTE(review): ``is_authenticated()`` is called as a method —
        # this targets older Django where it was callable; confirm the
        # supported Django versions before changing.
        unauthenticated = for_user and not for_user.is_authenticated()
        if (unauthenticated and not include_login_required and
                not settings.PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED):
            published = published.exclude(login_required=True)
        return published
    def with_ascendants_for_slug(self, slug, **kwargs):
        """
        Given a slug, returns a list of pages from ascendants to
        descendants, that form the parent/child page relationships
        for that slug. The main concern is to do this in a single
        database query rather than querying the database for parents
        of a given page.
        Primarily used in ``PageMiddleware`` to provide the current
        page, which in the case of non-page views, won't match the
        slug exactly, but will likely match a page that has been
        created for linking to the entry point for the app, eg the
        blog page when viewing blog posts.
        Also used within ``Page.get_ascendants``, which gets called
        in the ``pages.views`` view, for building a list of possible
        templates that can be used for the page.
        If a valid chain of pages is found, we also assign the pages
        to the ``page._ascendants`` attr of the main/first/deepest
        page, so that when its ``get_ascendants`` method is called,
        the ascendants chain can be re-used without querying the
        database again. This occurs at least once, given the second
        use-case described above.
        """
        if slug == "/":
            slugs = [home_slug()]
        else:
            # Create a list of slugs within this slug,
            # eg: ['about', 'about/team', 'about/team/mike']
            parts = slug.split("/")
            slugs = ["/".join(parts[:i]) for i in range(1, len(parts) + 1)]
        # Find the deepest page that matches one of our slugs.
        # Sorting by "-slug" should ensure that the pages are in
        # descendant -> ascendant order.
        pages_for_user = self.published(**kwargs)
        pages = list(pages_for_user.filter(slug__in=slugs).order_by("-slug"))
        if not pages:
            return []
        # Check to see if the other pages retrieved form a valid path
        # in the page tree, i.e. pages[0].parent == pages[1],
        # pages[1].parent == pages[2], and so on. If they do, assign
        # the ascendants to the main/first/deepest page, so that it
        # can be re-used on calls to its get_ascendants method.
        pages[0]._ascendants = []
        for i, page in enumerate(pages):
            try:
                parent = pages[i + 1]
            except IndexError:
                # IndexError indicates that this is the last page in
                # the list, so it should have no parent.
                if page.parent_id:
                    break  # Invalid parent
            else:
                if page.parent_id != parent.id:
                    break  # Invalid parent
        else:
            # for/else: the loop completed without a break, so every
            # parent link checked out — cache the valid chain.
            # Valid parents
            pages[0]._ascendants = pages[1:]
        return pages
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2024-05-07 and added to Hugging Face Transformers on 2025-07-09.*
# DeepSeek-V2
## Overview
The DeepSeek-V2 model was proposed in [DeepSeek-V2: A Strong, Economical, and Efficient Mixture-of-Experts Language Model](https://huggingface.co/papers/2405.04434) by DeepSeek-AI Team.
The abstract from the paper is the following:
We present DeepSeek-V2, a strong Mixture-of-Experts (MoE) language model characterized by economical training and efficient inference. It comprises 236B total parameters, of which 21B are activated for each token, and supports a context length of 128K tokens. DeepSeek-V2 adopts innovative architectures including Multi-head Latent Attention (MLA) and DeepSeekMoE. MLA guarantees efficient inference through significantly compressing the Key-Value (KV) cache into a latent vector, while DeepSeekMoE enables training strong models at an economical cost through sparse computation. Compared with DeepSeek 67B, DeepSeek-V2 achieves significantly stronger performance, and meanwhile saves 42.5% of training costs, reduces the KV cache by 93.3%, and boosts the maximum generation throughput to 5.76 times. We pretrain DeepSeek-V2 on a high-quality and multi-source corpus consisting of 8.1T tokens, and further perform Supervised Fine-Tuning (SFT) and Reinforcement Learning (RL) to fully unlock its potential. Evaluation results show that, even with only 21B activated parameters, DeepSeek-V2 and its chat versions still achieve top-tier performance among open-source models.
This model was contributed by [VladOS95-cyber](https://github.com/VladOS95-cyber).
The original code can be found [here](https://huggingface.co/deepseek-ai/DeepSeek-V2).
### Usage tips
The model uses Multi-head Latent Attention (MLA) and DeepSeekMoE architectures for efficient inference and cost-effective training: MLA compresses the Key-Value cache into a latent vector, while DeepSeekMoE uses sparse expert computation. The model can be used for various language tasks after being pre-trained on 8.1 trillion tokens and going through Supervised Fine-Tuning and Reinforcement Learning stages.
## DeepseekV2Config
[[autodoc]] DeepseekV2Config
## DeepseekV2Model
[[autodoc]] DeepseekV2Model
- forward
## DeepseekV2ForCausalLM
[[autodoc]] DeepseekV2ForCausalLM
- forward
## DeepseekV2ForSequenceClassification
[[autodoc]] DeepseekV2ForSequenceClassification
    - forward
# frozen_string_literal: true
module Arel # :nodoc: all
module Nodes
class Descending < Ordering
def reverse
Ascending.new(expr)
end
def direction
:desc
end
def ascending?
false
end
def descending?
true
end
end
end
end
import { test } from '../../test';
// binding member expression shouldn't invalidate the property name
export default test({
	test({ assert, component, target }) {
		// the rendered <div> should have been bound onto `container.a`
		const div = target.querySelector('div');
		assert.equal(div, component.container.a);
		// NOTE(review): exactly one log entry is expected — presumably the
		// property-name expression is evaluated only once; confirm against
		// the component source
		assert.deepEqual(component.logs.length, 1);
	}
});
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING
from fastapi import Depends, HTTPException, Request, status
from itsdangerous import BadSignature, URLSafeSerializer
from sqlalchemy import select
from airflow.api_fastapi.auth.managers.models.resource_details import DagDetails
from airflow.api_fastapi.common.db.common import SessionDep
from airflow.api_fastapi.common.router import AirflowRouter
from airflow.api_fastapi.core_api.openapi.exceptions import create_openapi_http_exception_doc
from airflow.api_fastapi.core_api.security import requires_access_dag
from airflow.api_fastapi.logging.decorators import action_logging
from airflow.models.dag import DagModel
from airflow.models.dagbag import DagPriorityParsingRequest
if TYPE_CHECKING:
from airflow.api_fastapi.auth.managers.models.batch_apis import IsAuthorizedDagRequest
# Router for DAG-parsing endpoints; ``file_token`` is a signed, opaque reference
# to a DAG file (verified in the handler below).
dag_parsing_router = AirflowRouter(tags=["DAG Parsing"], prefix="/parseDagFile/{file_token}")
@dag_parsing_router.put(
    "",
    responses=create_openapi_http_exception_doc([status.HTTP_404_NOT_FOUND]),
    status_code=status.HTTP_201_CREATED,
    dependencies=[Depends(requires_access_dag(method="PUT")), Depends(action_logging())],
)
def reparse_dag_file(
    file_token: str,
    session: SessionDep,
    request: Request,
) -> None:
    """
    Request re-parsing a DAG file.

    ``file_token`` is a URL-safe signed payload containing the bundle name and
    the file location relative to the bundle. A ``DagPriorityParsingRequest``
    row is queued so the DAG processor re-parses the file.

    Raises HTTP 404 if the token signature is invalid or no DAG is registered
    for the referenced file.
    """
    # Tokens are signed with the app's secret key; an invalid signature is
    # reported as "File not found" rather than revealing signature details.
    secret_key = request.app.state.secret_key
    auth_s = URLSafeSerializer(secret_key)
    try:
        payload = auth_s.loads(file_token)
    except BadSignature:
        raise HTTPException(status.HTTP_404_NOT_FOUND, "File not found")
    bundle_name = payload["bundle_name"]
    relative_fileloc = payload["relative_fileloc"]
    # Build one authorization request per DAG defined in this file; built for
    # the auth manager (batch API), and also used to detect an unknown file.
    requests: Sequence[IsAuthorizedDagRequest] = [
        {"method": "PUT", "details": DagDetails(id=dag_id)}
        for dag_id in session.scalars(
            select(DagModel.dag_id).where(
                DagModel.bundle_name == bundle_name, DagModel.relative_fileloc == relative_fileloc
            )
        )
    ]
    if not requests:
        # Valid token but no DAGs map to this file -> treat as missing.
        raise HTTPException(status.HTTP_404_NOT_FOUND, "File not found")
    # Queue the file for priority re-parsing by the DAG processor.
    parsing_request = DagPriorityParsingRequest(bundle_name=bundle_name, relative_fileloc=relative_fileloc)
    session.add(parsing_request)
# coding: utf-8
from __future__ import unicode_literals
import base64
import json
import os
from .common import InfoExtractor
from ..aes import aes_cbc_decrypt
from ..compat import compat_ord
from ..utils import (
bytes_to_intlist,
ExtractorError,
float_or_none,
intlist_to_bytes,
srt_subtitles_timecode,
strip_or_none,
urljoin,
)
class ADNIE(InfoExtractor):
    """Extractor for Anime Digital Network (animedigitalnetwork.fr) videos."""
    IE_DESC = 'Anime Digital Network'
    _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
        'md5': 'e497370d847fd79d9d4c74be55575c7a',
        'info_dict': {
            'id': '7778',
            'ext': 'mp4',
            'title': 'Blue Exorcist - Kyôto Saga - Épisode 1',
            'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',
        }
    }
    _BASE_URL = 'http://animedigitalnetwork.fr'
    def _get_subtitles(self, sub_path, video_id):
        """Download, decrypt and convert the site's subtitles.

        Returns a youtube-dl subtitles dict mapping language codes to a list
        of {'ext', 'data'} entries (both JSON and SRT renditions), or None
        when subtitles are unavailable.
        """
        if not sub_path:
            return None
        enc_subtitles = self._download_webpage(
            urljoin(self._BASE_URL, sub_path),
            video_id, fatal=False, headers={
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:53.0) Gecko/20100101 Firefox/53.0',
            })
        if not enc_subtitles:
            return None
        # Decryption scheme mirrored from the site's player JS:
        # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
        # The first 24 base64 characters decode to the AES-CBC IV; the rest is
        # the ciphertext; the key below is hard-coded (taken from that JS).
        dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
            bytes_to_intlist(base64.b64decode(enc_subtitles[24:])),
            bytes_to_intlist(b'\x1b\xe0\x29\x61\x38\x94\x24\x00\x12\xbd\xc5\x80\xac\xce\xbe\xb0'),
            bytes_to_intlist(base64.b64decode(enc_subtitles[:24]))
        ))
        # Strip block-cipher padding: the last byte gives the pad length
        # (PKCS#7-style), then parse the plaintext as JSON.
        subtitles_json = self._parse_json(
            dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),
            None, fatal=False)
        if not subtitles_json:
            return None
        subtitles = {}
        for sub_lang, sub in subtitles_json.items():
            # Build an SRT rendition from the cue list; entries missing any of
            # start/end/text are skipped.
            srt = ''
            for num, current in enumerate(sub):
                start, end, text = (
                    float_or_none(current.get('startTime')),
                    float_or_none(current.get('endTime')),
                    current.get('text'))
                if start is None or end is None or text is None:
                    continue
                srt += os.linesep.join(
                    (
                        '%d' % num,
                        '%s --> %s' % (
                            srt_subtitles_timecode(start),
                            srt_subtitles_timecode(end)),
                        text,
                        os.linesep,
                    ))
            # Site-specific code 'vostf' (original voice, French subs) maps to 'fr'.
            if sub_lang == 'vostf':
                sub_lang = 'fr'
            subtitles.setdefault(sub_lang, []).extend([{
                'ext': 'json',
                'data': json.dumps(sub),
            }, {
                'ext': 'srt',
                'data': srt,
            }])
        return subtitles
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds two JS objects: playerConfig (required) and
        # videoInfo (optional fallback metadata).
        player_config = self._parse_json(self._search_regex(
            r'playerConfig\s*=\s*({.+});', webpage, 'player config'), video_id)
        video_info = {}
        video_info_str = self._search_regex(
            r'videoInfo\s*=\s*({.+});', webpage,
            'video info', fatal=False)
        if video_info_str:
            video_info = self._parse_json(
                video_info_str, video_id, fatal=False) or {}
        options = player_config.get('options') or {}
        metas = options.get('metas') or {}
        title = metas.get('title') or video_info['title']
        # Stream links may be inline, or behind a second request ('linksurl').
        links = player_config.get('links') or {}
        if not links:
            links_url = player_config['linksurl']
            links_data = self._download_json(urljoin(
                self._BASE_URL, links_url), video_id)
            links = links_data.get('links') or {}
        formats = []
        for format_id, qualities in links.items():
            if not isinstance(qualities, dict):
                continue
            # Each quality points at a load balancer that returns the actual
            # m3u8 playlist location.
            for load_balancer_url in qualities.values():
                load_balancer_data = self._download_json(
                    load_balancer_url, video_id, fatal=False) or {}
                m3u8_url = load_balancer_data.get('location')
                if not m3u8_url:
                    continue
                m3u8_formats = self._extract_m3u8_formats(
                    m3u8_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=format_id, fatal=False)
                # 'vf' = French dub; tag its formats with the language.
                if format_id == 'vf':
                    for f in m3u8_formats:
                        f['language'] = 'fr'
                formats.extend(m3u8_formats)
        # Surface the site's own error message when no formats were found.
        error = options.get('error')
        if not formats and error:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': strip_or_none(metas.get('summary') or video_info.get('resume')),
            'thumbnail': video_info.get('image'),
            'formats': formats,
            'subtitles': self.extract_subtitles(player_config.get('subtitles'), video_id),
            'episode': metas.get('subtitle') or video_info.get('videoTitle'),
            'series': video_info.get('playlistTitle'),
        }
{
"properties": [
{
"name": "comment",
"value": "SBOM for MDB server product; this file should comply with the format specified here: https://cyclonedx.org/docs/1.5/json/#components_items_publisher; This file is still in development; see https://jira.mongodb.org/browse/DEVPROD-2623 for details."
}
],
"bomFormat": "CycloneDX",
"specVersion": "1.5",
"version": 1,
"components": [
{
"type": "library",
"name": "kafka",
"version": "v2.0.0",
"scope": "required",
"licenses": [
{
"expression": "BSD-3-Clause"
}
],
"cpe": "test_cpe",
"pedigree": {
"descendants": [
{
"type": "library",
"name": "kafka-fork",
"version": "v2.0.2"
}
]
},
"properties": [
{
"name": "internal:team_responsible",
"value": "server_security"
},
{
"name": "import_script_path",
"value": "buildscripts/tests/sbom_linter/inputs/kafka_valid_import.sh"
}
],
"evidence": {
"occurrences": [
{
"location": "src/third_party/kafka"
}
]
}
}
]
} | json | github | https://github.com/mongodb/mongo | buildscripts/tests/sbom_linter/inputs/sbom_pedigree_version_match.json |
from datetime import timedelta, datetime
class Switch(object):
    """
    Decide whether a notification email should be (re)sent for a message group.

    An email goes out when any of the following holds:
      * no email has ever been sent for the group,
      * more than ``WAKEUP_PERIOD`` seconds passed since the last email, or
      * more than ``AMOUNT_TRIGGER`` messages arrived for the group since the
        last email.

    Groups whose logger is listed in ``SKIP_LOGGERS`` never trigger emails.
    """
    AMOUNT_TRIGGER = 100
    WAKEUP_PERIOD = 12 * 30 * 24 * 3600  # roughly one year, in seconds
    SKIP_LOGGERS = ('http404',)

    def __init__(self, group, logger_name=None):
        self.last_seen = group.last_seen
        self.last_email_sent = group.last_email_sent
        self.group = group
        self.logger_name = logger_name

    def send_email(self):
        # Suppressed loggers never generate mail.
        if self.logger_name in self.SKIP_LOGGERS:
            return False
        # First email for this group: always send.
        if not self.last_email_sent:
            return True
        now = datetime.now()
        # Enough wall-clock time elapsed since the previous email?
        if now > self.last_email_sent + timedelta(seconds=self.WAKEUP_PERIOD):
            return True
        # Enough new messages accumulated since the previous email?
        recent = self.group.message_set.filter(
            datetime__gte=self.last_email_sent, datetime__lte=now)
        return recent.count() > self.AMOUNT_TRIGGER
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.