code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
import unittest
from test import test_support
import UserDict, random, string
import gc, weakref
class DictTest(unittest.TestCase):
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(dict(), {})
self.assert_(dict() is not {})
def test_literal_constructor(self):
# check literal constructor for different sized dicts (to exercise the BUILD_MAP oparg
for n in (0, 1, 6, 256, 400):
items = [(''.join([random.choice(string.letters)
for j in range(8)]),
i)
for i in range(n)]
random.shuffle(items)
dictliteral = '{' + ', '.join('%r: %d' % item for item in items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assert_(not {})
self.assert_({1: 2})
self.assert_(bool({}) is False)
self.assert_(bool({1: 2}) is True)
def test_keys(self):
d = {}
self.assertEqual(d.keys(), [])
d = {'a': 1, 'b': 2}
k = d.keys()
self.assert_(d.has_key('a'))
self.assert_(d.has_key('b'))
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = {}
self.assertEqual(d.values(), [])
d = {1:2}
self.assertEqual(d.values(), [2])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = {}
self.assertEqual(d.items(), [])
d = {1:2}
self.assertEqual(d.items(), [(1, 2)])
self.assertRaises(TypeError, d.items, None)
def test_has_key(self):
d = {}
self.assert_(not d.has_key('a'))
d = {'a': 1, 'b': 2}
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = {}
self.assert_(not ('a' in d))
self.assert_('a' not in d)
d = {'a': 1, 'b': 2}
self.assert_('a' in d)
self.assert_('b' in d)
self.assert_('c' not in d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = {}
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = {}
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = {}
self.assert_(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assert_(type(dictlike.fromkeys('a')) is dictlike)
self.assert_(type(dictlike().fromkeys('a')) is dictlike)
class mydict(dict):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assert_(isinstance(ud, UserDict.UserDict))
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
def test_copy(self):
d = {1:1, 2:2, 3:3}
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
d = {}
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = {'a' : 1, 'b' : 2}
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = {}
self.assert_(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assert_(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assert_(not(copymode < 0 and ta != tb))
self.assert_(not a)
self.assert_(not b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = {}
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
# verify longs/ints get same value when key > 32 bits (for 64-bit archs)
# see SF bug #689659
x = 4503599627370496L
y = 4503599627370496
h = {x: 'anything', y: 'something else'}
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
d = {}
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self):
d = {}
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = {}
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assert_(not ({} < {}))
self.assert_(not ({1: 2} < {1L: 2L}))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = {BadCmp(): 1}
d2 = {1: 1}
try:
d1 < d2
except Exc:
pass
else:
self.fail("< didn't raise Exc")
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertEqual(hasattr(dict, "__missing__"), False)
self.assertEqual(hasattr({}, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at a all
class D(dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
try:
f[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(dict):
pass
g = G()
try:
g[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
def test_tuple_keyerror(self):
# SF #1576657
d = {}
try:
d[(1,)]
except KeyError, e:
self.assertEqual(e.args, ((1,),))
else:
self.fail("missing KeyError")
def test_bad_key(self):
# Dictionary lookups should fail if __cmp__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in ['d[x2] = 2',
'z = d[x2]',
'x2 in d',
'd.has_key(x2)',
'd.get(x2)',
'd.setdefault(x2, 42)',
'd.pop(x2)',
'd.update({x2: 2})']:
try:
exec stmt in locals()
except CustomException:
pass
else:
self.fail("Statement didn't raise exception")
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
try:
d = {'a': 1/0, 'b': None, 'c': None, 'd': None, 'e': None,
'f': None, 'g': None, 'h': None}
except ZeroDivisionError:
pass
d = {}
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for dictiter objects
class C(object):
pass
iterators = (dict.iteritems, dict.itervalues, dict.iterkeys)
for i in iterators:
obj = C()
ref = weakref.ref(obj)
container = {obj: 1}
obj.x = i(container)
del obj, container
gc.collect()
self.assert_(ref() is None, "Cycle was not collected")
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = dict
class Dict(dict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = Dict
def test_main():
test_support.run_unittest(
DictTest,
GeneralMappingTests,
SubclassMappingTests,
)
if __name__ == "__main__":
test_main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of MSM.
MSM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MSM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MSM. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
logger = logging.getLogger( __name__ )
import threading
from core.database import Invoice, PaymentType
from msmgui.widgets.base import ScopedDatabaseObject
class DirectDebitExporter( threading.Thread, ScopedDatabaseObject ):
def __init__( self, output_file, formatter, update_step=25 ):
self.logger = logging.getLogger(__name__)
threading.Thread.__init__( self )
ScopedDatabaseObject.__init__( self )
self._output_file = output_file
self._formatter = formatter
self._update_step = update_step
def run(self):
self.logger.info("Hole Rechnungen aus Datenbank...")
# add settings to the local session
all_invoices = list(Invoice.get_all(session=self.session))
invoices = []
for invoice in all_invoices:
if (invoice.value_left > 0 and
invoice.contract is not None and
invoice.contract.bankaccount is not None and
invoice.contract.paymenttype == PaymentType.DirectWithdrawal):
invoices.append(invoice)
num_invoices = len(invoices)
self.logger.info("1 Rechnung geholt!" if num_invoices == 1 else "{} Rechnungen geholt!".format(num_invoices))
self.logger.info("Exportiere Daten aus 1 Rechnung..." if num_invoices == 1 else "Exportiere Daten aus {} Rechnungen...".format(num_invoices))
for work_done, output in enumerate(self._formatter.write(invoices,
self._output_file),
start=1):
if (not self._update_step or
(work_done % self._update_step) == 0 or
work_done in (0, 1)):
self.logger.info("Exportiere %d von %s", work_done,
num_invoices)
self._session.expunge_all()
self._session.remove()
self.logger.info("Fertig! 1 Datensatz exportiert." if work_done == 1
else
"Fertig! {} Datensätze exportiert.".format(work_done)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module exposes one public function Login(), that given the password of
the Code Jam contestant should log him in and return a cookie."""
import mimetypes
from lib import error
class MultipartData(object):
"""Class to manage multipart data in HTTP requests."""
def __init__(self, boundary):
"""Initialize the object data empty and store the used boundary.
Args:
boundary: Boundary used to separate multipart data elements.
"""
self.data = []
self.boundary = boundary
def __str__(self):
"""Convert this multipart data to a readable string.
Returns:
A string with the body of the HTTP packet that will contain the multipart
data.
"""
return "\r\n".join(self.data + ['--' + self.boundary + '--', ''])
def _GetContentType(self, filename):
"""Guess the content type of a file given its name.
Args:
filename: Name of the file whose mimetype should be guessed.
Returns:
The guessed mimetype for the file, or 'application/octet-stream' if no
guess could be made.
"""
guessed_type = mimetypes.guess_type(filename)[0]
return guessed_type if guessed_type != None else 'application/octet-stream'
def AddFile(self, name, filename):
"""Add a file's contents to this multipart data.
Args:
name: Name of the element to add to the multipart data.
filename: Name of the file with the contents to add to the multipart data.
Raises:
error.InternalError: If a problem occurs when reading the file.
"""
try:
# Read the data from the specified file.
file = open(filename, 'rb')
file_data = file.read()
file.close()
# Append the metadata and then the read file data. Finally, complete with
# a closing boundary.
self.data.append('--' + self.boundary)
self.data.append('Content-Disposition: form-data; name="{0}"; '
'filename="{1}"'.format(name, filename))
self.data.append('Content-Type: {0}'.format(
self._GetContentType(filename)))
self.data.append('')
self.data.append(file_data)
except IOError as e:
raise error.InternalError('I/O error while reading file "{0}": '
'{1}.\n'.format(filename, e))
def AddString(self, name, value):
"""Add a string value to this multipart data.
Args:
name: Name of the element to add to the multipart data.
value: String with the contents to add to the multipart data.
"""
# Append the field metadata and then the value. Finally, complete with a
# closing boundary.
self.data.append('--' + self.boundary);
self.data.append('Content-Disposition: form-data; name="{0}"'.format(name))
self.data.append('')
self.data.append(str(value)) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
from utils import DefaultBoxes, Encoder, COCODetection
from base_model import Loss
from utils import SSDTransformer
from ssd300 import SSD300
import torch
from torch.utils.data import DataLoader
import time
import numpy as np
import os
# necessary pytorch imports
import torch.utils.data.distributed
import torch.distributed as dist
from torch.autograd import Variable
# Apex imports
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
except ImportError:
raise ImportError("Please install APEX from https://github.com/nvidia/apex")
# DALI import
from coco_pipeline import COCOPipeline, DALICOCOIterator
from SSD import _C as C
def parse_args():
parser = ArgumentParser(description="Train Single Shot MultiBox Detector"
" on COCO")
parser.add_argument('--data', '-d', type=str, default='/coco/coco2017',
help='path to test and training data files')
parser.add_argument('--batch-size', '-b', type=int, default=128,
help='number of examples for each iteration')
#parser.add_argument('--checkpoint', type=str, default=None,
# help='path to model checkpoint file', required=True)
parser.add_argument('--backbone', type=str, choices=['vgg16', 'vgg16bn',
'resnet18', 'resnet34', 'resnet50'], default='resnet34')
parser.add_argument('--num-workers', type=int, default=3)
parser.add_argument('--fbu', type=int, default=1)
parser.add_argument('--use-fp16', action='store_true')
parser.add_argument('--use-train-dataset', action='store_true')
# Distributed stuff
parser.add_argument('--local_rank', default=0, type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
return parser.parse_args()
def dboxes300_coco():
figsize = 300
feat_size = [38, 19, 10, 5, 3, 1]
steps = [8, 16, 32, 64, 100, 300]
# use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
scales = [21, 45, 99, 153, 207, 261, 315]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
return dboxes
def test_coco(args):
# For testing purposes we have to use CUDA
use_cuda = True
# Setup multi-GPU if necessary
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
if args.distributed:
N_gpu = torch.distributed.get_world_size()
else:
N_gpu = 1
# Setup data, defaults
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
if args.use_train_dataset:
annotate = os.path.join(args.data, "annotations/instances_train2017.json")
coco_root = os.path.join(args.data, "train2017")
img_number = 118287
else:
annotate = os.path.join(args.data, "annotations/instances_val2017.json")
coco_root = os.path.join(args.data, "val2017")
img_number = 5000
pipe = COCOPipeline(args.batch_size, args.local_rank, coco_root,
annotate, N_gpu, num_threads=args.num_workers)
pipe.build()
test_run = pipe.run()
dataloader = DALICOCOIterator(pipe, img_number / N_gpu)
# Build the model
ssd300 = SSD300(81, backbone=args.backbone, model_path='', dilation=False)
"""
# Note: args.checkpoint is required, so this can never be false
if args.checkpoint is not None:
print("loading model checkpoint", args.checkpoint)
od = torch.load(args.checkpoint)
# remove proceeding 'module' from checkpoint
model = od["model"]
for k in list(model.keys()):
if k.startswith('module.'):
model[k[7:]] = model.pop(k)
ssd300.load_state_dict(model)
"""
ssd300.cuda()
ssd300.eval()
loss_func = Loss(dboxes)
loss_func.cuda()
# parallelize
if args.distributed:
ssd300 = DDP(ssd300)
if args.use_fp16:
ssd300 = network_to_half(ssd300)
if args.use_train_dataset and args.local_rank == 0:
print('Image 000000320612.jpg is in fact PNG and it will cause fail if ' +
'used with nvJPEGDecoder in coco_pipeline')
for epoch in range(2):
if epoch == 1 and args.local_rank == 0:
print("Performance computation starts")
s = time.time()
for i, data in enumerate(dataloader):
with torch.no_grad():
# Get data from pipeline
img = data[0][0][0]
bbox = data[0][1][0]
label = data[0][2][0]
label = label.type(torch.cuda.LongTensor)
bbox_offsets = data[0][3][0]
bbox_offsets = bbox_offsets.cuda()
# Encode labels
N = img.shape[0]
if bbox_offsets[-1].item() == 0:
print("No labels in batch")
continue
bbox, label = C.box_encoder(N, bbox, bbox_offsets, label,
encoder.dboxes.cuda(), 0.5)
# Prepare tensors for computing loss
M = bbox.shape[0] // N
bbox = bbox.view(N, M, 4)
label = label.view(N, M)
trans_bbox = bbox.transpose(1,2).contiguous()
gloc, glabel = Variable(trans_bbox, requires_grad=False), \
Variable(label, requires_grad=False)
if args.use_fp16:
img = img.half()
for _ in range(args.fbu):
ploc, plabel = ssd300(img)
ploc, plabel = ploc.float(), plabel.float()
loss = loss_func(ploc, plabel, gloc, glabel)
if epoch == 1 and args.local_rank == 0:
e = time.time()
print("Performance achieved: {:.2f} img/sec".format(img_number / (e - s)))
dataloader.reset()
def main():
args = parse_args()
torch.backends.cudnn.benchmark = True
test_coco(args)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
# This is equivalent to the aggregation suite, but runs with CBR with sampling CE enabled.
test_kind: js_test
selector:
roots:
- jstests/aggregation/**/*.js
exclude_files:
- jstests/aggregation/extras/*.js
- jstests/aggregation/data/*.js
# TODO SERVER-92589: Fails in all feature flags variant because SBE explain not supported
- jstests/aggregation/optimization/use_query_sort.js
exclude_with_any_tags:
# Profiler entries will have different shape in CBR
- requires_profiling
executor:
config:
shell_options:
eval: await import("jstests/libs/override_methods/detect_spawning_own_mongod.js");
fixture:
class: MongoDFixture
mongod_options:
set_parameters:
enableTestCommands: 1
featureFlagCostBasedRanker: true
internalQueryCBRCEMode: "samplingCE"
internalQuerySamplingBySequentialScan: true | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/query_cbr_sampling_aggregation_passthrough.yml |
twig:
cache: true
auto_reload: false | unknown | github | https://github.com/symfony/symfony | src/Symfony/Bundle/TwigBundle/Tests/DependencyInjection/Fixtures/yml/prod-cache.yml |
A return statement was found outside of a function body.
Erroneous code example:
```compile_fail,E0572
const FOO: u32 = return 0; // error: return statement outside of function body
fn main() {}
```
To fix this issue, just remove the return keyword or move the expression into a
function. Example:
```
const FOO: u32 = 0;
fn some_fn() -> u32 {
return FOO;
}
fn main() {
some_fn();
}
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0572.md |
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
# from __future__ import unicode_literals
import copy
import os
import psutil
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import threading
import unittest
import utils
import pexpect
try:
from pexpect.replwrap import REPLWrapper
except ImportError as e:
print("Could not import pexpect.replwrap: %s" % (str(e)))
print(" Need pexpect version 3.3, installed version: %s" % (
str(pexpect.__version__)))
print(" pexpect location: %s" % (str(pexpect.__file__)))
exit(1)
try:
import argparse
except ImportError:
print("Cannot import argparse: pip install argparse?")
exit(1)
try:
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
except ImportError:
print("Cannot import thrift: pip install thrift?")
exit(1)
'''Defaults that should be used in integration tests.'''
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_DIR = "/tmp/osquery-tests-python%d/" % (os.getuid())
CONFIG_NAME = CONFIG_DIR + "tests"
DEFAULT_CONFIG = {
"options": {
"database_path": "%s.db" % CONFIG_NAME,
"pidfile": "%s.pid" % CONFIG_NAME,
"config_path": "%s.conf" % CONFIG_NAME,
"extensions_socket": "%s.em" % CONFIG_NAME,
"extensions_interval": "1",
"extensions_timeout": "0",
"watchdog_level": "3",
"disable_logging": "true",
"disable_events": "true",
"force": "true",
},
"schedule": {},
}
'''Expect CONFIG to be set during Tester.main() to a python dict.'''
CONFIG = None
'''Expect ARGS to contain the argparsed namespace.'''
ARGS = None
class OsqueryUnknownException(Exception):
'''Exception thrown for unknown output from the shell'''
pass
class OsqueryException(Exception):
'''Exception thrown when the shell returns an error'''
pass
class OsqueryWrapper(REPLWrapper):
'''A pexpect wrapper intended for interacting with the osqueryi REPL'''
PROMPT = u'osquery> '
CONTINUATION_PROMPT = u' ...> '
ERROR_PREFIX = 'Error:'
def __init__(self, command='../osqueryi', args={}, env={}):
global CONFIG_NAME, CONFIG
options = copy.deepcopy(CONFIG)["options"]
for option in args.keys():
options[option] = args[option]
options["database_path"] += str(random.randint(1000, 9999))
command = command + " " + " ".join(["--%s=%s" % (k, v) for
k, v in options.iteritems()])
proc = pexpect.spawn(command, env=env)
super(OsqueryWrapper, self).__init__(
proc,
self.PROMPT,
None,
continuation_prompt=self.CONTINUATION_PROMPT)
def run_query(self, query):
'''Run a query, returning the results as a list of dictionaries
When unknown output is encountered, OsqueryUnknownException is thrown.
When osqueryi returns an error, OsqueryException is thrown.
'''
query = query + ';' # Extra semicolon causes no harm
result = self.run_command(query)
# On Mac, the query appears first in the string. Remove it if so.
result = re.sub(re.escape(query), '', result).strip()
result_lines = result.splitlines()
if len(result_lines) < 1:
raise OsqueryUnknownException(
'Unexpected output:\n %s' % result_lines)
if result_lines[0].startswith(self.ERROR_PREFIX):
raise OsqueryException(result_lines[0])
try:
header = result_lines[1]
columns = re.findall('[^ |]+', header)
rows = []
for line in result_lines[3:-1]:
values = re.findall('[^ |]+', line)
rows.append(
dict((col, val) for col, val in zip(columns, values)))
return rows
except:
raise OsqueryUnknownException(
'Unexpected output:\n %s' % result_lines)
class ProcRunner(object):
'''A helper class to open a subprocess and perform testing actions.
The subprocess is opened in a new thread and state is tracked using
this class wrapper.
'''
def __init__(self, name, path, _args=[], interval=0.02, silent=False):
self.started = False
self.proc = None
self.name = name
self.path = path
self.args = _args
self.interval = interval
self.silent = silent
self.retcode = -1
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
pid = 0
try:
if self.silent:
self.proc = subprocess.Popen(
[self.path] + self.args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
self.proc = subprocess.Popen([self.path] + self.args)
pid = self.proc.pid
self.started = True
except Exception as e:
print(utils.red("Process start failed:") + " %s" % self.name)
print(str(e))
sys.exit(1)
try:
while self.proc.poll() is None:
self.started = True
time.sleep(self.interval)
self.started = True
self.retcode = -1 if self.proc is None else self.proc.poll()
self.proc = None
except Exception as e:
return
def requireStarted(self, timeout=2):
delay = 0
while delay < timeout:
if self.started is True:
break
time.sleep(self.interval * 10)
delay += self.interval * 10
def getChildren(self, timeout=1):
'''Get the child pids.'''
self.requireStarted()
if not self.proc:
return []
try:
proc = psutil.Process(pid=self.proc.pid)
delay = 0
while len(proc.children()) == 0:
if delay > timeout:
return []
time.sleep(self.interval)
delay += self.interval
return [p.pid for p in proc.children()]
except:
pass
return []
@property
def code(self):
self.requireStarted()
return self.retcode
@property
def pid(self):
self.requireStarted()
return self.proc.pid if self.proc is not None else None
def kill(self, children=False):
self.requireStarted()
if children:
for child in self.getChildren():
try:
os.kill(child, 9)
except:
pass
if self.proc:
try:
os.kill(self.pid, 9)
except:
pass
self.proc = None
def isAlive(self, timeout=3):
self.requireStarted()
'''Check if the process is alive.'''
delay = 0
while self.proc is None:
if delay > timeout:
break
time.sleep(self.interval)
delay += self.interval
if self.proc is None:
return False
return self.proc.poll() is None
def isDead(self, pid, timeout=5):
self.requireStarted()
'''Check if the process was killed.
This is different than `isAlive` in that the timeout is an expectation
that the process will die before the timeout, `isAlive`'s timeout is
an expectation that the process will be scheduled before the timeout.
'''
try:
proc = psutil.Process(pid=pid)
except psutil.NoSuchProcess as e:
return True
delay = 0
while delay < timeout:
if not proc.is_running():
return True
time.sleep(self.interval)
delay += self.interval
return False
class ProcessGenerator(object):
'''Helper methods to patch into a unittest'''
generators = []
def setUp(self):
shutil.rmtree(CONFIG_DIR)
os.makedirs(CONFIG_DIR)
def _run_daemon(self, options={}, silent=False, options_only={},
overwrite={}):
'''Spawn an osquery daemon process'''
global ARGS, CONFIG_NAME, CONFIG
config = copy.deepcopy(CONFIG)
config["options"]["database_path"] += str(random.randint(1000, 9999))
config["options"][
"extensions_socket"] += str(random.randint(1000, 9999))
for option in options.keys():
config["options"][option] = options[option]
flags = ["--%s=%s" % (k, v) for k, v in config["options"].items()]
for option in options_only.keys():
config["options"][option] = options_only[option]
for key in overwrite:
config[key] = overwrite[key]
utils.write_config(config)
binary = os.path.join(ARGS.build, "osquery", "osqueryd")
daemon = ProcRunner("daemon", binary, flags, silent=silent)
daemon.options = config["options"]
self.generators.append(daemon)
return daemon
def _run_extension(self, timeout=0, path=None, silent=False):
'''Spawn an osquery extension (example_extension)'''
global ARGS, CONFIG
config = copy.deepcopy(CONFIG)
config["options"][
"extensions_socket"] += str(random.randint(1000, 9999))
binary = os.path.join(ARGS.build, "osquery", "example_extension.ext")
if path is not None:
config["options"]["extensions_socket"] = path
extension = ProcRunner("extension",
binary,
[
"--socket=%s" % config["options"][
"extensions_socket"],
"--verbose" if not silent else "",
"--timeout=%d" % timeout,
"--interval=%d" % 0,
],
silent=silent)
self.generators.append(extension)
extension.options = config["options"]
return extension
def tearDown(self):
'''When the unit test stops, clean up child-generated processes.
Iterate through the generated daemons and extensions, and kill -9 them.
Unittest should stop processes they generate, but on failure the
tearDown method will cleanup.
'''
for generator in self.generators:
if generator.pid is not None:
try:
os.kill(generator.pid, signal.SIGKILL)
except Exception as e:
pass
class EXClient(object):
'''An osquery Thrift/extensions python client generator.'''
transport = None
'''The instance transport object.'''
_manager = None
'''The client class's reference to run-time discovered manager.'''
_client = None
'''The client class's reference to run-time discovered client.'''
def __init__(self, path=None, uuid=None):
global CONFIG
'''Create a extensions client to a UNIX path and optional UUID.'''
if path is None:
path = CONFIG["options"]["extensions_socket"]
self.path = path
if uuid:
self.path += ".%s" % str(uuid)
transport = TSocket.TSocket(unix_socket=self.path)
transport = TTransport.TBufferedTransport(transport)
self.protocol = TBinaryProtocol.TBinaryProtocol(transport)
self.transport = transport
@classmethod
def setUp(cls, manager, client):
'''Set the manager and client modules to generate clients from.'''
cls._manager = manager
cls._client = client
def close(self):
if self.transport:
self.transport.close()
def open(self, timeout=0.1, interval=0.01):
'''Attempt to open the UNIX domain socket.'''
delay = 0
while delay < timeout:
try:
self.transport.open()
return True
except Exception as e:
pass
delay += interval
time.sleep(interval)
return False
def getEM(self):
'''Return an extension manager (osquery core) client.'''
if self._manager is None:
raise(Exception, "The EXClient must be 'setUp' with a manager")
return self._manager.Client(self.protocol)
def getEX(self):
'''Return an extension (osquery extension) client.'''
if self._client is None:
raise(Exception, "The EXClient must be 'setUp' with a client")
return self._client.Client(self.protocol)
class Autoloader(object):
'''Helper class to write a module or extension autoload file.'''
def __init__(self, autoloads=[]):
global CONFIG_DIR
self.path = CONFIG_DIR + "ext.load" + str(random.randint(1000, 9999))
with open(self.path, "w") as fh:
fh.write("\n".join(autoloads))
def __del__(self):
try:
os.unlink(self.path)
except:
pass
class TimeoutRunner(object):
def __init__(self, cmd=[], timeout_sec=1):
self.stdout = None
self.stderr = None
self.proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
kill_proc = lambda p: p.kill()
timer = threading.Timer(timeout_sec, kill_proc, [self.proc])
timer.start()
self.stdout, self.stderr = self.proc.communicate()
timer.cancel()
def flaky(gen):
exceptions = []
def attempt(this):
try:
worked = gen(this)
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
exceptions.append((e, fname, exc_tb.tb_lineno))
return False
def wrapper(this):
for i in range(3):
if attempt(this):
return True
i = 1
for exc in exceptions:
print("Test (attempt %d) %s::%s failed: %s (%s:%d)" % (
i,
this.__class__.__name__,
gen.__name__, str(exc[0]), exc[1], exc[2]))
i += 1
raise exceptions[0][0]
return wrapper
class Tester(object):
def __init__(self):
global ARGS, CONFIG, CONFIG_DIR
parser = argparse.ArgumentParser(description=(
"osquery python integration testing."
))
parser.add_argument(
"--config", metavar="FILE", default=None,
help="Use special options from a config."
)
parser.add_argument(
"--verbose", default=False, action="store_true",
help="Run daemons and extensions with --verbose"
)
# Directory structure options
parser.add_argument(
"--build", metavar="PATH", default=".",
help="Path to osquery build (./build/<sys>/)."
)
ARGS = parser.parse_args()
if not os.path.exists(ARGS.build):
print("Cannot find --build: %s" % ARGS.build)
print("You must first run: make")
exit(1)
# Write config
random.seed(time.time())
try:
shutil.rmtree(CONFIG_DIR)
except:
# Allow the tester to fail
pass
os.makedirs(CONFIG_DIR)
CONFIG = read_config(ARGS.config) if ARGS.config else DEFAULT_CONFIG
def run(self):
os.setpgrp()
unittest_args = [sys.argv[0]]
if ARGS.verbose:
unittest_args += ["-v"]
unittest.main(argv=unittest_args)
def expect(functional, expected, interval=0.01, timeout=4):
"""Helper function to run a function with expected latency"""
delay = 0
result = None
while result is None or len(result) != expected:
try:
result = functional()
if len(result) == expected:
break
except Exception as e:
print("Expect exception (%s): %s not %s" % (
str(e), str(functional), expected))
return None
if delay >= timeout:
return None
time.sleep(interval)
delay += interval
return result
class QueryTester(ProcessGenerator, unittest.TestCase):
def setUp(self):
self.binary = os.path.join(ARGS.build, "osquery", "osqueryi")
self.daemon = self._run_daemon({
# The set of queries will hammer the daemon process.
"disable_watchdog": True,
# Enable the 'hidden' flag "registry_exceptions" to prevent
# catching.
"registry_exceptions": True,
})
self.assertTrue(self.daemon.isAlive())
# The sets of example tests will use the extensions APIs.
self.client = EXClient(self.daemon.options["extensions_socket"])
expectTrue(self.client.open)
self.assertTrue(self.client.open())
self.em = self.client.getEM()
def tearDown(self):
self.client.close()
self.daemon.kill()
def _execute(self, query):
try:
result = self.em.query(query)
self.assertEqual(result.status.code, 0)
return result.response
except Exception as e:
print("General exception executing query: %s" % (
utils.lightred(query)))
raise e
def _execute_set(self, queries):
for example in queries:
start_time = time.time()
result = self._execute(example)
end_time = time.time()
duration_ms = int((end_time - start_time) * 1000)
if duration_ms > 2000:
# Query took longer than 2 seconds.
duration_ms = utils.lightred(duration_ms)
print("Query (%sms): %s, rows: %d" % (
duration_ms, example, len(result)))
def expectTrue(functional, interval=0.01, timeout=8):
"""Helper function to run a function with expected latency"""
delay = 0
while delay < timeout:
if functional():
return True
time.sleep(interval)
delay += interval
return False
def assertPermissions():
stat_info = os.stat('.')
if stat_info.st_uid != os.getuid():
print(utils.lightred("Will not load modules/extensions in tests."))
print(utils.lightred("Repository owner (%d) executer (%d) mismatch" % (
stat_info.st_uid, os.getuid())))
exit(1)
def getTestDirectory(base):
path = os.path.join(base, "test-dir" + str(random.randint(1000, 9999)))
try:
shutil.rmtree(path)
except:
pass
os.makedirs(path)
return path
def loadThriftFromBuild(build_dir):
'''Find and import the thrift-generated python interface.'''
thrift_path = build_dir + "/generated/gen-py"
try:
sys.path = [thrift_path, thrift_path + "/osquery"] + sys.path
from osquery import ExtensionManager, Extension
EXClient.setUp(ExtensionManager, Extension)
except ImportError as e:
print("Cannot import osquery thrift API from %s" % (thrift_path))
print("Exception: %s" % (str(e)))
print("You must first run: make")
exit(1) | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/conformance/statements/for-ofStatements/ES5For-of21.ts] ////
//// [ES5For-of21.ts]
for (let v of []) {
for (let _i of []) { }
}
//// [ES5For-of21.js]
"use strict";
for (var _a = 0, _b = []; _a < _b.length; _a++) {
var v = _b[_a];
for (var _c = 0, _d = []; _c < _d.length; _c++) {
var _i = _d[_c];
}
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/ES5For-of21(target=es5).js |
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_COPY_TENSOR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_COPY_TENSOR_H_
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class CopyTensor {
public:
typedef void (*CopyFunction)(
DeviceContext* send_dev_context, DeviceContext* recv_dev_context,
Device* src, Device* dst, const AllocatorAttributes src_alloc_attr,
const AllocatorAttributes dst_alloc_attr, const Tensor* input,
Tensor* output, int dev_to_dev_stream_index, StatusCallback done);
// Copies "input" to "output" between devices accessible to the
// local process via some DMA-like method. "edge_name" is the name
// of the tensor being copied, for debugging purposes. Depending on
// the type of devices and memory in use, the copy may be performed
// synchronously or asynchronously. 'done' will be invoked only
// after the copy is actually complete.
static void ViaDMA(absl::string_view edge_name,
DeviceContext* send_dev_context,
DeviceContext* recv_dev_context, Device* src, Device* dst,
const AllocatorAttributes src_alloc_attr,
const AllocatorAttributes dst_alloc_attr,
const Tensor* input, Tensor* output,
int dev_to_dev_stream_index, StatusCallback done,
bool sync_dst_compute = true);
// Object used to call Register() at static-initialization time.
// Note: This should only ever be used as a global-static object; no stack
// or heap instances.
class Registration {
public:
Registration(DeviceType sender_device_type, DeviceType receiver_device_type,
CopyFunction copy_function) {
TF_QCHECK_OK(Register(sender_device_type, receiver_device_type,
copy_function, /*is_pluggable_device=*/false));
}
};
// Register a function for copying between two specific DeviceTypes.
// Note: This should only be called via the constructor of
// CopyTensor::Registration or from PluggableDevice implementation.
static absl::Status Register(DeviceType sender_device_type,
DeviceType receiver_device_type,
CopyFunction copy_function,
bool is_pluggable_device);
};
void CopyDeviceToHost(const Tensor* input, Allocator* cpu_allocator,
Allocator* out_allocator, absl::string_view edge_name,
Device* src, Tensor* output,
DeviceContext* send_dev_context, StatusCallback done);
} // namespace tensorflow
#endif // TENSORFLOW_CORE_COMMON_RUNTIME_COPY_TENSOR_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/common_runtime/copy_tensor.h |
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from paste.auth.digest import *
from paste.wsgilib import raw_interactive
from paste.response import header_value
from paste.httpexceptions import *
from paste.httpheaders import AUTHORIZATION, WWW_AUTHENTICATE, REMOTE_USER
import os
def application(environ, start_response):
content = REMOTE_USER(environ)
start_response("200 OK",(('Content-Type', 'text/plain'),
('Content-Length', len(content))))
return content
realm = "tag:clarkevans.com,2005:testing"
def backwords(environ, realm, username):
""" dummy password hash, where user password is just reverse """
password = list(username)
password.reverse()
password = "".join(password)
return digest_password(realm, username, password)
application = AuthDigestHandler(application,realm,backwords)
application = HTTPExceptionHandler(application)
def check(username, password, path="/"):
""" perform two-stage authentication to verify login """
(status,headers,content,errors) = \
raw_interactive(application,path, accept='text/html')
assert status.startswith("401")
challenge = WWW_AUTHENTICATE(headers)
response = AUTHORIZATION(username=username, password=password,
challenge=challenge, path=path)
assert "Digest" in response and username in response
(status,headers,content,errors) = \
raw_interactive(application,path,
HTTP_AUTHORIZATION=response)
if status.startswith("200"):
return content
if status.startswith("401"):
return None
assert False, "Unexpected Status: %s" % status
def test_digest():
assert 'bing' == check("bing","gnib")
assert check("bing","bad") is None
#
# The following code uses sockets to test the functionality,
# to enable use:
#
# $ TEST_SOCKET py.test
#
if os.environ.get("TEST_SOCKET",""):
import urllib2
from paste.debug.testserver import serve
server = serve(application)
def authfetch(username,password,path="/",realm=realm):
server.accept(2)
import socket
socket.setdefaulttimeout(5)
uri = ("http://%s:%s" % server.server_address) + path
auth = urllib2.HTTPDigestAuthHandler()
auth.add_password(realm,uri,username,password)
opener = urllib2.build_opener(auth)
result = opener.open(uri)
return result.read()
def test_success():
assert "bing" == authfetch('bing','gnib')
def test_failure():
# urllib tries 5 more times before it gives up
server.accept(5)
try:
authfetch('bing','wrong')
assert False, "this should raise an exception"
except urllib2.HTTPError, e:
assert e.code == 401
def test_shutdown():
server.stop() | unknown | codeparrot/codeparrot-clean | ||
"""Compatibility wrapper for cProfile module.
This module maintains backward compatibility by importing from the new
profiling.tracing module.
"""
from profiling.tracing import run, runctx, Profile
__all__ = ["run", "runctx", "Profile"]
if __name__ == "__main__":
from profiling.tracing.__main__ import main
main() | python | github | https://github.com/python/cpython | Lib/cProfile.py |
import { test } from '../../test';
export default test({
html: '',
async test({ assert, component, target }) {
component.visible = true;
assert.htmlEqual(
target.innerHTML,
`
<div>a</div>
`
);
assert.equal(component.items[0].ref, target.querySelector('div'));
}
}); | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/tests/runtime-legacy/samples/binding-this-each-block-property/_config.js |
const SIZE: usize = 32;
#[repr(C)]
#[derive(Debug, Default)]
pub struct BracketStack {
/// Bracket stack to ensure properly balanced brackets.
bracket_stack: [u8; SIZE],
bracket_stack_len: usize,
}
impl BracketStack {
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.bracket_stack_len == 0
}
#[inline(always)]
pub fn push(&mut self, bracket: u8) -> bool {
if self.bracket_stack_len >= SIZE {
return false;
}
unsafe {
*self.bracket_stack.get_unchecked_mut(self.bracket_stack_len) = match bracket {
b'(' => b')',
b'[' => b']',
b'{' => b'}',
_ => std::hint::unreachable_unchecked(),
};
}
self.bracket_stack_len += 1;
true
}
#[inline(always)]
pub fn pop(&mut self, bracket: u8) -> bool {
if self.bracket_stack_len == 0 {
return false;
}
self.bracket_stack_len -= 1;
unsafe {
if *self.bracket_stack.get_unchecked(self.bracket_stack_len) != bracket {
return false;
}
}
true
}
#[inline(always)]
pub fn reset(&mut self) {
self.bracket_stack_len = 0;
}
} | rust | github | https://github.com/tailwindlabs/tailwindcss | crates/oxide/src/extractor/bracket_stack.rs |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.Readable;
import java.util.Map;
public class HeartbeatResponse extends AbstractResponse {
/**
* Possible error codes:
*
* GROUP_COORDINATOR_NOT_AVAILABLE (15)
* NOT_COORDINATOR (16)
* ILLEGAL_GENERATION (22)
* UNKNOWN_MEMBER_ID (25)
* REBALANCE_IN_PROGRESS (27)
* GROUP_AUTHORIZATION_FAILED (30)
*/
private final HeartbeatResponseData data;
public HeartbeatResponse(HeartbeatResponseData data) {
super(ApiKeys.HEARTBEAT);
this.data = data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(error());
}
@Override
public HeartbeatResponseData data() {
return data;
}
public static HeartbeatResponse parse(Readable readable, short version) {
return new HeartbeatResponse(new HeartbeatResponseData(readable, version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Create parts containing a Plainbox test collection known as a provider.
Plainbox is a toolkit consisting of python3 library, development tools,
documentation and examples. It is targeted at developers working on testing or
certification applications and authors creating tests for such applications.
More information: http://plainbox.readthedocs.org/en/latest/
To find out more about authoring a plainbox provider, see the following
documentation: http://plainbox.readthedocs.org/en/latest/author/providers.html
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
"""
import re
import snapcraft
from snapcraft import file_utils
class PlainboxProviderPlugin(snapcraft.BasePlugin):
def __init__(self, name, options, project):
super().__init__(name, options, project)
self.build_packages.extend(['intltool'])
def build(self):
super().build()
self.run(["python3", "manage.py", "build"])
self.run(["python3", "manage.py", "i18n"])
self.run([
"python3", "manage.py", "install", "--layout=relocatable",
"--prefix=/providers/{}".format(self.name),
"--root={}".format(self.installdir)])
# Fix all shebangs to use the in-snap python.
file_utils.replace_in_file(self.installdir, re.compile(r''),
re.compile(r'^#!.*python'),
r'#!/usr/bin/env python') | unknown | codeparrot/codeparrot-clean | ||
'''
Module defining setter methods for the UrbanScape class
'''
import numpy as np
#=========================
# |
# Distribution Setters |
# |
#=========================
def random_distribution(UrbanScape):
u = UrbanScape
u.rent = np.random.randint(u.rent_floor, u.rent_ceiling,(u.size,u.size))
def vertical_distribution(UrbanScape):
u = UrbanScape
gradient = float(u.rent_ceiling - u.rent_floor) / (u.size - 1)
g_multiplier = gradient / u.rent_ceiling
fr_ratio = float(u.rent_floor) / u.rent_ceiling
#loops through each row and modifies the rent by factor of gg
for i in range(u.size):
gg = fr_ratio + g_multiplier * i
u.rent[i] = u.rent[i] * gg
def diagonal_distribution(UrbanScape):
u = UrbanScape
gradient = float(u.rent_ceiling - u.rent_floor) / (u.size - 1)
g_multiplier = gradient / u.rent_ceiling
fr_ratio = float(u.rent_floor) / u.rent_ceiling
for i in range(u.size):
gg = float(fr_ratio + g_multiplier*i)
u.rent[i] = u.rent[i] * gg
for j in range(u.size):
gg = float(fr_ratio + g_multiplier*j)
u.rent[:,j] = u.rent[:,j] * gg
#Functions for calculating rent based on distance from a business district block
def distance_rent_function(quadrant_coords, bd_coords):
x_dist = float(quadrant_coords[0] - bd_coords[0])
y_dist = float(quadrant_coords[1] - bd_coords[1])
dist = np.sqrt(x_dist ** 2 + y_dist ** 2)
return dist
#block rent decays exponentially the further away it is from a business district
def exponential_rent_function(UrbanScape, distance, coord, dmax):
u = UrbanScape
i,j = coord
Lambda = np.log(float(u.rent_floor)/float(u.rent_ceiling)) * (-1/dmax)
rent_value = u.rent_ceiling * np.e**(-Lambda*distance)
u.rent[i][j] = rent_value
#block rent decreases linearly the further away it is from a business district
def linear_rent_function(UrbanScape, distance, coord, dmax):
u = UrbanScape
i,j = coord
Alpha = (float(u.rent_floor) - float(u.rent_ceiling)) / dmax
rent_value = u.rent_ceiling + (Alpha * distance)
u.rent[i][j] = rent_value
#Functions for setting an income distribution based on 'business districts'
#---- The approximate center of each quadrant of the urbanscape is a 'business district'
#---- The assumption is that rent and income decreases the further away a block is.
def businessdistricts_quadrant_distribution(UrbanScape, gradient_function=exponential_rent_function):
u = UrbanScape
bisector = int(np.ceil(u.size * 0.5)) #finds the bisecting block value for UrbanScape
lower_range = range(bisector) #lower range of block indices in UrbanScape
upper_range = range(bisector, u.size) #upper range of block indices in UrbanScape
district1 = [(i,j) for i in lower_range for j in lower_range]
district2 = [(i,j) for i in upper_range for j in lower_range]
district3 = [(i,j) for i in lower_range for j in upper_range]
district4 = [(i,j) for i in upper_range for j in upper_range]
d1_coords = (np.ceil(u.size * 0.25), np.ceil(u.size * 0.25))
d2_coords = (np.ceil(u.size * 0.75), np.ceil(u.size * 0.25))
d3_coords = (np.ceil(u.size * 0.25), np.ceil(u.size * 0.75))
d4_coords = (np.ceil(u.size * 0.75), np.ceil(u.size * 0.75))
d1_distances = [distance_rent_function(q, d1_coords) for q in district1]
d2_distances = [distance_rent_function(q, d2_coords) for q in district2]
d3_distances = [distance_rent_function(q, d3_coords) for q in district3]
d4_distances = [distance_rent_function(q, d4_coords) for q in district4]
dmax = max(d1_distances)
#for i in range(len(district1)):
for i in range( len(district1) ):
gradient_function(u, d1_distances[i], district1[i], dmax)
gradient_function(u, d2_distances[i], district2[i], dmax)
gradient_function(u, d3_distances[i], district3[i], dmax)
gradient_function(u, d4_distances[i], district4[i], dmax)
def centralbusinessdistrict_distribution(UrbanScape, gradient_function=exponential_rent_function):
u = UrbanScape
block_coords = [ (i,j) for i in range(u.size) for j in range(u.size) ]
district_coords = (np.ceil(u.size * 0.5), np.ceil(u.size * 0.5))
block_distances = [distance_rent_function(q, district_coords) for q in block_coords]
dmax = max(block_distances)
for i in range(len(block_coords)):
#u.linear_rent_function(block_distances[i], block_coords[i])
gradient_function(u, block_distances[i], block_coords[i], dmax)
#applies random variation to the rent values
def apply_variation(UrbanScape, var=0.25):
u = UrbanScape
rand_var = np.random.uniform( (1 - var), (1 + var), (u.size, u.size) )
u.rent = u.rent * rand_var
if __name__ == '__main__':
import urbanscape as us
u = us.UrbanScape(4, 5000, 200000, setDist=random_distribution)
print u.rent
print u.income | unknown | codeparrot/codeparrot-clean | ||
# Cross-compiling Swift for Windows with `clang`
This document describes how to cross compile Swift for Windows on a non-Windows
host. For more context on the status of Swift on Windows in general, see
[Getting Started with Swift on Windows](./Windows.md)
## 1. Set up Visual Studio environment variables
Building for Windows requires that the Visual Studio environment variables are
setup similar to the values on Windows. Currently, the runtime has been tested
to build against the Windows 10 SDK at revision 10.10.586.
```bash
# Visual Studio 2015 does not have VCToolsInstallDir, use VCINSTALLDIR's value
export UCRTVersion=10.0.10586.0
export UniversalCRTSdkDir=".../Windows Kits/10"
export VCToolsInstallDir=".../Microsoft Visual Studio/2017/Community"
```
## 2. Set up the `visualc` and `ucrt` modules
The `ucrt.modulemap` located at
`swift/stdlib/public/Platform/ucrt.modulemap` needs to be copied into
`${UniversalCRTSdkDir}/Include/${UCRTVersion}/ucrt` as `module.modulemap`. The
`visualc.modulemap` located at `swift/stdlib/public/Platform/visualc.modulemap`
needs to be copied into `${VCToolsInstallDir}/include` as `module.modulemap`
## 3. Configure the runtime to be built with the just built `clang`
Ensure that we use the tools from the just built LLVM and `clang` tools to
build the Windows SDK. You will need to pass a few extra options to cmake via
the `build-script` invocation to achieve this. You will need to expand out the
path where `llvm-ar` and `llvm-ranlib` are built. These are needed to correctly
build the static libraries. Note that cross-compiling will require the use of
`lld`. Ensure that `lld-link.exe` is available to clang via your path.
#### macOS
```bash
--extra-cmake-options=-DSWIFT_BUILD_RUNTIME_WITH_HOST_COMPILER=FALSE,\
-DCMAKE_AR=<path to llvm-ar>,\
-DCMAKE_RANLIB=<path to llvm-ranlib>,\
-DSWIFT_SDKS='OSX;WINDOWS'
```
#### Linux
For Linux, you will need to build the Linux SDK instead of the macOS SDK by
replacing the cmake option with `-DSWIFT_SDKS='LINUX;WINDOWS'`.
## 4. Build the Swift runtime and standard library with `ninja`
From the build directory, you can build the Swift runtime and standard library
for Windows using `ninja swiftCore-windows-x86_64`. | unknown | github | https://github.com/apple/swift | docs/WindowsCrossCompile.md |
from doajtest.helpers import DoajTestCase
from portality import models
import uuid, time
from random import randint
from portality.bll.doaj import DOAJ
from portality.bll.exceptions import ArticleMergeConflict
from datetime import datetime
class TestArticleMatch(DoajTestCase):
def setUp(self):
super(TestArticleMatch, self).setUp()
def tearDown(self):
super(TestArticleMatch, self).tearDown()
def test_01_same_fulltext(self):
"""Check duplication detection on articles with the same fulltext URL"""
# A list of various URLs to check matching on
ftus = [
"http://examplejournal.telfor.rs/Published/Vol1No1/Vol1No1_A5.pdf",
"http://www.sbe.deu.edu.tr/dergi/cilt15.say%C4%B12/06%20AKALIN.pdf",
"http://www.ujcem.med.sumdu.edu.ua/images/sampledata/2013/4/408_412_IV-020.pdf",
"http://www.psychologie-aktuell.com/fileadmin/download/ptam/1-2014_20140324/01_Geiser.pdf"
]
for ftu in ftus:
# make ourselves an example article
a = models.Article()
b = a.bibjson()
b.title = "Example article with a fulltext url"
b.add_url(ftu, urltype="fulltext")
a.save(blocking=True)
# create a replacement article
z = models.Article()
y = z.bibjson()
y.title = "Replacement article for fulltext url"
y.add_url(ftu, urltype="fulltext")
# determine if there's a duplicate
articleService = DOAJ.articleService()
d = articleService.get_duplicate(z)
assert d is not None
assert d.bibjson().title == "Example article with a fulltext url"
def test_02_different_fulltext(self):
"""Check that an article with different fulltext URLs is not considered a duplicate"""
# make ourselves an example article
a = models.Article()
b = a.bibjson()
b.title = "Example 2 article with a fulltext url"
b.add_url("http://www.sbe.deu.edu.tr/dergi/cilt15.say%C4%B12/06%20AKALIN.pdf", urltype="fulltext")
a.save(blocking=True)
# create another article
z = models.Article()
y = z.bibjson()
y.title = "Replacement article for fulltext url"
y.add_url("http://this.is/a/different/url", urltype="fulltext")
# determine if there's a duplicate
articleService = DOAJ.articleService()
d = articleService.get_duplicate(z)
assert d is None
def test_03_retrieve_multiple_conflict(self):
ftu = "http://www.sbe.deu.edu.tr/dergi/cilt15.say%C4%B12/06%20AKALIN.pdf"
# make ourselves a couple of example articles
a = models.Article()
b = a.bibjson()
b.title = "Example A article with a fulltext url"
b.add_url(ftu, urltype="fulltext")
a.save(blocking=True)
# Wait a second to ensure the timestamps are different
time.sleep(1.01)
a2 = models.Article()
b2 = a2.bibjson()
b2.title = "Example B article with a fulltext url"
b2.add_url(ftu, urltype="fulltext")
a2.save(blocking=True)
# create an article which should not be caught by the duplicate detection
not_duplicate = models.Article()
not_duplicate_bibjson = not_duplicate.bibjson()
not_duplicate_bibjson.title = "Example C article with a fulltext url"
not_duplicate_bibjson.add_url("http://this.is/a/different/url", urltype="fulltext")
not_duplicate.save(blocking=True)
# create a replacement article
z = models.Article()
y = z.bibjson()
y.title = "Replacement article for fulltext url"
y.add_url(ftu, urltype="fulltext")
# determine that there are multiple duplicates
articleService = DOAJ.articleService()
with self.assertRaises(ArticleMergeConflict):
d = articleService.get_duplicate(z)
# get the xwalk to determine all duplicates
# sort both results and expectations here to avoid false alarm
# we don't care about the order of duplicates
expected = [a, a2]
expected.sort(key=lambda x: datetime.strptime(x.last_updated, "%Y-%m-%dT%H:%M:%SZ"), reverse=True)
# determine if there's a duplicate
l = articleService.get_duplicates(z)
assert isinstance(l, list), l
assert l is not None
l.sort(key=lambda x: datetime.strptime(x.last_updated, "%Y-%m-%dT%H:%M:%SZ"), reverse=True)
assert expected == l
def test_04_with_doi_instead(self):
    """Detect a duplicate using the DOI field."""
    # Seed the index with the article that should be reported as a duplicate.
    original = models.Article()
    original_bib = original.bibjson()
    original_bib.title = "Example A article with a DOI"
    original_bib.add_identifier('doi', "10.doi/123")
    original.save(blocking=True)
    # A decoy with a different DOI must not be matched.
    decoy = models.Article()
    decoy_bib = decoy.bibjson()
    decoy_bib.title = "Example C article with a DOI"
    decoy_bib.add_identifier('doi', "10.doi/DIFFERENT")
    decoy.save(blocking=True)
    # Build (but do not save) the incoming replacement article.
    incoming = models.Article()
    incoming_bib = incoming.bibjson()
    incoming_bib.title = "Replacement article for DOI"
    incoming_bib.add_identifier('doi', "10.doi/123")
    # Exactly one duplicate should be found, matched on DOI alone.
    svc = DOAJ.articleService()
    matches = svc.get_duplicates(incoming)
    assert len(matches) == 1
    # Asking for a single duplicate yields the most recent match.
    best = svc.get_duplicate(incoming)
    assert best is not None
    assert best.bibjson().title == "Example A article with a DOI", best.bibjson().title
def test_05_full_doi(self):
    """ Test that we still detect duplicate DOIs when we have the full URI, not just the 10. """
    # make ourselves a couple of example articles
    a = models.Article()
    b = a.bibjson()
    b.title = "Example A article with a DOI"
    b.add_identifier('doi', "https://doi.org/10.doi/123")
    a.save(blocking=True)
    # create an article which should not be caught by the duplicate detection
    not_duplicate = models.Article()
    not_duplicate_bibjson = not_duplicate.bibjson()
    not_duplicate_bibjson.title = "Example C article with a DOI"
    not_duplicate_bibjson.add_identifier('doi', "https://doi.org/10.doi/DIFFERENT")
    not_duplicate.save(blocking=True)
    # create a replacement article; the scheme differs (http vs https) and the
    # match must still be made on the 10.* DOI suffix
    z = models.Article()
    y = z.bibjson()
    y.title = "Replacement article for DOI"
    y.add_identifier('doi', "http://doi.org/10.doi/123")
    # determine if there's a duplicate
    articleService = DOAJ.articleService()
    dups = articleService.get_duplicates(z)
    assert len(dups) == 1
    # Check when we ask for one duplicate we get the most recent duplicate.
    d = articleService.get_duplicate(z)
    assert d is not None
    assert d.bibjson().title == "Example A article with a DOI", d.bibjson().title
def test_06_merge_replaces_metadata(self):
    """Ensure that merging replaces metadata of a new article, but keeps its old id."""
    ftu = "http://www.sbe.deu.edu.tr/dergi/cilt15.say%C4%B12/06%20AKALIN.pdf"
    # two distinct explicit ids, so we can observe which one survives the merge
    id1 = uuid.uuid4().hex
    id2 = uuid.uuid4().hex
    assert id1 != id2
    a = models.Article()
    a.set_id(id1)
    b = a.bibjson()
    b.title = "Example A article with a fulltext url"
    b.abstract = "a bunch of text"
    b.add_url(ftu, urltype="fulltext")
    a2 = models.Article()
    a2.set_id(id2)
    b2 = a2.bibjson()
    b2.title = "Example B article with a fulltext url"
    b2.add_url(ftu, urltype="fulltext")
    # perform a merge, which updates article records of a2 based on a - including the id.
    assert a2.id == id2
    a2.merge(a)
    assert a2.id == id1
    # Check that we have the newer metadata
    assert a2.bibjson().title == "Example B article with a fulltext url"
    # a2 never set an abstract, and merge must not copy a's abstract over
    assert a2.bibjson().abstract is None
    # Create a 3rd article without an explicit id
    a3 = models.Article()
    b3 = a3.bibjson()
    b3.title = "Example C article with a fulltext url"
    b3.abstract = "a newer bunch of text"
    b3.add_url(ftu, urltype="fulltext")
    # merging adopts a2's id (which is already a's id) onto a3
    a3.merge(a2)
    assert a3.id == a2.id == a.id
    assert a3.bibjson().title == "Example C article with a fulltext url"
    assert a3.bibjson().abstract == "a newer bunch of text"
def test_07_both_duplication_criteria(self):
    """Check that an article is only reported once if it is duplicated by both DOI and fulltext URL"""
    # make ourselves an example article carrying BOTH duplication keys
    ftu = "http://www.sbe.deu.edu.tr/dergi/cilt15.say%C4%B12/06%20AKALIN.pdf"
    doi = "10.doi/123"
    a = models.Article()
    b = a.bibjson()
    b.title = "Example article with a fulltext url and a DOI"
    b.add_url(ftu, urltype="fulltext")
    b.add_identifier('doi', doi)
    a.save(blocking=True)
    # create another article matching on both criteria
    z = models.Article()
    y = z.bibjson()
    y.title = "Replacement article for fulltext url and a DOI"
    y.add_url(ftu, urltype="fulltext")
    y.add_identifier('doi', doi)
    # the duplicate must be reported exactly once, not once per criterion
    articleService = DOAJ.articleService()
    d = articleService.get_duplicates(z)
    # FIX: removed a leftover debug print(len(d)) that ran after the assert;
    # the count is now reported in the assertion message on failure instead.
    assert len(d) == 1, len(d)
    assert d[0].bibjson().title == "Example article with a fulltext url and a DOI"
def test_08_many_issns(self):
    """Test that a query with a LOT of ISSNs is still successful."""
    a = models.Article()
    b = a.bibjson()
    b.journal_issns = ["0000-0000"]
    b.title = "Example A article with a fulltext url"
    b.add_identifier(b.DOI, "10.1234/duplicate")
    a.save(blocking=True)

    def random_issn():
        # 8 random digits in 1-9, formatted as an NNNN-NNNN ISSN
        bits = []
        for i in range(8):
            bits.append(str(randint(1, 9)))
        return "".join(bits[:4]) + "-" + "".join(bits[4:])

    # NOTE(review): `issns` is built but not passed to duplicates() in the
    # visible lines - presumably consumed further down; verify against caller.
    issns = [random_issn() for _ in range(2000)] + ["0000-0000"]
    dupes = models.Article.duplicates(doi="10.1234/duplicate")
assert len(dupes) == 1 | unknown | codeparrot/codeparrot-clean | ||
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.nucliadb import NucliaDB
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"NucliaDB": "langchain_community.vectorstores.nucliadb"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level __getattr__ (PEP 562): delegates to ``_import_attribute`` so
    names listed in DEPRECATED_LOOKUP resolve to their langchain_community
    location while emitting a deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"NucliaDB",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/vectorstores/nucliadb.py |
"""Support for Genius Hub switch/outlet devices."""
from homeassistant.components.switch import SwitchDevice, DEVICE_CLASS_OUTLET
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import DOMAIN, GeniusZone
ATTR_DURATION = "duration"
GH_ON_OFF_ZONE = "on / off"
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
    """Set up the Genius Hub switch entities.

    Only runs via discovery (discovery_info is provided by the hub component);
    creates one GeniusSwitch per hub zone whose type is "on / off".
    """
    if discovery_info is None:
        return

    broker = hass.data[DOMAIN]["broker"]

    async_add_entities(
        [
            GeniusSwitch(broker, z)
            for z in broker.client.zone_objs
            if z.data["type"] == GH_ON_OFF_ZONE
        ]
    )
class GeniusSwitch(GeniusZone, SwitchDevice):
    """Representation of a Genius Hub switch."""

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return DEVICE_CLASS_OUTLET

    @property
    def is_on(self) -> bool:
        """Return the current state of the on/off zone.

        The zone is considered 'on' if & only if it is override/on (e.g. timer/on is 'off').
        """
        # NOTE(review): when mode is "override" this returns the raw
        # "setpoint" value (truthy/falsy) rather than a strict bool - confirm
        # callers only use it in a boolean context.
        return self._zone.data["mode"] == "override" and self._zone.data["setpoint"]

    async def async_turn_off(self, **kwargs) -> None:
        """Send the zone to Timer mode.

        The zone is deemed 'off' in this mode, although the plugs may actually be on.
        """
        await self._zone.set_mode("timer")

    async def async_turn_on(self, **kwargs) -> None:
        """Set the zone to override/on ({'setpoint': true}) for x seconds."""
await self._zone.set_override(1, kwargs.get(ATTR_DURATION, 3600)) | unknown | codeparrot/codeparrot-clean | ||
test_kind: parallel_fsm_workload_test
selector:
roots:
- jstests/concurrency/fsm_workloads/**/*.js
exclude_files:
# SERVER-30644 These tests create/drop/modify views which can deadlock on the ViewCatalog mutex if
# there's a concurrent operation which acquires a MODE_X lock.
- jstests/concurrency/fsm_workloads/view_catalog/view_catalog*.js
exclude_with_any_tags:
- uses_transactions
- requires_replication
- requires_sharding
- incompatible_with_concurrency_simultaneous
group_size: 10
group_count_multiplier: 2.5
executor:
archive:
hooks:
- ValidateCollections
tests: true
config: {}
hooks:
- class: ValidateCollections
shell_options:
global_vars:
TestData:
skipValidationOnNamespaceNotFound: false
- class: CleanupConcurrencyWorkloads
fixture:
class: MongoDFixture
mongod_options:
set_parameters:
# Increase the timeout of the cursor so that the cursor will continue to stay alive even
# when there is a delay in lock acquisition during a getMore command.
cursorTimeoutMillis: 3600000
enableTestCommands: 1
# We have historically had deadlocks occur due to lock acquisition issues involving the
# system running out of WiredTiger write tickets. We intentionally lower the number of
# WiredTiger write tickets available to below the maximum number of database clients to
# trigger this situation at least some of the time.
storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions"
wiredTigerConcurrentWriteTransactions: 64
roleGraphInvalidationIsFatal: 1 | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml |
{
"DOCS": {
"summary": "Returns documentary information about one, multiple or all commands.",
"complexity": "O(N) where N is the number of commands to look up",
"group": "server",
"since": "7.0.0",
"arity": -2,
"container": "COMMAND",
"function": "commandDocsCommand",
"command_flags": [
"LOADING",
"STALE",
"SENTINEL"
],
"acl_categories": [
"CONNECTION"
],
"command_tips": [
"NONDETERMINISTIC_OUTPUT_ORDER"
],
"reply_schema": {
"description": "A map where each key is a command name, and each value is the documentary information",
"type": "object",
"additionalProperties": false,
"patternProperties": {
"^.*$": {
"type": "object",
"additionalProperties": false,
"properties": {
"summary": {
"description": "short command description",
"type": "string"
},
"since": {
"description": "the Redis version that added the command (or for module commands, the module version).",
"type": "string"
},
"group": {
"description": "the functional group to which the command belongs",
"oneOf": [
{
"const": "bitmap"
},
{
"const": "cluster"
},
{
"const": "connection"
},
{
"const": "generic"
},
{
"const": "geo"
},
{
"const": "hash"
},
{
"const": "hyperloglog"
},
{
"const": "list"
},
{
"const": "module"
},
{
"const": "pubsub"
},
{
"const": "scripting"
},
{
"const": "sentinel"
},
{
"const": "server"
},
{
"const": "set"
},
{
"const": "sorted-set"
},
{
"const": "stream"
},
{
"const": "string"
},
{
"const": "transactions"
}
]
},
"complexity": {
"description": "a short explanation about the command's time complexity.",
"type": "string"
},
"module": {
"type": "string"
},
"doc_flags": {
"description": "an array of documentation flags",
"type": "array",
"items": {
"oneOf": [
{
"description": "the command is deprecated.",
"const": "deprecated"
},
{
"description": "a system command that isn't meant to be called by users.",
"const": "syscmd"
}
]
}
},
"deprecated_since": {
"description": "the Redis version that deprecated the command (or for module commands, the module version)",
"type": "string"
},
"replaced_by": {
"description": "the alternative for a deprecated command.",
"type": "string"
},
"history": {
"description": "an array of historical notes describing changes to the command's behavior or arguments.",
"type": "array",
"items": {
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": [
{
"type": "string",
"description": "The Redis version that the entry applies to."
},
{
"type": "string",
"description": "The description of the change."
}
]
}
},
"arguments": {
"description": "an array of maps that describe the command's arguments.",
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"type": {
"type": "string"
},
"display_text": {
"type": "string"
},
"key_spec_index": {
"type": "integer"
},
"token": {
"type": "string"
},
"summary": {
"type": "string"
},
"since": {
"type": "string"
},
"deprecated_since": {
"type": "string"
},
"flags": {
"type": "array",
"items": {
"type": "string"
}
},
"arguments": {
"type": "array"
}
}
}
},
"reply_schema": {
"description": "command reply schema",
"type": "object"
},
"subcommands": {
"description": "A map where each key is a subcommand, and each value is the documentary information",
"$ref": "#"
}
}
}
}
},
"arguments": [
{
"name": "command-name",
"type": "string",
"optional": true,
"multiple": true
}
]
}
} | json | github | https://github.com/redis/redis | src/commands/command-docs.json |
#!/usr/bin/env python
#
# Copyright (C) 2015 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm import vimsupport
from nose.tools import eq_
def ReplaceChunk_SingleLine_Repl_1_test():
# Replace with longer range
# 12345678901234567
result_buffer = [ "This is a string" ]
start, end = _BuildLocations( 1, 1, 1, 5 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'How long',
0,
0,
result_buffer )
eq_( [ "How long is a string" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 4 )
# and replace again, using delta
start, end = _BuildLocations( 1, 10, 1, 11 )
( new_line_offset, new_char_offset ) = vimsupport.ReplaceChunk(
start,
end,
' piece of ',
line_offset,
char_offset,
result_buffer )
line_offset += new_line_offset
char_offset += new_char_offset
eq_( [ 'How long is a piece of string' ], result_buffer )
eq_( new_line_offset, 0 )
eq_( new_char_offset, 9 )
eq_( line_offset, 0 )
eq_( char_offset, 13 )
# and once more, for luck
start, end = _BuildLocations( 1, 11, 1, 17 )
( new_line_offset, new_char_offset ) = vimsupport.ReplaceChunk(
start,
end,
'pie',
line_offset,
char_offset,
result_buffer )
line_offset += new_line_offset
char_offset += new_char_offset
eq_( ['How long is a piece of pie' ], result_buffer )
eq_( new_line_offset, 0 )
eq_( new_char_offset, -3 )
eq_( line_offset, 0 )
eq_( char_offset, 10 )
def ReplaceChunk_SingleLine_Repl_2_test():
    """Replacing a span with shorter text shrinks the line; the char offset is negative."""
    buf = [ "This is a string" ]
    loc_start, loc_end = _BuildLocations( 1, 11, 1, 17 )
    offsets = vimsupport.ReplaceChunk( loc_start, loc_end, 'test', 0, 0, buf )
    eq_( [ "This is a test" ], buf )
    eq_( offsets[ 0 ], 0 )
    eq_( offsets[ 1 ], -2 )
def ReplaceChunk_SingleLine_Repl_3_test():
# Replace with equal range
# 12345678901234567
result_buffer = [ "This is a string" ]
start, end = _BuildLocations( 1, 6, 1, 8 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'be',
0,
0,
result_buffer )
eq_( [ "This be a string" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 0 )
def ReplaceChunk_SingleLine_Add_1_test():
    """Inserting at a zero-width range at the start of a line prepends the text."""
    buf = [ "is a string" ]
    begin, finish = _BuildLocations( 1, 1, 1, 1 )
    delta_lines, delta_chars = vimsupport.ReplaceChunk( begin, finish, 'This ',
                                                        0, 0, buf )
    eq_( [ "This is a string" ], buf )
    eq_( delta_lines, 0 )
    eq_( delta_chars, 5 )
def ReplaceChunk_SingleLine_Add_2_test():
# Insert at end
result_buffer = [ "This is a " ]
start, end = _BuildLocations( 1, 11, 1, 11 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'string',
0,
0,
result_buffer )
eq_( [ "This is a string" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 6 )
def ReplaceChunk_SingleLine_Add_3_test():
# Insert in the middle
result_buffer = [ "This is a string" ]
start, end = _BuildLocations( 1, 8, 1, 8 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
' not',
0,
0,
result_buffer )
eq_( [ "This is not a string" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 4 )
def ReplaceChunk_SingleLine_Del_1_test():
    """Deleting a leading span shrinks the line; the char offset goes negative."""
    buf = [ "This is a string" ]
    begin, finish = _BuildLocations( 1, 1, 1, 6 )
    delta_lines, delta_chars = vimsupport.ReplaceChunk( begin, finish, '',
                                                        0, 0, buf )
    eq_( [ "is a string" ], buf )
    eq_( delta_lines, 0 )
    eq_( delta_chars, -5 )
def ReplaceChunk_SingleLine_Del_2_test():
# Delete from end
result_buffer = [ "This is a string" ]
start, end = _BuildLocations( 1, 10, 1, 18 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'',
0,
0,
result_buffer )
eq_( [ "This is a" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, -8 )
def ReplaceChunk_SingleLine_Del_3_test():
# Delete from middle
result_buffer = [ "This is not a string" ]
start, end = _BuildLocations( 1, 9, 1, 13 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'',
0,
0,
result_buffer )
eq_( [ "This is a string" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, -4 )
def ReplaceChunk_RemoveSingleLine_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 2, 1, 3, 1 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, '',
0, 0, result_buffer )
expected_buffer = [ "aAa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, -1 )
eq_( char_offset, 0 )
def ReplaceChunk_SingleToMultipleLines_test():
result_buffer = [ "aAa",
"aBa",
"aCa" ]
start, end = _BuildLocations( 2, 2, 2, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'Eb\nbF',
0, 0, result_buffer )
expected_buffer = [ "aAa",
"aEb",
"bFBa",
"aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 1 )
eq_( char_offset, 1 )
# now make another change to the "2nd" line
start, end = _BuildLocations( 2, 3, 2, 4 )
( new_line_offset, new_char_offset ) = vimsupport.ReplaceChunk(
start,
end,
'cccc',
line_offset,
char_offset,
result_buffer )
line_offset += new_line_offset
char_offset += new_char_offset
eq_( [ "aAa", "aEb", "bFBcccc", "aCa" ], result_buffer )
eq_( line_offset, 1 )
eq_( char_offset, 4 )
def ReplaceChunk_SingleToMultipleLines2_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 2, 2, 2, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'Eb\nbFb\nG',
0,
0,
result_buffer )
expected_buffer = [ "aAa", "aEb" ,"bFb", "GBa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 2 )
eq_( char_offset, 0 )
def ReplaceChunk_SingleToMultipleLines3_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 2, 2, 2, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'Eb\nbFb\nbGb',
0,
0,
result_buffer )
expected_buffer = [ "aAa", "aEb" ,"bFb", "bGbBa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 2 )
eq_( char_offset, 2 )
def ReplaceChunk_SingleToMultipleLinesReplace_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 1, 2, 1, 4 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'Eb\nbFb\nbGb',
0,
0,
result_buffer )
expected_buffer = [ "aEb", "bFb", "bGb", "aBa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 2 )
eq_( char_offset, 0 )
def ReplaceChunk_SingleToMultipleLinesReplace_2_test():
result_buffer = [ "aAa",
"aBa",
"aCa" ]
start, end = _BuildLocations( 1, 2, 1, 4 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'Eb\nbFb\nbGb',
0,
0,
result_buffer )
expected_buffer = [ "aEb",
"bFb",
"bGb",
"aBa",
"aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 2 )
eq_( char_offset, 0 )
# now do a subsequent change (insert at end of line "1")
start, end = _BuildLocations( 1, 4, 1, 4 )
( new_line_offset, new_char_offset ) = vimsupport.ReplaceChunk(
start,
end,
'cccc',
line_offset,
char_offset,
result_buffer )
line_offset += new_line_offset
char_offset += new_char_offset
eq_( [ "aEb",
"bFb",
"bGbcccc",
"aBa",
"aCa" ], result_buffer )
eq_( line_offset, 2 )
eq_( char_offset, 4 )
def ReplaceChunk_MultipleLinesToSingleLine_test():
result_buffer = [ "aAa", "aBa", "aCaaaa" ]
start, end = _BuildLocations( 2, 2, 3, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'E',
0, 0, result_buffer )
expected_buffer = [ "aAa", "aECaaaa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, -1 )
eq_( char_offset, 1 )
# make another modification applying offsets
start, end = _BuildLocations( 3, 3, 3, 4 )
( new_line_offset, new_char_offset ) = vimsupport.ReplaceChunk(
start,
end,
'cccc',
line_offset,
char_offset,
result_buffer )
line_offset += new_line_offset
char_offset += new_char_offset
eq_( [ "aAa", "aECccccaaa" ], result_buffer )
eq_( line_offset, -1 )
eq_( char_offset, 4 )
# and another, for luck
start, end = _BuildLocations( 3, 4, 3, 5 )
( new_line_offset, new_char_offset ) = vimsupport.ReplaceChunk(
start,
end,
'dd\ndd',
line_offset,
char_offset,
result_buffer )
line_offset += new_line_offset
char_offset += new_char_offset
eq_( [ "aAa", "aECccccdd", "ddaa" ], result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, -2 )
def ReplaceChunk_MultipleLinesToSameMultipleLines_test():
result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
start, end = _BuildLocations( 2, 2, 3, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'Eb\nbF',
0, 0, result_buffer )
expected_buffer = [ "aAa", "aEb", "bFCa", "aDe" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 1 )
def ReplaceChunk_MultipleLinesToMoreMultipleLines_test():
result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
start, end = _BuildLocations( 2, 2, 3, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'Eb\nbFb\nbG',
0,
0,
result_buffer )
expected_buffer = [ "aAa", "aEb", "bFb", "bGCa", "aDe" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 1 )
eq_( char_offset, 1 )
def ReplaceChunk_MultipleLinesToLessMultipleLines_test():
result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
start, end = _BuildLocations( 1, 2, 3, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'Eb\nbF',
0, 0, result_buffer )
expected_buffer = [ "aEb", "bFCa", "aDe" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, -1 )
eq_( char_offset, 1 )
def ReplaceChunk_MultipleLinesToEvenLessMultipleLines_test():
result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
start, end = _BuildLocations( 1, 2, 4, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'Eb\nbF',
0, 0, result_buffer )
expected_buffer = [ "aEb", "bFDe" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, -2 )
eq_( char_offset, 1 )
def ReplaceChunk_SpanBufferEdge_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 1, 1, 1, 3 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'bDb',
0, 0, result_buffer )
expected_buffer = [ "bDba", "aBa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 1 )
def ReplaceChunk_DeleteTextInLine_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 2, 2, 2, 3 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, '',
0, 0, result_buffer )
expected_buffer = [ "aAa", "aa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, -1 )
def ReplaceChunk_AddTextInLine_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 2, 2, 2, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'bDb',
0, 0, result_buffer )
expected_buffer = [ "aAa", "abDbBa", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 3 )
def ReplaceChunk_ReplaceTextInLine_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 2, 2, 2, 3 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'bDb',
0, 0, result_buffer )
expected_buffer = [ "aAa", "abDba", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 2 )
def ReplaceChunk_SingleLineOffsetWorks_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 1, 1, 1, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'bDb',
1, 1, result_buffer )
expected_buffer = [ "aAa", "abDba", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 0 )
eq_( char_offset, 2 )
def ReplaceChunk_SingleLineToMultipleLinesOffsetWorks_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 1, 1, 1, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'Db\nE',
1, 1, result_buffer )
expected_buffer = [ "aAa", "aDb", "Ea", "aCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 1 )
eq_( char_offset, -1 )
def ReplaceChunk_MultipleLinesToSingleLineOffsetWorks_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 1, 1, 2, 2 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start, end, 'bDb',
1, 1, result_buffer )
expected_buffer = [ "aAa", "abDbCa" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, -1 )
eq_( char_offset, 3 )
def ReplaceChunk_MultipleLineOffsetWorks_test():
result_buffer = [ "aAa", "aBa", "aCa" ]
start, end = _BuildLocations( 3, 1, 4, 3 )
( line_offset, char_offset ) = vimsupport.ReplaceChunk( start,
end,
'bDb\nbEb\nbFb',
-1,
1,
result_buffer )
expected_buffer = [ "aAa", "abDb", "bEb", "bFba" ]
eq_( expected_buffer, result_buffer )
eq_( line_offset, 1 )
eq_( char_offset, 1 )
def _BuildLocations( start_line, start_column, end_line, end_column ):
    # Helper: build the (start, end) pair of 1-based location dicts in the
    # shape vimsupport.ReplaceChunk expects.
    return {
        'line_num' : start_line,
        'column_num': start_column,
    }, {
        'line_num' : end_line,
        'column_num': end_column,
} | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: experiment_collection
:platform: Unix
:synopsis: Contains the Experiment class and all possible experiment
collections from which Experiment can inherit at run time.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import os
import time
import logging
from mpi4py import MPI
from savu.data.plugin_list import PluginList
from savu.data.data_structures import Data
from savu.data.meta_data import MetaData
class Experiment(object):
"""
One instance of this class is created at the beginning of the
processing chain and remains until the end. It holds the current data
object and a dictionary containing all metadata.
"""
def __init__(self, options):
    """Create the experiment: wrap *options* in MetaData, load the plugin
    list from options["process_file"], and initialise the data index."""
    self.meta_data = MetaData(options)
    self.meta_data_setup(options["process_file"])
    # name -> Data object maps for the current plugin's inputs and outputs
    self.index = {"in_data": {}, "out_data": {}}
def meta_data_setup(self, process_file):
    """Populate the experiment metadata and load the plugin list.

    :param process_file: path to the process (plugin list) file to load.
    """
    self.meta_data.load_experiment_collection()
    self.meta_data.plugin_list = PluginList()
    self.meta_data.plugin_list.populate_plugin_list(process_file)
def create_data_object(self, dtype, name, bases=None):
    """Return the Data object registered under index[dtype][name], creating
    it on first request and mixing in its transport base class.

    :param dtype: index bucket, "in_data" or "out_data".
    :param name: key of the data object within that bucket.
    :param bases: optional list of extra base classes; the transport class
        for the configured transport is always appended before applying.
    :returns: the (possibly newly created) Data object.
    """
    # BUG FIX: the default was the mutable literal ``[]``; because defaults
    # are evaluated once at def time and this list is appended to below,
    # every default-argument call accumulated another transport class in the
    # same shared list.
    if bases is None:
        bases = []
    if name not in self.index[dtype]:
        self.index[dtype][name] = Data(name)
    data_obj = self.index[dtype][name]
    transport = self.meta_data.get_meta_data("transport")
    bases.append(data_obj.get_transport_data(transport))
    data_obj.add_base_classes(bases)
    return data_obj
def set_nxs_filename(self):
    """Derive the output .nxs filename from the first input data file plus a
    timestamp, and store it in the experiment metadata under "nxs_filename"."""
    # NOTE(review): dict.keys()[0] is Python 2 only, and which key is "first"
    # is arbitrary for multiple inputs - confirm intent if porting to Python 3.
    name = self.index["in_data"].keys()[0]
    filename = os.path.basename(self.index["in_data"][name].backing_file.filename)
    filename = os.path.splitext(filename)[0]
    filename = os.path.join(self.meta_data.get_meta_data("out_path"),
                            "%s_processed_%s.nxs" % (filename,
                                                     time.strftime("%Y%m%d%H%M%S")))
    self.meta_data.set_meta_data("nxs_filename", filename)
def clear_data_objects(self):
self.index["out_data"] = {}
self.index["in_data"] = {}
def clear_out_data_objects(self):
self.index["out_data"] = {}
def set_out_data_to_in(self):
self.index["in_data"] = self.index["out_data"]
self.index["out_data"] = {}
def barrier(self):
    """Synchronise all MPI processes at this point; no-op when the 'mpi'
    metadata flag is not True."""
    if self.meta_data.get_meta_data('mpi') is True:
        logging.debug("About to hit a barrier")
        MPI.COMM_WORLD.Barrier()
        logging.debug("Past the barrier")
def log(self, log_tag, log_level=logging.DEBUG):
"""
Log the contents of the experiment at the specified level
"""
logging.log(log_level, "Experimental Parameters for %s", log_tag)
for key, value in self.index["in_data"].iteritems():
logging.log(log_level, "in data (%s) shape = %s", key,
value.get_shape())
for key, value in self.index["in_data"].iteritems():
logging.log(log_level, "out data (%s) shape = %s", key,
value.get_shape()) | unknown | codeparrot/codeparrot-clean | ||
import collections
import mock
import os
import sys
from ansible.compat.tests import unittest
# Import the function under test. The import can legitimately fail while
# module_utils porting (urls.py) is incomplete; the tests below are then
# skipped by the version guard on the test class.
try:
    from ansible.modules.core.packaging.os.apt import (
        expand_pkgspec_from_fnmatches,
    )
except ImportError:
    # FIX: narrowed from a bare ``except:`` which also swallowed unrelated
    # failures (SyntaxError, NameError, even KeyboardInterrupt) and so could
    # hide real breakage in the module under test.
    if sys.version_info[0] >= 3:
        pass
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
class AptExpandPkgspecTestCase(unittest.TestCase):
    """Unit tests for apt.expand_pkgspec_from_fnmatches covering plain names,
    version wildcards and package-name wildcards."""

    def setUp(self):
        # Minimal stand-in for an apt cache: entries only need a .name field.
        FakePackage = collections.namedtuple("Package", ("name",))
        self.fake_cache = [ FakePackage("apt"),
                            FakePackage("apt-utils"),
                            FakePackage("not-selected"),
                          ]

    def test_trivial(self):
        # A plain package name passes through unchanged.
        foo = ["apt"]
        self.assertEqual(
            expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)

    def test_version_wildcard(self):
        # A wildcard in the version part alone does not trigger cache expansion.
        foo = ["apt=1.0*"]
        self.assertEqual(
            expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)

    def test_pkgname_wildcard_version_wildcard(self):
        # A name wildcard expands against the cache; the version part is dropped.
        foo = ["apt*=1.0*"]
        m_mock = mock.Mock()
        self.assertEqual(
            expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
            ['apt', 'apt-utils'])

    def test_pkgname_expands(self):
        # A bare name wildcard expands to every matching cache entry.
        foo = ["apt*"]
        m_mock = mock.Mock()
        self.assertEqual(
            expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
["apt", "apt-utils"]) | unknown | codeparrot/codeparrot-clean | ||
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    # When True, each test is dereferenced after it runs so its memory can be
    # reclaimed mid-run (see _removeTestAtIndex).
    _cleanup = True

    def __init__(self, tests=()):
        self._tests = []
        # Count of tests dropped by cleanup; keeps countTestCases accurate
        # after the test objects themselves have been released.
        self._removed_tests = 0
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        # Suites compare equal iff they hold equal test sequences.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        # Start from the removed-test tally so already-cleaned tests count too.
        cases = self._removed_tests
        for test in self:
            if test:
                cases += test.countTestCases()
        return cases

    def addTest(self, test):
        # sanity checks
        if not callable(test):
            raise TypeError("{} is not callable".format(repr(test)))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        if isinstance(tests, str):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)

    def run(self, result):
        for index, test in enumerate(self):
            if result.shouldStop:
                break
            test(result)
            if self._cleanup:
                self._removeTestAtIndex(index)
        return result

    def _removeTestAtIndex(self, index):
        """Stop holding a reference to the TestCase at index."""
        try:
            test = self._tests[index]
        except TypeError:
            # support for suite implementations that have overriden self._tests
            pass
        else:
            # Some unittest tests add non TestCase/TestSuite objects to
            # the suite.
            if hasattr(test, 'countTestCases'):
                self._removed_tests += test.countTestCases()
            self._tests[index] = None

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()
class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.

    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result, debug=False):
        # Remember whether we are the outermost suite of this run, so that
        # final class/module teardown happens exactly once, at the end.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for index, test in enumerate(self):
            if result.shouldStop:
                break

            if _isnotsuite(test):
                # Crossing into a (possibly new) test class: tear down the
                # previous class/module fixtures and set up the new ones.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                if (getattr(test.__class__, '_classSetupFailed', False) or
                        getattr(result, '_moduleSetUpFailed', False)):
                    # A fixture failed; skip the test itself.
                    continue

            if not debug:
                test(result)
            else:
                test.debug()

            if self._cleanup:
                self._removeTestAtIndex(index)

        if topLevel:
            # Outermost suite: run the trailing teardowns and reset the flag.
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self.run(debug, True)

    ################################
    # Private helpers managing setUpClass/setUpModule and their teardowns.

    def _handleClassSetUp(self, test, result):
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            # Same class as the previous test; setUpClass already ran.
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _get_previous_module(self, result):
        # Module of the most recently run test class, or None at the start.
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule

    def _handleModuleFixture(self, test, result):
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return

        self._handleModuleTearDown(result)

        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _addClassOrModuleLevelException(self, result, exception, errorName):
        # Report a fixture-level failure through a pseudo-test, so the
        # result object sees it like any other test error/skip.
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())

    def _handleModuleTearDown(self, result):
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return

        try:
            module = sys.modules[previousModule]
        except KeyError:
            return

        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _tearDownPreviousClass(self, test, result):
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
    "Used by the TestSuite to hold previous class when running in debug."
    # Minimal stand-in for a TestResult: only the attributes that
    # TestSuite.run() and the fixture helpers read.
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
package cleanups
import (
"context"
"errors"
"fmt"
"testing"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestCall(t *testing.T) {
c := Composite{}
err1 := errors.New("error1")
err2 := errors.New("error2")
errX := errors.New("errorX")
errY := errors.New("errorY")
errZ := errors.New("errorZ")
errYZ := errors.Join(errY, errZ)
c.Add(func(ctx context.Context) error {
return err1
})
c.Add(func(ctx context.Context) error {
return nil
})
c.Add(func(ctx context.Context) error {
return fmt.Errorf("something happened: %w", err2)
})
c.Add(func(ctx context.Context) error {
return errors.Join(errX, fmt.Errorf("joined: %w", errYZ))
})
err := c.Call(context.Background())
errs := err.(interface{ Unwrap() []error }).Unwrap()
assert.Check(t, is.ErrorContains(err, err1.Error()))
assert.Check(t, is.ErrorContains(err, err2.Error()))
assert.Check(t, is.ErrorContains(err, errX.Error()))
assert.Check(t, is.ErrorContains(err, errY.Error()))
assert.Check(t, is.ErrorContains(err, errZ.Error()))
assert.Check(t, is.ErrorContains(err, "something happened: "+err2.Error()))
t.Log(err)
assert.Assert(t, is.Len(errs, 3))
// Cleanups executed in reverse order.
assert.Check(t, is.ErrorIs(errs[2], err1))
assert.Check(t, is.ErrorIs(errs[1], err2))
assert.Check(t, is.ErrorIs(errs[0], errX))
assert.Check(t, is.ErrorIs(errs[0], errYZ))
} | go | github | https://github.com/moby/moby | daemon/internal/cleanups/composite_test.go |
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import KayAiRetriever
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps public attribute name -> the module that now provides it.
DEPRECATED_LOOKUP = {"KayAiRetriever": "langchain_community.retrievers"}
# Resolver that looks names up in DEPRECATED_LOOKUP and imports them
# from their new location (with a deprecation warning).
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): called only for names not found
    in the module; delegates to the deprecation-aware importer.
    """
    return _import_attribute(name)
# Names re-exported (from their new home) for backward compatibility.
__all__ = [
    "KayAiRetriever",
]
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest, SerialMixin
from numba.config import ENABLE_CUDASIM
def useless_sync(ary):
    """Kernel: write each thread's flat grid index into ``ary``.

    The barrier is not needed for correctness here (no shared data); the
    kernel only exercises code generation for ``cuda.syncthreads()``.
    """
    i = cuda.grid(1)
    cuda.syncthreads()
    ary[i] = i
def simple_smem(ary):
    """Kernel: thread 0 fills a shared array 0..N-1; every thread reads back.

    The ``syncthreads()`` barrier makes thread 0's shared-memory writes
    visible to the other threads before they read.
    """
    N = 100
    sm = cuda.shared.array(N, int32)
    i = cuda.grid(1)
    if i == 0:
        for j in range(N):
            sm[j] = j
    cuda.syncthreads()
    ary[i] = sm[i]
def coop_smem2d(ary):
    """Kernel: compute (i+1)/(j+1) per thread, staged through 2-D shared memory."""
    i, j = cuda.grid(2)
    sm = cuda.shared.array((10, 20), float32)
    sm[i, j] = (i + 1) / (j + 1)
    cuda.syncthreads()
    ary[i, j] = sm[i, j]
def dyn_shared_memory(ary):
    """Kernel: store ``i * 2`` through dynamically-sized shared memory.

    ``cuda.shared.array(0, ...)`` maps to dynamic shared memory, whose byte
    size is supplied as the fourth launch-configuration argument.
    """
    i = cuda.grid(1)
    sm = cuda.shared.array(0, float32)
    sm[i] = i * 2
    cuda.syncthreads()
    ary[i] = sm[i]
def use_threadfence(ary):
    """Kernel: increment ary[0] on both sides of a device-wide memory fence."""
    ary[0] += 123
    cuda.threadfence()
    ary[0] += 321
def use_threadfence_block(ary):
    """Kernel: increment ary[0] on both sides of a block-scoped memory fence."""
    ary[0] += 123
    cuda.threadfence_block()
    ary[0] += 321
def use_threadfence_system(ary):
    """Kernel: increment ary[0] on both sides of a system-scoped memory fence."""
    ary[0] += 123
    cuda.threadfence_system()
    ary[0] += 321
def use_syncthreads_count(ary_in, ary_out):
    """Kernel: each thread stores the block-wide count of nonzero predicates."""
    i = cuda.grid(1)
    ary_out[i] = cuda.syncthreads_count(ary_in[i])
def use_syncthreads_and(ary_in, ary_out):
    """Kernel: each thread stores the block-wide AND of all threads' predicates."""
    i = cuda.grid(1)
    ary_out[i] = cuda.syncthreads_and(ary_in[i])
def use_syncthreads_or(ary_in, ary_out):
    """Kernel: each thread stores the block-wide OR of all threads' predicates."""
    i = cuda.grid(1)
    ary_out[i] = cuda.syncthreads_or(ary_in[i])
class TestCudaSync(SerialMixin, unittest.TestCase):
    """Tests for CUDA synchronization primitives (barriers, fences, votes)."""

    def test_useless_sync(self):
        compiled = cuda.jit("void(int32[::1])")(useless_sync)
        nelem = 10
        ary = np.empty(nelem, dtype=np.int32)
        exp = np.arange(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == exp))

    def test_simple_smem(self):
        compiled = cuda.jit("void(int32[::1])")(simple_smem)
        nelem = 100
        ary = np.empty(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))

    def test_coop_smem2d(self):
        compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
        shape = 10, 20
        ary = np.empty(shape, dtype=np.float32)
        compiled[1, shape](ary)
        # Recompute the expected (i+1)/(j+1) grid on the host.
        exp = np.empty_like(ary)
        for i in range(ary.shape[0]):
            for j in range(ary.shape[1]):
                exp[i, j] = (i + 1) / (j + 1)
        self.assertTrue(np.allclose(ary, exp))

    def test_dyn_shared_memory(self):
        compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
        shape = 50
        ary = np.empty(shape, dtype=np.float32)
        # Launch config: 1 block, `shape` threads, stream 0, and
        # `ary.size * 4` bytes of dynamic shared memory (float32 = 4 bytes).
        compiled[1, shape, 0, ary.size * 4](ary)
        self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))

    def test_threadfence_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            # threadfence() should emit a device-wide membar.
            self.assertIn("membar.gl;", compiled.ptx)

    def test_threadfence_block_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_block)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            # threadfence_block() should emit a CTA-scoped membar.
            self.assertIn("membar.cta;", compiled.ptx)

    def test_threadfence_system_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_system)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            # threadfence_system() should emit a system-scoped membar.
            self.assertIn("membar.sys;", compiled.ptx)

    def test_syncthreads_count(self):
        compiled = cuda.jit("void(int32[:], int32[:])")(use_syncthreads_count)
        ary_in = np.ones(72, dtype=np.int32)
        ary_out = np.zeros(72, dtype=np.int32)
        # Two of the 72 predicates are false, so every thread sees count 70.
        ary_in[31] = 0
        ary_in[42] = 0
        compiled[1, 72](ary_in, ary_out)
        self.assertTrue(np.all(ary_out == 70))

    def test_syncthreads_and(self):
        compiled = cuda.jit("void(int32[:], int32[:])")(use_syncthreads_and)
        nelem = 100
        ary_in = np.ones(nelem, dtype=np.int32)
        ary_out = np.zeros(nelem, dtype=np.int32)
        compiled[1, nelem](ary_in, ary_out)
        self.assertTrue(np.all(ary_out == 1))
        # A single false predicate makes the block-wide AND false.
        ary_in[31] = 0
        compiled[1, nelem](ary_in, ary_out)
        self.assertTrue(np.all(ary_out == 0))

    def test_syncthreads_or(self):
        compiled = cuda.jit("void(int32[:], int32[:])")(use_syncthreads_or)
        nelem = 100
        ary_in = np.zeros(nelem, dtype=np.int32)
        ary_out = np.zeros(nelem, dtype=np.int32)
        compiled[1, nelem](ary_in, ary_out)
        self.assertTrue(np.all(ary_out == 0))
        # A single true predicate makes the block-wide OR true.
        ary_in[31] = 1
        compiled[1, nelem](ary_in, ary_out)
        self.assertTrue(np.all(ary_out == 1))
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
def add_papyrus_handler(self, route_name_prefix, base_url, handler):
    """ Add a Papyrus handler, i.e. a handler defining the MapFish
    HTTP interface.

    Example::

        import papyrus
        config.include(papyrus)
        config.add_papyrus_handler(
            'spots', '/spots', 'mypackage.handlers.SpotHandler')

    Arguments:

    ``route_name_prefix`` The prefix used for the route names
    passed to ``config.add_handler``.

    ``base_url`` The web service's base URL, e.g. ``/spots``. No
    trailing slash!

    ``handler`` a dotted name or a reference to a handler class,
    e.g. ``'mypackage.handlers.MyHandler'``.
    """
    # (name suffix, URL suffix, handler action, HTTP method) for each
    # operation of the MapFish protocol; registration order is preserved.
    operations = (
        ('read_many', '', 'read_many', 'GET'),
        ('read_one', '/{id}', 'read_one', 'GET'),
        ('count', '/count', 'count', 'GET'),
        ('create', '', 'create', 'POST'),
        ('update', '/{id}', 'update', 'PUT'),
        ('delete', '/{id}', 'delete', 'DELETE'),
    )
    for suffix, url_suffix, action, method in operations:
        self.add_handler(route_name_prefix + '_' + suffix,
                         base_url + url_suffix, handler,
                         action=action, request_method=method)
def add_papyrus_routes(self, route_name_prefix, base_url):
    """ A helper method that adds routes to view callables that, together,
    implement the MapFish HTTP interface.

    Example::

        import papyrus
        config.include(papyrus)
        config.add_papyrus_routes('spots', '/spots')
        config.scan()

    Arguments:

    ``route_name_prefix`` The prefix used for the route names
    passed to ``config.add_route``.

    ``base_url`` The web service's base URL, e.g. ``/spots``. No
    trailing slash!
    """
    # (name suffix, URL suffix, HTTP method) for each operation of the
    # MapFish protocol; registration order is preserved.
    operations = (
        ('read_many', '', 'GET'),
        ('read_one', '/{id}', 'GET'),
        ('count', '/count', 'GET'),
        ('create', '', 'POST'),
        ('update', '/{id}', 'PUT'),
        ('delete', '/{id}', 'DELETE'),
    )
    for suffix, url_suffix, method in operations:
        self.add_route(route_name_prefix + '_' + suffix,
                       base_url + url_suffix, request_method=method)
def includeme(config):
    """ The function to pass to ``config.include``. Requires the
    ``pyramid_handlers`` module. """
    # Expose both helpers as configurator directives.
    for name, directive in (('add_papyrus_handler', add_papyrus_handler),
                            ('add_papyrus_routes', add_papyrus_routes)):
        config.add_directive(name, directive)
#ifndef JSON_DOM_INCLUDED
#define JSON_DOM_INCLUDED
/* Copyright (c) 2015, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include <assert.h>
#include <stddef.h>
#include <iterator>
#include <map>
#include <memory> // unique_ptr
#include <new>
#include <string>
#include <string_view>
#include <type_traits> // is_base_of
#include <unordered_set>
#include <utility>
#include <vector>
#include "field_types.h" // enum_field_types
#include "my_byteorder.h"
#include "my_inttypes.h"
#include "my_time.h" // my_time_flags_t
#include "mysql_time.h" // MYSQL_TIME
#include "prealloced_array.h" // Prealloced_array
#include "sql-common/json_binary.h" // json_binary::Value
#include "sql-common/json_error_handler.h"
#include "sql-common/my_decimal.h" // my_decimal
#include "sql/malloc_allocator.h" // Malloc_allocator
class Field_json;
class Json_array;
class Json_container;
class Json_dom;
class Json_object;
class Json_path;
class Json_seekable_path;
class Json_wrapper;
class Json_wrapper_hasher;
class String;
struct CHARSET_INFO;
typedef Prealloced_array<Json_wrapper, 16> Json_wrapper_vector;
typedef Prealloced_array<Json_dom *, 16> Json_dom_vector;
using Json_dom_ptr = std::unique_ptr<Json_dom>;
using Json_array_ptr = std::unique_ptr<Json_array>;
using Json_object_ptr = std::unique_ptr<Json_object>;
using ha_checksum = std::uint32_t;
/**
@file
JSON DOM.
  When a JSON value is retrieved from a column, a priori it exists in
a binary form, cf. Json_binary::Value class.
However, when we need to manipulate the JSON values we mostly convert them
  from binary form to a structured in-memory form called DOM (from domain
object model) which uses a recursive tree representation of the JSON value
corresponding closely to a parse tree. This form is more suitable for
manipulation.
The JSON type is mostly represented internally as a Json_wrapper which hides
  if the representation is a binary or DOM one. This makes it possible to avoid
building a DOM unless we really need one.
The file defines two sets of classes: a) The Json_dom hierarchy and
b) Json_wrapper and its companion classes Json_wrapper_object_iterator and
Json_object_wrapper. For both sets, arrays are traversed using an operator[].
*/
/**
  Json values in MySQL comprise the standard set of JSON values plus a
MySQL specific set. A Json _number_ type is subdivided into _int_,
_uint_, _double_ and _decimal_.
MySQL also adds four built-in date/time values: _date_, _time_,
_datetime_ and _timestamp_. An additional _opaque_ value can
store any other MySQL type.
The enumeration is common to Json_dom and Json_wrapper.
The enumeration is also used by Json_wrapper::compare() to
determine the ordering when comparing values of different types,
so the order in which the values are defined in the enumeration,
is significant. The expected order is null < number < string <
object < array < boolean < date < time < datetime/timestamp <
opaque.
*/
enum class enum_json_type {
  J_NULL,
  // The JSON "number" type is subdivided into four MySQL-specific kinds.
  J_DECIMAL,
  J_INT,
  J_UINT,
  J_DOUBLE,
  J_STRING,
  J_OBJECT,
  J_ARRAY,
  J_BOOLEAN,
  // MySQL-specific built-in temporal values.
  J_DATE,
  J_TIME,
  J_DATETIME,
  J_TIMESTAMP,
  // Opaque carrier for any other MySQL type.
  J_OPAQUE,
  J_ERROR
};
/**
Allocate a new Json_dom object and return a std::unique_ptr which points to
it.
@param args the arguments to pass to the constructor
@tparam T the type of Json_dom to create
@tparam Args the type of the arguments to pass to the constructor
@return a pointer to the allocated object
*/
template <typename T, typename... Args>
inline std::unique_ptr<T> create_dom_ptr(Args &&...args) {
  // Nothrow new: an allocation failure yields nullptr instead of throwing.
  T *raw = new (std::nothrow) T(std::forward<Args>(args)...);
  return std::unique_ptr<T>(raw);
}
/**
JSON DOM abstract base class.
MySQL representation of in-memory JSON objects used by the JSON type
Supports access, deep cloning, and updates. See also Json_wrapper and
json_binary::Value.
Uses heap for space allocation for now. FIXME.
Class hierarchy:
<code><pre>
Json_dom (abstract)
Json_scalar (abstract)
Json_string
Json_number (abstract)
Json_decimal
Json_int
Json_uint
Json_double
Json_boolean
Json_null
Json_temporal
Json_time
Json_datetime
Json_opaque
Json_container (abstract)
Json_object
Json_array
</pre></code>
At the outset, object and array add/insert/append operations takes
a clone unless specified in the method, e.g. add_alias hands the
responsibility for the passed in object over to the object.
*/
class Json_dom {
  // so that these classes can call set_parent()
  friend class Json_object;
  friend class Json_array;

 private:
  /**
    Set the parent dom to which this dom is attached.

    @param[in] parent the parent we're being attached to
  */
  void set_parent(Json_container *parent) { m_parent = parent; }

 public:
  virtual ~Json_dom() = default;

  /**
    Allocate space on the heap for a Json_dom object.

    @return pointer to the allocated memory, or NULL if memory could
    not be allocated (in which case my_error() will have been called
    with the appropriate error message)
  */
  void *operator new(size_t size, const std::nothrow_t &) noexcept;

  /**
    Deallocate the space used by a Json_dom object.
  */
  void operator delete(void *ptr) noexcept;

  /**
    Nothrow delete. Counterpart of the nothrow operator new above; called
    by the runtime if a constructor throws during such an allocation.
  */
  void operator delete(void *ptr, const std::nothrow_t &) noexcept;

  /**
    Get the parent dom to which this dom is attached.

    @return the parent dom.
  */
  Json_container *parent() const { return m_parent; }

  /**
    @return the type corresponding to the actual Json_dom subclass
  */
  virtual enum_json_type json_type() const = 0;

  /**
    @return true if the object is a subclass of Json_scalar
  */
  virtual bool is_scalar() const { return false; }

  /**
    @return true if the object is a subclass of Json_number
  */
  virtual bool is_number() const { return false; }

  /**
    Compute the depth of a document. This is the value which would be
    returned by the JSON_DEPTH() system function.

    - for scalar values, empty array and empty object: 1
    - for non-empty array: 1+ max(depth of array elements)
    - for non-empty objects: 1+ max(depth of object values)

    For example:
    "abc", [] and {} have depth 1.
    ["abc", [3]] and {"a": "abc", "b": [3]} have depth 3.

    @return the depth of the document
  */
  virtual uint32 depth() const = 0;

  /**
    Make a deep clone. The ownership of the returned object is
    henceforth with the caller.

    @return a cloned Json_dom object.
  */
  virtual Json_dom_ptr clone() const = 0;

  /**
    Parse Json text to DOM (using rapidjson). The text must be valid JSON.
    The results when supplying an invalid document is undefined.
    The ownership of the returned object is henceforth with the caller.

    If the parsing fails because of a syntax error, the errmsg and
    offset arguments will be given values that point to a detailed
    error message and where the syntax error was located. The caller
    will have to generate an error message with my_error() in this
    case.

    If the parsing fails because of some other error (such as out of
    memory), errmsg will point to a location that holds the value
    NULL. In this case, parse() will already have called my_error(),
    and the caller doesn't need to generate an error message.

    @param[in]  text           the JSON text
    @param[in]  length         the length of the text
    @param[in]  error_handler  Pointer to a function that should handle
                               reporting of parsing error.
    @param[in]  depth_handler  Pointer to a function that should handle error
                               occurred when depth is exceeded.

    @result the built DOM if JSON text was parseable, else NULL
  */
  static Json_dom_ptr parse(const char *text, size_t length,
                            const JsonParseErrorHandler &error_handler,
                            const JsonErrorHandler &depth_handler);

  /**
    Construct a DOM object based on a binary JSON value. The ownership
    of the returned object is henceforth with the caller.

    @param v the binary value to parse
    @return a DOM representation of the binary value, or NULL on error
  */
  static Json_dom_ptr parse(const json_binary::Value &v);

  /**
    Get the path location of this dom, measured from the outermost
    document it nests inside.
  */
  Json_path get_location() const;

  /**
    Finds all of the json sub-documents which match the path expression.
    Adds a vector element for each match.

    See the header comment for Json_wrapper.seek() for a discussion
    of complexities involving path expression with more than one
    ellipsis (**) token.

    @param[in]  path  the (possibly wildcarded) address of the sub-documents
    @param[in]  legs  the number of legs to use from @a path
    @param[out] hits  one element per match
    @param[in]  auto_wrap
                      if true, match a tailing [0] to scalar at that position.
    @param[in]  only_need_one True if we can stop after finding one match
    @return false on success, true on error
  */
  bool seek(const Json_seekable_path &path, size_t legs, Json_dom_vector *hits,
            bool auto_wrap, bool only_need_one);

 private:
  /** Parent pointer (the container this dom is attached to, if any). */
  Json_container *m_parent{nullptr};
};
/**
  Abstract base class of all JSON container types (Json_object and Json_array).
*/
class Json_container : public Json_dom {
 public:
  /**
    Replace oldv contained inside this container (array or object) with newv.
    If this container does not contain oldv, calling the method is a no-op.

    @param[in] oldv the value to be replaced
    @param[in] newv the new value to put in the container
  */
  virtual void replace_dom_in_container(const Json_dom *oldv,
                                        Json_dom_ptr newv) = 0;
};
/**
  A comparator that is used for ordering keys in a Json_object. It
  orders the keys on length, and lexicographically if the keys have
  the same length. The ordering is ascending. This ordering was chosen
  for speed of look-up. See usage in Json_object_map.
*/
struct Json_key_comparator {
  // is_transparent must be defined in order to make std::map::find() accept
  // keys that are of a different type than the key_type of the map. In
  // particular, this is needed to make it possible to call find() with
  // a std::string_view argument or anything implicitly convertible to
  // std::string_view.
  using is_transparent = void;

  // Strict weak ordering: true if key1 sorts before key2.
  bool operator()(std::string_view key1, std::string_view key2) const;
};
/**
  A type used to hold JSON object elements in a map, see the
  Json_object class.
*/
// Keys are ordered by Json_key_comparator (length, then lexicographic);
// memory for the map nodes is obtained through Malloc_allocator.
using Json_object_map =
    std::map<std::string, Json_dom_ptr, Json_key_comparator,
             Malloc_allocator<std::pair<const std::string, Json_dom_ptr>>>;
/**
  Represents a JSON container value of type "object" (ECMA), type
  J_OBJECT here.
*/
class Json_object final : public Json_container {
 private:
  /**
    Map to hold the object elements, ordered by Json_key_comparator.
  */
  Json_object_map m_map;

 public:
  Json_object();
  enum_json_type json_type() const override { return enum_json_type::J_OBJECT; }

  /**
    Insert a clone of the value into the object. If the key already
    exists in the object, the existing value is replaced ("last value
    wins").

    @param[in] key    the JSON element key of to be added
    @param[in] value  a JSON value: the element key's value
    @retval false on success
    @retval true on failure
  */
  bool add_clone(std::string_view key, const Json_dom *value) {
    // A null value is silently ignored (reported as success).
    return value == nullptr || add_alias(key, value->clone());
  }

  /**
    Insert the value into the object. If the key already exists in the
    object, the existing value is replaced ("last value wins").

    Ownership of the value is effectively transferred to the
    object and the value will be deallocated by the object so only add
    values that can be deallocated safely (no stack variables please!)

    New code should prefer #add_alias(std::string_view, Json_dom_ptr)
    to this function, because that makes the transfer of ownership
    more explicit. This function might be removed in the future.

    @param[in] key    the JSON key of to be added
    @param[in] value  a JSON value: the key's value
    @retval false on success
    @retval true on failure
  */
  bool add_alias(std::string_view key, Json_dom *value) {
    return add_alias(key, Json_dom_ptr(value));
  }

  /**
    Insert the value into the object. If the key already exists in the
    object, the existing value is replaced ("last value wins").

    The ownership of the value is transferred to the object.

    @param[in] key    the key of the value to be added
    @param[in] value  the value to add
    @return false on success, true on failure
  */
  bool add_alias(std::string_view key, Json_dom_ptr value);

  /**
    Transfer all of the key/value pairs in the other object into this
    object. The other object is deleted. If this object and the other
    object share a key, then the two values of the key are merged.

    @param [in] other    a pointer to the object which will be consumed
    @retval false on success
    @retval true on failure
  */
  bool consume(Json_object_ptr other);

  /**
    Return the value at key. The value is not cloned, so make
    one if you need it. Do not delete the returned value, please!
    If the key is not present, return a null pointer.

    @param[in] key the key of the element whose value we want
    @return the value associated with the key, or NULL if the key is not found
  */
  Json_dom *get(std::string_view key) const;

  /**
    Remove the child element addressed by key. The removed child is deleted.

    @param key the key of the element to remove
    @retval true if an element was removed
    @retval false if there was no element with that key
  */
  bool remove(std::string_view key);

  /**
    @return The number of elements in the JSON object.
  */
  size_t cardinality() const;

  uint32 depth() const override;

  Json_dom_ptr clone() const override;

  void replace_dom_in_container(const Json_dom *oldv,
                                Json_dom_ptr newv) override;

  /**
    Remove all elements in the object.
  */
  void clear() { m_map.clear(); }

  /**
    Constant iterator over the elements in the JSON object. Each
    element is represented as a std::pair where first is a std::string
    that represents the key name, and second is a pointer to a
    Json_dom that represents the value.
  */
  typedef Json_object_map::const_iterator const_iterator;

  /// Returns a const_iterator that refers to the first element.
  const_iterator begin() const { return m_map.begin(); }

  /// Returns a const_iterator that refers past the last element.
  const_iterator end() const { return m_map.end(); }

  /**
    Implementation of the MergePatch function specified in RFC 7396:

    define MergePatch(Target, Patch):
      if Patch is an Object:
        if Target is not an Object:
          Target = {} # Ignore the contents and set it to an empty Object
        for each Key/Value pair in Patch:
          if Value is null:
            if Key exists in Target:
              remove the Key/Value pair from Target
          else:
            Target[Key] = MergePatch(Target[Key], Value)
        return Target
      else:
        return Patch

    @param patch  the object that describes the patch
    @retval false on success
    @retval true on memory allocation error
  */
  bool merge_patch(Json_object_ptr patch);
};
/**
  Represents a JSON array container, i.e. type J_ARRAY here.
*/
class Json_array final : public Json_container {
 private:
  /// Holds the array values.
  std::vector<Json_dom_ptr, Malloc_allocator<Json_dom_ptr>> m_v;
 public:
  /// Construct an empty array.
  Json_array();
  enum_json_type json_type() const override { return enum_json_type::J_ARRAY; }
  /**
    Append a clone of the value to the end of the array.
    @param[in] value a JSON value to be appended
    @retval false on success
    @retval true on failure
  */
  bool append_clone(const Json_dom *value) {
    return insert_clone(size(), value);
  }
  /**
    Append the value to the end of the array.
    Ownership of the value is effectively transferred to the array and
    the value will be deallocated by the array so only append values
    that can be deallocated safely (no stack variables please!)
    New code should prefer #append_alias(Json_dom_ptr) to this
    function, because that makes the transfer of ownership more
    explicit. This function might be removed in the future.
    @param[in] value a JSON value to be appended
    @retval false on success
    @retval true on failure
  */
  bool append_alias(Json_dom *value) {
    return append_alias(Json_dom_ptr(value));
  }
  /**
    Append the value to the end of the array and take over the
    ownership of the value.
    @param value the JSON value to be appended
    @return false on success, true on failure
  */
  bool append_alias(Json_dom_ptr value) {
    return insert_alias(size(), std::move(value));
  }
  /**
    Moves all of the elements in the other array to the end of
    this array. The other array is deleted.
    @param [in] other a pointer to the array which will be consumed
    @retval false on success
    @retval true on failure
  */
  bool consume(Json_array_ptr other);
  /**
    Insert a clone of the value at position index of the array. If beyond the
    end, insert at the end.
    @param[in] index the position at which to insert
    @param[in] value a JSON value to be inserted
    @retval false on success
    @retval true on failure (also returned when value is null, which is
            treated the same as a failed clone)
  */
  bool insert_clone(size_t index, const Json_dom *value) {
    // A null input short-circuits to true (failure) without inserting.
    return value == nullptr || insert_alias(index, value->clone());
  }
  /**
    Insert the value at position index of the array.
    If beyond the end, insert at the end.
    Ownership of the value is effectively transferred to the array and
    the value will be deallocated by the array so only append values
    that can be deallocated safely (no stack variables please!)
    @param[in] index the position at which to insert
    @param[in] value a JSON value to be inserted
    @retval false on success
    @retval true on failure
  */
  bool insert_alias(size_t index, Json_dom_ptr value);
  /**
    Remove the value at this index. A no-op if index is larger than
    size. Deletes the value.
    @param[in] index the index of the value to remove
    @return true if a value was removed, false otherwise.
  */
  bool remove(size_t index);
  /**
    The cardinality of the array (number of values).
    @return the size
  */
  size_t size() const { return m_v.size(); }
  uint32 depth() const override;
  Json_dom_ptr clone() const override;
  /**
    Get the value at position index. The value has not been cloned so
    it is the responsibility of the user to make a copy if needed. Do
    not try to deallocate the returned value - it is owned by the array
    and will be deallocated by it in time. It is admissible to modify
    its contents (in place; without a clone being taken) if it is a
    compound.
    @param[in] index the array index
    @return the value at index
  */
  Json_dom *operator[](size_t index) const {
    // Invariant: every element points back to this array as its parent.
    assert(m_v[index]->parent() == this);
    return m_v[index].get();
  }
  /**
    Remove the values in the array.
  */
  void clear() { m_v.clear(); }
  /// Constant iterator over the elements in the JSON array.
  using const_iterator = decltype(m_v)::const_iterator;
  /// Returns a const_iterator that refers to the first element.
  const_iterator begin() const { return m_v.begin(); }
  /// Returns a const_iterator that refers past the last element.
  const_iterator end() const { return m_v.end(); }
  void replace_dom_in_container(const Json_dom *oldv,
                                Json_dom_ptr newv) override;
  /**
    Sort the array.
    @param cs charset used when comparing string values; nullptr selects
              the default comparison — NOTE(review): confirm exact
              semantics in the out-of-line definition.
  */
  void sort(const CHARSET_INFO *cs = nullptr);
  /// Sort the array using a user-defined comparator.
  template <class T>
  void sort(const T &comparator) {
    std::sort(m_v.begin(), m_v.end(), comparator);
  }
  /**
    Check if the given value appears in the array
    @param val value to look for
    @returns
      true value is found
      false otherwise
    @note the name suggests the array must already be sorted (e.g. via
          #sort()) before calling — NOTE(review): confirm at call sites.
  */
  bool binary_search(Json_dom *val);
  /**
    Sort array and remove duplicate elements.
    Used by multi-value index implementation.
  */
  void remove_duplicates(const CHARSET_INFO *cs);
  // Grants Json_dom access to the array internals (m_v).
  friend Json_dom;
};
/**
  Abstract base class for all Json scalars.
*/
class Json_scalar : public Json_dom {
 public:
  /// A scalar is a leaf node, so its subtree depth is always 1.
  uint32 depth() const final { return 1; }
  /// @return true always: every subclass of Json_scalar is a scalar.
  bool is_scalar() const final { return true; }
};
/**
  Represents a JSON string value (ECMA), of type J_STRING here.
*/
class Json_string final : public Json_scalar {
 private:
  std::string m_string;  //!< the character data held by this scalar
 public:
  /**
    Construct a Json_string object from anything a std::string can be
    constructed from.
    @param args arguments forwarded to std::string's constructors
  */
  template <typename... Args>
  explicit Json_string(Args &&...args)
      : Json_scalar(), m_string(std::forward<Args>(args)...) {}
  enum_json_type json_type() const override { return enum_json_type::J_STRING; }
  /// @return a deep copy of this string value
  Json_dom_ptr clone() const override {
    return create_dom_ptr<Json_string>(m_string);
  }
  /**
    Get the reference to the value of the JSON string.
    @return the string reference
  */
  const std::string &value() const { return m_string; }
  /**
    Get the number of bytes in the string.
    @return the byte count
  */
  size_t size() const { return m_string.size(); }
};
/**
  Abstract base class of all JSON number (ECMA) types (subclasses
  represent MySQL extensions).
*/
class Json_number : public Json_scalar {
 public:
  /// @return true always: every subclass of Json_number is a number.
  bool is_number() const final { return true; }
};
/**
  Represents a MySQL decimal number, type J_DECIMAL.
*/
class Json_decimal final : public Json_number {
 private:
  my_decimal m_dec;  //!< holds the decimal number
 public:
  /// Maximum size of the binary produced by get_binary(): the encoded
  /// decimal plus two leading bytes holding precision and scale
  /// (cf. get_encoded_binary()).
  static const int MAX_BINARY_SIZE = DECIMAL_MAX_FIELD_SIZE + 2;
  /**
    Construct a Json_decimal object.
    @param value the decimal value to wrap
  */
  explicit Json_decimal(const my_decimal &value);
  /**
    Get the number of bytes needed to store this decimal in a Json_opaque.
    @return the number of bytes.
  */
  int binary_size() const;
  /**
    Get the binary representation of the wrapped my_decimal, so that this
    value can be stored inside of a Json_opaque.
    @param dest the destination buffer to which the binary representation
                is written; must have room for at least binary_size() bytes
    @return false on success, true on error
  */
  bool get_binary(char *dest) const;
  enum_json_type json_type() const override {
    return enum_json_type::J_DECIMAL;
  }
  /**
    Get a pointer to the MySQL decimal held by this object. Ownership
    is _not_ transferred.
    @return the decimal
  */
  const my_decimal *value() const { return &m_dec; }
  Json_dom_ptr clone() const override {
    return create_dom_ptr<Json_decimal>(m_dec);
  }
  /**
    Convert a binary value produced by get_binary() back to a my_decimal.
    @details
    This function and the two following ones help the storage engine deal
    with a decimal value in a serialized JSON document. This function
    converts a serialized value to a my_decimal. The latter two extract
    the decimal value from serialized JSON, so the SE can index it in a
    multi-valued index.
    @param[in] bin decimal value in binary format
    @param[in] len length of the binary value
    @param[out] dec my_decimal object to store the value to
    @return false on success, true on failure
  */
  static bool convert_from_binary(const char *bin, size_t len, my_decimal *dec);
  /**
    Returns stored DECIMAL binary
    @param bin serialized Json_decimal object
    @returns
      pointer to the binary decimal value
    @see #convert_from_binary
  */
  static const char *get_encoded_binary(const char *bin) {
    // Skip the two leading bytes that store precision and scale.
    return bin + 2;
  }
  /**
    Returns length of stored DECIMAL binary
    @param length length of serialized Json_decimal object
    @returns
      length of the binary decimal value
    @see #convert_from_binary
  */
  static size_t get_encoded_binary_len(size_t length) {
    // Subtract the two leading bytes that store precision and scale.
    return length - 2;
  }
};
/**
  Represents a MySQL double JSON scalar (an extension of the ECMA
  number value), type J_DOUBLE.
*/
class Json_double final : public Json_number {
 private:
  double m_value;  //!< the wrapped double-precision number
 public:
  /// Wrap a native double as a JSON scalar.
  explicit Json_double(double value) : Json_number(), m_value(value) {}
  enum_json_type json_type() const override { return enum_json_type::J_DOUBLE; }
  /// @return a deep copy of this scalar
  Json_dom_ptr clone() const override {
    return create_dom_ptr<Json_double>(m_value);
  }
  /**
    Return the double value held by this object.
    @return the value
  */
  double value() const { return m_value; }
};
/**
Represents a MySQL integer (64 bits signed) JSON scalar (an extension
of the ECMA number value), type J_INT.
*/
class Json_int final : public Json_number {
private:
longlong m_i; //!< holds the value
public:
explicit Json_int(longlong value) : Json_number(), m_i(value) {}
enum_json_type json_type() const override { return enum_json_type::J_INT; }
/**
Return the signed int held by this object.
@return the value
*/
longlong value() const { return m_i; }
/**
@return true if the number can be held by a 16 bit signed integer
*/
bool is_16bit() const { return INT_MIN16 <= m_i && m_i <= INT_MAX16; }
/**
@return true if the number can be held by a 32 bit signed integer
*/
bool is_32bit() const { return INT_MIN32 <= m_i && m_i <= INT_MAX32; }
Json_dom_ptr clone() const override { return create_dom_ptr<Json_int>(m_i); }
};
/**
Represents a MySQL integer (64 bits unsigned) JSON scalar (an extension
of the ECMA number value), type J_UINT.
*/
class Json_uint final : public Json_number {
private:
ulonglong m_i; //!< holds the value
public:
explicit Json_uint(ulonglong value) : Json_number(), m_i(value) {}
enum_json_type json_type() const override { return enum_json_type::J_UINT; }
/**
Return the unsigned int held by this object.
@return the value
*/
ulonglong value() const { return m_i; }
/**
@return true if the number can be held by a 16 bit unsigned
integer.
*/
bool is_16bit() const { return m_i <= UINT_MAX16; }
/**
@return true if the number can be held by a 32 bit unsigned
integer.
*/
bool is_32bit() const { return m_i <= UINT_MAX32; }
Json_dom_ptr clone() const override { return create_dom_ptr<Json_uint>(m_i); }
};
/**
  Represents a JSON null type (ECMA), type J_NULL here.
*/
class Json_null final : public Json_scalar {
 public:
  enum_json_type json_type() const override { return enum_json_type::J_NULL; }
  /// @return a fresh Json_null instance (null carries no state to copy).
  Json_dom_ptr clone() const override { return create_dom_ptr<Json_null>(); }
};
/**
  Represents a MySQL temporal value (DATE, TIME, DATETIME or TIMESTAMP) -
  an extension to the ECMA set of JSON scalar types, types J_DATE, J_TIME,
  J_DATETIME and J_TIMESTAMP respectively. The method field_type identifies
  which of the four it is. Currently, this is an abstract class with two
  child classes: Json_datetime and Json_time.
*/
class Json_temporal : public Json_scalar {
 public:
  Json_temporal() : Json_scalar() {}
  /**
    @returns One of MYSQL_TYPE_TIME, MYSQL_TYPE_DATE, MYSQL_TYPE_DATETIME
    or MYSQL_TYPE_TIMESTAMP.
  */
  virtual enum_field_types field_type() const = 0;
  /// Datetimes are packed in eight bytes (cf. to_packed/from_packed in
  /// the subclasses).
  static const size_t PACKED_SIZE = 8;
};
/// MySQL TIME value
class Json_time final : public Json_temporal {
 public:
  /**
    Constructs an object to hold a MySQL time value.
    @param time the time value
  */
  explicit Json_time(const Time_val time) : Json_temporal(), m_time(time) {}
  enum_json_type json_type() const override { return enum_json_type::J_TIME; }
  Json_dom_ptr clone() const override;
  /// @returns the time value.
  Time_val value() const { return m_time; }
  /**
    Return what kind of temporal value this object holds.
    @return always MYSQL_TYPE_TIME, since this class only holds TIME values.
  */
  enum_field_types field_type() const override { return MYSQL_TYPE_TIME; }
  /**
    Convert the TIME value to the packed format used for storage.
    @param dest the destination buffer to write the packed time value to
    (must at least have size PACKED_SIZE)
  */
  void to_packed(char *dest) const;
  /**
    Convert a packed time back to a time value.
    @param from the buffer to read from (must have at least PACKED_SIZE bytes)
    @param to the time field to write the value to
  */
  static void from_packed(const char *from, Time_val *to);
#ifdef MYSQL_SERVER
  /**
    Convert a packed time value to a key string for indexing by the SE.
    @param from the buffer to read from
    @param to the destination buffer
    @param dec value's decimals
  */
  static void from_packed_to_key(const char *from, uchar *to, uint8 dec);
#endif
 private:
  /// Holds the time value
  Time_val m_time;
};
/**
  MySQL temporal value that is represented by a MYSQL_TIME struct, ie.
  a DATETIME, TIMESTAMP or DATE value.
*/
class Json_datetime final : public Json_temporal {
 public:
  /**
    Constructs an object to hold a MySQL date/time value.
    @param[in] t the date/time value
    @param[in] ft the field type: must be one of MYSQL_TYPE_DATE,
    MYSQL_TYPE_DATETIME or MYSQL_TYPE_TIMESTAMP.
  */
  Json_datetime(const Datetime_val &t, enum_field_types ft)
      : Json_temporal(), m_t(t), m_field_type(ft) {
    // TIME values are represented by Json_time, never by this class.
    assert(ft != MYSQL_TYPE_TIME);
  }
  enum_json_type json_type() const override;
  Json_dom_ptr clone() const override;
  /**
    @returns a pointer to the date/time value. Ownership is _not_ transferred.
    To identify which temporal type the value represents, use @c field_type.
  */
  const Datetime_val *value() const { return &m_t; }
  enum_field_types field_type() const override { return m_field_type; }
  /**
    Convert the datetime to the packed format used for storage.
    @param dest the destination buffer to write the packed datetime to
    (must at least have size PACKED_SIZE)
  */
  void to_packed(char *dest) const;
  /**
    Convert a packed datetime back to a MYSQL_TIME.
    @param from the buffer to read from (must have at least PACKED_SIZE bytes)
    @param ft the field type of the value
    @param to the MYSQL_TIME to write the value to
  */
  static void from_packed(const char *from, enum_field_types ft,
                          MYSQL_TIME *to);
#ifdef MYSQL_SERVER
  /**
    Convert a packed datetime to a key string for indexing by the SE.
    @param from the buffer to read from
    @param ft the field type of the value
    @param to the destination buffer
    @param dec value's decimals
  */
  static void from_packed_to_key(const char *from, enum_field_types ft,
                                 uchar *to, uint8 dec);
#endif
 private:
  Datetime_val m_t;               //!< holds the date/time value
  enum_field_types m_field_type;  //!< identifies which type of date/time
};
/**
  Represents a MySQL value opaquely, i.e. the Json DOM can not
  serialize or deserialize these values. This should be used to store
  values that don't map to the other Json_scalar classes. Using the
  "to_string" method on such values (via Json_wrapper) will yield a base
  64 encoded string tagged with the MySQL type with this syntax:
  "base64:typeXX:<base 64 encoded value>"
*/
class Json_opaque final : public Json_scalar {
 private:
  enum_field_types m_mytype;  //!< the MySQL type of the wrapped value
  std::string m_val;          //!< the raw bytes of the wrapped value
 public:
  /**
    An opaque MySQL value.
    @param[in] mytype  the MySQL type of the value
    @param[in] args    arguments to construct the binary value to be stored
                       in the DOM (anything accepted by the std::string
                       constructors)
    @see #enum_field_types
    @see Class documentation
  */
  template <typename... Args>
  explicit Json_opaque(enum_field_types mytype, Args &&...args)
      : Json_scalar(), m_mytype(mytype), m_val(std::forward<Args>(args)...) {}
  enum_json_type json_type() const override { return enum_json_type::J_OPAQUE; }
  /**
    @return a pointer to the opaque value. Use #size() to get its size.
  */
  const char *value() const { return m_val.data(); }
  /**
    @return the MySQL type of the value
  */
  enum_field_types type() const { return m_mytype; }
  /**
    @return the size in bytes of the value
  */
  size_t size() const { return m_val.size(); }
  Json_dom_ptr clone() const override;
};
/**
  Represents a JSON true or false value, type J_BOOLEAN here.
*/
class Json_boolean final : public Json_scalar {
 private:
  bool m_value;  //!< the wrapped truth value (the eponymous JSON literal)
 public:
  /// Wrap a C++ bool as a JSON boolean literal.
  explicit Json_boolean(bool value) : Json_scalar(), m_value(value) {}
  enum_json_type json_type() const override {
    return enum_json_type::J_BOOLEAN;
  }
  /**
    @return false for JSON false, true for JSON true
  */
  bool value() const { return m_value; }
  /// @return a deep copy of this literal
  Json_dom_ptr clone() const override {
    return create_dom_ptr<Json_boolean>(m_value);
  }
};
/**
  Perform quoting on a JSON string to make an external representation
  of it. It wraps double quotes (text quotes) around the string (cptr)
  and also performs escaping according to the following table:
  <pre>
  @verbatim
  Common name     C-style  Original unescaped     Transformed to
                  escape   UTF-8 bytes            escape sequence
                  notation                        in UTF-8 bytes
  ---------------------------------------------------------------
  quote           \"       %x22                   %x5C %x22
  backslash       \\       %x5C                   %x5C %x5C
  backspace       \b       %x08                   %x5C %x62
  formfeed        \f       %x0C                   %x5C %x66
  linefeed        \n       %x0A                   %x5C %x6E
  carriage-return \r       %x0D                   %x5C %x72
  tab             \t       %x09                   %x5C %x74
  unicode         \uXXXX   A hex number in the    %x5C %x75
                           range of 00-1F,        followed by
                           except for the ones    4 hex digits
                           handled above (backspace,
                           formfeed, linefeed,
                           carriage-return,
                           and tab).
  ---------------------------------------------------------------
  @endverbatim
  </pre>
  @param[in] cptr pointer to string data
  @param[in] length the length of the string
  @param[in,out] buf the destination buffer
  @retval false on success
  @retval true on error
*/
bool double_quote(const char *cptr, size_t length, String *buf);
/**
  Merge two doms. The right dom is either subsumed into the left dom
  or the contents of the right dom are transferred to the left dom
  and the right dom is deleted. After calling this function, the
  caller should not reference the right dom again. It has been
  deleted.
  Returns NULL if there is a memory allocation failure. In this case
  both doms are deleted.
  scalars - If any of the documents that are being merged is a scalar,
  each scalar document is autowrapped as a single value array before merging.
  arrays - When merging a left array with a right array,
  then the result is the left array concatenated
  with the right array. For instance, [ 1, 2 ] merged with [ 3, 4 ]
  is [ 1, 2, 3, 4 ].
  array and object - When merging an array with an object,
  the object is autowrapped as an array and then the rule above
  is applied. So [ 1, 2 ] merged with { "a" : true }
  is [ 1, 2, { "a": true } ].
  objects - When merging two objects, the two objects are concatenated
  into a single, larger object. So { "a" : "foo" } merged with { "b" : 5 }
  is { "a" : "foo", "b" : 5 }.
  duplicates - When two objects are merged and they share a key,
  the values associated with the shared key are merged.
  @param [in,out] left The recipient dom.
  @param [in,out] right The dom to be consumed
  @return A composite dom which subsumes the left and right doms, or NULL
  if a failure happened while merging
*/
Json_dom_ptr merge_doms(Json_dom_ptr left, Json_dom_ptr right);
// Single-byte type tags for JSON values. NOTE(review): presumably used
// when encoding JSON values into sort/index keys — confirm at call sites.
constexpr uchar JSON_KEY_NULL = '\x00';
constexpr uchar JSON_KEY_OBJECT = '\x05';
constexpr uchar JSON_KEY_ARRAY = '\x06';
constexpr uchar JSON_KEY_FALSE = '\x07';
constexpr uchar JSON_KEY_TRUE = '\x08';
/**
Abstraction for accessing JSON values irrespective of whether they
are (started out as) binary JSON values or JSON DOM values. The
purpose of this is to allow uniform access for callers. It allows us
to access binary JSON values without necessarily building a DOM (and
thus having to read the entire value unless necessary, e.g. for
accessing only a single array slot or object field).
Instances of this class are usually created on the stack. In some
cases instances are cached in an Item and reused, in which case they
are allocated from query-duration memory (by allocating them on a
MEM_ROOT).
*/
class Json_wrapper {
private:
/*
A Json_wrapper wraps either a Json_dom or a json_binary::Value,
never both at the same time.
*/
union {
/// The DOM representation, only used if m_is_dom is true.
struct {
Json_dom *m_value;
/// If true, don't deallocate m_dom_value in destructor.
bool m_alias;
} m_dom;
/// The binary representation, only used if m_is_dom is false.
json_binary::Value m_value;
};
bool m_is_dom; //!< Wraps a DOM iff true
public:
/**
Get the wrapped datetime value in the packed format.
@param[in,out] buffer a char buffer with space for at least
Json_datetime::PACKED_SIZE characters
@return a char buffer that contains the packed representation of the
datetime (may or may not be the same as buffer)
*/
const char *get_datetime_packed(char *buffer) const;
const char *get_time_packed(char *buffer) const;
/**
Create an empty wrapper. Cf #empty().
*/
Json_wrapper() : m_dom{nullptr, true}, m_is_dom(true) {}
/**
Wrap the supplied DOM value (no copy taken). The wrapper takes
ownership, unless alias is true or @c set_alias is called after
construction.
In the latter case the lifetime of the DOM is determined by
the owner of the DOM, so clients need to ensure that that
    lifetime is sufficient, lest dead storage is accessed.
@param[in,out] dom_value the DOM value
@param alias Whether the wrapper is an alias to DOM
*/
explicit Json_wrapper(Json_dom *dom_value, bool alias = false);
/**
Wrap the supplied DOM value. The wrapper takes over the ownership.
*/
explicit Json_wrapper(Json_dom_ptr dom_value)
: Json_wrapper(dom_value.release()) {}
/**
Only meaningful iff the wrapper encapsulates a DOM. Marks the
wrapper as not owning the DOM object, i.e. it will not be
deallocated in the wrapper's destructor. Useful if one wants a wrapper
around a DOM owned by someone else.
*/
void set_alias() { m_dom.m_alias = true; }
/**
Wrap a binary value. Does not copy the underlying buffer, so
    lifetime is limited to that of the supplied value.
@param[in] value the binary value
*/
explicit Json_wrapper(const json_binary::Value &value);
/**
Copy constructor. Does a deep copy of any owned DOM. If a DOM
    is not owned (aliased), the copy will also be aliased.
*/
Json_wrapper(const Json_wrapper &old);
/**
Move constructor. Take over the ownership of the other wrapper's
DOM, unless it's aliased. If the other wrapper is aliased, this
wrapper becomes an alias too. Any already owned DOM will be
deallocated.
@param old the wrapper whose contents to take over
*/
Json_wrapper(Json_wrapper &&old) noexcept;
/**
Assignment operator. Does a deep copy of any owned DOM. If a DOM
    is not owned (aliased), the copy will also be aliased. Any owned
DOM in the left side will be deallocated.
*/
Json_wrapper &operator=(const Json_wrapper &old);
/**
Move-assignment operator. Take over the ownership of the other
wrapper's DOM, unless it's aliased. If the other wrapper is
aliased, this wrapper becomes an alias too. Any already owned DOM
will be deallocated.
@param old the wrapper whose contents to take over
*/
Json_wrapper &operator=(Json_wrapper &&old) noexcept;
~Json_wrapper();
/**
A Wrapper is defined to be empty if it is passed a NULL value with the
constructor for JSON dom, or if the default constructor is used.
@return true if the wrapper is empty.
*/
bool empty() const { return m_is_dom && !m_dom.m_value; }
/**
Does this wrapper contain a DOM?
@retval true if the wrapper contains a DOM representation
@retval false if the wrapper contains a binary representation
*/
bool is_dom() const { return m_is_dom; }
/**
Get the wrapped contents in DOM form. The DOM is (still) owned by the
wrapper. If this wrapper originally held a value, it is now converted
to hold (and eventually release) the DOM version.
@return pointer to a DOM object, or NULL if the DOM could not be allocated
*/
Json_dom *to_dom();
/**
Gets a pointer to the wrapped Json_dom object, if this wrapper holds a DOM.
If is_dom() returns false, the result of calling this function is undefined.
*/
const Json_dom *get_dom() const {
assert(m_is_dom);
return m_dom.m_value;
}
/**
Gets the wrapped json_binary::Value object, if this wrapper holds a binary
JSON value. If is_dom() returns true, the result of calling this function is
undefined.
*/
const json_binary::Value &get_binary_value() const {
assert(!m_is_dom);
return m_value;
}
/**
Get the wrapped contents in DOM form. Same as to_dom(), except it returns
a clone of the original DOM instead of the actual, internal DOM tree.
@return pointer to a DOM object, or NULL if the DOM could not be allocated
*/
Json_dom_ptr clone_dom() const;
/**
Get the wrapped contents in binary value form.
@param error_handler a handler that is invoked if an error occurs
@param[in,out] str a string that will be filled with the binary value
@retval false on success
@retval true on error
*/
bool to_binary(const JsonSerializationErrorHandler &error_handler,
String *str) const;
/**
Check if the wrapped JSON document is a binary value (a
json_binary::Value), and if that binary is pointing to data stored in the
given string.
This function can be used to check if overwriting the data in the string
might overwrite and corrupt the document contained in this wrapper.
@param str a string which contains JSON binary data
@retval true if the string contains data that the wrapped document
points to from its json_binary::Value representation
@retval false otherwise
*/
bool is_binary_backed_by(const String *str) const {
return !m_is_dom && m_value.is_backed_by(str);
}
/**
Format the JSON value to an external JSON string in buffer in
the format of ISO/IEC 10646.
@param[in,out] buffer the formatted string is appended, so make sure
the length is set correctly before calling
@param[in] json_quoted if the JSON value is a string and json_quoted
is false, don't perform quoting on the string.
This is only used by JSON_UNQUOTE.
@param[in] func_name The name of the function that called to_string().
@return false formatting went well, else true
*/
bool to_string(String *buffer, bool json_quoted, const char *func_name,
const JsonErrorHandler &depth_handler) const;
/**
Print this JSON document to the debug trace.
@param[in] message If given, the JSON document is prefixed with
this message.
*/
void dbug_print(const char *message,
const JsonErrorHandler &depth_handler) const;
/**
Format the JSON value to an external JSON string in buffer in the format of
ISO/IEC 10646. Add newlines and indentation for readability.
@param[in,out] buffer the buffer that receives the formatted string
(the string is appended, so make sure the length
is set correctly before calling)
@param[in] func_name the name of the calling function
@retval false on success
@retval true on error
*/
bool to_pretty_string(String *buffer, const char *func_name,
const JsonErrorHandler &depth_handler) const;
// Accessors
/**
Return the type of the wrapped JSON value
@return the type, or Json_dom::J_ERROR if the wrapper does not contain
a JSON value
*/
enum_json_type type() const;
/**
Return the MYSQL type of the opaque value, see #type(). Valid for
J_OPAQUE. Calling this method if the type is not J_OPAQUE will give
undefined results.
@return the type
*/
enum_field_types field_type() const;
/**
If this wrapper holds a JSON array, get an array value by indexing
into the array. Valid for J_ARRAY. Calling this method if the type is
not J_ARRAY will give undefined results.
@return the array value
*/
Json_wrapper operator[](size_t index) const;
/**
If this wrapper holds a JSON object, get the value corresponding
to the member key. Valid for J_OBJECT. Calling this method if the type is
not J_OBJECT will give undefined results.
@param[in] key name for identifying member
@return The member value. If there is no member with the specified
name, a value with type Json_dom::J_ERROR is returned.
*/
Json_wrapper lookup(std::string_view key) const;
/**
Get a pointer to the data of a JSON string or JSON opaque value.
The data is still owner by the wrapper. The data may not be null
terminated, so use in conjunction with @c get_data_length.
Valid for J_STRING and J_OPAQUE. Calling this method if the type is
not one of those will give undefined results.
@return the pointer
*/
const char *get_data() const;
/**
Get the length to the data of a JSON string or JSON opaque value.
Valid for J_STRING and J_OPAQUE. Calling this method if the type is
not one of those will give undefined results.
@return the length
*/
size_t get_data_length() const;
/**
Get the MySQL representation of a JSON decimal value.
Valid for J_DECIMAL. Calling this method if the type is
not J_DECIMAL will give undefined results.
@param[out] d the decimal value
@return false on success, true on failure (which would indicate an
internal error)
*/
bool get_decimal_data(my_decimal *d) const;
/**
Get the value of a JSON double number.
Valid for J_DOUBLE. Calling this method if the type is
not J_DOUBLE will give undefined results.
@return the value
*/
double get_double() const;
/**
Get the value of a JSON signed integer number.
Valid for J_INT. Calling this method if the type is
not J_INT will give undefined results.
@return the value
*/
longlong get_int() const;
/**
Get the value of a JSON unsigned integer number.
Valid for J_UINT. Calling this method if the type is
not J_UINT will give undefined results.
@return the value
*/
ulonglong get_uint() const;
/**
Get the value of a JSON date/time value. Valid for J_DATETIME, J_DATE
and J_TIMESTAMP. Calling this method if the type is not one of those
will give undefined results.
@param[out] t the date/time value
*/
void get_datetime(MYSQL_TIME *t) const;
/**
Get the value of a JSON time value. Valid for J_TIME.
Calling this method if the type is not J_TIME will give undefined results.
@param[out] t the date/time value
*/
void get_time(Time_val *t) const;
/**
Get a boolean value (a JSON true or false literal).
Valid for J_BOOLEAN. Calling this method if the type is
not J_BOOLEAN will give undefined results.
@return the value
*/
bool get_boolean() const;
/**
Finds all of the json sub-documents which match the path expression.
Puts the matches on an evolving vector of results.
This is a bit inefficient for binary wrappers because you can't
build up a binary array incrementally from its cells. Instead, you
have to turn each cell into a dom and then add the doms to a
dom array.
Calling this if #empty() returns true is an error.
Special care must be taken when the path expression contains more than one
ellipsis (**) token. That is because multiple paths with ellipses may
identify the same value. Consider the following document:
{ "a": { "x" : { "b": { "y": { "b": { "z": { "c": 100 } } } } } } }
The innermost value (the number 100) has the following unique,
non-wildcarded address:
$.a.x.b.y.b.z.c
That location is reached by both of the following paths which include
the ellipsis token:
$.a.x.b**.c
$.a.x.b.y.b**.c
And those addresses both satisfy the following path expression which has
two ellipses:
$.a**.b**.c
In this case, we only want to return one instance of $.a.x.b.y.b.z.c
Similarly, special care must be taken if an auto-wrapping array
path leg follows an ellipsis. Consider the following document:
{ "a": { "b" : [ 1, 2, 3 ] } }
The first element of the array (the number 1) can be reached with
either of these two non-wildcarded addresses, due to array auto-wrapping:
$.a.b[0]
$.a.b[0][0]
Both of those addresses match the following path expression, which
has an ellipsis followed by an auto-wrapping path leg:
$**[0]
@param[in] path the (possibly wildcarded) address of the sub-documents
@param[in] legs the number of legs to use from @a path
@param[out] hits the result of the search
    @param[in] auto_wrap true if we match a final scalar with search for [0]
@param[in] only_need_one True if we can stop after finding one match
@retval false on success
@retval true on error
*/
bool seek(const Json_seekable_path &path, size_t legs,
Json_wrapper_vector *hits, bool auto_wrap, bool only_need_one);
/**
Compute the length of a document. This is the value which would be
returned by the JSON_LENGTH() system function. So, this returns
- for scalar values: 1
- for objects: the number of members
- for arrays: the number of cells
@returns 1, the number of members, or the number of cells
*/
size_t length() const;
/**
Compare this JSON value to another JSON value.
@param[in] other the other JSON value
@param[in] cs if given, this charset will be used in comparison of
string values
@retval -1 if this JSON value is less than the other JSON value
@retval 0 if the two JSON values are equal
@retval 1 if this JSON value is greater than the other JSON value
*/
int compare(const Json_wrapper &other,
const CHARSET_INFO *cs = nullptr) const;
/**
Extract an int (signed or unsigned) from the JSON if possible
coercing if need be.
@param[in] error_handler function to be called on conversion errors
@param[out] err true <=> error occur during coercion
@param[out] unsigned_flag Whether the value read from JSON data is
unsigned
@returns json value coerced to int
*/
longlong coerce_int(const JsonCoercionHandler &error_handler, bool *err,
bool *unsigned_flag) const;
/// Shorthand for coerce_int(error_handler, nullptr, nullptr).
longlong coerce_int(const JsonCoercionHandler &error_handler) const {
return coerce_int(error_handler, nullptr, nullptr);
}
/**
Extract a real from the JSON if possible, coercing if need be.
@param[in] error_handler function to be called on conversion errors
@param[out] err true <=> error occur during coercion
@returns json value coerced to real
*/
double coerce_real(const JsonCoercionHandler &error_handler, bool *err) const;
/// Shorthand for coerce_real(error_handler, nullptr).
double coerce_real(const JsonCoercionHandler &error_handler) const {
return coerce_real(error_handler, nullptr);
}
/**
Extract a decimal from the JSON if possible, coercing if need be.
@param[in] error_handler function to be called on conversion errors
@param[in,out] decimal_value a value buffer
@param[out] err true <=> an error occurred during coercion
@returns json value coerced to decimal
*/
my_decimal *coerce_decimal(const JsonCoercionHandler &error_handler,
my_decimal *decimal_value, bool *err) const;
/// Shorthand for coerce_decimal(error_handler, decimal_value, nullptr).
/// @param[in] error_handler function to be called on conversion errors
/// @param[in,out] decimal_value a value buffer
/// @returns pointer to the decimal result; errors are not reported back
my_decimal *coerce_decimal(const JsonCoercionHandler &error_handler,
                           my_decimal *decimal_value) const {
  return coerce_decimal(error_handler, decimal_value, nullptr);
}
/**
Extract a date from the JSON if possible, coercing if need be.
@param[in] error_handler function to be called on conversion errors
@param[in] deprecation_checker function to be called to check for
deprecated datetime format in ltime
@param[in,out] date a value buffer
@param[in] flags Flags to use for string -> date conversion
@returns json value coerced to date
*/
bool coerce_date(const JsonCoercionHandler &error_handler,
const JsonCoercionDeprecatedHandler &deprecation_checker,
Date_val *date, my_time_flags_t flags = 0) const;
/**
Extract a time value from the JSON if possible, coercing if need be.
@param[in] error_handler function to be called on conversion errors
@param[in] deprecation_checker function to be called to check for
deprecated datetime format in ltime
@param[in,out] time a value buffer
@returns json value coerced to time
*/
bool coerce_time(const JsonCoercionHandler &error_handler,
const JsonCoercionDeprecatedHandler &deprecation_checker,
Time_val *time) const;
/**
Extract a datetime from the JSON if possible, coercing if need be.
@param[in] error_handler function to be called on conversion errors
@param[in] deprecation_checker function to be called to check for
deprecated datetime format in ltime
@param[in,out] dt a value buffer
@param[in] flags Flags to use for string -> date conversion
@returns json value coerced to date
*/
bool coerce_datetime(const JsonCoercionHandler &error_handler,
const JsonCoercionDeprecatedHandler &deprecation_checker,
Datetime_val *dt, my_time_flags_t flags = 0) const;
/**
Make a sort key that can be used by filesort to order JSON values.
@param[out] to a buffer to which the sort key is written
@param[in] length the length of the sort key
@details Key storage format is as follows:
@verbatim
|<json type>< sort key >|
1 byte / variable length /
@endverbatim
JSON is assumed to be non-sql-null and valid (checked by caller).
Key length contains full length - the len prefix itself, json type and the
sort key.
All numeric types are stored as a number, without distinction to
double/decimal/int/etc. See @c make_json_numeric_sort_key().
Same is done to DATETIME and TIMESTAMP types.
For string and opaque types only the prefix that fits into the output buffer
is stored.
For JSON objects and arrays only their length (number of elements) is
stored, this is a limitation of current implementation.
*/
size_t make_sort_key(uchar *to, size_t length) const;
/**
Make a hash key that can be used by sql_executor.cc/unique_hash
in order to support SELECT DISTINCT
@param[in] hash_val An initial hash value.
*/
ulonglong make_hash_key(ulonglong hash_val) const;
void make_hash_key_common(Json_wrapper_hasher &hash_key) const;
/**
Calculate the amount of unused space inside a JSON binary value.
@param[in] error_handler the handler that is invoked if an error occurs
@param[out] space the amount of unused space, or zero if this is a DOM
@return false on success
@return true if the JSON binary value was invalid
*/
bool get_free_space(const JsonSerializationErrorHandler &error_handler,
size_t *space) const;
#ifdef MYSQL_SERVER
/**
Attempt a binary partial update by replacing the value at @a path with @a
new_value. On successful completion, the updated document will be available
in @a result, and this Json_wrapper will point to @a result instead of the
original binary representation. The modifications that have been applied,
will also be collected as binary diffs, which can be retrieved via
TABLE::get_binary_diffs().
@param field the column being updated
@param path the path of the value to update
@param new_value the new value
@param replace true if we use JSON_REPLACE semantics
@param[in,out] result buffer that holds the updated JSON document (is
empty if no partial update has been performed on
this Json_wrapper so far, or contains the binary
representation of the document in this wrapper
otherwise)
@param[out] partially_updated gets set to true if partial update was
successful, also if it was a no-op
@param[out] replaced_path gets set to true if the path was replaced,
will be false if this update is a no-op
@retval false if the update was successful, or if it was determined
that a full update was needed
@retval true if an error occurred
*/
bool attempt_binary_update(const Field_json *field,
const Json_seekable_path &path,
Json_wrapper *new_value, bool replace,
String *result, bool *partially_updated,
bool *replaced_path);
/**
Remove a path from a binary JSON document. On successful completion, the
updated document will be available in @a result, and this Json_wrapper will
point to @a result instead of the original binary representation. The
modifications that have been applied, will also be collected as binary
diffs, which can be retrieved via TABLE::get_binary_diffs().
@param field the column being updated
@param path the path to remove from the document
@param[in,out] result buffer that holds the updated JSON document (is
empty if no partial update has been performed on
this Json_wrapper so far, or contains the binary
representation of the document in this wrapper
otherwise)
@param[out] found_path gets set to true if the path is found in the
document, false otherwise
@retval false if the value was successfully updated
@retval true if an error occurred
*/
bool binary_remove(const Field_json *field, const Json_seekable_path &path,
String *result, bool *found_path);
#endif // ifdef MYSQL_SERVER
/**
Sort contents. Applicable to JSON arrays only.
*/
void sort(const CHARSET_INFO *cs = nullptr);
/**
Remove duplicate values. Applicable to JSON arrays only, array will be
sorted.
*/
void remove_duplicates(const CHARSET_INFO *cs = nullptr);
};
/**
Class that iterates over all members of a JSON object that is wrapped in a
Json_wrapper instance.
*/
class Json_wrapper_object_iterator {
 public:
  // Type aliases required by ForwardIterator.
  using value_type = std::pair<std::string_view, Json_wrapper>;
  using reference = const value_type &;
  using pointer = const value_type *;
  using difference_type = ptrdiff_t;
  using iterator_category = std::forward_iterator_tag;

  /**
    Creates an iterator that iterates over all members of the given
    Json_wrapper, if it wraps a JSON object. If the wrapper does not wrap a
    JSON object, the result is undefined.

    @param wrapper the Json_wrapper to iterate over
    @param begin true to construct an iterator that points to the first member
                 of the object, false to construct a past-the-end iterator
  */
  Json_wrapper_object_iterator(const Json_wrapper &wrapper, bool begin);

  /// Forward iterators must be default constructible.
  /// NOTE(review): a default-constructed iterator leaves m_binary_value and
  /// m_current_element_index uninitialized; assign to it before any use.
  Json_wrapper_object_iterator() = default;

  /// Advances the iterator to the next element.
  Json_wrapper_object_iterator &operator++() {
    if (is_dom())
      ++m_iter;
    else
      ++m_current_element_index;
    // The cached key/value pair refers to the previous element; it is
    // recomputed lazily on the next dereference.
    m_current_member_initialized = false;
    return *this;
  }

  /**
    Advances the iterator to the next element and returns an iterator that
    points to the current element (post-increment operator).
  */
  const Json_wrapper_object_iterator operator++(int) {
    Json_wrapper_object_iterator copy = *this;
    ++(*this);
    return copy;
  }

  /// Checks two iterators for equality. Both operands are assumed to iterate
  /// over the same representation (both DOM or both binary); only the state
  /// relevant to the left-hand side's representation is compared.
  bool operator==(const Json_wrapper_object_iterator &other) const {
    return is_dom() ? m_iter == other.m_iter
                    : m_current_element_index == other.m_current_element_index;
  }

  /// Checks two iterators for inequality.
  bool operator!=(const Json_wrapper_object_iterator &other) const {
    return !(*this == other);
  }

  /// Returns a pointer to the current key/value pair, initializing the
  /// cached pair on first access after construction or increment.
  pointer operator->() {
    if (!m_current_member_initialized) initialize_current_member();
    return &m_current_member;
  }

  reference operator*() { return *this->operator->(); }

 private:
  /// Pair holding the key and value of the member pointed to by the iterator.
  value_type m_current_member;

  /// True if #m_current_member is initialized.
  bool m_current_member_initialized{false};

  /// The binary JSON object being iterated over, or nullptr for DOMs.
  const json_binary::Value *m_binary_value;

  /// The index of the current member in the binary JSON object.
  size_t m_current_element_index;

  /// Iterator pointing to the current member in the JSON DOM object.
  Json_object::const_iterator m_iter;

  /// Returns true if iterating over a DOM.
  bool is_dom() const { return m_binary_value == nullptr; }

  /// Fill #m_current_member with the key and value of the current member.
  void initialize_current_member();
};
/**
A wrapper over a JSON object which provides an interface that can be iterated
over with a for-each loop.
*/
class Json_object_wrapper {
 public:
  using const_iterator = Json_wrapper_object_iterator;

  explicit Json_object_wrapper(const Json_wrapper &wrapper)
      : m_wrapper(wrapper) {}

  /// Iterator to the first member of the wrapped object.
  const_iterator begin() const { return const_iterator(m_wrapper, true); }
  /// Past-the-end iterator for the wrapped object.
  const_iterator end() const { return const_iterator(m_wrapper, false); }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

 private:
  /// The wrapped JSON object; the referenced wrapper must outlive this
  /// adapter, since only a reference is held.
  const Json_wrapper &m_wrapper;
};
/**
Check if a string contains valid JSON text, without generating a
Json_dom representation of the document.
@param[in] text pointer to the beginning of the string
@param[in] length the length of the string
@return true if the string is valid JSON text, false otherwise
*/
bool is_valid_json_syntax(const char *text, size_t length);
/**
A class that is capable of holding objects of any sub-type of
Json_scalar. Used for pre-allocating space in query-duration memory
for JSON scalars that are to be returned by get_json_atom_wrapper().
This class should be replaced by std::variant when moving to C++17.
*/
class Json_scalar_holder {
  /// Union of all concrete subclasses of Json_scalar.
  union Any_json_scalar {
    Json_string m_string;
    Json_decimal m_decimal;
    Json_int m_int;
    Json_uint m_uint;
    Json_double m_double;
    Json_boolean m_boolean;
    Json_null m_null;
    Json_time m_time;
    Json_datetime m_datetime;
    Json_opaque m_opaque;
    /// Constructor which initializes the union to hold a Json_null value.
    Any_json_scalar() : m_null() {}
    /// Destructor which delegates to Json_scalar's virtual destructor.
    ~Any_json_scalar() {
      // All members have the same address, and all members are sub-types of
      // Json_scalar, so we can take the address of an arbitrary member and
      // convert it to Json_scalar.
      Json_scalar *scalar = &m_null;
      scalar->~Json_scalar();
    }
  };

  /// The buffer in which the Json_scalar value is stored.
  Any_json_scalar m_buffer;

  /// Pointer to the held scalar, or nullptr if no value is held.
  Json_scalar *m_scalar_ptr{nullptr};

 public:
  /// Get a pointer to the held object, or nullptr if there is none.
  Json_scalar *get() { return m_scalar_ptr; }

  /**
    Construct a new Json_scalar value in this Json_scalar_holder.
    If a value is already held, the old value is destroyed and replaced.
    @tparam T which type of Json_scalar to create
    @param args the arguments to T's constructor
  */
  template <typename T, typename... Args>
  void emplace(Args &&...args) {
    static_assert(std::is_base_of<Json_scalar, T>::value, "Not a Json_scalar");
    static_assert(sizeof(T) <= sizeof(m_buffer), "Buffer is too small");
    // Destroy whatever currently lives in the buffer (all union members share
    // the same address, so &m_buffer.m_null names the active object), then
    // placement-new the requested type into the same storage.
    m_scalar_ptr = &m_buffer.m_null;
    m_scalar_ptr->~Json_scalar();
    ::new (m_scalar_ptr) T(std::forward<Args>(args)...);
  }
};
/**
Check if one Json_wrapper contains all the elements of another
Json_wrapper.
@param[in] doc_wrapper the containing document
@param[in] containee_wr the possibly contained document
@param[out] result true if doc_wrapper contains containee_wr,
false otherwise
@retval false on success
@retval true on failure
*/
bool json_wrapper_contains(const Json_wrapper &doc_wrapper,
const Json_wrapper &containee_wr, bool *result);
/// Returns the name of the type of the JSON document contained in "doc".
std::string_view json_type_name(const Json_wrapper &doc);
/// The maximum length of the type name returned from JSON_TYPE.
extern const size_t kMaxJsonTypeNameLength;
#endif /* JSON_DOM_INCLUDED */ | c | github | https://github.com/mysql/mysql-server | sql-common/json_dom.h |
#!/usr/bin/env python3
import os
import tarfile
from urllib.request import urlopen
from urllib.error import URLError
from kiwixbuild.versions import base_deps_meta_version
from common import (
print_message,
run_kiwix_build,
upload,
make_deps_archive,
HOME,
PLATFORM_TARGET,
OS_NAME,
)
def download_base_archive(base_name):
    """Download http://tmp.kiwix.org/ci/<base_name> into HOME.

    Prints a dot per downloaded chunk as a progress indicator and returns
    the path of the downloaded file as a string.
    """
    url = "http://tmp.kiwix.org/ci/{}".format(base_name)
    file_path = str(HOME / base_name)
    chunk_size = 1024 * 1024 * 8
    with urlopen(url) as resource, open(file_path, "wb") as file:
        # iter() keeps calling read() until it returns the EOF sentinel b"".
        for chunk in iter(lambda: resource.read(chunk_size), b""):
            print(".", end="", flush=True)
            file.write(chunk)
    return file_path
ARCHIVE_NAME_TEMPLATE = "base_deps2_{os}_{platform}_{version}.tar.xz"


def _fetch_and_extract(archive_name):
    """Download *archive_name* from the CI cache, unpack it into HOME and
    delete the local tarball.  Propagates URLError if the download fails."""
    local_filename = download_base_archive(archive_name)
    # NOTE(review): extractall() trusts the member paths inside the archive;
    # acceptable for our own CI cache, but never point this at untrusted
    # archives (tarfile path-traversal).
    with tarfile.open(local_filename) as f:
        f.extractall(str(HOME))
    os.remove(str(local_filename))


# Flatpak uses a per-OS archive name without platform/version components.
if PLATFORM_TARGET == "flatpak":
    base_dep_archive_name = "base_deps2_{}_flatpak.tar.xz".format(OS_NAME)
else:
    base_dep_archive_name = ARCHIVE_NAME_TEMPLATE.format(
        os=OS_NAME,
        platform=PLATFORM_TARGET,
        version=base_deps_meta_version,
    )

print_message("Getting archive {}", base_dep_archive_name)
try:
    _fetch_and_extract(base_dep_archive_name)
except URLError:
    print_message("Cannot get archive. Build dependencies")
    if PLATFORM_TARGET == "android":
        # Android builds also want the per-arch dependency archives; each one
        # is best-effort, a missing archive is simply skipped.
        for arch in ("arm", "arm64", "x86", "x86_64"):
            archive_name = ARCHIVE_NAME_TEMPLATE.format(
                os=OS_NAME,
                platform="android_{}".format(arch),
                version=base_deps_meta_version,
            )
            print_message("Getting archive {}", archive_name)
            try:
                _fetch_and_extract(archive_name)
            except URLError:
                pass
    elif PLATFORM_TARGET == "flatpak":
        print_message("Cannot get archive. Move on")
    else:
        # No cached archive: build everything, then publish the result so the
        # next CI run can download it instead of rebuilding.
        run_kiwix_build("alldependencies", platform=PLATFORM_TARGET)
        archive_file = make_deps_archive(name=base_dep_archive_name, full=True)
        upload(archive_file, "ci@tmp.kiwix.org", "/data/tmp/ci")
        os.remove(str(archive_file))
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import random
import itertools
import paddle.fluid.core as core
import collections
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder
def randomize_probability(batch_size, class_num, dtype='float32'):
    """Return a (batch_size, class_num) array of random probabilities.

    Entries are drawn uniformly from [0.1, 1.0) and each row is normalized
    to sum to 1, so every row is a valid categorical distribution.

    Args:
        batch_size: number of rows (independent distributions).
        class_num: number of classes per row.
        dtype: numpy dtype of the returned array.
    """
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    # Vectorized row normalization; replaces the former per-row Python loop,
    # which also relied on Python-2-only xrange().
    prob /= prob.sum(axis=1, keepdims=True)
    return prob
def create_op(scope, op_type, inputs, outputs, attrs):
    """Build an Operator of *op_type*, creating a scope variable for every
    input/output slot the op declares and the caller supplied."""
    kwargs = dict()

    def _register(slot_name, var_name):
        # Materialize the variable (and its tensor) in the scope and record
        # the name under the op argument it belongs to.
        scope.var(var_name).get_tensor()
        kwargs[slot_name].append(var_name)

    def _collect(proto_slots, value_map):
        # Shared handling for the op's declared input and output slots.
        for slot_name, is_duplicable in proto_slots:
            if slot_name not in value_map:
                continue
            kwargs[slot_name] = []
            if is_duplicable:
                for item in value_map[slot_name]:
                    sub_name, _ = item[0], item[1]
                    _register(slot_name, sub_name)
            else:
                _register(slot_name, slot_name)

    _collect(Operator.get_op_inputs(op_type), inputs)
    _collect(Operator.get_op_outputs(op_type), outputs)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]

    return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
    """Copy the python-side input values in *inputs* into the scope's
    variables for every input slot declared by *op*."""

    def _assign(var_name, value):
        if isinstance(value, (tuple, np.ndarray)):
            tensor = scope.find_var(var_name).get_tensor()
            if isinstance(value, tuple):
                # (ndarray, lod) pair: install the LoD, then unwrap the data.
                tensor.set_lod(value[1])
                value = value[0]
            tensor.set_dims(value.shape)
            tensor.set(value, place)
        elif isinstance(value, float):
            scope.find_var(var_name).set_float(value)
        elif isinstance(value, int):
            scope.find_var(var_name).set_int(value)

    for in_name, in_dup in Operator.get_op_inputs(op.type()):
        if in_name not in inputs:
            continue
        if in_dup:
            for item in inputs[in_name]:
                _assign(item[0], item[1])
        else:
            _assign(in_name, inputs[in_name])
def get_numeric_gradient(place,
                         scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    """Estimate d(mean of outputs)/d(input_to_check) by central differences.

    Each element of the checked tensor is perturbed by +/-delta in turn, the
    op is re-run, and the slope is computed from the two scalar outputs (the
    mean over all names in *output_names*).  Returns an ndarray shaped like
    the checked tensor.  Only FP32/FP64 inputs are supported.
    """
    # FIXME: change this method by compile time concepts
    set_input(scope, op, inputs, place)

    def product(dim):
        # Total number of elements in *dim* (py2 builtin reduce).
        return reduce(lambda a, b: a * b, dim, 1)

    def get_output():
        # Mean of the means of all requested outputs; note the op is re-run
        # once per output name.
        sum = []
        for output_name in output_names:
            op.run(scope, place)
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).mean())
        return np.array(sum).mean()

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
    tensor_to_check_dtype = tensor_to_check.dtype()
    if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
        tensor_to_check_dtype = np.float64
    else:
        raise ValueError("Not supported data type " + str(
            tensor_to_check_dtype))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        if tensor_to_check_dtype == np.float32:
            return tensor.get_float_element(i)
        else:
            return tensor.get_double_element(i)

    def __set_elem__(tensor, i, e):
        if tensor_to_check_dtype == np.float32:
            tensor.set_float_element(i, e)
        else:
            tensor.set_double_element(i, e)

    # we only compute gradient of one element each time.
    # we use a for loop to compute the gradient of every element.
    # NOTE(review): xrange is Python-2 only, like the rest of this file.
    for i in xrange(tensor_size):
        if in_place:
            # Ops that mutate their inputs need a fresh copy before each run.
            set_input(scope, op, inputs, place)

        # get one input element through its index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, place)

        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()

        # Restore the original element before moving to the next one.
        __set_elem__(tensor_to_check, i, origin)
        # Central difference: (f(x+d) - f(x-d)) / (2d).
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

    return gradient_flat.reshape(tensor_to_check.get_dims())
def append_input_output(block, op_proto, np_list, is_input):
    '''Insert VarDesc and generate Python variable instance

    Creates one block variable per input (or output) slot declared by
    *op_proto*, taking shape/lod from the matching numpy value in *np_list*.

    Args:
        block: the Block to create variables in.
        op_proto: source of the slot names and their flags
            (dispensable/duplicable/intermediate).
        np_list: dict mapping slot name to an ndarray, an (ndarray, lod)
            tuple, or, for duplicable slots, a list of (name, value) pairs.
        is_input: True to process op_proto.inputs, False for outputs.

    Returns:
        dict mapping slot name to a Variable, or a list of Variables for
        duplicable slots.
    '''
    proto_list = op_proto.inputs if is_input else op_proto.outputs

    def create_var(block, name, np_list, var_proto):
        if name not in np_list:
            # Only intermediate slots may lack data; shape stays unknown.
            assert var_proto.intermediate, "{} not found".format(name)
            shape = None
            lod_level = None
        else:
            np_value = np_list[name]
            if isinstance(np_value, tuple):
                # (ndarray, lod) pair: lod level is the nesting depth of lod.
                shape = list(np_value[0].shape)
                lod_level = len(np_value[1])
            else:
                shape = list(np_value.shape)
                lod_level = 0
        # NOTE(review): dtype is hard-coded to float32 — TODO confirm that no
        # test exercises non-float32 operands through this path.
        return block.create_var(
            dtype="float32", shape=shape, lod_level=lod_level, name=name)

    var_dict = {}
    for var_proto in proto_list:
        var_name = str(var_proto.name)
        if is_input:
            # Dispensable inputs may be omitted entirely; anything else must
            # be present in np_list.
            if (var_name not in np_list) and var_proto.dispensable:
                continue
            assert (var_name in np_list) or (var_proto.dispensable), \
                "Missing {} as input".format(var_name)
        if var_proto.duplicable:
            assert isinstance(np_list[var_name], list), \
                "Duplicable {} should be set as list".format(var_name)
            var_list = []
            for (name, np_value) in np_list[var_name]:
                var_list.append(
                    create_var(block, name, {name: np_value}, var_proto))
            var_dict[var_name] = var_list
        else:
            var_dict[var_name] = create_var(block, var_name, np_list, var_proto)

    return var_dict
class OpTest(unittest.TestCase):
    """Base class for operator unit tests.

    Subclasses set self.op_type, self.inputs, self.outputs (and optionally
    self.attrs) in setUp(), then call check_output() and/or check_grad().
    NOTE(review): this file relies on Python-2-only names (iteritems,
    basestring, itertools.izip, builtin reduce, list-returning map).
    """

    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        '''Restore random seeds'''
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    def feed_var(self, input_vars, place):
        # Build the feed map {var name -> LoDTensor} from self.inputs.
        # Values may be bare ndarrays or (ndarray, lod) tuples; duplicable
        # slots are lists of (name, value) pairs.
        feed_map = {}
        for var_name in input_vars:
            if isinstance(input_vars[var_name], list):
                for name, np_value in self.inputs[var_name]:
                    tensor = core.LoDTensor()
                    if isinstance(np_value, tuple):
                        tensor.set(np_value[0], place)
                        tensor.set_lod(np_value[1])
                    else:
                        tensor.set(np_value, place)
                    feed_map[name] = tensor
            else:
                tensor = core.LoDTensor()
                if isinstance(self.inputs[var_name], tuple):
                    tensor.set(self.inputs[var_name][0], place)
                    tensor.set_lod(self.inputs[var_name][1])
                else:
                    tensor.set(self.inputs[var_name], place)
                feed_map[var_name] = tensor

        return feed_map

    def calc_output(self, place):
        # Run the op on *place* and return only the outputs.
        outs, _ = self._calc_output(place)
        return outs

    def _calc_output(self, place):
        # Build a one-op program from the op proto, feed self.inputs, run it,
        # and return (raw outputs, fetch list of Variables).
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)

        program = Program()
        block = program.global_block()

        inputs = append_input_output(block, op_proto, self.inputs, True)
        outputs = append_input_output(block, op_proto, self.outputs, False)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        # Fetch every output variable that has an expected value.
        fetch_list = []
        for var_name, var in outputs.iteritems():
            if var_name in self.outputs:
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)

        feed_map = self.feed_var(inputs, place)

        exe = Executor(place)
        outs = exe.run(program,
                       feed=feed_map,
                       fetch_list=fetch_list,
                       return_numpy=False)
        return outs, fetch_list

    def check_output_with_place(self, place, atol):
        # Compare every expected value in self.outputs against what the op
        # actually produced on *place*, with absolute tolerance *atol*.
        outs, fetch_list = self._calc_output(place)
        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                # Index of the single fetched variable named *target_name*.
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    # NOTE(review): AssertionError is given two arguments, so
                    # the %s is never interpolated; the message prints as a
                    # tuple.  Left as-is (documentation-only change).
                    raise AssertionError("sub_out type %s is not list",
                                         type(sub_out))

                for item in sub_out:
                    sub_out_name, expect = item[0], item[1]
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.lod(), expect[1], "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place) +
                    str(actual_t) + "\n" + str(expect_t))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.lod(), expect[1],
                                         "Output (" + out_name +
                                         ") has different lod at " + str(place))

    def check_output(self, atol=1e-5):
        # Check on CPU, and also on GPU when available and supported.
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_output_with_place(place, atol)

    def check_output_customized(self, checker):
        # Like check_output(), but hands the raw numpy outputs to *checker*
        # instead of comparing against self.outputs.
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        for place in places:
            outs = self.calc_output(place)
            outs = [np.array(out) for out in outs]
            checker(outs)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):
        # Element-wise relative comparison of numeric vs. analytic gradients.
        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = np.abs(a)
            # Clamp tiny reference values to 1 to avoid division blow-up;
            # those elements are effectively compared by absolute error.
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return ("%s Variable %s max gradient diff %f over limit %f, "
                        "the first error element is %d, %f, %f") % (
                            msg_prefix, name, max_diff, max_relative_error,
                            offset, a.flatten()[offset], b.flatten()[offset])

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        # Run the gradient check on every available place.
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_grad_with_place(place, inputs_to_check, output_names,
                                       no_grad_set, numeric_grad_delta,
                                       in_place, max_relative_error,
                                       user_defined_grads)

    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None):
        # Compare finite-difference gradients (or user-supplied ones) against
        # the analytic gradients produced by append_backward().
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)
        if no_grad_set is None:
            no_grad_set = set()

        if not type(output_names) is list:
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                place,
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        analytic_grads = self._get_gradient(inputs_to_check, place,
                                            output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check,
                               max_relative_error,
                               "Gradient Check On %s" % str(place))

    @staticmethod
    def _create_var_descs_(block, var_dict):
        # FIXME: Try unify with `append_input_output`
        # Generator yielding (param_name, [(Variable, ndarray, lod), ...]).
        # Normalizes the several accepted value layouts into 3-tuples before
        # creating the block variables.
        for param_name in var_dict:
            var = var_dict[param_name]
            if not isinstance(var, list) and not isinstance(var, tuple):
                var = [(param_name, var, None)]
            if not isinstance(var[0], list) and not isinstance(var[0], tuple):
                var = [(param_name, var[0], var[1])]
            for i, item in enumerate(var):
                if not isinstance(item[0], basestring):
                    item = [[param_name] + list(item)]
                if len(item) == 2:
                    if isinstance(item[1], tuple):
                        var[i] = [item[0], item[1][0], item[1][1]]
                    else:
                        # only set var name and value, set lod to None
                        var[i] = list(item) + [None]

            var_descs = [(block.create_var(
                name=name, shape=each.shape, dtype=each.dtype), each, lod)
                         for name, each, lod in var]

            yield param_name, var_descs

    @staticmethod
    def _merge_list(iterable):
        # Flatten an iterable of lists/tuples into a single list.
        return reduce(lambda a, b: list(a) + list(b), iterable, [])

    @staticmethod
    def _numpy_to_lod_tensor(np_value, lod, place):
        # Wrap an ndarray (with optional lod) in a LoDTensor on *place*.
        tensor = core.LoDTensor()
        tensor.set(np_value, place)
        if lod is not None:
            tensor.set_lod(lod)
        return tensor

    @staticmethod
    def np_dtype_to_fluid_dtype(input):
        """Change the dtype of float16 numpy array

        numpy float16 is binded to paddle::platform::float16
        in tensor_py.h via the help of uint16 data type since
        the internal memory representation of float16 is
        uint16_t in paddle and np.uint16 in numpy, which are
        themselves binded together by pybind.

        Args:
            input: input numpy array

        Returns:
            input: The dtype of input will be changed to np.uint16 if
                it is originally np.float16, such that the internal memory
                of input will be reinterpreted as of dtype np.uint16.
        """
        if input.dtype == np.float16:
            # In-place dtype reinterpretation (no data copy).
            input.dtype = np.uint16
        return input

    def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
        # Build a program that runs the op, reduces the requested outputs to
        # one scalar loss (mean of a single output, or a scaled sum of means
        # for several), appends the backward pass, and returns the gradients
        # of *input_to_check* as ndarrays.
        prog = Program()
        block = prog.global_block()
        inputs_with_np = {
            key: value
            for (key, value) in OpTest._create_var_descs_(
                block, getattr(self, 'inputs', {}))
        }
        outputs_with_np = {
            key: val
            for (key, val) in OpTest._create_var_descs_(
                block, getattr(self, 'outputs', {}))
        }
        inputs = {
            k: [item[0] for item in inputs_with_np[k]]
            for k in inputs_with_np
        }
        outputs = {
            k: [item[0] for item in outputs_with_np[k]]
            for k in outputs_with_np
        }
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=getattr(self, 'attrs', {}))

        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        mean_inputs = map(block.var, output_names)

        if len(mean_inputs) == 1:
            loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
            op = block.append_op(
                inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
            op.desc.infer_var_type(block.desc)
            op.desc.infer_shape(block.desc)
        else:
            # Several outputs: mean each, sum the means, scale by 1/n so the
            # loss is the average of the per-output means.
            avg_sum = []
            for cur_loss in mean_inputs:
                cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
                op = block.append_op(
                    inputs={"X": [cur_loss]},
                    outputs={"Out": [cur_avg_loss]},
                    type="mean")
                op.desc.infer_var_type(block.desc)
                op.desc.infer_shape(block.desc)
                avg_sum.append(cur_avg_loss)

            loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
            op_sum = block.append_op(
                inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
            op_sum.desc.infer_var_type(block.desc)
            op_sum.desc.infer_shape(block.desc)

            loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
            op_loss = block.append_op(
                inputs={"X": loss_sum},
                outputs={"Out": loss},
                type='scale',
                attrs={'scale': 1.0 / float(len(avg_sum))})
            op_loss.desc.infer_var_type(block.desc)
            op_loss.desc.infer_shape(block.desc)

        param_grad_list = append_backward(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        feed_dict = {
            item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place)
            for p_name in inputs_with_np for item in inputs_with_np[p_name]
        }

        fetch_list = [g for p, g in param_grad_list]
        executor = Executor(place)
        return map(np.array,
                   executor.run(prog, feed_dict, fetch_list,
                                return_numpy=False))
// RUN: %check_clang_tidy %s google-explicit-constructor %t -- --header-filter=.* -system-headers -- -isystem %S/Inputs/nolintbeginend
#include "error_in_include.inc"
// CHECK-MESSAGES: error_in_include.inc:1:11: warning: single-argument constructors must be marked explicit
#include "nolint_in_include.inc"
// CHECK-MESSAGES: Suppressed 1 warnings (1 NOLINT). | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/test/clang-tidy/infrastructure/nolintbeginend-error-within-include.cpp |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of PowerSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class PowerSignOptimizer(optimizer.Optimizer):
  """Optimizer that implements the PowerSign update.

  See [Bello et al., ICML2017],
  [Neural Optimizer Search with RL](https://arxiv.org/abs/1709.07417).
  """

  def __init__(self,
               learning_rate=0.1,
               base=math.e,
               beta=0.9,
               sign_decay_fn=None,
               use_locking=False,
               name='PowerSignOptimizer'):
    """Constructs a new PowerSignOptimizer object.

    Initialization:

    ```
    m_0 <- 0 (Initialize initial 1st moment vector)
    t <- 0 (Initialize timestep)
    ```

    Update:

    ```
    t <- t + 1
    m_t <- beta1 * m_{t-1} + (1 - beta1) * g
    sign_decay <- sign_decay_fn(t)
    update <- base ** (sign_decay * sign(g) * sign(m)) * g
    variable <- variable - lr_t * update
    ```

    Example usage for PowerSign-cd (PowerSign with cosine sign decay)
    ```
    decay_steps = 1000
    linear_decay_fn = sign_decays.get_cosine_decay_fn(decay_steps)
    opt = PowerSignOptimizer(learning_rate=0.1, sign_decay_fn=linear_decay_fn)
    ```

    Args:
      learning_rate: learning_rate used when taking a step.
      base: base used in optimizer.
      beta: decay used for computing the moving average m.
      sign_decay_fn: decay function applied to the sign(g) sign(m) quantity.
          Takes global_step as an argument. See sign_decay.py for some examples.
      use_locking: If True, use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "PowerSignOptimizer".
    """
    super(PowerSignOptimizer, self).__init__(use_locking, name)
    self._lr = learning_rate
    self._beta = beta
    # log(base) is stored so the update can be computed as e**(log(base)*x).
    self._logbase = math.log(base)
    self._sign_decay_fn = sign_decay_fn
    # Tensor versions of the constructor arguments, created in _prepare().
    self._lr_t = None
    self._beta_t = None
    self._logbase_t = None

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    # When a decay schedule is supplied, the sign-decay factor depends on
    # global_step and must be rebuilt on every apply_gradients() call.
    if self._sign_decay_fn is not None:
      self._sign_decay_t = ops.convert_to_tensor(
          self._sign_decay_fn(global_step), name='sign_decay')
    return super(PowerSignOptimizer, self).apply_gradients(
        grads_and_vars, global_step=global_step, name=name)

  def _create_slots(self, var_list):
    # Create slots for the first moment.
    for v in var_list:
      self._zeros_slot(v, 'm', self._name)

  def _prepare(self):
    # Convert the python scalar hyper-parameters to tensors.
    self._lr_t = ops.convert_to_tensor(self._lr, name='learning_rate')
    self._beta_t = ops.convert_to_tensor(self._beta, name='beta')
    self._logbase_t = ops.convert_to_tensor(self._logbase, name='logbase')
    if self._sign_decay_fn is None:
      # No schedule given: the sign-decay factor is the constant 1.0.
      self._sign_decay_t = ops.convert_to_tensor(1.0, name='sign_decay')

  def _apply_dense(self, grad, var):
    # Dense variables delegate to the fused ApplyPowerSign kernel.
    m = self.get_slot(var, 'm')
    return training_ops.apply_power_sign(
        var,
        m,
        math_ops.cast(self._lr_t, var.dtype.base_dtype),
        math_ops.cast(self._logbase_t, var.dtype.base_dtype),
        math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
        math_ops.cast(self._beta_t, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, var):
    # Same as _apply_dense, but for resource variables (handle-based).
    m = self.get_slot(var, 'm')
    return training_ops.resource_apply_power_sign(
        var.handle,
        m.handle,
        math_ops.cast(self._lr_t, var.dtype.base_dtype),
        math_ops.cast(self._logbase_t, var.dtype.base_dtype),
        math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
        math_ops.cast(self._beta_t, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking)

  def _apply_sparse(self, grad, var):
    # Sparse path: there is no fused kernel, so the update is composed from
    # IndexedSlices restricted to the rows present in `grad`.
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
    logbase_t = math_ops.cast(self._logbase_t, var.dtype.base_dtype)
    e_t = math_ops.cast(math.e, var.dtype.base_dtype)
    # m_t = beta * m + (1 - beta) * grad.
    # NOTE(review): the assign updates the full moment tensor, not only the
    # sparse rows -- confirm the IndexedSlices arithmetic semantics here.
    m = self.get_slot(var, 'm')
    m_t = state_ops.assign(
        m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
    # sign(g) and sign(g)*sign(m), restricted to the updated rows.
    sign_g = ops.IndexedSlices(
        math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
    sign_gm = ops.IndexedSlices(
        array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
        sign_g.indices,
        dense_shape=sign_g.dense_shape)
    sign_decayed = math_ops.cast(
        self._sign_decay_t, var.dtype.base_dtype)
    # base ** (sign_decay * sign(g) * sign(m)), computed as e**(log(base)*x).
    multiplier_values = math_ops.pow(
        e_t, logbase_t * sign_decayed * sign_gm.values)
    multiplier = ops.IndexedSlices(
        multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
    final_update = ops.IndexedSlices(
        lr_t * multiplier.values * grad.values,
        multiplier.indices,
        dense_shape=multiplier.dense_shape)
    var_update = state_ops.scatter_sub(
        var,
        final_update.indices,
        final_update.values,
        use_locking=self._use_locking)
    return control_flow_ops.group(* [var_update, m_t])
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import functools
import imp
import importlib
import inspect
import itertools
import logging
import os
import re
import sys
import time
import unittest
import threading
from os.path import join as opj
import unittest2
import openerp
import openerp.tools as tools
import openerp.release as release
from openerp.tools.safe_eval import safe_eval as eval
# Name of the manifest file that marks a directory as an OpenERP module.
MANIFEST = '__openerp__.py'
# Candidate README files used as a description fallback for modules whose
# manifest has no 'description' entry.
README = ['README.rst', 'README.md', 'README.txt']
_logger = logging.getLogger(__name__)
# addons path as a list
ad_paths = []
# Whether the AddonsImportHook has already been installed on sys.meta_path.
hooked = False
# Modules already loaded
loaded = []
class AddonsImportHook(object):
    """Meta-path import hook resolving OpenERP addons from multiple paths.

    OpenERP addons are plain Python modules living in the ``openerp.addons``
    namespace; this hook looks them up across every configured addons
    directory (``ad_paths``) so that ``import openerp.addons.<module>``
    works even when those directories are not on the PYTHONPATH.  For
    backward compatibility a bare ``import <module>`` is still supported
    elsewhere, but ``import openerp.addons.module`` is the good way.
    """

    def find_module(self, module_name, package_path):
        """Claim names of the form 'openerp.addons.<addon>' (acting as loader)."""
        prefixed = module_name.startswith('openerp.addons.')
        if prefixed and module_name.count('.') == 2:
            return self  # We act as a loader too.

    def load_module(self, module_name):
        """Import the addon named by ``module_name`` and register it."""
        try:
            return sys.modules[module_name]
        except KeyError:
            pass
        # Note: we don't support circular import.
        _ns, _pkg, addon = module_name.split('.')
        found = imp.find_module(addon, ad_paths)
        fq_name = 'openerp.addons.' + addon
        module = imp.load_module(fq_name, *found)
        sys.modules[fq_name] = module
        return module
def initialize_sys_path():
    """
    Setup an import-hook to be able to import OpenERP addons from the different
    addons paths.

    This ensures something like ``import crm`` (or even
    ``import openerp.addons.crm``) works even if the addons are not in the
    PYTHONPATH.

    Idempotent: paths are only appended once and the meta-path hook is
    installed at most once.
    """
    global ad_paths
    global hooked

    # The downloaded-addons data directory always comes first.
    dd = tools.config.addons_data_dir
    if dd not in ad_paths:
        ad_paths.append(dd)

    # Then every directory from the configured addons_path, normalized.
    for ad in tools.config['addons_path'].split(','):
        ad = os.path.abspath(tools.ustr(ad.strip()))
        if ad not in ad_paths:
            ad_paths.append(ad)

    # add base module path
    base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons'))
    if base_path not in ad_paths:
        ad_paths.append(base_path)

    if not hooked:
        sys.meta_path.append(AddonsImportHook())
        hooked = True
def get_module_path(module, downloaded=False, display_warning=True):
    """Return the path of the given module.

    Every configured addons path is scanned in order; the first one holding
    either a ``<module>`` entry or a ``<module>.zip`` archive wins.  When
    nothing matches, ``downloaded=True`` falls back to the downloaded-addons
    directory, otherwise ``False`` is returned (optionally with a warning).
    """
    initialize_sys_path()
    for addons_dir in ad_paths:
        candidate = opj(addons_dir, module)
        if os.path.exists(candidate) or os.path.exists(candidate + '.zip'):
            return candidate
    if downloaded:
        return opj(tools.config.addons_data_dir, module)
    if display_warning:
        _logger.warning('module %s: module not found', module)
    return False
def get_module_filetree(module, dir='.'):
    """Return the tree of files below ``dir`` inside ``module``.

    The tree is a nested dict: directories map to sub-dicts, files map to
    None.  Returns False when the module cannot be located.
    """
    path = get_module_path(module)
    if not path:
        return False

    dir = os.path.normpath(dir)
    if dir == '.':
        dir = ''
    # Refuse any path that would escape the module directory.
    if dir.startswith('..') or (dir and dir[0] == '/'):
        raise Exception('Cannot access file outside the module')

    files = openerp.tools.osutil.listdir(path, True)

    tree = {}
    for f in files:
        if not f.startswith(dir):
            continue

        if dir:
            # Strip the ``dir`` prefix, plus its path separator when ``dir``
            # does not already end with '/'.
            f = f[len(dir)+int(not dir.endswith('/')):]
        # Descend/create intermediate dict nodes for each path component;
        # the last component is a leaf (file) and maps to None.
        lst = f.split(os.sep)
        current = tree
        while len(lst) != 1:
            current = current.setdefault(lst.pop(0), {})
        current[lst.pop(0)] = None

    return tree
def get_module_resource(module, *args):
    """Return the full path of a resource of the given module.

    :param module: module name
    :param list(str) args: resource path components within module
    :rtype: str
    :return: absolute path to the resource, or False when the module or the
        resource cannot be found

    TODO name it get_resource_path
    TODO make it available inside on osv object (self.get_resource_path)
    """
    mod_path = get_module_path(module)
    if not mod_path:
        return False
    # Zipped modules are not searched: only plain directories qualify.
    if not os.path.isdir(mod_path):
        return False
    resource = opj(mod_path, *args)
    return resource if os.path.exists(resource) else False
def get_module_icon(module):
    """Return the URL path of the module's icon, falling back to base's icon."""
    icon_parts = ('static', 'description', 'icon.png')
    icon_suffix = '/'.join(icon_parts)
    owner = module if get_module_resource(module, *icon_parts) else 'base'
    return '/%s/%s' % (owner, icon_suffix)
def get_module_root(path):
    """
    Get closest module's root begining from path

    # Given:
    # /foo/bar/module_dir/static/src/...

    get_module_root('/foo/bar/module_dir/static/')
    # returns '/foo/bar/module_dir'

    get_module_root('/foo/bar/module_dir/')
    # returns '/foo/bar/module_dir'

    get_module_root('/foo/bar')
    # returns None

    @param path: Path from which the lookup should start

    @return:  Module root path or None if not found
    """
    current = path
    while True:
        if os.path.exists(os.path.join(current, MANIFEST)):
            return current
        parent = os.path.abspath(os.path.join(current, os.pardir))
        if parent == current:
            # Reached the filesystem root without finding a manifest.
            return None
        current = parent
def load_information_from_description_file(module, mod_path=None):
    """Return the manifest (descriptor) data of a module as a dict.

    :param module: The name of the module (sale, purchase, ...)
    :param mod_path: Physical path of the module; looked up across the
        addons paths when not provided.
    """
    if not mod_path:
        mod_path = get_module_path(module)
    terp_file = mod_path and opj(mod_path, MANIFEST) or False
    if terp_file:
        info = {}
        if os.path.isfile(terp_file):
            # default values for descriptor
            info = {
                'application': False,
                'author': '',
                'auto_install': False,
                'category': 'Uncategorized',
                'depends': [],
                'description': '',
                'icon': get_module_icon(module),
                'installable': True,
                'license': 'AGPL-3',
                'post_load': None,
                'version': '1.0',
                'web': False,
                'website': '',
                'sequence': 100,
                'summary': '',
            }
            # Every list-valued key defaults to a fresh empty list:
            # iter(list, None) yields list() repeatedly, izip stops at 7 keys.
            info.update(itertools.izip(
                'depends data demo test init_xml update_xml demo_xml'.split(),
                iter(list, None)))

            # The manifest is evaluated with safe_eval (aliased as ``eval``
            # at the top of this file), not the builtin eval.
            f = tools.file_open(terp_file)
            try:
                info.update(eval(f.read()))
            finally:
                f.close()

            # Fall back to a README file for the description when missing.
            if not info.get('description'):
                readme_path = [opj(mod_path, x) for x in README
                               if os.path.isfile(opj(mod_path, x))]
                if readme_path:
                    readme_text = tools.file_open(readme_path[0]).read()
                    info['description'] = readme_text

            if 'active' in info:
                # 'active' has been renamed 'auto_install'
                info['auto_install'] = info['active']

            info['version'] = adapt_version(info['version'])
            return info

    #TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    _logger.debug('module %s: no %s file found.', module, MANIFEST)
    return {}
def init_module_models(cr, module_name, obj_list):
    """ Initialize a list of models.

    Call _auto_init and init on each model to create or update the
    database tables supporting the models.

    TODO better explanation of _auto_init and init.
    """
    _logger.info('module %s: creating or updating database tables', module_name)
    todo = []
    for obj in obj_list:
        # _auto_init may return deferred jobs to run once all tables exist.
        result = obj._auto_init(cr, {'module': module_name})
        if result:
            todo += result
        if hasattr(obj, 'init'):
            obj.init(cr)
        cr.commit()
    for obj in obj_list:
        obj._auto_end(cr, {'module': module_name})
        cr.commit()
    # Deferred jobs are (priority, callable, args) tuples, run by priority.
    todo.sort(key=lambda x: x[0])
    for t in todo:
        t[1](cr, *t[2])
    cr.commit()
def load_openerp_module(module_name):
    """ Load an OpenERP module, if not already loaded.

    This loads the module and register all of its models, thanks to either
    the MetaModel metaclass, or the explicit instantiation of the model.
    This is also used to load server-wide module (i.e. it is also used
    when there is no model to register).
    """
    global loaded
    if module_name in loaded:
        return

    initialize_sys_path()
    try:
        # mod_path is unused, but get_module_path() logs a warning when the
        # module cannot be found, before the import below fails.
        mod_path = get_module_path(module_name)
        __import__('openerp.addons.' + module_name)

        # Call the module's post-load hook. This can done before any model or
        # data has been initialized. This is ok as the post-load hook is for
        # server-wide (instead of registry-specific) functionalities.
        info = load_information_from_description_file(module_name)
        if info['post_load']:
            getattr(sys.modules['openerp.addons.' + module_name], info['post_load'])()

    except Exception, e:
        # Log and re-raise: a module that fails to import aborts the loading.
        msg = "Couldn't load module %s" % (module_name)
        _logger.critical(msg)
        _logger.critical(e)
        raise
    else:
        loaded.append(module_name)
def get_modules():
    """Return the (deduplicated) list of module names found in all addons paths.

    A directory entry counts as a module when it contains a MANIFEST file;
    a trailing '.zip' is stripped from the reported name.
    """
    def listdir(dir):
        def clean(name):
            name = os.path.basename(name)
            if name[-4:] == '.zip':
                name = name[:-4]
            return name

        def is_really_module(name):
            # Only entries holding a manifest file qualify as modules.
            manifest_name = opj(dir, name, MANIFEST)
            return os.path.isfile(manifest_name)
        # Fix: dropped the unused `zipfile_name` local previously computed here.
        return map(clean, filter(is_really_module, os.listdir(dir)))

    plist = []
    initialize_sys_path()
    for ad in ad_paths:
        plist.extend(listdir(ad))
    return list(set(plist))
def get_modules_with_version():
    """Map every known module name to its manifest version.

    Modules whose manifest cannot be read keep the default '<serie>.1.0'.
    """
    default_version = adapt_version('1.0')
    versions = {}
    for name in get_modules():
        versions[name] = default_version
        try:
            versions[name] = load_information_from_description_file(name)['version']
        except Exception:
            # Broken or unreadable manifest: keep the default version.
            continue
    return versions
def adapt_version(version):
    """Prefix ``version`` with the server serie unless it already carries it.

    Note the historical quirk: a version exactly equal to the serie is
    prefixed too (e.g. serie '8.0' + version '8.0' gives '8.0.8.0').
    """
    serie = release.major_version
    needs_prefix = version == serie or not version.startswith(serie + '.')
    return '%s.%s' % (serie, version) if needs_prefix else version
def get_test_modules(module):
    """ Return a list of module for the addons potentially containing tests to
    feed unittest2.TestLoader.loadTestsFromModule() """
    # Try to import the module
    modpath = 'openerp.addons.' + module
    try:
        mod = importlib.import_module('.tests', modpath)
    except Exception, e:
        # If module has no `tests` sub-module, no problem.
        if str(e) != 'No module named tests':
            _logger.exception('Can not `import %s`.', module)
        return []

    if hasattr(mod, 'fast_suite') or hasattr(mod, 'checks'):
        _logger.warn(
            "Found deprecated fast_suite or checks attribute in test module "
            "%s. These have no effect in or after version 8.0.",
            mod.__name__)

    # Only sub-modules whose name starts with 'test_' are test modules.
    result = [mod_obj for name, mod_obj in inspect.getmembers(mod, inspect.ismodule)
              if name.startswith('test_')]
    return result
# Use a custom stream object to log the test executions.
class TestStream(object):
    """File-like object redirecting unittest runner output to a logger."""

    def __init__(self, logger_name='openerp.tests'):
        self.logger = logging.getLogger(logger_name)
        # Filters out separator/noise lines emitted by the test runner.
        self.r = re.compile(r'^-*$|^ *... *$|^ok$')

    def flush(self):
        # Nothing is buffered: every write is logged immediately.
        pass

    def write(self, s):
        """Log ``s`` line by line, skipping runner noise."""
        if self.r.match(s):
            return
        is_error = s.startswith(('ERROR', 'FAIL', 'Traceback'))
        level = logging.ERROR if is_error else logging.INFO
        for index, line in enumerate(s.splitlines()):
            if index:
                # Continuation lines are visually tied to the first one.
                line = '` ' + line
            self.logger.log(level, line)
# Name of the module whose tests are currently running (None outside tests).
current_test = None
def runs_at(test, hook, default):
    """Tell whether ``test`` should run at the phase named by ``hook``.

    The attribute on the test (suite or case) provides the baseline; for an
    individual test case, the bound test method may override that baseline.
    By default, tests do not run post install.
    """
    class_level = getattr(test, hook, default)
    if not isinstance(test, unittest.TestCase):
        # Test suites only carry the aggregate flag: we're done.
        return class_level
    # Otherwise check the current test method for a per-method override.
    bound_method = getattr(test, test._testMethodName)
    return getattr(bound_method, hook, class_level)


runs_at_install = functools.partial(runs_at, hook='at_install', default=True)
runs_post_install = functools.partial(runs_at, hook='post_install', default=False)
def run_unit_tests(module_name, dbname, position=runs_at_install):
    """Run the unit tests of ``module_name`` at the given phase.

    :param position: predicate selecting which tests run (at_install /
        post_install), see ``runs_at``.
    :returns: ``True`` if all of ``module_name``'s tests succeeded, ``False``
              if any of them failed.
    :rtype: bool
    """
    global current_test
    current_test = module_name
    mods = get_test_modules(module_name)
    # Flag the thread so helpers can detect they run inside the test suite.
    threading.currentThread().testing = True
    r = True
    for m in mods:
        tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m))
        # Keep only the tests that run at the requested phase.
        suite = unittest2.TestSuite(itertools.ifilter(position, tests))

        if suite.countTestCases():
            t0 = time.time()
            t0_sql = openerp.sql_db.sql_counter
            _logger.info('%s running tests.', m.__name__)
            result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
            # Slow modules get an extra timing/query-count line; level 25
            # sits between INFO (20) and WARNING (30).
            if time.time() - t0 > 5:
                _logger.log(25, "%s tested in %.2fs, %s queries", m.__name__, time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
            if not result.wasSuccessful():
                r = False
                _logger.error("Module %s: %d failures, %d errors", module_name, len(result.failures), len(result.errors))

    current_test = None
    threading.currentThread().testing = False
    return r
def unwrap_suite(test):
    """
    Attempts to unpack testsuites (holding suites or cases) in order to
    generate a single stream of terminals (either test cases or customized
    test suites). These can then be checked for run/skip attributes
    individually.

    An alternative would be to use a variant of @unittest2.skipIf with a state
    flag of some sort e.g. @unittest2.skipIf(common.runstate != 'at_install'),
    but then things become weird with post_install as tests should *not* run
    by default there
    """
    if isinstance(test, unittest.TestCase):
        yield test
        return

    children = list(test)
    # custom test suite (no test cases): it is itself a terminal
    if not children:
        yield test
        return

    for child in children:
        for leaf in unwrap_suite(child):
            yield leaf
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStrings(Koan):
    """Koan exercises about Python 2 string literals and operations."""

    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(True, isinstance(string, basestring))

    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(True, isinstance(string, basestring))

    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(True, isinstance(string, basestring))

    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(True, isinstance(string, basestring))

    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(True, isinstance(string, basestring))

    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        self.assertEqual(string, string)

    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        self.assertEqual(string, string)

    def test_use_backslash_for_escaping_quotes_in_strings(self):
        # Both spellings denote exactly the same text.
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(True, (a == b))

    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        # The backslash-newline pair is removed from the literal, so the two
        # lines are joined directly: 25 + 26 characters = 51.
        string = "It was the best of times,\
It was the worst of times."
        self.assertEqual(51, len(string))

    def test_triple_quoted_strings_can_span_lines(self):
        # "\nHowdy,\nworld!\n" -> 15 characters including the newlines.
        string = """
Howdy,
world!
"""
        self.assertEqual(15, len(string))

    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(True, (a == b))

    def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
        string = """Hello "world\""""
        self.assertEqual('Hello "world"', string)

    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual('Hello, world', string)

    def test_adjacent_strings_are_concatenated_automatically(self):
        # Juxtaposed literals are joined at compile time.
        string = "Hello" ", " "world"
        self.assertEqual('Hello, world', string)

    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual('Hello, ', hi)
        self.assertEqual('world', there)

    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual('Hello, world', hi)

    def test_plus_equals_also_leaves_original_string_unmodified(self):
        # Strings are immutable: += rebinds the name, ``original`` is untouched.
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual('Hello, ', original)

    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
# -*- coding: utf-8 -*-
###############################################################################
#
# CalculateTariffInputMetaData
# Retrieve inputs required to run a calculation for the specified tariff, within a specified period of time.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CalculateTariffInputMetaData(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the CalculateTariffInputMetaData Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CalculateTariffInputMetaData, self).__init__(temboo_session, '/Library/Genability/PricingAndCalc/CalculateTariffInputMetaData')

    def new_input_set(self):
        # Factory for the Choreo-specific input container.
        return CalculateTariffInputMetaDataInputSet()

    def _make_result_set(self, result, path):
        # Wrap the raw execution result in the Choreo-specific ResultSet.
        return CalculateTariffInputMetaDataResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for tracking an asynchronous execution of this Choreo.
        return CalculateTariffInputMetaDataChoreographyExecution(session, exec_id, path)
class CalculateTariffInputMetaDataInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CalculateTariffInputMetaData
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.

    Generated Temboo boilerplate: each setter forwards a named input value
    to the base InputSet.
    """
    def set_AppID(self, value):
        """
        Set the value of the AppID input for this Choreo. ((required, string) The App ID provided by Genability.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('AppID', value)

    def set_AppKey(self, value):
        """
        Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Genability.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('AppKey', value)

    def set_BillingPeriod(self, value):
        """
        Set the value of the BillingPeriod input for this Choreo. ((optional, string) Specify whether results retrieved should be based on a billing period, or not. Default is set to: false.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('BillingPeriod', value)

    def set_CityLimits(self, value):
        """
        Set the value of the CityLimits input for this Choreo. ((optional, string) Specify whether electricity pricing information should be restricted to city limits, or not. Example input value: Inside.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('CityLimits', value)

    def set_ConnectionType(self, value):
        """
        Set the value of the ConnectionType input for this Choreo. ((optional, string) The connection type. For example: Primary.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('ConnectionType', value)

    def set_FromDateTime(self, value):
        """
        Set the value of the FromDateTime input for this Choreo. ((required, string) The date and time of the requested start of the price query. Must be in ISO 8601 format. Example: 2012-06-12T00:00:00.0-0700)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('FromDateTime', value)

    def set_GroupBy(self, value):
        """
        Set the value of the GroupBy input for this Choreo. ((optional, string) Specify how calculation details are displayed. For example retrieved details can be grouped by month, or year. Options include: Daily, Weekly, Month, Year.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('GroupBy', value)

    def set_KeyName(self, value):
        """
        Set the value of the KeyName input for this Choreo. ((optional, string) An applicability value. If an error is returned, indicating the need for an extra applicability parameter, use this variable to set the parameter name. For example: territoryID.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('KeyName', value)

    def set_KeyValue(self, value):
        """
        Set the value of the KeyValue input for this Choreo. ((conditional, string) The value for the specified KeyName variable. For example if KeyName is set to territoryID, you could provide 3385 for the KeyValue input.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('KeyValue', value)

    def set_MasterTariffID(self, value):
        """
        Set the value of the MasterTariffID input for this Choreo. ((required, string) A Genability tariff ID.)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('MasterTariffID', value)

    def set_ToDateTime(self, value):
        """
        Set the value of the ToDateTime input for this Choreo. ((required, string) The date and time of the requested start of the price query. Must be in ISO 8601 format. Example: 2012-06-12T00:00:00.0-0700)
        """
        super(CalculateTariffInputMetaDataInputSet, self)._set_input('ToDateTime', value)
class CalculateTariffInputMetaDataResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CalculateTariffInputMetaData Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``; kept
        # as-is for compatibility with the Temboo code generator.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Genability.)
        """
        return self._output.get('Response', None)
class CalculateTariffInputMetaDataChoreographyExecution(ChoreographyExecution):
    """Asynchronous-execution handle producing the matching ResultSet."""

    def _make_result_set(self, response, path):
        return CalculateTariffInputMetaDataResultSet(response, path)
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/gpio/st,nomadik-gpio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Nomadik GPIO controller
description:
The Nomadik GPIO driver handles Nomadik SoC GPIO blocks. This block has also
been called ST STA2X11. On the Nomadik platform, this driver is intertwined
with pinctrl-nomadik.
maintainers:
- Linus Walleij <linusw@kernel.org>
properties:
$nodename:
pattern: "^gpio@[0-9a-f]+$"
compatible:
enum:
- st,nomadik-gpio
- mobileye,eyeq5-gpio
reg:
maxItems: 1
interrupts:
maxItems: 1
"#gpio-cells":
const: 2
gpio-controller: true
interrupt-controller: true
"#interrupt-cells":
const: 2
gpio-bank:
description: System-wide GPIO bank index.
$ref: /schemas/types.yaml#/definitions/uint32
st,supports-sleepmode:
description: Whether the controller can sleep or not.
$ref: /schemas/types.yaml#/definitions/flag
clocks:
maxItems: 1
gpio-ranges:
maxItems: 1
ngpios:
minimum: 0
maximum: 32
resets:
maxItems: 1
required:
- compatible
- reg
- interrupts
- "#gpio-cells"
- gpio-controller
- interrupt-controller
- gpio-bank
unevaluatedProperties: false
allOf:
- if:
properties:
compatible:
contains:
const: mobileye,eyeq5-gpio
then:
properties:
st,supports-sleepmode: false
examples:
- |
gpio@8012e080 {
compatible = "st,nomadik-gpio";
reg = <0x8012e080 0x80>;
interrupts = <0 120 0x4>;
#gpio-cells = <2>;
gpio-controller;
#interrupt-cells = <2>;
interrupt-controller;
st,supports-sleepmode;
gpio-bank = <1>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/gpio/st,nomadik-gpio.yaml |
## Input
```javascript
// @enableAssumeHooksFollowRulesOfReact true
function Component(props) {
const x = {};
// In enableAssumeHooksFollowRulesOfReact mode hooks freeze their inputs and return frozen values
const y = useFoo(x);
// Thus both x and y are frozen here, and x can be independently memoized
bar(x, y);
return [x, y];
}
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime"; // @enableAssumeHooksFollowRulesOfReact true
function Component(props) {
const $ = _c(3);
let t0;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t0 = {};
$[0] = t0;
} else {
t0 = $[0];
}
const x = t0;
const y = useFoo(x);
bar(x, y);
let t1;
if ($[1] !== y) {
t1 = [x, y];
$[1] = y;
$[2] = t1;
} else {
t1 = $[2];
}
return t1;
}
``` | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/immutable-hooks.expect.md |
'''
Created on 2013 mai 24
@author: peio
'''
import subprocess
import os
from tempfile import NamedTemporaryFile
from Bio import SeqIO
from Bio.Seq import Seq
class IsIndelError(Exception):
    # Presumably raised when a position falls on an indel; not raised within
    # the visible part of this module -- TODO confirm against callers.
    pass


class OutsideAlignment(Exception):
    # Raised when a position lies before or after the aligned region.
    pass


class BetweenSegments(Exception):
    # Raised when a position (or slice) falls in a gap between aligned segments.
    pass
class SeqCoords(object):
'''This class creates a coord system relation between tow secuences.
First it alignes the sequences and creates de coord system.
It uses internally 0 coord system.
The coord system is a list of start, stop segments in both secuences
ej:
1 2 3 4 5 6
01234567890123456789012345678901234567890123456789012345678901234
ATCTAGGCTGCTACGATTAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGCTAGCTGATCG
ATCTAGGCTGCTACGA-TAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGC-AGCTGATCG
0-15, 17-54, 56-64
0-15, 16-53, 54-62
1 2 3 4 5 6
01234567890123456789012345678901234567890123456789012345678901234
012345678901234567890123 4567890123456789012345678901 2345678901234
ATCTAGGCTGCTACGATTAGCTGA-CGATGTTATCGTAGATCTAGCTGATCAT-CTAGCTGATCG
ATCTAGGCT-CTACGATTAGCTGATCGATGTTATCGTAGATC-AGCTGATCATGCTAGCTGATCG
012345678 90123456789012345678901234567890 123456789012345678901234
0-8, 10-23, 24-40, 42-51, 52-62
0-8, 9-22, 24-40, 41-50, 52-62
'''
def __init__(self, seq1, seq2):
"Both secuences are biopython secuences"
self.coord_system = self._get_coord_system(seq1, seq2)
self.seq1_name = seq1.id
self.seq2_name = seq2.id
self._seq2_len = len(seq2)
def _get_coord_system(self, seq1, seq2):
out_fhand, reverse = get_water_alignment(seq1, seq2)
self.reverse = reverse
coord_system = build_relations_from_aligment(out_fhand)
out_fhand.close()
return coord_system
def _reverse_pos(self, pos):
reverse = self.reverse
if reverse:
return self._seq2_len - pos - 1
else:
return pos
def _get_segment(self, pos, seq_name):
'returns the segment index of the given position'
segments = self.coord_system[seq_name]
for index, (start, stop) in enumerate(segments):
if pos >= start and pos <= stop:
return index, (start, stop)
if pos < segments[0][0] or pos > segments[-1][1]:
raise OutsideAlignment
else:
raise BetweenSegments
def _to_seq_pos(self, pos, to_seq1=True):
if to_seq1:
seq1_name = self.seq1_name
seq2_name = self.seq2_name
else:
seq2_name = self.seq1_name
seq1_name = self.seq2_name
segment2 = self._get_segment(pos, seq2_name)
segment2_index, segment2 = segment2
segment1 = self.coord_system[seq1_name][segment2_index]
return segment1[0] + pos - segment2[0]
def to_seq1_pos(self, seq2_pos):
seq2_pos = self._reverse_pos(seq2_pos)
return self._to_seq_pos(seq2_pos, to_seq1=True)
def to_seq2_pos(self, seq1_pos):
seq2_pos = self._to_seq_pos(seq1_pos, to_seq1=False)
return self._reverse_pos(seq2_pos)
    def _to_seq_slice(self, start, end, to_seq1=True):
        # Translate a slice (end exclusive) between sequences; both
        # endpoints must fall inside the SAME alignment segment.
        if to_seq1:
            seq1_name = self.seq1_name
            seq2_name = self.seq2_name
        else:
            # Swap roles when mapping seq1 -> seq2.
            seq2_name = self.seq1_name
            seq1_name = self.seq2_name
        # Work with the inclusive last position internally.
        stop = end - 1
        segment2_start = self._get_segment(start, seq2_name)
        segment2_stop = self._get_segment(stop, seq2_name)
        segment2_index_start, segment2_start = segment2_start
        segment2_index_stop, segment2_stop = segment2_stop
        if segment2_index_start != segment2_index_stop:
            raise BetweenSegments
        segment1 = self.coord_system[seq1_name][segment2_index_start]
        # Same in-segment offsets in the corresponding segment.
        start = segment1[0] + start - segment2_start[0]
        stop = segment1[0] + stop - segment2_stop[0]
        # Back to an exclusive end.
        return (start, stop + 1)
    def to_seq1_slice(self, start, end):
        # Map a seq2 slice onto seq1, mirroring first when reversed.
        # NOTE(review): `end` is exclusive but is mirrored like a position
        # (not end - 1); confirm the intended off-by-one semantics here.
        if self.reverse:
            start = self._reverse_pos(start)
            end = self._reverse_pos(end)
        slice2 = self._to_seq_slice(start, end, to_seq1=True)
        if self.reverse:
            # Mirroring swapped the limits; swap back before returning.
            return slice2[1], slice2[0]
        return slice2
    def to_seq2_slice(self, start, end):
        # Map a seq1 slice onto seq2; when the alignment is reversed the
        # resulting limits are mirrored (and exchanged) afterwards.
        slice1 = self._to_seq_slice(start, end, to_seq1=False)
        if self.reverse:
            start = self._reverse_pos(slice1[1])
            end = self._reverse_pos(slice1[0])
        else:
            start, end = slice1
        return (start, end)
def build_relations_from_aligment(fhand):
    'It returns a relations dict given an alignment in markx10 format'
    #print open(fhand.name).read()
    #we parse the aligment
    # in_seq_section counts '>' headers seen so far: 0 = preamble,
    # 1 = reading the first sequence, 2 = reading the second one.
    in_seq_section = 0
    seq, al_start, seq_len = None, None, None
    seq0_name = None
    for line in fhand:
        line = line.strip()
        if not line:
            continue
        # A single '>' opens a per-sequence section ('>>' is different).
        if line[0] == '>' and line[1] != '>':
            if seq0_name is None:
                seq0_name = line.split()[0][1:]
            else:
                seq1_name = line.split()[0][1:]
            if in_seq_section:
                # Second header reached: flush the completed first sequence.
                seq0 = {'seq': seq,
                        'length': seq_len,
                        'al_start': al_start - 1,
                        'name': seq0_name}
            in_seq_section += 1
            seq = ''
            continue
        if not in_seq_section:
            continue
        if '; sq_len:' in line:
            seq_len = int(line.split(':')[-1])
        if '; al_display_start:' in line:
            al_start = int(line.split(':')[-1])
        # Non-annotation lines are sequence data.
        if line[0] not in (';', '#'):
            seq += line
    # The loop leaves the trailing (second) sequence unflushed.
    seq1 = {'seq': seq,
            'length': seq_len,
            'al_start': al_start - 1,
            'name': seq1_name}
    #now we get the segments
    # Walk both aligned strings column by column; each gap closes the
    # current ungapped segment in both coordinate systems.
    gap = '-'
    segments = []
    segment0, segment1 = None, None
    seq0_start, seq1_start = seq0['al_start'], seq1['al_start']
    seq0_start_delta, seq1_start_delta = seq0_start, seq1_start
    seq0_delta, seq1_delta = 0, 0
    for index, (nucl0, nucl1) in enumerate(zip(seq0['seq'], seq1['seq'])):
        # seq*_delta discounts gap columns so seq*_index is the position
        # within the ungapped sequence.
        seq0_index = seq0_start_delta + index - seq0_delta
        seq1_index = seq1_start_delta + index - seq1_delta
        if nucl0 == gap:
            segment0 = seq0_start, seq0_index - 1
            segment1 = seq1_start, seq1_index - 1
            seq0_start = seq0_index
            seq1_start = seq1_index + 1
            seq0_delta += 1
        elif nucl1 == gap:
            segment0 = seq0_start, seq0_index - 1
            segment1 = seq1_start, seq1_index - 1
            seq1_start = seq1_index
            seq0_start = seq0_index + 1
            seq1_delta += 1
        if segment0 and segment1:
            segment = {seq0['name']: segment0, seq1['name']: segment1}
            segments.append(segment)
            segment0, segment1 = None, None
    else:
        # for/else: runs once after the loop to flush the final segment.
        segment0 = seq0_start, seq0_index
        segment1 = seq1_start, seq1_index
        segment = {seq0['name']: segment0, seq1['name']: segment1}
        segments.append(segment)
    # Pivot into {seq_name: [(start, stop), ...]} with parallel indexes.
    relations = {}
    for seg in segments:
        for seq_name, limits in seg.items():
            if seq_name not in relations:
                relations[seq_name] = []
            relations[seq_name].append(limits)
    return relations
def _get_water_score(fhand):
for line in fhand:
if line.startswith('# Score:'):
return float(line.split(':')[1].strip())
return None
def get_water_alignment(seq1, seq2, gap_open=10.0, gap_extend=0.5,
                        out_fmt='markx10'):
    '''Align seq1 against seq2 with EMBOSS water in both orientations.

    Runs the alignment with seq2 as given and with seq2 reversed
    (-sreverse2), keeps the better-scoring one and returns a tuple
    (open file positioned at 0 with the winning alignment, reversed_flag).
    '''
    out_fhand = NamedTemporaryFile()
    # BUG FIX: the caller-supplied gap_open/gap_extend/out_fmt used to be
    # ignored (hard-coded defaults were always passed); forward them now.
    _do_water_alignment(seq1, seq2, out_fhand, gap_open=gap_open,
                        gap_extend=gap_extend, out_fmt=out_fmt,
                        reverse2=False)
    out_fhand2 = NamedTemporaryFile()
    _do_water_alignment(seq1, seq2, out_fhand2, gap_open=gap_open,
                        gap_extend=gap_extend, out_fmt=out_fmt,
                        reverse2=True)
    forw_score = _get_water_score(out_fhand)
    rev_score = _get_water_score(out_fhand2)
    if forw_score > rev_score:
        # Forward orientation wins; close (and delete) the loser and
        # rewind the winner so the caller can parse it from the start.
        out_fhand2.close()
        out_fhand.seek(0)
        return out_fhand, False
    else:
        out_fhand.close()
        out_fhand2.seek(0)
        return out_fhand2, True
def _do_water_alignment(seq1, seq2, out_fhand, gap_open=10.0, gap_extend=0.5,
                        out_fmt='markx10', reverse2=False):
    '''Run EMBOSS water on two sequences, writing the result to out_fhand.

    The sequences are dumped to temporary FASTA files and the external
    `water` binary is invoked; raises CalledProcessError on failure.
    '''
    seq1_fhand = NamedTemporaryFile()
    seq2_fhand = NamedTemporaryFile()
    SeqIO.write(seq1, seq1_fhand, 'fasta')
    SeqIO.write(seq2, seq2_fhand, 'fasta')
    # flush so the subprocess sees the full FASTA content on disk
    seq1_fhand.flush()
    seq2_fhand.flush()
    cmd = ['water', '-asequence', seq1_fhand.name, '-bsequence',
           seq2_fhand.name, '-outfile', out_fhand.name, '-gapopen',
           str(gap_open), '-gapextend', str(gap_extend), '-aformat3', out_fmt]
    if reverse2:
        # align against the reverse complement of seq2
        cmd.append('-sreverse2')
    # BUG FIX: the devnull handles were never closed (leaked descriptors);
    # context managers release them even if water exits non-zero.
    with open(os.devnull, 'w') as stdout, open(os.devnull, 'w') as stderr:
        subprocess.check_call(cmd, stdout=stdout, stderr=stderr)
def get_amino_change(seq_ref, seq_estscan, snv):
    '''Return the reference and alternative amino acids for a SNV.

    Maps the SNV position from the reference sequence onto the estscan
    (ORF) sequence, translates the affected codon and returns
    {'ref_amino': str, 'alt_amino': [str, ...]}, or None when the
    position cannot be mapped consistently.

    Raises IsIndelError when the SNV is an indel.
    '''
    if snv.is_indel:
        raise IsIndelError()
    position = snv.pos
    seq_coord = SeqCoords(seq_ref, seq_estscan)
    estscan_pos = seq_coord.to_seq2_pos(position)
    if estscan_pos is None:
        return None
    # Codon boundaries around the SNV in the estscan sequence.
    # NOTE(review): estscan_start = pos + frame - 1 moves the start
    # forward by the in-codon offset instead of rewinding it; confirm
    # this selects the codon that actually contains the SNV.
    estscan_frame = (estscan_pos % 3) + 1
    estscan_start = estscan_pos + estscan_frame - 1
    estscan_stop = estscan_start + 2
    # check if there is a frameshift in the ref_seq
    ref_slice = seq_coord.to_seq1_slice(estscan_start, estscan_stop)
    if ref_slice is None:
        return None
    ref_seq_aa = seq_ref[ref_slice[0]: ref_slice[1] + 1].seq[:3].translate()
    estscan_seq_aa = seq_estscan[estscan_start: estscan_stop + 1].seq[:3]
    ref_aa = str(estscan_seq_aa.translate())
    # Both sequences must encode the same reference amino acid.
    if str(ref_seq_aa) != str(ref_aa):
        return None
    aminos = {'ref_amino': ref_aa, 'alt_amino': []}
    for alt_allele in snv.alleles[1:]:
        # Substitute the allele at its in-codon offset and retranslate.
        alt_seq = [nucl for nucl in (estscan_seq_aa)]
        alt_seq[estscan_frame - 1] = alt_allele
        alt_seq = Seq("".join(alt_seq))
        alt_aa = str(alt_seq.translate())
        aminos['alt_amino'].append(alt_aa)
    return aminos
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationsample.endpoint;
import org.springframework.boot.configurationsample.TestAccess;
import org.springframework.boot.configurationsample.TestEndpoint;
import org.springframework.boot.configurationsample.TestReadOperation;
/**
 * A simple endpoint with no default override, with the same id as {@link SimpleEndpoint},
 * but with no access by default.
 *
 * @author Moritz Halbritter
 */
@TestEndpoint(id = "simple", defaultAccess = TestAccess.NONE)
public class SimpleEndpoint3 {
	@TestReadOperation
	public String invoke() {
		// Fixed response body for the sample read operation.
		return "test";
	}
}
//// [tests/cases/conformance/statements/VariableStatements/usingDeclarations/awaitUsingDeclarations.13.ts] ////
// NOTE: generated compiler baseline (input .ts section followed by the
// emitted .js section); regenerate via the test harness, do not hand-edit.
//// [awaitUsingDeclarations.13.ts]
await using x = null;
function f() {
    await using x = null;
}
//// [awaitUsingDeclarations.13.js]
"use strict";
await using x = null;
function f() {
    await using x = null;
}
import sys
import spotipy
''' shows the albums and tracks for a given artist.
'''
def get_artist(name):
    'Return the first Spotify artist matching ``name``, or None.'
    response = sp.search(q='artist:' + name, type='artist')
    matches = response['artists']['items']
    return matches[0] if matches else None
def show_artist_albums(artist):
    "Print the artist's album titles, sorted and de-duplicated."
    page = sp.artist_albums(artist['id'], album_type='album')
    albums = list(page['items'])
    # Follow pagination until exhausted.
    while page['next']:
        page = sp.next(page)
        albums.extend(page['items'])
    albums.sort(key=lambda album: album['name'].lower())
    shown = set()  # titles already printed (skip duplicate releases)
    for album in albums:
        title = album['name']
        if title in shown:
            continue
        print((' ' + title))
        shown.add(title)
if __name__ == '__main__':
    # Unauthenticated client: only public search/album endpoints are used.
    sp = spotipy.Spotify()
    if len(sys.argv) < 2:
        print(('Usage: {0} artist name'.format(sys.argv[0])))
    else:
        # Everything after the script name forms the artist name.
        name = ' '.join(sys.argv[1:])
        artist = get_artist(name)
        if artist:
            show_artist_albums(artist)
        else:
            print("Can't find that artist")
#!/usr/bin/env python
"""
Install script for beancount.
"""
__author__ = "Martin Blais <blais@furius.ca>"
import os
from os.path import join, isfile
from setuptools import setup
# Install all scripts under bin.
# Each entry is a command-line entry point shipped with the package.
scripts = [join('bin', x) for x in ('bean-web',
                                    'bean-trial',
                                    'bean-suck',
                                    'bean-convert-ofx',
                                    'bean-convert-paypal-csv',
                                    'bean-convert-rbc-activity',
                                    )]
def read_version():
    """Return the version string from the VERSION file in the current dir.

    Exits with a helpful message (SystemExit) when the file cannot be
    read, which happens when setup.py is run outside the project root.
    """
    try:
        # BUG FIX: the file handle used to be leaked; 'with' closes it.
        # 'except IOError as e' works on both Python 2.6+ and Python 3.
        with open('VERSION', 'r') as version_file:
            return version_file.readline().strip()
    except IOError as e:
        raise SystemExit(
            "Error: you must run setup from the root directory (%s)" % str(e))
# Package metadata; the version is read from the VERSION file at sdist time.
setup(name="beancount",
      version=read_version(),
      description=\
      "Command-line Double-Entry Accounting",
      long_description="""
A double-entry accounting system that uses a simple text file format
as input. A few Python scripts are used to parse the contents of the
file, for example, to serve the contents as a locally running web
server. Scripts are provided to convert from OFX files into Ledger
input format, and other formats (easily extensible).
""",
      license="GPL",
      author="Martin Blais",
      author_email="blais@furius.ca",
      url="http://furius.ca/beancount",
      package_dir = {'': 'lib/python'},
      packages = ['beancount',
                  'beancount.web',
                  'beancount.fallback',
                  'beancount.fallback.elementtree',
                  ],
      scripts=scripts
      )
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Reusable oVirt documentation fragment shared by the facts modules."""
    # facts standard oVirt documentation fragment
    # NOTE: DOCUMENTATION is consumed verbatim by Ansible's doc tooling;
    # it is data, not a docstring.
    DOCUMENTATION = r'''
options:
    fetch_nested:
        description:
            - If I(yes) the module will fetch additional data from the API.
            - It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
              attributes of the nested entities by specifying C(nested_attributes).
        type: bool
        version_added: "2.3"
    nested_attributes:
        description:
            - Specifies list of the attributes which should be fetched from the API.
            - This parameter apply only when C(fetch_nested) is I(true).
        type: list
        version_added: "2.3"
    auth:
        description:
            - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
            - C(username)[I(required)] - The name of the user, something like I(admin@internal).
              Default value is set by I(OVIRT_USERNAME) environment variable.
            - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
            - "C(url)- A string containing the API URL of the server, usually
              something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(hostname) - A string containing the hostname of the server, usually
              something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
            - "C(insecure) - A boolean flag that indicates if the server TLS
              certificate and host name should be checked."
            - "C(ca_file) - A PEM file containing the trusted CA certificates. The
              certificate presented by the server will be verified using these CA
              certificates. If `C(ca_file)` parameter is not set, system wide
              CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
            - "C(kerberos) - A boolean flag indicating if Kerberos authentication
              should be used instead of the default basic authentication."
            - "C(headers) - Dictionary of HTTP headers to be added to each API call."
        type: dict
        required: true
requirements:
  - python >= 2.7
  - ovirt-engine-sdk-python >= 4.3.0
notes:
  - "In order to use this module you have to install oVirt Python SDK.
     To ensure it's installed with correct version you can create the following task:
     pip: name=ovirt-engine-sdk-python version=4.3.0"
'''
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.human import HumanInputChatModel
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"HumanInputChatModel": "langchain_community.chat_models.human"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (lazily imports deprecated names)."""
    return _import_attribute(name)
# Names re-exported (lazily) from this shim module.
__all__ = [
    "HumanInputChatModel",
]
'use strict';

// Stub out `console` in worker environments that lack one.
self.console || (self.console = { 'log': function() {} });

addEventListener('message', function(e) {
  if (e.data) {
    try {
      // e.data is a script path relative to this worker's parent directory.
      importScripts('../' + e.data);
    } catch (e) {
      var lineNumber = e.lineNumber,
          message = (lineNumber == null ? '' : (lineNumber + ': ')) + e.message;
      // Surface the load failure through the `_.VERSION` slot read below.
      self._ = { 'VERSION': message };
    }
    // Report either the real lodash VERSION or the error message.
    postMessage(_.VERSION);
  }
});
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_TENSOR_HANDLE_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_TENSOR_HANDLE_H_
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
namespace tensorflow {
namespace eager {
// Compact identifier for a remote tensor: the producing op's id plus
// which of that op's outputs it is (mirrors the RemoteTensorHandle proto).
struct RemoteTensorHandleInternal {
  explicit RemoteTensorHandleInternal(const RemoteTensorHandle& tensor_handle)
      : op_id(tensor_handle.op_id()), output_num(tensor_handle.output_num()) {}
  RemoteTensorHandleInternal(int64_t op_id, int32_t output_num)
      : op_id(op_id), output_num(output_num) {}
  int64_t op_id;
  int32_t output_num;
};

// Hash functor so RemoteTensorHandleInternal can key hash containers.
struct RemoteTensorHandleInternalHash {
  std::size_t operator()(const RemoteTensorHandleInternal& handle) const {
    return FingerprintCat64(handle.op_id, handle.output_num);
  }
};

// Equality functor paired with the hash functor above.
struct RemoteTensorHandleInternalEquals {
  bool operator()(const RemoteTensorHandleInternal& first,
                  const RemoteTensorHandleInternal& second) const {
    return first.op_id == second.op_id && first.output_num == second.output_num;
  }
};
} // namespace eager
} // namespace tensorflow
#endif // TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_TENSOR_HANDLE_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h |
---
# Play 1: nothing has failed yet, so the play-scoped magic variables
# must still contain all three inventory hosts.
- hosts: all
  gather_facts: no
  tasks:
    - name: test magic vars for hosts without any failed/unreachable (no serial)
      assert:
        that:
          - ansible_play_batch | length == 3
          - ansible_play_hosts | length == 3
          - ansible_play_hosts_all | length == 3
      run_once: True
    - ping:
      failed_when: "inventory_hostname == 'failed'"
    # Forget the induced failure so the next play starts clean.
    - meta: clear_host_errors
- hosts: all
  gather_facts: no
  tasks:
    - name: test host errors were cleared
      assert:
        that:
          - ansible_play_batch | length == 3
          - ansible_play_hosts | length == 3
          - ansible_play_hosts_all | length == 3
      run_once: True
    - ping:
      failed_when: "inventory_hostname == 'failed'"
    # This time the failure is NOT cleared: 'failed' must drop out of
    # ansible_play_batch / ansible_play_hosts but stay in *_hosts_all.
    - name: test magic vars exclude failed/unreachable hosts
      assert:
        that:
          - ansible_play_batch | length == 1
          - ansible_play_hosts | length == 1
          - "ansible_play_batch == ['successful']"
          - "ansible_play_hosts == ['successful']"
          - ansible_play_hosts_all | length == 3
      run_once: True
- hosts: all
  gather_facts: no
  tasks:
    - name: test failed/unreachable persists between plays
      assert:
        that:
          - ansible_play_batch | length == 1
          - ansible_play_hosts | length == 1
          - "ansible_play_batch == ['successful']"
          - "ansible_play_hosts == ['successful']"
          - ansible_play_hosts_all | length == 3
      run_once: True
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" dtr_test_execution:
code related to the execution of dtr test cases
We are provided access to a testManager with
dtr-specific testCases. We contact teh executionManager
to produce the system and server configurations we need
to execute a test.
"""
# imports
import os
import sys
import subprocess
import commands
import lib.test_mgmt.test_execution as test_execution
class testExecutor(test_execution.testExecutor):
    """ dtr-specific testExecutor
        We currently execute by sending test-case
        data to client/drizzletest...for now
    """
    def execute_testCase (self):
        """ Execute a dtr testCase via calls to drizzletest (boo)
            Eventually, we will replace drizzletest with pythonic
            goodness, but we have these classes stored here for the moment
        """
        test_execution.testExecutor.execute_testCase(self)
        self.status = 0
        # generate command line
        drizzletest_cmd = self.generate_drizzletest_call()
        # call drizzletest
        self.execute_drizzletest(drizzletest_cmd)
        # analyze results
        self.current_test_status = self.process_drizzletest_output()
        self.set_server_status(self.current_test_status)
    def generate_drizzletest_call(self):
        """ Produce the command line we use to call drizzletest
            We have a healthy number of values, so we put this in a
            nice function
        """
        drizzletest_arguments = [ '--no-defaults'
                                , '--silent'
                                , '--tmpdir=%s' %(self.master_server.tmpdir)
                                , '--logdir=%s' %(self.master_server.logdir)
                                , '--port=%d' %(self.master_server.master_port)
                                , '--database=test'
                                , '--user=root'
                                , '--password='
                                #, '--testdir=%s' %(self.test_manager.testdir)
                                , '--test-file=%s' %(self.current_testcase.testpath)
                                , '--tail-lines=20'
                                , '--timer-file=%s' %(self.master_server.timer_file)
                                , '--result-file=%s' %(self.current_testcase.resultpath)
                                ]
        if self.record_flag:
            # We want to record a new result
            drizzletest_arguments.append('--record')
        drizzletest_cmd = "%s %s %s" %( self.cmd_prefix
                                      , self.master_server.code_tree.drizzletest
                                      , " ".join(drizzletest_arguments))
        return drizzletest_cmd
    def execute_drizzletest(self, drizzletest_cmd):
        """ Execute the commandline and return the result.
            We use subprocess as we can pass os.environ dicts and whatnot
        """
        testcase_name = self.current_testcase.fullname
        self.time_manager.start(testcase_name,'test')
        #retcode, output = self.system_manager.execute_cmd( drizzletest_cmd
        #                                                 , must_pass = 0 )
        # drizzletest output is captured to a log file, then read back so
        # it can be stored on the executor for later analysis/reporting.
        drizzletest_outfile = os.path.join(self.logdir,'drizzletest.out')
        drizzletest_output = open(drizzletest_outfile,'w')
        drizzletest_subproc = subprocess.Popen( drizzletest_cmd
                                              , shell=True
                                              , cwd=self.system_manager.testdir
                                              , env=self.working_environment
                                              , stdout = drizzletest_output
                                              , stderr = subprocess.STDOUT
                                              )
        drizzletest_subproc.wait()
        retcode = drizzletest_subproc.returncode
        execution_time = int(self.time_manager.stop(testcase_name)*1000) # millisec
        drizzletest_output.close()
        drizzletest_file = open(drizzletest_outfile,'r')
        output = ''.join(drizzletest_file.readlines())
        drizzletest_file.close()
        self.logging.debug("drizzletest_retcode: %d" %(retcode))
        self.current_test_retcode = retcode
        self.current_test_output = output
        self.current_test_exec_time = execution_time
    def process_drizzletest_output(self):
        """ Drizzletest has run, we now check out what we have """
        # Map drizzletest exit codes onto test statuses.
        # 62 signals a skip request (15872 == 62 << 8, presumably an
        # unshifted wait status — confirm); 63/1 are failures, and any
        # unknown code is treated as a failure as well.
        retcode = self.current_test_retcode
        if retcode == 0:
            return 'pass'
        elif retcode == 62 or retcode == 15872:
            return 'skipped'
        elif retcode == 63 or retcode == 1:
            return 'fail'
        else:
            return 'fail'
"""Nearly exact trust-region optimization subproblem."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
cho_solve)
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
# Public API of this module (the solver entry point plus the helpers
# exercised directly by the test suite).
__all__ = ['_minimize_trustregion_exact',
           'estimate_smallest_singular_value',
           'singular_leading_submatrix',
           'IterativeSubproblem']
def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
                                **trust_region_options):
    """
    Minimization of scalar function of one or more variables using
    a nearly exact trust-region algorithm.
    Options
    -------
    initial_tr_radius : float
        Initial trust-region radius.
    max_tr_radius : float
        Maximum value of the trust-region radius. No steps that are longer
        than this value will be proposed.
    eta : float
        Trust region related acceptance stringency for proposed steps.
    gtol : float
        Gradient norm must be less than ``gtol`` before successful
        termination.
    """
    # Both derivatives are mandatory: the iterative subproblem
    # Cholesky-factorizes the (dense) Hessian directly.
    if jac is None:
        raise ValueError('Jacobian is required for trust region '
                         'exact minimization.')
    if hess is None:
        raise ValueError('Hessian matrix is required for trust region '
                         'exact minimization.')
    # Delegate the outer trust-region loop; IterativeSubproblem solves
    # each quadratic subproblem nearly exactly.
    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
                                  subproblem=IterativeSubproblem,
                                  **trust_region_options)
def estimate_smallest_singular_value(U):
    """Estimate the smallest singular value/right singular vector of ``U``.

    ``U`` must be a square upper triangular matrix.  The estimate costs
    O(n**2) operations and follows the condition-number estimator of
    Cline, Moler, Stewart and Wilkinson (1979): choose ``e`` with entries
    in {+1, -1} so the solution ``w`` of ``U.T w = e`` is as large as
    possible, then solve ``U v = w``.  The smallest singular value is
    close to ``norm(w)/norm(v)`` and the right singular vector close to
    ``v/norm(v)``; the estimate improves as ``U`` gets more
    ill-conditioned.

    Returns
    -------
    s_min : float
        Estimated smallest singular value.
    z_min : ndarray
        Estimated right singular vector (unit norm).
    """
    U = np.atleast_2d(U)
    rows, cols = U.shape
    if rows != cols:
        raise ValueError("A square triangular matrix should be provided.")

    # Forward pass: greedily pick e[k] in {+1, -1} so the solution of the
    # lower triangular system U.T w = e grows as fast as possible
    # (Golub & Van Loan, "Matrix computations", alg. 3.5.1 adapted).
    partial = np.zeros(cols)
    w = np.empty(cols)
    for k in range(cols):
        diag = U[k, k]
        cand_plus = (1 - partial[k]) / diag
        cand_minus = (-1 - partial[k]) / diag
        tail = U[k, k+1:]
        upd_plus = partial[k+1:] + tail * cand_plus
        upd_minus = partial[k+1:] + tail * cand_minus
        if abs(cand_plus) + norm(upd_plus, 1) >= \
                abs(cand_minus) + norm(upd_minus, 1):
            w[k] = cand_plus
            partial[k+1:] = upd_plus
        else:
            w[k] = cand_minus
            partial[k+1:] = upd_minus

    # Backward substitution: U v = w.
    v = solve_triangular(U, w)
    v_norm = norm(v)

    # Smallest singular value and associated (normalized) vector.
    s_min = norm(w) / v_norm
    z_min = v / v_norm
    return s_min, z_min
def gershgorin_bounds(H):
    """
    Cheap lower/upper bounds on the eigenvalues of the square matrix ``H``
    via Gershgorin discs (Conn, Gould & Toint, "Trust region methods",
    2000, p. 19).
    """
    diag = np.diag(H)
    abs_diag = np.abs(diag)
    abs_row_sums = np.abs(H).sum(axis=1)
    # Each disc is centred at H[i, i] with radius equal to the
    # off-diagonal row sum; abs_row_sums includes |H[i, i]|, hence the
    # +/- abs_diag corrections below.
    lower = np.min(diag + abs_diag - abs_row_sums)
    upper = np.max(diag - abs_diag + abs_row_sums)
    return lower, upper
def singular_leading_submatrix(A, U, k):
    """
    Compute the perturbation making the leading ``k`` by ``k`` submatrix
    of ``A`` singular.

    Parameters
    ----------
    A : ndarray
        Symmetric matrix that is not positive definite.
    U : ndarray
        Upper triangular factor from an incomplete Cholesky factorization
        of ``A``.
    k : int
        Positive integer such that the leading k by k submatrix of ``A``
        is the first non-positive-definite leading submatrix.

    Returns
    -------
    delta : float
        Amount to add to ``A[k-1, k-1]`` so the leading k by k submatrix
        becomes singular.
    v : ndarray
        Vector satisfying ``v.T B v = 0`` where ``B`` is ``A`` with
        ``delta`` added to its (k, k) element.
    """
    # The failed Cholesky pivot measures how far A[k-1, k-1] is from
    # keeping the leading submatrix positive semidefinite.
    delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]

    # Build the null-ish vector: fix component k to one, then
    # back-substitute for the first k-1 components (trivial when k == 1).
    v = np.zeros(len(A))
    v[k-1] = 1
    if k != 1:
        v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])
    return delta, v
class IterativeSubproblem(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by nearly exact iterative method.
Notes
-----
This subproblem solver was based on [1]_, [2]_ and [3]_,
which implement similar algorithms. The algorithm is basically
that of [1]_ but ideas from [2]_ and [3]_ were also used.
References
----------
.. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
Siam, pp. 169-200, 2000.
.. [2] J. Nocedal and S. Wright, "Numerical optimization",
Springer Science & Business Media. pp. 83-91, 2006.
.. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
pp. 553-572, 1983.
"""
# UPDATE_COEFF appears in reference [1]_
# in formula 7.3.14 (p. 190) named as "theta".
# As recommended there it value is fixed in 0.01.
UPDATE_COEFF = 0.01
EPS = np.finfo(float).eps
    def __init__(self, x, fun, jac, hess, hessp=None,
                 k_easy=0.1, k_hard=0.2):
        super(IterativeSubproblem, self).__init__(x, fun, jac, hess)
        # When the trust-region shrinks in two consecutive
        # calculations (``tr_radius < previous_tr_radius``)
        # the lower bound ``lambda_lb`` may be reused,
        # facilitating the convergence. To indicate no
        # previous value is known at first ``previous_tr_radius``
        # is set to -1 and ``lambda_lb`` to None.
        self.previous_tr_radius = -1
        self.lambda_lb = None
        self.niter = 0
        # ``k_easy`` and ``k_hard`` are parameters used
        # to determine the stop criteria to the iterative
        # subproblem solver. Take a look at pp. 194-197
        # from reference _[1] for a more detailed description.
        self.k_easy = k_easy
        self.k_hard = k_hard
        # Get Lapack function for cholesky decomposition.
        # The implemented SciPy wrapper does not return
        # the incomplete factorization needed by the method.
        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
        # Get info about Hessian (cached: reused on every solve call).
        self.dimension = len(self.hess)
        self.hess_gershgorin_lb,\
            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
        self.hess_inf = norm(self.hess, np.Inf)
        self.hess_fro = norm(self.hess, 'fro')
        # A constant such that for vectors smaller than that
        # backward substitution is not reliable. It was established
        # based on Golub, G. H., Van Loan, C. F. (2013).
        # "Matrix computations". Fourth Edition. JHU press., p.165.
        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
    def _initial_values(self, tr_radius):
        """Given a trust radius, return a good initial guess for
        the damping factor, the lower bound and the upper bound.
        The values were chosen accordingly to the guidelines on
        section 7.3.8 (p. 192) from [1]_.
        """
        # Upper bound for the damping factor
        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
                                                        self.hess_fro,
                                                        self.hess_inf))
        # Lower bound for the damping factor
        lambda_lb = max(0, -min(self.hess.diagonal()),
                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
                                                     self.hess_fro,
                                                     self.hess_inf))
        # Improve bounds with previous info (valid only when the radius
        # shrank, see the constructor comment on previous_tr_radius).
        if tr_radius < self.previous_tr_radius:
            lambda_lb = max(self.lambda_lb, lambda_lb)
        # Initial guess for the damping factor
        if lambda_lb == 0:
            lambda_initial = 0
        else:
            # Geometric mean, nudged away from the lower bound (eq. 7.3.14).
            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
        return lambda_initial, lambda_lb, lambda_ub
    def solve(self, tr_radius):
        """Solve the quadratic subproblem for the given trust radius.

        Iterates on the damping factor ``lambda`` (with bounds maintained
        in ``lambda_lb``/``lambda_ub``) until one of the stop criteria,
        controlled by ``k_easy``/``k_hard``, is met.

        Returns ``(p, hits_boundary)`` where ``p`` is the step and
        ``hits_boundary`` tells whether ``p`` lies on the trust-region
        boundary.
        """
        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
        n = self.dimension
        hits_boundary = True
        already_factorized = False
        self.niter = 0
        while True:
            # Compute Cholesky factorization of H = hess + lambda*I,
            # unless the previous iteration left one behind.
            if already_factorized:
                already_factorized = False
            else:
                H = self.hess+lambda_current*np.eye(n)
                U, info = self.cholesky(H, lower=False,
                                        overwrite_a=False,
                                        clean=True)
            self.niter += 1
            # ``info == 0`` means potrf succeeded, i.e. H is positive
            # definite for the current damping factor.
            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
                # Successful factorization
                # Solve `U.T U p = -g` (Newton step for the damped system).
                p = cho_solve((U, False), -self.jac)
                p_norm = norm(p)
                # Interior convergence: undamped step already inside the region.
                if p_norm <= tr_radius and lambda_current == 0:
                    hits_boundary = False
                    break
                # Solve `U.T w = p`
                w = solve_triangular(U, p, trans='T')
                w_norm = norm(w)
                # Compute Newton step according to
                # formula (4.44) p.87 from ref [2]_.
                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
                lambda_new = lambda_current + delta_lambda
                if p_norm < tr_radius:  # Inside boundary (potential hard case)
                    s_min, z_min = estimate_smallest_singular_value(U)
                    ta, tb = self.get_boundaries_intersections(p, z_min,
                                                               tr_radius)
                    # Choose `step_len` with the smallest magnitude.
                    # The reason for this choice is explained at
                    # ref [3]_, p. 6 (Immediately before the formula
                    # for `tau`).
                    step_len = min([ta, tb], key=abs)
                    # Compute the quadratic term (p.T*H*p)
                    quadratic_term = np.dot(p, np.dot(H, p))
                    # Check stop criteria
                    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
                    if relative_error <= self.k_hard:
                        p += step_len * z_min
                        break
                    # Update uncertainty bounds
                    lambda_ub = lambda_current
                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)
                    # Compute Cholesky factorization for the candidate lambda.
                    # NOTE(review): the fresh factor is bound to ``c``, yet
                    # the next iteration (via ``already_factorized``) reuses
                    # ``U``, which still holds the factor of the *previous*
                    # H -- confirm whether reusing the older factor here is
                    # intended.
                    H = self.hess + lambda_new*np.eye(n)
                    c, info = self.cholesky(H, lower=False,
                                            overwrite_a=False,
                                            clean=True)
                    # Check if the factorization has succeeded
                    if info == 0:  # Successful factorization
                        # Update damping factor
                        lambda_current = lambda_new
                        already_factorized = True
                    else:  # Unsuccessful factorization
                        # Update uncertainty bounds
                        lambda_lb = max(lambda_lb, lambda_new)
                        # Update damping factor
                        lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                             lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
                else:  # Outside boundary
                    # Check stop criteria
                    relative_error = abs(p_norm - tr_radius) / tr_radius
                    if relative_error <= self.k_easy:
                        break
                    # Update uncertainty bounds
                    lambda_lb = lambda_current
                    # Update damping factor
                    lambda_current = lambda_new
            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
                # jac_mag very close to zero: the gradient gives no reliable
                # direction, so fall back to the smallest-singular-vector step.
                # Check for interior convergence
                if lambda_current == 0:
                    p = np.zeros(n)
                    hits_boundary = False
                    break
                s_min, z_min = estimate_smallest_singular_value(U)
                step_len = tr_radius
                # Check stop criteria
                if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
                    p = step_len * z_min
                    break
                # Update uncertainty bounds
                lambda_ub = lambda_current
                lambda_lb = max(lambda_lb, lambda_current - s_min**2)
                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
            else:  # Unsuccessful factorization
                # Compute auxiliary terms from the singular leading submatrix
                # reported by potrf (index ``info``).
                delta, v = singular_leading_submatrix(H, U, info)
                v_norm = norm(v)
                # Update uncertainty interval
                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
        # Remember quantities reused by _initial_values on the next solve.
        self.lambda_lb = lambda_lb
        self.lambda_current = lambda_current
        self.previous_tr_radius = tr_radius
        return p, hits_boundary
name: Integration Tests Reusable
on:
workflow_call:
inputs:
name:
        description: A unique identifier used for uploaded assets
type: string
test_type:
description: '"development" or "production"'
required: true
type: string
run_before_test:
description: >
Bash code to run before executing the test (e.g. setting environment
variables). Runs in the same step as the test.
type: string
default: ''
e2e_groups:
description: >
Size of the matrix used for running e2e tests (controls parallelism)
type: number
default: 6
integration_groups:
description: >
Size of the matrix used for running legacy integration tests (controls
parallelism)
type: number
default: 6
e2e_timeout_minutes:
type: number
default: 30
integration_timeout_minutes:
type: number
default: 30
num_retries:
type: number
default: 2
jobs:
# First, build Next.js to execute across tests.
build-next:
name: build-next
uses: ./.github/workflows/build_reusable.yml
with:
skipNativeBuild: yes
stepName: build-next
secrets: inherit
build-native:
name: build-native
uses: ./.github/workflows/build_reusable.yml
with:
skipInstallBuild: yes
stepName: build-native
secrets: inherit
generate-matrices:
runs-on: [self-hosted, linux, x64, metal]
steps:
- id: out
run: |
printf 'e2e=[%s]\n' \
"$(seq -s, 1 ${{ inputs.e2e_groups }})" | \
tee -a "$GITHUB_OUTPUT"
printf 'integration=[%s]\n' \
"$(seq -s, 1 ${{ inputs.integration_groups }})" | \
tee -a "$GITHUB_OUTPUT"
outputs:
e2e: ${{ steps.out.outputs.e2e }}
integration: ${{ steps.out.outputs.integration }}
# Actual test scheduling. These jobs mimic the normal test jobs.
# Refer build_and_test.yml for more details.
#
# We run tests in two parts. Legacy integration tests are run separately:
# https://github.com/vercel/next.js/blob/canary/contributing/core/testing.md#test-types-in-nextjs
test-e2e:
# Name must match `integrationTestJobs` in
# `./.github/actions/next-integration-stat`
name: >-
Next.js integration test (E2E and ${{ inputs.test_type }})
(${{ matrix.group }}/${{ inputs.e2e_groups }})
needs: [build-next, build-native, generate-matrices]
strategy:
fail-fast: false
matrix:
group: ${{ fromJSON(needs.generate-matrices.outputs.e2e) }}
uses: ./.github/workflows/build_reusable.yml
with:
afterBuild: |
# e2e and ${{ inputs.test_type }} tests with `node run-tests.js`
export NEXT_TEST_MODE=${{
inputs.test_type == 'development' && 'dev' || 'start'
}}
export NEXT_TEST_EMIT_ALL_OUTPUT=1
${{ inputs.run_before_test }}
node run-tests.js \
--group ${{ matrix.group }}/${{ inputs.e2e_groups }} \
--retries ${{ inputs.num_retries }} \
--type ${{ inputs.test_type }}
stepName: test-${{ inputs.name }}-${{ matrix.group }}
timeout_minutes: ${{ inputs.e2e_timeout_minutes }}
secrets: inherit
test-integration:
# Name must match `integrationTestJobs` in
# `./.github/actions/next-integration-stat`
    name: >-
      Next.js integration test (Integration)
      (${{ matrix.group }}/${{ inputs.integration_groups }})
needs: [build-next, build-native, generate-matrices]
strategy:
fail-fast: false
matrix:
group: ${{ fromJSON(needs.generate-matrices.outputs.integration) }}
uses: ./.github/workflows/build_reusable.yml
with:
nodeVersion: 20.9.0
afterBuild: |
# legacy integration tests with `node run-tests.js`
# HACK: Despite the name, these environment variables are just used to
# gate tests, so they're applicable to both turbopack and rspack tests
export ${{
inputs.test_type == 'development' &&
'TURBOPACK_DEV=1' ||
'TURBOPACK_BUILD=1'
}}
export NEXT_TEST_EMIT_ALL_OUTPUT=1
${{ inputs.run_before_test }}
node run-tests.js \
--group ${{ matrix.group }}/${{ inputs.integration_groups }} \
--retries ${{ inputs.num_retries }} \
--type integration
stepName: test-${{ inputs.name }}-integration-${{ matrix.group }}
timeout_minutes: ${{ inputs.integration_timeout_minutes }}
secrets: inherit
# Collect integration test results from execute_tests,
# Store it as github artifact for next step to consume.
collect_nextjs_development_integration_stat:
needs: [test-e2e, test-integration]
name: Next.js integration test development status report
runs-on: [self-hosted, linux, x64, metal]
if: always()
permissions:
pull-requests: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Collect integration test stat
uses: ./.github/actions/next-integration-stat
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Store artifacts
uses: actions/upload-artifact@v4
with:
name: test-results-${{ inputs.name }}
path: |
nextjs-test-results.json
failed-test-path-list.json
passed-test-path-list.json | unknown | github | https://github.com/vercel/next.js | .github/workflows/integration_tests_reusable.yml |
'''
Unit tests for oc clusterrole
'''
import copy
import os
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_clusterrole import OCClusterRole # noqa: E402
class OCClusterRoleTest(unittest.TestCase):
    '''
    Test class for OCClusterRole
    '''
    # Arguments handed to OCClusterRole.run_ansible by each test case.
    params = {
        'state': 'present',
        'name': 'operations',
        'rules': [
            {'apiGroups': [''],
             'attributeRestrictions': None,
             'verbs': ['create', 'delete', 'deletecollection',
                       'get', 'list', 'patch', 'update', 'watch'],
             'resources': ['persistentvolumes']}
        ],
        'kubeconfig': '/etc/origin/master/admin.kubeconfig',
        'debug': False,
    }
    @mock.patch('oc_clusterrole.locate_oc_binary')
    @mock.patch('oc_clusterrole.Utils.create_tmpfile_copy')
    @mock.patch('oc_clusterrole.Utils._write')
    @mock.patch('oc_clusterrole.OCClusterRole._run')
    def test_adding_a_clusterrole(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_binary):
        ''' Creating a clusterrole that does not exist yet reports changed. '''
        task_args = copy.deepcopy(OCClusterRoleTest.params)
        # JSON body returned by `oc get` once the clusterrole exists.
        clusterrole_json = '''{
            "apiVersion": "v1",
            "kind": "ClusterRole",
            "metadata": {
                "creationTimestamp": "2017-03-27T14:19:09Z",
                "name": "operations",
                "resourceVersion": "23",
                "selfLink": "/oapi/v1/clusterrolesoperations",
                "uid": "57d358fe-12f8-11e7-874a-0ec502977670"
            },
            "rules": [
                {
                    "apiGroups": [
                        ""
                    ],
                    "attributeRestrictions": null,
                    "resources": [
                        "persistentvolumes"
                    ],
                    "verbs": [
                        "create",
                        "delete",
                        "deletecollection",
                        "get",
                        "list",
                        "patch",
                        "update",
                        "watch"
                    ]
                }
            ]
        }'''
        # Scripted return values, one per consecutive `oc` invocation.
        mock_cmd.side_effect = [
            # First two lookups: the clusterrole is absent.
            (1, '', 'Error from server: clusterrole "operations" not found'),
            (1, '', 'Error from server: namespaces "operations" not found'),
            # Creation succeeds ...
            (0, '', ''),
            # ... and the follow-up fetch returns the new object.
            (0, clusterrole_json, ''),
        ]
        mock_tmpfile_copy.side_effect = ['/tmp/mocked_kubeconfig']
        mock_loc_binary.side_effect = ['oc']
        # Act
        outcome = OCClusterRole.run_ansible(task_args, False)
        # Assert on the reported state and the fetched object.
        self.assertTrue(outcome['changed'])
        self.assertEqual(outcome['results']['returncode'], 0)
        self.assertEqual(outcome['results']['results']['metadata']['name'], 'operations')
        self.assertEqual(outcome['state'], 'present')
        # Every expected `oc` call, in order.
        expected_get = mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None)
        mock_cmd.assert_has_calls([
            expected_get,
            expected_get,
            mock.call(['oc', 'create', '-f', mock.ANY], None),
            expected_get,
        ])
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import sys
try:
    import ctypes
    import ctypes.util
except MemoryError:
    # selinux execmem denial
    # https://bugzilla.redhat.com/show_bug.cgi?id=488396
    raise ImportError
# Platforms whose libc sendfile(2) calling convention this module knows.
SUPPORTED_PLATFORMS = (
    'darwin',
    'freebsd',
    'dragonfly',
    'linux2')
# Raising ImportError here lets callers treat "no native sendfile" the
# same way as the module being absent.
if sys.version_info < (2, 6) or \
        sys.platform not in SUPPORTED_PLATFORMS:
    raise ImportError("sendfile isn't supported on this platform")
# Resolve libc's sendfile symbol once at import time; use_errno makes
# errno readable through ctypes.get_errno() after a failed call.
_libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
_sendfile = _libc.sendfile
def sendfile(fdout, fdin, offset, nbytes):
    """Transfer up to ``nbytes`` bytes from ``fdin`` to ``fdout`` starting
    at ``offset`` using the platform's native sendfile(2).

    Returns the number of bytes sent.  Raises ``OSError`` on failure,
    except on the Darwin/BSD branches where ``EAGAIN`` returns the
    partial byte count recorded by the kernel instead.
    """
    if sys.platform == 'darwin':
        # Darwin prototype: sendfile(fd, s, offset, *len, hdtr, flags);
        # ``len`` is in/out and receives the number of bytes sent.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_voidp,
                              ctypes.c_int]
        _nbytes = ctypes.c_uint64(nbytes)
        # ctypes passes _nbytes by reference automatically for the
        # POINTER(c_uint64) argtype.
        result = _sendfile(fdin, fdout, offset, _nbytes, None, 0)
        if result == -1:
            e = ctypes.get_errno()
            # NOTE(review): ``_nbytes.value`` is an int and can never be
            # None, so this guard always passes on EAGAIN -- confirm intent.
            if e == errno.EAGAIN and _nbytes.value is not None:
                return _nbytes.value
            raise OSError(e, os.strerror(e))
        return _nbytes.value
    elif sys.platform in ('freebsd', 'dragonfly',):
        # FreeBSD/DragonFly prototype: sendfile(fd, s, offset, nbytes,
        # hdtr, *sbytes, flags); ``sbytes`` receives the bytes sent.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64,
                              ctypes.c_uint64, ctypes.c_voidp,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_int]
        _sbytes = ctypes.c_uint64()
        result = _sendfile(fdin, fdout, offset, nbytes, None, _sbytes, 0)
        if result == -1:
            e = ctypes.get_errno()
            # NOTE(review): same always-true ``is not None`` guard as above.
            if e == errno.EAGAIN and _sbytes.value is not None:
                return _sbytes.value
            raise OSError(e, os.strerror(e))
        return _sbytes.value
    else:
        # Linux prototype: sendfile(out_fd, in_fd, *offset, count); the
        # offset is advanced in place and the call returns bytes sent.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_size_t]
        _offset = ctypes.c_uint64(offset)
        sent = _sendfile(fdout, fdin, _offset, nbytes)
        if sent == -1:
            e = ctypes.get_errno()
            raise OSError(e, os.strerror(e))
        return sent
#!/usr/bin/env python
"""Convert a pickled list of integer sequences (phrases) into an HDF5
file containing one flat ``phrases`` array plus an ``indices`` table of
(position, length) pairs locating each phrase within it.

NOTE: Python 2 script (cPickle, print statements).
"""
import argparse
import cPickle as pkl
import gzip
import sys
import tables
import numpy
parser = argparse.ArgumentParser()
parser.add_argument("input",
                    type=argparse.FileType('rb'),
                    help="Pickle file")
parser.add_argument("output",
                    type=argparse.FileType('w'),
                    help="Output HDF5 file")
args = parser.parse_args()
class Index(tables.IsDescription):
    # Offset of the phrase inside the flat ``phrases`` array.
    pos = tables.UInt32Col()
    # Number of tokens in the phrase.
    length = tables.UInt32Col()
# Reopen the output path through PyTables (argparse only validated it).
f = args.output
f = tables.open_file(f.name, f.mode)
# Extendable array holding all phrase tokens back to back.
# NOTE(review): createEArray/createTable are the legacy camelCase
# PyTables names -- presumably kept for PyTables 2.x; verify version.
earrays = f.createEArray(f.root, 'phrases',
                         tables.Int32Atom(),shape=(0,))
indices = f.createTable("/", 'indices',
                        Index, "a table of indices and lengths")
sfile = open(args.input.name, args.input.mode)
sarray = pkl.load(sfile)
sfile.close()
count = 0
pos = 0
for x in sarray:
    # Append the phrase's tokens and record where they landed.
    earrays.append(numpy.array(x))
    ind = indices.row
    ind['pos'] = pos
    ind['length'] = len(x)
    ind.append()
    pos += len(x)
    count += 1
    # Progress output; flush the index table every 100k phrases.
    if count % 100000 == 0:
        print count,
        sys.stdout.flush()
        indices.flush()
    elif count % 10000 == 0:
        print '.',
        sys.stdout.flush()
f.close()
print 'processed', count, 'phrases'
use serde_derive::Serialize;
mod remote {
    pub struct S {
        a: u8,
    }
    impl S {
        pub fn get(&self) -> u16 { // returns u16 while the field is u8 -- the mismatch under test
            self.a as u16
        }
    }
}
#[derive(Serialize)]
#[serde(remote = "remote::S")]
struct S {
    #[serde(getter = "remote::S::get")] // getter's return type must match the field type
    a: u8,
}
fn main() {} // UI fixture: comments kept on existing lines so error line numbers do not shift
// Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package cli
import (
"fmt"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/datadriven"
)
// This test doctoring a secure cluster.
func TestDeclarativeRules(t *testing.T) {
defer leaktest.AfterTest(t)()
c := NewCLITest(TestCLIParams{T: t, NoServer: true})
defer c.Cleanup()
t.Run("declarative corpus validation standalone command", func(t *testing.T) {
invalidOut, err := c.RunWithCapture(fmt.Sprintf("debug declarative-print-rules %s op", "1.1"))
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "declarative-rules", "invalid_version"), func(t *testing.T, td *datadriven.TestData) string {
// Do not display the present current version within the output,
// for testing purposes. This can change from build to build, and
// need changes for every version bump.
return strings.Replace(invalidOut,
" "+clusterversion.Latest.String()+"\n",
" latest\n",
-1)
})
depOut, err := c.RunWithCapture(fmt.Sprintf("debug declarative-print-rules %s dep", clusterversion.PreviousRelease))
if err != nil {
t.Fatal(err)
}
datadriven.RunTest(t, datapathutils.TestDataPath(t, "declarative-rules", "deprules"), func(t *testing.T, td *datadriven.TestData) string {
return depOut
})
})
} | go | github | https://github.com/cockroachdb/cockroach | pkg/cli/declarative_print_rules_test.go |
# التدريب باستخدام نص برمجى
بالإضافة إلى دفاتر الملاحظات [notebooks](./notebooks) الخاصة بـ 🤗 Transformers، هناك أيضًا نصوص برمجية توضيحية تُظهر كيفية تدريب نموذج لمهمة باستخدام [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) أو [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) أو [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).
كما ستجد النصوص البرمجية التي استخدمناها في [مشاريع الأبحاث](https://github.com/huggingface/transformers-research-projects/) و [الأمثلة القديمة](https://github.com/huggingface/transformers/tree/main/examples/legacy) والتي ساهم بها المجتمع بشكل أساسي. هذه النصوص البرمجية غير مدعومة بشكل نشط وقد تتطلب إصدارًا محددًا من مكتبة 🤗 Transformers والذي من المحتمل أن يكون غير متوافق مع الإصدار الأحدث من المكتبة.
لا يُتوقع أن تعمل النصوص البرمجية التوضيحية بشكل مباشر على كل مشكلة، وقد تحتاج إلى تكييف النص البرمجي مع المشكلة التي تحاول حلها. ولمساعدتك في ذلك، تعرض معظم النصوص البرمجية كيفية معالجة البيانات قبل التدريب بشكل كامل، مما يتيح لك تحريرها حسب الحاجة لحالتك الاستخدام.
بالنسبة لأي ميزة ترغب في تنفيذها في نص برمجي توضيحي، يرجى مناقشتها في [المنتدى](https://discuss.huggingface.co/) أو في [قضية](https://github.com/huggingface/transformers/issues) قبل إرسال طلب سحب. وفي حين أننا نرحب بإصلاح الأخطاء، فمن غير المرجح أن نقوم بدمج طلب سحب الذي يضيف المزيد من الوظائف على حساب قابلية القراءة.
سيوضح هذا الدليل كيفية تشغيل نص برمجي توضيحي للتدريب على التلخيص في [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) و [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). يُتوقع أن تعمل جميع الأمثلة مع كلا الإطارين ما لم يُنص على خلاف ذلك.
## الإعداد
لتشغيل الإصدار الأحدث من النصوص البرمجية التوضيحية بنجاح، يجب عليك **تثبيت 🤗 Transformers من المصدر** في بيئة افتراضية جديدة:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```
بالنسبة للإصدارات الأقدم من النصوص البرمجية التوضيحية، انقر فوق الزر أدناه:
<details>
<summary>أمثلة للإصدارات الأقدم من 🤗 Transformers</summary>
<ul>
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li>
</ul>
</details>
ثم قم بالتبديل إلى النسخة الحالية من 🤗 Transformers إلى إصدار محدد، مثل v3.5.1 على سبيل المثال:
```bash
git checkout tags/v3.5.1
```
بعد إعداد إصدار المكتبة الصحيح، انتقل إلى مجلد الأمثلة الذي تختاره وقم بتثبيت المتطلبات المحددة:
```bash
pip install -r requirements.txt
```
## تشغيل نص برمجي
- يقوم النص البرمجي التوضيحي بتنزيل مجموعة بيانات ومعالجتها مسبقًا من مكتبة 🤗 [Datasets](https://huggingface.co/docs/datasets).
- ثم يقوم النص البرمجي بضبط نموذج بيانات دقيق باستخدام [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) على بنية تدعم الملخص.
- يوضح المثال التالي كيفية ضبط نموذج [T5-small](https://huggingface.co/google-t5/t5-small) على مجموعة بيانات [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail).
- يتطلب نموذج T5 معامل `source_prefix` إضافية بسبب الطريقة التي تم تدريبه بها. يتيح هذا المطالبة لـ T5 معرفة أن هذه مهمة التلخيص.
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
## التدريب الموزع والدقة المختلطة
يدعم [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) التدريب الموزع والدقة المختلطة، مما يعني أنه يمكنك أيضًا استخدامه في نص برمجي. لتمكين كلتا الميزتين:
- أضف معامل `fp16` لتمكين الدقة المختلطة.
- قم بتعيين عدد وحدات معالجة الرسومات (GPUs) التي تريد استخدامها باستخدام حجة `nproc_per_node`.
```bash
torchrun \
--nproc_per_node 8 pytorch/summarization/run_summarization.py \
--fp16 \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
تستخدم نصوص TensorFlow البرمجية استراتيجية [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) للتدريب الموزع، ولا تحتاج إلى إضافة أي معامﻻت إضافية إلى النص البرمجي التدريبي. سيستخدم نص TensorFlow البرمجي وحدات معالجة الرسومات (GPUs) متعددة بشكل افتراضي إذا كانت متوفرة.
## تشغيل نص برمجي على وحدة معالجة الدقة الفائقة (TPU)
تُعد وحدات معالجة الدقة الفائقة (TPUs) مصممة خصيصًا لتسريع الأداء. يدعم PyTorch وحدات معالجة الدقة الفائقة (TPUs) مع [XLA](https://www.tensorflow.org/xla) مجمع الدقة الفائقة للتعلم العميق (راجع [هنا](https://github.com/pytorch/xla/blob/master/README.md) لمزيد من التفاصيل). لاستخدام وحدة معالجة الدقة الفائقة (TPU)، قم بتشغيل نص `xla_spawn.py` البرمجي واستخدم معامل `num_cores` لتعيين عدد وحدات معالجة الدقة الفائقة (TPU) التي تريد استخدامها.
```bash
python xla_spawn.py --num_cores 8 \
summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
## تشغيل نص برمجي باستخدام 🤗 Accelerate
🤗 [Accelerate](https://huggingface.co/docs/accelerate) هي مكتبة خاصة بـ PyTorch فقط توفر طريقة موحدة لتدريب نموذج على عدة أنواع من الإعدادات (الاعتماد على وحدة المعالجة المركزية (CPU) فقط، أو وحدات معالجة الرسومات (GPUs) المتعددة، أو وحدات معالجة الدقة الفائقة (TPUs)) مع الحفاظ على الرؤية الكاملة لحلقة تدريب PyTorch. تأكد من تثبيت 🤗 Accelerate إذا لم يكن لديك بالفعل:
> ملاحظة: نظرًا لأن Accelerate في حالة تطوير سريع، يجب تثبيت إصدار Git من Accelerate لتشغيل النصوص البرمجية.
```bash
pip install git+https://github.com/huggingface/accelerate
```
بدلاً من إستخدام النص البرمجي `run_summarization.py` يجب عليك استخدام النص البرمجي `run_summarization_no_trainer.py` . ستكون النصوص البرمجية المدعومة من 🤗 Accelerate لها ملف `task_no_trainer.py` في المجلد. ابدأ بتشغيل الأمر التالي لإنشاء وحفظ ملف تكوين:
```bash
accelerate config
```
اختبر إعدادك للتأكد من أنه تم تكوينه بشكل صحيح:
```bash
accelerate test
```
الآن أنت مستعد لبدء التدريب:
```bash
accelerate launch run_summarization_no_trainer.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir ~/tmp/tst-summarization
```
## استخدام مجموعة بيانات مخصصة
يدعم النص البرمجي للتلخيص مجموعة بيانات مخصصة طالما أنها ملف CSV أو JSON Line. عندما تستخدم مجموعة بياناتك الخاصة، تحتاج إلى تحديد العديد من المعلمات الإضافية:
- `train_file` و`validation_file` يحددان مسار ملفات التدريب والتحقق الخاصة بك.
- `text_column` النص المدخل الذي سيتم تلخيصه.
- `summary_column` النص الملخص المستهدف الذي سيتم إخراجه.
سيبدو النص البرمجي للتلخيص الذي يستخدم مجموعة بيانات مخصصة على النحو التالي:
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--train_file path_to_csv_or_jsonlines_file \
--validation_file path_to_csv_or_jsonlines_file \
--text_column text_column_name \
--summary_column summary_column_name \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
## اختبار البرنامج النصي
من الجيد غالبًا تشغيل نصك البرمجي على عدد أقل من أمثلة مجموعة البيانات للتأكد من أن كل شيء يعمل كما هو متوقع قبل الالتزام بمجموعة بيانات كاملة والتي قد تستغرق ساعات لإكمالها. استخدم المعلمات التالية لتقليص مجموعة البيانات إلى عدد أقصى من العينات:
- `max_train_samples`
- `max_eval_samples`
- `max_predict_samples`
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--max_train_samples 50 \
--max_eval_samples 50 \
--max_predict_samples 50 \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
لا تدعم جميع أمثلة النصوص البرمجية المعلمة `max_predict_samples`. إذا لم تكن متأكدًا مما إذا كان نصك البرمجي يدعم هذه المعلمة، فأضف معلمة `-h` للتحقق:
```bash
examples/pytorch/summarization/run_summarization.py -h
```
## استئناف التدريب من نقطة تفتيش
خيار آخر مفيد لتمكينه هو استئناف التدريب من نقطة تفتيش سابقة. سيضمن ذلك أنك تستطيع الاستمرار من حيث توقفت دون البدء من جديد إذا تم مقاطعة تدريبك. هناك طريقتان لاستئناف التدريب من نقطة تفتيش.
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--resume_from_checkpoint path_to_specific_checkpoint \
--predict_with_generate
```
## شارك نموذجك
يمكن لجميع النصوص البرمجية رفع نموذجك النهائي إلى [مركز النماذج](https://huggingface.co/models). تأكد من تسجيل الدخول إلى Hugging Face قبل البدء:
```bash
hf auth login
```
ثم أضف المعلمة `push_to_hub` إلى النص البرمجي . ستقوم هذه المعلمة بإنشاء مستودع باستخدام اسم مستخدم Hugging Face واسم المجلد المحدد في `output_dir`.
لإعطاء مستودعك اسمًا محددًا، استخدم المعلمة `push_to_hub_model_id` لإضافته. سيتم عرض المستودع تلقائيًا ضمن مساحة الاسم الخاصة بك.
يوضح المثال التالي كيفية رفع نموذج باستخدام اسم مستودع محدد:
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--push_to_hub \
--push_to_hub_model_id finetuned-t5-cnn_dailymail \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
``` | unknown | github | https://github.com/huggingface/transformers | docs/source/ar/run_scripts.md |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
DataFrame-based machine learning APIs to let users quickly assemble and configure practical
machine learning pipelines.
"""
from pyspark.ml.base import Estimator, Model, Transformer, UnaryTransformer
from pyspark.ml.pipeline import Pipeline, PipelineModel
__all__ = ["Transformer", "UnaryTransformer", "Estimator", "Model", "Pipeline", "PipelineModel"] | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bridge\Doctrine\Tests\IdGenerator;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\Mapping\Entity;
use PHPUnit\Framework\TestCase;
use Symfony\Bridge\Doctrine\IdGenerator\UlidGenerator;
use Symfony\Component\Uid\Factory\UlidFactory;
use Symfony\Component\Uid\Ulid;
class UlidGeneratorTest extends TestCase
{
    /**
     * Without a factory the generator must produce a fresh, valid ULID.
     */
    public function testUlidCanBeGenerated()
    {
        // Instantiate the EntityManager without running its constructor:
        // generateId() only needs a type-compatible argument here, not a
        // working database connection.
        $em = (new \ReflectionClass(EntityManager::class))->newInstanceWithoutConstructor();
        $generator = new UlidGenerator();
        $ulid = $generator->generateId($em, new Entity());
        $this->assertInstanceOf(Ulid::class, $ulid);
        $this->assertTrue(Ulid::isValid($ulid));
    }
    /**
     * With an injected UlidFactory the generator must return exactly the
     * ULID produced by that factory.
     */
    public function testUlidFactory()
    {
        // A fixed all-zero ULID so the identity assertion below is meaningful.
        $ulid = new Ulid('00000000000000000000000000');
        $em = (new \ReflectionClass(EntityManager::class))->newInstanceWithoutConstructor();
        $factory = $this->createStub(UlidFactory::class);
        $factory
            ->method('create')
            ->willReturn($ulid);
        $generator = new UlidGenerator($factory);
        $this->assertSame($ulid, $generator->generateId($em, new Entity()));
    }
}
import datetime
from itertools import groupby
import warnings
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections
from django.db.models import Max
from django.http import Http404
from django.utils.timezone import make_aware, utc
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from opaque_keys.edx.keys import CourseKey
from analytics_data_api.constants import enrollment_modes
from analytics_data_api.utils import dictfetchall, get_course_report_download_details
from analytics_data_api.v0 import models, serializers
from analytics_data_api.v0.exceptions import ReportFileNotFoundError
from analytics_data_api.v0.views.utils import raise_404_if_none
# Abstract base for course-scoped list endpoints. Subclasses set `model`,
# `slug`, and `serializer_class`, and implement `apply_date_filtering`.
class BaseCourseView(generics.ListAPIView):
    # Request-scoped state, populated in get() before the queryset is built.
    start_date = None
    end_date = None
    course_id = None
    slug = None
    allow_empty = False

    def get(self, request, *args, **kwargs):
        """Capture the course id and optional start/end date query params, then delegate to DRF."""
        self.course_id = self.kwargs.get('course_id')
        start_date = request.query_params.get('start_date')
        end_date = request.query_params.get('end_date')
        timezone = utc
        self.start_date = self.parse_date(start_date, timezone)
        self.end_date = self.parse_date(end_date, timezone)
        return super(BaseCourseView, self).get(request, *args, **kwargs)

    def parse_date(self, date, timezone):
        """Parse a date string into an aware datetime, or return it unchanged when falsy.

        Tries settings.DATETIME_FORMAT first, falling back to settings.DATE_FORMAT.
        Raises ValueError if the string matches neither format.
        """
        if date:
            try:
                date = datetime.datetime.strptime(date, settings.DATETIME_FORMAT)
            except ValueError:
                date = datetime.datetime.strptime(date, settings.DATE_FORMAT)
            date = make_aware(date, timezone)
        return date

    def apply_date_filtering(self, queryset):
        # Subclasses must narrow the queryset using self.start_date/self.end_date.
        raise NotImplementedError

    @raise_404_if_none
    def get_queryset(self):
        """Return the date-filtered rows for this course; the decorator 404s on empty results."""
        queryset = self.model.objects.filter(course_id=self.course_id)
        queryset = self.apply_date_filtering(queryset)
        return queryset

    def get_csv_filename(self):
        """Build the CSV download filename from the course key parts and the view's slug."""
        course_key = CourseKey.from_string(self.course_id)
        course_id = u'-'.join([course_key.org, course_key.course, course_key.run])
        return u'{0}--{1}.csv'.format(course_id, self.slug)

    def finalize_response(self, request, response, *args, **kwargs):
        # When the client asks for CSV, serve the payload as a file download.
        if request.META.get('HTTP_ACCEPT') == u'text/csv':
            response['Content-Disposition'] = u'attachment; filename={}'.format(self.get_csv_filename())
        return super(BaseCourseView, self).finalize_response(request, response, *args, **kwargs)
# pylint: disable=line-too-long
class CourseActivityWeeklyView(BaseCourseView):
    """
    Get counts of users who performed specific activities in a course.
    **Example request**
        GET /api/v0/courses/{course_id}/activity/
    **Response Values**
        Returns a list of key/value pairs for student activities, as well as the
        interval start and end dates and the course ID.
            * any: The number of unique users who performed any action in the
              course, including actions not counted in other categories in the
              response.
            * attempted_problem: The number of unique users who answered any
              loncapa-based problem in the course.
            * played_video: The number of unique users who started watching any
              video in the course.
            * posted_forum: The number of unique users who created a new post,
              responded to a post, or submitted a comment on any discussion in
              the course.
            * interval_start: The time and date at which data started being
              included in returned values.
            * interval_end: The time and date at which data stopped being
              included in returned values.
            * course_id: The ID of the course for which data is returned.
            * created: The date the counts were computed.
    **Parameters**
        You can specify the start and end dates for the time period for which
        you want to get activity.
        You specify dates in the format: YYYY-mm-ddTtttttt; for example,
        ``2014-12-15T000000``.
        If no start or end dates are specified, the data for the week ending on
        the previous day is returned.
        start_date -- Date after which all data is returned (inclusive).
        end_date -- Date before which all data is returned (exclusive).
    """
    slug = u'engagement-activity'
    model = models.CourseActivityWeekly
    serializer_class = serializers.CourseActivityWeeklySerializer

    def apply_date_filtering(self, queryset):
        """Filter by the requested interval, or fall back to the latest interval."""
        if self.start_date or self.end_date:
            # Filter by start/end date
            if self.start_date:
                queryset = queryset.filter(interval_start__gte=self.start_date)
            if self.end_date:
                queryset = queryset.filter(interval_end__lt=self.end_date)
        else:
            # No date filter supplied, so only return data for the latest date.
            # Bug fix: aggregate() always returns a dict, which is truthy even
            # when the queryset is empty, so the old `if latest_date:` check
            # never guarded anything. Inspect the extracted max value instead;
            # when it is None (empty queryset) skip the filter entirely.
            latest_date = queryset.aggregate(Max('interval_end'))['interval_end__max']
            if latest_date is not None:
                queryset = queryset.filter(interval_end=latest_date)
        return queryset

    def get_queryset(self):
        """Return activity rows merged into one item per (course, interval)."""
        queryset = super(CourseActivityWeeklyView, self).get_queryset()
        queryset = self.format_data(queryset)
        return queryset

    def _format_activity_type(self, activity_type):
        """Map a stored activity type to its API name (pipeline 'active' -> 'any')."""
        activity_type = activity_type.lower()
        # The data pipeline stores "any" as "active"; however, the API should display "any".
        if activity_type == 'active':
            activity_type = 'any'
        return activity_type

    def format_data(self, data):
        """
        Group the data by date and combine multiple activity rows into a single row/element.

        Arguments
            data (iterable) -- Data to be formatted.

        NOTE(review): groupby only merges *adjacent* rows, so this assumes the
        queryset is ordered by (course_id, interval_start, interval_end) --
        confirm the model's default ordering guarantees this.
        """
        formatted_data = []
        for key, group in groupby(data, lambda x: (x.course_id, x.interval_start, x.interval_end)):
            # Iterate over groups and create a single item with all activity types
            item = {
                u'course_id': key[0],
                u'interval_start': key[1],
                u'interval_end': key[2],
                u'created': None
            }
            for activity in group:
                activity_type = self._format_activity_type(activity.activity_type)
                item[activity_type] = activity.count
                # Track the most recent computation time across the merged rows.
                item[u'created'] = max(activity.created, item[u'created']) if item[u'created'] else activity.created
            formatted_data.append(item)
        return formatted_data
class CourseActivityMostRecentWeekView(generics.RetrieveAPIView):
    """
    Get counts of users who performed specific activities at least once during the most recently computed week.
    **Example request**
        GET /api/v0/courses/{course_id}/recent_activity/
    **Response Values**
        Returns a list of key/value pairs for student activities, as well as the
        interval start and end dates and the course ID.
            * activity_type: The type of activity counted. Possible values are:
                * any: The number of unique users who performed any action in the
                  course, including actions not counted in other categories in the
                  response.
                * attempted_problem: The number of unique users who answered any
                  loncapa-based problem in the course.
                * played_video: The number of unique users who started watching
                  any video in the course.
                * posted_forum: The number of unique users who created a new post,
                  responded to a post, or submitted a comment on any discussion in
                  the course.
            * count: The number of unique users who performed the specified
              action.
            * interval_start: The time and date at which data started being
              included in returned values.
            * interval_end: The time and date at which data stopped being
              included in returned values.
            * course_id: The ID of the course for which data is returned.
    **Parameters**
        You can specify the activity type for which you want to get the count.
        activity_type -- The type of activity: any (default), attempted_problem, played_video, posted_forum.
    """
    serializer_class = serializers.CourseActivityByWeekSerializer
    DEFAULT_ACTIVITY_TYPE = 'ACTIVE'

    def _format_activity_type(self, activity_type):
        """
        Modify the activity type parameter for use with our data.
        Arguments:
            activity_type (str): String to be formatted
        """
        # The public "ANY" label is stored as "ACTIVE" in the data.
        normalized = activity_type.upper()
        return self.DEFAULT_ACTIVITY_TYPE if normalized == 'ANY' else normalized

    def _get_activity_type(self):
        """ Retrieve the activity type from the query string. """
        params = self.request.query_params
        # The legacy `label` param wins; otherwise fall back to `activity_type`.
        requested = params.get('label', None) or params.get('activity_type', self.DEFAULT_ACTIVITY_TYPE)
        return self._format_activity_type(requested)

    def get_object(self):
        """Select the activity report for the given course and activity type."""
        warnings.warn('CourseActivityMostRecentWeekView has been deprecated! Use CourseActivityWeeklyView instead.',
                      DeprecationWarning)
        course_id = self.kwargs.get('course_id')
        requested_type = self._get_activity_type()
        try:
            return models.CourseActivityWeekly.get_most_recent(course_id, requested_type)
        except ObjectDoesNotExist:
            raise Http404
# Abstract base for enrollment endpoints; shares date filtering on the `date` column.
class BaseCourseEnrollmentView(BaseCourseView):
    def apply_date_filtering(self, queryset):
        """Filter by the requested date range, or fall back to the latest date.

        Arguments:
            queryset -- Course-scoped queryset of rows with a `date` column.
        """
        if self.start_date or self.end_date:
            # Filter by start/end date
            if self.start_date:
                queryset = queryset.filter(date__gte=self.start_date)
            if self.end_date:
                queryset = queryset.filter(date__lt=self.end_date)
        else:
            # No date filter supplied, so only return data for the latest date.
            # Bug fix: aggregate() always returns a dict, which is truthy even
            # for an empty queryset, so the old `if latest_date:` check could
            # end up filtering on date=None. Check the extracted max value
            # instead and leave an empty queryset untouched.
            latest_date = queryset.aggregate(Max('date'))['date__max']
            if latest_date is not None:
                queryset = queryset.filter(date=latest_date)
        return queryset
class CourseEnrollmentByBirthYearView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by birth year.
    **Example request**
        GET /api/v0/courses/{course_id}/enrollment/birth_year/
    **Response Values**
        Returns an array with a collection for each year in which a user was
        born. Each collection contains:
            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * birth_year: The birth year for which the enrollment count applies.
            * count: The number of users who were born in the specified year.
            * created: The date the count was computed.
    **Parameters**
        You can specify the start and end dates for which to count enrolled
        users.
        You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.
        If no start or end dates are specified, the data for the previous day is
        returned.
        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    # Suffix used in the CSV download filename (see BaseCourseView.get_csv_filename).
    slug = u'enrollment-age'
    serializer_class = serializers.CourseEnrollmentByBirthYearSerializer
    model = models.CourseEnrollmentByBirthYear
class CourseEnrollmentByEducationView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by education level.
    **Example request**
        GET /api/v0/courses/{course_id}/enrollment/education/
    **Response Values**
        Returns a collection for each level of education reported by a user.
        Each collection contains:
            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * education_level: The education level for which the enrollment
              count applies.
            * count: The number of users who reported the specified education
              level.
            * created: The date the count was computed.
    **Parameters**
        You can specify the start and end dates for which to count enrolled
        users.
        You specify dates in the format: YYYY-mm-dd; for
        example, ``2014-12-15``.
        If no start or end dates are specified, the data for the previous day is
        returned.
        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    # Suffix used in the CSV download filename (see BaseCourseView.get_csv_filename).
    slug = u'enrollment-education'
    serializer_class = serializers.CourseEnrollmentByEducationSerializer
    model = models.CourseEnrollmentByEducation
class CourseEnrollmentByGenderView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by gender.
    **Example request**
        GET /api/v0/courses/{course_id}/enrollment/gender/
    **Response Values**
        Returns the count of each gender specified by users:
            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * female: The count of self-identified female users.
            * male: The count of self-identified male users.
            * other: The count of self-identified other users.
            * unknown: The count of users who did not specify a gender.
            * created: The date the counts were computed.
    **Parameters**
        You can specify the start and end dates for which to count enrolled
        users.
        You specify dates in the format: YYYY-mm-dd; for
        example, ``2014-12-15``.
        If no start or end dates are specified, the data for the previous day is
        returned.
        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment-gender'
    serializer_class = serializers.CourseEnrollmentByGenderSerializer
    model = models.CourseEnrollmentByGender

    def get_queryset(self):
        """Collapse per-gender rows into one item per (course, date)."""
        queryset = super(CourseEnrollmentByGenderView, self).get_queryset()
        rows = []
        for (course_id, date), group in groupby(queryset, lambda x: (x.course_id, x.date)):
            # One output row per day, with all gender buckets pre-zeroed.
            row = {
                u'course_id': course_id,
                u'date': date,
                u'created': None,
                u'male': 0,
                u'female': 0,
                u'other': 0,
                u'unknown': 0
            }
            for enrollment in group:
                gender = enrollment.cleaned_gender.lower()
                row[gender] = row.get(gender, 0) + enrollment.count
                # Keep the most recent computation timestamp for the row.
                if row[u'created']:
                    row[u'created'] = max(enrollment.created, row[u'created'])
                else:
                    row[u'created'] = enrollment.created
            rows.append(row)
        return rows
class CourseEnrollmentView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users.
    **Example request**
        GET /api/v0/courses/{course_id}/enrollment/
    **Response Values**
        Returns the count of enrolled users:
            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * count: The count of enrolled users.
            * created: The date the count was computed.
    **Parameters**
        You can specify the start and end dates for which to count enrolled
        users.
        You specify dates in the format: YYYY-mm-dd; for
        example, ``2014-12-15``.
        If no start or end dates are specified, the data for the previous day is
        returned.
        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    # Suffix used in the CSV download filename (see BaseCourseView.get_csv_filename).
    slug = u'enrollment'
    serializer_class = serializers.CourseEnrollmentDailySerializer
    model = models.CourseEnrollmentDaily
class CourseEnrollmentModeView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by enrollment mode.
    **Example request**
        GET /api/v0/courses/{course_id}/enrollment/mode/
    **Response Values**
        Returns the counts of users by mode:
            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * count: The count of currently enrolled users.
            * cumulative_count: The cumulative total of all users ever enrolled.
            * created: The date the counts were computed.
            * honor: The number of users currently enrolled in honor code mode.
            * professional: The number of users currently enrolled in professional mode.
            * verified: The number of users currently enrolled in verified mode.
    **Parameters**
        You can specify the start and end dates for which to count enrolled
        users.
        You specify dates in the format: YYYY-mm-dd; for
        example, ``2014-12-15``.
        If no start or end dates are specified, the data for the previous day is
        returned.
        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment_mode'
    serializer_class = serializers.CourseEnrollmentModeDailySerializer
    model = models.CourseEnrollmentModeDaily

    def get_queryset(self):
        """Collapse per-mode rows into one item per (course, date) with totals."""
        queryset = super(CourseEnrollmentModeView, self).get_queryset()
        rows = []
        for (course_id, date), group in groupby(queryset, lambda x: (x.course_id, x.date)):
            row = {
                u'course_id': course_id,
                u'date': date,
                u'created': None
            }
            count_total = 0
            cumulative_total = 0
            for enrollment in group:
                row[enrollment.mode] = enrollment.count
                # Keep the most recent computation timestamp for the row.
                if row[u'created']:
                    row[u'created'] = max(enrollment.created, row[u'created'])
                else:
                    row[u'created'] = enrollment.created
                count_total += enrollment.count
                cumulative_total += enrollment.cumulative_count
            # Merge professional with non verified professional
            row[enrollment_modes.PROFESSIONAL] = \
                row.get(enrollment_modes.PROFESSIONAL, 0) + row.pop(enrollment_modes.PROFESSIONAL_NO_ID, 0)
            row[u'count'] = count_total
            row[u'cumulative_count'] = cumulative_total
            rows.append(row)
        return rows
# pylint: disable=line-too-long
class CourseEnrollmentByLocationView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by location.
    Location is calculated based on the user's IP address. Users whose location
    cannot be determined are counted as having a country.name of UNKNOWN.
    Countries are denoted by their ISO 3166 country code.
    **Example request**
        GET /api/v0/courses/{course_id}/enrollment/location/
    **Response Values**
        Returns enrollment counts by country:
            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * country: Contains the following fields:
                * alpha2: The two-letter country code.
                * alpha3: The three-letter country code.
                * name: The country name.
            * count: The count of users from the country.
            * created: The date the count was computed.
    **Parameters**
        You can specify the start and end dates for which to count enrolled
        users.
        You specify dates in the format: YYYY-mm-dd; for
        example, ``2014-12-15``.
        If no start or end dates are specified, the data for the previous day is
        returned.
        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    # Suffix used in the CSV download filename (see BaseCourseView.get_csv_filename).
    slug = u'enrollment-location'
    serializer_class = serializers.CourseEnrollmentByCountrySerializer
    model = models.CourseEnrollmentByCountry

    def get_queryset(self):
        """Sum per-country enrollment rows into one (unsaved) model instance per (date, country, course)."""
        # Get all of the data from the database
        queryset = super(CourseEnrollmentByLocationView, self).get_queryset()
        items = queryset.all()
        # Data must be sorted in order for groupby to work properly
        items = sorted(items, key=lambda x: x.country.alpha2)
        # Items to be returned by this method
        returned_items = []
        # Group data by date, country, and course ID
        for key, group in groupby(items, lambda x: (x.date, x.country.alpha2, x.course_id)):
            count = 0
            date = key[0]
            country_code = key[1]
            course_id = key[2]
            created = None
            for item in group:
                # Keep the most recent computation timestamp across the merged rows.
                created = max(created, item.created) if created else item.created
                count += item.count
            # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
            returned_items.append(models.CourseEnrollmentByCountry(
                course_id=course_id,
                date=date,
                country_code=country_code,
                count=count,
                created=created
            ))
        # Note: We are returning a list, instead of a queryset. This is
        # acceptable since the consuming code simply expects the returned
        # value to be iterable, not necessarily a queryset.
        return returned_items
# pylint: disable=abstract-method
class ProblemsListView(BaseCourseView):
    """
    Get the problems.
    **Example request**
        GET /api/v0/courses/{course_id}/problems/
    **Response Values**
        Returns a collection of submission counts and part IDs for each problem. Each collection contains:
            * module_id: The ID of the problem.
            * total_submissions: Total number of submissions.
            * correct_submissions: Total number of *correct* submissions.
            * part_ids: List of problem part IDs.
    """
    serializer_class = serializers.ProblemSerializer
    allow_empty = False

    @raise_404_if_none
    def get_queryset(self):
        """Aggregate the answer_distribution table into one row per problem via raw SQL.

        Runs against the analytics database (settings.ANALYTICS_DATABASE) and
        returns a list of plain dicts rather than a queryset.
        """
        # last_response_count is the number of submissions for the problem part and must
        # be divided by the number of problem parts to get the problem submission rather
        # than the problem *part* submissions
        aggregation_query = """
            SELECT
                module_id,
                SUM(last_response_count)/COUNT(DISTINCT part_id) AS total_submissions,
                SUM(CASE WHEN correct=1 THEN last_response_count ELSE 0 END)/COUNT(DISTINCT part_id) AS correct_submissions,
                GROUP_CONCAT(DISTINCT part_id) AS part_ids,
                MAX(created) AS created
            FROM answer_distribution
            WHERE course_id = %s
            GROUP BY module_id;
        """
        connection = connections[settings.ANALYTICS_DATABASE]
        with connection.cursor() as cursor:
            # Inspect the table's columns so we can fall back to the legacy
            # `count` column when `last_response_count` is absent (below).
            if connection.vendor == 'mysql':
                # The default value of group_concat_max_len, 1024, is too low for some course data. Increase this value
                # to its maximum possible value. For more information see
                # http://code.openark.org/blog/mysql/those-oversized-undersized-variables-defaults.
                cursor.execute("SET @@group_concat_max_len = @@max_allowed_packet;")
                cursor.execute("DESCRIBE answer_distribution;")
                column_names = [row[0] for row in cursor.fetchall()]
            # Alternate query for sqlite test database
            else:
                cursor.execute("PRAGMA table_info(answer_distribution)")
                column_names = [row[1] for row in cursor.fetchall()]
            if u'last_response_count' in column_names:
                cursor.execute(aggregation_query, [self.course_id])
            else:
                cursor.execute(aggregation_query.replace('last_response_count', 'count'), [self.course_id])
            rows = dictfetchall(cursor)
        for row in rows:
            # Convert the comma-separated list into an array of strings.
            row['part_ids'] = row['part_ids'].split(',')
            # Convert the aggregated decimal fields to integers
            row['total_submissions'] = int(row['total_submissions'])
            row['correct_submissions'] = int(row['correct_submissions'])
            # Rather than write custom SQL for the SQLite backend, simply parse the timestamp.
            created = row['created']
            if not isinstance(created, datetime.datetime):
                row['created'] = datetime.datetime.strptime(created, '%Y-%m-%d %H:%M:%S')
        return rows
# pylint: disable=abstract-method
class ProblemsAndTagsListView(BaseCourseView):
    """
    Get the problems with the connected tags.
    **Example request**
        GET /api/v0/courses/{course_id}/problems_and_tags/
    **Response Values**
        Returns a collection of submission counts and tags for each problem. Each collection contains:
            * module_id: The ID of the problem.
            * total_submissions: Total number of submissions.
            * correct_submissions: Total number of *correct* submissions.
            * tags: Dictionary that contains pairs "tag key: [tag value 1, tag value 2, ..., tag value N]".
    """
    serializer_class = serializers.ProblemsAndTagsSerializer
    allow_empty = False
    model = models.ProblemsAndTags

    @raise_404_if_none
    def get_queryset(self):
        """Fold per-tag rows into one entry per problem, accumulating tag values."""
        rows = self.model.objects.filter(course_id=self.course_id).all()
        problems = {}
        for row in rows:
            entry = problems.get(row.module_id)
            if entry is None:
                # First row for this problem: submission counts come from here.
                problems[row.module_id] = {
                    'module_id': row.module_id,
                    'total_submissions': row.total_submissions,
                    'correct_submissions': row.correct_submissions,
                    'tags': {
                        row.tag_name: [row.tag_value]
                    },
                    'created': row.created
                }
                continue
            # Subsequent rows only contribute tag values and a possibly newer timestamp.
            values = entry['tags'].setdefault(row.tag_name, [])
            values.append(row.tag_value)
            values.sort()
            if entry['created'] < row.created:
                entry['created'] = row.created
        return problems.values()
class VideosListView(BaseCourseView):
    """
    Get data for the videos in a course.
    **Example request**
        GET /api/v0/courses/{course_id}/videos/
    **Response Values**
        Returns a collection of video views and metadata for each video.
        For each video, the collection the following data.
            * video_id: The ID of the video.
            * encoded_module_id: The encoded module ID.
            * duration: The length of the video in seconds.
            * segment_length: The length of each segment of the video in seconds.
            * users_at_start: The number of viewers at the start of the video.
            * users_at_end: The number of viewers at the end of the video.
            * created: The date the video data was updated.
    """
    serializer_class = serializers.VideoSerializer
    allow_empty = False
    model = models.Video

    def apply_date_filtering(self, queryset):
        """Video rows are not date-scoped, so return the queryset unchanged."""
        # no date filtering for videos -- just return the queryset
        return queryset
class ReportDownloadView(APIView):
    """
    Get information needed to download a CSV report
    **Example request**
        GET /api/v0/courses/{course_id}/reports/{report_name}/
    **Response Values**
        Returns a single object with data about the report, with the following data:
            * course_id: The ID of the course
            * report_name: The name of the report
            * download_url: The Internet location from which the report can be downloaded
        The object may also return these items, if supported by the storage backend:
            * last_modified: The date the report was last updated
            * expiration_date: The date through which the link will be valid
            * file_size: The size in bytes of the CSV download
    """
    enabled_reports = settings.ENABLED_REPORT_IDENTIFIERS

    def get(self, _request, course_id, report_name):
        """Return download details for an enabled report, or raise for unknown names."""
        # Guard clause: reject report names that are not enabled before doing any work.
        if report_name not in self.enabled_reports:
            raise ReportFileNotFoundError(course_id=course_id, report_name=report_name)
        details = get_course_report_download_details(course_id, report_name)
        return Response(details)
import { context, getOctokit } from '@actions/github'
import { info, getInput } from '@actions/core'
const { default: stripAnsi } = require('strip-ansi')
const fs = require('fs')
/// <reference path="./manifest" />
// Octokit client type as produced by @actions/github's getOctokit().
type Octokit = ReturnType<typeof getOctokit>
// One job entry from the "list jobs for a workflow run" REST response.
type Job = Awaited<
  ReturnType<Octokit['rest']['actions']['listJobsForWorkflowRun']>
>['data']['jobs'][number]
// A comment marker to identify the comment created by this action.
const BOT_COMMENT_MARKER = `<!-- __marker__ next.js integration stats __marker__ -->`
// Header for the test report.
const commentTitlePre = `## Failing next.js integration test suites`
// Download logs for a job in a workflow run by reading redirect url from workflow log response.
async function fetchJobLogsFromWorkflow(
  octokit: Octokit,
  job: Job
): Promise<{ logs: string; job: Job }> {
  console.log(
    `fetchJobLogsFromWorkflow ${job.name}: Checking test results for the job`
  )
  // GitHub answers with a redirect; the target URL is valid (without any
  // additional auth) for about one minute.
  const redirect = await octokit.rest.actions.downloadJobLogsForWorkflowRun({
    accept: 'application/vnd.github.v3+json',
    ...context.repo,
    job_id: job.id,
  })
  console.log(
    `fetchJobLogsFromWorkflow ${job.name}: Trying to get logs from redirect url ${redirect.url}`
  )
  // Fetch the actual log payload from the redirect target.
  const logsResponse = await fetch(redirect.url, {
    headers: {
      Accept: 'application/vnd.github.v3+json',
    },
  })
  console.log(
    `fetchJobLogsFromWorkflow ${job.name}: Logs response status ${logsResponse.status}`
  )
  if (!logsResponse.ok) {
    throw new Error(
      `Failed to get logsUrl, got status ${logsResponse.status}`
    )
  }
  // Raw check_run logs prefix every line with a timestamp in the format
  // 2020-03-02T18:42:30.8504261Z — strip that fixed-width prefix from each line.
  const TIMESTAMP_PREFIX = '2020-03-02T19:39:16.8832288Z '
  const rawText: string = await logsResponse.text()
  const logs = rawText
    .split('\n')
    .map((line) => line.substring(TIMESTAMP_PREFIX.length))
    .join('\n')
  return { logs, job }
}
// Collect necessary inputs to run actions,
// i.e. the GitHub token, an authenticated Octokit client, the PR number and
// commit sha from the workflow context, and flags controlling report output.
// Side effect: when running for a PR, deletes every previous report comment
// posted by this action (identified by BOT_COMMENT_MARKER).
async function getInputs(): Promise<{
  token: string
  octokit: Octokit
  prNumber: number | undefined
  sha: string
  noBaseComparison: boolean
  shouldExpandResultMessages: boolean
}> {
  const token = getInput('token')
  const octokit = getOctokit(token)
  // Action input is a string; only the literal 'true' enables expansion.
  const shouldExpandResultMessages =
    getInput('expand_result_messages') === 'true'
  if (!shouldExpandResultMessages) {
    console.log('Test report comment will not include result messages.')
  }
  const prNumber = context?.payload?.pull_request?.number
  const sha = context?.sha
  // For the daily cron workflow, we don't compare to previous but post daily summary
  const noBaseComparison = prNumber == null
  if (prNumber != null) {
    console.log('Trying to collect integration stats for PR', {
      prNumber,
      sha: sha,
    })
    const comments = await octokit.paginate(octokit.rest.issues.listComments, {
      ...context.repo,
      issue_number: prNumber,
      per_page: 200,
    })
    console.log('Found total comments for PR', comments?.length || 0)
    // Get a comment from the bot if it exists, delete all of them.
    // Due to test report can exceed single comment size limit, it can be multiple comments and sync those is not trivial.
    // Instead, we just delete all of them and post a new one.
    const existingComments = comments?.filter(
      (comment) =>
        comment?.user?.login === 'github-actions[bot]' &&
        comment?.body?.includes(BOT_COMMENT_MARKER)
    )
    if (existingComments?.length) {
      console.log('Found existing comments, deleting them')
      for (const comment of existingComments) {
        await octokit.rest.issues.deleteComment({
          ...context.repo,
          comment_id: comment.id,
        })
      }
    }
  } else {
    info('No PR number found in context, will not try to post comment.')
  }
  const inputs = {
    token,
    octokit,
    prNumber,
    sha,
    noBaseComparison,
    shouldExpandResultMessages,
  }
  console.log('getInputs: these inputs will be used to collect test results', {
    ...inputs,
    token: !!token, // redact this
  })
  return inputs
}
// Iterate all the jobs in the current workflow run, collect & parse logs for failed jobs for the postprocessing.
// Also writes the combined manifest to ./nextjs-test-results.json as a side effect.
// NOTE(review): the `token` parameter is currently unused in this function.
async function getJobResults(
  octokit: Octokit,
  token: string,
  sha: string
): Promise<TestResultManifest> {
  console.log('Trying to collect next.js integration test logs')
  const jobs = await octokit.paginate(
    octokit.rest.actions.listJobsForWorkflowRun,
    {
      ...context.repo,
      run_id: context?.runId,
      per_page: 50,
    }
  )
  // Filter out next.js integration test jobs
  // (names matching "Next.js integration test (<group>) (<shard>)").
  const integrationTestJobs = jobs?.filter((job) =>
    /Next\.js integration test \([^)]*\) \([^)]*\)/.test(job.name)
  )
  console.log(
    `Logs found for ${integrationTestJobs.length} jobs`,
    integrationTestJobs.map((job) => job.name)
  )
  // Iterate over all of next.js integration test jobs, read logs and collect failed test results if exists.
  const fullJobLogsFromWorkflow = await Promise.all(
    integrationTestJobs.map((job) => fetchJobLogsFromWorkflow(octokit, job))
  )
  console.log('Logs downloaded for all jobs')
  // Split parsed payloads into [regular jobs, FLAKY_SUBSET jobs] by job name.
  const [jobResults, flakyMonitorJobResults] = fullJobLogsFromWorkflow.reduce(
    (acc, { logs, job }) => {
      const subset = job.name.includes('FLAKY_SUBSET')
      const index = subset ? 1 : 0
      const { id, run_id, run_url, html_url } = job
      console.log('Parsing logs for job', { id, run_id, run_url, html_url })
      // Test payloads are embedded in the logs between explicit text markers.
      const splittedLogs = logs.split('--test output start--')
      // First item isn't test data, it's just the log header
      splittedLogs.shift()
      for (const logLine of splittedLogs) {
        let testData: string | undefined
        try {
          testData = logLine.split('--test output end--')[0].trim()!
          const data = JSON.parse(testData)
          acc[index].push({
            job: job.name,
            data,
          })
        } catch (err) {
          // Malformed payloads are logged and skipped rather than failing the run.
          console.log('Failed to parse test results', {
            id,
            run_id,
            run_url,
            html_url,
            testData,
          })
        }
      }
      return acc
    },
    [[], []] as [Array<JobResult>, Array<JobResult>]
  )
  console.log(`Flakyness test subset results`, { flakyMonitorJobResults })
  const testResultManifest: TestResultManifest = {
    ref: sha,
    flakyMonitorJobResults: flakyMonitorJobResults,
    result: jobResults,
  }
  // Collect all test results into single manifest to store into file. This'll allow to upload / compare test results
  // across different runs.
  fs.writeFileSync(
    './nextjs-test-results.json',
    JSON.stringify(testResultManifest, null, 2)
  )
  return testResultManifest
}
// Get the latest base test results to diff against with current test results.
// Currently always resolves to null (no comparison base available) — see TODO below.
async function getTestResultDiffBase(
  _octokit: Octokit
): Promise<TestResultManifest | null> {
  // TODO: This code was previously written for the `vercel/turborepo`
  // repository which used to have a `nextjs-integration-test-data` branch with
  // all the previous test run data.
  //
  // The last update to that branch is from Dec 2023. If we want to support
  // comparisions with the canary branch, we need to read this data from
  // somewhere else.
  return null
}
function withoutRetries(results: Array<JobResult>): Array<JobResult> {
  // Walk the list from the end so the most recent occurrence of each test
  // name wins; drop any earlier job whose test names were all/partly already
  // claimed by a kept (later) job. Input is not mutated.
  const seenNames = new Set<string>()
  const kept: Array<JobResult> = []
  for (let i = results.length - 1; i >= 0; i--) {
    const job = results[i]
    const names = job.data.testResults.map((testResult) => testResult.name)
    if (names.some((name) => seenNames.has(name))) {
      continue
    }
    for (const name of names) {
      seenNames.add(name)
    }
    kept.push(job)
  }
  // Restore the original ordering of the surviving jobs.
  return kept.reverse()
}
// Build the markdown "Test summary" section comparing current test results
// against the optional baseline. Returns the markdown string to embed into
// the PR comment.
function getTestSummary(
  sha: string,
  baseResults: TestResultManifest | null,
  jobResults: TestResultManifest
) {
  // Read current tests summary
  const {
    currentTestFailedSuiteCount,
    currentTestPassedSuiteCount,
    currentTestTotalSuiteCount,
    currentTestFailedCaseCount,
    currentTestPassedCaseCount,
    currentTestTotalCaseCount,
    currentTestFailedNames,
  } = withoutRetries(jobResults.result).reduce(
    (acc, value) => {
      const { data } = value
      acc.currentTestFailedSuiteCount += data.numFailedTestSuites
      acc.currentTestPassedSuiteCount += data.numPassedTestSuites
      acc.currentTestTotalSuiteCount += data.numTotalTestSuites
      acc.currentTestFailedCaseCount += data.numFailedTests
      acc.currentTestPassedCaseCount += data.numPassedTests
      acc.currentTestTotalCaseCount += data.numTotalTests
      for (const testResult of data.testResults ?? []) {
        // name.length > 2 filters out empty/placeholder entries
        if (testResult.status !== 'passed' && testResult.name.length > 2) {
          acc.currentTestFailedNames.push(testResult.name)
        }
      }
      return acc
    },
    {
      currentTestFailedSuiteCount: 0,
      currentTestPassedSuiteCount: 0,
      currentTestTotalSuiteCount: 0,
      currentTestFailedCaseCount: 0,
      currentTestPassedCaseCount: 0,
      currentTestTotalCaseCount: 0,
      currentTestFailedNames: [] as Array<string>,
    }
  )
  console.log(
    'Current test summary',
    JSON.stringify(
      {
        currentTestFailedSuiteCount,
        currentTestPassedSuiteCount,
        currentTestTotalSuiteCount,
        currentTestFailedCaseCount,
        currentTestPassedCaseCount,
        currentTestTotalCaseCount,
        currentTestFailedNames,
      },
      null,
      2
    )
  )
  if (!baseResults) {
    console.log("There's no base to compare")
    return `### Test summary
| | Current (${sha}) | Diff |
|---|---|---|
| Failed Suites | ${currentTestFailedSuiteCount} | N/A |
| Failed Cases | ${currentTestFailedCaseCount} | N/A |`
  }
  // Same aggregation as above, for the baseline run.
  const {
    baseTestFailedSuiteCount,
    baseTestPassedSuiteCount,
    baseTestTotalSuiteCount,
    baseTestFailedCaseCount,
    baseTestPassedCaseCount,
    baseTestTotalCaseCount,
    baseTestFailedNames,
  } = withoutRetries(baseResults.result).reduce(
    (acc, value) => {
      const { data } = value
      acc.baseTestFailedSuiteCount += data.numFailedTestSuites
      acc.baseTestPassedSuiteCount += data.numPassedTestSuites
      acc.baseTestTotalSuiteCount += data.numTotalTestSuites
      acc.baseTestFailedCaseCount += data.numFailedTests
      acc.baseTestPassedCaseCount += data.numPassedTests
      acc.baseTestTotalCaseCount += data.numTotalTests
      for (const testResult of data.testResults ?? []) {
        if (testResult.status !== 'passed' && testResult.name.length > 2) {
          acc.baseTestFailedNames.push(testResult.name)
        }
      }
      return acc
    },
    {
      baseTestFailedSuiteCount: 0,
      baseTestPassedSuiteCount: 0,
      baseTestTotalSuiteCount: 0,
      baseTestFailedCaseCount: 0,
      baseTestPassedCaseCount: 0,
      baseTestTotalCaseCount: 0,
      baseTestFailedNames: [] as Array<string>,
    }
  )
  console.log(
    'Base test summary',
    JSON.stringify(
      {
        baseTestFailedSuiteCount,
        baseTestPassedSuiteCount,
        baseTestTotalSuiteCount,
        baseTestFailedCaseCount,
        baseTestPassedCaseCount,
        baseTestTotalCaseCount,
        baseTestFailedNames,
      },
      null,
      2
    )
  )
  // Positive diff = fewer failures than base (improvement).
  let testSuiteDiff = ':zero:'
  const suiteCountDiff = baseTestFailedSuiteCount - currentTestFailedSuiteCount
  if (suiteCountDiff > 0) {
    testSuiteDiff = `:arrow_down_small: ${suiteCountDiff}`
  } else if (suiteCountDiff < 0) {
    testSuiteDiff = `:arrow_up_small: ${-suiteCountDiff}`
  }
  let testCaseDiff = ':zero:'
  const caseCountDiff = baseTestFailedCaseCount - currentTestFailedCaseCount
  if (caseCountDiff > 0) {
    testCaseDiff = `:arrow_down_small: ${caseCountDiff}`
  } else if (caseCountDiff < 0) {
    testCaseDiff = `:arrow_up_small: ${-caseCountDiff}`
  }
  // Append summary test report to the comment body.
  // BUGFIX: the canary column header was missing its closing parenthesis
  // (the nested template literal never closed it).
  let ret = `### Test summary
| | canary (${baseResults.ref}) | Current (${sha}) | Diff (Failed) |
|---|---|---|---|
| Test suites | :red_circle: ${baseTestFailedSuiteCount} / :green_circle: ${baseTestPassedSuiteCount} (Total: ${baseTestTotalSuiteCount}) | :red_circle: ${currentTestFailedSuiteCount} / :green_circle: ${currentTestPassedSuiteCount} (Total: ${currentTestTotalSuiteCount}) | ${testSuiteDiff} |
| Test cases | :red_circle: ${baseTestFailedCaseCount} / :green_circle: ${baseTestPassedCaseCount} (Total: ${baseTestTotalCaseCount}) | :red_circle: ${currentTestFailedCaseCount} / :green_circle: ${currentTestPassedCaseCount} (Total: ${currentTestTotalCaseCount}) | ${testCaseDiff} |
`
  const fixedTests = baseTestFailedNames.filter(
    (name) => !currentTestFailedNames.includes(name)
  )
  const newFailedTests = currentTestFailedNames.filter(
    (name) => !baseTestFailedNames.includes(name)
  )
  /*
  //NOTE: upstream test can be flaky, so this can appear intermittently
  //even if there aren't actual fix. To avoid confusion, do not display this
  //for now.
  if (fixedTests.length > 0) {
    ret += `\n:white_check_mark: **Fixed tests:**\n\n${fixedTests
      .map((t) => (t.length > 5 ? `\t- ${t}` : t))
      .join(" \n")}`;
  }*/
  if (newFailedTests.length > 0) {
    ret += `\n:x: **Newly failed tests:**\n\n${newFailedTests
      .map((t) => (t.length > 5 ? `\t- ${t}` : t))
      .join(' \n')}`
  }
  console.log('Newly failed tests', JSON.stringify(newFailedTests, null, 2))
  console.log('Fixed tests', JSON.stringify(fixedTests, null, 2))
  return ret
}
// Create a markdown formatted comment body for the PR. The first line
// carries the marker prefix so subsequent runs can find and update the
// existing comment.
const createFormattedComment = (comment: {
  header: Array<string>
  contents: Array<string>
}) => {
  const headerLines = [
    `${commentTitlePre} ${BOT_COMMENT_MARKER}`,
    ...(comment.header ?? []),
  ]
  const headerText = headerLines.join(`\n`)
  const bodyText = comment.contents.join(`\n`)
  return `${headerText}\n\n${bodyText}`
}
// Higher-order fn: binds (octokit, prNumber) and returns an async function
// that posts `body` as a new comment on that PR. When there is no PR number
// (workflow not triggered from a PR) it logs and does nothing.
const createCommentPostAsync =
  (octokit: Octokit, prNumber?: number) => async (body: string) => {
    if (!prNumber) {
      console.log(
        "This workflow run doesn't seem to be triggered via PR, there's no corresponding PR number. Skipping creating a comment."
      )
      return
    }

    const result = await octokit.rest.issues.createComment({
      ...context.repo,
      issue_number: prNumber,
      body,
    })

    console.log('Created a new comment', result.data.html_url)
  }
// An action report failed next.js integration test with --turbopack
async function run() {
const {
token,
octokit,
prNumber,
sha,
noBaseComparison,
shouldExpandResultMessages,
} = await getInputs()
// Collect current PR's failed test results
const jobResults = await getJobResults(octokit, token, sha)
// Get the base to compare against
const baseResults = noBaseComparison
? null
: await getTestResultDiffBase(octokit)
const postCommentAsync = createCommentPostAsync(octokit, prNumber)
const failedTestLists = []
const passedTestsLists = []
// Collect failed test results for each job. We don't use this actively yet.
const perJobFailedLists = {}
// Consturct a comment body to post test report with summary & full details.
const comments = jobResults.result.reduce((acc, value, _idx) => {
const { data: testData } = value
const commentValues = []
// each job have nested array of test results
// Fill in each individual test suite failures
const groupedFails = {}
let resultMessage = ''
for (const testResult of testData.testResults ?? []) {
resultMessage += stripAnsi(testResult?.message)
resultMessage += '\n\n'
const failedAssertions = testResult?.assertionResults?.filter(
(res) => res.status === 'failed'
)
for (const fail of failedAssertions ?? []) {
const ancestorKey = fail?.ancestorTitles?.join(' > ')!
if (!groupedFails[ancestorKey]) {
groupedFails[ancestorKey] = []
}
groupedFails[ancestorKey].push(fail)
}
}
let hasFailedTest = false
for (const test of testData.testResults ?? []) {
if (test.status !== 'passed') {
const failedTest = test.name
if (!failedTestLists.includes(failedTest)) {
commentValues.push(`\`${failedTest}\``)
failedTestLists.push(failedTest)
if (!perJobFailedLists[value.job]) {
perJobFailedLists[value.job] = []
}
perJobFailedLists[value.job].push(failedTest)
}
} else {
passedTestsLists.push(test.name)
}
}
if (hasFailedTest) commentValues.push(`\n`)
// Currently there are too many test failures to post since it creates several comments.
// Only expands if explicitly requested in the option.
if (shouldExpandResultMessages) {
for (const group of Object.keys(groupedFails).sort()) {
const fails = groupedFails[group]
commentValues.push(`\n`)
fails.forEach((fail) => {
commentValues.push(`- ${group} > ${fail.title}`)
})
}
resultMessage = resultMessage.trim()
const strippedResultMessage =
resultMessage.length >= 50000
? resultMessage.substring(0, 50000) +
`...\n(Test result messages are too long, cannot post full message in comment. See the action logs for the full message.)`
: resultMessage
if (resultMessage.length >= 50000) {
console.log(
'Test result messages are too long, comment will post stripped.'
)
}
commentValues.push(`<details>`)
commentValues.push(`<summary>Expand output</summary>`)
commentValues.push(strippedResultMessage)
commentValues.push(`</details>`)
commentValues.push(`\n`)
}
// Check last comment body's length, append or either create new comment depends on the length of the text.
const commentIdxToUpdate = acc.length - 1
if (
acc.length === 0 ||
commentValues.join(`\n`).length +
acc[commentIdxToUpdate].contents.join(`\n`).length >
60000
) {
acc.push({
header: [`Commit: ${sha}`],
contents: commentValues,
})
} else {
acc[commentIdxToUpdate].contents.push(...commentValues)
}
return acc
}, [])
const commentsWithSummary = [
// First comment is always a summary
{
header: [`Commit: ${sha}`],
contents: [
getTestSummary(sha, noBaseComparison ? null : baseResults, jobResults),
],
},
...comments,
]
const isMultipleComments = comments.length > 1
try {
// Store the list of failed test paths to a file
fs.writeFileSync(
'./failed-test-path-list.json',
JSON.stringify(
failedTestLists.filter((x) => x.length > 5),
null,
2
)
)
fs.writeFileSync(
'./passed-test-path-list.json',
JSON.stringify(passedTestsLists, null, 2)
)
if (!prNumber) {
return
}
if (jobResults.result.length === 0) {
console.log('No failed test results found :tada:')
await postCommentAsync(
`### Next.js test passes :green_circle: ${BOT_COMMENT_MARKER}` +
`\nCommit: ${sha}\n`
)
return
}
for (const [idx, comment] of commentsWithSummary.entries()) {
const value = {
...comment,
}
if (isMultipleComments) {
value.header.push(
`**(Report ${idx + 1}/${commentsWithSummary.length})**`
)
}
// Add collapsible details for full test report
if (idx > 0) {
value.contents = [
`<details>`,
`<summary>Expand full test reports</summary>`,
`\n`,
...value.contents,
`</details>`,
]
}
const commentBodyText = createFormattedComment(value)
await postCommentAsync(commentBodyText)
}
} catch (error) {
console.error('Failed to post comment', error)
// Comment update should succeed, otherwise let CI fails
throw error
}
}
run() | typescript | github | https://github.com/vercel/next.js | .github/actions/next-integration-stat/src/index.ts |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
import time
from socket import error as SocketError
import errno
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.base import NodeState
from libcloud.utils.xml import fixxpath, findall
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification
from plumbery.exception import PlumberyException
from plumbery.infrastructure import PlumberyInfrastructure
from plumbery.plogging import plogging
from plumbery.util import retry
from plumbery.polishers.monitoring import MonitoringConfiguration
__all__ = ['PlumberyNodes']
class PlumberyNodes(object):
    """
    Cloud automation for computing nodes

    :param facility: the underlying physical facility
    :type facility: :class:`plumbery.PlumberyFacility`

    A node is a virtual machine with some permanent storage, and one or several
    network connections. It can have many attributes, such as the number of
    CPU, some memory, an operating system, etc.

    Example::

        from plumbery.nodes import PlumberyNodes
        nodes = PlumberyNodes(facility)
        nodes.build_blueprint(blueprint, container)

    In this example an instance is initialised at the given facility, and then
    it is asked to create nodes described in the provided blueprint.
    This is not covering network and the security, but only the nodes.

    Attributes:
        facility (PlumberyFacility):
            a handle to the physical facility where nodes are deployed

    """

    # class-level default; replaced per instance in __init__ with the
    # engine taken from the facility
    plumbery = None
def __init__(self, facility=None):
"""Put nodes in context"""
# handle to parent parameters and functions
self.facility = facility
self.region = facility.region
self.backup = facility.backup
self.plumbery = facility.plumbery
def __repr__(self):
return "<PlumberyNodes facility: {}>".format(self.facility)
    def build_blueprint(self, blueprint, container):
        """
        Create missing nodes

        :param blueprint: the blueprint to build
        :type blueprint: ``dict``

        :param container: the container where nodes will be built
        :type container: :class:`plumbery.PlumberyInfrastructure`

        Nodes already deployed are left untouched. In safe mode, or when the
        container lacks a network domain or an Ethernet network, creation is
        skipped with a log message. API errors are retried when they look
        transient (RESOURCE_BUSY, connection reset).
        """

        plogging.debug("Building nodes of blueprint '{}'".format(
            blueprint['target']))

        self.facility.power_on()

        # tolerate blueprints with no (or malformed) 'nodes' section
        if ('nodes' not in blueprint
                or not isinstance(blueprint['nodes'], list)):
            plogging.debug("No nodes have been defined in '{}'".format(
                blueprint['target']))
            blueprint['nodes'] = []

        for item in blueprint['nodes']:

            # each item is either a bare label or {label: settings}
            if type(item) is dict:
                label = list(item.keys())[0]
                settings = list(item.values())[0]
            else:
                label = item
                settings = {}

            # a label such as 'server[1..3]' covers several nodes
            for label in self.expand_labels(label):

                plogging.info("Creating node '{}'".format(label))

                if self.get_node(label):
                    plogging.info("- already there")
                    continue

                description = '#plumbery'
                if 'description' in settings:
                    description = settings['description'] + ' #plumbery'

                if 'appliance' in settings:
                    imageName = settings['appliance']
                else:
                    imageName = None

                image = self.facility.get_image(imageName)
                if image is None:
                    raise PlumberyException("Error: unable to find image "
                                            "for '{}'!".format(imageName))
                plogging.debug("- using image '{}'".format(image.name))

                # parse '<count> <cores-per-cpu> <performance>' with defaults
                cpu = None
                if 'cpu' in settings:
                    tokens = str(settings['cpu']).split(' ')
                    if len(tokens) < 2:
                        tokens.append('1')
                    if len(tokens) < 3:
                        tokens.append('standard')

                    if (int(tokens[0]) < 1
                            or int(tokens[0]) > 32):
                        plogging.info("- cpu should be between 1 and 32")

                    elif (int(tokens[1]) < 1
                            or int(tokens[1]) > 2):
                        plogging.info("- core per cpu should be either 1 or 2")

                    elif tokens[2].upper() not in ('STANDARD',
                                                   'HIGHPERFORMANCE'):
                        plogging.info("- cpu speed should be either 'standard'"
                                      " or 'highspeed'")

                    else:
                        cpu = DimensionDataServerCpuSpecification(
                            cpu_count=tokens[0],
                            cores_per_socket=tokens[1],
                            performance=tokens[2].upper())
                        plogging.debug("- assigning {} cpus".format(
                            cpu.cpu_count))
                        plogging.debug("- core per cpu: {}".format(
                            cpu.cores_per_socket))
                        plogging.debug("- cpu performance: {}".format(
                            cpu.performance.lower()))

                # memory is expressed in GB and must fit provider limits
                memory = None
                if 'memory' in settings:
                    memory = int(settings['memory'])

                    if memory < 1 or memory > 256:
                        plogging.info("- memory should be between 1 and 256")
                        memory = None
                    else:
                        plogging.debug("- assigning {} GB of memory".format(
                            memory))

                if self.plumbery.safeMode:
                    plogging.info("- skipped - safe mode")
                    continue

                if container.domain is None:
                    plogging.info("- missing network domain")
                    continue

                if container.network is None:
                    plogging.info("- missing Ethernet network")
                    continue

                # derive a static primary IPv4 from the 'glue' directives,
                # completing partial addresses from the network subnet
                primary_ipv4 = None
                if 'glue' in settings:
                    for line in settings['glue']:
                        tokens = line.strip(' ').split(' ')
                        token = tokens.pop(0)

                        if token.lower() == 'primary':
                            token = container.network.name

                        if token != container.network.name:
                            continue

                        if len(tokens) < 1:
                            break

                        plogging.info("Glueing node '{}' to network '{}'"
                                      .format(label, token))
                        numbers = tokens.pop(0).strip('.').split('.')
                        subnet = container.network.private_ipv4_range_address.split('.')
                        while len(numbers) < 4:
                            numbers.insert(0, subnet[3-len(numbers)])
                        primary_ipv4 = '.'.join(numbers)
                        plogging.debug("- using address '{}'"
                                       .format(primary_ipv4))
                        break

                # retry loop around the create call; should_start flips when
                # the provider requires the server to be deployed started
                retries = 2
                should_start = False
                while True:

                    try:
                        if primary_ipv4 is not None:
                            self.region.create_node(
                                name=label,
                                image=image,
                                auth=NodeAuthPassword(
                                    self.plumbery.get_shared_secret()),
                                ex_network_domain=container.domain,
                                ex_primary_ipv4=primary_ipv4,
                                ex_cpu_specification=cpu,
                                ex_memory_gb=memory,
                                ex_is_started=should_start,
                                ex_description=description)
                        else:
                            self.region.create_node(
                                name=label,
                                image=image,
                                auth=NodeAuthPassword(
                                    self.plumbery.get_shared_secret()),
                                ex_network_domain=container.domain,
                                ex_vlan=container.network,
                                ex_cpu_specification=cpu,
                                ex_memory_gb=memory,
                                ex_is_started=should_start,
                                ex_description=description)
                        plogging.info("- in progress")

                        if should_start:  # stop the node after start
                            plogging.info("- waiting for node to be deployed")
                            node = None
                            while True:
                                node = self.get_node(label)
                                if node is None:
                                    plogging.error("- aborted - missing node '{}'".format(label))
                                    return
                                # action None means deployment has completed
                                if node.extra['status'].action is None:
                                    break
                                if (node is not None
                                        and node.extra['status'].failure_reason is not None):
                                    plogging.error("- aborted - failed deployment "
                                                   "of node '{}'".format(label))
                                    return
                                time.sleep(20)
                            if node is not None:
                                self.region.ex_shutdown_graceful(node)
                                plogging.info("- shutting down after deployment")

                    except SocketError as feedback:
                        # connection resets are retried a couple of times
                        if feedback.errno == errno.ECONNRESET and retries > 0:
                            retries -= 1
                            time.sleep(10)
                            continue
                        else:
                            plogging.info("- unable to create node")
                            plogging.error(str(feedback))

                    except Exception as feedback:
                        if 'RESOURCE_BUSY' in str(feedback):
                            time.sleep(10)
                            continue

                        elif 'RESOURCE_NOT_FOUND' in str(feedback):
                            plogging.info("- not now")
                            plogging.error(str(feedback))

                        elif 'RESOURCE_LOCKED' in str(feedback):
                            plogging.info("- not now - locked")
                            plogging.error(str(feedback))

                        elif ('INVALID_INPUT_DATA: Cannot deploy server '
                              'with Software Labels in the "Stopped" state.' in
                              str(feedback)):
                            # provider insists on a started deployment; retry
                            # started, then shut the node down afterwards
                            should_start = True
                            continue

                        else:
                            plogging.info("- unable to create node")
                            plogging.error(str(feedback))

                    break
    def destroy_blueprint(self, blueprint):
        """
        Destroys nodes of a given blueprint

        :param blueprint: the blueprint to build
        :type blueprint: ``dict``

        Running nodes are not destroyed; nodes flagged ``destroy: never``
        abort the whole operation (returns False). Monitoring, secondary
        interfaces and Internet NAT are removed before the node itself.
        """

        self.facility.power_on()

        infrastructure = PlumberyInfrastructure(self.facility)
        container = infrastructure.get_container(blueprint)

        if ('nodes' not in blueprint
                or not isinstance(blueprint['nodes'], list)):
            return

        # destroy in reverse order
        for item in reversed(blueprint['nodes']):

            # each item is either a bare label or {label: settings}
            if type(item) is dict:
                label = list(item)[0]
                settings = item[label]
            else:
                label = str(item)
                settings = {}

            for label in self.expand_labels(label):

                node = self.get_node(label)
                if node is None:
                    plogging.info("Destroying node '{}'".format(label))
                    plogging.info("- not found")
                    continue

                if 'destroy' in settings and settings['destroy'] == 'never':
                    plogging.info("Destroying node '{}'".format(label))
                    plogging.info("- this node can never be destroyed")
                    return False

                # wait up to ~300s for any pending graceful shutdown to end
                timeout = 300
                tick = 6
                while node.extra['status'].action == 'SHUTDOWN_SERVER':
                    time.sleep(tick)
                    node = self.get_node(label)
                    timeout -= tick
                    if timeout < 0:
                        break

                if node.state == NodeState.RUNNING:
                    plogging.info("Destroying node '{}'".format(label))
                    plogging.info("- skipped - node is up and running")
                    continue

                if self.plumbery.safeMode:
                    plogging.info("Destroying node '{}'".format(label))
                    plogging.info("- skipped - safe mode")
                    continue

                # strip monitoring, secondary NICs and NAT before destroying
                configuration = MonitoringConfiguration(
                    engine=container.facility.plumbery,
                    facility=container.facility)
                configuration.deconfigure(node, settings)

                self._detach_node(node, settings)
                container._detach_node_from_internet(node)

                plogging.info("Destroying node '{}'".format(label))
                while True:

                    try:
                        self.region.destroy_node(node)
                        plogging.info("- in progress")

                    except Exception as feedback:

                        # transient: wait and retry the destroy call
                        if 'RESOURCE_BUSY' in str(feedback):
                            time.sleep(10)
                            continue

                        elif 'RESOURCE_NOT_FOUND' in str(feedback):
                            plogging.info("- not found")

                        elif 'SERVER_STARTED' in str(feedback):
                            plogging.info("- skipped - node is up and running")

                        elif 'RESOURCE_LOCKED' in str(feedback):
                            plogging.info("- not now - locked")
                            return False

                        else:
                            plogging.info("- unable to destroy node")
                            plogging.error(str(feedback))

                    break
    def _detach_node(self, node, settings):
        """
        Detach a node from multiple networks

        :param node: the target node
        :type node: :class:`libcloud.compute.base.Node`

        :param settings: node settings; ``running: always`` is honoured
        :type settings: ``dict``

        :return: True on success or nothing to do, False on API failure
        :rtype: ``bool``

        This function removes all secondary network interfaces to a node, and
        any potential translation to the public Internet.
        """

        if node is None:
            return True

        # a node pinned as always-running is left untouched
        if ('running' in settings
                and settings['running'] == 'always'
                and node.state == NodeState.RUNNING):
            return True

        for interface in self._list_secondary_interfaces(node):

            plogging.info("Detaching node '{}' from network '{}'".format(
                node.name, interface['network']))

            while True:
                try:
                    self.region.ex_destroy_nic(interface['id'])
                    plogging.info("- in progress")

                except Exception as feedback:
                    # transient: wait and retry the detach call
                    if 'RESOURCE_BUSY' in str(feedback):
                        time.sleep(10)
                        continue

                    elif 'RESOURCE_LOCKED' in str(feedback):
                        plogging.info("- not now - locked")

                    elif 'NO_CHANGE' in str(feedback):
                        plogging.info("- already there")

                    else:
                        plogging.info("- unable to detach node")
                        plogging.error(str(feedback))
                        return False

                break

        return True
@classmethod
def expand_labels(self, label):
"""
Designates multiple nodes with a simple label
:param label: the label to be expanded, e.g., ``server[1..2]_eu``
:type label: ``str``
:return: a list of names, e.g., ``['server1_eu', 'server2_eu']``
:rtype: ``list`` of ``str``
This function creates multiple names where applicable::
>>>nodes.expand_labels('mongodb')
['mongodb']
>>>nodes.expand_labels('mongodb[1..3]_eu')
['mongodb1_eu', 'mongodb2_eu', 'mongodb3_eu']
"""
matches = re.match(r'(.*)\[([0-9]+)..([0-9]+)\](.*)', label)
if matches is None:
if re.match("^[0-9a-zA-Z]([0-9a-zA-Z\-]{0,61}[0-9a-zA-Z])?$", label) is None:
plogging.warning("Warning: '{}' is not a valid hostname"
.format(label))
return [label]
labels = []
for index in range(int(matches.group(2)), int(matches.group(3))+1):
label = matches.group(1)+str(index)+matches.group(4)
if re.match("^[0-9a-zA-Z]([0-9a-zA-Z\-]{0,61}[0-9a-zA-Z])?$", label) is None:
plogging.warning("Warning: '{}' is not a valid hostname"
.format(label))
labels.append(label)
return labels
    @retry(SocketError)
    def get_node(self, path):
        """
        Retrieves a node by name

        :param path: the name of the target node, or its location
        :type path: ``str`` or ``list``of ``str``

        :return: the target node, or None
        :rtype: :class:`libcloud.compute.base.Node`

        This function always make a real API call to get fresh state of the
        target node. Therefore, it can be used in loops where you monitor
        the evolution of the node during build or other change operation.

        This function searches firstly at the current facility. If the
        name is a complete path to a remote node, then plumbery looks
        there. If a different region is provided, then authentication is done
        against the related endpoint.

        For example if ``MyServer`` has been defined in a data centre in
        Europe::

            >>>infrastructure.get_ethernet('MyServer')
            >>>infrastructure.get_ethernet(['EU6', 'MyServer'])
            Looking for remote node 'EU6::MyServer'
            - found it
            >>>infrastructure.get_ethernet(['dd-eu', 'EU6', 'MyServer'])
            Looking for offshore node 'dd-eu::EU6::MyServer'
            - found it

        """

        # accept 'location::name' strings as well as lists
        if isinstance(path, str):
            path = path.split('::')

        node = None

        if len(path) == 2:  # force offshore lookup if needed
            target_region = self.facility.get_region(path[0])
            if target_region != self.facility.get_region():
                # NOTE(review): this inserts the region object itself, while
                # the len(path)==3 branch below passes path[0] to
                # get_compute_driver(region=...) -- confirm a region object
                # is accepted there, not just a region name
                path.insert(0, target_region)

        if len(path) == 1:  # local name
            self.facility.power_on()
            for node in self.region.list_nodes():
                # skip nodes from other data centres in the same region
                if node.extra['datacenterId'] != self.facility.get_location_id():
                    continue

                if node.name == path[0]:
                    self._enrich_node(node)
                    return node

        elif len(path) == 2:  # different location, same region
            self.facility.power_on()
            try:
                self.region.ex_get_location_by_id(path[0])
            except IndexError:
                plogging.warning("'{}' is unknown".format(path[0]))
                return None
            plogging.debug("Looking for remote node '{}'"
                           .format('::'.join(path)))
            for node in self.region.list_nodes():
                if node.extra['datacenterId'] != path[0]:
                    continue
                if node.name == path[1]:
                    plogging.debug("- found it")
                    self._enrich_node(node)
                    return node

        elif len(path) == 3:  # other region
            # authenticate against the remote regional endpoint
            offshore = self.plumbery.get_compute_driver(region=path[0])
            try:
                remoteLocation = offshore.ex_get_location_by_id(path[1])
            except IndexError:
                plogging.warning("'{}' is unknown".format(path[1]))
                return None
            plogging.debug("Looking for offshore node '{}'"
                           .format('::'.join(path)))
            for node in offshore.list_nodes():
                if node.extra['datacenterId'] != path[1]:
                    continue
                if node.name == path[2]:
                    plogging.debug("- found it")
                    self._enrich_node(node, region=offshore)
                    return node

        return None
    def _enrich_node(self, node, region=None):
        """
        Adds attributes to a node

        :param node: the node to enrich, modified in place
        :type node: :class:`libcloud.compute.base.Node`

        :param region: driver to query; defaults to this facility's region
        :type region: compute driver

        This function is a hack, aiming to complement the nice job done by
        Libcloud:
        - add public IPv4 if one exists
        - add disk size, ids, etc.
        """

        if region is None:
            region = self.region

        # hack because the driver does not report public ipv4 accurately
        if len(node.public_ips) < 1:
            domain = region.ex_get_network_domain(
                node.extra['networkDomainId'])
            # NOTE(review): this queries self.region while the surrounding
            # code uses the (possibly offshore) region argument -- confirm
            # this is intentional for nodes in another region
            for rule in self.region.ex_list_nat_rules(domain):
                if rule.internal_ip == node.private_ips[0]:
                    node.public_ips.append(rule.external_ip)
                    break

        # hack to retrieve disk information
        node.extra['disks'] = []
        try:
            element = region.connection.request_with_orgId_api_2(
                'server/server/%s' % node.id).object
            for disk in findall(element, 'disk', TYPES_URN):
                scsiId = int(disk.get('scsiId'))
                speed = disk.get('speed')
                id = disk.get('id')
                sizeGb = int(disk.get('sizeGb'))
                node.extra['disks'].append({
                    'scsiId': scsiId,
                    'speed': speed,
                    'id': id,
                    'size': sizeGb
                })
        except Exception as feedback:
            # a node that vanished mid-call is simply left without disks
            if 'RESOURCE_NOT_FOUND' in str(feedback):
                pass

            else:
                plogging.info("Error: unable to retrieve storage information")
                plogging.error(str(feedback))
@classmethod
def list_nodes(self, blueprint):
"""
Retrieves the list of nodes that have been defined for this blueprint.
:return: names of nodes defined for this blueprint
:rtype: ``list`` of ``str`` or []
"""
labels = set()
if 'nodes' in blueprint:
for item in blueprint['nodes']:
if type(item) is dict:
label = list(item)[0]
else:
label = str(item)
for label in PlumberyNodes.expand_labels(label):
labels.add(label)
return list(labels)
def _list_secondary_interfaces(self, node):
"""
Retrieves the list of secondary interfaces
This is a hack. Code here should really go to the Libcloud driver in
libcloud.compute.drivers.dimensiondata.py _to_node()
"""
element = self.region.connection.request_with_orgId_api_2(
'server/server/%s' % node.id).object
if element.find(fixxpath('networkInfo', TYPES_URN)) is None:
return []
interfaces = []
items = element.findall(
fixxpath('networkInfo/additionalNic', TYPES_URN))
for item in items:
interfaces.append({'id': item.get('id'),
'network': item.get('vlanName')})
return interfaces
def polish_blueprint(self, blueprint, polishers, container):
"""
Walks a named blueprint for this facility and polish related resources
:param blueprint: the blueprint to build
:type blueprint: ``dict``
:param polishers: polishers to be applied
:type polishers: list of :class:`plumbery.PlumberyPolisher`
:param container: where these nodes are located
:type container: list of :class:`plumbery.PlumberyInfrastructure`
"""
if 'nodes' not in blueprint:
return
for item in blueprint['nodes']:
if type(item) is dict:
label = list(item)[0]
settings = item[label]
else:
label = str(item)
settings = {}
for label in self.expand_labels(label):
node = self.get_node(label)
settings['name'] = label
for polisher in polishers:
polisher.shine_node(node, settings, container)
def start_blueprint(self, blueprint):
"""
Starts nodes of a given blueprint at this facility
:param blueprint: the blueprint to build
:type blueprint: ``dict``
"""
if 'nodes' not in blueprint:
return
for item in blueprint['nodes']:
if type(item) is dict:
label = list(item)[0]
else:
label = item
for label in self.expand_labels(label):
self.start_node(label)
    def start_node(self, node):
        """
        Starts one node

        :param node: the target node, or its name
        :type node: :class:`Node` or ``str``

        Does nothing in safe mode, or when the node cannot be found.
        Transient RESOURCE_BUSY errors are retried every 10 seconds.
        """

        # accept either a node object or its name
        if isinstance(node, str):
            name = node
            node = self.get_node(name)
        else:
            name = node.name

        plogging.info("Starting node '{}'".format(name))
        if node is None:
            plogging.info("- not found")
            return

        if self.plumbery.safeMode:
            plogging.info("- skipped - safe mode")
            return

        while True:
            try:
                self.region.ex_start_node(node)
                plogging.info("- in progress")

            except Exception as feedback:
                # transient: wait and retry the start call
                if 'RESOURCE_BUSY' in str(feedback):
                    time.sleep(10)
                    continue

                elif 'SERVER_STARTED' in str(feedback):
                    plogging.info("- skipped - node is up and running")

                else:
                    plogging.info("- unable to start node")
                    plogging.error(str(feedback))

            break
def stop_blueprint(self, blueprint):
"""
Stops nodes of the given blueprint at this facility
:param blueprint: the blueprint to build
:type blueprint: ``dict``
You can use the following setting to prevent plumbery from stopping a
node::
- sql:
domain: *vdc1
ethernet: *data
nodes:
- slaveSQL:
running: always
"""
if 'nodes' not in blueprint:
return
for item in blueprint['nodes']:
if type(item) is dict:
label = list(item.keys())[0]
settings = list(item.values())[0]
else:
label = item
settings = {}
for label in self.expand_labels(label):
self.stop_node(label, settings)
def stop_node(self, node, settings={}):
"""
Stops one node
:param node: the target node, or its name
:type node: :class:`Node` or ``str``
:param settings: additional attributes for this node
:type settings: ``dict``
"""
if isinstance(node, str):
name = node
node = self.get_node(name)
else:
name = node.name
plogging.info("Stopping node '{}'".format(name))
if node is None:
plogging.info("- not found")
return
if ('running' in settings
and settings['running'] == 'always'
and node.state == NodeState.RUNNING):
plogging.info("- skipped - node has to stay always on")
return
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
retry = True
while True:
try:
self.region.ex_shutdown_graceful(node)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'UNEXPECTED_ERROR' in str(feedback):
time.sleep(10)
continue
elif 'VMWARE_TOOLS_INVALID_STATUS' in str(feedback):
# prevent transient errors
if retry:
retry = False
time.sleep(30)
continue
plogging.info("- unable to shutdown gracefully "
"- invalid VMware tools")
plogging.info("- powering the node off")
try:
self.region.ex_power_off(node)
plogging.info("- in progress")
except Exception as feedback:
if 'SERVER_STOPPED' in str(feedback):
plogging.info("- already stopped")
else:
plogging.info("- unable to stop node")
plogging.error(str(feedback))
elif 'SERVER_STOPPED' in str(feedback):
plogging.info("- already stopped")
else:
plogging.info("- unable to stop node")
plogging.error(str(feedback))
break | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# MySQL querying and updates for a recipe site
# By Jesse Hunt
import argparse
import sys
import MySQLdb
import yaml
def get_database_variable(yaml_file, global_variable):
    """Return the value stored under *global_variable* in *yaml_file*.

    :param yaml_file: path to a YAML configuration file
    :param global_variable: top-level key to read from the document
    :return: the value found under that key
    :raises KeyError: if the key is not present in the document
    """
    with open(yaml_file, 'r') as f:
        # safe_load: plain yaml.load can instantiate arbitrary Python
        # objects from tagged input, and is deprecated without a Loader
        doc = yaml.safe_load(f)
    return doc[global_variable]
class recipeDatabase():
    """Class to do all the database lifting on maybeyou.live

    Wraps a single MySQL connection (``self.db``) and cursor (``self.cur``)
    used by the query/update helper methods.
    """
    def __init__(self, yaml_file):
        """Open a MySQL connection using credentials read from *yaml_file*."""
        # grab all the yaml info
        db_host = get_database_variable(yaml_file, "db_host")
        db_user = get_database_variable(yaml_file, "db_user")
        db_pass = get_database_variable(yaml_file, "db_pass")
        db_name = get_database_variable(yaml_file, "db_name")
        # keep the connection and one shared cursor for all queries
        self.db = MySQLdb.connect(host=db_host, user=db_user, passwd=db_pass,
                                  db=db_name)
        self.cur = self.db.cursor()
def destroy(self,):
"""close database connection"""
self.db.commit()
self.db.close()
def get_recipes(self, name=None, restrictions=None):
"""go get the recipes, probably for the front page"""
q = ("SELECT id, name, calories, carbs, lipids, protein, sugar ")
if not name and not restrictions:
self.cur.execute(q + "FROM recipes")
else:
self.cur.execute(q + "FROM recipes where name='{0}'".format(name))
# here's the return from the database
tuple_o_recipes = self.cur.fetchall()
final_list = []
# formatting things much more nicely
for row in tuple_o_recipes:
new_dict = {}
new_dict["id"] = row[0]
new_dict["name"] = row[1]
new_dict["calories"] = row[2]
new_dict["carbs"] = row[3]
new_dict["lipids"] = row[4]
new_dict["protein"] = row[5]
new_dict["sugar"] = row[6]
final_list.append(new_dict)
return final_list
def insert_new_recipe(self, name, cuisine, prep_time, meal_type,
pre_vs_post, points_dict, macros_dict):
"""Takes recipe info, inserts to database"""
q = ("""INSERT into recipes VALUES (0, {0}, 0, {1}, {2}, {3}, """ +
"""{4}, {5}, {6});""").format(name, cuisine, prep_time,
meal_type, pre_vs_post,
points_dict, macros_dict)
self.cur.execute(q)
def get_food(self, name=None):
"""go get the base food and info about it"""
q = ("SELECT ndbno, name, calories, carbs, lipids, protein, sugar ")
self.cur.execute(q + "FROM base_foods where name='{0}'".format(name))
# here's the return from the database
tuple_o_foods = self.cur.fetchall()
final_list = []
# formatting things much more nicely
for row in tuple_o_foods:
new_dict = {}
new_dict["ndbno"] = row[0]
new_dict["name"] = row[1]
new_dict["calories"] = row[2]
new_dict["carbs"] = row[3]
new_dict["lipids"] = row[4]
new_dict["protein"] = row[5]
new_dict["sugar"] = row[6]
final_list.append(new_dict)
return final_list
def get_all_foods(self):
"""go get the base foods by their ndbno"""
self.cur.execute("SELECT ndbno FROM base_foods")
# here's the return from the database
food_ids = self.cur.fetchall()
final_list = []
# formatting things much more nicely
for row in food_ids:
final_list.append(str(row[0]))
return final_list
def update_food(self, ndbno, calories, carbs, protein, sugar, lipids,
fiber, measurements):
"""go update the base foods by their ndbno"""
# formatting things much more nicely
q = ('''update base_foods set calories="{0}", carbs="{1}",''' +
'''protein="{2}", sugar="{3}", lipids="{4}", fiber="{5}", ''' +
'''measurements="{6}" where ''' +
'''ndbno="{7}"''').format(calories, carbs, protein, sugar,
lipids, fiber, measurements, ndbno)
self.cur.execute(q)
def insert_food(self, ndbno, name, calories, carbs, protein, sugar,
lipids, fiber, measurements):
"""Takes food info, inserts to database"""
q = ("""INSERT into base_foods VALUES""" +
"""("{0}", "{1}", "{2}", "{3}", "{4}", "{5}", "{6}",""" +
""""{7}", "{8}")""").format(ndbno, name, calories, carbs, protein,
sugar, lipids, fiber, measurements)
self.cur.execute(q)
if __name__ == "__main__":
    # Command-line front end: query recipes, or (nominally) insert one.
    usage = 'Usage: myll_db.py [-q/-i/-n] [<input>/<query>/<name>]'
    help = 'A script that lets you query/update a recipe database. ' + usage
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument('--input', '-i', action='store_true', default=False,
                        help='This has to be a dictionary of information ' +
                        'about the recipe: name, cuisine, prep_time' +
                        ', meal_type')
    parser.add_argument('--query', '-q', action='store_true', default=False,
                        help='query the database for a reciepe(s). Will ' +
                        'return all if you do not specify other options.')
    parser.add_argument('--name', '-n', help='recipe name to query',
                        default=None)
    args = parser.parse_args()
    input_dict = args.input
    query = args.query
    name = args.name
    if not args.input and not args.query:
        # NOTE(review): prints usage but does not exit; execution falls
        # through and still opens the database below -- confirm intended.
        print usage
    database = recipeDatabase("../.config/database_config.yaml")
    if input_dict:
        # NOTE(review): cuisine, prep_time, meal_type, pre_vs_post,
        # points_dict and macros_dict are never defined in this script, so
        # the --input path raises NameError at runtime. --input is also a
        # store_true flag, not the dictionary its help text promises.
        database.insert_new_recipe(name, cuisine, prep_time, meal_type,
                                   pre_vs_post, points_dict, macros_dict)
    if query:
        if name:
            recipe_list = database.get_recipes(name)
            for row in recipe_list:
                print row
        else:
            recipe_list = database.get_recipes()
            for row in recipe_list:
                print row
    database.destroy()
# Example adapted from VisPy, which is released under the BSD license
# This script enables multiple isosurfaces based on the variable 'level' defined in glsl code.
# The color and transparency is cauculated according to 'level' and 'threshold'.
# TODO: improve colormap, current one is kind of random.
# TODO: make variable 'level' uniform or rewrite the visual class which could accetp 'level' as arguments.
import sys
import numpy as np
from vispy import app, scene
from vispy.gloo import gl
from astropy.io import fits
from vispy.visuals.volume import frag_dict, FRAG_SHADER
# Custom shader to replace existing iso one
ISO_SNIPPETS = dict(
before_loop="""
vec4 total_color = vec4(0.0); // final color
vec4 src = vec4(0.0);
vec4 dst = vec4(0.0);
vec3 dstep = 1.5 / u_shape; // step to sample derivative
gl_FragColor = vec4(0.0);
float val_prev = 0;
float outa = 0;
vec3 loc_prev = vec3(0.0);
vec3 loc_mid = vec3(0.0);
int level = 4; // level is the number of isosurface layers
""",
in_loop="""
for (int i=0; i<level; i++){
// render from outside to inside
if (val < u_threshold*(1.0-i/float(level)) && val_prev > u_threshold*(1.0-i/float(level))){
// Use bisection to find correct position of contour
for (int i=0; i<20; i++) {
loc_mid = 0.5 * (loc_prev + loc);
val = $sample(u_volumetex, loc_mid).g;
if (val < u_threshold) {
loc = loc_mid;
} else {
loc_prev = loc_mid;
}
}
dst = $cmap(val); // this will call colormap function if have
dst = calculateColor(dst, loc, dstep);
dst.a = 1. * (1.0 - i/float(level)); // transparency
src = total_color;
outa = src.a + dst.a * (1 - src.a);
total_color = (src * src.a + dst * dst.a * (1 - src.a)) / outa;
total_color.a = outa;
}
}
val_prev = val;
loc_prev = loc;
""",
after_loop="""
gl_FragColor = total_color;
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
frag_dict['iso'] = ISO_FRAG_SHADER
# Load the radio cube; replace NaNs with zeros so the shader never samples
# an undefined value.
hdu = fits.open('L1448_13CO.fits')[0]
data = np.nan_to_num(hdu.data)
# Create a canvas with a 3D viewport
canvas = scene.SceneCanvas(keys='interactive', config={'depth_size': 24})
view = canvas.central_widget.add_view()
from vispy.color import BaseColormap
# create colormaps that work well for translucent and additive volume rendering
# TODO: use a better color scheme to replace
class TransFire(BaseColormap):
    # Fire-like colormap: alpha ramps with intensity so low values fade out.
    glsl_map = """
    vec4 translucent_fire(float t) {
        return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
    }
    """
class TransGrays(BaseColormap):
    # Grayscale colormap with very low alpha, suited to translucent volumes.
    glsl_map = """
    vec4 translucent_grays(float t) {
        return vec4(t, t, t, t*0.05);
    }
    """
# Rescale intensities and lightly smooth the cube so isosurfaces are cleaner.
data /= 4
from scipy.ndimage import gaussian_filter
data = gaussian_filter(data, 1)
# Create isosurface visual
'''
# threshold : float
#     The threshold to use for the isosurface render method. By default
#     the mean of the given volume is used.
# clim : tuple of two floats | None
#     The contrast limits. The values in the volume are mapped to
#     black and white corresponding to these values.
'''
surface = scene.visuals.Volume(data, clim=(0, 1), method='iso',
                               parent=view.scene, cmap=TransGrays(),
                               relative_step_size=0.5, emulate_texture=True)
# u_threshold is the outermost isosurface level consumed by ISO_SNIPPETS above
surface.shared_program['u_threshold'] = 0.8
# bind uniforms
# surface.set_gl_state('translucent', cull_face=False)
# Add a 3D axis to keep us oriented
axis = scene.visuals.XYZAxis(parent=view.scene)
# Use a 3D camera
# Manual bounds; Mesh visual does not provide bounds yet
# Note how you can set bounds before assigning the camera to the viewbox
cam = scene.TurntableCamera(elevation=30, azimuth=30)
view.camera = cam
if __name__ == '__main__':
    canvas.show()
    if sys.flags.interactive == 0:
        # block in the event loop only when not run from an interactive shell
        app.run()
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rafthttp
import (
"bytes"
"reflect"
"testing"
"go.etcd.io/etcd/client/pkg/v3/types"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.etcd.io/raft/v3/raftpb"
)
// TestMsgAppV2 round-trips a sequence of raft messages through the msgAppV2
// encoder/decoder pair and verifies each message decodes back identically.
// The sequence deliberately interleaves link heartbeats, consecutive
// MsgApps, a term bump and an empty MsgApp, because the v2 codec keeps
// per-stream state (last index/term) between messages.
func TestMsgAppV2(t *testing.T) {
	tests := []raftpb.Message{
		linkHeartbeatMessage,
		{
			Type:    raftpb.MsgApp,
			From:    1,
			To:      2,
			Term:    1,
			LogTerm: 1,
			Index:   0,
			Entries: []raftpb.Entry{
				{Term: 1, Index: 1, Data: []byte("some data")},
				{Term: 1, Index: 2, Data: []byte("some data")},
				{Term: 1, Index: 3, Data: []byte("some data")},
			},
		},
		// consecutive MsgApp
		{
			Type:    raftpb.MsgApp,
			From:    1,
			To:      2,
			Term:    1,
			LogTerm: 1,
			Index:   3,
			Entries: []raftpb.Entry{
				{Term: 1, Index: 4, Data: []byte("some data")},
			},
		},
		linkHeartbeatMessage,
		// consecutive MsgApp after linkHeartbeatMessage
		{
			Type:    raftpb.MsgApp,
			From:    1,
			To:      2,
			Term:    1,
			LogTerm: 1,
			Index:   4,
			Entries: []raftpb.Entry{
				{Term: 1, Index: 5, Data: []byte("some data")},
			},
		},
		// MsgApp with higher term
		{
			Type:    raftpb.MsgApp,
			From:    1,
			To:      2,
			Term:    3,
			LogTerm: 1,
			Index:   5,
			Entries: []raftpb.Entry{
				{Term: 3, Index: 6, Data: []byte("some data")},
			},
		},
		linkHeartbeatMessage,
		// consecutive MsgApp
		{
			Type:    raftpb.MsgApp,
			From:    1,
			To:      2,
			Term:    3,
			LogTerm: 2,
			Index:   6,
			Entries: []raftpb.Entry{
				{Term: 3, Index: 7, Data: []byte("some data")},
			},
		},
		// consecutive empty MsgApp
		{
			Type:    raftpb.MsgApp,
			From:    1,
			To:      2,
			Term:    3,
			LogTerm: 2,
			Index:   7,
			Entries: nil,
		},
		linkHeartbeatMessage,
	}
	// Encoder and decoder share one in-memory buffer, mimicking the stream.
	b := &bytes.Buffer{}
	enc := newMsgAppV2Encoder(b, &stats.FollowerStats{})
	dec := newMsgAppV2Decoder(b, types.ID(2), types.ID(1))
	for i, tt := range tests {
		if err := enc.encode(&tt); err != nil {
			t.Errorf("#%d: unexpected encode message error: %v", i, err)
			continue
		}
		m, err := dec.decode()
		if err != nil {
			t.Errorf("#%d: unexpected decode message error: %v", i, err)
			continue
		}
		if !reflect.DeepEqual(m, tt) {
			t.Errorf("#%d: message = %+v, want %+v", i, m, tt)
		}
	}
}
from cache_toolbox.core import get_cached_content, set_cached_content, del_cached_content
from opaque_keys.edx.locations import Location
from django.test import TestCase
class Content:
    """Minimal stand-in for a contentstore asset: a location plus payload."""

    def __init__(self, location, content):
        """Remember the asset's location object and its raw content."""
        self.location = location
        self.content = content

    def get_id(self):
        """Return the deprecated SON form of this asset's location."""
        return self.location.to_deprecated_son()
class CachingTestCase(TestCase):
    # Tests for https://edx.lighthouseapp.com/projects/102637/tickets/112-updating-asset-does-not-refresh-the-cached-copy
    # Regression tests: the asset cache key must not distinguish between
    # str and unicode spellings of the same Location parts.
    unicodeLocation = Location(u'c4x', u'mitX', u'800', u'run', u'thumbnail', u'monsters.jpg')
    # Note that some of the parts are strings instead of unicode strings
    nonUnicodeLocation = Location('c4x', u'mitX', u'800', u'run', 'thumbnail', 'monsters.jpg')
    mockAsset = Content(unicodeLocation, 'my content')
    def test_put_and_get(self):
        # A value stored under the unicode location must be readable through
        # either spelling of the location.
        set_cached_content(self.mockAsset)
        self.assertEqual(self.mockAsset.content, get_cached_content(self.unicodeLocation).content,
                         'should be stored in cache with unicodeLocation')
        self.assertEqual(self.mockAsset.content, get_cached_content(self.nonUnicodeLocation).content,
                         'should be stored in cache with nonUnicodeLocation')
    def test_delete(self):
        # Deleting through one spelling must evict the entry for both.
        set_cached_content(self.mockAsset)
        del_cached_content(self.nonUnicodeLocation)
        self.assertEqual(None, get_cached_content(self.unicodeLocation),
                         'should not be stored in cache with unicodeLocation')
        self.assertEqual(None, get_cached_content(self.nonUnicodeLocation),
                         'should not be stored in cache with nonUnicodeLocation')
#!/usr/bin/env python
import xml.sax
from tests.unit import unittest
import boto.resultset
from boto.ec2.elb.loadbalancer import LoadBalancer
LISTENERS_RESPONSE = r"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<DescribeLoadBalancersResult>
<LoadBalancerDescriptions>
<member>
<SecurityGroups/>
<CreatedTime>2013-07-09T19:18:00.520Z</CreatedTime>
<LoadBalancerName>elb-boto-unit-test</LoadBalancerName>
<HealthCheck>
<Interval>30</Interval>
<Target>TCP:8000</Target>
<HealthyThreshold>10</HealthyThreshold>
<Timeout>5</Timeout>
<UnhealthyThreshold>2</UnhealthyThreshold>
</HealthCheck>
<ListenerDescriptions>
<member>
<PolicyNames/>
<Listener>
<Protocol>HTTP</Protocol>
<LoadBalancerPort>80</LoadBalancerPort>
<InstanceProtocol>HTTP</InstanceProtocol>
<InstancePort>8000</InstancePort>
</Listener>
</member>
<member>
<PolicyNames/>
<Listener>
<Protocol>HTTP</Protocol>
<LoadBalancerPort>8080</LoadBalancerPort>
<InstanceProtocol>HTTP</InstanceProtocol>
<InstancePort>80</InstancePort>
</Listener>
</member>
<member>
<PolicyNames/>
<Listener>
<Protocol>TCP</Protocol>
<LoadBalancerPort>2525</LoadBalancerPort>
<InstanceProtocol>TCP</InstanceProtocol>
<InstancePort>25</InstancePort>
</Listener>
</member>
</ListenerDescriptions>
<Instances/>
<Policies>
<AppCookieStickinessPolicies/>
<OtherPolicies/>
<LBCookieStickinessPolicies/>
</Policies>
<AvailabilityZones>
<member>us-east-1a</member>
</AvailabilityZones>
<CanonicalHostedZoneName>elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com</CanonicalHostedZoneName>
<CanonicalHostedZoneNameID>Z3DZXE0Q79N41H</CanonicalHostedZoneNameID>
<Scheme>internet-facing</Scheme>
<SourceSecurityGroup>
<OwnerAlias>amazon-elb</OwnerAlias>
<GroupName>amazon-elb-sg</GroupName>
</SourceSecurityGroup>
<DNSName>elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com</DNSName>
<BackendServerDescriptions/>
<Subnets/>
</member>
</LoadBalancerDescriptions>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>5763d932-e8cc-11e2-a940-11136cceffb8</RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>
"""
class TestListenerResponseParsing(unittest.TestCase):
    def test_parse_complex(self):
        """Parse LISTENERS_RESPONSE via boto's SAX handler and check every
        listener's (lb_port, instance_port, lb_proto, instance_proto)."""
        # NOTE(review): boto.handler is reached through the bare ``boto``
        # package pulled in by ``import boto.resultset`` -- confirm
        # boto/__init__ actually exposes ``handler``.
        rs = boto.resultset.ResultSet([
            ('member', LoadBalancer)
        ])
        h = boto.handler.XmlHandler(rs, None)
        xml.sax.parseString(LISTENERS_RESPONSE, h)
        listeners = rs[0].listeners
        # sorted() makes the comparison independent of document order
        self.assertEqual(
            sorted([l.get_complex_tuple() for l in listeners]),
            [
                (80, 8000, 'HTTP', 'HTTP'),
                (2525, 25, 'TCP', 'TCP'),
                (8080, 80, 'HTTP', 'HTTP'),
            ]
        )
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
import index from 'tailwindcss/index.css'
import preflight from 'tailwindcss/preflight.css'
import theme from 'tailwindcss/theme.css'
import utilities from 'tailwindcss/utilities.css'
/**
 * The bundled Tailwind CSS stylesheets, re-exported under one object keyed
 * by entry-point name, for consumption by the in-browser build.
 */
export const css = {
  index,
  preflight,
  theme,
  utilities,
}
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cgi
import re
import os
try:
import cPickle as pickle
except:
import pickle
import unicodedata
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
class Addon:
'''
This class provides a lot of code that is used across many XBMC addons
in the hope that it will simplify some of the common tasks an addon needs
to perform.
Mostly this is achieved by providing a wrapper around commonly used parts
of :mod:`xbmc`, :mod:`xbmcaddon`, :mod:`xbmcgui` and :mod:`xbmcplugin`.
You probably want to have exactly one instance of this class in your addon
which you can call from anywhere in your code.
Example::
import sys
from t0mm0.common.addon import Addon
addon = Addon('my.plugin.id', argv=sys.argv)
'''
    def __init__(self, addon_id, argv=None):
        '''
        Args:
            addon_id (str): Your addon's id (eg. 'plugin.video.t0mm0.test').

        Kwargs:
            argv (list): List of arguments passed to your addon if applicable
            (eg. sys.argv).
        '''
        self.addon = xbmcaddon.Addon(id=addon_id)
        if argv:
            # argv mirrors sys.argv for a plugin invocation:
            #   [0] plugin:// base URL, [1] window handle, [2] '?query=string'
            self.url = argv[0]
            self.handle = int(argv[1])
            # strip the leading '?' before parsing the query string
            self.queries = self.parse_query(argv[2][1:])
        # NOTE(review): without argv, self.url/self.handle/self.queries are
        # never set, so methods that use them (add_item, resolve_url, ...)
        # would raise AttributeError -- presumably argv is always supplied.
def get_author(self):
'''Returns the addon author as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('author')
def get_changelog(self):
'''Returns the addon changelog.'''
return self.addon.getAddonInfo('changelog')
def get_description(self):
'''Returns the addon description as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('description')
def get_disclaimer(self):
'''Returns the addon disclaimer as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('disclaimer')
def get_fanart(self):
'''Returns the full path to the addon fanart.'''
return self.addon.getAddonInfo('fanart')
def get_icon(self):
'''Returns the full path to the addon icon.'''
return self.addon.getAddonInfo('icon')
def get_id(self):
'''Returns the addon id as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('id')
def get_name(self):
'''Returns the addon name as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('name')
def get_path(self):
'''Returns the full path to the addon directory.'''
return self.addon.getAddonInfo('path')
def get_profile(self):
'''
Returns the full path to the addon profile directory
(useful for storing files needed by the addon such as cookies).
'''
return xbmc.translatePath(self.addon.getAddonInfo('profile'))
def get_stars(self):
'''Returns the number of stars for this addon.'''
return self.addon.getAddonInfo('stars')
def get_summary(self):
'''Returns the addon summary as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('summary')
def get_type(self):
'''
Returns the addon summary as defined in ``addon.xml``
(eg. xbmc.python.pluginsource).
'''
return self.addon.getAddonInfo('type')
def get_version(self):
'''Returns the addon version as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('version')
def get_setting(self, setting):
'''
Returns an addon setting. Settings must be defined in your addon's
``resources/settings.xml`` file.
Args:
setting (str): Name of the setting to be retrieved.
Returns:
str containing the requested setting.
'''
return self.addon.getSetting(setting)
def set_setting(self, setting, value):
'''
Sets an addon setting. Settings must be defined in your addon's
``resources/settings.xml`` file.
Args:
setting (str): Name of the setting to be retrieved.
value (str): Value of the setting
'''
self.addon.setSetting(id=setting, value=value)
def get_string(self, string_id):
'''
Returns a localized string. Strings must be defined in your addon's
``resources/language/[lang_name]/strings.xml`` file.
Args:
string_id (int): id of the translated string to retrieve.
Returns:
str containing the localized requested string.
'''
return self.addon.getLocalizedString(string_id)
def parse_query(self, query, defaults={'mode': 'main'}):
'''
Parse a query string as used in a URL or passed to your addon by XBMC.
Example:
>>> addon.parse_query('name=test&type=basic')
{'mode': 'main', 'name': 'test', 'type': 'basic'}
Args:
query (str): A query string.
Kwargs:
defaults (dict): A dictionary containing key/value pairs parsed
from the query string. If a key is repeated in the query string
its value will be a list containing all of that keys values.
'''
queries = cgi.parse_qs(query)
q = defaults
for key, value in queries.items():
if len(value) == 1:
q[key] = value[0]
else:
q[key] = value
return q
    def build_plugin_url(self, queries):
        '''
        Returns a ``plugin://`` URL which can be used to call the addon with
        the specified queries.

        Example:

        >>> addon.build_plugin_url({'name': 'test', 'type': 'basic'})
        'plugin://your.plugin.id/?name=test&type=basic'

        Args:
            queries (dict): A dictionary of keys/values to be added to the
            ``plugin://`` URL.

        Returns:
            A string containing a fully formed ``plugin://`` URL.
        '''
        out_dict = {}
        for k, v in queries.iteritems():
            if isinstance(v, unicode):
                v = v.encode('utf8')
            elif isinstance(v, str):
                # Must be encoded in UTF-8; the .decode() call is purely a
                # validity check (raises UnicodeDecodeError otherwise) -- the
                # decoded result is deliberately discarded.
                v.decode('utf8')
            out_dict[k] = v
        return self.url + '?' + urllib.urlencode(out_dict)
    def log(self, msg, level=xbmc.LOGNOTICE):
        '''
        Writes a string to the XBMC log file. The addon name is inserted into
        the beginning of the message automatically to help you find relevent
        messages in the log file.

        The available log levels are defined in the :mod:`xbmc` module and are
        currently as follows::

            xbmc.LOGDEBUG = 0
            xbmc.LOGERROR = 4
            xbmc.LOGFATAL = 6
            xbmc.LOGINFO = 1
            xbmc.LOGNONE = 7
            xbmc.LOGNOTICE = 2
            xbmc.LOGSEVERE = 5
            xbmc.LOGWARNING = 3

        Args:
            msg (str or unicode): The message to be written to the log file.

        Kwargs:
            level (int): The XBMC log level to write at.
        '''
        # An earlier ASCII-normalisation step was disabled; kept here for
        # reference in case non-ASCII messages cause trouble again.
        #msg = unicodedata.normalize('NFKD', unicode(msg)).encode('ascii',
        #                                                         'ignore')
        xbmc.log('%s: %s' % (self.get_name(), msg), level)
def log_error(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGERROR`` error level. Use when something has gone wrong in
your addon code. This will show up in the log prefixed with 'ERROR:'
whether you have debugging switched on or not.
'''
self.log(msg, xbmc.LOGERROR)
def log_debug(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGDEBUG`` error level. Use this when you want to print out lots
of detailed information that is only usefull for debugging. This will
show up in the log only when debugging is enabled in the XBMC settings,
and will be prefixed with 'DEBUG:'.
'''
self.log(msg, xbmc.LOGDEBUG)
def log_notice(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGNOTICE`` error level. Use for general log messages. This will
show up in the log prefixed with 'NOTICE:' whether you have debugging
switched on or not.
'''
self.log(msg, xbmc.LOGNOTICE)
def show_ok_dialog(self, msg, title=None, is_error=False):
'''
Display an XBMC dialog with a message and a single 'OK' button. The
message is also written to the XBMC log file at the appropriate log
level.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Example::
addon.show_ok_dialog(['My message'], 'My Addon')
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
Kwargs:
title (str): String to be displayed as the title of the dialog box.
Defaults to the addon name.
is_error (bool): If ``True``, the log message will be written at
the ERROR log level, otherwise NOTICE will be used.
'''
if not title:
title = self.get_name()
log_msg = ' '.join(msg)
while len(msg) < 3:
msg.append('')
if is_error:
self.log_error(log_msg)
else:
self.log_notice(log_msg)
xbmcgui.Dialog().ok(title, msg[0], msg[1], msg[2])
def show_error_dialog(self, msg):
'''
Convenience method to show an XBMC dialog box with a single OK button
and also write the message to the log file at the ERROR log level.
The title of the dialog will be the addon's name with the prefix
'Error: '.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
'''
self.show_ok_dialog(msg, 'Error: %s' % self.get_name(), True)
    def show_small_popup(self, title='', msg='', delay=5000, image=''):
        '''
        Displays a small popup box in the lower right corner. The default delay
        is 5 seconds.

        Code inspired by anarchintosh and daledude's Icefilms addon.

        Example::

            import os
            logo = os.path.join(addon.get_path(), 'art','logo.jpg')
            addon.show_small_popup('MyAddonName','Is now loaded enjoy', 5000, logo)

        Kwargs:
            title (str): title to be displayed at the top of the box

            msg (str): Main message body

            delay (int): delay in milliseconds until it disapears

            image (str): Path to the image you want to display
        '''
        # Notification builtin takes: title, message, display time (ms), icon.
        xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' %
                            (title, msg, delay, image))
def show_countdown(self, time_to_wait, title='', text=''):
'''
Show a countdown dialog with a progress bar for XBMC while delaying
execution. Necessary for some filehosters eg. megaupload
The original version of this code came from Anarchintosh.
Args:
time_to_wait (int): number of seconds to pause for.
Kwargs:
title (str): Displayed in the title of the countdown dialog. Default
is blank.
text (str): A line of text to be displayed in the dialog. Default
is blank.
Returns:
``True`` if countdown is allowed to complete, ``False`` if the
user cancelled the countdown.
'''
dialog = xbmcgui.DialogProgress()
ret = dialog.create(title)
self.log_notice('waiting %d secs' % time_to_wait)
secs = 0
increment = 100 / time_to_wait
cancelled = False
while secs <= time_to_wait:
if (dialog.iscanceled()):
cancelled = True
break
if secs != 0:
xbmc.sleep(1000)
secs_left = time_to_wait - secs
if secs_left == 0:
percent = 100
else:
percent = increment * secs
remaining_display = ('Wait %d seconds for the ' +
'video stream to activate...') % secs_left
dialog.update(percent, text, remaining_display)
secs += 1
if cancelled == True:
self.log_notice('countdown cancelled')
return False
else:
self.log_debug('countdown finished waiting')
return True
def show_settings(self):
'''Shows the settings dialog for this addon.'''
self.addon.openSettings()
    def resolve_url(self, stream_url):
        '''
        Tell XBMC that you have resolved a URL (or not!).

        This method should be called as follows:

        #. The user selects a list item that has previously had ``isPlayable``
           set (this is true for items added with :meth:`add_item`,
           :meth:`add_music_item` or :meth:`add_music_item`)

        #. Your code resolves the item requested by the user to a media URL

        #. Your addon calls this method with the resolved URL

        Args:
            stream_url (str or ``False``): If a string, tell XBMC that the
            media URL has been successfully resolved to stream_url. If
            ``False`` or an empty string tell XBMC the resolving failed and
            pop up an error message.
        '''
        if stream_url:
            self.log_debug('resolved to: %s' % stream_url)
            xbmcplugin.setResolvedUrl(self.handle, True,
                                      xbmcgui.ListItem(path=stream_url))
        else:
            # Resolution failed: tell the user, then hand XBMC an empty item
            # so the pending playback is cancelled cleanly.
            self.show_error_dialog(['sorry, failed to resolve URL :('])
            xbmcplugin.setResolvedUrl(self.handle, False, xbmcgui.ListItem())
    def get_playlist(self, pl_type, new=False):
        '''
        Return a :class:`xbmc.Playlist` object of the specified type.

        The available playlist types are defined in the :mod:`xbmc` module and
        are currently as follows::

            xbmc.PLAYLIST_MUSIC = 0
            xbmc.PLAYLIST_VIDEO = 1

        .. seealso::

            :meth:`get_music_playlist`, :meth:`get_video_playlist`

        Args:
            pl_type (int): The type of playlist to get.

            new (bool): If ``False`` (default), get the current
            :class:`xbmc.Playlist` object of the type specified. If ``True``
            then return a new blank :class:`xbmc.Playlist`.

        Returns:
            A :class:`xbmc.Playlist` object.
        '''
        pl = xbmc.PlayList(pl_type)
        if new:
            # XBMC keeps one playlist per type; "new" just empties it.
            pl.clear()
        return pl
def get_music_playlist(self, new=False):
'''
Convenience method to return a music :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current music
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
music :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
self.get_playlist(xbmc.PLAYLIST_MUSIC, new)
def get_video_playlist(self, new=False):
'''
Convenience method to return a video :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current video
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
video :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
self.get_playlist(xbmc.PLAYLIST_VIDEO, new)
    def add_item(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False, img='',
                 fanart='', resolved=False, total_items=0, playlist=False, item_type='video',
                 is_folder=False):
        '''
        Adds an item to the list of entries to be displayed in XBMC or to a
        playlist.

        Use this method when you want users to be able to select this item to
        start playback of a media file. ``queries`` is a dict that will be sent
        back to the addon when this item is selected::

            add_item({'host': 'youtube.com', 'media_id': 'ABC123XYZ'},
                     {'title': 'A youtube vid'})

        will add a link to::

            plugin://your.plugin.id/?host=youtube.com&media_id=ABC123XYZ

        .. seealso::

            :meth:`add_music_item`, :meth:`add_video_item`,
            :meth:`add_directory`

        Args:
            queries (dict): A set of keys/values to be sent to the addon when
            the user selects this item.

            infolabels (dict): A dictionary of information about this media
            (see the `XBMC Wiki InfoLabels entry
            <http://wiki.xbmc.org/?title=InfoLabels>`_).

        Kwargs:
            properties (dict): A dictionary of properties that can be set on a list item
            (see the `XBMC Wiki InfoLabels entry and locate Property() elements
            <http://wiki.xbmc.org/?title=InfoLabels>`_).

            contextmenu_items (list): A list of contextmenu items

            context_replace (bool): To replace the xbmc default contextmenu items

            img (str): A URL to an image file to be used as an icon for this
            entry.

            fanart (str): A URL to a fanart image for this entry.

            resolved (str): If not empty, ``queries`` will be ignored and
            instead the added item will be the exact contentes of ``resolved``.

            total_items (int): Total number of items to be added in this list.
            If supplied it enables XBMC to show a progress bar as the list of
            items is being built.

            playlist (playlist object): If ``False`` (default), the item will
            be added to the list of entries to be displayed in this directory.
            If a playlist object is passed (see :meth:`get_playlist`) then
            the item will be added to the playlist instead

            item_type (str): The type of item to add (eg. 'music', 'video' or
            'pictures')
        '''
        infolabels = self.unescape_dict(infolabels)
        if not resolved:
            if not is_folder:
                # flag the callback URL so the addon knows to start playback
                queries['play'] = 'True'
            play = self.build_plugin_url(queries)
        else:
            play = resolved
        listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img,
                                    thumbnailImage=img)
        listitem.setInfo(item_type, infolabels)
        # NOTE(review): IsPlayable is set to 'true' even when is_folder is
        # True -- confirm XBMC ignores it for folder items.
        listitem.setProperty('IsPlayable', 'true')
        listitem.setProperty('fanart_image', fanart)
        if properties:
            for prop in properties.items():
                listitem.setProperty(prop[0], prop[1])
        if contextmenu_items:
            listitem.addContextMenuItems(contextmenu_items, replaceItems=context_replace)
        if playlist is not False:
            # 'is not False' so an empty (falsy) playlist object still counts
            self.log_debug('adding item: %s - %s to playlist' % \
                                            (infolabels['title'], play))
            playlist.add(play, listitem)
        else:
            self.log_debug('adding item: %s - %s' % (infolabels['title'], play))
            xbmcplugin.addDirectoryItem(self.handle, play, listitem,
                                        isFolder=is_folder,
                                        totalItems=total_items)
def add_video_item(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False,
img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a video item to the directory list or a
playlist.
See :meth:`add_item` for full infomation
'''
self.add_item(queries, infolabels, properties, contextmenu_items, context_replace, img, fanart,
resolved, total_items, playlist, item_type='video')
def add_music_item(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False,
img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a music item to the directory list or a
playlist.
See :meth:`add_item` for full infomation
'''
self.add_item(queries, infolabels, properties, contextmenu_items, img, context_replace, fanart,
resolved, total_items, playlist, item_type='music')
def add_directory(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False,
img='', fanart='', total_items=0, is_folder=True):
'''
Convenience method to add a directory to the display list or a
playlist.
See :meth:`add_item` for full infomation
'''
self.add_item(queries, infolabels, properties, contextmenu_items, context_replace, img, fanart,
total_items=total_items, resolved=self.build_plugin_url(queries),
is_folder=is_folder)
def end_of_directory(self):
    '''Signal to XBMC that no more items will be added to this directory.'''
    xbmcplugin.endOfDirectory(self.handle)
def _decode_callback(self, matches):
    '''
    Regex-substitution callback used by :meth:`decode`.

    Converts a matched numeric HTML entity (e.g. ``&#65;``) into the
    corresponding unicode character.  If the code point is invalid the
    matched digits are returned unchanged.
    '''
    # Renamed from ``id`` to avoid shadowing the builtin, and the bare
    # ``except`` narrowed to the errors unichr()/int() can actually raise.
    code_point = matches.group(1)
    try:
        return unichr(int(code_point))
    except (ValueError, OverflowError):
        return code_point
def decode(self, data):
    '''
    Convert numeric HTML entities such as ``&#044;`` to the characters
    they represent.  It is called by :meth:`unescape` and so it is not
    required to call it directly.

    This method was found `on the web <http://stackoverflow.com/questions/1208916/decoding-html-entities-with-python/1208931#1208931>`_

    Args:
        data (str): String to be cleaned.
    Returns:
        Cleaned string, stripped of leading/trailing whitespace.
    '''
    # Raw string literal: "\d" in a plain string is an invalid escape
    # sequence (a DeprecationWarning on modern Pythons).
    return re.sub(r"&#(\d+)(;|(?=\s))", self._decode_callback, data).strip()
def unescape(self, text):
    '''
    Decodes HTML entities in a string.

    You can add more entities to the ``rep`` dictionary.

    Args:
        text (str): String to be cleaned.
    Returns:
        Cleaned string.
    '''
    try:
        text = self.decode(text)
        # BUG FIX: the mapping previously contained identity pairs
        # (e.g. '<' mapped to '<'), apparently the entity names were
        # lost, which made every replacement a no-op.  The named
        # entities below restore the documented behaviour.
        rep = {'&lt;': '<',
               '&gt;': '>',
               '&quot;': '"',
               '&rsquo;': '\'',
               '&acute;': '\'',
               }
        for s, r in rep.items():
            text = text.replace(s, r)
        # this has to be last so it does not corrupt the entities above:
        text = text.replace('&amp;', '&')
    # we don't want to fiddle with non-string types
    except TypeError:
        pass
    return text
def unescape_dict(self, d):
    '''
    Run :meth:`unescape` over every value of a dictionary.

    Args:
        d (dict): A dictionary containing string values.
    Returns:
        A new dictionary with HTML entities removed from the values.
    '''
    return dict((key, self.unescape(value)) for key, value in d.items())
def save_data(self, filename, data):
    '''
    Saves the data structure using pickle. If the addon data path does
    not exist it will be automatically created. This save function has
    the same restrictions as the pickle module.

    Args:
        filename (string): name of the file you want to save data to. This
            file will be saved in your addon's profile directory.
        data: picklable object you want to save.
    Returns:
        True on success
        False on failure
    '''
    profile_path = self.get_profile()
    try:
        os.makedirs(profile_path)
    except OSError:
        # Directory already exists (or cannot be created -- in that
        # case the open() below will fail and report it).
        pass
    save_path = os.path.join(profile_path, filename)
    try:
        # BUG FIX: the file handle was never closed; ``with`` closes it
        # deterministically even if pickling fails.
        with open(save_path, 'wb') as save_file:
            pickle.dump(data, save_file)
        return True
    except pickle.PickleError:
        return False
def load_data(self, filename):
    '''
    Load the data that was saved with save_data() and returns the
    data structure.

    Args:
        filename (string): Name of the file you want to load data from. This
            file will be loaded from your addons profile directory.
    Returns:
        Data structure on success
        False on failure
    '''
    profile_path = self.get_profile()
    load_path = os.path.join(profile_path, filename)
    # BUG FIX: removed a stray Python-2-only ``print profile_path``
    # debug statement that was left in here.
    if not os.path.isfile(load_path):
        self.log_debug('%s does not exist' % load_path)
        return False
    try:
        # Open explicitly in binary mode (pickle data is binary) and
        # close the handle deterministically; the original used the
        # default text mode and relied on the garbage collector.
        with open(load_path, 'rb') as load_file:
            return pickle.load(load_file)
    except (pickle.PickleError, EOFError, AttributeError,
            ImportError, IndexError):
        # The documented contract is "False on failure", so unpickling
        # problems are reported instead of raised.
        return False
--
-- Test cube datatype
--
CREATE EXTENSION cube;
-- Check whether any of our opclasses fail amvalidate
-- (OIDs >= 16384 are non-system objects, i.e. the ones this extension
-- just created)
SELECT amname, opcname
FROM pg_opclass opc LEFT JOIN pg_am am ON am.oid = opcmethod
WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid);
--
-- testing the input and output functions
--
-- Any number (a one-dimensional point)
SELECT '1'::cube AS cube;
SELECT '-1'::cube AS cube;
SELECT '1.'::cube AS cube;
SELECT '-1.'::cube AS cube;
SELECT '.1'::cube AS cube;
SELECT '-.1'::cube AS cube;
SELECT '1.0'::cube AS cube;
SELECT '-1.0'::cube AS cube;
-- special float values are accepted as coordinates
SELECT 'infinity'::cube AS cube;
SELECT '-infinity'::cube AS cube;
SELECT 'NaN'::cube AS cube;
SELECT '.1234567890123456'::cube AS cube;
SELECT '+.1234567890123456'::cube AS cube;
SELECT '-.1234567890123456'::cube AS cube;
-- simple lists (points)
SELECT '()'::cube AS cube;
SELECT '1,2'::cube AS cube;
SELECT '(1,2)'::cube AS cube;
SELECT '1,2,3,4,5'::cube AS cube;
SELECT '(1,2,3,4,5)'::cube AS cube;
-- double lists (cubes)
SELECT '(),()'::cube AS cube;
SELECT '(0),(0)'::cube AS cube;
SELECT '(0),(1)'::cube AS cube;
SELECT '[(0),(0)]'::cube AS cube;
SELECT '[(0),(1)]'::cube AS cube;
SELECT '(0,0,0,0),(0,0,0,0)'::cube AS cube;
SELECT '(0,0,0,0),(1,0,0,0)'::cube AS cube;
SELECT '[(0,0,0,0),(0,0,0,0)]'::cube AS cube;
SELECT '[(0,0,0,0),(1,0,0,0)]'::cube AS cube;
-- invalid input: parse errors
SELECT ''::cube AS cube;
SELECT 'ABC'::cube AS cube;
SELECT '[]'::cube AS cube;
SELECT '[()]'::cube AS cube;
SELECT '[(1)]'::cube AS cube;
SELECT '[(1),]'::cube AS cube;
SELECT '[(1),2]'::cube AS cube;
SELECT '[(1),(2),(3)]'::cube AS cube;
SELECT '1,'::cube AS cube;
SELECT '1,2,'::cube AS cube;
SELECT '1,,2'::cube AS cube;
SELECT '(1,)'::cube AS cube;
SELECT '(1,2,)'::cube AS cube;
SELECT '(1,,2)'::cube AS cube;
-- invalid input: semantic errors and trailing garbage
SELECT '[(1),(2)],'::cube AS cube; -- 0
SELECT '[(1,2,3),(2,3)]'::cube AS cube; -- 1
SELECT '[(1,2),(1,2,3)]'::cube AS cube; -- 1
SELECT '(1),(2),'::cube AS cube; -- 2
SELECT '(1,2,3),(2,3)'::cube AS cube; -- 3
SELECT '(1,2),(1,2,3)'::cube AS cube; -- 3
SELECT '(1,2,3)ab'::cube AS cube; -- 4
SELECT '(1,2,3)a'::cube AS cube; -- 5
SELECT '(1,2)('::cube AS cube; -- 5
SELECT '1,2ab'::cube AS cube; -- 6
SELECT '1 e7'::cube AS cube; -- 6
SELECT '1,2a'::cube AS cube; -- 7
SELECT '1..2'::cube AS cube; -- 7
SELECT '-1e-700'::cube AS cube; -- out of range
-- Also try it with non-error-throwing API
SELECT pg_input_is_valid('(1,2)', 'cube');
SELECT pg_input_is_valid('[(1),]', 'cube');
SELECT pg_input_is_valid('-1e-700', 'cube');
SELECT * FROM pg_input_error_info('-1e-700', 'cube');
--
-- Testing building cubes from float8 values
--
SELECT cube(0::float8);
SELECT cube(1::float8);
SELECT cube(1,2);
-- cube(cube, float8[, float8]) appends dimensions to an existing cube
SELECT cube(cube(1,2),3);
SELECT cube(cube(1,2),3,4);
SELECT cube(cube(cube(1,2),3,4),5);
SELECT cube(cube(cube(1,2),3,4),5,6);
--
-- Test that the text -> cube cast was installed.
--
SELECT '(0)'::text::cube;
--
-- Test the float[] -> cube cast
--
SELECT cube('{0,1,2}'::float[], '{3,4,5}'::float[]);
SELECT cube('{0,1,2}'::float[], '{3}'::float[]);
SELECT cube(NULL::float[], '{3}'::float[]);
SELECT cube('{0,1,2}'::float[]);
-- cube_subset picks (possibly repeated) dimensions by 1-based index
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
-- test for limits: this should pass
SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,100)));
-- and this should fail
SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,101)));
--
-- Test point processing
--
-- (the trailing comment names the C entry point being exercised)
SELECT cube('(1,2),(1,2)'); -- cube_in
SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
SELECT cube(1.37); -- cube_f8
SELECT cube(1.37, 1.37); -- cube_f8_f8
SELECT cube(cube(1,1), 42); -- cube_c_f8
SELECT cube(cube(1,2), 42); -- cube_c_f8
SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
-- create too big cube from literal
select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
-- from an array
select cube(array(SELECT 0 as a FROM generate_series(1,101)));
select cube(array(SELECT 0 as a FROM generate_series(1,101)),array(SELECT 0 as a FROM generate_series(1,101)));
-- extend cube beyond limit
-- this should work
select cube(array(SELECT 0 as a FROM generate_series(1,100)));
select cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100)));
-- this should fail
-- (appending a dimension to a cube already at the limit)
select cube(cube(array(SELECT 0 as a FROM generate_series(1,100))), 0);
select cube(cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))), 0, 0);
--
-- testing the operators
--
-- equality/inequality:
--
SELECT '24, 33.20'::cube = '24, 33.20'::cube AS bool;
SELECT '24, 33.20'::cube != '24, 33.20'::cube AS bool;
SELECT '24, 33.20'::cube = '24, 33.21'::cube AS bool;
SELECT '24, 33.20'::cube != '24, 33.21'::cube AS bool;
-- comparisons across different dimensionalities:
SELECT '(2,0),(3,1)'::cube = '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool;
SELECT '(2,0),(3,1)'::cube = '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool;
-- "lower than" / "greater than"
-- (these operators are not useful for anything but ordering)
--
SELECT '1'::cube > '2'::cube AS bool;
SELECT '1'::cube < '2'::cube AS bool;
SELECT '1,1'::cube > '1,2'::cube AS bool;
SELECT '1,1'::cube < '1,2'::cube AS bool;
SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool;
SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool;
SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,1),(3,1,0,0,0)'::cube AS bool;
SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,1),(3,1,0,0,0)'::cube AS bool;
SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool;
SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool;
SELECT '(2,0,0,0,0),(3,1,0,0,1)'::cube > '(2,0),(3,1)'::cube AS bool;
SELECT '(2,0,0,0,0),(3,1,0,0,1)'::cube < '(2,0),(3,1)'::cube AS bool;
SELECT '(2,0,0,0,1),(3,1,0,0,0)'::cube > '(2,0),(3,1)'::cube AS bool;
SELECT '(2,0,0,0,1),(3,1,0,0,0)'::cube < '(2,0),(3,1)'::cube AS bool;
SELECT '(2,0,0,0,0),(3,1,0,0,0)'::cube > '(2,0),(3,1)'::cube AS bool;
SELECT '(2,0,0,0,0),(3,1,0,0,0)'::cube < '(2,0),(3,1)'::cube AS bool;
-- "overlap"
--
SELECT '1'::cube && '1'::cube AS bool;
SELECT '1'::cube && '2'::cube AS bool;
SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '0'::cube AS bool;
SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '1'::cube AS bool;
SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '1,1,1'::cube AS bool;
SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(1,1,1),(2,2,2)]'::cube AS bool;
SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(1,1),(2,2)]'::cube AS bool;
SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(2,1,1),(2,2,2)]'::cube AS bool;
-- "contained in" (the left operand is the cube entirely enclosed by
-- the right operand):
--
SELECT '0'::cube <@ '0'::cube AS bool;
SELECT '0,0,0'::cube <@ '0,0,0'::cube AS bool;
SELECT '0,0'::cube <@ '0,0,1'::cube AS bool;
SELECT '0,0,0'::cube <@ '0,0,1'::cube AS bool;
SELECT '1,0,0'::cube <@ '0,0,1'::cube AS bool;
SELECT '(1,0,0),(0,0,1)'::cube <@ '(1,0,0),(0,0,1)'::cube AS bool;
SELECT '(1,0,0),(0,0,1)'::cube <@ '(-1,-1,-1),(1,1,1)'::cube AS bool;
SELECT '(1,0,0),(0,0,1)'::cube <@ '(-1,-1,-1,-1),(1,1,1,1)'::cube AS bool;
SELECT '0'::cube <@ '(-1),(1)'::cube AS bool;
SELECT '1'::cube <@ '(-1),(1)'::cube AS bool;
SELECT '-1'::cube <@ '(-1),(1)'::cube AS bool;
SELECT '(-1),(1)'::cube <@ '(-1),(1)'::cube AS bool;
SELECT '(-1),(1)'::cube <@ '(-1,-1),(1,1)'::cube AS bool;
SELECT '(-2),(1)'::cube <@ '(-1),(1)'::cube AS bool;
SELECT '(-2),(1)'::cube <@ '(-1,-1),(1,1)'::cube AS bool;
-- "contains" (the left operand is the cube that entirely encloses the
-- right operand)
--
SELECT '0'::cube @> '0'::cube AS bool;
SELECT '0,0,0'::cube @> '0,0,0'::cube AS bool;
SELECT '0,0,1'::cube @> '0,0'::cube AS bool;
SELECT '0,0,1'::cube @> '0,0,0'::cube AS bool;
SELECT '0,0,1'::cube @> '1,0,0'::cube AS bool;
SELECT '(1,0,0),(0,0,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool;
SELECT '(-1,-1,-1),(1,1,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool;
SELECT '(-1,-1,-1,-1),(1,1,1,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool;
SELECT '(-1),(1)'::cube @> '0'::cube AS bool;
SELECT '(-1),(1)'::cube @> '1'::cube AS bool;
SELECT '(-1),(1)'::cube @> '-1'::cube AS bool;
SELECT '(-1),(1)'::cube @> '(-1),(1)'::cube AS bool;
SELECT '(-1,-1),(1,1)'::cube @> '(-1),(1)'::cube AS bool;
SELECT '(-1),(1)'::cube @> '(-2),(1)'::cube AS bool;
SELECT '(-1,-1),(1,1)'::cube @> '(-2),(1)'::cube AS bool;
-- Test of distance function
--
SELECT cube_distance('(0)'::cube,'(2,2,2,2)'::cube);
SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
-- Test of cube function (text to cube)
--
SELECT cube('(1,1.2)'::text);
SELECT cube(NULL);
-- Test of cube_dim function (dimensions stored in cube)
--
SELECT cube_dim('(0)'::cube);
SELECT cube_dim('(0,0)'::cube);
SELECT cube_dim('(0,0,0)'::cube);
SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
-- Test of cube_ll_coord function (retrieves LL coordinate values)
-- (requesting a coordinate beyond the cube's dimensionality is included)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
SELECT cube_ll_coord('(42,137)'::cube, 1);
SELECT cube_ll_coord('(42,137)'::cube, 2);
SELECT cube_ll_coord('(42,137)'::cube, 3);
-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
SELECT cube_ur_coord('(42,137)'::cube, 1);
SELECT cube_ur_coord('(42,137)'::cube, 2);
SELECT cube_ur_coord('(42,137)'::cube, 3);
-- Test of cube_is_point
--
SELECT cube_is_point('(0)'::cube);
SELECT cube_is_point('(0,1,2)'::cube);
SELECT cube_is_point('(0,1,2),(0,1,2)'::cube);
SELECT cube_is_point('(0,1,2),(-1,1,2)'::cube);
SELECT cube_is_point('(0,1,2),(0,-1,2)'::cube);
SELECT cube_is_point('(0,1,2),(0,1,-2)'::cube);
-- Test of cube_enlarge (enlarging and shrinking cubes)
-- (arguments are: cube, radius delta, target dimensionality)
--
SELECT cube_enlarge('(0)'::cube, 0, 0);
SELECT cube_enlarge('(0)'::cube, 0, 1);
SELECT cube_enlarge('(0)'::cube, 0, 2);
SELECT cube_enlarge('(2),(-2)'::cube, 0, 4);
SELECT cube_enlarge('(0)'::cube, 1, 0);
SELECT cube_enlarge('(0)'::cube, 1, 1);
SELECT cube_enlarge('(0)'::cube, 1, 2);
SELECT cube_enlarge('(2),(-2)'::cube, 1, 4);
SELECT cube_enlarge('(0)'::cube, -1, 0);
SELECT cube_enlarge('(0)'::cube, -1, 1);
SELECT cube_enlarge('(0)'::cube, -1, 2);
SELECT cube_enlarge('(2),(-2)'::cube, -1, 4);
SELECT cube_enlarge('(0,0,0)'::cube, 1, 0);
SELECT cube_enlarge('(0,0,0)'::cube, 1, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 1, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 3, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -1, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
-- Test of cube_union (MBR for two cubes)
--
SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
-- Test of cube_inter
--
SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
-- Test of cube_size
--
SELECT cube_size('(4,8),(15,16)'::cube);
SELECT cube_size('(42,137)'::cube);
-- Test of distances (euclidean distance may not be bit-exact)
--
SET extra_float_digits = 0;
SELECT cube_distance('(1,1)'::cube, '(4,5)'::cube);
SELECT '(1,1)'::cube <-> '(4,5)'::cube as d_e;
RESET extra_float_digits;
SELECT distance_chebyshev('(1,1)'::cube, '(4,5)'::cube);
SELECT '(1,1)'::cube <=> '(4,5)'::cube as d_c;
SELECT distance_taxicab('(1,1)'::cube, '(4,5)'::cube);
SELECT '(1,1)'::cube <#> '(4,5)'::cube as d_t;
-- zero for overlapping
SELECT cube_distance('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube);
SELECT distance_chebyshev('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube);
SELECT distance_taxicab('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube);
-- coordinate access
-- (negative / out-of-range indexes exercise the error paths)
SELECT cube(array[10,20,30], array[40,50,60])->1;
SELECT cube(array[40,50,60], array[10,20,30])->1;
SELECT cube(array[10,20,30], array[40,50,60])->6;
SELECT cube(array[10,20,30], array[40,50,60])->0;
SELECT cube(array[10,20,30], array[40,50,60])->7;
SELECT cube(array[10,20,30], array[40,50,60])->-1;
SELECT cube(array[10,20,30], array[40,50,60])->-6;
SELECT cube(array[10,20,30])->3;
SELECT cube(array[10,20,30])->6;
SELECT cube(array[10,20,30])->-6;
-- "normalized" coordinate access
SELECT cube(array[10,20,30], array[40,50,60])~>1;
SELECT cube(array[40,50,60], array[10,20,30])~>1;
SELECT cube(array[10,20,30], array[40,50,60])~>2;
SELECT cube(array[40,50,60], array[10,20,30])~>2;
SELECT cube(array[10,20,30], array[40,50,60])~>3;
SELECT cube(array[40,50,60], array[10,20,30])~>3;
SELECT cube(array[40,50,60], array[10,20,30])~>0;
SELECT cube(array[40,50,60], array[10,20,30])~>4;
SELECT cube(array[40,50,60], array[10,20,30])~>(-1);
-- Load some example data and build the index
--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
-- Test sorting
SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
-- Test index-only scans
SET enable_bitmapscan = false;
EXPLAIN (COSTS OFF)
SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c;
SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c;
RESET enable_bitmapscan;
-- Test kNN
INSERT INTO test_cube VALUES ('(1,1)'), ('(100000)'), ('(0, 100000)'); -- Some corner cases
SET enable_seqscan = false;
-- Test different metrics
SET extra_float_digits = 0;
SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
RESET extra_float_digits;
SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5;
-- Test sorting by coordinates
SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound
SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound
SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound
SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound
SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound
SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound
SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound
SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound
-- Same queries with sequential scan (should give the same results as above)
RESET enable_seqscan;
SET enable_indexscan = OFF;
SET extra_float_digits = 0;
SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
RESET extra_float_digits;
SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5;
SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound
SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound
SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound
SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound
SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound
SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound
SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound
SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound
RESET enable_indexscan;
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "AssertEqualsCheck.h"
#include "llvm/ADT/StringMap.h"
#include <string>
using namespace clang::ast_matchers;
namespace clang::tidy::objc {
// Mapping from `XCTAssert*Equal` macro name to the `XCTAssert*EqualObjects`
// replacement suggested when the operands are NSString pointers.
static const llvm::StringMap<StringRef> NameMap{
    {"XCTAssertEqual", "XCTAssertEqualObjects"},
    {"XCTAssertNotEqual", "XCTAssertNotEqualObjects"},
};
void AssertEqualsCheck::registerMatchers(MatchFinder *Finder) {
  // Register one matcher per XCTAssert*Equal macro: a ==/!= binary
  // operator expanded from that macro where either operand's canonical
  // type is NSString*.
  auto HasNSStringType =
      hasType(qualType(hasCanonicalType(asString("NSString *"))));
  for (const auto &Entry : NameMap) {
    StringRef MacroName = Entry.getKey();
    Finder->addMatcher(
        binaryOperator(anyOf(hasOperatorName("!="), hasOperatorName("==")),
                       isExpandedFromMacro(std::string(MacroName)),
                       anyOf(hasLHS(HasNSStringType),
                             hasRHS(HasNSStringType)))
            .bind(MacroName),
        this);
  }
}
void AssertEqualsCheck::check(
    const ast_matchers::MatchFinder::MatchResult &Result) {
  // Try every registered macro name; only the one whose matcher fired
  // will have a bound node.
  for (const auto &[CurrName, TargetName] : NameMap) {
    if (const auto *Root = Result.Nodes.getNodeAs<BinaryOperator>(CurrName)) {
      const SourceManager *Sm = Result.SourceManager;
      // The macros are nested two levels, so going up twice.
      auto MacroCallsite = Sm->getImmediateMacroCallerLoc(
          Sm->getImmediateMacroCallerLoc(Root->getBeginLoc()));
      // Replace only the macro name itself (CurrName.size() characters
      // starting at the call site) with the *EqualObjects variant.
      diag(MacroCallsite,
           (Twine("use ") + TargetName + " for comparing objects").str())
          << FixItHint::CreateReplacement(
                 clang::CharSourceRange::getCharRange(
                     MacroCallsite,
                     MacroCallsite.getLocWithOffset(CurrName.size())),
                 TargetName);
    }
  }
}
} // namespace clang::tidy::objc | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/objc/AssertEqualsCheck.cpp |
""" Peripheral On Demand global define
"""
import periphondemand
# Global configuration
POD_CONFIG = "~/.podrc"
POD_PATH = periphondemand.__path__[0]
PLATFORMPATH = "/platforms"
BUSPATH = "/busses/"
TEMPLATESPATH = "/templates"
TOOLCHAINPATH = "/toolchains"
SIMULATIONPATH = "/simulation"
SYNTHESISPATH = "/synthesis"
DRIVERSPATH = "/drivers"

# File extensions
TCLEXT = ".tcl"
ARCHIVEEXT = ".zip"
XMLEXT = ".xml"
VHDLEXT = ".vhd"
UCFEXT = ".ucf"
BITSTREAMEXT = ".bit"
PODSCRIPTEXT = ".pod"
HDLEXT = ["vhdl", "vhd", "v"]

# For components
LIBRARYPATH = "/library"
COMPONENTSPATH = "/components"
HDLDIR = "hdl"
DRIVERS_TEMPLATES_PATH = "/drivers_templates"

# For projects
BINARYPROJECTPATH = "/binaries"
OBJSPATH = "/objs"
BINARY_PREFIX = "top_"
BINARY_SUFFIX = ".bit"

# Template
HEADERTPL = "/headervhdl.tpl"

# ANSI/VT100 SGR escape sequences (see the ECMA-48 SGR reference):
# "3x" selects the foreground colour; ";1" = bold, ";3" = italic,
# ";7" = reverse video (swap foreground/background).
# NOTE: the previous comments here mislabelled several codes.
COLOR_DEBUG = "\033[32;7m"            # reverse-video green
COLOR_ERROR = "\033[31;7m"            # reverse-video red
COLOR_ERROR_MESSAGE = "\033[31;1m"    # bold red
COLOR_WARNING = "\033[32;7m"          # reverse-video green (same as DEBUG)
COLOR_WARNING_MESSAGE = "\033[32;1m"  # bold green
COLOR_INFO = "\033[34;7m"             # reverse-video blue
COLOR_INFO_MESSAGE = "\033[34;1m"     # bold blue
COLOR_SHELL = "\033[33;3m"            # italic yellow
COLOR_END = "\033[0m"                 # reset all attributes
import numpy as np
import pylab as p
def mvr(assembly):
    """Sweep pod Mach number and record tube/capsule radii.

    For Mach 0.781 to 0.95 (step 0.01) the assembly is re-run and the
    pod's inlet-back outer radius and the flow limit's tube radius are
    collected.

    Returns:
        (machs, tube_r, capsule_r) lists, one entry per Mach number.
    """
    machs = []
    tube_r = []
    capsule_r = []
    for mach in np.arange(.781, .95, .01):
        # BUG FIX: this loop previously mutated and ran the global
        # ``hl`` object instead of the ``assembly`` argument.
        assembly.Mach_pod_max = mach
        assembly.run()
        machs.append(mach)
        tube_r.append(assembly.pod.radius_inlet_back_outer)
        capsule_r.append(assembly.flow_limit.radius_tube)
    # Parenthesized single-argument print works under Python 2 and 3.
    print(machs)
    print(tube_r)
    print(capsule_r)
    return machs, tube_r, capsule_r
def mva(assembly):
    """Sweep pod Mach number and record tube/pod areas.

    Returns:
        (machs, tubeA, podA, areaR) lists, one entry per Mach number;
        areaR is the tube/pod area ratio.
    """
    machs = []
    tubeA = []
    podA = []
    areaR = []  # area ratio --> tube / pod
    for mach in np.arange(.7, .95, .01):
        assembly.Mach_pod_max = mach
        assembly.run()
        machs.append(mach)
        tubeA.append(assembly.flow_limit._tube_area)
        podA.append(assembly.flow_limit._inlet_area)
        areaR.append(assembly.flow_limit._tube_area / assembly.flow_limit._inlet_area)
    # Parenthesized single-argument print works under Python 2 and 3
    # (the Python-2-only print statements broke Py3 compilation).
    print(machs)
    print(tubeA)
    print(podA)
    print(areaR)
    return machs, tubeA, podA, areaR
def mvb(assembly):
    """Sweep pod Mach number and record mission energy/power/time.

    Returns:
        (machs, batt, compE, timeT) lists, one entry per Mach number.
    """
    machs = []
    batt = []
    compE = []
    timeT = []
    for mach in np.arange(.7, .95, .01):
        assembly.Mach_pod_max = mach
        assembly.run()
        machs.append(mach)
        batt.append(assembly.mission.energy)
        compE.append(assembly.mission.pwr_req)
        timeT.append(assembly.mission.time)
    # Parenthesized single-argument print works under Python 2 and 3
    # (the Python-2-only print statements broke Py3 compilation).
    print(machs)
    print(batt)
    print(compE)
    print(timeT)
    return machs, batt, compE, timeT
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Infrastructure Toolkit}.
"""
import sys
import argparse
from entropy.i18n import _
from _entropy.eit.commands.descriptor import EitCommandDescriptor
from _entropy.eit.commands.commit import EitCommit
class EitRepack(EitCommit):
    """
    Main Eit repack command.

    Rebuilds an already-available package through the Source Package
    Manager by reusing EitCommit's machinery with a pre-seeded
    repackage list.
    """

    NAME = "repack"
    ALIASES = ["rp"]
    # User-facing help text shown by the eit command-line tool.
    # BUG FIX: corrected the "Recrate" typo.
    INTRODUCTION = """\
Recreate the whole Entropy package from live system through
the Source Package Manager. This allows the latter to regenerate
its metadata (useful in case of dependency changes).
The package must be already available in the queried repository.
"""
    SEE_ALSO = "eit-add(1), eit-commit(1)"

    def _get_parser(self):
        """ Overridden from EitCommit """
        descriptor = EitCommandDescriptor.obtain_descriptor(
            EitRepack.NAME)
        parser = argparse.ArgumentParser(
            description=descriptor.get_description(),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            prog="%s %s" % (sys.argv[0], EitRepack.NAME))
        parser.add_argument("packages", nargs='+', metavar="<package>",
                            help=_("package names"))
        # "in" is a Python keyword, so the parsed value is stored under
        # the attribute name "into".
        parser.add_argument("--in", metavar="<repository>",
                            help=_("repack to given repository"),
                            default=None, dest="into")
        return parser

    def parse(self):
        """ Overridden from EitCommit """
        parser = self._get_parser()
        try:
            nsargs = parser.parse_args(self._args)
        except IOError as err:
            sys.stderr.write("%s\n" % (err,))
            return parser.print_help, []
        # Seed the package list before delegating to EitCommit's
        # commit implementation.
        self._repackage = nsargs.packages[:]
        return self._call_exclusive, [self._commit, nsargs.into]
# Register the command descriptor so the eit dispatcher can discover
# the "repack" command by name.
EitCommandDescriptor.register(
    EitCommandDescriptor(
        EitRepack,
        EitRepack.NAME,
        _('rebuild packages in repository'))
)
from distutils.core import setup, Extension
import os, sys
XEN_ROOT = "../.."

extra_compile_args = ["-fno-strict-aliasing", "-Werror"]

# Locations of the in-tree Xen headers/libraries the extensions build
# against (relative to this setup.py).
PATH_XEN = XEN_ROOT + "/tools/include"
PATH_LIBXC = XEN_ROOT + "/tools/libxc"
PATH_LIBXL = XEN_ROOT + "/tools/libxl"
PATH_XENSTORE = XEN_ROOT + "/tools/xenstore"

# Low-level binding to libxenctrl/libxenguest (domain control).
xc = Extension("xc",
               extra_compile_args=extra_compile_args,
               include_dirs=[PATH_XEN, PATH_LIBXC + "/include",
                             "xen/lowlevel/xc"],
               library_dirs=[PATH_LIBXC],
               libraries=["xenctrl", "xenguest"],
               depends=[PATH_LIBXC + "/libxenctrl.so",
                        PATH_LIBXC + "/libxenguest.so"],
               sources=["xen/lowlevel/xc/xc.c"])

# Binding to the xenstore client library.
xs = Extension("xs",
               extra_compile_args=extra_compile_args,
               include_dirs=[PATH_XEN, PATH_XENSTORE + "/include",
                             "xen/lowlevel/xs"],
               library_dirs=[PATH_XENSTORE],
               libraries=["xenstore"],
               depends=[PATH_XENSTORE + "/libxenstore.so"],
               sources=["xen/lowlevel/xs/xs.c"])

# Binding to libxenlight.  Defined but deliberately not built by
# default; append ``xl`` to ``modules`` below to enable it.
xl = Extension("xl",
               extra_compile_args=extra_compile_args,
               include_dirs=[PATH_XEN, PATH_LIBXL, PATH_LIBXC + "/include",
                             "xen/lowlevel/xl"],
               library_dirs=[PATH_LIBXL],
               libraries=["xenlight"],
               depends=[PATH_LIBXL + "/libxenlight.so"],
               sources=["xen/lowlevel/xl/xl.c",
                        "xen/lowlevel/xl/_pyxl_types.c"])

# NOTE: removed the unused ``plat = os.uname()[0]`` variable; nothing
# in this script was platform-conditional.
modules = [xc, xs]

setup(name='xen',
      version='3.0',
      description='Xen',
      packages=['xen',
                'xen.lowlevel',
                ],
      ext_package="xen.lowlevel",
      ext_modules=modules
      )
# -*- coding: utf-8 -*-
"""
图的抽象数据类型
ADT Graph:
Graph(self) # 构造一个新图
is_empty(self) # 是否为空图
vertex_num(self) # 顶点数
edge_num(self) # 边数
vertices(self) # 所有顶点的集合
edges(self) # 所有边的集合
add_vertex(self, vertex) # 添加一个顶点
add_edge(self, v1, v2) # 添加一条边
get_edge(self, v1, v2) # 获得v1到v2的路径信息
out_edge(self, v) # 获得从v出发的所有边
degree(self, v) 检查v的度
"""
class GraphError(ValueError):
    """Raised for invalid graph operations: a bad vertex index, a
    non-square adjacency matrix, or adding an edge to an empty graph."""
    pass
class Graph(object):
    """Graph stored as sorted adjacency lists.

    Built from an adjacency matrix but stored as one list per vertex,
    each a sorted list of ``(vj, val)`` tuples (destination vertex,
    edge weight).
    """

    def __init__(self, mat=None, unconn=0):
        """Build the adjacency lists from an adjacency matrix.

        Supports starting from an empty graph (``mat`` omitted).

        Args:
            mat: square adjacency matrix (list of rows); entries equal
                to ``unconn`` mean "no edge".  NOTE: row ordering is
                assumed to be index-ordered and is not validated.
            unconn: sentinel value marking a missing connection.

        Raises:
            GraphError: if ``mat`` is not a square matrix.
        """
        super(Graph, self).__init__()
        # BUG FIX: ``mat=[]`` was a mutable default argument; use None
        # as the sentinel instead.
        if mat is None:
            mat = []
        vnum = len(mat)  # number of vertices
        # The adjacency matrix must be square.
        for x in mat:
            if len(x) != vnum:
                raise GraphError(u'mat参数不是方阵')
        # Build the ordered adjacency lists, e.g.
        # [[(1, 0)], [(2, 1), (2, 2)], []]: one sorted sub-list per
        # vertex, each tuple being (destination vertex, weight).
        self._mat = [Graph._out_edges(x, unconn) for x in mat]
        self._vnum = vnum
        self._unconn = unconn

    def add_vertex(self):
        """Append a new isolated vertex and return its index."""
        self._mat.append([])
        self._vnum += 1
        return self._vnum - 1

    def vertex_num(self):
        """Return the number of vertices."""
        return self._vnum

    def add_edge(self, vi, vj, val=1):
        """Add (or update) the edge vi -> vj with weight ``val``.

        Raises:
            GraphError: if the graph is empty or an index is invalid.
        """
        if self._vnum == 0:
            raise GraphError(u'无法为空图添加边,请先使用add_vertex()添加边')
        if self._invalid(vi) or self._invalid(vj):
            raise GraphError(u'边值不合法:', (vi, vj))
        row = self._mat[vi]
        i = 0
        while i < len(row):
            if row[i][0] == vj:
                # Edge already present: replace its weight.
                self._mat[vi][i] = (vj, val)
                return
            # The list is sorted by destination vertex, so once
            # row[i][0] exceeds vj the edge cannot exist yet.
            if row[i][0] > vj:
                break
            i += 1
        self._mat[vi].insert(i, (vj, val))

    def _invalid(self, v):
        """Return True when vertex index ``v`` is out of range."""
        return v < 0 or v >= self._vnum

    def get_edge(self, vi, vj):
        """Return the weight of edge vi -> vj, or ``unconn`` if absent."""
        if self._invalid(vi) or self._invalid(vj):
            raise GraphError(u'边值不合法:', (vi, vj))
        for i, val in self._mat[vi]:
            if i == vj:
                return val
        return self._unconn

    def out_edges(self, vi):
        """Return all out-edges of ``vi`` as a list of (vj, val) tuples."""
        if self._invalid(vi):
            raise GraphError(u'边值不合法:', vi)
        return self._mat[vi]

    @staticmethod
    def _out_edges(row, unconn):
        """Extract the effective out-connections from one matrix row."""
        return [(i, val) for i, val in enumerate(row) if val != unconn]

    def __str__(self):
        return ('Graph[\n' + ',\n'.join([str(x) for x in self._mat])
                + '\n]Unconn: ' + str(self._unconn))
def test():
    """Smoke test: build a small graph and exercise the basic API."""
    mat = [[0, 0, 1], [1, 0, 1], [0, 1, 0]]
    g = Graph(mat)
    # Parenthesized single-argument print works under Python 2 and 3;
    # the original Python-2-only print statements failed to compile
    # under Python 3.
    print(g)
    vi = g.add_vertex()
    g.add_edge(vi, 0, 1)
    g.add_edge(2, 2, 3)
    print(g)
    print(g.out_edges(2))


if __name__ == '__main__':
    test()
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import (
ContainerListResponse,
ContainerCreateResponse,
ContainerRetrieveResponse,
)
from openai.pagination import SyncCursorPage, AsyncCursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestContainers:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
    # Create with only the required ``name`` argument and verify the
    # response parses as ContainerCreateResponse.
    container = client.containers.create(
        name="name",
    )
    assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
container = client.containers.create(
name="name",
expires_after={
"anchor": "last_active_at",
"minutes": 0,
},
file_ids=["string"],
memory_limit="1g",
)
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.create(
name="name",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.create(
name="name",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerCreateResponse, container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
container = client.containers.retrieve(
"container_id",
)
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.retrieve(
"container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.retrieve(
"container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
client.containers.with_raw_response.retrieve(
"",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
container = client.containers.list()
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
container = client.containers.list(
after="after",
limit=0,
order="asc",
)
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
container = client.containers.delete(
"container_id",
)
assert container is None
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.delete(
"container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert container is None
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.delete(
"container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert container is None
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
client.containers.with_raw_response.delete(
"",
)
class TestAsyncContainers:
    """Generated tests for the asynchronous ``containers`` resource.

    Mirrors ``TestContainers`` exactly, but drives the ``AsyncOpenAI``
    client: every call is awaited, streaming responses use ``async with``,
    and streaming bodies are parsed via ``await response.parse()``.
    """

    # Runs three times: loose client, strict client, and the aiohttp transport.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
        container = await async_client.containers.create(
            name="name",
        )
        assert_matches_type(ContainerCreateResponse, container, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
        container = await async_client.containers.create(
            name="name",
            expires_after={
                "anchor": "last_active_at",
                "minutes": 0,
            },
            file_ids=["string"],
            memory_limit="1g",
        )
        assert_matches_type(ContainerCreateResponse, container, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.with_raw_response.create(
            name="name",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        container = response.parse()
        assert_matches_type(ContainerCreateResponse, container, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.with_streaming_response.create(
            name="name",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            container = await response.parse()
            assert_matches_type(ContainerCreateResponse, container, path=["response"])

        # The streaming body must be closed once the context manager exits.
        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
        container = await async_client.containers.retrieve(
            "container_id",
        )
        assert_matches_type(ContainerRetrieveResponse, container, path=["response"])

    @parametrize
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.with_raw_response.retrieve(
            "container_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        container = response.parse()
        assert_matches_type(ContainerRetrieveResponse, container, path=["response"])

    @parametrize
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.with_streaming_response.retrieve(
            "container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            container = await response.parse()
            assert_matches_type(ContainerRetrieveResponse, container, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # An empty container_id must be rejected client-side, before any request.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.with_raw_response.retrieve(
                "",
            )

    @parametrize
    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
        container = await async_client.containers.list()
        assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])

    @parametrize
    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
        container = await async_client.containers.list(
            after="after",
            limit=0,
            order="asc",
        )
        assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        container = response.parse()
        assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            container = await response.parse()
            assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
        # Delete has no response body, so the parsed value is None.
        container = await async_client.containers.delete(
            "container_id",
        )
        assert container is None

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.with_raw_response.delete(
            "container_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        container = response.parse()
        assert container is None

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.with_streaming_response.delete(
            "container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            container = await response.parse()
            assert container is None

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.with_raw_response.delete(
                "",
            )
"""Support for the Drizzle database via the mysql-python adapter.
MySQL-Python is available at:
http://sourceforge.net/projects/mysql-python
Connecting
-----------
Connect string format::
drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
"""
from sqlalchemy.dialects.drizzle.base import (
DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler,
DrizzleIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector)
class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext,
                                      DrizzleExecutionContext):
    # Mixin shim: combines the mysqldb DBAPI execution behaviour with the
    # Drizzle dialect's execution context; no overrides are needed.
    pass
class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
    # Mixin shim: mysqldb statement compilation layered on the Drizzle compiler.
    pass
class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer,
                                        DrizzleIdentifierPreparer):
    # Mixin shim: mysqldb identifier quoting layered on the Drizzle preparer.
    pass
class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
    """Drizzle dialect backed by the mysqldb (MySQL-Python) DBAPI driver."""

    # Wire the mysqldb-specific shim classes into the dialect.
    execution_ctx_cls = DrizzleExecutionContext_mysqldb
    statement_compiler = DrizzleCompiler_mysqldb
    preparer = DrizzleIdentifierPreparer_mysqldb

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Hard-coded rather than queried from the server — presumably
        # Drizzle is UTF-8 only; confirm against Drizzle documentation.
        return 'utf8'
dialect = DrizzleDialect_mysqldb | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
def fake_compute_api(*args, **kwargs):
return True
def fake_compute_api_get(self, context, instance_id):
# BAD_UUID is something that does not exist
if instance_id == 'BAD_UUID':
raise exception.InstanceNotFound(instance_id=instance_id)
else:
return {
'id': 1,
'uuid': instance_id,
'vm_state': vm_states.ACTIVE,
'task_state': None, 'host': 'host1'
}
def fake_service_get_by_compute_host(self, context, host):
if host == 'bad-host':
raise exception.ComputeHostNotFound(host=host)
else:
return {
'host_name': host,
'service': 'compute',
'zone': 'nova'
}
class EvacuateTest(test.NoDBTestCase):
_methods = ('resize', 'evacuate')
def setUp(self):
super(EvacuateTest, self).setUp()
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
fake_service_get_by_compute_host)
self.UUID = uuid.uuid4()
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
def _get_admin_context(self, user_id='fake', project_id='fake'):
ctxt = context.get_admin_context()
ctxt.user_id = user_id
ctxt.project_id = project_id
return ctxt
def test_evacuate_with_valid_instance(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_evacuate_with_underscore_in_hostname(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
# NOTE: The hostname grammar in RFC952 does not allow for
# underscores in hostnames. However, we should test that it
# is supported because it sometimes occurs in real systems.
'host': 'underscore_hostname',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_evacuate_with_invalid_instance(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % 'BAD_UUID')
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 404)
def test_evacuate_with_active_service(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.content_type = 'application/json'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
def fake_evacuate(*args, **kwargs):
raise exception.ComputeServiceInUse("Service still in use")
self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_evacuate_instance_with_no_target(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'onSharedStorage': 'False',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_evacuate_instance_without_on_shared_storage(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_evacuate_instance_with_bad_target(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'bad-host',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 404)
def test_evacuate_instance_with_target(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
resp = req.get_response(app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
self.assertEqual("MyNewPass", resp_json['adminPass'])
def test_evacuate_shared_and_pass(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'True',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_evacuate_not_shared_pass_generated(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'False',
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
resp = req.get_response(app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
def test_evacuate_shared(self):
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'True',
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_not_admin(self):
ctxt = context.RequestContext('fake', 'fake', is_admin=False)
app = fakes.wsgi_app(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my-host',
'onSharedStorage': 'True',
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 403) | unknown | codeparrot/codeparrot-clean | ||
import pickle
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import FileField, ValidationError
from django.test import SimpleTestCase
class FileFieldTest(SimpleTestCase):
def test_filefield_1(self):
f = FileField()
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
no_file_msg = "'No file was submitted. Check the encoding type on the form.'"
with self.assertRaisesMessage(ValidationError, no_file_msg):
f.clean(SimpleUploadedFile('', b''))
with self.assertRaisesMessage(ValidationError, no_file_msg):
f.clean(SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
with self.assertRaisesMessage(ValidationError, no_file_msg):
f.clean('some content that is not a file')
with self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'"):
f.clean(SimpleUploadedFile('name', None))
with self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'"):
f.clean(SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertIsInstance(
f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode())),
SimpleUploadedFile
)
self.assertIsInstance(
f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf'),
SimpleUploadedFile
)
def test_filefield_2(self):
f = FileField(max_length=5)
with self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'"):
f.clean(SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertIsInstance(f.clean(SimpleUploadedFile('name', b'Some File Content')), SimpleUploadedFile)
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertIsInstance(f.clean(SimpleUploadedFile('name', b'')), SimpleUploadedFile)
def test_filefield_changed(self):
"""
The value of data will more than likely come from request.FILES. The
value of initial data will likely be a filename stored in the database.
Since its value is of no use to a FileField it is ignored.
"""
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
def test_file_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(FileField())), FileField) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
"""
Test Manager Self Test - Dummy Test Driver.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
import sys;
print('dummydriver.py: hello world!');
print('dummydriver.py: args: %s' % (sys.argv,));
if sys.argv[-1] in [ 'all', 'execute' ]:
import time;
for i in range(10, 1, -1):
print('dummydriver.py: %u...', i);
sys.stdout.flush();
time.sleep(1);
print('dummydriver.py: ...0! done');
sys.exit(0); | unknown | codeparrot/codeparrot-clean | ||
"""
Regression test for <https://bugs.freedesktop.org/show_bug.cgi?id=32952>,
wherein chat states in MUCs were misparsed, and MUC chat states in general.
"""
from servicetest import assertEquals, assertLength, EventPattern
from gabbletest import exec_test, elem, make_muc_presence, sync_stream
from mucutil import join_muc_and_check
import ns
import constants as cs
MUC = 'ohai@groupchat.google.com'
BOB = MUC + '/bob'
def get_state_notification(stanza):
for x in stanza.elements():
if x.uri == ns.CHAT_STATES:
return x
return None
def check_state_notification(elem, name, allow_body=False):
assertEquals('message', elem.name)
assertEquals('groupchat', elem['type'])
notification = get_state_notification(elem)
assert notification is not None, elem.toXml()
assert notification.name == name, notification.toXml()
if not allow_body:
assert len(elem.children) == 1, elem.toXml()
def test(q, bus, conn, stream):
(chan, user, bob) = join_muc_and_check(q, bus, conn, stream,
MUC)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(bob, cs.CHAT_STATE_INACTIVE))
stream.send(
elem('message', from_=BOB, to='test@localhost/Resource',
type='groupchat', jid='bob@bob.bob')(
elem(ns.CHAT_STATES, 'composing'),
elem('google:nosave', 'x', value='disabled'),
elem('http://jabber.org/protocol/archive', 'record', otr='false'),
))
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_COMPOSING, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_COMPOSING,
states.get(bob, cs.CHAT_STATE_INACTIVE))
stream.send(
elem('message', from_=BOB, to='test@localhost/Resource',
type='groupchat', jid='bob@bob.bob')(
elem(ns.CHAT_STATES, 'paused'),
elem('google:nosave', 'x', value='disabled'),
elem('http://jabber.org/protocol/archive', 'record', otr='false'),
))
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_PAUSED, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_PAUSED,
states.get(bob, cs.CHAT_STATE_INACTIVE))
# Bob leaves
presence = make_muc_presence('owner', 'none', MUC, 'bob')
presence['type'] = 'unavailable'
stream.send(presence)
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_GONE, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
# Bob no longer has any chat state at all
assertEquals(None, states.get(bob, None))
# Sending chat states:
# Composing...
chan.ChatState.SetChatState(cs.CHAT_STATE_COMPOSING)
stream_message = q.expect('stream-message')
check_state_notification(stream_message.stanza, 'composing')
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_COMPOSING,
states.get(user, cs.CHAT_STATE_INACTIVE))
# XEP 0085:
# every content message SHOULD contain an <active/> notification.
chan.send_msg_sync('hi.')
stream_message = q.expect('stream-message')
stanza = stream_message.stanza
check_state_notification(stanza, 'active', allow_body=True)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_ACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
bodies = list(stanza.elements(uri=ns.CLIENT, name='body'))
assertLength(1, bodies)
assertEquals(u'hi.', bodies[0].children[0])
# If we get an error with type='wait', stop sending chat states.
stanza['type'] = 'error'
stanza['from'] = MUC
stanza['to'] = 'test@localhost/Resource'
error = stanza.addElement('error')
error['type'] = 'wait'
error.addElement((ns.STANZA, 'resource-constraint'))
stream.send(stanza)
q.expect('dbus-signal', signal='MessageReceived',
predicate=lambda e: e.args[0][0]['message-type'] == cs.MT_DELIVERY_REPORT)
q.forbid_events([
EventPattern('stream-message', to=MUC,
predicate=lambda e: get_state_notification(e.stanza) is not None)
])
# User starts typing again but nothing should be seen or heard on the stream.
chan.ChatState.SetChatState(cs.CHAT_STATE_COMPOSING)
sync_stream(q, stream)
if __name__ == '__main__':
exec_test(test) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP pipeline orchestrating BigQuery and Cloud AI Platform services."""
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'
HYPERTUNE_SETTINGS = """
{
"hyperparameters": {
"goal": "MAXIMIZE",
"maxTrials": 6,
"maxParallelTrials": 3,
"hyperparameterMetricTag": "accuracy",
"enableTrialEarlyStopping": True,
"params": [
{
"parameterName": "max_iter",
"type": "DISCRETE",
"discreteValues": [500, 1000]
},
{
"parameterName": "alpha",
"type": "DOUBLE",
"minValue": 0.0001,
"maxValue": 0.001,
"scaleType": "UNIT_LINEAR_SCALE"
}
]
}
}
"""
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
"""Prepares the data sampling query."""
sampling_query_template = """
SELECT *
FROM
`{{ source_table }}` AS cover
WHERE
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
"""
query = Template(sampling_query_template).render(
source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
return query
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
    name='Covertype Classifier Training',
    # Fixed: the description previously ended with a stray "pipeline_yaml"
    # suffix accidentally pasted into the string.
    description='The pipeline training and deploying the Covertype classifier'
)
def covertype_train(project_id,
                    region,
                    source_table_name,
                    gcs_root,
                    dataset_id,
                    evaluation_metric_name,
                    evaluation_metric_threshold,
                    model_id,
                    version_id,
                    replace_existing_version,
                    hypertune_settings=HYPERTUNE_SETTINGS,
                    dataset_location='US'):
    """Orchestrates training and deployment of an sklearn model.

    Args:
        project_id: GCP project hosting the BigQuery and AI Platform jobs.
        region: Compute region for the AI Platform jobs.
        source_table_name: Fully qualified BigQuery table to sample from.
        gcs_root: GCS prefix under which dataset splits and job dirs are written.
        dataset_id: BigQuery dataset used by the sampling queries.
        evaluation_metric_name: Metric computed by the evaluation step.
        evaluation_metric_threshold: Deploy only if the metric exceeds this.
        model_id / version_id: Target AI Platform model and version names.
        replace_existing_version: Whether deployment may replace the version.
        hypertune_settings: Study config passed to the hypertune training job.
        dataset_location: BigQuery dataset location.
    """
    # Create the training split (lots 1-4 out of 10 buckets).
    query = generate_sampling_query(
        source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
    training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
    create_training_split = bigquery_query_op(
        query=query,
        project_id=project_id,
        dataset_id=dataset_id,
        table_id='',
        output_gcs_path=training_file_path,
        dataset_location=dataset_location)

    # Create the validation split (lot 8).
    query = generate_sampling_query(
        source_table_name=source_table_name, num_lots=10, lots=[8])
    validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
    create_validation_split = bigquery_query_op(
        query=query,
        project_id=project_id,
        dataset_id=dataset_id,
        table_id='',
        output_gcs_path=validation_file_path,
        dataset_location=dataset_location)

    # Create the testing split (lot 9).
    query = generate_sampling_query(
        source_table_name=source_table_name, num_lots=10, lots=[9])
    testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
    create_testing_split = bigquery_query_op(
        query=query,
        project_id=project_id,
        dataset_id=dataset_id,
        table_id='',
        output_gcs_path=testing_file_path,
        dataset_location=dataset_location)

    # Tune hyperparameters on the training/validation splits.
    tune_args = [
        '--training_dataset_path',
        create_training_split.outputs['output_gcs_path'],
        '--validation_dataset_path',
        create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
    ]
    job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
                                kfp.dsl.RUN_ID_PLACEHOLDER)
    hypertune = mlengine_train_op(
        project_id=project_id,
        region=region,
        master_image_uri=TRAINER_IMAGE,
        job_dir=job_dir,
        args=tune_args,
        training_input=hypertune_settings)

    # Retrieve the best trial's hyperparameters.
    get_best_trial = retrieve_best_run_op(
        project_id, hypertune.outputs['job_id'])

    # Train the model on a combined training and validation datasets
    job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
    train_args = [
        '--training_dataset_path',
        create_training_split.outputs['output_gcs_path'],
        '--validation_dataset_path',
        create_validation_split.outputs['output_gcs_path'], '--alpha',
        get_best_trial.outputs['alpha'], '--max_iter',
        get_best_trial.outputs['max_iter'], '--hptune', 'False'
    ]
    train_model = mlengine_train_op(
        project_id=project_id,
        region=region,
        master_image_uri=TRAINER_IMAGE,
        job_dir=job_dir,
        args=train_args)

    # Evaluate the model on the testing split.
    eval_model = evaluate_model_op(
        dataset_path=str(create_testing_split.outputs['output_gcs_path']),
        model_path=str(train_model.outputs['job_dir']),
        metric_name=evaluation_metric_name)

    # Deploy the model only if the primary metric beats the threshold.
    with kfp.dsl.Condition(eval_model.outputs['metric_value'] > evaluation_metric_threshold):
        deploy_model = mlengine_deploy_op(
            model_uri=train_model.outputs['job_dir'],
            project_id=project_id,
            model_id=model_id,
            version_id=version_id,
            runtime_version=RUNTIME_VERSION,
            python_version=PYTHON_VERSION,
            replace_existing_version=replace_existing_version)

    # Configure the pipeline to run using the service account defined
    # in the user-gcp-sa k8s secret
    if USE_KFP_SA == 'True':
        kfp.dsl.get_pipeline_conf().add_op_transformer(
            use_gcp_secret('user-gcp-sa'))
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rafthttp
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"sync"
"time"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
"golang.org/x/time/rate"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/httputil"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.etcd.io/raft/v3/raftpb"
)
const (
    streamTypeMessage  streamType = "message"
    streamTypeMsgAppV2 streamType = "msgappv2"

    // streamBufSize is the capacity of a streamWriter's outgoing message
    // channel; it also bounds how many encodes may be batched before a flush.
    streamBufSize = 4096
)

var (
    errUnsupportedStreamType = fmt.Errorf("unsupported stream type")

    // the key is in string format "major.minor.patch"
    // supportedStream maps a server version to the stream types it accepts;
    // consulted by checkStreamSupport before dialing.
    supportedStream = map[string][]streamType{
        "2.0.0": {},
        "2.1.0": {streamTypeMsgAppV2, streamTypeMessage},
        "2.2.0": {streamTypeMsgAppV2, streamTypeMessage},
        "2.3.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.0.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.1.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.2.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.4.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.5.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.6.0": {streamTypeMsgAppV2, streamTypeMessage},
        "3.7.0": {streamTypeMsgAppV2, streamTypeMessage},
    }
)

// streamType identifies the wire encoding used on a raft stream connection.
type streamType string
// endpoint returns the HTTP path under RaftStreamPrefix that serves this
// stream type. Unknown types panic (when a logger is available) and
// otherwise yield the empty string.
func (t streamType) endpoint(lg *zap.Logger) string {
    switch t {
    case streamTypeMessage:
        return path.Join(RaftStreamPrefix, "message")
    case streamTypeMsgAppV2:
        return path.Join(RaftStreamPrefix, "msgapp")
    }
    if lg != nil {
        lg.Panic("unhandled stream type", zap.String("stream-type", t.String()))
    }
    return ""
}
// String returns a human-readable name for the stream type, used in logs.
func (t streamType) String() string {
    if t == streamTypeMsgAppV2 {
        return "stream MsgApp v2"
    }
    if t == streamTypeMessage {
        return "stream Message"
    }
    return "unknown stream"
}
// linkHeartbeatMessage is a special message used as heartbeat message in
// link layer. It never conflicts with messages from raft because raft
// doesn't send out messages without From and To fields.
var linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}

// isLinkHeartbeatMessage reports whether m is the link-layer heartbeat
// (MsgHeartbeat with zero From and To), as opposed to a raft heartbeat.
func isLinkHeartbeatMessage(m *raftpb.Message) bool {
    return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
}
// outgoingConn bundles an accepted HTTP connection (writer, flusher, closer)
// with the stream type and member IDs; it is handed to a streamWriter
// via attach.
type outgoingConn struct {
    t streamType
    io.Writer
    http.Flusher
    io.Closer
    localID types.ID
    peerID  types.ID
}
// streamWriter writes messages to the attached outgoingConn.
type streamWriter struct {
    lg *zap.Logger

    localID types.ID
    peerID  types.ID

    status *peerStatus
    fs     *stats.FollowerStats
    r      Raft

    mu      sync.Mutex // guard field working and closer
    closer  io.Closer
    working bool

    msgc  chan raftpb.Message // outgoing messages; capacity streamBufSize
    connc chan *outgoingConn  // delivers newly attached connections to run()
    stopc chan struct{}
    done  chan struct{}
}
// startStreamWriter creates a streamWriter and starts the long-running
// goroutine that accepts messages and writes them to the attached
// outgoing connection.
func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
    sw := &streamWriter{
        lg: lg,

        localID: local,
        peerID:  id,

        status: status,
        fs:     fs,
        r:      r,

        stopc: make(chan struct{}),
        done:  make(chan struct{}),
        msgc:  make(chan raftpb.Message, streamBufSize),
        connc: make(chan *outgoingConn),
    }
    go sw.run()
    return sw
}
// run is the streamWriter's event loop. While no connection is attached,
// msgc and heartbeatc are nil so only connc/stopc are selectable; attaching
// a connection arms them, and any write error disarms them again.
func (cw *streamWriter) run() {
    var (
        msgc       chan raftpb.Message
        heartbeatc <-chan time.Time
        t          streamType
        enc        encoder
        flusher    http.Flusher
        batched    int // encodes since the last flush
    )
    // Heartbeat several times per ConnReadTimeout so an idle connection is
    // not timed out by the remote reader.
    tickc := time.NewTicker(ConnReadTimeout / 3)
    defer tickc.Stop()
    unflushed := 0 // bytes encoded since the last flush, fed into sentBytes

    if cw.lg != nil {
        cw.lg.Info(
            "started stream writer with remote peer",
            zap.String("local-member-id", cw.localID.String()),
            zap.String("remote-peer-id", cw.peerID.String()),
        )
    }

    for {
        select {
        case <-heartbeatc:
            // Periodic link-layer heartbeat on the active connection.
            err := enc.encode(&linkHeartbeatMessage)
            unflushed += linkHeartbeatMessage.Size()
            if err == nil {
                flusher.Flush()
                batched = 0
                sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
                unflushed = 0
                continue
            }

            // Heartbeat failed: close the connection and disarm both
            // channels until a new connection is attached.
            cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())

            sentFailures.WithLabelValues(cw.peerID.String()).Inc()
            cw.close()
            if cw.lg != nil {
                cw.lg.Warn(
                    "lost TCP streaming connection with remote peer",
                    zap.String("stream-writer-type", t.String()),
                    zap.String("local-member-id", cw.localID.String()),
                    zap.String("remote-peer-id", cw.peerID.String()),
                )
            }
            heartbeatc, msgc = nil, nil

        case m := <-msgc:
            err := enc.encode(&m)
            if err == nil {
                unflushed += m.Size()

                // Flush when the queue drains or after enough batched
                // messages, trading latency for fewer syscalls.
                if len(msgc) == 0 || batched > streamBufSize/2 {
                    flusher.Flush()
                    sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
                    unflushed = 0
                    batched = 0
                } else {
                    batched++
                }

                continue
            }

            // Write failed: tear down, disarm, and report the peer
            // unreachable to raft.
            cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
            cw.close()
            if cw.lg != nil {
                cw.lg.Warn(
                    "lost TCP streaming connection with remote peer",
                    zap.String("stream-writer-type", t.String()),
                    zap.String("local-member-id", cw.localID.String()),
                    zap.String("remote-peer-id", cw.peerID.String()),
                )
            }
            heartbeatc, msgc = nil, nil
            cw.r.ReportUnreachable(m.To)
            sentFailures.WithLabelValues(cw.peerID.String()).Inc()

        case conn := <-cw.connc:
            // A new connection was attached: close any previous one, pick
            // the encoder matching its stream type, and arm the channels.
            cw.mu.Lock()
            closed := cw.closeUnlocked()
            t = conn.t
            switch conn.t {
            case streamTypeMsgAppV2:
                enc = newMsgAppV2Encoder(conn.Writer, cw.fs)
            case streamTypeMessage:
                enc = &messageEncoder{w: conn.Writer}
            default:
                if cw.lg != nil {
                    cw.lg.Panic("unhandled stream type", zap.String("stream-type", t.String()))
                }
            }
            if cw.lg != nil {
                cw.lg.Info(
                    "set message encoder",
                    zap.String("from", conn.localID.String()),
                    zap.String("to", conn.peerID.String()),
                    zap.String("stream-type", t.String()),
                )
            }
            flusher = conn.Flusher
            unflushed = 0
            cw.status.activate()
            cw.closer = conn.Closer
            cw.working = true
            cw.mu.Unlock()

            if closed {
                if cw.lg != nil {
                    cw.lg.Warn(
                        "closed TCP streaming connection with remote peer",
                        zap.String("stream-writer-type", t.String()),
                        zap.String("local-member-id", cw.localID.String()),
                        zap.String("remote-peer-id", cw.peerID.String()),
                    )
                }
            }
            if cw.lg != nil {
                cw.lg.Info(
                    "established TCP streaming connection with remote peer",
                    zap.String("stream-writer-type", t.String()),
                    zap.String("local-member-id", cw.localID.String()),
                    zap.String("remote-peer-id", cw.peerID.String()),
                )
            }
            heartbeatc, msgc = tickc.C, cw.msgc

        case <-cw.stopc:
            if cw.close() {
                if cw.lg != nil {
                    cw.lg.Warn(
                        "closed TCP streaming connection with remote peer",
                        zap.String("stream-writer-type", t.String()),
                        zap.String("remote-peer-id", cw.peerID.String()),
                    )
                }
            }
            if cw.lg != nil {
                cw.lg.Info(
                    "stopped TCP streaming connection with remote peer",
                    zap.String("stream-writer-type", t.String()),
                    zap.String("remote-peer-id", cw.peerID.String()),
                )
            }
            close(cw.done)
            return
        }
    }
}
// writec returns the current outgoing message channel and whether a
// connection is attached (working); callers should only send when working.
func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) {
    cw.mu.Lock()
    msgc, working := cw.msgc, cw.working
    cw.mu.Unlock()
    return msgc, working
}
// close closes the attached connection under the lock; it returns true if
// there was a working connection to close.
func (cw *streamWriter) close() bool {
    cw.mu.Lock()
    defer cw.mu.Unlock()
    return cw.closeUnlocked()
}
// closeUnlocked closes the attached connection and resets the message
// channel. Callers must hold cw.mu. Returns false if no connection was
// working.
func (cw *streamWriter) closeUnlocked() bool {
    if !cw.working {
        return false
    }
    if err := cw.closer.Close(); err != nil {
        if cw.lg != nil {
            cw.lg.Warn(
                "failed to close connection with remote peer",
                zap.String("remote-peer-id", cw.peerID.String()),
                zap.Error(err),
            )
        }
    }
    // Messages still queued will never be delivered on this connection;
    // let raft know the peer is unreachable so it can back off.
    if len(cw.msgc) > 0 {
        cw.r.ReportUnreachable(uint64(cw.peerID))
    }
    // Replace (not drain) the channel so senders holding the old channel
    // cannot enqueue onto the new connection's queue.
    cw.msgc = make(chan raftpb.Message, streamBufSize)
    cw.working = false
    return true
}
// attach hands a new outgoing connection to the run loop. It returns false
// if the writer has already stopped.
func (cw *streamWriter) attach(conn *outgoingConn) bool {
    select {
    case cw.connc <- conn:
        return true
    case <-cw.done:
        return false
    }
}
// stop signals the run loop to exit and blocks until it has finished.
func (cw *streamWriter) stop() {
    close(cw.stopc)
    <-cw.done
}
// streamReader is a long-running go-routine that dials to the remote stream
// endpoint and reads messages from the response body returned.
type streamReader struct {
    lg *zap.Logger

    peerID types.ID
    typ    streamType

    tr     *Transport
    picker *urlPicker
    status *peerStatus
    recvc  chan<- raftpb.Message // non-proposal messages decoded from the stream
    propc  chan<- raftpb.Message // MsgProp messages decoded from the stream

    rl *rate.Limiter // alters the frequency of dial retrial attempts

    errorc chan<- error

    mu     sync.Mutex // guards paused and closer
    paused bool
    closer io.Closer

    ctx    context.Context
    cancel context.CancelFunc
    done   chan struct{}
}
// start initializes the reader's channels/context (unless pre-set, e.g. by
// tests) and launches the run goroutine.
func (cr *streamReader) start() {
    cr.done = make(chan struct{})
    if cr.errorc == nil {
        cr.errorc = cr.tr.ErrorC
    }
    if cr.ctx == nil {
        cr.ctx, cr.cancel = context.WithCancel(context.Background())
    }
    go cr.run()
}
// run repeatedly dials the remote stream endpoint and decodes messages from
// it until the reader's context is canceled; dial retries are paced by the
// rate limiter.
func (cr *streamReader) run() {
    t := cr.typ

    if cr.lg != nil {
        cr.lg.Info(
            "started stream reader with remote peer",
            zap.String("stream-reader-type", t.String()),
            zap.String("local-member-id", cr.tr.ID.String()),
            zap.String("remote-peer-id", cr.peerID.String()),
        )
    }

    for {
        rc, err := cr.dial(t)
        if err != nil {
            // An unsupported stream type is expected with old peers and is
            // not treated as a peer failure.
            if !errors.Is(err, errUnsupportedStreamType) {
                cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error())
            }
        } else {
            cr.status.activate()
            if cr.lg != nil {
                cr.lg.Info(
                    "established TCP streaming connection with remote peer",
                    zap.String("stream-reader-type", cr.typ.String()),
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("remote-peer-id", cr.peerID.String()),
                )
            }
            // decodeLoop blocks until the stream breaks or is closed.
            err = cr.decodeLoop(rc, t)
            if cr.lg != nil {
                cr.lg.Warn(
                    "lost TCP streaming connection with remote peer",
                    zap.String("stream-reader-type", cr.typ.String()),
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("remote-peer-id", cr.peerID.String()),
                    zap.Error(err),
                )
            }
            switch {
            // all data is read out
            case errors.Is(err, io.EOF):
            // connection is closed by the remote
            case transport.IsClosedConnError(err):
            default:
                cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error())
            }
        }
        // Wait for a while before new dial attempt
        err = cr.rl.Wait(cr.ctx)
        if cr.ctx.Err() != nil {
            // The reader was stopped; signal done and exit.
            if cr.lg != nil {
                cr.lg.Info(
                    "stopped stream reader with remote peer",
                    zap.String("stream-reader-type", t.String()),
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("remote-peer-id", cr.peerID.String()),
                )
            }
            close(cr.done)
            return
        }
        if err != nil {
            if cr.lg != nil {
                cr.lg.Warn(
                    "rate limit on stream reader with remote peer",
                    zap.String("stream-reader-type", t.String()),
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("remote-peer-id", cr.peerID.String()),
                    zap.Error(err),
                )
            }
        }
    }
}
// decodeLoop decodes messages from rc until an error occurs, dispatching
// them to recvc (or propc for proposals). It returns the decode error, or
// io.EOF when the reader was stopped before the connection was registered.
func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
    var dec decoder
    cr.mu.Lock()
    switch t {
    case streamTypeMsgAppV2:
        dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID)
    case streamTypeMessage:
        dec = &messageDecoder{r: rc}
    default:
        if cr.lg != nil {
            cr.lg.Panic("unknown stream type", zap.String("type", t.String()))
        }
    }
    // If the reader was stopped while we were setting up, close the
    // connection ourselves instead of registering it as cr.closer.
    select {
    case <-cr.ctx.Done():
        cr.mu.Unlock()
        if err := rc.Close(); err != nil {
            return err
        }
        return io.EOF
    default:
        cr.closer = rc
    }
    cr.mu.Unlock()

    // gofail: labelRaftDropHeartbeat:
    for {
        m, err := dec.decode()
        if err != nil {
            cr.mu.Lock()
            cr.close()
            cr.mu.Unlock()
            return err
        }

        // gofail: var raftDropHeartbeat struct{}
        // continue labelRaftDropHeartbeat
        receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))

        cr.mu.Lock()
        paused := cr.paused
        cr.mu.Unlock()

        // While paused (test hook), keep reading but drop everything.
        if paused {
            continue
        }

        if isLinkHeartbeatMessage(&m) {
            // raft is not interested in link layer
            // heartbeat message, so we should ignore
            // it.
            continue
        }

        recvc := cr.recvc
        if m.Type == raftpb.MsgProp {
            recvc = cr.propc
        }

        select {
        case recvc <- m:
        default:
            // Receiving buffer full: drop the message rather than block
            // the decode loop.
            if cr.status.isActive() {
                if cr.lg != nil {
                    cr.lg.Warn(
                        "dropped internal Raft message since receiving buffer is full (overloaded network)",
                        zap.String("message-type", m.Type.String()),
                        zap.String("local-member-id", cr.tr.ID.String()),
                        zap.String("from", types.ID(m.From).String()),
                        zap.String("remote-peer-id", types.ID(m.To).String()),
                        zap.Bool("remote-peer-active", cr.status.isActive()),
                    )
                }
            } else {
                if cr.lg != nil {
                    cr.lg.Warn(
                        "dropped Raft message since receiving buffer is full (overloaded network)",
                        zap.String("message-type", m.Type.String()),
                        zap.String("local-member-id", cr.tr.ID.String()),
                        zap.String("from", types.ID(m.From).String()),
                        zap.String("remote-peer-id", types.ID(m.To).String()),
                        zap.Bool("remote-peer-active", cr.status.isActive()),
                    )
                }
            }
            recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
        }
    }
}
// stop cancels the reader's context, closes any open connection, and waits
// for the run goroutine to finish.
func (cr *streamReader) stop() {
    cr.mu.Lock()
    cr.cancel()
    cr.close()
    cr.mu.Unlock()
    <-cr.done
}
// dial issues a streaming GET to the picked peer URL for stream type t and
// returns the response body on success. On failure it marks the URL
// unreachable and maps well-known HTTP statuses to sentinel errors.
func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
    u := cr.picker.pick()
    uu := u
    uu.Path = path.Join(t.endpoint(cr.lg), cr.tr.ID.String())

    if cr.lg != nil {
        cr.lg.Debug(
            "dial stream reader",
            zap.String("from", cr.tr.ID.String()),
            zap.String("to", cr.peerID.String()),
            zap.String("address", uu.String()),
        )
    }
    req, err := http.NewRequest(http.MethodGet, uu.String(), nil)
    if err != nil {
        cr.picker.unreachable(u)
        return nil, fmt.Errorf("failed to make http request to %v (%w)", u, err)
    }
    // Identify ourselves and the intended recipient so the remote can
    // validate cluster membership and version compatibility.
    req.Header.Set("X-Server-From", cr.tr.ID.String())
    req.Header.Set("X-Server-Version", version.Version)
    req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
    req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String())
    req.Header.Set("X-Raft-To", cr.peerID.String())

    setPeerURLsHeader(req, cr.tr.URLs)

    req = req.WithContext(cr.ctx)

    cr.mu.Lock()
    select {
    case <-cr.ctx.Done():
        cr.mu.Unlock()
        return nil, fmt.Errorf("stream reader is stopped")
    default:
    }
    cr.mu.Unlock()

    resp, err := cr.tr.streamRt.RoundTrip(req)
    if err != nil {
        cr.picker.unreachable(u)
        return nil, err
    }

    // Refuse the stream if the remote is older than us and does not support
    // this stream type.
    rv := serverVersion(resp.Header)
    lv := semver.Must(semver.NewVersion(version.Version))
    if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) {
        httputil.GracefulClose(resp)
        cr.picker.unreachable(u)
        return nil, errUnsupportedStreamType
    }

    switch resp.StatusCode {
    case http.StatusGone:
        // This member has been removed from the cluster.
        httputil.GracefulClose(resp)
        cr.picker.unreachable(u)
        reportCriticalError(errMemberRemoved, cr.errorc)
        return nil, errMemberRemoved

    case http.StatusOK:
        return resp.Body, nil

    case http.StatusNotFound:
        httputil.GracefulClose(resp)
        cr.picker.unreachable(u)
        return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)

    case http.StatusPreconditionFailed:
        // The body carries the reason; match it against known errors.
        b, err := io.ReadAll(resp.Body)
        if err != nil {
            cr.picker.unreachable(u)
            return nil, err
        }
        httputil.GracefulClose(resp)
        cr.picker.unreachable(u)

        switch strings.TrimSuffix(string(b), "\n") {
        case errIncompatibleVersion.Error():
            if cr.lg != nil {
                cr.lg.Warn(
                    "request sent was ignored by remote peer due to server version incompatibility",
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("remote-peer-id", cr.peerID.String()),
                    zap.Error(errIncompatibleVersion),
                )
            }
            return nil, errIncompatibleVersion

        case ErrClusterIDMismatch.Error():
            if cr.lg != nil {
                cr.lg.Warn(
                    "request sent was ignored by remote peer due to cluster ID mismatch",
                    zap.String("remote-peer-id", cr.peerID.String()),
                    zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")),
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("local-member-cluster-id", cr.tr.ClusterID.String()),
                    zap.Error(ErrClusterIDMismatch),
                )
            }
            return nil, ErrClusterIDMismatch

        default:
            return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b))
        }

    default:
        httputil.GracefulClose(resp)
        cr.picker.unreachable(u)
        return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode)
    }
}
// close closes the currently registered connection, if any, and clears it.
// Callers must hold cr.mu.
func (cr *streamReader) close() {
    if cr.closer != nil {
        if err := cr.closer.Close(); err != nil {
            if cr.lg != nil {
                cr.lg.Warn(
                    "failed to close remote peer connection",
                    zap.String("local-member-id", cr.tr.ID.String()),
                    zap.String("remote-peer-id", cr.peerID.String()),
                    zap.Error(err),
                )
            }
        }
    }
    cr.closer = nil
}
// pause makes decodeLoop discard incoming messages (test hook).
func (r *streamReader) pause() {
    r.mu.Lock()
    r.paused = true
    r.mu.Unlock()
}
// resume lifts a previous pause so decodeLoop delivers messages again.
func (r *streamReader) resume() {
    r.mu.Lock()
    r.paused = false
    r.mu.Unlock()
}
// checkStreamSupport checks whether the stream type is supported in the
// given version.
func checkStreamSupport(v *semver.Version, t streamType) bool {
nv := &semver.Version{Major: v.Major, Minor: v.Minor}
for _, s := range supportedStream[nv.String()] {
if s == t {
return true
}
}
return false
} | go | github | https://github.com/etcd-io/etcd | server/etcdserver/api/rafthttp/stream.go |
from nose import SkipTest
from nose.tools import assert_raises, assert_true, assert_equal, raises
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
class TestConvertNumpy(object):
    """Round-trip tests for networkx <-> scipy sparse matrix conversion."""

    @classmethod
    def setupClass(cls):
        # Bind numpy/scipy lazily so the whole class is skipped when the
        # stack is unavailable (nose-style class setup).
        global np, sp, sparse, np_assert_equal
        try:
            import numpy as np
            import scipy as sp
            import scipy.sparse as sparse
            np_assert_equal=np.testing.assert_equal
        except ImportError:
            raise SkipTest('SciPy sparse library not available.')

    def __init__(self):
        # Fixture graphs: plain, directed, and weighted variants.
        self.G1 = barbell_graph(10, 3)
        self.G2 = cycle_graph(10, create_using=nx.DiGraph())
        self.G3 = self.create_weighted(nx.Graph())
        self.G4 = self.create_weighted(nx.DiGraph())

    def create_weighted(self, G):
        """Populate G with a weighted 4-cycle (weight = source node + 10)."""
        g = cycle_graph(4)
        e = g.edges()
        source = [u for u,v in e]
        dest = [v for u,v in e]
        weight = [s+10 for s in source]
        ex = zip(source, dest, weight)
        G.add_weighted_edges_from(ex)
        return G

    def assert_equal(self, G1, G2):
        # Equality by node set and edge set only (attributes not compared).
        assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
        assert_true( sorted(G1.edges())==sorted(G2.edges()) )

    def identity_conversion(self, G, A, create_using):
        """Check that A converts back to G through every sparse format."""
        GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
        self.assert_equal(G, GG)

        GW = nx.to_networkx_graph(A, create_using=create_using)
        self.assert_equal(G, GW)

        GI = create_using.__class__(A)
        self.assert_equal(G, GI)

        ACSR = A.tocsr()
        GI = create_using.__class__(ACSR)
        self.assert_equal(G, GI)

        ACOO = A.tocoo()
        GI = create_using.__class__(ACOO)
        self.assert_equal(G, GI)

        ACSC = A.tocsc()
        GI = create_using.__class__(ACSC)
        self.assert_equal(G, GI)

        # Dense variants should convert identically too.
        AD = A.todense()
        GI = create_using.__class__(AD)
        self.assert_equal(G, GI)

        AA = A.toarray()
        GI = create_using.__class__(AA)
        self.assert_equal(G, GI)

    def test_shape(self):
        "Conversion from non-square sparse array."
        A = sp.sparse.lil_matrix([[1,2,3],[4,5,6]])
        assert_raises(nx.NetworkXError, nx.from_scipy_sparse_matrix, A)

    def test_identity_graph_matrix(self):
        "Conversion from graph to sparse matrix to graph."
        A = nx.to_scipy_sparse_matrix(self.G1)
        self.identity_conversion(self.G1, A, nx.Graph())

    def test_identity_digraph_matrix(self):
        "Conversion from digraph to sparse matrix to digraph."
        A = nx.to_scipy_sparse_matrix(self.G2)
        self.identity_conversion(self.G2, A, nx.DiGraph())

    def test_identity_weighted_graph_matrix(self):
        """Conversion from weighted graph to sparse matrix to weighted graph."""
        A = nx.to_scipy_sparse_matrix(self.G3)
        self.identity_conversion(self.G3, A, nx.Graph())

    def test_identity_weighted_digraph_matrix(self):
        """Conversion from weighted digraph to sparse matrix to weighted digraph."""
        A = nx.to_scipy_sparse_matrix(self.G4)
        self.identity_conversion(self.G4, A, nx.DiGraph())

    def test_nodelist(self):
        """Conversion from graph to sparse matrix to graph with nodelist."""
        P4 = path_graph(4)
        P3 = path_graph(3)
        nodelist = P3.nodes()
        A = nx.to_scipy_sparse_matrix(P4, nodelist=nodelist)
        GA = nx.Graph(A)
        self.assert_equal(GA, P3)

        # Make nodelist ambiguous by containing duplicates.
        nodelist += [nodelist[0]]
        assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3,
                      nodelist=nodelist)

    def test_weight_keyword(self):
        # weight=None -> unweighted adjacency; named attribute -> its values.
        WP4 = nx.Graph()
        WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3))
                            for n in range(3) )
        P4 = path_graph(4)
        A = nx.to_scipy_sparse_matrix(P4)
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
        np_assert_equal(0.5*A.todense(),
                        nx.to_scipy_sparse_matrix(WP4).todense())
        np_assert_equal(0.3*A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight='other').todense())

    def test_format_keyword(self):
        # Every supported sparse format should carry the same adjacency.
        WP4 = nx.Graph()
        WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3))
                            for n in range(3) )
        P4 = path_graph(4)
        A = nx.to_scipy_sparse_matrix(P4, format='csr')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

        A = nx.to_scipy_sparse_matrix(P4, format='csc')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

        A = nx.to_scipy_sparse_matrix(P4, format='coo')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

        A = nx.to_scipy_sparse_matrix(P4, format='bsr')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

        A = nx.to_scipy_sparse_matrix(P4, format='lil')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

        A = nx.to_scipy_sparse_matrix(P4, format='dia')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

        A = nx.to_scipy_sparse_matrix(P4, format='dok')
        np_assert_equal(A.todense(),
                        nx.to_scipy_sparse_matrix(WP4,weight=None).todense())

    @raises(nx.NetworkXError)
    def test_format_keyword_fail(self):
        # An unknown format name must raise NetworkXError.
        WP4 = nx.Graph()
        WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3))
                            for n in range(3) )
        P4 = path_graph(4)
        nx.to_scipy_sparse_matrix(P4, format='any_other')
# Unix SMB/CIFS implementation.
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for the Auth and AuthZ logging.
"""
from samba import auth
import samba.tests
from samba.messaging import Messaging
from samba.dcerpc.messaging import MSG_AUTH_LOG, AUTH_EVENT_NAME
from samba.dcerpc import samr
import time
import json
import os
from samba import smb
from samba.samdb import SamDB
import samba.tests.auth_log_base
from samba.credentials import Credentials, DONT_USE_KERBEROS, MUST_USE_KERBEROS
class AuthLogTestsNcalrpc(samba.tests.auth_log_base.AuthLogTestBase):
    """Auth/AuthZ log message checks for DCE/RPC over ncalrpc transports."""

    def setUp(self):
        super(AuthLogTestsNcalrpc, self).setUp()
        # Address recorded in the expected log messages for ncalrpc clients.
        self.remoteAddress = "/root/ncalrpc_as_system"

    def tearDown(self):
        super(AuthLogTestsNcalrpc , self).tearDown()

    def _test_rpc_ncaclrpc(self, authTypes, binding, creds,
                           protection, checkFunction):
        """Connect to samr over ncalrpc and verify the emitted log messages.

        authTypes: [final authType, expected msg-1 authType,
                    expected msg-2 authDescription]
        binding: extra binding options (e.g. "seal"), may be empty
        protection: expected transportProtection of the final Authorization
        """

        def isLastExpectedMessage( msg):
            return (
                msg["type"] == "Authorization" and
                msg["Authorization"]["serviceDescription"] == "DCE/RPC" and
                msg["Authorization"]["authType"] == authTypes[0] and
                msg["Authorization"]["transportProtection"] == protection
            )

        if binding:
            binding = "[%s]" % binding

        # The connection itself triggers the log messages under test.
        samr.samr("ncalrpc:%s" % binding, self.get_loadparm(), creds)
        messages = self.waitForMessages( isLastExpectedMessage)
        checkFunction(messages, authTypes, protection)

    def rpc_ncacn_np_ntlm_check(self, messages, authTypes, protection):
        # Expect one message per entry in authTypes.
        expected_messages = len(authTypes)
        self.assertEquals(expected_messages,
                          len(messages),
                          "Did not receive the expected number of messages")

        # Check the first message it should be an Authorization
        msg = messages[0]
        self.assertEquals("Authorization", msg["type"])
        self.assertEquals("DCE/RPC",
                          msg["Authorization"]["serviceDescription"])
        self.assertEquals(authTypes[1], msg["Authorization"]["authType"])
        self.assertEquals("NONE", msg["Authorization"]["transportProtection"])

        # Check the second message it should be an Authentication
        msg = messages[1]
        self.assertEquals("Authentication", msg["type"])
        self.assertEquals("NT_STATUS_OK", msg["Authentication"]["status"])
        self.assertEquals("DCE/RPC",
                          msg["Authentication"]["serviceDescription"])
        self.assertEquals(authTypes[2], msg["Authentication"]["authDescription"])

    def test_ncalrpc_ntlm_dns_sign(self):
        # NTLM over ncalrpc with SIGN protection.
        creds = self.insta_creds(template=self.get_credentials(),
                                 kerberos_state=DONT_USE_KERBEROS)
        self._test_rpc_ncaclrpc(["NTLMSSP",
                                 "ncalrpc",
                                 "NTLMSSP"],
                                "", creds, "SIGN",
                                self.rpc_ncacn_np_ntlm_check)

    def test_ncalrpc_ntlm_dns_seal(self):
        # NTLM over ncalrpc with SEAL protection.
        creds = self.insta_creds(template=self.get_credentials(),
                                 kerberos_state=DONT_USE_KERBEROS)
        self._test_rpc_ncaclrpc(["NTLMSSP",
                                 "ncalrpc",
                                 "NTLMSSP"],
                                "seal", creds, "SEAL",
                                self.rpc_ncacn_np_ntlm_check)
#
# Test suite for Optik. Supplied by Johannes Gijsbers
# (taradino@softhome.net) -- translated from the original Optik
# test suite to this PyUnit-based version.
#
# $Id$
#
import sys
import os
import re
import copy
import unittest
from io import StringIO
from test import support
from optparse import make_option, Option, \
TitledHelpFormatter, OptionParser, OptionGroup, \
SUPPRESS_USAGE, OptionError, OptionConflictError, \
BadOptionError, OptionValueError, Values
from optparse import _match_abbrev
from optparse import _parse_num
retype = type(re.compile(''))
class InterceptedError(Exception):
    """Raised in place of parser exit()/error() so tests can observe both
    the error message and the exit status/message."""

    def __init__(self,
                 error_message=None,
                 exit_status=None,
                 exit_message=None):
        self.error_message = error_message
        self.exit_status = exit_status
        self.exit_message = exit_message

    def __str__(self):
        # Prefer the error message, then the exit message, then a generic tag.
        for text in (self.error_message, self.exit_message):
            if text:
                return text
        return "intercepted error"
class InterceptingOptionParser(OptionParser):
    """OptionParser that raises InterceptedError instead of exiting, so
    tests can assert on the would-be exit status and messages."""

    def exit(self, status=0, msg=None):
        raise InterceptedError(exit_status=status, exit_message=msg)

    def error(self, msg):
        raise InterceptedError(error_message=msg)
class BaseTest(unittest.TestCase):
def assertParseOK(self, args, expected_opts, expected_positional_args):
"""Assert the options are what we expected when parsing arguments.
Otherwise, fail with a nicely formatted message.
Keyword arguments:
args -- A list of arguments to parse with OptionParser.
expected_opts -- The options expected.
expected_positional_args -- The positional arguments expected.
Returns the options and positional args for further testing.
"""
(options, positional_args) = self.parser.parse_args(args)
optdict = vars(options)
self.assertEqual(optdict, expected_opts,
"""
Options are %(optdict)s.
Should be %(expected_opts)s.
Args were %(args)s.""" % locals())
self.assertEqual(positional_args, expected_positional_args,
"""
Positional arguments are %(positional_args)s.
Should be %(expected_positional_args)s.
Args were %(args)s.""" % locals ())
return (options, positional_args)
def assertRaises(self,
func,
args,
kwargs,
expected_exception,
expected_message):
"""
Assert that the expected exception is raised when calling a
function, and that the right error message is included with
that exception.
Arguments:
func -- the function to call
args -- positional arguments to `func`
kwargs -- keyword arguments to `func`
expected_exception -- exception that should be raised
expected_message -- expected exception message (or pattern
if a compiled regex object)
Returns the exception raised for further testing.
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
try:
func(*args, **kwargs)
except expected_exception as err:
actual_message = str(err)
if isinstance(expected_message, retype):
self.assertTrue(expected_message.search(actual_message),
"""\
expected exception message pattern:
/%s/
actual exception message:
'''%s'''
""" % (expected_message.pattern, actual_message))
else:
self.assertEqual(actual_message,
expected_message,
"""\
expected exception message:
'''%s'''
actual exception message:
'''%s'''
""" % (expected_message, actual_message))
return err
else:
self.fail("""expected exception %(expected_exception)s not raised
called %(func)r
with args %(args)r
and kwargs %(kwargs)r
""" % locals ())
# -- Assertions used in more than one class --------------------
def assertParseFail(self, cmdline_args, expected_output):
"""
Assert the parser fails with the expected message. Caller
must ensure that self.parser is an InterceptingOptionParser.
"""
try:
self.parser.parse_args(cmdline_args)
except InterceptedError as err:
self.assertEqual(err.error_message, expected_output)
else:
self.assertFalse("expected parse failure")
def assertOutput(self,
                 cmdline_args,
                 expected_output,
                 expected_status=0,
                 expected_error=None):
    """Assert the parser prints the expected output on stdout.

    Also checks the exit status and exit message carried by the
    InterceptedError that parse_args() is expected to raise.
    """
    save_stdout = sys.stdout
    try:
        try:
            sys.stdout = StringIO()
            self.parser.parse_args(cmdline_args)
        finally:
            # Capture output and restore stdout no matter what --
            # parse_args() is expected to raise InterceptedError.
            output = sys.stdout.getvalue()
            sys.stdout = save_stdout
    except InterceptedError as err:
        self.assertTrue(
            isinstance(output, str),
            "expected output to be an ordinary string, not %r"
            % type(output))

        if output != expected_output:
            self.fail("expected: \n'''\n" + expected_output +
                      "'''\nbut got \n'''\n" + output + "'''")
        self.assertEqual(err.exit_status, expected_status)
        self.assertEqual(err.exit_message, expected_error)
    else:
        # No exception means the parser never called exit().
        self.assertFalse("expected parser.exit()")
def assertTypeError(self, func, expected_message, *args):
    """Assert that TypeError is raised when executing func."""
    # Delegate to the message-checking assertRaises() defined above,
    # spelling the arguments out by keyword for clarity.
    self.assertRaises(func=func,
                      args=args,
                      kwargs=None,
                      expected_exception=TypeError,
                      expected_message=expected_message)
def assertHelp(self, parser, expected_help):
    """Assert that the parser formats exactly the expected help text."""
    actual_help = parser.format_help()
    if actual_help == expected_help:
        return
    # Build the identical failure message via %-formatting.
    raise self.failureException(
        'help text failure; expected:\n"%s"; got:\n"%s"\n'
        % (expected_help, actual_help))
# -- Test make_option() aka Option -------------------------------------
# It's not necessary to test correct options here. All the tests in the
# parser.parse_args() section deal with those, because they're needed
# there.
class TestOptionChecks(BaseTest):
    """Every invalid combination of Option constructor arguments must
    raise OptionError with a precise message."""

    def setUp(self):
        self.parser = OptionParser(usage=SUPPRESS_USAGE)

    def assertOptionError(self, expected_message, args=None, kwargs=None):
        # None defaults replace the old mutable [] / {} defaults, which
        # shared one list/dict object across all calls.  The inherited
        # assertRaises() already maps None to () / {}.
        self.assertRaises(make_option, args, kwargs,
                          OptionError, expected_message)

    def test_opt_string_empty(self):
        self.assertTypeError(make_option,
                             "at least one option string must be supplied")

    def test_opt_string_too_short(self):
        self.assertOptionError(
            "invalid option string 'b': must be at least two characters long",
            ["b"])

    def test_opt_string_short_invalid(self):
        self.assertOptionError(
            "invalid short option string '--': must be "
            "of the form -x, (x any non-dash char)",
            ["--"])

    def test_opt_string_long_invalid(self):
        self.assertOptionError(
            "invalid long option string '---': "
            "must start with --, followed by non-dash",
            ["---"])

    def test_attr_invalid(self):
        self.assertOptionError(
            "option -b: invalid keyword arguments: bar, foo",
            ["-b"], {'foo': None, 'bar': None})

    def test_action_invalid(self):
        self.assertOptionError(
            "option -b: invalid action: 'foo'",
            ["-b"], {'action': 'foo'})

    def test_type_invalid(self):
        self.assertOptionError(
            "option -b: invalid option type: 'foo'",
            ["-b"], {'type': 'foo'})
        self.assertOptionError(
            "option -b: invalid option type: 'tuple'",
            ["-b"], {'type': tuple})

    def test_no_type_for_action(self):
        self.assertOptionError(
            "option -b: must not supply a type for action 'count'",
            ["-b"], {'action': 'count', 'type': 'int'})

    def test_no_choices_list(self):
        self.assertOptionError(
            "option -b/--bad: must supply a list of "
            "choices for type 'choice'",
            ["-b", "--bad"], {'type': "choice"})

    def test_bad_choices_list(self):
        typename = type('').__name__
        self.assertOptionError(
            "option -b/--bad: choices must be a list of "
            "strings ('%s' supplied)" % typename,
            ["-b", "--bad"],
            {'type': "choice", 'choices':"bad choices"})

    def test_no_choices_for_type(self):
        self.assertOptionError(
            "option -b: must not supply choices for type 'int'",
            ["-b"], {'type': 'int', 'choices':"bad"})

    def test_no_const_for_action(self):
        self.assertOptionError(
            "option -b: 'const' must not be supplied for action 'store'",
            ["-b"], {'action': 'store', 'const': 1})

    def test_no_nargs_for_action(self):
        self.assertOptionError(
            "option -b: 'nargs' must not be supplied for action 'count'",
            ["-b"], {'action': 'count', 'nargs': 2})

    def test_callback_not_callable(self):
        self.assertOptionError(
            "option -b: callback not callable: 'foo'",
            ["-b"], {'action': 'callback',
                     'callback': 'foo'})

    def dummy(self):
        # Placeholder callable used as a valid callback below.
        pass

    def test_callback_args_no_tuple(self):
        self.assertOptionError(
            "option -b: callback_args, if supplied, "
            "must be a tuple: not 'foo'",
            ["-b"], {'action': 'callback',
                     'callback': self.dummy,
                     'callback_args': 'foo'})

    def test_callback_kwargs_no_dict(self):
        self.assertOptionError(
            "option -b: callback_kwargs, if supplied, "
            "must be a dict: not 'foo'",
            ["-b"], {'action': 'callback',
                     'callback': self.dummy,
                     'callback_kwargs': 'foo'})

    def test_no_callback_for_action(self):
        self.assertOptionError(
            "option -b: callback supplied ('foo') for non-callback option",
            ["-b"], {'action': 'store',
                     'callback': 'foo'})

    def test_no_callback_args_for_action(self):
        self.assertOptionError(
            "option -b: callback_args supplied for non-callback option",
            ["-b"], {'action': 'store',
                     'callback_args': 'foo'})

    def test_no_callback_kwargs_for_action(self):
        self.assertOptionError(
            "option -b: callback_kwargs supplied for non-callback option",
            ["-b"], {'action': 'store',
                     'callback_kwargs': 'foo'})

    def test_no_single_dash(self):
        self.assertOptionError(
            "invalid long option string '-debug': "
            "must start with --, followed by non-dash",
            ["-debug"])
        self.assertOptionError(
            "option -d: invalid long option string '-debug': must start with"
            " --, followed by non-dash",
            ["-d", "-debug"])
        self.assertOptionError(
            "invalid long option string '-debug': "
            "must start with --, followed by non-dash",
            ["-debug", "--debug"])
class TestOptionParser(BaseTest):
    """OptionParser bookkeeping: get_option(), has_option(),
    remove_option() and destroy()."""

    def setUp(self):
        self.parser = OptionParser()
        self.parser.add_option("-v", "--verbose", "-n", "--noisy",
                               action="store_true", dest="verbose")
        self.parser.add_option("-q", "--quiet", "--silent",
                               action="store_false", dest="verbose")

    def test_add_option_no_Option(self):
        self.assertTypeError(self.parser.add_option,
                             "not an Option instance: None", None)

    def test_add_option_invalid_arguments(self):
        self.assertTypeError(self.parser.add_option,
                             "invalid arguments", None, None)

    def test_get_option(self):
        opt1 = self.parser.get_option("-v")
        self.assertIsInstance(opt1, Option)
        self.assertEqual(opt1._short_opts, ["-v", "-n"])
        self.assertEqual(opt1._long_opts, ["--verbose", "--noisy"])
        self.assertEqual(opt1.action, "store_true")
        self.assertEqual(opt1.dest, "verbose")

    def test_get_option_equals(self):
        # All four option strings resolve to the same Option object.
        opt1 = self.parser.get_option("-v")
        opt2 = self.parser.get_option("--verbose")
        opt3 = self.parser.get_option("-n")
        opt4 = self.parser.get_option("--noisy")
        self.assertTrue(opt1 is opt2 is opt3 is opt4)

    def test_has_option(self):
        self.assertTrue(self.parser.has_option("-v"))
        self.assertTrue(self.parser.has_option("--verbose"))

    def assert_removed(self):
        # Helper (renamed from the mass-sed artifact 'assertTrueremoved'):
        # removing any alias of the -v option must remove all of its
        # aliases while leaving the -q option untouched.
        self.assertTrue(self.parser.get_option("-v") is None)
        self.assertTrue(self.parser.get_option("--verbose") is None)
        self.assertTrue(self.parser.get_option("-n") is None)
        self.assertTrue(self.parser.get_option("--noisy") is None)

        self.assertFalse(self.parser.has_option("-v"))
        self.assertFalse(self.parser.has_option("--verbose"))
        self.assertFalse(self.parser.has_option("-n"))
        self.assertFalse(self.parser.has_option("--noisy"))

        self.assertTrue(self.parser.has_option("-q"))
        self.assertTrue(self.parser.has_option("--silent"))

    def test_remove_short_opt(self):
        self.parser.remove_option("-n")
        self.assert_removed()

    def test_remove_long_opt(self):
        self.parser.remove_option("--verbose")
        self.assert_removed()

    def test_remove_nonexistent(self):
        self.assertRaises(self.parser.remove_option, ('foo',), None,
                          ValueError, "no such option 'foo'")

    @support.impl_detail('Relies on sys.getrefcount', cpython=True)
    def test_refleak(self):
        # If an OptionParser is carrying around a reference to a large
        # object, various cycles can prevent it from being GC'd in
        # a timely fashion.  destroy() breaks the cycles to ensure stuff
        # can be cleaned up.
        big_thing = [42]
        refcount = sys.getrefcount(big_thing)
        parser = OptionParser()
        parser.add_option("-a", "--aaarggh")
        parser.big_thing = big_thing

        parser.destroy()
        del parser
        self.assertEqual(refcount, sys.getrefcount(big_thing))
class TestOptionValues(BaseTest):
    """Equality semantics of optparse.Values against dicts and other
    objects."""

    def setUp(self):
        pass

    def test_basics(self):
        values = Values()
        self.assertEqual(vars(values), {})
        self.assertEqual(values, {})
        self.assertNotEqual(values, {"foo": "bar"})
        self.assertNotEqual(values, "")

        # Renamed from 'dict' -- don't shadow the builtin.
        defaults = {"foo": "bar", "baz": 42}
        values = Values(defaults=defaults)
        self.assertEqual(vars(values), defaults)
        self.assertEqual(values, defaults)
        self.assertNotEqual(values, {"foo": "bar"})
        self.assertNotEqual(values, {})
        self.assertNotEqual(values, "")
        self.assertNotEqual(values, [])
class TestTypeAliases(BaseTest):
    """type="str" and the type objects str/int are normalized to
    optparse's canonical type-name strings."""

    def setUp(self):
        self.parser = OptionParser()

    def test_str_aliases_string(self):
        self.parser.add_option("-s", type="str")
        self.assertEqual(self.parser.get_option("-s").type, "string")

    def test_type_object(self):
        # Passing the actual type objects also works.
        self.parser.add_option("-s", type=str)
        self.assertEqual(self.parser.get_option("-s").type, "string")
        self.parser.add_option("-x", type=int)
        self.assertEqual(self.parser.get_option("-x").type, "int")
# Custom type for testing processing of default values.
_time_units = { 's' : 1, 'm' : 60, 'h' : 60*60, 'd' : 60*60*24 }
def _check_duration(option, opt, value):
try:
if value[-1].isdigit():
return int(value)
else:
return int(value[:-1]) * _time_units[value[-1]]
except (ValueError, IndexError):
raise OptionValueError(
'option %s: invalid duration: %r' % (opt, value))
class DurationOption(Option):
    # Option subclass that registers the custom "duration" type,
    # checked by _check_duration() above.
    TYPES = Option.TYPES + ('duration',)
    # Copy first so the base class's checker table is not mutated.
    TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['duration'] = _check_duration
class TestDefaultValues(BaseTest):
    """Defaults from add_option() vs set_defaults(), and processing of
    string defaults through the option's type checker."""

    def setUp(self):
        self.parser = OptionParser()
        self.parser.add_option("-v", "--verbose", default=True)
        self.parser.add_option("-q", "--quiet", dest='verbose')
        self.parser.add_option("-n", type="int", default=37)
        self.parser.add_option("-m", type="int")
        self.parser.add_option("-s", default="foo")
        self.parser.add_option("-t")
        self.parser.add_option("-u", default=None)
        # Expected default-value mapping shared by the tests below.
        self.expected = { 'verbose': True,
                          'n': 37,
                          'm': None,
                          's': "foo",
                          't': None,
                          'u': None }

    def test_basic_defaults(self):
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_mixed_defaults_post(self):
        # set_defaults() after add_option() overrides option defaults.
        self.parser.set_defaults(n=42, m=-100)
        self.expected.update({'n': 42, 'm': -100})
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_mixed_defaults_pre(self):
        # An explicit default on add_option() wins over an earlier
        # set_defaults(); default=None also counts as explicit.
        self.parser.set_defaults(x="barf", y="blah")
        self.parser.add_option("-x", default="frob")
        self.parser.add_option("-y")

        self.expected.update({'x': "frob", 'y': "blah"})
        self.assertEqual(self.parser.get_default_values(), self.expected)

        self.parser.remove_option("-y")
        self.parser.add_option("-y", default=None)
        self.expected.update({'y': None})
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_process_default(self):
        # String defaults are run through the type checker unless
        # set_process_default_values(False) turns that off.
        self.parser.option_class = DurationOption
        self.parser.add_option("-d", type="duration", default=300)
        self.parser.add_option("-e", type="duration", default="6m")
        self.parser.set_defaults(n="42")

        self.expected.update({'d': 300, 'e': 360, 'n': 42})
        self.assertEqual(self.parser.get_default_values(), self.expected)

        self.parser.set_process_default_values(False)
        self.expected.update({'d': 300, 'e': "6m", 'n': "42"})
        self.assertEqual(self.parser.get_default_values(), self.expected)
class TestProgName(BaseTest):
    """
    Test that %prog expands to the right thing in usage, version,
    and help strings.
    """

    def assertUsage(self, parser, expected_usage):
        self.assertEqual(parser.get_usage(), expected_usage)

    def assertVersion(self, parser, expected_version):
        self.assertEqual(parser.get_version(), expected_version)

    def test_default_progname(self):
        # Make sure that program name taken from sys.argv[0] by default.
        save_argv = sys.argv[:]
        try:
            # Only the basename ("baz.py") should be used for %prog.
            sys.argv[0] = os.path.join("foo", "bar", "baz.py")
            parser = OptionParser("%prog ...", version="%prog 1.2")
            expected_usage = "Usage: baz.py ...\n"
            self.assertUsage(parser, expected_usage)
            self.assertVersion(parser, "baz.py 1.2")
            self.assertHelp(parser,
                            expected_usage + "\n" +
                            "Options:\n"
                            " --version show program's version number and exit\n"
                            " -h, --help show this help message and exit\n")
        finally:
            sys.argv[:] = save_argv

    def test_custom_progname(self):
        # An explicit prog= overrides sys.argv[0].
        parser = OptionParser(prog="thingy",
                              version="%prog 0.1",
                              usage="%prog arg arg")
        parser.remove_option("-h")
        parser.remove_option("--version")
        expected_usage = "Usage: thingy arg arg\n"
        self.assertUsage(parser, expected_usage)
        self.assertVersion(parser, "thingy 0.1")
        self.assertHelp(parser, expected_usage + "\n")
class TestExpandDefaults(BaseTest):
    """Expansion of the %default tag (and a custom default_tag) in
    option help strings."""

    def setUp(self):
        self.parser = OptionParser(prog="test")
        # NOTE(review): the literal help text below looks like its
        # column alignment may have been mangled -- verify against the
        # parser's actual format_help() output.
        self.help_prefix = """\
Usage: test [options]
Options:
-h, --help show this help message and exit
"""
        self.file_help = "read from FILE [default: %default]"
        self.expected_help_file = self.help_prefix + \
            " -f FILE, --file=FILE read from FILE [default: foo.txt]\n"
        self.expected_help_none = self.help_prefix + \
            " -f FILE, --file=FILE read from FILE [default: none]\n"

    def test_option_default(self):
        self.parser.add_option("-f", "--file",
                               default="foo.txt",
                               help=self.file_help)
        self.assertHelp(self.parser, self.expected_help_file)

    def test_parser_default_1(self):
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.parser.set_default('file', "foo.txt")
        self.assertHelp(self.parser, self.expected_help_file)

    def test_parser_default_2(self):
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.parser.set_defaults(file="foo.txt")
        self.assertHelp(self.parser, self.expected_help_file)

    def test_no_default(self):
        # No default at all renders as "none".
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.assertHelp(self.parser, self.expected_help_none)

    def test_default_none_1(self):
        self.parser.add_option("-f", "--file",
                               default=None,
                               help=self.file_help)
        self.assertHelp(self.parser, self.expected_help_none)

    def test_default_none_2(self):
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.parser.set_defaults(file=None)
        self.assertHelp(self.parser, self.expected_help_none)

    def test_float_default(self):
        self.parser.add_option(
            "-p", "--prob",
            help="blow up with probability PROB [default: %default]")
        self.parser.set_defaults(prob=0.43)
        expected_help = self.help_prefix + \
            " -p PROB, --prob=PROB blow up with probability PROB [default: 0.43]\n"
        self.assertHelp(self.parser, expected_help)

    def test_alt_expand(self):
        # A custom default_tag is expanded instead of %default.
        self.parser.add_option("-f", "--file",
                               default="foo.txt",
                               help="read from FILE [default: *DEFAULT*]")
        self.parser.formatter.default_tag = "*DEFAULT*"
        self.assertHelp(self.parser, self.expected_help_file)

    def test_no_expand(self):
        # default_tag = None disables expansion entirely.
        self.parser.add_option("-f", "--file",
                               default="foo.txt",
                               help="read from %default file")
        self.parser.formatter.default_tag = None
        expected_help = self.help_prefix + \
            " -f FILE, --file=FILE read from %default file\n"
        self.assertHelp(self.parser, expected_help)
# -- Test parser.parse_args() ------------------------------------------
class TestStandard(BaseTest):
    """Core parse_args() behaviour: short/long options, joined vs split
    arguments, "--" separator, abbreviations and error messages."""

    def setUp(self):
        options = [make_option("-a", type="string"),
                   make_option("-b", "--boo", type="int", dest='boo'),
                   make_option("--foo", action="append")]

        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_list=options)

    def test_required_value(self):
        self.assertParseFail(["-a"], "-a option requires 1 argument")

    def test_invalid_integer(self):
        self.assertParseFail(["-b", "5x"],
                             "option -b: invalid integer value: '5x'")

    def test_no_such_option(self):
        self.assertParseFail(["--boo13"], "no such option: --boo13")

    def test_long_invalid_integer(self):
        self.assertParseFail(["--boo=x5"],
                             "option --boo: invalid integer value: 'x5'")

    def test_empty(self):
        self.assertParseOK([], {'a': None, 'boo': None, 'foo': None}, [])

    def test_shortopt_empty_longopt_append(self):
        self.assertParseOK(["-a", "", "--foo=blah", "--foo="],
                           {'a': "", 'boo': None, 'foo': ["blah", ""]},
                           [])

    def test_long_option_append(self):
        self.assertParseOK(["--foo", "bar", "--foo", "", "--foo=x"],
                           {'a': None,
                            'boo': None,
                            'foo': ["bar", "", "x"]},
                           [])

    def test_option_argument_joined(self):
        self.assertParseOK(["-abc"],
                           {'a': "bc", 'boo': None, 'foo': None},
                           [])

    def test_option_argument_split(self):
        self.assertParseOK(["-a", "34"],
                           {'a': "34", 'boo': None, 'foo': None},
                           [])

    def test_option_argument_joined_integer(self):
        self.assertParseOK(["-b34"],
                           {'a': None, 'boo': 34, 'foo': None},
                           [])

    def test_option_argument_split_negative_integer(self):
        self.assertParseOK(["-b", "-5"],
                           {'a': None, 'boo': -5, 'foo': None},
                           [])

    def test_long_option_argument_joined(self):
        self.assertParseOK(["--boo=13"],
                           {'a': None, 'boo': 13, 'foo': None},
                           [])

    def test_long_option_argument_split(self):
        self.assertParseOK(["--boo", "111"],
                           {'a': None, 'boo': 111, 'foo': None},
                           [])

    def test_long_option_short_option(self):
        self.assertParseOK(["--foo=bar", "-axyz"],
                           {'a': 'xyz', 'boo': None, 'foo': ["bar"]},
                           [])

    def test_abbrev_long_option(self):
        self.assertParseOK(["--f=bar", "-axyz"],
                           {'a': 'xyz', 'boo': None, 'foo': ["bar"]},
                           [])

    def test_defaults(self):
        (options, args) = self.parser.parse_args([])
        defaults = self.parser.get_default_values()
        self.assertEqual(vars(defaults), vars(options))

    def test_ambiguous_option(self):
        self.parser.add_option("--foz", action="store",
                               type="string", dest="foo")
        self.assertParseFail(["--f=bar"],
                             "ambiguous option: --f (--foo, --foz?)")

    def test_short_and_long_option_split(self):
        self.assertParseOK(["-a", "xyz", "--foo", "bar"],
                           {'a': 'xyz', 'boo': None, 'foo': ["bar"]},
                           [])

    def test_short_option_split_long_option_append(self):
        self.assertParseOK(["--foo=bar", "-b", "123", "--foo", "baz"],
                           {'a': None, 'boo': 123, 'foo': ["bar", "baz"]},
                           [])

    def test_short_option_split_one_positional_arg(self):
        self.assertParseOK(["-a", "foo", "bar"],
                           {'a': "foo", 'boo': None, 'foo': None},
                           ["bar"])

    def test_short_option_consumes_separator(self):
        # "--" immediately after "-a" is consumed as -a's argument.
        self.assertParseOK(["-a", "--", "foo", "bar"],
                           {'a': "--", 'boo': None, 'foo': None},
                           ["foo", "bar"])
        self.assertParseOK(["-a", "--", "--foo", "bar"],
                           {'a': "--", 'boo': None, 'foo': ["bar"]},
                           [])

    def test_short_option_joined_and_separator(self):
        # (Fixed: the statement used to end with a stray trailing comma,
        # making it a no-op tuple expression.)
        self.assertParseOK(["-ab", "--", "--foo", "bar"],
                           {'a': "b", 'boo': None, 'foo': None},
                           ["--foo", "bar"])

    def test_hyphen_becomes_positional_arg(self):
        self.assertParseOK(["-ab", "-", "--foo", "bar"],
                           {'a': "b", 'boo': None, 'foo': ["bar"]},
                           ["-"])

    def test_no_append_versus_append(self):
        self.assertParseOK(["-b3", "-b", "5", "--foo=bar", "--foo", "baz"],
                           {'a': None, 'boo': 5, 'foo': ["bar", "baz"]},
                           [])

    def test_option_consumes_optionlike_string(self):
        self.assertParseOK(["-a", "-b3"],
                           {'a': "-b3", 'boo': None, 'foo': None},
                           [])

    def test_combined_single_invalid_option(self):
        self.parser.add_option("-t", action="store_true")
        self.assertParseFail(["-test"],
                             "no such option: -e")
class TestBool(BaseTest):
    """store_true / store_false actions and a falsy non-bool default."""

    def setUp(self):
        # default='' is deliberate: the (falsy) default is reported
        # verbatim when neither option is given.
        options = [make_option("-v",
                               "--verbose",
                               action="store_true",
                               dest="verbose",
                               default=''),
                   make_option("-q",
                               "--quiet",
                               action="store_false",
                               dest="verbose")]
        self.parser = OptionParser(option_list = options)

    def test_bool_default(self):
        self.assertParseOK([],
                           {'verbose': ''},
                           [])

    def test_bool_false(self):
        (options, args) = self.assertParseOK(["-q"],
                                             {'verbose': 0},
                                             [])
        # Identity check: the action stores the real False object.
        self.assertTrue(options.verbose is False)

    def test_bool_true(self):
        (options, args) = self.assertParseOK(["-v"],
                                             {'verbose': 1},
                                             [])
        self.assertTrue(options.verbose is True)

    def test_bool_flicker_on_and_off(self):
        # The last occurrence on the command line wins.
        self.assertParseOK(["-qvq", "-q", "-v"],
                           {'verbose': 1},
                           [])
class TestChoice(BaseTest):
    """The "choice" option type: valid/invalid values and implicit
    type inference from a choices= argument."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-c", action="store", type="choice",
                               dest="choice", choices=["one", "two", "three"])

    def test_valid_choice(self):
        self.assertParseOK(["-c", "one", "xyz"],
                           {'choice': 'one'},
                           ["xyz"])

    def test_invalid_choice(self):
        self.assertParseFail(["-c", "four", "abc"],
                             "option -c: invalid choice: 'four' "
                             "(choose from 'one', 'two', 'three')")

    def test_add_choice_option(self):
        # Supplying choices= alone implies type="choice", action="store".
        self.parser.add_option("-d", "--default",
                               choices=["four", "five", "six"])
        opt = self.parser.get_option("-d")
        self.assertEqual(opt.type, "choice")
        self.assertEqual(opt.action, "store")
class TestCount(BaseTest):
    """The "count" action, interacting with an int option and a
    store_const option that all share dest="verbose"."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.v_opt = make_option("-v", action="count", dest="verbose")
        self.parser.add_option(self.v_opt)
        # --verbose sets the count directly; -q/--quiet resets it to 0.
        self.parser.add_option("--verbose", type="int", dest="verbose")
        self.parser.add_option("-q", "--quiet",
                               action="store_const", dest="verbose", const=0)

    def test_empty(self):
        self.assertParseOK([], {'verbose': None}, [])

    def test_count_one(self):
        self.assertParseOK(["-v"], {'verbose': 1}, [])

    def test_count_three(self):
        self.assertParseOK(["-vvv"], {'verbose': 3}, [])

    def test_count_three_apart(self):
        self.assertParseOK(["-v", "-v", "-v"], {'verbose': 3}, [])

    def test_count_override_amount(self):
        self.assertParseOK(["-vvv", "--verbose=2"], {'verbose': 2}, [])

    def test_count_override_quiet(self):
        self.assertParseOK(["-vvv", "--verbose=2", "-q"], {'verbose': 0}, [])

    def test_count_overriding(self):
        self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"],
                           {'verbose': 1}, [])

    def test_count_interspersed_args(self):
        self.assertParseOK(["--quiet", "3", "-v"],
                           {'verbose': 1},
                           ["3"])

    def test_count_no_interspersed_args(self):
        # Parsing stops at the first positional arg when interspersed
        # args are disabled.
        self.parser.disable_interspersed_args()
        self.assertParseOK(["--quiet", "3", "-v"],
                           {'verbose': 0},
                           ["3", "-v"])

    def test_count_no_such_option(self):
        self.assertParseFail(["-q3", "-v"], "no such option: -3")

    def test_count_option_no_value(self):
        self.assertParseFail(["--quiet=3", "-v"],
                             "--quiet option does not take a value")

    def test_count_with_default(self):
        self.parser.set_default('verbose', 0)
        self.assertParseOK([], {'verbose':0}, [])

    def test_count_overriding_default(self):
        self.parser.set_default('verbose', 0)
        self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"],
                           {'verbose': 1}, [])
class TestMultipleArgs(BaseTest):
    """nargs=3 with the "store" action: a single option consumes
    three typed arguments into one tuple."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-p", "--point",
                               action="store", nargs=3, type="float", dest="point")

    def test_nargs_with_positional_args(self):
        self.assertParseOK(["foo", "-p", "1", "2.5", "-4.3", "xyz"],
                           {'point': (1.0, 2.5, -4.3)},
                           ["foo", "xyz"])

    def test_nargs_long_opt(self):
        self.assertParseOK(["--point", "-1", "2.5", "-0", "xyz"],
                           {'point': (-1.0, 2.5, -0.0)},
                           ["xyz"])

    def test_nargs_invalid_float_value(self):
        self.assertParseFail(["-p", "1.0", "2x", "3.5"],
                             "option -p: "
                             "invalid floating-point value: '2x'")

    def test_nargs_required_values(self):
        self.assertParseFail(["--point", "1.0", "3.5"],
                             "--point option requires 3 arguments")
class TestMultipleArgsAppend(BaseTest):
    """nargs with the "append" and "append_const" actions sharing one
    destination list."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-p", "--point", action="store", nargs=3,
                               type="float", dest="point")
        self.parser.add_option("-f", "--foo", action="append", nargs=2,
                               type="int", dest="foo")
        self.parser.add_option("-z", "--zero", action="append_const",
                               dest="foo", const=(0, 0))

    def test_nargs_append(self):
        self.assertParseOK(["-f", "4", "-3", "blah", "--foo", "1", "666"],
                           {'point': None, 'foo': [(4, -3), (1, 666)]},
                           ["blah"])

    def test_nargs_append_required_values(self):
        # "4,3" is one argument, not two -- commas don't split.
        self.assertParseFail(["-f4,3"],
                             "-f option requires 2 arguments")

    def test_nargs_append_simple(self):
        self.assertParseOK(["--foo=3", "4"],
                           {'point': None, 'foo':[(3, 4)]},
                           [])

    def test_nargs_append_const(self):
        self.assertParseOK(["--zero", "--foo", "3", "4", "-z"],
                           {'point': None, 'foo':[(0, 0), (3, 4), (0, 0)]},
                           [])
class TestVersion(BaseTest):
    """--version output, and its absence when no version= was given."""

    def test_version(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               version="%prog 0.1")
        # %prog expands to the basename of sys.argv[0].
        save_argv = sys.argv[:]
        try:
            sys.argv[0] = os.path.join(os.curdir, "foo", "bar")
            self.assertOutput(["--version"], "bar 0.1\n")
        finally:
            sys.argv[:] = save_argv

    def test_no_version(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.assertParseFail(["--version"],
                             "no such option: --version")
# -- Test conflicting default values and parser.parse_args() -----------
class TestConflictingDefaults(BaseTest):
    """Conflicting default values: the last one should win."""

    def setUp(self):
        self.parser = OptionParser(option_list=[
            make_option("-v", action="store_true", dest="verbose", default=1)])

    def test_conflict_default(self):
        # -q's default=0 overrides -v's default=1 for dest="verbose".
        self.parser.add_option("-q", action="store_false", dest="verbose",
                               default=0)
        self.assertParseOK([], {'verbose': 0}, [])

    def test_conflict_default_none(self):
        # An explicit default=None also counts as "last one wins".
        self.parser.add_option("-q", action="store_false", dest="verbose",
                               default=None)
        self.assertParseOK([], {'verbose': None}, [])
class TestOptionGroup(BaseTest):
    """Adding, validating and manipulating OptionGroups on a parser."""

    def setUp(self):
        self.parser = OptionParser(usage=SUPPRESS_USAGE)

    def test_option_group_create_instance(self):
        group = OptionGroup(self.parser, "Spam")
        self.parser.add_option_group(group)
        group.add_option("--spam", action="store_true",
                         help="spam spam spam spam")
        self.assertParseOK(["--spam"], {'spam': 1}, [])

    def test_add_group_no_group(self):
        self.assertTypeError(self.parser.add_option_group,
                             "not an OptionGroup instance: None", None)

    def test_add_group_invalid_arguments(self):
        self.assertTypeError(self.parser.add_option_group,
                             "invalid arguments", None, None)

    def test_add_group_wrong_parser(self):
        group = OptionGroup(self.parser, "Spam")
        group.parser = OptionParser()
        self.assertRaises(self.parser.add_option_group, (group,), None,
                          ValueError, "invalid OptionGroup (wrong parser)")

    def test_group_manipulate(self):
        group = self.parser.add_option_group("Group 2",
                                             description="Some more options")
        group.set_title("Bacon")
        group.add_option("--bacon", type="int")
        # Was assertTrue(x, group), which only checked truthiness and
        # used `group` as the failure message; assert identity instead.
        self.assertIs(self.parser.get_option_group("--bacon"), group)
# -- Test extending and parser.parse_args() ----------------------------
class TestExtendAddTypes(BaseTest):
    """Extending Option with a new "file" type via a custom
    TYPE_CHECKER entry."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_class=self.MyOption)
        self.parser.add_option("-a", None, type="string", dest="a")
        self.parser.add_option("-f", "--file", type="file", dest="file")

    def tearDown(self):
        # Remove whatever test_filetype_* left behind at TESTFN.
        if os.path.isdir(support.TESTFN):
            os.rmdir(support.TESTFN)
        elif os.path.isfile(support.TESTFN):
            os.unlink(support.TESTFN)

    class MyOption (Option):
        # NOTE: first parameter is `option`, not `self` -- the function
        # is stored in TYPE_CHECKER and called through that table.
        def check_file(option, opt, value):
            if not os.path.exists(value):
                raise OptionValueError("%s: file does not exist" % value)
            elif not os.path.isfile(value):
                raise OptionValueError("%s: not a regular file" % value)
            return value

        TYPES = Option.TYPES + ("file",)
        # Copy so the base class's checker table is not mutated.
        TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
        TYPE_CHECKER["file"] = check_file

    def test_filetype_ok(self):
        support.create_empty_file(support.TESTFN)
        self.assertParseOK(["--file", support.TESTFN, "-afoo"],
                           {'file': support.TESTFN, 'a': 'foo'},
                           [])

    def test_filetype_noexist(self):
        self.assertParseFail(["--file", support.TESTFN, "-afoo"],
                             "%s: file does not exist" %
                             support.TESTFN)

    def test_filetype_notfile(self):
        os.mkdir(support.TESTFN)
        self.assertParseFail(["--file", support.TESTFN, "-afoo"],
                             "%s: not a regular file" %
                             support.TESTFN)
class TestExtendAddActions(BaseTest):
    """Extending Option with a new "extend" action that splits a
    comma-separated value into a growing list."""

    def setUp(self):
        options = [self.MyOption("-a", "--apple", action="extend",
                                 type="string", dest="apple")]
        self.parser = OptionParser(option_list=options)

    class MyOption (Option):
        ACTIONS = Option.ACTIONS + ("extend",)
        STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
        TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)

        def take_action(self, action, dest, opt, value, values, parser):
            if action == "extend":
                lvalue = value.split(",")
                values.ensure_value(dest, []).extend(lvalue)
            else:
                # Delegate with arguments in the correct order; the old
                # code passed (opt, parser, value, values), scrambling
                # the (value, values, parser) tail of the signature.
                Option.take_action(self, action, dest, opt, value,
                                   values, parser)

    def test_extend_add_action(self):
        self.assertParseOK(["-afoo,bar", "--apple=blah"],
                           {'apple': ["foo", "bar", "blah"]},
                           [])

    def test_extend_add_action_normal(self):
        self.assertParseOK(["-a", "foo", "-abar", "--apple=x,y"],
                           {'apple': ["foo", "bar", "x", "y"]},
                           [])
# -- Test callbacks and parser.parse_args() ----------------------------
class TestCallback(BaseTest):
    """Basic "callback" action: the callback receives the Option, the
    option string, the (typed) value, and the parser."""

    def setUp(self):
        options = [make_option("-x",
                               None,
                               action="callback",
                               callback=self.process_opt),
                   make_option("-f",
                               "--file",
                               action="callback",
                               callback=self.process_opt,
                               type="string",
                               dest="filename")]
        self.parser = OptionParser(option_list=options)

    def process_opt(self, option, opt, value, parser_):
        # Verifies the callback protocol for each option; the second
        # branch also checks that -x ran first (x=42 already stored).
        if opt == "-x":
            self.assertEqual(option._short_opts, ["-x"])
            self.assertEqual(option._long_opts, [])
            self.assertTrue(parser_ is self.parser)
            self.assertTrue(value is None)
            self.assertEqual(vars(parser_.values), {'filename': None})

            parser_.values.x = 42
        elif opt == "--file":
            self.assertEqual(option._short_opts, ["-f"])
            self.assertEqual(option._long_opts, ["--file"])
            self.assertTrue(parser_ is self.parser)
            self.assertEqual(value, "foo")
            self.assertEqual(vars(parser_.values), {'filename': None, 'x': 42})

            setattr(parser_.values, option.dest, value)
        else:
            self.fail("Unknown option %r in process_opt." % opt)

    def test_callback(self):
        self.assertParseOK(["-x", "--file=foo"],
                           {'filename': "foo", 'x': 42},
                           [])

    def test_callback_help(self):
        # This test was prompted by SF bug #960515 -- the point is
        # not to inspect the help text, just to make sure that
        # format_help() doesn't crash.
        parser = OptionParser(usage=SUPPRESS_USAGE)
        parser.remove_option("-h")
        parser.add_option("-t", "--test", action="callback",
                          callback=lambda: None, type="string",
                          help="foo")

        expected_help = ("Options:\n"
                         " -t TEST, --test=TEST foo\n")
        self.assertHelp(parser, expected_help)
class TestCallbackExtraArgs(BaseTest):
    """callback_args: extra positional arguments passed through to the
    callback after the standard four."""

    def setUp(self):
        options = [make_option("-p", "--point", action="callback",
                               callback=self.process_tuple,
                               callback_args=(3, int), type="string",
                               dest="points", default=[])]
        self.parser = OptionParser(option_list=options)

    def process_tuple(self, option, opt, value, parser_, nargs, converter):
        # The extras from callback_args arrive positionally, so renaming
        # the old `len`/`type` parameters (which shadowed the builtins)
        # is safe.
        self.assertEqual(nargs, 3)
        self.assertTrue(converter is int)
        if opt == "-p":
            self.assertEqual(value, "1,2,3")
        elif opt == "--point":
            self.assertEqual(value, "4,5,6")
        value = tuple(map(converter, value.split(",")))
        getattr(parser_.values, option.dest).append(value)

    def test_callback_extra_args(self):
        self.assertParseOK(["-p1,2,3", "--point", "4,5,6"],
                           {'points': [(1,2,3), (4,5,6)]},
                           [])
class TestCallbackMeddleArgs(BaseTest):
    """Callbacks that consume extra arguments by mutating the parser's
    rargs (remaining args) and largs (leftover args) lists."""

    def setUp(self):
        # Options named -1 .. -5; each consumes that many extra args.
        options = [make_option(str(x), action="callback",
                               callback=self.process_n, dest='things')
                   for x in range(-1, -6, -1)]
        self.parser = OptionParser(option_list=options)

    # Callback that meddles in rargs, largs
    def process_n(self, option, opt, value, parser_):
        # option is -3, -5, etc.
        nargs = int(opt[1:])
        rargs = parser_.rargs
        if len(rargs) < nargs:
            self.fail("Expected %d arguments for %s option." % (nargs, opt))
        dest = parser_.values.ensure_value(option.dest, [])
        dest.append(tuple(rargs[0:nargs]))
        # Record how many we took, then remove them from the stream.
        parser_.largs.append(nargs)
        del rargs[0:nargs]

    def test_callback_meddle_args(self):
        self.assertParseOK(["-1", "foo", "-3", "bar", "baz", "qux"],
                           {'things': [("foo",), ("bar", "baz", "qux")]},
                           [1, 3])

    def test_callback_meddle_args_separator(self):
        # The "--" separator is consumed like any other argument here.
        self.assertParseOK(["-2", "foo", "--"],
                           {'things': [('foo', '--')]},
                           [2])
class TestCallbackManyArgs(BaseTest):
    """nargs > 1 with the "callback" action: the callback receives the
    consumed arguments as a tuple of converted values."""

    def setUp(self):
        options = [make_option("-a", "--apple", action="callback", nargs=2,
                               callback=self.process_many, type="string"),
                   make_option("-b", "--bob", action="callback", nargs=3,
                               callback=self.process_many, type="int")]
        self.parser = OptionParser(option_list=options)

    def process_many(self, option, opt, value, parser_):
        if opt == "-a":
            self.assertEqual(value, ("foo", "bar"))
        elif opt == "--apple":
            self.assertEqual(value, ("ding", "dong"))
        elif opt == "-b":
            self.assertEqual(value, (1, 2, 3))
        elif opt == "--bob":
            self.assertEqual(value, (-666, 42, 0))
        else:
            # Mirror TestCallback.process_opt: an unexpected option
            # string is a test failure, not a silent pass.
            self.fail("Unknown option %r in process_many." % opt)

    def test_many_args(self):
        self.assertParseOK(["-a", "foo", "bar", "--apple", "ding", "dong",
                            "-b", "1", "2", "3", "--bob", "-666", "42",
                            "0"],
                           {"apple": None, "bob": None},
                           [])
class TestCallbackCheckAbbrev(BaseTest):
    """An abbreviated long option is expanded to its full spelling
    before being passed to the callback."""

    def setUp(self):
        self.parser = OptionParser()
        self.parser.add_option("--foo-bar", action="callback",
                               callback=self.check_abbrev)

    def check_abbrev(self, option, opt, value, parser):
        self.assertEqual(opt, "--foo-bar")

    def test_abbrev_callback_expansion(self):
        self.assertParseOK(["--foo"], {}, [])
class TestCallbackVarArgs(BaseTest):
    """A callback that consumes a variable number of arguments by
    pulling from parser.rargs until the next option-like token."""

    def setUp(self):
        options = [make_option("-a", type="int", nargs=2, dest="a"),
                   make_option("-b", action="store_true", dest="b"),
                   make_option("-c", "--callback", action="callback",
                               callback=self.variable_args, dest="c")]
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_list=options)

    def variable_args(self, option, opt, value, parser):
        self.assertTrue(value is None)
        value = []
        rargs = parser.rargs
        while rargs:
            arg = rargs[0]

            # Stop if we hit an arg like "--foo", "-a", "-fx", "--file=f",
            # etc.  Note that this stops on "--" too, but "--" alone and
            # a bare "-" are consumed as values.
            if ((arg[:2] == "--" and len(arg) > 2) or
                (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
                break
            else:
                value.append(arg)
                del rargs[0]
        setattr(parser.values, option.dest, value)

    def test_variable_args(self):
        self.assertParseOK(["-a3", "-5", "--callback", "foo", "bar"],
                           {'a': (3, -5), 'b': None, 'c': ["foo", "bar"]},
                           [])

    def test_consume_separator_stop_at_option(self):
        self.assertParseOK(["-c", "37", "--", "xxx", "-b", "hello"],
                           {'a': None,
                            'b': True,
                            'c': ["37", "--", "xxx"]},
                           ["hello"])

    def test_positional_arg_and_variable_args(self):
        self.assertParseOK(["hello", "-c", "foo", "-", "bar"],
                           {'a': None,
                            'b': None,
                            'c':["foo", "-", "bar"]},
                           ["hello"])

    def test_stop_at_option(self):
        self.assertParseOK(["-c", "foo", "-b"],
                           {'a': None, 'b': True, 'c': ["foo"]},
                           [])

    def test_stop_at_invalid_option(self):
        self.assertParseFail(["-c", "3", "-5", "-a"], "no such option: -5")
# -- Test conflict handling and parser.parse_args() --------------------
class ConflictBase(BaseTest):
    """Shared fixture: a parser that already defines -v/--verbose."""

    def setUp(self):
        options = [make_option("-v", "--verbose", action="count",
                               dest="verbose", help="increment verbosity")]
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_list=options)

    def show_version(self, option, opt, value, parser):
        # Callback used by subclasses when adding a conflicting -v/--version.
        parser.values.show_version = 1
class TestConflict(ConflictBase):
    """Use the default conflict resolution for Optik 1.2: error."""

    # Renamed from "assertTrueconflict_error" -- an artifact of a mechanical
    # "assert_" -> "assertTrue" rename; this is a helper, not an assertion.
    def _assert_conflict_error(self, func):
        """Add -v/--version via *func* and verify the OptionConflictError
        raised because of the pre-existing -v/--verbose option."""
        err = self.assertRaises(
            func, ("-v", "--version"), {'action': "callback",
                                        'callback': self.show_version,
                                        'help': "show version"},
            OptionConflictError,
            "option -v/--version: conflicting option string(s): -v")
        self.assertEqual(err.msg, "conflicting option string(s): -v")
        self.assertEqual(err.option_id, "-v/--version")

    def test_conflict_error(self):
        self._assert_conflict_error(self.parser.add_option)

    def test_conflict_error_group(self):
        group = OptionGroup(self.parser, "Group 1")
        self._assert_conflict_error(group.add_option)

    def test_no_such_conflict_handler(self):
        self.assertRaises(
            self.parser.set_conflict_handler, ('foo',), None,
            ValueError, "invalid conflict_resolution value 'foo'")
class TestConflictResolve(ConflictBase):
    """With the "resolve" handler the newer option steals conflicting strings."""

    def setUp(self):
        ConflictBase.setUp(self)
        self.parser.set_conflict_handler("resolve")
        self.parser.add_option("-v", "--version", action="callback",
                               callback=self.show_version, help="show version")

    def test_conflict_resolve(self):
        v_opt = self.parser.get_option("-v")
        verbose_opt = self.parser.get_option("--verbose")
        version_opt = self.parser.get_option("--version")
        # "-v" now belongs to --version; --verbose lost its short string.
        self.assertTrue(v_opt is version_opt)
        self.assertTrue(v_opt is not verbose_opt)
        self.assertEqual(v_opt._long_opts, ["--version"])
        self.assertEqual(version_opt._short_opts, ["-v"])
        self.assertEqual(version_opt._long_opts, ["--version"])
        self.assertEqual(verbose_opt._short_opts, [])
        self.assertEqual(verbose_opt._long_opts, ["--verbose"])

    def test_conflict_resolve_help(self):
        self.assertOutput(["-h"], """\
Options:
--verbose increment verbosity
-h, --help show this help message and exit
-v, --version show version
""")

    def test_conflict_resolve_short_opt(self):
        self.assertParseOK(["-v"],
                           {'verbose': None, 'show_version': 1},
                           [])

    def test_conflict_resolve_long_opt(self):
        self.assertParseOK(["--verbose"],
                           {'verbose': 1},
                           [])

    def test_conflict_resolve_long_opts(self):
        self.assertParseOK(["--verbose", "--version"],
                           {'verbose': 1, 'show_version': 1},
                           [])
class TestConflictOverride(BaseTest):
    """With "resolve", re-adding the same strings replaces the option wholesale."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.set_conflict_handler("resolve")
        self.parser.add_option("-n", "--dry-run",
                               action="store_true", dest="dry_run",
                               help="don't do anything")
        # Same option strings again: this second definition wins completely.
        self.parser.add_option("--dry-run", "-n",
                               action="store_const", const=42, dest="dry_run",
                               help="dry run mode")

    def test_conflict_override_opts(self):
        opt = self.parser.get_option("--dry-run")
        self.assertEqual(opt._short_opts, ["-n"])
        self.assertEqual(opt._long_opts, ["--dry-run"])

    def test_conflict_override_help(self):
        self.assertOutput(["-h"], """\
Options:
-h, --help show this help message and exit
-n, --dry-run dry run mode
""")

    def test_conflict_override_args(self):
        self.assertParseOK(["-n"],
                           {'dry_run': 42},
                           [])
# -- Other testing. ----------------------------------------------------
# Expected --help output for the parser built by TestHelp.make_parser()
# under various formatter / terminal-width settings.  NOTE: these literals
# must match optparse's output byte-for-byte -- do not reformat them.
_expected_help_basic = """\
Usage: bar.py [options]
Options:
-a APPLE throw APPLEs at basket
-b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
-h, --help show this help message and exit
"""
# Same parser with formatter.short_first = 0 (long option strings first).
_expected_help_long_opts_first = """\
Usage: bar.py [options]
Options:
-a APPLE throw APPLEs at basket
--boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
--help, -h show this help message and exit
"""
# Output produced by TitledHelpFormatter (underlined section titles).
_expected_help_title_formatter = """\
Usage
=====
bar.py [options]
Options
=======
-a APPLE throw APPLEs at basket
--boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
--help, -h show this help message and exit
"""
# Wrapping at COLUMNS=60.
_expected_help_short_lines = """\
Usage: bar.py [options]
Options:
-a APPLE throw APPLEs at basket
-b NUM, --boo=NUM shout "boo!" NUM times (in order to
frighten away all the evil spirits
that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later
fooing
-h, --help show this help message and exit
"""
# Wrapping at COLUMNS=0 (degenerate, extremely narrow terminal).
_expected_very_help_short_lines = """\
Usage: bar.py [options]
Options:
-a APPLE
throw
APPLEs at
basket
-b NUM, --boo=NUM
shout
"boo!" NUM
times (in
order to
frighten
away all
the evil
spirits
that cause
trouble and
mayhem)
--foo=FOO
store FOO
in the foo
list for
later
fooing
-h, --help
show this
help
message and
exit
"""
class TestHelp(BaseTest):
    """Help formatting: layout, wrapping, unicode, option groups, epilog."""

    def setUp(self):
        self.parser = self.make_parser(80)

    def make_parser(self, columns):
        """Build the standard three-option parser with $COLUMNS=*columns*."""
        options = [
            make_option("-a", type="string", dest='a',
                        metavar="APPLE", help="throw APPLEs at basket"),
            make_option("-b", "--boo", type="int", dest='boo',
                        metavar="NUM",
                        help=
                        "shout \"boo!\" NUM times (in order to frighten away "
                        "all the evil spirits that cause trouble and mayhem)"),
            make_option("--foo", action="append", type="string", dest='foo',
                        help="store FOO in the foo list for later fooing"),
        ]
        # We need to set COLUMNS for the OptionParser constructor, but
        # we must restore its original value -- otherwise, this test
        # screws things up for other tests when it's part of the Python
        # test suite.
        with support.EnvironmentVarGuard() as env:
            env['COLUMNS'] = str(columns)
            return InterceptingOptionParser(option_list=options)

    def assertHelpEquals(self, expected_output):
        save_argv = sys.argv[:]
        try:
            # Make optparse believe bar.py is being executed.
            sys.argv[0] = os.path.join("foo", "bar.py")
            self.assertOutput(["-h"], expected_output)
        finally:
            sys.argv[:] = save_argv

    def test_help(self):
        self.assertHelpEquals(_expected_help_basic)

    def test_help_old_usage(self):
        self.parser.set_usage("Usage: %prog [options]")
        self.assertHelpEquals(_expected_help_basic)

    def test_help_long_opts_first(self):
        self.parser.formatter.short_first = 0
        self.assertHelpEquals(_expected_help_long_opts_first)

    def test_help_title_formatter(self):
        with support.EnvironmentVarGuard() as env:
            env["COLUMNS"] = "80"
            self.parser.formatter = TitledHelpFormatter()
            self.assertHelpEquals(_expected_help_title_formatter)

    def test_wrap_columns(self):
        # Ensure that wrapping respects $COLUMNS environment variable.
        # Need to reconstruct the parser, since that's the only time
        # we look at $COLUMNS.
        self.parser = self.make_parser(60)
        self.assertHelpEquals(_expected_help_short_lines)
        self.parser = self.make_parser(0)
        self.assertHelpEquals(_expected_very_help_short_lines)

    def test_help_unicode(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-a", action="store_true", help="ol\u00E9!")
        expect = """\
Options:
-h, --help show this help message and exit
-a ol\u00E9!
"""
        self.assertHelpEquals(expect)

    def test_help_unicode_description(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               description="ol\u00E9!")
        expect = """\
ol\u00E9!
Options:
-h, --help show this help message and exit
"""
        self.assertHelpEquals(expect)

    def test_help_description_groups(self):
        self.parser.set_description(
            "This is the program description for %prog. %prog has "
            "an option group as well as single options.")
        group = OptionGroup(
            self.parser, "Dangerous Options",
            "Caution: use of these options is at your own risk. "
            "It is believed that some of them bite.")
        group.add_option("-g", action="store_true", help="Group option.")
        self.parser.add_option_group(group)
        expect = """\
Usage: bar.py [options]
This is the program description for bar.py. bar.py has an option group as
well as single options.
Options:
-a APPLE throw APPLEs at basket
-b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
-h, --help show this help message and exit
Dangerous Options:
Caution: use of these options is at your own risk. It is believed
that some of them bite.
-g Group option.
"""
        self.assertHelpEquals(expect)
        # The epilog is appended after a blank line.
        self.parser.epilog = "Please report bugs to /dev/null."
        self.assertHelpEquals(expect + "\nPlease report bugs to /dev/null.\n")
class TestMatchAbbrev(BaseTest):
    """_match_abbrev(): exact matches win; ambiguous prefixes raise."""

    def test_match_abbrev(self):
        # "--f" is an exact key, so it wins even though it is also a
        # prefix of every other entry.
        self.assertEqual(_match_abbrev("--f",
                                       {"--foz": None,
                                        "--foo": None,
                                        "--fie": None,
                                        "--f": None}),
                         "--f")

    def test_match_abbrev_error(self):
        s = "--f"
        wordmap = {"--foz": None, "--foo": None, "--fie": None}
        self.assertRaises(
            _match_abbrev, (s, wordmap), None,
            BadOptionError, "ambiguous option: --f (--fie, --foo, --foz?)")
class TestParseNumber(BaseTest):
    """_parse_num() accepts 0x/0b/leading-0 literals; bad digits fail."""

    def setUp(self):
        self.parser = InterceptingOptionParser()
        self.parser.add_option("-n", type=int)
        self.parser.add_option("-l", type=int)

    def test_parse_num_fail(self):
        # Message regexes tolerate both py2 and py3 ValueError wording.
        self.assertRaises(
            _parse_num, ("", int), {},
            ValueError,
            re.compile(r"invalid literal for int().*: '?'?"))
        self.assertRaises(
            _parse_num, ("0xOoops", int), {},
            ValueError,
            re.compile(r"invalid literal for int().*: s?'?0xOoops'?"))

    def test_parse_num_ok(self):
        self.assertEqual(_parse_num("0", int), 0)
        self.assertEqual(_parse_num("0x10", int), 16)
        self.assertEqual(_parse_num("0XA", int), 10)
        self.assertEqual(_parse_num("010", int), 8)
        self.assertEqual(_parse_num("0b11", int), 3)
        self.assertEqual(_parse_num("0b", int), 0)

    def test_numeric_options(self):
        self.assertParseOK(["-n", "42", "-l", "0x20"],
                           { "n": 42, "l": 0x20 }, [])
        self.assertParseOK(["-n", "0b0101", "-l010"],
                           { "n": 5, "l": 8 }, [])
        self.assertParseFail(["-n008"],
                             "option -n: invalid integer value: '008'")
        self.assertParseFail(["-l0b0123"],
                             "option -l: invalid integer value: '0b0123'")
        self.assertParseFail(["-l", "0x12x"],
                             "option -l: invalid integer value: '0x12x'")
def test_main():
    """Entry point used by the regression-test driver: run all TestCases."""
    support.run_unittest(__name__)


if __name__ == '__main__':
    test_main()
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.webmvcfnrunning
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.Configuration
import org.springframework.http.converter.HttpMessageConverters
import org.springframework.web.servlet.config.annotation.CorsRegistry
import org.springframework.web.servlet.config.annotation.ViewResolverRegistry
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer
import org.springframework.web.servlet.function.RouterFunction
// tag::snippet[]
@Configuration
class WebConfiguration : WebMvcConfigurer {

    // RouterFunction beans are detected by the Spring MVC configuration
    // and combined into the routing for functional endpoints.
    @Bean
    fun routerFunctionA(): RouterFunction<*> {
        TODO()
    }

    @Bean
    fun routerFunctionB(): RouterFunction<*> {
        TODO()
    }

    // WebMvcConfigurer callbacks: customize the shared MVC infrastructure
    // (message converters, CORS, view resolution) used by these endpoints.
    override fun configureMessageConverters(builder: HttpMessageConverters.ServerBuilder) {
        TODO()
    }

    override fun addCorsMappings(registry: CorsRegistry) {
        TODO()
    }

    override fun configureViewResolvers(registry: ViewResolverRegistry) {
        TODO()
    }
}
// end::snippet[] | kotlin | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/kotlin/org/springframework/docs/web/webmvcfnrunning/WebConfiguration.kt |
# Copyright (c) 2009 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import logging
from multiprocessing import Value
import pytest
import libqtile.log_utils
import libqtile.utils
from libqtile import hook
from libqtile.resources import default_config
from test.conftest import BareConfig
# TODO: more tests required.
# 1. Check all hooks that can be fired
class Call:
    """Callable recorder: remembers the last value it was invoked with."""

    def __init__(self, val):
        self.val = val

    def __call__(self, val):
        self.val = val
@pytest.fixture
def hook_fixture():
    """Silence qtile logging for the test and clear all hooks afterwards."""
    libqtile.log_utils.init_log(logging.CRITICAL, log_path=None, log_color=False)
    yield
    hook.clear()
def test_cannot_fire_unknown_event():
    # Firing a hook name that was never registered must raise QtileError.
    with pytest.raises(libqtile.utils.QtileError):
        hook.fire("unknown")
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber():
    """A plain callable subscriber receives the fired hook's arguments."""
    test = Call(0)
    hook.subscribe.group_window_add(test)
    hook.fire("group_window_add", 8)
    assert test.val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async():
    """An async function subscriber is run to completion when fired."""
    val = 0

    async def co(new_val):
        nonlocal val
        val = new_val

    hook.subscribe.group_window_add(co)
    hook.fire("group_window_add", 8)
    assert val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async_co():
    """A ready-made coroutine object can be subscribed directly."""
    val = 0

    async def co(new_val):
        nonlocal val
        val = new_val

    # Note: the coroutine is created with its argument bound up front,
    # so fire() passes no extra arguments.
    hook.subscribe.group_window_add(co(8))
    hook.fire("group_window_add")
    assert val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async_in_existing_loop():
    """Coroutine subscribers also run when an event loop is already running."""
    async def t():
        val = 0

        async def co(new_val):
            nonlocal val
            val = new_val

        hook.subscribe.group_window_add(co(8))
        hook.fire("group_window_add")
        # Yield control once so the scheduled coroutine can execute.
        await asyncio.sleep(0)
        assert val == 8

    asyncio.run(t())
@pytest.mark.usefixtures("hook_fixture")
def test_subscribers_can_be_added_removed():
    """hook.clear() removes every registered subscription."""
    test = Call(0)
    hook.subscribe.group_window_add(test)
    assert hook.subscriptions
    hook.clear()
    assert not hook.subscriptions
@pytest.mark.usefixtures("hook_fixture")
def test_can_unsubscribe_from_hook():
    """After unsubscribing, further fires no longer reach the callable."""
    test = Call(0)
    hook.subscribe.group_window_add(test)
    hook.fire("group_window_add", 3)
    assert test.val == 3
    hook.unsubscribe.group_window_add(test)
    hook.fire("group_window_add", 4)
    # Value unchanged: the second fire was not delivered.
    assert test.val == 3
def test_can_subscribe_to_startup_hooks(manager_nospawn):
    """startup/startup_complete fire on every start; startup_once only once."""
    config = BareConfig
    # Fill any attributes missing from BareConfig with the defaults.
    for attr in dir(default_config):
        if not hasattr(config, attr):
            setattr(config, attr, getattr(default_config, attr))
    manager = manager_nospawn
    # Shared counters: the hooks fire in the manager's process.
    manager.startup_once_calls = Value('i', 0)
    manager.startup_calls = Value('i', 0)
    manager.startup_complete_calls = Value('i', 0)

    def inc_startup_once_calls():
        manager.startup_once_calls.value += 1

    def inc_startup_calls():
        manager.startup_calls.value += 1

    def inc_startup_complete_calls():
        manager.startup_complete_calls.value += 1

    hook.subscribe.startup_once(inc_startup_once_calls)
    hook.subscribe.startup(inc_startup_calls)
    hook.subscribe.startup_complete(inc_startup_complete_calls)
    manager.start(config)
    assert manager.startup_once_calls.value == 1
    assert manager.startup_calls.value == 1
    assert manager.startup_complete_calls.value == 1
    # Restart and check that startup_once doesn't fire again
    manager.terminate()
    manager.start(config, no_spawn=True)
    assert manager.startup_once_calls.value == 1
    assert manager.startup_calls.value == 2
    assert manager.startup_complete_calls.value == 2
@pytest.mark.usefixtures('hook_fixture')
def test_can_update_by_selection_change(manager):
    """selection_change passes its payload through to subscribers."""
    test = Call(0)
    hook.subscribe.selection_change(test)
    hook.fire('selection_change', 'hello')
    assert test.val == 'hello'
@pytest.mark.usefixtures('hook_fixture')
def test_can_call_by_selection_notify(manager):
    """selection_notify passes its payload through to subscribers."""
    test = Call(0)
    hook.subscribe.selection_notify(test)
    hook.fire('selection_notify', 'hello')
    assert test.val == 'hello'
from genetica.common import Environment, Individual
from genetica.single import Population
class SingleEnvironment(Environment.CommonEnvironment):
    """Single-objective GA environment using real-coded individuals."""

    def __init__(self, name=None, objectives=None, var_ranges=None, settings=None, fitness=None):
        # Fix the individual/population implementations for the
        # single-objective case and delegate the rest to the base class.
        Individ = Individual.RealCoded
        Pop = Population.SinglePopulation
        Environment.CommonEnvironment.__init__(self, name=name, objectives=objectives, var_ranges=var_ranges, settings=settings, Individual=Individ, Population=Pop, fitness=fitness)

    def initialize_population(self):
        """Create generation 0, evaluate its fitness and log statistics."""
        self.population = self.Population(self.Individual, self.size, self.crossover_rate, self.mutation_rate, self.var_ranges, self.objectives)
        self.fitness.calculation(self.population.individuals)  # fitness function calculation for 0 generation
        self.population.update()
        self.population.collect_statistics()  # averaging
        self.save()  # write data to logfiles

    def step(self):
        """Advance the GA by one generation: evolve, re-evaluate, log."""
        self.generation += 1
        self.population.evolve()  # select, breed, mutate => new generation
        self.fitness.calculation(self.population.individuals)  # fitness function calculation of next gen
        self.population.update()
        self.population.collect_statistics()  # averaging
        self.save()  # write data to logfiles
# -*- coding: utf-8 -*-
import datetime
import mock
from nose.tools import eq_
import mkt
from mkt.developers.cron import (_flag_rereview_adult, exclude_new_region,
process_iarc_changes, send_new_region_emails)
from mkt.developers.models import ActivityLog
from mkt.site.tests import TestCase, user_factory, WebappTestCase
from mkt.site.utils import app_factory
from mkt.webapps.models import IARCInfo, RatingDescriptors, RatingInteractives
class TestSendNewRegionEmails(WebappTestCase):
    """send_new_region_emails targets only opted-in, non-excluded apps."""

    @mock.patch('mkt.developers.cron._region_email')
    def test_called(self, _region_email_mock):
        eq_(self.app.enable_new_regions, True)
        send_new_region_emails([mkt.regions.GBR])
        # First positional arg of the first call: the app id queryset.
        eq_(list(_region_email_mock.call_args_list[0][0][0]), [self.app.id])

    @mock.patch('mkt.developers.cron._region_email')
    def test_not_called_with_exclusions(self, _region_email_mock):
        self.app.addonexcludedregion.create(region=mkt.regions.GBR.id)
        send_new_region_emails([mkt.regions.GBR])
        eq_(list(_region_email_mock.call_args_list[0][0][0]), [])

    @mock.patch('mkt.developers.cron._region_email')
    def test_not_called_with_enable_new_regions_false(self,
                                                      _region_email_mock):
        """Check enable_new_regions is False by default."""
        self.app.update(enable_new_regions=False)
        send_new_region_emails([mkt.regions.GBR])
        eq_(list(_region_email_mock.call_args_list[0][0][0]), [])
class TestExcludeNewRegion(WebappTestCase):
    """exclude_new_region excludes only apps that did NOT opt in."""

    @mock.patch('mkt.developers.cron._region_exclude')
    def test_not_called_enable_new_regions_true(self, _region_exclude_mock):
        eq_(self.app.enable_new_regions, True)
        exclude_new_region([mkt.regions.GBR])
        eq_(list(_region_exclude_mock.call_args_list[0][0][0]), [])

    @mock.patch('mkt.developers.cron._region_exclude')
    def test_not_called_with_ordinary_exclusions(self, _region_exclude_mock):
        # Already-excluded apps must not be excluded a second time.
        self.app.addonexcludedregion.create(region=mkt.regions.GBR.id)
        exclude_new_region([mkt.regions.GBR])
        eq_(list(_region_exclude_mock.call_args_list[0][0][0]), [])

    @mock.patch('mkt.developers.cron._region_exclude')
    def test_called_with_enable_new_regions_false(self, _region_exclude_mock):
        # Check enable_new_regions is False by default.
        self.app.update(enable_new_regions=False)
        exclude_new_region([mkt.regions.GBR])
        eq_(list(_region_exclude_mock.call_args_list[0][0][0]), [self.app.id])
class TestIARCChangesCron(TestCase):
    """process_iarc_changes(): date window, rating sync, adult re-review."""

    @mock.patch('lib.iarc.utils.render_xml')
    def test_no_date(self, _render):
        # Default window is yesterday -> today.
        process_iarc_changes()
        _render.assert_called_with('get_rating_changes.xml', {
            'date_from': datetime.date.today() - datetime.timedelta(days=1),
            'date_to': datetime.date.today(),
        })

    @mock.patch('lib.iarc.utils.render_xml')
    def test_with_date(self, _render):
        date = datetime.date(2001, 1, 11)
        process_iarc_changes(date.strftime('%Y-%m-%d'))
        _render.assert_called_with('get_rating_changes.xml', {
            'date_from': date - datetime.timedelta(days=1),
            'date_to': date,
        })

    def test_processing(self):
        """
        The mock client always returns the same data. Set up the app so it
        matches the submission ID and verify the data is saved as expected.
        """
        mkt.set_user(user_factory())
        app = app_factory()
        IARCInfo.objects.create(addon=app, submission_id=52,
                                security_code='FZ32CU8')
        app.set_descriptors([
            'has_classind_violence',
            'has_esrb_strong_lang',
            'has_pegi_language', 'has_pegi_online',
            'has_usk_lang',
        ])
        app.set_interactives([])
        app.set_content_ratings({
            mkt.ratingsbodies.CLASSIND: mkt.ratingsbodies.CLASSIND_L
        })
        process_iarc_changes()
        app = app.reload()
        # Check ratings. CLASSIND should get updated.
        cr = app.content_ratings.get(
            ratings_body=mkt.ratingsbodies.CLASSIND.id)
        eq_(cr.rating, mkt.ratingsbodies.CLASSIND_14.id)
        cr = app.content_ratings.get(ratings_body=mkt.ratingsbodies.ESRB.id)
        eq_(cr.rating, mkt.ratingsbodies.ESRB_M.id)
        assert ActivityLog.objects.filter(
            action=mkt.LOG.CONTENT_RATING_CHANGED.id).count()
        # Check descriptors.
        rd = RatingDescriptors.objects.get(addon=app)
        self.assertSetEqual(rd.to_keys(), [
            'has_esrb_strong_lang',
            'has_classind_lang',
            'has_pegi_lang', 'has_pegi_online',
            'has_usk_lang',
        ])
        # Check interactives.
        ri = RatingInteractives.objects.get(addon=app)
        self.assertSetEqual(ri.to_keys(), [
            'has_shares_info', 'has_shares_location', 'has_digital_purchases',
            'has_users_interact'
        ])

    def test_rereview_flag_adult(self):
        mkt.set_user(user_factory())
        app = app_factory()
        app.set_content_ratings({
            mkt.ratingsbodies.ESRB: mkt.ratingsbodies.ESRB_E,
            mkt.ratingsbodies.CLASSIND: mkt.ratingsbodies.CLASSIND_18,
        })
        # A change to a non-adult rating must not flag for re-review.
        _flag_rereview_adult(app, mkt.ratingsbodies.ESRB,
                             mkt.ratingsbodies.ESRB_T)
        assert not app.rereviewqueue_set.count()
        assert not ActivityLog.objects.filter(
            action=mkt.LOG.CONTENT_RATING_TO_ADULT.id).exists()
        # Adult should get flagged to rereview.
        _flag_rereview_adult(app, mkt.ratingsbodies.ESRB,
                             mkt.ratingsbodies.ESRB_A)
        eq_(app.rereviewqueue_set.count(), 1)
        eq_(ActivityLog.objects.filter(
            action=mkt.LOG.CONTENT_RATING_TO_ADULT.id).count(), 1)
        # Test things same same if rating stays the same as adult.
        app.set_content_ratings({
            mkt.ratingsbodies.ESRB: mkt.ratingsbodies.ESRB_A,
        })
        _flag_rereview_adult(app, mkt.ratingsbodies.ESRB,
                             mkt.ratingsbodies.ESRB_A)
        eq_(app.rereviewqueue_set.count(), 1)
        eq_(ActivityLog.objects.filter(
            action=mkt.LOG.CONTENT_RATING_TO_ADULT.id).count(), 1)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import collections
import functools
import re
# Strips one level of surrounding single quotes from a string parameter.
STRIP_QUOTE = re.compile(r"^'(.+)'$")
# Phases at which a filter may run, relative to image loading/transforms.
PHASE_POST_TRANSFORM = 'post_transform'
PHASE_PRE_LOAD = 'pre-load'
PHASE_AFTER_LOAD = 'after-load'
def filter_method(*args, **kwargs):
    """Decorator factory that marks an engine method as a thumbor filter.

    Positional *args* describe the filter's parameters: each entry is
    either a raw regex string or a dict with ``regex``/``parse`` keys
    (see the parameter specs on BaseFilter).  Recognized keyword:
    ``'async'`` (bool, default False) -- whether the method completes via
    a callback instead of returning directly.

    The wrapped method gains a ``filter_data`` attribute which
    BaseFilter.pre_compile() later uses to build the argument-parsing
    regex for the filter.
    """
    import functools

    def _filter_deco(fn):
        # functools.wraps preserves the wrapped method's metadata
        # (__name__, __doc__, ...) for introspection and debugging.
        @functools.wraps(fn)
        def wrapper(self, *args2):
            return fn(self, *args2)

        defaults = None
        if fn.__defaults__:
            # Left-pad with None so defaults line up positionally with args.
            default_padding = [None] * (len(args) - len(fn.__defaults__))
            defaults = default_padding + list(fn.__defaults__)

        wrapper.filter_data = {
            'name': fn.__name__,
            'params': args,
            'defaults': defaults,
            # Accessed via string key: 'async' is a reserved word in py3.
            'async': kwargs.get('async', False)
        }
        return wrapper
    return _filter_deco
class FiltersFactory:
    """Builds FiltersRunner instances from a raw filter-spec string."""

    def __init__(self, filter_classes):
        # Map each filter's name (returned by pre_compile) to its class.
        self.filter_classes_map = {}
        for cls in filter_classes:
            filter_name = cls.pre_compile()
            self.filter_classes_map[filter_name] = cls

    def create_instances(self, context, filter_params):
        """Parse *filter_params* (e.g. "blur(2):quality(80)") and return a
        FiltersRunner with the instantiated filters grouped by phase."""
        filter_instances = collections.defaultdict(list)
        if not filter_params:
            return FiltersRunner(filter_instances)
        # Split on "):" so commas inside a filter's argument list do not
        # break the parse; the trailing ")" is re-appended below for every
        # element except the last (which kept its own ")").
        filter_params = filter_params.split('):')
        last_idx = len(filter_params) - 1
        for i, param in enumerate(filter_params):
            filter_name = param.split('(')[0]
            cls = self.filter_classes_map.get(filter_name, None)
            if cls is None:
                # Unknown filter names are silently ignored.
                continue
            if i != last_idx:
                param = param + ')'
            instance = cls.init_if_valid(param, context)
            if instance:
                filter_instances[getattr(cls, 'phase', PHASE_POST_TRANSFORM)].append(instance)
        return FiltersRunner(filter_instances)
class FiltersRunner:
    """Runs filter instances grouped by phase, one after another.

    Each filter's ``run`` receives a continuation that triggers the next
    filter, so asynchronous filters chain correctly; *callback* fires once
    the phase's list has been fully consumed.
    """

    def __init__(self, filter_instances):
        self.filter_instances = filter_instances

    def apply_filters(self, phase, callback):
        """Run every filter registered for *phase*, then invoke *callback*."""
        pending = self.filter_instances.get(phase)
        if not pending:
            callback()
            return

        def run_next():
            # The phase's list is consumed front-to-back as filters finish.
            if pending:
                pending.pop(0).run(run_next)
            else:
                callback()

        run_next()
class BaseFilter(object):
    """Base class for thumbor filters.

    A subclass defines exactly one method decorated with @filter_method;
    pre_compile() turns that method's parameter spec into a regex used to
    parse the filter's textual arguments (e.g. "quality(80)").
    """

    # Parameter specs: 'regex' matches the raw argument text, 'parse'
    # converts the matched text to a Python value.
    PositiveNumber = {
        'regex': r'[\d]+',
        'parse': int
    }
    NegativeNumber = {
        'regex': r'[-]%s' % PositiveNumber['regex'],
        'parse': int
    }
    Number = {
        'regex': r'[-]?%s' % PositiveNumber['regex'],
        'parse': int
    }
    DecimalNumber = {
        'regex': r'[-]?(?:(?:[\d]+\.?[\d]*)|(?:[\d]*\.?[\d]+))',
        'parse': float
    }
    Boolean = {
        'regex': r'[Tt]rue|[Ff]alse|1|0',
        'parse': lambda v: v == 'true' or v == 'True' or v == '1'
    }
    String = {
        'regex': r"(?:'.+?')|(?:[^,]+?)",
        'parse': lambda v: STRIP_QUOTE.sub(r'\1', v)
    }

    @classmethod
    def pre_compile(cls):
        """Find the @filter_method-decorated method and compile its regex.

        Returns the filter's name, or None if the class has no decorated
        method (only the first decorated method found is used).
        """
        meths = [f for f in list(cls.__dict__.values()) if hasattr(f, 'filter_data')]
        if len(meths) == 0:
            return
        cls.runnable_method = meths[0]
        filter_data = cls.runnable_method.filter_data
        cls.async_filter = filter_data['async']
        cls.compile_regex(filter_data)
        return filter_data['name']

    @classmethod
    def compile_regex(cls, filter_data):
        """Build cls.regex / cls.parsers from the decorator's param specs."""
        params = filter_data['params']
        defaults = filter_data.get('defaults', None)
        regexes = []
        parsers = []
        for i, param in enumerate(params):
            # py2-era and/or conditional: (regex, parse) for dict specs,
            # (raw_regex, None) for plain string specs.
            val = (type(param) == dict) and (param['regex'], param['parse']) or (param, None)
            comma = optional = ''
            if defaults and defaults[i] is not None:
                # Parameters with a default value become optional groups.
                optional = '?'
            if i > 0:
                comma = ','
            regexes.append(r'(?:%s\s*(%s)\s*)%s' % (comma, val[0], optional))
            parsers.append(val[1])
        cls.parsers = parsers
        cls.regex_str = r'%s\(%s\)' % (filter_data['name'], ''.join(regexes))
        cls.regex = re.compile(cls.regex_str)

    @classmethod
    def init_if_valid(cls, param, context):
        """Return an instance if *param* parses against the regex, else None."""
        instance = cls(param, context)
        if instance.params is not None:
            return instance
        else:
            return None

    def __init__(self, params, context=None):
        # Parse the textual params; self.params stays None on no match.
        params = self.regex.match(params) if self.regex else None
        if params:
            # NOTE(review): the "if param" filter drops groups that matched
            # the empty string -- optional params that are absent are
            # simply omitted from the list.
            params = [parser(param) if parser else param for parser, param in zip(self.parsers, params.groups()) if param]
        self.params = params
        self.context = context
        self.engine = context.modules.engine if context and context.modules else None

    def create_multi_engine_callback(self, callback, engines_count):
        """Wrap *callback* so it only fires after engines_count completions."""
        self.engines_count = engines_count
        def single_callback(*args):
            self.engines_count -= 1
            if self.engines_count == 0:
                callback(*args)
        return single_callback

    def run(self, callback=None):
        """Execute the filter on every frame engine (or the single engine).

        For async filters the decorated method receives *callback* (shared
        across engines); for sync filters the per-engine results are
        collected and returned, and *callback* is invoked afterwards.
        """
        if self.params is None:
            return
        if self.engine:
            if self.engine.is_multiple():
                engines_to_run = self.engine.frame_engines()
            else:
                engines_to_run = [self.engine]
        else:
            engines_to_run = [None]
        results = []
        if self.async_filter:
            callback = self.create_multi_engine_callback(callback, len(engines_to_run))
        for engine in engines_to_run:
            # self.engine is rebound so the runnable method operates on
            # the current frame's engine.
            self.engine = engine
            if self.async_filter:
                self.runnable_method(callback, *self.params)
            else:
                results.append(self.runnable_method(*self.params))
        if (not self.async_filter) and callback:
            callback()
        return results
# -*- coding: utf-8 -*-
#
# BOLT documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date

# NOTE(review): `sys` and `os` look unused in this file; they are kept because
# Sphinx conf.py files conventionally import them for the sys.path setup below.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "BOLT"
# The end year of the copyright range tracks the build date automatically.
copyright = "2015-%d, BOLT team" % date.today().year

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%Y-%m-%d"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "friendly"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "haiku"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers use
# this as icon for tabs, windows and bookmarks. It should be a Windows-style
# icon file (.ico), which is 16x16 or 32x32 pixels large. Default: None. The
# image file will be copied to the _static directory of the output HTML, but
# only if the file does not already exist there.
html_favicon = "_static/favicon.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%Y-%m-%d"

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {"index": ["indexsidebar.html"]}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {'index': 'index.html'}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = "boltdoc"

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ("contents", "bolt.tex", "BOLT Documentation", "LLVM project", "manual"),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("contents", "bolt", "BOLT Documentation", ["LLVM project"], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "contents",
        "BOLT",
        "BOLT Documentation",
        "LLVM project",
        "BOLT",
        "Binary Optimization and Layout Tool",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}

# -- Options for extensions ----------------------------------------------------

# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True | python | github | https://github.com/llvm/llvm-project | bolt/docs/conf.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from pydantic import NonNegativeInt
@dataclass
class ConfigurationDetails:
    """Represents the details of a configuration."""

    # Optional name of the configuration section being accessed.
    section: str | None = None
@dataclass
class ConnectionDetails:
    """Represents the details of a connection."""

    # Optional connection identifier.
    conn_id: str | None = None
    # Owning team name — presumably used to scope access checks in
    # multi-team deployments; confirm with callers.
    team_name: str | None = None
@dataclass
class DagDetails:
    """Represents the details of a DAG."""

    # Optional DAG identifier.
    id: str | None = None
    # Owning team name — presumably used to scope access checks in
    # multi-team deployments; confirm with callers.
    team_name: str | None = None
@dataclass
class BackfillDetails:
    """
    Represents the details of a backfill.

    .. deprecated:: 3.1.8
        Use DagAccessEntity.Run instead for a dag level access control.
    """

    # Optional backfill identifier (non-negative integer primary key).
    id: NonNegativeInt | None = None
@dataclass
class AssetDetails:
    """Represents the details of an asset."""

    # Optional asset identifier.
    id: str | None = None
@dataclass
class AssetAliasDetails:
    """Represents the details of an asset alias."""

    # Optional asset-alias identifier.
    id: str | None = None
@dataclass
class PoolDetails:
    """Represents the details of a pool."""

    # Optional pool name.
    name: str | None = None
    # Owning team name — presumably used to scope access checks in
    # multi-team deployments; confirm with callers.
    team_name: str | None = None
@dataclass
class TeamDetails:
    """Represents the details of a team."""

    # Optional team name.
    name: str | None = None
@dataclass
class VariableDetails:
    """Represents the details of a variable."""

    # Optional variable key.
    key: str | None = None
    # Owning team name — presumably used to scope access checks in
    # multi-team deployments; confirm with callers.
    team_name: str | None = None
class AccessView(Enum):
    """Enum of specific views the user tries to access.

    Each member names one view/page that access control can be applied to.
    """

    CLUSTER_ACTIVITY = "CLUSTER_ACTIVITY"
    DOCS = "DOCS"
    IMPORT_ERRORS = "IMPORT_ERRORS"
    JOBS = "JOBS"
    PLUGINS = "PLUGINS"
    PROVIDERS = "PROVIDERS"
    TRIGGERS = "TRIGGERS"
    WEBSITE = "WEBSITE"
class DagAccessEntity(Enum):
    """Enum of DAG entities the user tries to access.

    Scopes a permission check to a specific part of a DAG (its code, runs,
    task instances, logs, ...).
    """

    AUDIT_LOG = "AUDIT_LOG"
    CODE = "CODE"
    DEPENDENCIES = "DEPENDENCIES"
    # NOTE(review): presumably "human-in-the-loop" task details — confirm.
    HITL_DETAIL = "HITL_DETAIL"
    RUN = "RUN"
    TASK = "TASK"
    TASK_INSTANCE = "TASK_INSTANCE"
    TASK_LOGS = "TASK_LOGS"
    VERSION = "VERSION"
    WARNING = "WARNING"
XCOM = "XCOM" | python | github | https://github.com/apache/airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py |
/*-------------------------------------------------------------------------
*
* compress_io.h
* Interface to compress_io.c routines
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/bin/pg_dump/compress_io.h
*
*-------------------------------------------------------------------------
*/
#ifndef __COMPRESS_IO__
#define __COMPRESS_IO__
#include "pg_backup_archiver.h"
/*
 * Default size used for IO buffers
 *
 * When changing this value, it's necessary to check the relevant test cases
 * still exercise all the branches. This applies especially if the value is
 * increased, in which case some loops may not get iterated.
 */
#define DEFAULT_IO_BUFFER_SIZE	(128 * 1024)

/*
 * NOTE(review): presumably returns NULL when the given compression
 * specification is usable by this build, and an error message otherwise —
 * confirm against the definition in compress_io.c.
 */
extern char *supports_compression(const pg_compress_specification compression_spec);

/*
 * Prototype for callback function used in writeData()
 */
typedef void (*WriteFunc) (ArchiveHandle *AH, const char *buf, size_t len);

/*
 * Prototype for callback function used in readData()
 *
 * readData will call the read function repeatedly, until it returns 0 to signal
 * EOF. readData passes a buffer to read the data into in *buf, of length
 * *buflen. If that's not big enough for the callback function, it can free() it
 * and malloc() a new one, returning the new buffer and its size in *buf and
 * *buflen.
 *
 * Returns the number of bytes read into *buf, or 0 on EOF.
 */
typedef size_t (*ReadFunc) (ArchiveHandle *AH, char **buf, size_t *buflen);

/*
 * Per-stream compression state; the function pointers below are installed
 * according to the selected compression algorithm.
 */
typedef struct CompressorState CompressorState;

struct CompressorState
{
	/*
	 * Read all compressed data from the input stream (via readF) and print it
	 * out with ahwrite().
	 */
	void		(*readData) (ArchiveHandle *AH, CompressorState *cs);

	/*
	 * Compress and write data to the output stream (via writeF).
	 */
	void		(*writeData) (ArchiveHandle *AH, CompressorState *cs,
							  const void *data, size_t dLen);

	/*
	 * End compression and flush internal buffers if any.
	 */
	void		(*end) (ArchiveHandle *AH, CompressorState *cs);

	/*
	 * Callback function to read from an already processed input stream
	 */
	ReadFunc	readF;

	/*
	 * Callback function to write an already processed chunk of data.
	 */
	WriteFunc	writeF;

	/*
	 * Compression specification for this state.
	 */
	pg_compress_specification compression_spec;

	/*
	 * Private data to be used by the compressor.
	 */
	void	   *private_data;
};

/* Create a CompressorState for the given spec, wired to readF/writeF. */
extern CompressorState *AllocateCompressor(const pg_compress_specification compression_spec,
										   ReadFunc readF,
										   WriteFunc writeF);
/* Finish with a CompressorState obtained from AllocateCompressor. */
extern void EndCompressor(ArchiveHandle *AH, CompressorState *cs);
/*
 * Compress File Handle
 *
 * File-handle abstraction; the function pointers below are filled in
 * according to the compression in use, so callers read and write through a
 * uniform interface.
 */
typedef struct CompressFileHandle CompressFileHandle;

struct CompressFileHandle
{
	/*
	 * Open a file in mode.
	 *
	 * Pass either 'path' or 'fd' depending on whether a file path or a file
	 * descriptor is available. 'mode' can be one of 'r', 'rb', 'w', 'wb',
	 * 'a', and 'ab'. Requires an already initialized CompressFileHandle.
	 *
	 * Returns true on success and false on error.
	 */
	bool		(*open_func) (const char *path, int fd, const char *mode,
							  CompressFileHandle *CFH);

	/*
	 * Open a file for writing.
	 *
	 * 'mode' can be one of 'w', 'wb', 'a', and 'ab'. Requires an already
	 * initialized CompressFileHandle.
	 *
	 * Returns true on success and false on error.
	 */
	bool		(*open_write_func) (const char *path, const char *mode,
									CompressFileHandle *CFH);

	/*
	 * Read up to 'size' bytes of data from the file and store them into
	 * 'ptr'.
	 *
	 * Returns number of bytes read (this might be less than 'size' if EOF was
	 * reached). Exits via pg_fatal for all error conditions.
	 */
	size_t		(*read_func) (void *ptr, size_t size,
							  CompressFileHandle *CFH);

	/*
	 * Write 'size' bytes of data into the file from 'ptr'.
	 *
	 * Returns nothing, exits via pg_fatal for all error conditions.
	 */
	void		(*write_func) (const void *ptr, size_t size,
							   CompressFileHandle *CFH);

	/*
	 * Read at most size - 1 characters from the compress file handle into
	 * 's'.
	 *
	 * Stop if an EOF or a newline is found first. 's' is always null
	 * terminated and contains the newline if it was found.
	 *
	 * Returns 's' on success, and NULL on error or when end of file occurs
	 * while no characters have been read.
	 */
	char	   *(*gets_func) (char *s, int size, CompressFileHandle *CFH);

	/*
	 * Read the next character from the compress file handle as 'unsigned
	 * char' cast into 'int'.
	 *
	 * Returns the character read on success and throws an internal error
	 * otherwise. It treats EOF as error.
	 */
	int			(*getc_func) (CompressFileHandle *CFH);

	/*
	 * Test if EOF is reached in the compress file handle.
	 *
	 * Returns true if it is reached.
	 */
	bool		(*eof_func) (CompressFileHandle *CFH);

	/*
	 * Close an open file handle.
	 *
	 * Returns true on success and false on error.
	 */
	bool		(*close_func) (CompressFileHandle *CFH);

	/*
	 * Get a pointer to a string that describes an error that occurred during
	 * a compress file handle operation.
	 */
	const char *(*get_error_func) (CompressFileHandle *CFH);

	/*
	 * Compression specification for this file handle.
	 */
	pg_compress_specification compression_spec;

	/*
	 * Private data to be used by the compressor.
	 */
	void	   *private_data;
};

/*
 * Initialize a compress file handle with the requested compression.
 */
extern CompressFileHandle *InitCompressFileHandle(const pg_compress_specification compression_spec);

/*
 * Initialize a compress file stream. Infer the compression algorithm
 * from 'path', either by examining its suffix or by appending the supported
 * suffixes in 'path'.
 */
extern CompressFileHandle *InitDiscoverCompressFileHandle(const char *path,
														  const char *mode);
/* Finish with a handle created by one of the Init functions above. */
extern bool EndCompressFileHandle(CompressFileHandle *CFH);
#endif | c | github | https://github.com/postgres/postgres | src/bin/pg_dump/compress_io.h |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.plugin.scanner.test_classes;
import org.elasticsearch.plugin.Extensible;
@Extensible
public interface ExtensibleInterface {} | java | github | https://github.com/elastic/elasticsearch | build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleInterface.java |
#!/usr/bin/env python
"""
Add views for new dominate command.
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2009 Torsten Werner <twerner@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import psycopg2
def do_update(self):
    """Drop and recreate the helper views used by generate_filelist/dominate.

    ``self`` is the database-update object providing ``self.db`` (a psycopg2
    connection). On success the schema revision is bumped to 25 and the
    transaction committed; on psycopg2.InternalError the transaction is
    rolled back and a DBUpdateError is raised.

    NOTE: this file uses Python 2 ``print`` statements, matching the rest of
    the codebase it belongs to.
    """
    print "Add views for generate_filelist to database."
    try:
        c = self.db.cursor()
        print "Drop old views."
        # Remove any previous versions so the CREATE VIEW statements below
        # start from a clean slate; CASCADE drops dependent objects too.
        c.execute("DROP VIEW IF EXISTS binaries_suite_arch CASCADE")
        c.execute("DROP VIEW IF EXISTS newest_all_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS obsolete_any_by_all_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS newest_any_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS obsolete_any_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS source_suite CASCADE")
        c.execute("DROP VIEW IF EXISTS newest_source CASCADE")
        c.execute("DROP VIEW IF EXISTS newest_src_association CASCADE")
        c.execute("DROP VIEW IF EXISTS any_associations_source CASCADE")
        c.execute("DROP VIEW IF EXISTS src_associations_src CASCADE")
        c.execute("DROP VIEW IF EXISTS almost_obsolete_src_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS obsolete_src_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS bin_associations_binaries CASCADE")
        c.execute("DROP VIEW IF EXISTS src_associations_bin CASCADE")
        c.execute("DROP VIEW IF EXISTS almost_obsolete_all_associations CASCADE")
        c.execute("DROP VIEW IF EXISTS obsolete_all_associations CASCADE")
        print "Create new views."
        # Flat join of binaries with their suite and architecture names.
        c.execute("""
CREATE VIEW binaries_suite_arch AS
    SELECT bin_associations.id, binaries.id AS bin, binaries.package,
           binaries.version, binaries.source, bin_associations.suite,
           suite.suite_name, binaries.architecture, architecture.arch_string
        FROM binaries JOIN bin_associations ON binaries.id = bin_associations.bin
            JOIN suite ON suite.id = bin_associations.suite
            JOIN architecture ON binaries.architecture = architecture.id;
            """)
        # NOTE(review): architecture id 2 is presumably 'all' (arch-independent),
        # matching the view names below — confirm against the architecture table.
        c.execute("""
CREATE VIEW newest_all_associations AS
    SELECT package, max(version) AS version, suite, architecture
        FROM binaries_suite_arch
        WHERE architecture = 2 GROUP BY package, suite, architecture;
            """)
        c.execute("""
CREATE VIEW obsolete_any_by_all_associations AS
    SELECT binaries_suite_arch.id, binaries_suite_arch.package,
           binaries_suite_arch.version, binaries_suite_arch.suite,
           binaries_suite_arch.architecture
        FROM binaries_suite_arch
            JOIN newest_all_associations
                ON (binaries_suite_arch.package = newest_all_associations.package AND
                    binaries_suite_arch.version < newest_all_associations.version AND
                    binaries_suite_arch.suite = newest_all_associations.suite AND
                    binaries_suite_arch.architecture > 2);
            """)
        c.execute("""
CREATE VIEW newest_any_associations AS
    SELECT package, max(version) AS version, suite, architecture
        FROM binaries_suite_arch
        WHERE architecture > 2 GROUP BY package, suite, architecture;
            """)
        c.execute("""
CREATE VIEW obsolete_any_associations AS
    SELECT id, binaries_suite_arch.architecture, binaries_suite_arch.version,
           binaries_suite_arch.package, binaries_suite_arch.suite
        FROM binaries_suite_arch
            JOIN newest_any_associations
                ON binaries_suite_arch.architecture = newest_any_associations.architecture AND
                   binaries_suite_arch.package = newest_any_associations.package AND
                   binaries_suite_arch.suite = newest_any_associations.suite AND
                   binaries_suite_arch.version != newest_any_associations.version;
            """)
        # Source packages joined with their suite.
        c.execute("""
CREATE VIEW source_suite AS
    SELECT src_associations.id, source.id AS src , source.source, source.version,
           src_associations.suite, suite.suite_name
        FROM source
            JOIN src_associations ON source.id = src_associations.source
            JOIN suite ON suite.id = src_associations.suite;
            """)
        c.execute("""
CREATE VIEW newest_source AS
    SELECT source, max(version) AS version, suite
        FROM source_suite
        GROUP BY source, suite;
            """)
        c.execute("""
CREATE VIEW newest_src_association AS
    SELECT id, src, source, version, suite
        FROM source_suite
            JOIN newest_source USING (source, version, suite);
            """)
        c.execute("""
CREATE VIEW any_associations_source AS
    SELECT bin_associations.id, bin_associations.suite, binaries.id AS bin,
           binaries.package, binaries.version AS binver, binaries.architecture,
           source.id AS src, source.source, source.version AS srcver
        FROM bin_associations
            JOIN binaries ON bin_associations.bin = binaries.id AND architecture != 2
            JOIN source ON binaries.source = source.id;
            """)
        c.execute("""
CREATE VIEW src_associations_src AS
    SELECT src_associations.id, src_associations.suite, source.id AS src,
           source.source, source.version
        FROM src_associations
            JOIN source ON src_associations.source = source.id;
            """)
        # Sources with no remaining arch-dependent binaries in the suite.
        c.execute("""
CREATE VIEW almost_obsolete_src_associations AS
    SELECT src_associations_src.id, src_associations_src.src,
           src_associations_src.source, src_associations_src.version, suite
        FROM src_associations_src
            LEFT JOIN any_associations_source USING (src, suite)
        WHERE bin IS NULL;
            """)
        c.execute("""
CREATE VIEW obsolete_src_associations AS
    SELECT almost.id, almost.src, almost.source, almost.version, almost.suite
        FROM almost_obsolete_src_associations as almost
            JOIN newest_src_association AS newest
                ON almost.source = newest.source AND
                   almost.version < newest.version AND
                   almost.suite = newest.suite;
            """)
        c.execute("""
CREATE VIEW bin_associations_binaries AS
    SELECT bin_associations.id, bin_associations.bin, binaries.package,
           binaries.version, bin_associations.suite, binaries.architecture
        FROM bin_associations
            JOIN binaries ON bin_associations.bin = binaries.id;
            """)
        c.execute("""
CREATE VIEW src_associations_bin AS
    SELECT src_associations.id, src_associations.source, src_associations.suite,
           binaries.id AS bin, binaries.architecture
        FROM src_associations
            JOIN source ON src_associations.source = source.id
            JOIN binaries ON source.id = binaries.source;
            """)
        c.execute("""
CREATE VIEW almost_obsolete_all_associations AS
    SELECT bin_associations_binaries.id AS id, bin, bin_associations_binaries.package,
           bin_associations_binaries.version, suite
        FROM bin_associations_binaries
            LEFT JOIN src_associations_bin USING (bin, suite, architecture)
        WHERE source IS NULL AND architecture = 2;
            """)
        c.execute("""
CREATE VIEW obsolete_all_associations AS
    SELECT almost.id, almost.bin, almost.package, almost.version, almost.suite
        FROM almost_obsolete_all_associations AS almost
            JOIN newest_all_associations AS newest
                ON almost.package = newest.package AND
                   almost.version < newest.version AND
                   almost.suite = newest.suite;
            """)
        print "Committing"
        # Record that the schema is now at revision 25, then commit atomically.
        c.execute("UPDATE config SET value = '25' WHERE name = 'db_revision'")
        self.db.commit()
    except psycopg2.InternalError as msg:
        # Undo everything before reporting the failure to the caller.
        self.db.rollback()
raise DBUpdateError("Database error, rollback issued. Error message : %s" % (str(msg))) | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.