code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.signatureSubstitution;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.signatureSubstitution.AbstractAnalysisApiSymbolAsSignatureTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleAnalysisApiSymbolAsSignatureTestGenerated extends AbstractAnalysisApiSymbolAsSignatureTest {
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // FIR frontend, source module, normal analysis session, IDE analysis mode.
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    // Fails when a .kt file in the test-data directory has no generated test method below.
    @Test
    public void testAllFilesPresentInSymbolAsSignature() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("function.kt")
    public void testFunction() {
        runTest("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature/function.kt");
    }

    @Test
    @TestMetadata("propertyGetter.kt")
    public void testPropertyGetter() {
        runTest("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature/propertyGetter.kt");
    }

    @Test
    @TestMetadata("propertyNoAccessors.kt")
    public void testPropertyNoAccessors() {
        runTest("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature/propertyNoAccessors.kt");
    }

    @Test
    @TestMetadata("propertySetter.kt")
    public void testPropertySetter() {
        runTest("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature/propertySetter.kt");
    }

    @Test
    @TestMetadata("propertyWithAccessors.kt")
    public void testPropertyWithAccessors() {
        runTest("analysis/analysis-api/testData/components/signatureSubstitution/symbolAsSignature/propertyWithAccessors.kt");
    }
}
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/demos/colors/colortest.py
import reportlab.pdfgen.canvas
from reportlab.lib import colors
from reportlab.lib.units import inch
def run():
    """Generate colortest.pdf: RGB/CMYK primaries side by side, then every
    named color known to reportlab, one labelled swatch per row."""
    c = reportlab.pdfgen.canvas.Canvas('colortest.pdf')

    def swatch(y, name, set_fill):
        # One labelled color bar: black label at x=100, filled rect at x=200.
        c.setFillColorRGB(0, 0, 0)
        c.drawString(100, y, name)
        set_fill()
        c.rect(200, y, 300, 30, fill=1)
        return y - 40

    # Page 1: RGB values interspersed with CMYK values for comparison.
    framePage(c, 'Color Demo - RGB Space and CMYK spaces interspersed')
    y = 700
    y = swatch(y, 'cyan', lambda: c.setFillColorCMYK(1, 0, 0, 0))
    y = swatch(y, 'red', lambda: c.setFillColorRGB(1, 0, 0))
    y = swatch(y, 'magenta', lambda: c.setFillColorCMYK(0, 1, 0, 0))
    y = swatch(y, 'green', lambda: c.setFillColorRGB(0, 1, 0))
    y = swatch(y, 'yellow', lambda: c.setFillColorCMYK(0, 0, 1, 0))
    y = swatch(y, 'blue', lambda: c.setFillColorRGB(0, 0, 1))
    y = swatch(y, 'black', lambda: c.setFillColorCMYK(0, 0, 0, 1))
    c.showPage()

    # Remaining pages: all named colors, in alphabetical order by name.
    # sorted() is used because dict.items() returns an unsortable view on
    # Python 3 (the original called .sort() on it, which raises there);
    # sorted() behaves identically on Python 2.
    framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
    all_colors = sorted(colors.getAllNamedColors().items())
    c.setFont('Times-Roman', 12)
    c.drawString(72, 730, 'This shows all the named colors in the HTML standard.')
    y = 700
    for (name, color) in all_colors:
        c.setFillColor(colors.black)
        c.drawString(100, y, name)
        c.setFillColor(color)
        c.rect(200, y - 10, 300, 30, fill=1)
        y = y - 40
        if y < 100:
            # Start a new page once the column is full.
            c.showPage()
            framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
            y = 700
    c.save()
def framePage(canvas, title):
    """Draw page furniture on the current page: a title across the top, a
    centred page number in the bottom margin, and a red vertical rule.

    :param canvas: a reportlab Canvas (stroke/line state is restored before
        returning so callers are unaffected).
    :param title: text drawn at the top of the page.
    """
    canvas.setFont('Times-BoldItalic',20)
    canvas.drawString(inch, 10.5 * inch, title)
    # Page number, centred horizontally near the bottom edge.
    canvas.setFont('Times-Roman',10)
    canvas.drawCentredString(4.135 * inch, 0.75 * inch,
        'Page %d' % canvas.getPageNumber())
    #draw a border
    canvas.setStrokeColorRGB(1,0,0)
    canvas.setLineWidth(5)
    canvas.line(0.8 * inch, inch, 0.8 * inch, 10.75 * inch)
    #reset carefully afterwards
    canvas.setLineWidth(1)
    canvas.setStrokeColorRGB(0,0,0)
if __name__ == '__main__':
    run()
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import itertools
from powerline.segment import gen_segment_getter, process_segment, get_fallback_segment
from powerline.lib.unicode import u, safe_unicode
def requires_segment_info(func):
    """Mark *func* as requiring the ``segment_info`` argument and return it."""
    setattr(func, 'powerline_requires_segment_info', True)
    return func
def requires_filesystem_watcher(func):
    """Mark *func* as requiring a filesystem watcher and return it."""
    setattr(func, 'powerline_requires_filesystem_watcher', True)
    return func
def new_empty_segment_line():
    """Return a fresh line description with empty left and right segment lists."""
    return dict(left=[], right=[])
def add_spaces_left(pl, amount, segment):
    """Return the segment contents padded with *amount* spaces on the left."""
    padding = ' ' * amount
    return padding + segment['contents']
def add_spaces_right(pl, amount, segment):
    """Return the segment contents padded with *amount* spaces on the right."""
    padding = ' ' * amount
    return segment['contents'] + padding
def add_spaces_center(pl, amount, segment):
    """Distribute *amount* spaces around the contents; an odd leftover space
    goes on the left side."""
    half, leftover = divmod(amount, 2)
    left_pad = ' ' * (half + leftover)
    right_pad = ' ' * half
    return left_pad + segment['contents'] + right_pad
# Maps an ``align`` value to the padding function implementing it:
# 'l' pads on the right (left-aligned contents), 'r' pads on the left
# (right-aligned contents), 'c' centers the contents.
expand_functions = {
    'l': add_spaces_right,
    'r': add_spaces_left,
    'c': add_spaces_center,
}
class Theme(object):
    '''Segment layout for one extension, built from a theme configuration.

    Parses dividers, spacing and segment lists out of ``theme_config`` and
    resolves every listed segment via the segment getter.  Segments are
    grouped per line (the main line plus any 'above' entries) and per side
    ('left'/'right').
    '''
    def __init__(self,
                 ext,
                 theme_config,
                 common_config,
                 pl,
                 get_module_attr,
                 top_theme,
                 colorscheme,
                 main_theme_config=None,
                 run_once=False,
                 shutdown_event=None):
        self.colorscheme = colorscheme
        # Normalize all divider strings to unicode.
        self.dividers = theme_config['dividers']
        self.dividers = dict((
            (key, dict((k, u(v))
                for k, v in val.items()))
            for key, val in self.dividers.items()
        ))
        try:
            self.cursor_space_multiplier = 1 - (theme_config['cursor_space'] / 100)
        except KeyError:
            # No 'cursor_space' key in the config.
            self.cursor_space_multiplier = None
        self.cursor_columns = theme_config.get('cursor_columns')
        self.spaces = theme_config['spaces']
        self.segments = []
        self.EMPTY_SEGMENT = {
            'contents': None,
            'highlight': {'fg': False, 'bg': False, 'attrs': 0}
        }
        self.pl = pl
        theme_configs = [theme_config]
        if main_theme_config:
            theme_configs.append(main_theme_config)
        get_segment = gen_segment_getter(
            pl,
            ext,
            common_config,
            theme_configs,
            theme_config.get('default_module'),
            get_module_attr,
            top_theme
        )
        # First segdict describes the main line; 'above' holds extra lines.
        for segdict in itertools.chain((theme_config['segments'],),
                                       theme_config['segments'].get('above', ())):
            self.segments.append(new_empty_segment_line())
            for side in ['left', 'right']:
                for segment in segdict.get(side, []):
                    segment = get_segment(segment, side)
                    if segment:
                        if not run_once:
                            if segment['startup']:
                                try:
                                    segment['startup'](pl, shutdown_event)
                                except Exception as e:
                                    pl.error('Exception during {0} startup: {1}', segment['name'], str(e))
                                    # Drop segments whose startup hook failed.
                                    continue
                        self.segments[-1][side].append(segment)

    def shutdown(self):
        # Invoke each segment's shutdown hook; a non-callable hook (e.g.
        # None) raises TypeError here and is silently skipped.
        for line in self.segments:
            for segments in line.values():
                for segment in segments:
                    try:
                        segment['shutdown']()
                    except TypeError:
                        pass

    def get_divider(self, side='left', type='soft'):
        '''Return segment divider.'''
        return self.dividers[side][type]

    def get_spaces(self):
        # Return the configured 'spaces' value from the theme.
        return self.spaces

    def get_line_number(self):
        # One entry in self.segments per statusline line.
        return len(self.segments)

    def get_segments(self, side=None, line=0, segment_info=None, mode=None):
        '''Return all segments.

        Function segments are called, and all segments get their before/after
        and ljust/rjust properties applied.

        :param int line:
            Line number for which segments should be obtained. Is counted from
            zero (botmost line).
        '''
        for side in [side] if side else ['left', 'right']:
            parsed_segments = []
            for segment in self.segments[line][side]:
                # Only process segments whose display condition holds.
                if segment['display_condition'](self.pl, segment_info, mode):
                    process_segment(
                        self.pl,
                        side,
                        segment_info,
                        parsed_segments,
                        segment,
                        mode,
                        self.colorscheme,
                    )
            for segment in parsed_segments:
                self.pl.prefix = segment['name']
                try:
                    width = segment['width']
                    align = segment['align']
                    if width == 'auto' and segment['expand'] is None:
                        segment['expand'] = expand_functions.get(align)
                        if segment['expand'] is None:
                            self.pl.error('Align argument must be “r”, “l” or “c”, not “{0}”', align)
                    try:
                        segment['contents'] = segment['before'] + u(
                            segment['contents'] if segment['contents'] is not None else ''
                        ) + segment['after']
                    except Exception as e:
                        self.pl.exception('Failed to compute segment contents: {0}', str(e))
                        segment['contents'] = safe_unicode(segment.get('contents'))
                    # Align segment contents
                    if segment['width'] and segment['width'] != 'auto':
                        if segment['align'] == 'l':
                            segment['contents'] = segment['contents'].ljust(segment['width'])
                        elif segment['align'] == 'r':
                            segment['contents'] = segment['contents'].rjust(segment['width'])
                        elif segment['align'] == 'c':
                            segment['contents'] = segment['contents'].center(segment['width'])
                    # We need to yield a copy of the segment, or else mode-dependent
                    # segment contents can’t be cached correctly e.g. when caching
                    # non-current window contents for vim statuslines
                    yield segment.copy()
                except Exception as e:
                    self.pl.exception('Failed to compute segment: {0}', str(e))
                    fallback = get_fallback_segment()
                    fallback.update(side=side)
                    yield fallback
# -*- test-case-name: twisted.test.test_roots -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Python Roots: an abstract hierarchy representation for Twisted.
Maintainer: Glyph Lefkowitz
"""
# System imports
import types
from twisted.python import reflect
class NotSupportedError(NotImplementedError):
    """
    An exception meaning that the tree-manipulation operation
    you're attempting to perform is not supported by this collection.
    """
class Request:
    """An abstract representation of a request for an entity.

    The object doubles as the response: respond to it by calling
    self.write(data) until there is no data left, then self.finish().
    """

    # String name of the wire protocol being responded to (e.g. HTTP or FTP).
    wireProtocol = None

    def write(self, data):
        """Append some data to the response for this request."""
        raise NotImplementedError("%s.write" % reflect.qual(self.__class__))

    def finish(self):
        """Mark the response finished; flush all data to the network stream."""
        raise NotImplementedError("%s.finish" % reflect.qual(self.__class__))
class Entity:
    """A terminal object in a hierarchy, with no children.

    Represents a null interface; certain non-instance objects (strings and
    integers, notably) are Entities.

    The methods here are suggested rather than required, and are emulated
    on a per-protocol basis for types that do not supply them.
    """

    def render(self, request):
        """
        Produce a stream of bytes for the request, by calling
        request.write() and request.finish().
        """
        raise NotImplementedError("%s.render" % reflect.qual(self.__class__))
class Collection:
    """A static collection of entities.

    Entities may be stored statically via putEntity, or produced on demand
    by subclasses overriding getDynamicEntity.
    """

    def __init__(self, entities=None):
        """Initialize me, optionally with a pre-populated name->entity dict."""
        if entities is not None:
            self.entities = entities
        else:
            self.entities = {}

    def getStaticEntity(self, name):
        """Get an entity that was added to me using putEntity.

        Returns None if no such entity exists.
        """
        return self.entities.get(name)

    def getDynamicEntity(self, name, request):
        """Subclass hook: generate an entity on demand.

        Should return None on failure; the default always does.
        """

    def getEntity(self, name, request):
        """Retrieve an entity from me.

        Static entities are tried first and obscure dynamic ones; returns
        None if neither lookup succeeds.
        """
        ent = self.getStaticEntity(name)
        if ent is not None:
            return ent
        ent = self.getDynamicEntity(name, request)
        if ent is not None:
            return ent
        return None

    def putEntity(self, name, entity):
        """Store a static reference on 'name' for 'entity'."""
        self.entities[name] = entity

    def delEntity(self, name):
        """Remove the static reference for 'name'.

        Raises a KeyError if no such reference exists.
        """
        del self.entities[name]

    def storeEntity(self, name, request):
        """Store an entity for 'name', based on the content of 'request'."""
        raise NotSupportedError("%s.storeEntity" % reflect.qual(self.__class__))

    def removeEntity(self, name, request):
        """Remove an entity for 'name', based on the content of 'request'."""
        raise NotSupportedError("%s.removeEntity" % reflect.qual(self.__class__))

    def listStaticEntities(self):
        """Return a list of all (name, entity) pairs I store references to.

        Returns a real list (not a dict view) so that listEntities can
        concatenate it with the dynamic list on Python 3 as well — on
        Python 3, dict.items() returns a view that does not support '+'.
        """
        return list(self.entities.items())

    def listDynamicEntities(self, request):
        """Return the (name, entity) pairs I can generate on demand."""
        return []

    def listEntities(self, request):
        """Return all (name, entity) pairs I contain, static then dynamic."""
        return self.listStaticEntities() + self.listDynamicEntities(request)

    def listStaticNames(self):
        """Return a list of the names of entities I store references to."""
        return list(self.entities.keys())

    def listDynamicNames(self):
        """Return the names of entities I can generate on demand."""
        return []

    def listNames(self, request):
        """Return all names for entities that I contain."""
        return self.listStaticNames()
class ConstraintViolation(Exception):
    """An exception raised when a name or entity constraint is violated."""
class Constrained(Collection):
    """A collection whose names and/or entities must satisfy constraints."""

    def nameConstraint(self, name):
        """Decide whether an entity may be added under the given name.

        Return 1 when satisfied; otherwise return 0 or raise a descriptive
        ConstraintViolation.
        """
        return 1

    def entityConstraint(self, entity):
        """Decide whether the given entity may be added at all.

        Return 1 when satisfied; otherwise return 0 or raise a descriptive
        ConstraintViolation.
        """
        return 1

    def reallyPutEntity(self, name, entity):
        """Store the entity directly, bypassing constraint checks."""
        Collection.putEntity(self, name, entity)

    def putEntity(self, name, entity):
        """Store the entity if both constraints pass.

        Raises ConstraintViolation otherwise; the name constraint is
        checked before the entity constraint.
        """
        if not self.nameConstraint(name):
            raise ConstraintViolation("Name constraint violated.")
        if not self.entityConstraint(entity):
            raise ConstraintViolation("Entity constraint violated.")
        self.reallyPutEntity(name, entity)
class Locked(Constrained):
    """A constrained collection that can be locked against additions."""

    # Starts unlocked; lock() flips this permanently.
    locked = 0

    def lock(self):
        """Prevent any further entities from being stored."""
        self.locked = 1

    def entityConstraint(self, entity):
        """Reject every entity once locked."""
        return not self.locked
class Homogenous(Constrained):
    """A homogenous collection of entities.

    I will only contain entities that are an instance of the class or type
    specified by my 'entityType' attribute.
    """
    # NOTE(review): types.InstanceType only exists on Python 2 (old-style
    # class instances); this default makes the class Python 2 only.
    entityType = types.InstanceType

    def entityConstraint(self, entity):
        # Accept only instances of entityType; reject anything else loudly.
        if isinstance(entity, self.entityType):
            return 1
        else:
            raise ConstraintViolation("%s of incorrect type (%s)" %
                                      (entity, self.entityType))

    def getNameType(self):
        return "Name"

    def getEntityType(self):
        # Human-readable name of the required entity type.
        return self.entityType.__name__
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_TF2XLA_LIB_UTIL_H_
#define TENSORFLOW_COMPILER_TF2XLA_LIB_UTIL_H_
#include <cstdint>
#include "absl/types/span.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
// Returns a floating-point scalar constant of 'type' with 'value'.
// If 'type' is complex, returns a real value with zero imaginary component.
xla::XlaOp FloatLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
                        double value);
// Prepends zeros to 'starts' until the resulting index vector covers the
// major dimensions of 'x'.
// NOTE(review): the original comment mentioned "two tensors x and y" and
// "n_dims", which do not match this signature — confirm against util.cc.
xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
                                   absl::Span<const xla::XlaOp> starts);
// Returns an integer scalar constant of 'type' with 'value'.
// If 'type' is complex, returns a real value with zero imaginary component.
xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
                          int64_t value);
}  // namespace tensorflow
#endif  // TENSORFLOW_COMPILER_TF2XLA_LIB_UTIL_H_
import { test } from '../../test';
export default test({
warnings: [
{
code: 'css_unused_selector',
message: 'Unused CSS selector ".unused:has(y)"',
start: {
line: 41,
column: 1,
character: 378
},
end: {
line: 41,
column: 15,
character: 392
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ".unused:has(:global(y))"',
start: {
line: 44,
column: 1,
character: 413
},
end: {
line: 44,
column: 24,
character: 436
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "x:has(.unused)"',
start: {
line: 47,
column: 1,
character: 457
},
end: {
line: 47,
column: 15,
character: 471
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ":global(.foo):has(.unused)"',
start: {
line: 50,
column: 1,
character: 492
},
end: {
line: 50,
column: 27,
character: 518
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "x:has(y):has(.unused)"',
start: {
line: 60,
column: 1,
character: 626
},
end: {
line: 60,
column: 22,
character: 647
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ".unused"',
start: {
line: 79,
column: 2,
character: 852
},
end: {
line: 79,
column: 9,
character: 859
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ".unused x:has(y)"',
start: {
line: 95,
column: 1,
character: 1006
},
end: {
line: 95,
column: 17,
character: 1022
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ".unused:has(.unused)"',
start: {
line: 98,
column: 1,
character: 1043
},
end: {
line: 98,
column: 21,
character: 1063
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "x:has(> z)"',
start: {
line: 108,
column: 1,
character: 1163
},
end: {
line: 108,
column: 11,
character: 1173
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "x:has(> d)"',
start: {
line: 111,
column: 1,
character: 1194
},
end: {
line: 111,
column: 11,
character: 1204
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "x:has(~ y)"',
start: {
line: 131,
column: 1,
character: 1396
},
end: {
line: 131,
column: 11,
character: 1406
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "d:has(+ f)"',
start: {
line: 141,
column: 1,
character: 1494
},
end: {
line: 141,
column: 11,
character: 1504
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "f:has(~ d)"',
start: {
line: 144,
column: 1,
character: 1525
},
end: {
line: 144,
column: 11,
character: 1535
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ":has(.unused)"',
start: {
line: 152,
column: 2,
character: 1608
},
end: {
line: 152,
column: 15,
character: 1621
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "&:has(.unused)"',
start: {
line: 158,
column: 2,
character: 1679
},
end: {
line: 158,
column: 16,
character: 1693
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector ":global(.foo):has(.unused)"',
start: {
line: 166,
column: 1,
character: 1763
},
end: {
line: 166,
column: 27,
character: 1789
}
},
{
code: 'css_unused_selector',
message: 'Unused CSS selector "h:has(> h > i)"',
start: {
line: 173,
column: 1,
character: 1848
},
end: {
line: 173,
column: 15,
character: 1862
}
}
]
});
{
"data": [
{
"id": 26838044,
"label": "prometheus-linode-sd-exporter-1",
"group": "",
"status": "running",
"created": "2021-05-12T04:23:44",
"updated": "2021-05-12T04:23:44",
"type": "g6-standard-2",
"ipv4": [
"45.33.82.151",
"96.126.108.16",
"192.168.170.51",
"192.168.201.25"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128",
"image": "linode/arch",
"region": "us-east",
"specs": {
"disk": 81920,
"memory": 4096,
"vcpus": 2,
"gpus": 0,
"transfer": 4000
},
"alerts": {
"cpu": 180,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26837992,
"label": "prometheus-linode-sd-exporter-4",
"group": "",
"status": "running",
"created": "2021-05-12T04:22:06",
"updated": "2021-05-12T04:22:06",
"type": "g6-nanode-1",
"ipv4": [
"66.228.47.103",
"172.104.18.104",
"192.168.148.94"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128",
"image": "linode/ubuntu20.04",
"region": "us-east",
"specs": {
"disk": 25600,
"memory": 1024,
"vcpus": 1,
"gpus": 0,
"transfer": 1000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
}
],
"page": 1,
"pages": 1,
"results": 2
}
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
from collections import OrderedDict
import unittest
import test
from test.test_unittest.testmock import support
from test.test_unittest.testmock.support import SomeClass, is_instance
from test.support.import_helper import DirsOnSysPath
from test.test_importlib.util import uncache
from unittest.mock import (
NonCallableMock, CallableMixin, sentinel,
MagicMock, Mock, NonCallableMagicMock, patch, _patch,
DEFAULT, call, _get_target
)
# Name of the module holding builtins, used to build patch target strings.
builtin_string = 'builtins'
# Reference to this very module, so tests can patch its attributes.
PTModule = sys.modules[__name__]
# Dotted name of the PTModule attribute above.
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# Module-level sentinels that the patch tests replace and restore.
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
    """Helper class serving as a patch target in the tests."""

    foo = 'bar'

    def __init__(self, a):
        pass

    def f(self, a):
        pass

    def g(self):
        pass

    @staticmethod
    def static_method():
        pass

    @classmethod
    def class_method(cls):
        pass
class Bar(object):
    # Minimal helper class with a single no-op method (presumably a patch
    # target for tests elsewhere in this file — not used in the visible part).
    def a(self): pass
# Dotted path to Foo in this module, for use as a string patch target.
foo_name = '%s.Foo' % __name__
# Module-level no-op function; its default argument references Foo.
def function(a, b=Foo): pass
class Container(object):
    """Minimal mapping-like object backed by a plain dict, used to exercise
    dunder-method patching."""

    def __init__(self):
        self.values = {}

    def __getitem__(self, key):
        return self.values[key]

    def __setitem__(self, key, value):
        self.values[key] = value

    def __delitem__(self, key):
        del self.values[key]

    def __iter__(self):
        return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_string_as_target(self):
msg = "'Something' must be the actual object to be patched, not a str"
with self.assertRaisesRegex(TypeError, msg):
patch.object('Something', 'do_something')
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock1")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
    """spec=True uses the original object itself as the spec."""
    @patch('%s.SomeClass' % __name__, spec=True)
    def test(MockSomeClass):
        self.assertEqual(SomeClass, MockSomeClass)
        # Should not raise attribute error
        MockSomeClass.wibble

        self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)

    test()
def test_patch_object_with_spec_as_boolean(self):
    """patch.object with spec=True specs the mock from the original attribute."""
    @patch.object(PTModule, 'SomeClass', spec=True)
    def test(MockSomeClass):
        self.assertEqual(SomeClass, MockSomeClass)
        # Should not raise attribute error
        MockSomeClass.wibble

        self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)

    test()
def test_patch_class_acts_with_spec_is_inherited(self):
    """spec=True on a class: instances returned by calling the mock inherit the spec."""
    @patch('%s.SomeClass' % __name__, spec=True)
    def test(MockSomeClass):
        self.assertTrue(is_instance(MockSomeClass, MagicMock))
        instance = MockSomeClass()
        self.assertNotCallable(instance)
        # Should not raise attribute error
        instance.wibble

        self.assertRaises(AttributeError, lambda: instance.not_wibble)

    test()
def test_patch_with_create_mocks_non_existent_attributes(self):
    """create=True lets patch install an attribute that doesn't exist, and removes it on exit."""
    @patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
    def test():
        self.assertEqual(frooble, sentinel.Frooble)

    test()
    self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
    """patch.object(create=True) installs then fully removes a missing attribute."""
    @patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
    def test():
        self.assertEqual(SomeClass.frooble, sentinel.Frooble)

    test()
    self.assertNotHasAttr(SomeClass, 'frooble')
def test_patch_wont_create_by_default(self):
    """Without create=True, patching a missing name raises AttributeError at call time."""
    with self.assertRaises(AttributeError):
        @patch('%s.frooble' % builtin_string, sentinel.Frooble)
        def test(): pass

        test()
    self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
    """patch.object also refuses to create missing attributes by default."""
    with self.assertRaises(AttributeError):
        @patch.object(SomeClass, 'ord', sentinel.Frooble)
        def test(): pass
        test()
    self.assertNotHasAttr(SomeClass, 'ord')
def test_patch_builtins_without_create(self):
    """Builtins referenced through this module (ord, open) are patchable without create=True."""
    @patch(__name__+'.ord')
    def test_ord(mock_ord):
        mock_ord.return_value = 101
        return ord('c')

    @patch(__name__+'.open')
    def test_open(mock_open):
        m = mock_open.return_value
        m.read.return_value = 'abcd'

        fobj = open('doesnotexists.txt')
        data = fobj.read()
        fobj.close()
        return data

    self.assertEqual(test_ord(), 101)
    self.assertEqual(test_open(), 'abcd')
def test_patch_with_static_methods(self):
    """A staticmethod can be replaced by another staticmethod and is restored afterwards."""
    class Foo(object):
        @staticmethod
        def woot():
            return sentinel.Static

    @patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
    def anonymous():
        self.assertEqual(Foo.woot(), sentinel.Patched)
    anonymous()

    self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
    """patch.object on an object referenced through a local name restores the original value."""
    foo = sentinel.Foo
    @patch.object(sentinel, 'Foo', 'Foo')
    def anonymous():
        self.assertEqual(sentinel.Foo, 'Foo')
    anonymous()

    self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
    """Attributes stored in __slots__ (no instance __dict__) can be patched and restored."""
    class Foo(object):
        __slots__ = ('Foo',)

    foo = Foo()
    foo.Foo = sentinel.Foo

    @patch.object(foo, 'Foo', 'Foo')
    def anonymous():
        self.assertEqual(foo.Foo, 'Foo')
    anonymous()

    self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
    """patch.object used as a class decorator patches only TEST_PREFIX methods."""
    class Something(object):
        attribute = sentinel.Original

    class Foo(object):

        def test_method(other_self):
            self.assertEqual(Something.attribute, sentinel.Patched,
                             "unpatched")

        def not_test_method(other_self):
            self.assertEqual(Something.attribute, sentinel.Original,
                             "non-test method patched")

    Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)

    f = Foo()
    f.test_method()
    f.not_test_method()

    self.assertEqual(Something.attribute, sentinel.Original,
                     "patch not restored")
def test_patch_class_decorator(self):
    """patch as a class decorator: test methods get the mock, non-test methods see the original."""
    class Something(object):
        attribute = sentinel.Original

    class Foo(object):

        test_class_attr = 'whatever'

        def test_method(other_self, mock_something):
            self.assertEqual(PTModule.something, mock_something,
                             "unpatched")

        def not_test_method(other_self):
            self.assertEqual(PTModule.something, sentinel.Something,
                             "non-test method patched")
    Foo = patch('%s.something' % __name__)(Foo)

    f = Foo()
    f.test_method()
    f.not_test_method()

    self.assertEqual(Something.attribute, sentinel.Original,
                     "patch not restored")
    self.assertEqual(PTModule.something, sentinel.Something,
                     "patch not restored")
def test_patchobject_twice(self):
    """Patching the same attribute twice (stacked) still unwinds back to the original."""
    class Something(object):
        attribute = sentinel.Original
        next_attribute = sentinel.Original2

    @patch.object(Something, 'attribute', sentinel.Patched)
    @patch.object(Something, 'attribute', sentinel.Patched)
    def test():
        self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")

    test()

    self.assertEqual(Something.attribute, sentinel.Original,
                     "patch not restored")
def test_patch_dict(self):
    """patch.dict restores the dict after in-place mutation; accepts dict or item-list updates."""
    foo = {'initial': object(), 'other': 'something'}
    original = foo.copy()

    @patch.dict(foo)
    def test():
        foo['a'] = 3
        del foo['initial']
        foo['other'] = 'something else'

    test()

    self.assertEqual(foo, original)

    @patch.dict(foo, {'a': 'b'})
    def test():
        self.assertEqual(len(foo), 3)
        self.assertEqual(foo['a'], 'b')

    test()

    self.assertEqual(foo, original)

    @patch.dict(foo, [('a', 'b')])
    def test():
        self.assertEqual(len(foo), 3)
        self.assertEqual(foo['a'], 'b')

    test()

    self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
    """patch.dict works on any dict-like container (here the test helper Container)."""
    foo = Container()
    foo['initial'] = object()
    foo['other'] = 'something'

    original = foo.values.copy()

    @patch.dict(foo)
    def test():
        foo['a'] = 3
        del foo['initial']
        foo['other'] = 'something else'

    test()

    self.assertEqual(foo.values, original)

    @patch.dict(foo, {'a': 'b'})
    def test():
        self.assertEqual(len(foo.values), 3)
        self.assertEqual(foo['a'], 'b')

    test()

    self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
    """clear=True empties the dict before applying updates; original contents come back on exit."""
    foo = {'initial': object(), 'other': 'something'}
    original = foo.copy()

    @patch.dict(foo, clear=True)
    def test():
        self.assertEqual(foo, {})
        foo['a'] = 3
        foo['other'] = 'something else'

    test()

    self.assertEqual(foo, original)

    @patch.dict(foo, {'a': 'b'}, clear=True)
    def test():
        self.assertEqual(foo, {'a': 'b'})

    test()

    self.assertEqual(foo, original)

    @patch.dict(foo, [('a', 'b')], clear=True)
    def test():
        self.assertEqual(foo, {'a': 'b'})

    test()

    self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
    """clear=True behaves the same for dict-like containers as for real dicts."""
    foo = Container()
    foo['initial'] = object()
    foo['other'] = 'something'

    original = foo.values.copy()

    @patch.dict(foo, clear=True)
    def test():
        self.assertEqual(foo.values, {})
        foo['a'] = 3
        foo['other'] = 'something else'

    test()

    self.assertEqual(foo.values, original)

    @patch.dict(foo, {'a': 'b'}, clear=True)
    def test():
        self.assertEqual(foo.values, {'a': 'b'})

    test()

    self.assertEqual(foo.values, original)
def test_patch_dict_as_context_manager(self):
    """Used as a context manager, patch.dict yields the patched dict and restores it on exit."""
    target = {'a': 'b'}
    with patch.dict(target, a='c') as inside:
        self.assertEqual(inside, {'a': 'c'})
    self.assertEqual(target, {'a': 'b'})
def test_name_preserved(self):
    """All patch variants preserve the decorated function's __name__ via functools.wraps."""
    foo = {}

    @patch('%s.SomeClass' % __name__, object())
    @patch('%s.SomeClass' % __name__, object(), autospec=True)
    @patch.object(SomeClass, object())
    @patch.dict(foo)
    def some_name(): pass

    self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
    """The dict is restored even when the patched function raises."""
    foo = {}

    @patch.dict(foo, {'a': 'b'})
    def test():
        raise NameError('Konrad')

    with self.assertRaises(NameError):
        test()

    self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
    """A dotted-string target ('os.environ') is resolved to the dict to patch."""
    @patch.dict('os.environ', {'konrad_delong': 'some value'})
    def check_env():
        self.assertIn('konrad_delong', os.environ)

    check_env()
def test_patch_dict_decorator_resolution(self):
    # bpo-35512: Ensure that patch with a string target resolves to
    # the new dictionary during function call
    original = support.target.copy()

    @patch.dict('test.test_unittest.testmock.support.target', {'bar': 'BAR'})
    def test():
        self.assertEqual(support.target, {'foo': 'BAZ', 'bar': 'BAR'})

    try:
        # rebind support.target to a fresh dict, then verify the patch
        # applied to the rebound dict (resolved at call time, not at
        # decoration time)
        support.target = {'foo': 'BAZ'}
        test()
        self.assertEqual(support.target, {'foo': 'BAZ'})
    finally:
        support.target = original
def test_patch_spec_set(self):
    """spec_set forbids setting attributes not on the spec, for both patch and patch.object."""
    @patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
    def test(MockClass):
        MockClass.z = 'foo'

    self.assertRaises(AttributeError, test)

    @patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
    def test(MockClass):
        MockClass.z = 'foo'

    self.assertRaises(AttributeError, test)

    @patch('%s.SomeClass' % __name__, spec_set=True)
    def test(MockClass):
        MockClass.z = 'foo'

    self.assertRaises(AttributeError, test)

    @patch.object(support, 'SomeClass', spec_set=True)
    def test(MockClass):
        MockClass.z = 'foo'

    self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
    """spec_set restrictions propagate to instances created by calling the mock class."""
    @patch('%s.SomeClass' % __name__, spec_set=True)
    def test(MockClass):
        instance = MockClass()
        instance.z = 'foo'

    self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
    """start() swaps the target in, stop() restores the original."""
    original = something
    patcher = patch('%s.something' % __name__)
    self.assertIs(something, original)
    mock = patcher.start()
    try:
        self.assertIsNot(mock, original)
        self.assertIs(something, mock)
    finally:
        patcher.stop()
    self.assertIs(something, original)
def test_stop_without_start(self):
    """bpo-36366: stop() before start() is a harmless no-op that returns None."""
    unstarted = patch(foo_name, 'bar', 3)
    self.assertIsNone(unstarted.stop())
def test_stop_idempotent(self):
    """bpo-36366: calling stop() a second time on a stopped patcher returns None."""
    p = patch(foo_name, 'bar', 3)
    p.start()
    p.stop()
    self.assertIsNone(p.stop())
def test_exit_idempotent(self):
    """An explicit __exit__ inside the with-block must not break the implicit exit."""
    p = patch(foo_name, 'bar', 3)
    with p:
        p.__exit__(None, None, None)
def test_second_start_failure(self):
    """start() on an already-started patcher raises RuntimeError."""
    p = patch(foo_name, 'bar', 3)
    p.start()
    try:
        with self.assertRaises(RuntimeError):
            p.start()
    finally:
        p.stop()
def test_second_enter_failure(self):
    """start() while the patcher is active as a context manager raises RuntimeError."""
    p = patch(foo_name, 'bar', 3)
    with p:
        with self.assertRaises(RuntimeError):
            p.start()
def test_second_start_after_stop(self):
    """A patcher may be started and stopped repeatedly."""
    p = patch(foo_name, 'bar', 3)
    for _ in range(2):
        p.start()
        p.stop()
def test_property_setters(self):
    """The patcher's is_local/target/temp_original attributes are writable; mutating them
    mid-patch changes what stop() restores (demonstrated by the final assertions)."""
    mock_object = Mock()
    mock_bar = mock_object.bar
    patcher = patch.object(mock_object, 'bar', 'x')
    with patcher:
        self.assertEqual(patcher.is_local, False)
        self.assertIs(patcher.target, mock_object)
        self.assertEqual(patcher.temp_original, mock_bar)
        patcher.is_local = True
        patcher.target = mock_bar
        patcher.temp_original = mock_object
        self.assertEqual(patcher.is_local, True)
        self.assertIs(patcher.target, mock_bar)
        self.assertEqual(patcher.temp_original, mock_object)
    # if changes are left intact, they may lead to disruption as shown below (it might be what someone needs though)
    self.assertEqual(mock_bar.bar, mock_object)
    self.assertEqual(mock_object.bar, 'x')
def test_patchobject_start_stop(self):
    """patch.object start()/stop(): the replacement value is installed then undone."""
    original = something
    patcher = patch.object(PTModule, 'something', 'foo')
    self.assertIs(something, original)
    replaced = patcher.start()
    try:
        self.assertEqual(replaced, 'foo')
        self.assertIs(something, replaced)
    finally:
        patcher.stop()
    self.assertIs(something, original)
def test_patch_dict_start_stop(self):
    """patch.dict start()/stop() with clear=True replaces then restores the contents."""
    d = {'foo': 'bar'}
    original = d.copy()
    patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
    self.assertEqual(d, original)

    patcher.start()
    try:
        self.assertEqual(d, {'spam': 'eggs'})
    finally:
        patcher.stop()
    self.assertEqual(d, original)
def test_patch_dict_stop_without_start(self):
    """stop() on a never-started patch.dict returns a falsey value and leaves the dict alone."""
    d = {'foo': 'bar'}
    original = d.copy()
    patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
    self.assertFalse(patcher.stop())
    self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
    """patch.dict as a class decorator patches every test method, repeatedly and independently."""
    this = self
    d = {'spam': 'eggs'}
    original = d.copy()

    class Test(object):
        def test_first(self):
            this.assertEqual(d, {'foo': 'bar'})
        def test_second(self):
            this.assertEqual(d, {'foo': 'bar'})

    Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
    self.assertEqual(d, original)

    test = Test()

    test.test_first()
    self.assertEqual(d, original)

    test.test_second()
    self.assertEqual(d, original)

    # a second instance gets fresh, working patches too
    test = Test()

    test.test_first()
    self.assertEqual(d, original)

    test.test_second()
    self.assertEqual(d, original)
def test_get_only_proxy(self):
    """patch.object through a __getattr__-only proxy: the patch applies and is
    fully undone, leaving nothing behind in the proxy's __dict__."""
    class Something(object):
        foo = 'foo'
    class SomethingElse:
        foo = 'foo'

    # Bug fix: the fourth loop element was the SomethingElse *class* again
    # (duplicating the second element), so an instance of the second class
    # was never exercised.  Use an instance, mirroring Something().
    for thing in Something, SomethingElse, Something(), SomethingElse():
        proxy = _get_proxy(thing)

        @patch.object(proxy, 'foo', 'bar')
        def test():
            self.assertEqual(proxy.foo, 'bar')

        test()
        self.assertEqual(proxy.foo, 'foo')
        self.assertEqual(thing.foo, 'foo')
        self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
    """Like test_get_only_proxy, but the proxy also forwards __setattr__/__delattr__."""
    class Something(object):
        foo = 'foo'
    class SomethingElse:
        foo = 'foo'

    # Bug fix: the loop variable was ignored — every iteration proxied
    # Something, so the other targets were never tested.  Proxy `thing`
    # instead, and use a SomethingElse instance for the fourth case.
    for thing in Something, SomethingElse, Something(), SomethingElse():
        proxy = _get_proxy(thing, get_only=False)

        @patch.object(proxy, 'foo', 'bar')
        def test():
            self.assertEqual(proxy.foo, 'bar')

        test()
        self.assertEqual(proxy.foo, 'foo')
        self.assertEqual(thing.foo, 'foo')
        self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
    """Extra keyword args to patch() configure the created mock (incl. dotted names)."""
    kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
              'foo': MagicMock()}

    patcher = patch(foo_name, **kwargs)
    mock = patcher.start()
    patcher.stop()

    self.assertRaises(KeyError, mock)
    self.assertEqual(mock.foo.bar(), 33)
    self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
    """Same keyword-arg configuration works through patch.object."""
    kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
              'foo': MagicMock()}

    patcher = patch.object(Foo, 'f', **kwargs)
    mock = patcher.start()
    patcher.stop()

    self.assertRaises(KeyError, mock)
    self.assertEqual(mock.foo.bar(), 33)
    self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
    """Keyword args to patch.dict are merged into the dict for the patch's duration."""
    original = {'foo': 'bar'}
    copy = original.copy()

    patcher = patch.dict(original, foo=3, bar=4, baz=5)
    patcher.start()

    try:
        self.assertEqual(original, dict(foo=3, bar=4, baz=5))
    finally:
        patcher.stop()

    self.assertEqual(original, copy)
def test_autospec(self):
    """autospec=True: the mock enforces the real signatures of the class, its methods,
    nested class and attributes, both on the class mock and on instance mocks."""
    # NOTE(review): indentation was lost in extraction; Bar is nested inside
    # Boo here, mirroring the module-level Foo's layout — confirm against
    # upstream.  Boo itself is not referenced below.
    class Boo(object):
        def __init__(self, a): pass
        def f(self, a): pass
        def g(self): pass
        foo = 'bar'

        class Bar(object):
            def a(self): pass

    def _test(mock):
        mock(1)
        mock.assert_called_with(1)
        self.assertRaises(TypeError, mock)

    def _test2(mock):
        mock.f(1)
        mock.f.assert_called_with(1)
        self.assertRaises(TypeError, mock.f)

        mock.g()
        mock.g.assert_called_with()
        self.assertRaises(TypeError, mock.g, 1)

        self.assertRaises(AttributeError, getattr, mock, 'h')

        mock.foo.lower()
        mock.foo.lower.assert_called_with()
        self.assertRaises(AttributeError, getattr, mock.foo, 'bar')

        mock.Bar()
        mock.Bar.assert_called_with()

        mock.Bar.a()
        mock.Bar.a.assert_called_with()
        self.assertRaises(TypeError, mock.Bar.a, 1)

        mock.Bar().a()
        mock.Bar().a.assert_called_with()
        self.assertRaises(TypeError, mock.Bar().a, 1)

        self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
        self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')

    def function(mock):
        _test(mock)
        _test2(mock)
        _test2(mock(1))
        self.assertIs(mock, Foo)
        return mock

    test = patch(foo_name, autospec=True)(function)

    mock = test()
    self.assertIsNot(Foo, mock)
    # test patching a second time works
    test()

    module = sys.modules[__name__]
    test = patch.object(module, 'Foo', autospec=True)(function)

    mock = test()
    self.assertIsNot(Foo, mock)
    # test patching a second time works
    test()
def test_autospec_function(self):
    """autospec on a function: call assertions work and the signature is enforced."""
    @patch('%s.function' % __name__, autospec=True)
    def test(mock):
        function.assert_not_called()
        self.assertRaises(AssertionError, function.assert_called)
        self.assertRaises(AssertionError, function.assert_called_once)
        function(1)
        self.assertRaises(AssertionError, function.assert_not_called)
        function.assert_called_with(1)
        function.assert_called()
        function.assert_called_once()
        function(2, 3)
        function.assert_called_with(2, 3)

        self.assertRaises(TypeError, function)
        self.assertRaises(AttributeError, getattr, function, 'foo')

    test()
def test_autospec_keywords(self):
    """autospec combines with configuration keywords such as return_value."""
    @patch('%s.function' % __name__, autospec=True,
           return_value=3)
    def test(mock_function):
        #self.assertEqual(function.abc, 'foo')
        return function(1, 2)

    result = test()
    self.assertEqual(result, 3)
def test_autospec_staticmethod(self):
    """An autospec'd staticmethod records calls like a normal mock."""
    with patch('%s.Foo.static_method' % __name__, autospec=True) as method:
        Foo.static_method()
        method.assert_called_once_with()
def test_autospec_classmethod(self):
    """An autospec'd classmethod records calls like a normal mock."""
    with patch('%s.Foo.class_method' % __name__, autospec=True) as method:
        Foo.class_method()
        method.assert_called_once_with()
def test_autospec_staticmethod_signature(self):
    # Patched methods which are decorated with @staticmethod should have the same signature
    class Foo:
        @staticmethod
        def static_method(a, b=10, *, c): pass

    Foo.static_method(1, 2, c=3)

    with patch.object(Foo, 'static_method', autospec=True) as method:
        method(1, 2, c=3)
        self.assertRaises(TypeError, method)
        self.assertRaises(TypeError, method, 1)
        self.assertRaises(TypeError, method, 1, 2, 3, c=4)
def test_autospec_classmethod_signature(self):
    # Patched methods which are decorated with @classmethod should have the same signature
    class Foo:
        @classmethod
        def class_method(cls, a, b=10, *, c): pass

    Foo.class_method(1, 2, c=3)

    with patch.object(Foo, 'class_method', autospec=True) as method:
        method(1, 2, c=3)
        self.assertRaises(TypeError, method)
        self.assertRaises(TypeError, method, 1)
        self.assertRaises(TypeError, method, 1, 2, 3, c=4)
def test_autospec_with_new(self):
    """Combining new= with autospec=True is rejected at start() time."""
    patcher = patch('%s.function' % __name__, new=3, autospec=True)
    self.assertRaises(TypeError, patcher.start)

    module = sys.modules[__name__]
    patcher = patch.object(module, 'function', new=3, autospec=True)
    self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
    """autospec may be an explicit object (here a Foo subclass) to spec from."""
    class Bar(Foo):
        extra = []

    patcher = patch(foo_name, autospec=Bar)
    mock = patcher.start()
    try:
        self.assertIsInstance(mock, Bar)
        self.assertIsInstance(mock.extra, list)
    finally:
        patcher.stop()
def test_autospec_inherits(self):
    """An autospec'd class mock and its instances pass isinstance checks for the real class."""
    FooClass = Foo
    patcher = patch(foo_name, autospec=True)
    mock = patcher.start()
    try:
        self.assertIsInstance(mock, FooClass)
        self.assertIsInstance(mock(3), FooClass)
    finally:
        patcher.stop()
def test_autospec_name(self):
    """autospec mocks carry descriptive names ('Foo', 'Foo.f', 'Foo()', …) in their reprs."""
    patcher = patch(foo_name, autospec=True)
    mock = patcher.start()

    try:
        self.assertIn(" name='Foo'", repr(mock))
        self.assertIn(" name='Foo.f'", repr(mock.f))
        self.assertIn(" name='Foo()'", repr(mock(None)))
        self.assertIn(" name='Foo().f'", repr(mock(None).f))
    finally:
        patcher.stop()
def test_tracebacks(self):
    """Patch machinery must not hide the original traceback of an exception raised inside."""
    @patch.object(Foo, 'f', object())
    def test():
        raise AssertionError
    try:
        test()
    except:
        err = sys.exc_info()

    result = unittest.TextTestResult(None, None, 0)
    traceback = result._exc_info_to_string(err, self)
    self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
    """new_callable is invoked afresh on every start(), yielding distinct replacements."""
    patcher = patch(foo_name, new_callable=NonCallableMagicMock)

    m1 = patcher.start()
    patcher.stop()
    m2 = patcher.start()
    patcher.stop()

    self.assertIsNot(m1, m2)
    for mock in m1, m2:
        self.assertNotCallable(mock)
def test_new_callable_patch_object(self):
    """Same fresh-per-start semantics for patch.object with new_callable."""
    patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)

    m1 = patcher.start()
    patcher.stop()
    m2 = patcher.start()
    patcher.stop()

    self.assertIsNot(m1, m2)
    for mock in m1, m2:
        self.assertNotCallable(mock)
def test_new_callable_keyword_arguments(self):
    """Extra keyword args to patch() are forwarded to the new_callable factory."""
    class Bar(object):
        kwargs = None
        def __init__(self, **kwargs):
            Bar.kwargs = kwargs

    patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
    m = patcher.start()
    try:
        self.assertIs(type(m), Bar)
        self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
    finally:
        patcher.stop()
def test_new_callable_spec(self):
    """spec/spec_set are passed through to the new_callable factory as keyword args."""
    class Bar(object):
        kwargs = None
        def __init__(self, **kwargs):
            Bar.kwargs = kwargs

    patcher = patch(foo_name, new_callable=Bar, spec=Bar)
    patcher.start()
    try:
        self.assertEqual(Bar.kwargs, dict(spec=Bar))
    finally:
        patcher.stop()

    patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
    patcher.start()
    try:
        self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
    finally:
        patcher.stop()
def test_new_callable_create(self):
    """new_callable still honours create=: missing attrs need create=True."""
    non_existent_attr = '%s.weeeee' % foo_name
    p = patch(non_existent_attr, new_callable=NonCallableMock)
    self.assertRaises(AttributeError, p.start)

    p = patch(non_existent_attr, new_callable=NonCallableMock,
              create=True)
    m = p.start()
    try:
        self.assertNotCallable(m, magic=False)
    finally:
        p.stop()
def test_new_callable_incompatible_with_new(self):
    """Supplying both new= and new_callable= raises ValueError immediately."""
    self.assertRaises(
        ValueError, patch, foo_name, new=object(), new_callable=MagicMock
    )
    self.assertRaises(
        ValueError, patch.object, Foo, 'f', new=object(),
        new_callable=MagicMock
    )
def test_new_callable_incompatible_with_autospec(self):
    """Supplying both new_callable= and autospec= raises ValueError immediately."""
    self.assertRaises(
        ValueError, patch, foo_name, new_callable=MagicMock,
        autospec=True
    )
    self.assertRaises(
        ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
        autospec=True
    )
def test_new_callable_inherit_for_mocks(self):
    """For Mock-derived new_callable classes, spec/spec_set restrictions reach instances."""
    class MockSub(Mock):
        pass

    MockClasses = (
        NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
    )
    for Klass in MockClasses:
        for arg in 'spec', 'spec_set':
            kwargs = {arg: True}
            p = patch(foo_name, new_callable=Klass, **kwargs)
            m = p.start()
            try:
                instance = m.return_value
                self.assertRaises(AttributeError, getattr, instance, 'x')
            finally:
                p.stop()
def test_new_callable_inherit_non_mock(self):
    """For non-Mock new_callable classes, spec=True resolves to the original (Foo) and is
    passed positionally to the factory."""
    class NotAMock(object):
        def __init__(self, spec):
            self.spec = spec

    p = patch(foo_name, new_callable=NotAMock, spec=True)
    m = p.start()
    try:
        self.assertTrue(is_instance(m, NotAMock))
        self.assertRaises(AttributeError, getattr, m, 'return_value')
    finally:
        p.stop()

    self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
    """new_callable works when patch decorates a whole class of test methods."""
    test = self
    original = Foo
    class SomeTest(object):

        def _test(self, mock_foo):
            test.assertIsNot(Foo, original)
            test.assertIs(Foo, mock_foo)
            test.assertIsInstance(Foo, SomeClass)

        def test_two(self, mock_foo):
            self._test(mock_foo)
        def test_one(self, mock_foo):
            self._test(mock_foo)

    SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
    SomeTest().test_one()
    SomeTest().test_two()
    self.assertIs(Foo, original)
def test_patch_multiple(self):
    """patch.multiple patches several attributes at once (string target or object) and
    restores them all, as patcher and as decorator."""
    original_foo = Foo
    original_f = Foo.f
    original_g = Foo.g

    patcher1 = patch.multiple(foo_name, f=1, g=2)
    patcher2 = patch.multiple(Foo, f=1, g=2)

    for patcher in patcher1, patcher2:
        patcher.start()
        try:
            self.assertIs(Foo, original_foo)
            self.assertEqual(Foo.f, 1)
            self.assertEqual(Foo.g, 2)
        finally:
            patcher.stop()

        self.assertIs(Foo, original_foo)
        self.assertEqual(Foo.f, original_f)
        self.assertEqual(Foo.g, original_g)

    @patch.multiple(foo_name, f=3, g=4)
    def test():
        self.assertIs(Foo, original_foo)
        self.assertEqual(Foo.f, 3)
        self.assertEqual(Foo.g, 4)

    test()
def test_patch_multiple_no_kwargs(self):
    """patch.multiple without any attribute keywords is a ValueError."""
    self.assertRaises(ValueError, patch.multiple, foo_name)
    self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
    """DEFAULT values become MagicMocks passed to the function; explicit values are not passed."""
    original_foo = Foo
    original_f = Foo.f
    original_g = Foo.g

    @patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
    def test(f, foo):
        self.assertIs(Foo, original_foo)
        self.assertIs(Foo.f, f)
        self.assertEqual(Foo.g, 3)
        self.assertIs(Foo.foo, foo)
        self.assertTrue(is_instance(f, MagicMock))
        self.assertTrue(is_instance(foo, MagicMock))

    test()
    self.assertEqual(Foo.f, original_f)
    self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
    """Hand-built additional_patchers chain behaves like patch.multiple regardless of order."""
    original_f = Foo.f
    original_g = Foo.g

    patcher = patch.object(Foo, 'f', 3)
    patcher.attribute_name = 'f'

    other = patch.object(Foo, 'g', DEFAULT)
    other.attribute_name = 'g'
    patcher.additional_patchers = [other]

    @patcher
    def test(g):
        self.assertIs(Foo.g, g)
        self.assertEqual(Foo.f, 3)

    test()
    self.assertEqual(Foo.f, original_f)
    self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
    """patch.multiple and patch stack in any order; DEFAULT mocks always arrive as kwargs."""
    original_foo = Foo
    original_f = Foo.f
    original_g = Foo.g

    @patch.multiple(foo_name, f=DEFAULT)
    @patch.multiple(foo_name, foo=DEFAULT)
    @patch(foo_name + '.g')
    def test1(g, **kwargs):
        _test(g, **kwargs)

    @patch.multiple(foo_name, f=DEFAULT)
    @patch(foo_name + '.g')
    @patch.multiple(foo_name, foo=DEFAULT)
    def test2(g, **kwargs):
        _test(g, **kwargs)

    @patch(foo_name + '.g')
    @patch.multiple(foo_name, f=DEFAULT)
    @patch.multiple(foo_name, foo=DEFAULT)
    def test3(g, **kwargs):
        _test(g, **kwargs)

    def _test(g, **kwargs):
        f = kwargs.pop('f')
        foo = kwargs.pop('foo')
        self.assertFalse(kwargs)

        self.assertIs(Foo, original_foo)
        self.assertIs(Foo.f, f)
        self.assertIs(Foo.g, g)
        self.assertIs(Foo.foo, foo)
        self.assertTrue(is_instance(f, MagicMock))
        self.assertTrue(is_instance(g, MagicMock))
        self.assertTrue(is_instance(foo, MagicMock))

    test1()
    test2()
    test3()
    self.assertEqual(Foo.f, original_f)
    self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
    """start() returns a dict of only the DEFAULT-created mocks, keyed by attribute name."""
    original_foo = Foo
    original_f = Foo.f
    original_g = Foo.g

    patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)

    result = patcher.start()
    try:
        f = result['f']
        foo = result['foo']
        self.assertEqual(set(result), set(['f', 'foo']))

        self.assertIs(Foo, original_foo)
        self.assertIs(Foo.f, f)
        self.assertIs(Foo.foo, foo)
        self.assertTrue(is_instance(f, MagicMock))
        self.assertTrue(is_instance(foo, MagicMock))
    finally:
        patcher.stop()

    self.assertEqual(Foo.f, original_f)
    self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
    """patch.multiple as a class decorator passes the DEFAULT mocks to every test method."""
    test = self
    original_foo = Foo
    original_f = Foo.f
    original_g = Foo.g

    class SomeTest(object):

        def _test(self, f, foo):
            test.assertIs(Foo, original_foo)
            test.assertIs(Foo.f, f)
            test.assertEqual(Foo.g, 3)
            test.assertIs(Foo.foo, foo)
            test.assertTrue(is_instance(f, MagicMock))
            test.assertTrue(is_instance(foo, MagicMock))

        def test_two(self, f, foo):
            self._test(f, foo)
        def test_one(self, f, foo):
            self._test(f, foo)

    SomeTest = patch.multiple(
        foo_name, f=DEFAULT, g=3, foo=DEFAULT
    )(SomeTest)

    thing = SomeTest()
    thing.test_one()
    thing.test_two()

    self.assertEqual(Foo.f, original_f)
    self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
    """patch.multiple refuses missing attributes unless create=True; then removes them on stop."""
    patcher = patch.multiple(Foo, blam='blam')
    self.assertRaises(AttributeError, patcher.start)

    patcher = patch.multiple(Foo, blam='blam', create=True)
    patcher.start()
    try:
        self.assertEqual(Foo.blam, 'blam')
    finally:
        patcher.stop()

    self.assertNotHasAttr(Foo, 'blam')
def test_patch_multiple_spec_set(self):
    # if spec_set works then we can assume that spec and autospec also
    # work as the underlying machinery is the same
    patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
    result = patcher.start()
    try:
        self.assertEqual(Foo.foo, result['foo'])
        Foo.foo.a(1)
        Foo.foo.b(2)
        Foo.foo.a.assert_called_with(1)
        Foo.foo.b.assert_called_with(2)
        self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
    finally:
        patcher.stop()
def test_patch_multiple_new_callable(self):
    """new_callable with patch.multiple creates a distinct instance per attribute."""
    class Thing(object):
        pass

    patcher = patch.multiple(
        Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
    )
    result = patcher.start()
    try:
        self.assertIs(Foo.f, result['f'])
        self.assertIs(Foo.g, result['g'])
        self.assertIsInstance(Foo.f, Thing)
        self.assertIsInstance(Foo.g, Thing)
        self.assertIsNot(Foo.f, Foo.g)
    finally:
        patcher.stop()
def test_nested_patch_failure(self):
    """If any patch in a stack fails to start, the already-applied ones are unwound,
    wherever the failing patch sits in the stack."""
    original_f = Foo.f
    original_g = Foo.g

    @patch.object(Foo, 'g', 1)
    @patch.object(Foo, 'missing', 1)
    @patch.object(Foo, 'f', 1)
    def thing1(): pass

    @patch.object(Foo, 'missing', 1)
    @patch.object(Foo, 'g', 1)
    @patch.object(Foo, 'f', 1)
    def thing2(): pass

    @patch.object(Foo, 'g', 1)
    @patch.object(Foo, 'f', 1)
    @patch.object(Foo, 'missing', 1)
    def thing3(): pass

    for func in thing1, thing2, thing3:
        self.assertRaises(AttributeError, func)
        self.assertEqual(Foo.f, original_f)
        self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
    """A new_callable factory that raises mid-stack must not leave other patches applied."""
    original_f = Foo.f
    original_g = Foo.g
    original_foo = Foo.foo

    def crasher():
        raise NameError('crasher')

    @patch.object(Foo, 'g', 1)
    @patch.object(Foo, 'foo', new_callable=crasher)
    @patch.object(Foo, 'f', 1)
    def thing1(): pass

    @patch.object(Foo, 'foo', new_callable=crasher)
    @patch.object(Foo, 'g', 1)
    @patch.object(Foo, 'f', 1)
    def thing2(): pass

    @patch.object(Foo, 'g', 1)
    @patch.object(Foo, 'f', 1)
    @patch.object(Foo, 'foo', new_callable=crasher)
    def thing3(): pass

    for func in thing1, thing2, thing3:
        self.assertRaises(NameError, func)
        self.assertEqual(Foo.f, original_f)
        self.assertEqual(Foo.g, original_g)
        self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
    """A failing additional patcher (missing attr) unwinds the whole group, in either order."""
    original_f = Foo.f
    original_g = Foo.g

    patcher = patch.object(Foo, 'f', 1)
    patcher.attribute_name = 'f'

    good = patch.object(Foo, 'g', 1)
    good.attribute_name = 'g'

    bad = patch.object(Foo, 'missing', 1)
    bad.attribute_name = 'missing'

    for additionals in [good, bad], [bad, good]:
        patcher.additional_patchers = additionals

        @patcher
        def func(): pass

        self.assertRaises(AttributeError, func)
        self.assertEqual(Foo.f, original_f)
        self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
    """A crashing new_callable in an additional patcher unwinds the whole group too."""
    original_f = Foo.f
    original_g = Foo.g
    original_foo = Foo.foo

    def crasher():
        raise NameError('crasher')

    patcher = patch.object(Foo, 'f', 1)
    patcher.attribute_name = 'f'

    good = patch.object(Foo, 'g', 1)
    good.attribute_name = 'g'

    bad = patch.object(Foo, 'foo', new_callable=crasher)
    bad.attribute_name = 'foo'

    for additionals in [good, bad], [bad, good]:
        patcher.additional_patchers = additionals

        @patcher
        def func(): pass

        self.assertRaises(NameError, func)
        self.assertEqual(Foo.f, original_f)
        self.assertEqual(Foo.g, original_g)
        self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
    """A str subclass instance as target is treated as an object, not a dotted path."""
    Foo = type('Foo', (str,), {'fish': 'tasty'})
    foo = Foo()
    @patch.multiple(foo, fish='nearly gone')
    def test():
        self.assertEqual(foo.fish, 'nearly gone')

    test()
    self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
    """With TEST_PREFIX='foo', class decoration patches foo_* methods, not test_* ones."""
    class Foo(object):
        thing = 'original'

        def foo_one(self):
            return self.thing
        def foo_two(self):
            return self.thing
        def test_one(self):
            return self.thing
        def test_two(self):
            return self.thing

    Foo = patch.object(Foo, 'thing', 'changed')(Foo)
    foo = Foo()

    self.assertEqual(foo.foo_one(), 'changed')
    self.assertEqual(foo.foo_two(), 'changed')
    self.assertEqual(foo.test_one(), 'original')
    self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
    """TEST_PREFIX also controls which methods patch.dict wraps on a decorated class."""
    class Foo(object):
        def bar_one(self):
            return dict(the_dict)
        def bar_two(self):
            return dict(the_dict)
        def test_one(self):
            return dict(the_dict)
        def test_two(self):
            return dict(the_dict)

    the_dict = {'key': 'original'}
    Foo = patch.dict(the_dict, key='changed')(Foo)

    foo = Foo()
    self.assertEqual(foo.bar_one(), {'key': 'changed'})
    self.assertEqual(foo.bar_two(), {'key': 'changed'})
    self.assertEqual(foo.test_one(), {'key': 'original'})
    self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
    """spec/autospec/spec_set mocks all carry the spec'd name in their reprs."""
    for arg in ('spec', 'autospec', 'spec_set'):
        p = patch('%s.SomeClass' % __name__, **{arg: True})
        m = p.start()
        try:
            self.assertIn(" name='SomeClass'", repr(m))
            self.assertIn(" name='SomeClass.class_attribute'",
                          repr(m.class_attribute))
            self.assertIn(" name='SomeClass()'", repr(m()))
            self.assertIn(" name='SomeClass().class_attribute'",
                          repr(m().class_attribute))
        finally:
            p.stop()
def test_patch_nested_autospec_repr(self):
    """Autospec'd module: nested attribute mocks show the full dotted call path in repr."""
    with patch('test.test_unittest.testmock.support', autospec=True) as m:
        self.assertIn(" name='support.SomeClass.wibble()'",
                      repr(m.SomeClass.wibble()))
        self.assertIn(" name='support.SomeClass().wibble()'",
                      repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
    """mock_calls/method_calls bookkeeping works identically for spec, autospec and spec_set."""
    for arg in ('spec', 'autospec', 'spec_set'):
        p = patch('%s.SomeClass' % __name__, **{arg: True})
        m = p.start()
        try:
            m.wibble()

            kalls = [call.wibble()]
            self.assertEqual(m.mock_calls, kalls)
            self.assertEqual(m.method_calls, kalls)
            self.assertEqual(m.wibble.mock_calls, [call()])

            result = m()
            kalls.append(call())
            self.assertEqual(m.mock_calls, kalls)

            result.wibble()
            kalls.append(call().wibble())
            self.assertEqual(m.mock_calls, kalls)

            self.assertEqual(result.mock_calls, [call.wibble()])
            self.assertEqual(result.wibble.mock_calls, [call()])
            self.assertEqual(result.method_calls, [call.wibble()])
        finally:
            p.stop()
def test_patch_imports_lazily(self):
    """The target module is imported at start() time, not when the patcher is created."""
    p1 = patch('squizz.squozz')
    self.assertRaises(ImportError, p1.start)

    with uncache('squizz'):
        squizz = Mock()
        sys.modules['squizz'] = squizz

        # create the patcher while squozz == 6, then change it: start()
        # must capture the value current at start() time (3)
        squizz.squozz = 6
        p1 = patch('squizz.squozz')
        squizz.squozz = 3
        p1.start()
        p1.stop()
    self.assertEqual(squizz.squozz, 3)
def test_patch_propagates_exc_on_exit(self):
    """__exit__ receives the full (type, value, traceback) triple when the body raises."""
    class holder:
        exc_info = None, None, None

    class custom_patch(_patch):
        def __exit__(self, etype=None, val=None, tb=None):
            _patch.__exit__(self, etype, val, tb)
            holder.exc_info = etype, val, tb
        stop = __exit__

    def with_custom_patch(target):
        getter, attribute = _get_target(target)
        return custom_patch(
            getter, attribute, DEFAULT, None, False, None,
            None, None, {}
        )

    @with_custom_patch('squizz.squozz')
    def test(mock):
        raise RuntimeError

    with uncache('squizz'):
        squizz = Mock()
        sys.modules['squizz'] = squizz

        self.assertRaises(RuntimeError, test)

    self.assertIs(holder.exc_info[0], RuntimeError)
    self.assertIsNotNone(holder.exc_info[1],
                         'exception value not propagated')
    self.assertIsNotNone(holder.exc_info[2],
                         'exception traceback not propagated')
    def test_name_resolution_import_rebinding(self):
        """patch() must resolve dotted and ':'-separated target names even
        after the fixture modules are re-imported and attributes rebound."""
        # Currently mock.patch uses pkgutil.resolve_name(), but repeat
        # similar tests here just in case.
        # The same data is also used for testing import in test_import and
        # pkgutil.resolve_name() in test_pkgutil.
        path = os.path.join(os.path.dirname(test.__file__), 'test_import', 'data')
        def check(name):
            # Target resolves: start/stop must succeed.
            p = patch(name)
            p.start()
            p.stop()
        def check_error(name):
            # Target does not resolve: start() must raise AttributeError.
            p = patch(name)
            self.assertRaises(AttributeError, p.start)
        with uncache('package3', 'package3.submodule'), DirsOnSysPath(path):
            check('package3.submodule.A.attr')
            check_error('package3.submodule.B.attr')
        with uncache('package3', 'package3.submodule'), DirsOnSysPath(path):
            check('package3.submodule:A.attr')
            check_error('package3.submodule:B.attr')
        with uncache('package3', 'package3.submodule'), DirsOnSysPath(path):
            check('package3:submodule.B.attr')
            check_error('package3:submodule.A.attr')
            check('package3.submodule.A.attr')
            check_error('package3.submodule.B.attr')
            check('package3:submodule.B.attr')
            check_error('package3:submodule.A.attr')
        with uncache('package3', 'package3.submodule'), DirsOnSysPath(path):
            check('package3:submodule.B.attr')
            check_error('package3:submodule.A.attr')
            check('package3.submodule:A.attr')
            check_error('package3.submodule:B.attr')
            check('package3:submodule.B.attr')
            check_error('package3:submodule.A.attr')
    def test_name_resolution_import_rebinding2(self):
        """Same as test_name_resolution_import_rebinding, against package4."""
        path = os.path.join(os.path.dirname(test.__file__), 'test_import', 'data')
        def check(name):
            # Target resolves: start/stop must succeed.
            p = patch(name)
            p.start()
            p.stop()
        def check_error(name):
            # Target does not resolve: start() must raise AttributeError.
            p = patch(name)
            self.assertRaises(AttributeError, p.start)
        with uncache('package4', 'package4.submodule'), DirsOnSysPath(path):
            check('package4.submodule.A.attr')
            check_error('package4.submodule.B.attr')
        with uncache('package4', 'package4.submodule'), DirsOnSysPath(path):
            check('package4.submodule:A.attr')
            check_error('package4.submodule:B.attr')
        with uncache('package4', 'package4.submodule'), DirsOnSysPath(path):
            check('package4:submodule.B.attr')
            check_error('package4:submodule.A.attr')
            check('package4.submodule.A.attr')
            check_error('package4.submodule.B.attr')
            check('package4:submodule.A.attr')
            check_error('package4:submodule.B.attr')
        with uncache('package4', 'package4.submodule'), DirsOnSysPath(path):
            check('package4:submodule.B.attr')
            check_error('package4:submodule.A.attr')
            check('package4.submodule:A.attr')
            check_error('package4.submodule:B.attr')
            check('package4:submodule.A.attr')
            check_error('package4:submodule.B.attr')
    def test_create_and_specs(self):
        """create=True combined with any spec argument must raise TypeError."""
        for kwarg in ('spec', 'spec_set', 'autospec'):
            p = patch('%s.doesnotexist' % __name__, create=True,
                      **{kwarg: True})
            self.assertRaises(TypeError, p.start)
            # The failed start must not have created the attribute.
            self.assertRaises(NameError, lambda: doesnotexist)
            # check that spec with create is innocuous if the original exists
            p = patch(MODNAME, create=True, **{kwarg: True})
            p.start()
            p.stop()
    def test_multiple_specs(self):
        """Combining any two spec-style arguments must raise TypeError and
        leave the patch target untouched."""
        original = PTModule
        for kwarg in ('spec', 'spec_set'):
            p = patch(MODNAME, autospec=0, **{kwarg: 0})
            self.assertRaises(TypeError, p.start)
            self.assertIs(PTModule, original)
        for kwarg in ('spec', 'autospec'):
            p = patch(MODNAME, spec_set=0, **{kwarg: 0})
            self.assertRaises(TypeError, p.start)
            self.assertIs(PTModule, original)
        for kwarg in ('spec_set', 'autospec'):
            p = patch(MODNAME, spec=0, **{kwarg: 0})
            self.assertRaises(TypeError, p.start)
            self.assertIs(PTModule, original)
    def test_specs_false_instead_of_none(self):
        """spec/spec_set/autospec=False must behave like no spec at all."""
        p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
        mock = p.start()
        try:
            # no spec should have been set, so attribute access should not fail
            mock.does_not_exist
            mock.does_not_exist = 3
        finally:
            p.stop()
    def test_falsey_spec(self):
        """A falsey (but not False) spec value must still be honoured as a spec."""
        for kwarg in ('spec', 'autospec', 'spec_set'):
            p = patch(MODNAME, **{kwarg: 0})
            m = p.start()
            try:
                # Any attribute outside the spec must be rejected.
                self.assertRaises(AttributeError, getattr, m, 'doesnotexit')
            finally:
                p.stop()
    def test_spec_set_true(self):
        """spec_set=True layered over spec/autospec restricts get and set."""
        for kwarg in ('spec', 'autospec'):
            p = patch(MODNAME, spec_set=True, **{kwarg: True})
            m = p.start()
            try:
                self.assertRaises(AttributeError, setattr, m,
                                  'doesnotexist', 'something')
                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
            finally:
                p.stop()
    def test_callable_spec_as_list(self):
        """A sequence spec containing '__call__' makes the mock callable."""
        spec = ('__call__',)
        p = patch(MODNAME, spec=spec)
        m = p.start()
        try:
            self.assertTrue(callable(m))
        finally:
            p.stop()
    def test_not_callable_spec_as_list(self):
        """A sequence spec without '__call__' yields a non-callable mock."""
        spec = ('foo', 'bar')
        p = patch(MODNAME, spec=spec)
        m = p.start()
        try:
            self.assertFalse(callable(m))
        finally:
            p.stop()
    def test_patch_stopall(self):
        """patch.stopall() must stop every patch begun with start()."""
        unlink = os.unlink
        chdir = os.chdir
        path = os.path
        patch('os.unlink', something).start()
        patch('os.chdir', something_else).start()
        @patch('os.path')
        def patched(mock_path):
            # stopall undoes the started patches but not the decorator-managed
            # one, which is still active inside this function.
            patch.stopall()
            self.assertIs(os.path, mock_path)
            self.assertIs(os.unlink, unlink)
            self.assertIs(os.chdir, chdir)
        patched()
        self.assertIs(os.path, path)
    def test_stopall_lifo(self):
        """patch.stopall() must unwind active patches in LIFO order."""
        stopped = []
        class thing(object):
            one = two = three = None
        def get_patch(attribute):
            # Records the attribute name when this patch is stopped.
            class mypatch(_patch):
                def stop(self):
                    stopped.append(attribute)
                    return super(mypatch, self).stop()
            return mypatch(lambda: thing, attribute, None, None,
                           False, None, None, None, {})
        [get_patch(val).start() for val in ("one", "two", "three")]
        patch.stopall()
        self.assertEqual(stopped, ["three", "two", "one"])
    def test_patch_dict_stopall(self):
        """patch.stopall() must also restore dicts patched via patch.dict()."""
        dic1 = {}
        dic2 = {1: 'a'}
        dic3 = {1: 'A', 2: 'B'}
        origdic1 = dic1.copy()
        origdic2 = dic2.copy()
        origdic3 = dic3.copy()
        patch.dict(dic1, {1: 'I', 2: 'II'}).start()
        patch.dict(dic2, {2: 'b'}).start()
        @patch.dict(dic3)
        def patched():
            del dic3[1]
        patched()
        # Decorator-managed dict patch already restored dic3; the started
        # ones are still in effect until stopall().
        self.assertNotEqual(dic1, origdic1)
        self.assertNotEqual(dic2, origdic2)
        self.assertEqual(dic3, origdic3)
        patch.stopall()
        self.assertEqual(dic1, origdic1)
        self.assertEqual(dic2, origdic2)
        self.assertEqual(dic3, origdic3)
    def test_patch_and_patch_dict_stopall(self):
        """stopall() restores attribute patches and dict patches together."""
        original_unlink = os.unlink
        original_chdir = os.chdir
        dic1 = {}
        dic2 = {1: 'A', 2: 'B'}
        origdic1 = dic1.copy()
        origdic2 = dic2.copy()
        patch('os.unlink', something).start()
        patch('os.chdir', something_else).start()
        patch.dict(dic1, {1: 'I', 2: 'II'}).start()
        patch.dict(dic2).start()
        del dic2[1]
        self.assertIsNot(os.unlink, original_unlink)
        self.assertIsNot(os.chdir, original_chdir)
        self.assertNotEqual(dic1, origdic1)
        self.assertNotEqual(dic2, origdic2)
        patch.stopall()
        self.assertIs(os.unlink, original_unlink)
        self.assertIs(os.chdir, original_chdir)
        self.assertEqual(dic1, origdic1)
        self.assertEqual(dic2, origdic2)
    def test_special_attrs(self):
        """patch.object must save and restore function dunder attributes."""
        def foo(x=0):
            """TEST"""
            return x
        with patch.object(foo, '__defaults__', (1, )):
            self.assertEqual(foo(), 1)
        self.assertEqual(foo(), 0)
        orig_doc = foo.__doc__
        with patch.object(foo, '__doc__', "FUN"):
            self.assertEqual(foo.__doc__, "FUN")
        self.assertEqual(foo.__doc__, orig_doc)
        with patch.object(foo, '__module__', "testpatch2"):
            self.assertEqual(foo.__module__, "testpatch2")
        self.assertEqual(foo.__module__, __name__)
        with patch.object(foo, '__annotations__', dict([('s', 1, )])):
            self.assertEqual(foo.__annotations__, dict([('s', 1, )]))
        self.assertEqual(foo.__annotations__, dict())
        # Keyword-only defaults live in __kwdefaults__, not __defaults__.
        def foo(*a, x=0):
            return x
        with patch.object(foo, '__kwdefaults__', dict([('x', 1, )])):
            self.assertEqual(foo(), 1)
        self.assertEqual(foo(), 0)
    def test_patch_orderdict(self):
        """patch.dict on an OrderedDict must keep order and restore exactly."""
        foo = OrderedDict()
        foo['a'] = object()
        foo['b'] = 'python'
        original = foo.copy()
        update_values = list(zip('cdefghijklmnopqrstuvwxyz', range(26)))
        patched_values = list(foo.items()) + update_values
        with patch.dict(foo, OrderedDict(update_values)):
            self.assertEqual(list(foo.items()), patched_values)
        self.assertEqual(foo, original)
        with patch.dict(foo, update_values):
            self.assertEqual(list(foo.items()), patched_values)
        self.assertEqual(foo, original)
    def test_dotted_but_module_not_loaded(self):
        """Patching a dotted path must re-import modules missing from
        sys.modules."""
        # This exercises the AttributeError branch of _dot_lookup.
        # make sure it's there
        import test.test_unittest.testmock.support
        # now make sure it's not:
        with patch.dict('sys.modules'):
            del sys.modules['test.test_unittest.testmock.support']
            del sys.modules['test.test_unittest.testmock']
            del sys.modules['test.test_unittest']
            del sys.modules['test']
            # now make sure we can patch based on a dotted path:
            @patch('test.test_unittest.testmock.support.X')
            def test(mock):
                pass
            test()
    def test_invalid_target(self):
        """Empty-string and non-string patch targets must raise TypeError."""
        class Foo:
            pass
        for target in ['', 12, Foo()]:
            with self.subTest(target=target):
                with self.assertRaises(TypeError):
                    patch(target)
    def test_cant_set_kwargs_when_passing_a_mock(self):
        """Extra config kwargs are rejected when an explicit 'new' is given."""
        @patch('test.test_unittest.testmock.support.X', new=object(), x=1)
        def test(): pass
        with self.assertRaises(TypeError):
            test()
    def test_patch_proxy_object(self):
        """new_callable may be a proxy object such as a MagicMock instance."""
        @patch("test.test_unittest.testmock.support.g", new_callable=MagicMock())
        def test(_):
            pass
        test()
if __name__ == '__main__':
unittest.main() | python | github | https://github.com/python/cpython | Lib/test/test_unittest/testmock/testpatch.py |
from django.db import migrations
class Migration(migrations.Migration):
    """Squashed-migration test fixture: a no-op step that depends on 5_auto."""

    dependencies = [
        ("migrations", "5_auto"),
    ]

    operations = [
        migrations.RunPython(migrations.RunPython.noop),
    ]
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
from ..target.semihost import SemihostIOHandler
# Open mode flags
# NOTE(review): values appear to follow the GDB File-I/O / newlib numbering
# (O_CREAT=0x200, O_TRUNC=0x400, O_EXCL=0x800) rather than the host's
# os.O_* constants — confirm against the GDB Remote Serial Protocol docs.
O_RDONLY = 0x0
O_WRONLY = 0x1
O_RDWR = 0x2
O_APPEND = 0x8
O_CREAT = 0x200
O_TRUNC = 0x400
O_EXCL = 0x800
# Offset added to file descriptor numbers returned from gdb. This offset is to make
# sure we don't overlap with the standard I/O file descriptors 1, 2, and 3 (fds must be
# non-zero for semihosting).
FD_OFFSET = 4
##
# @brief Semihosting file I/O handler that performs GDB syscalls.
class GDBSyscallIOHandler(SemihostIOHandler):
    """Forwards semihosting file I/O requests to the connected gdb client by
    issuing File-I/O syscalls through the gdb server.

    File descriptors returned to the target are offset by FD_OFFSET so they
    never collide with the semihosting standard I/O descriptors.
    """

    def __init__(self, server):
        super(GDBSyscallIOHandler, self).__init__()
        self._server = server

    def open(self, fnptr, fnlen, mode):
        """Open a file on the gdb host; returns an fd, or -1 on failure."""
        # Handle standard I/O.
        fd, _ = self._std_open(fnptr, fnlen, mode)
        if fd is not None:
            return fd
        # Convert the fopen()-style mode string to O_* flag bits.
        modeval = 0
        hasplus = '+' in mode
        if 'r' in mode:
            if hasplus:
                modeval |= O_RDWR
            else:
                modeval |= O_RDONLY
        elif 'w' in mode:
            if hasplus:
                modeval |= O_RDWR | O_CREAT | O_TRUNC
            else:
                modeval |= O_WRONLY | O_CREAT | O_TRUNC
        elif 'a' in mode:
            if hasplus:
                modeval |= O_RDWR | O_APPEND | O_CREAT
            else:
                modeval |= O_WRONLY | O_APPEND | O_CREAT
        # Fix: use the 0o prefix so the permission literal also parses on
        # Python 3 (bare 0777 is a syntax error there; same value on 2.6+).
        result, self._errno = self._server.syscall('open,%x/%x,%x,%x' % (fnptr, fnlen + 1, modeval, 0o777))
        if result != -1:
            result += FD_OFFSET
        return result

    def close(self, fd):
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('close,%x' % (fd))
        return result

    # syscall return: number of bytes written
    # semihost return: 0 is success, or number of bytes not written
    def write(self, fd, ptr, length):
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('write,%x,%x,%x' % (fd, ptr, length))
        return length - result

    # syscall return: number of bytes read
    # semihost return: 0 is success, length is EOF, number of bytes not read
    def read(self, fd, ptr, length):
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('read,%x,%x,%x' % (fd, ptr, length))
        return length - result

    def readc(self):
        # Read one byte from host stdin into scratch memory just below the
        # stack pointer, then fetch it from target memory.
        # NOTE(review): relies on self.agent being provided by the
        # SemihostIOHandler base class — confirm.
        ptr = self.agent.target.readCoreRegister('sp') - 4
        result, self._errno = self._server.syscall('read,0,%x,1' % (ptr))
        if result != -1:
            result = self.agent.target.read8(ptr)
        return result

    def istty(self, fd):
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('isatty,%x' % (fd))
        return result

    def seek(self, fd, pos):
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('lseek,%x,%x,0' % (fd, pos))
        # Fix: compare with != instead of 'is not' — identity comparison with
        # an int literal is unreliable (and a SyntaxWarning on Python 3.8+).
        return 0 if result != -1 else -1

    def flen(self, fd):
        """Return the length of the file backing fd, or -1 on failure."""
        fd -= FD_OFFSET
        ptr = self.agent.target.readCoreRegister('sp') - 64
        result, self._errno = self._server.syscall('fstat,%x,%x' % (fd, ptr))
        if result != -1:
            # Fields in stat struct are big endian as written by gdb.
            # NOTE(review): reads the first 8 bytes at ptr as the size field —
            # confirm the struct stat layout used by the gdb client.
            size = self.agent.target.readBlockMemoryUnaligned8(ptr, 8)
            result = (size[0] << 56) \
                | (size[1] << 48) \
                | (size[2] << 40) \
                | (size[3] << 32) \
                | (size[4] << 24) \
                | (size[5] << 16) \
                | (size[6] << 8) \
                | (size[7])
        return result
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
description:
- Configures a bond interface on Cumulus Linux To configure a bridge port
use the cl_bridge module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bonding found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com).
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode.
choices:
- true
- false
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode.
choices:
- true
- false
mstpctl_portadminedge:
description:
- Enables admin edge port.
choices:
- true
- false
clag_id:
description:
- Specify a unique clag_id for every dual connected bond on each
peer switch. The value must be between 1 and 65535 and must be the
same on both peer switches in order for the bond to be considered
dual-connected.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
miimon:
description:
- The mii link monitoring interval.
default: 100
mode:
description:
- The bond mode, as of Cumulus Linux 2.5 only LACP bond mode is
supported.
default: '802.3ad'
min_links:
description:
- Minimum number of links.
default: 1
lacp_bypass_allow:
description:
- Enable LACP bypass.
lacp_bypass_period:
description:
- Period for enabling LACP bypass. Max value is 900.
lacp_bypass_priority:
description:
- List of ports and priorities. Example I("swp1=10, swp2=20").
lacp_bypass_all_active:
description:
- Activate all interfaces for bypass.
It is recommended to configure all_active instead
of using bypass_priority.
lacp_rate:
description:
- The lacp rate.
default: 1
slaves:
description:
- Bond members.
required: True
xmit_hash_policy:
description:
- Transmit load balancing algorithm. As of Cumulus Linux 2.5 only
I(layer3+4) policy is supported.
default: layer3+4
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
  - For the config to be activated, i.e. installed in the kernel,
    "service networking reload" needs to be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
- cl_bond:
name: bond0
slaves: "swp4-5"
ipv4: 10.1.1.1/24
notify: reload networking
# configure bond as a dual-connected clag bond
- cl_bond:
name: bond1
slaves: "swp1s0 swp2s0"
clag_id: 1
notify: reload networking
# define cl_bond once in tasks file
# then write interface config in variables file
# with just the options you want.
- cl_bond:
name: "{{ item.key }}"
slaves: "{{ item.value.slaves }}"
clag_id: "{{ item.value.clag_id|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: "{{ cl_bonds }}"
notify: reload networking
# In vars file
# ============
cl_bonds:
bond0:
alias_name: 'uplink to isp'
slaves: ['swp1', 'swp3']
ipv4: '10.1.1.1/24'
bond2:
vids: [1, 50]
clag_id: 1
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
    """Run an external command; return stdout, or fail the module on error."""
    (_rc, out, _err) = module.run_command(exec_path)
    if _rc > 0:
        # ifquery reports an error for a missing interface; treat that as an
        # empty JSON config list instead of failing the module.
        if re.search('cannot find interface', _err):
            return '[{}]'
        failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
        module.fail_json(msg=failure_msg)
    else:
        return out
def current_iface_config(module):
    """Load the interface's current ifupdown2 config (via ifquery) into
    module.custom_current_config; leaves {} if the iface file is absent."""
    # due to a bug in ifquery, have to check for presence of interface file
    # and not rely solely on ifquery. when bug is fixed, this check can be
    # removed
    _ifacename = module.params.get('name')
    _int_dir = module.params.get('location')
    module.custom_current_config = {}
    if os.path.exists(_int_dir + '/' + _ifacename):
        _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
        # ifquery emits a JSON list; the single interface is element 0.
        module.custom_current_config = module.from_json(
            run_cmd(module, _cmd))[0]
def build_address(module):
    """Join the ipv4 and ipv6 parameters into the 'address' config key.

    Skipped entirely when addr_method is 'dhcp', since DHCP assigns the
    address.
    """
    # if addr_method == 'dhcp', dont add IP address
    if module.params.get('addr_method') == 'dhcp':
        return
    _ipv4 = module.params.get('ipv4')
    _ipv6 = module.params.get('ipv6')
    _addresslist = []
    if _ipv4 and len(_ipv4) > 0:
        _addresslist += _ipv4
    if _ipv6 and len(_ipv6) > 0:
        _addresslist += _ipv6
    if len(_addresslist) > 0:
        module.custom_desired_config['config']['address'] = ' '.join(
            _addresslist)
def build_vids(module):
    """Copy the 'vids' parameter into the 'bridge-vids' config key, if set."""
    vids = module.params.get('vids')
    if vids:
        module.custom_desired_config['config']['bridge-vids'] = ' '.join(vids)
def build_pvid(module):
    """Record the untagged (native) VLAN as 'bridge-pvid' when configured."""
    pvid = module.params.get('pvid')
    if pvid:
        module.custom_desired_config['config']['bridge-pvid'] = str(pvid)
def conv_bool_to_str(_value):
    """Map True/False to 'yes'/'no'; pass any non-bool value through as-is."""
    if isinstance(_value, bool):
        return 'yes' if _value else 'no'
    return _value
def conv_array_to_str(_value):
    """Flatten a list into a space-separated string; scalars pass through."""
    if not isinstance(_value, list):
        return _value
    return ' '.join(_value)
def build_generic_attr(module, _attr):
    """Copy one module parameter into the desired config, converting bools
    to 'yes'/'no', lists to space-separated strings, and underscores in the
    attribute name to dashes. Falsey values are skipped."""
    _value = module.params.get(_attr)
    _value = conv_bool_to_str(_value)
    _value = conv_array_to_str(_value)
    if _value:
        module.custom_desired_config['config'][
            re.sub('_', '-', _attr)] = str(_value)
def build_alias_name(module):
    """Copy the port description ('alias_name') into the 'alias' config key."""
    description = module.params.get('alias_name')
    if description:
        module.custom_desired_config['config']['alias'] = description
def build_addr_method(module):
    """When an address method (e.g. 'dhcp') is requested, mark the interface
    as inet and record the method."""
    method = module.params.get('addr_method')
    if method:
        desired = module.custom_desired_config
        desired['addr_family'] = 'inet'
        desired['addr_method'] = method
def build_vrr(module):
    """Emit the 'address-virtual' config ("<mac> <ip>") for the VRR feature.

    AnsibleModule's required_together guarantees that virtual_mac is present
    whenever virtual_ip is.
    """
    virtual_ip = module.params.get('virtual_ip')
    virtual_mac = module.params.get('virtual_mac')
    if virtual_ip:
        module.custom_desired_config.get('config')['address-virtual'] = \
            ' '.join([virtual_mac, virtual_ip])
def add_glob_to_array(_bondmems):
    """
    Prefix any ranged bond member (one containing a dash, e.g. 'swp4-5')
    with 'glob ' and flatten the list to a space-separated string.
    Non-list inputs are returned unchanged.
    """
    if not isinstance(_bondmems, list):
        return _bondmems
    parts = []
    for member in _bondmems:
        parts.append('glob ' + member if '-' in member else member)
    return ' '.join(parts)
def build_bond_attr(module, _attr):
    """Copy one bond parameter into the desired config under a 'bond-' key,
    converting bools to 'yes'/'no' and expanding ranged members with 'glob'.
    Falsey values are skipped."""
    _value = module.params.get(_attr)
    _value = conv_bool_to_str(_value)
    _value = add_glob_to_array(_value)
    if _value:
        module.custom_desired_config['config'][
            'bond-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
    """
    Build an ifupdown2-compatible hash from the module parameters and store
    it on module.custom_desired_config.
    """
    module.custom_desired_config = {
        'addr_family': None,
        'auto': True,
        'config': {},
        'name': module.params.get('name')
    }
    # Bond-specific attributes are written with a 'bond-' key prefix.
    for _attr in ['slaves', 'mode', 'xmit_hash_policy',
                  'miimon', 'lacp_rate', 'lacp_bypass_allow',
                  'lacp_bypass_period', 'lacp_bypass_all_active',
                  'min_links']:
        build_bond_attr(module, _attr)
    build_addr_method(module)
    build_address(module)
    build_vids(module)
    build_pvid(module)
    build_alias_name(module)
    build_vrr(module)
    # Bug fix: a missing comma previously fused 'mstpctl_portadminedge' and
    # 'mstpctl_bpduguard' into the single bogus attribute name
    # 'mstpctl_portadminedgemstpctl_bpduguard', so neither option was ever
    # written to the generated config.
    for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
                  'mstpctl_bpduguard', 'clag_id',
                  'lacp_bypass_priority']:
        build_generic_attr(module, _attr)
def config_dict_changed(module):
    """
    return true if 'config' dict in hash is different
    between desired and current config
    """
    desired = module.custom_desired_config.get('config')
    current = module.custom_current_config.get('config')
    return desired != current
def config_changed(module):
    """
    returns true if config has changed
    """
    if config_dict_changed(module):
        return True
    # check if addr_method is changed
    return module.custom_desired_config.get('addr_method') != \
        module.custom_current_config.get('addr_method')
def replace_config(module):
    """Render the desired config through ifquery, then write it to the
    interface file under the configured location.

    Bug fix: the target file used to be opened with 'w' (truncating the
    existing config) *before* ifquery ran, so a failed render destroyed the
    existing interface file. The file is now only opened after rendering
    succeeds.
    """
    desired_config = module.custom_desired_config
    # by default it will be something like /etc/network/interfaces.d/swp1
    final_location = module.params.get('location') + '/' + \
        module.params.get('name')
    final_text = ''
    temp = tempfile.NamedTemporaryFile()
    try:
        # make sure to put hash in array or else ifquery will fail
        temp.write(module.jsonify([desired_config]))
        # need to seek to 0 so that data is written to tempfile.
        temp.seek(0)
        _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
        final_text = run_cmd(module, _cmd)
    finally:
        temp.close()
    _fh = open(final_location, 'w')
    try:
        _fh.write(final_text)
    finally:
        _fh.close()
def main():
    """Module entry point: validate parameters and (re)write the bond config
    file when the desired state differs from the current one."""
    module = AnsibleModule(
        argument_spec=dict(
            slaves=dict(required=True, type='list'),
            name=dict(required=True, type='str'),
            ipv4=dict(type='list'),
            ipv6=dict(type='list'),
            alias_name=dict(type='str'),
            addr_method=dict(type='str',
                             choices=['', 'dhcp']),
            mtu=dict(type='str'),
            virtual_ip=dict(type='str'),
            virtual_mac=dict(type='str'),
            vids=dict(type='list'),
            pvid=dict(type='str'),
            mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
            mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
            mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
            clag_id=dict(type='str'),
            min_links=dict(type='int', default=1),
            mode=dict(type='str', default='802.3ad'),
            miimon=dict(type='int', default=100),
            xmit_hash_policy=dict(type='str', default='layer3+4'),
            lacp_rate=dict(type='int', default=1),
            lacp_bypass_allow=dict(type='int', choices=[0, 1]),
            lacp_bypass_all_active=dict(type='int', choices=[0, 1]),
            lacp_bypass_priority=dict(type='list'),
            lacp_bypass_period=dict(type='int'),
            location=dict(type='str',
                          default='/etc/network/interfaces.d')
        ),
        mutually_exclusive=[['lacp_bypass_priority', 'lacp_bypass_all_active']],
        required_together=[['virtual_ip', 'virtual_mac']]
    )
    # if using the jinja default filter, this resolves to
    # create a list with an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list, get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
    # NOTE(review): dict.iteritems() is Python 2 only — this module will not
    # run under Python 3 without switching to items().
    for k, _param in module.params.iteritems():
        if isinstance(_param, list):
            module.params[k] = [x for x in _param if x]
    _location = module.params.get('location')
    if not os.path.exists(_location):
        _msg = "%s does not exist." % (_location)
        module.fail_json(msg=_msg)
        return  # for testing purposes only
    ifacename = module.params.get('name')
    _changed = False
    _msg = "interface %s config not changed" % (ifacename)
    current_iface_config(module)
    build_desired_iface_config(module)
    if config_changed(module):
        replace_config(module)
        _msg = "interface %s config updated" % (ifacename)
        _changed = True
    module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, getConfigListEntry
from Components.Sources.StaticText import StaticText
class HdmiCECSetupScreen(Screen, ConfigListScreen):
    """Enigma2 setup screen exposing the HDMI-CEC configuration options.

    Presents a ConfigList of config.hdmicec.* entries plus color-key actions
    for saving, cancelling, and pinning/clearing a fixed CEC physical
    address.
    """
    skin = """
	<screen position="c-300,c-250" size="600,500" title="HDMI CEC setup">
		<widget name="config" position="25,25" size="550,350" />
		<widget source="current_address" render="Label" position="25,375" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
		<widget source="fixed_address" render="Label" position="25,405" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
		<ePixmap pixmap="skin_default/buttons/red.png" position="20,e-45" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/green.png" position="160,e-45" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/yellow.png" position="300,e-45" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/blue.png" position="440,e-45" size="140,40" alphatest="on" />
		<widget source="key_red" render="Label" position="20,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget source="key_green" render="Label" position="160,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
		<widget source="key_yellow" render="Label" position="300,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
		<widget source="key_blue" render="Label" position="440,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
	</screen>"""

    def __init__(self, session):
        self.skin = HdmiCECSetupScreen.skin
        Screen.__init__(self, session)

        from Components.ActionMap import ActionMap
        from Components.Button import Button

        # Color-key labels and the two address status lines.
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self["key_yellow"] = StaticText(_("Set fixed"))
        self["key_blue"] = StaticText(_("Clear fixed"))
        self["current_address"] = StaticText()
        self["fixed_address"] = StaticText()

        self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
            {
                "ok": self.keyGo,
                "save": self.keyGo,
                "cancel": self.keyCancel,
                "green": self.keyGo,
                "red": self.keyCancel,
                "yellow": self.setFixedAddress,
                "blue": self.clearFixedAddress,
                "menu": self.closeRecursive,
            }, -2)

        # Build the ConfigList from the hdmicec config subtree.
        self.list = []
        ConfigListScreen.__init__(self, self.list, session = self.session)
        self.list.append(getConfigListEntry(_("Enabled"), config.hdmicec.enabled))
        self.list.append(getConfigListEntry(_("Put TV in standby"), config.hdmicec.control_tv_standby))
        self.list.append(getConfigListEntry(_("Wakeup TV from standby"), config.hdmicec.control_tv_wakeup))
        self.list.append(getConfigListEntry(_("Regard deep standby as standby"), config.hdmicec.handle_deepstandby_events))
        self.list.append(getConfigListEntry(_("Switch TV to correct input"), config.hdmicec.report_active_source))
        self.list.append(getConfigListEntry(_("Use TV remote control"), config.hdmicec.report_active_menu))
        self.list.append(getConfigListEntry(_("Handle standby from TV"), config.hdmicec.handle_tv_standby))
        self.list.append(getConfigListEntry(_("Handle wakeup from TV"), config.hdmicec.handle_tv_wakeup))
        self.list.append(getConfigListEntry(_("Wakeup signal from TV"), config.hdmicec.tv_wakeup_detection))
        self.list.append(getConfigListEntry(_("Forward volume keys"), config.hdmicec.volume_forwarding))
        self.list.append(getConfigListEntry(_("Put receiver in standby"), config.hdmicec.control_receiver_standby))
        self.list.append(getConfigListEntry(_("Wakeup receiver from standby"), config.hdmicec.control_receiver_wakeup))
        self.list.append(getConfigListEntry(_("Minimum send interval"), config.hdmicec.minimum_send_interval))
        self["config"].list = self.list
        self["config"].l.setList(self.list)
        self.updateAddress()

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)

    def keyRight(self):
        ConfigListScreen.keyRight(self)

    def keyGo(self):
        # Persist every config entry, then close the screen.
        for x in self["config"].list:
            x[1].save()
        self.close()

    def keyCancel(self):
        # Revert every config entry, then close the screen.
        for x in self["config"].list:
            x[1].cancel()
        self.close()

    def setFixedAddress(self):
        # Pin the currently-detected physical address as the fixed address.
        import Components.HdmiCec
        Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress(Components.HdmiCec.hdmi_cec.getPhysicalAddress())
        self.updateAddress()

    def clearFixedAddress(self):
        # "0.0.0.0" means no fixed address is configured.
        import Components.HdmiCec
        Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress("0.0.0.0")
        self.updateAddress()

    def updateAddress(self):
        # Refresh the two status labels from the CEC component state.
        import Components.HdmiCec
        self["current_address"].setText(_("Current CEC address") + ": " + Components.HdmiCec.hdmi_cec.getPhysicalAddress())
        if config.hdmicec.fixed_physical_address.value == "0.0.0.0":
            fixedaddresslabel = ""
        else:
            fixedaddresslabel = _("Using fixed address") + ": " + config.hdmicec.fixed_physical_address.value
        self["fixed_address"].setText(fixedaddresslabel)
def main(session, **kwargs):
    # Plugin entry point: open the HDMI-CEC setup screen.
    session.open(HdmiCECSetupScreen)
def startSetup(menuid):
    # Contribute a menu entry only to the "system" section of the setup menu.
    if menuid == "system":
        return [(_("HDMI-CEC setup"), main, "hdmi_cec_setup", 0)]
    return []
def Plugins(**kwargs):
    """Describe this plugin; only registered when an HDMI-CEC device exists."""
    from os import path
    has_cec_device = path.exists("/dev/hdmi_cec") or path.exists("/dev/misc/hdmi_cec0")
    if not has_cec_device:
        return []
    import Components.HdmiCec
    from Plugins.Plugin import PluginDescriptor
    return [PluginDescriptor(where = PluginDescriptor.WHERE_MENU, fnc = startSetup)]
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import orm
from neutron_lib import constants
from neutron_lib.exceptions import l3 as l3_exc
from neutron.db.models import l3 as l3_models
from neutron.db import models_v2
from neutron.services.l3_router import l3_router_plugin
class TricircleL3Plugin(l3_router_plugin.L3RouterPlugin):
# Override the original implementation to allow associating a floating ip
# to a port whose network is not attached to the router. Tricircle will
# configures extra routes to guarantee packets can reach the port.
def get_router_for_floatingip(self, context, internal_port,
internal_subnet, external_network_id):
"""Find a router to handle the floating-ip association.
:param internal_port: The port for the fixed-ip.
:param internal_subnet: The subnet for the fixed-ip.
:param external_network_id: The external network for floating-ip.
:raises: ExternalGatewayForFloatingIPNotFound if no suitable router
is found.
"""
router_port = l3_models.RouterPort
gw_port = orm.aliased(models_v2.Port, name="gw_port")
router_port_qry = context.session.query(
router_port.router_id
).join(gw_port, gw_port.device_id == router_port.router_id).filter(
gw_port.network_id == external_network_id,
gw_port.device_owner == constants.DEVICE_OWNER_ROUTER_GW
).distinct()
first_router_id = None
for router in router_port_qry:
if not first_router_id:
first_router_id = router.router_id
if first_router_id:
return first_router_id
raise l3_exc.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet['id'],
external_network_id=external_network_id,
port_id=internal_port['id']) | unknown | codeparrot/codeparrot-clean | ||
from txircd.modbase import Module
class ExtendedJoin(Module):
def capRequest(self, user, capability):
return True
def capAcknowledge(self, user, capability):
return False
def capRequestRemove(self, user, capability):
return True
def capAcknowledgeRemove(self, user, capability):
return False
def capClear(self, user, capability):
return True
def modifyJoinMessage(self, channel, user, joinShowUser):
if "extended_join_wait" in channel.cache:
del channel.cache["extended_join_wait"]
remove = []
for u in joinShowUser:
if "cap" in u.cache and "extended-join" in u.cache["cap"]:
remove.append(u)
u.sendMessage("JOIN", user.metadata["ext"]["accountname"] if "accountname" in user.metadata["ext"] else "*", ":{}".format(user.realname), to=channel.name, prefix=user.prefix())
for u in remove:
joinShowUser.remove(u)
return joinShowUser
else:
channel.cache["extended_join_wait"] = True
return "again" # force this module to have lower priority so it goes last, after any modules that may actually hide the join notice
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.extended_join = None
def spawn(self):
self.extended_join = ExtendedJoin().hook(self.ircd)
if "cap" not in self.ircd.module_data_cache:
self.ircd.module_data_cache["cap"] = {}
self.ircd.module_data_cache["cap"]["extended-join"] = self.extended_join
return {
"actions": {
"joinmessage": self.extended_join.modifyJoinMessage
}
}
def cleanup(self):
del self.ircd.module_data_cache["cap"]["extended-join"] | unknown | codeparrot/codeparrot-clean | ||
import cv2
import gym
import random
import numpy as np
class Environment(object):
def __init__(self, config):
self.env = gym.make(config.env_name)
screen_width, screen_height, self.action_repeat, self.random_start = \
config.screen_width, config.screen_height, config.action_repeat, config.random_start
self.display = config.display
self.dims = (screen_width, screen_height)
self._screen = None
self.reward = 0
self.terminal = True
def new_game(self, from_random_game=False):
if self.lives == 0:
self._screen = self.env.reset()
self._step(0)
self.render()
return self.screen, 0, 0, self.terminal
def new_random_game(self):
self.new_game(True)
for _ in xrange(random.randint(0, self.random_start - 1)):
self._step(0)
self.render()
return self.screen, 0, 0, self.terminal
def _step(self, action):
self._screen, self.reward, self.terminal, _ = self.env.step(action)
def _random_step(self):
action = self.env.action_space.sample()
self._step(action)
@ property
def screen(self):
return cv2.resize(cv2.cvtColor(self._screen, cv2.COLOR_RGB2GRAY)/255., self.dims)
#return cv2.resize(cv2.cvtColor(self._screen, cv2.COLOR_BGR2YCR_CB)/255., self.dims)[:,:,0]
@property
def action_size(self):
return self.env.action_space.n
@property
def lives(self):
return self.env.ale.lives()
@property
def state(self):
return self.screen, self.reward, self.terminal
def render(self):
if self.display:
self.env.render()
def after_act(self, action):
self.render()
class GymEnvironment(Environment):
def __init__(self, config):
super(GymEnvironment, self).__init__(config)
def act(self, action, is_training=True):
cumulated_reward = 0
start_lives = self.lives
for _ in xrange(self.action_repeat):
self._step(action)
cumulated_reward = cumulated_reward + self.reward
if is_training and start_lives > self.lives:
cumulated_reward -= 1
self.terminal = True
if self.terminal:
break
self.reward = cumulated_reward
self.after_act(action)
return self.state
class SimpleGymEnvironment(Environment):
def __init__(self, config):
super(SimpleGymEnvironment, self).__init__(config)
def act(self, action, is_training=True):
self._step(action)
self.after_act(action)
return self.state | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
request.ws_stream.send_message(line, binary=False)
if line == _GOODBYE_MESSAGE:
return
else:
request.ws_stream.send_message(line, binary=True)
def web_socket_passive_closing_handshake(request):
return request.ws_close_code, request.ws_close_reason | unknown | codeparrot/codeparrot-clean | ||
#SPDX-License-Identifier: MIT-0
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# key: The name of the key as defined in galaxy.yml
# description: Comment/info on the key to be used as the generated doc and auto generated skeleton galaxy.yml file
# required: Whether the key is required (default is no)
# type: The type of value that can be set, aligns to the values in the plugin formatter
---
- key: namespace
description:
- The namespace of the collection.
- This can be a company/brand/organization or product namespace under which all content lives.
- May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with underscores or
numbers and cannot contain consecutive underscores.
required: yes
type: str
- key: name
description:
- The name of the collection.
- Has the same character restrictions as C(namespace).
required: yes
type: str
- key: version
description:
- The version of the collection.
- Must be compatible with semantic versioning.
required: yes
type: str
- key: readme
description:
- The path to the Markdown (.md) readme file.
- This path is relative to the root of the collection.
required: yes
type: str
- key: authors
description:
- A list of the collection's content authors.
- Can be just the name or in the format 'Full Name <email> (url) @nicks:irc/im.site#channel'.
required: yes
type: list
- key: description
description:
- A short summary description of the collection.
type: str
- key: license
description:
- Either a single license or a list of licenses for content inside of a collection.
- Ansible Galaxy currently only accepts L(SPDX,https://spdx.org/licenses/) licenses
- This key is mutually exclusive with C(license_file).
type: list
- key: license_file
description:
- The path to the license file for the collection.
- This path is relative to the root of the collection.
- This key is mutually exclusive with C(license).
type: str
- key: tags
description:
- A list of tags you want to associate with the collection for indexing/searching.
- A tag name has the same character requirements as C(namespace) and C(name).
type: list
- key: dependencies
description:
- Collections that this collection requires to be installed for it to be usable.
- The key of the dict is the collection label C(namespace.name).
- The value is a version range
L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification).
- Multiple version range specifiers can be set and are separated by C(,).
type: dict
- key: repository
description:
- The URL of the originating SCM repository.
type: str
- key: documentation
description:
- The URL to any online docs.
type: str
- key: homepage
description:
- The URL to the homepage of the collection/project.
type: str
- key: issues
description:
- The URL to the collection issue tracker.
type: str
- key: build_ignore
description:
- A list of file glob-like patterns used to filter any files or directories
that should not be included in the build artifact.
- A pattern is matched from the relative path of the file or directory of the
collection directory.
- This uses C(fnmatch) to match the files or directories.
- Some directories and files like C(galaxy.yml), C(*.pyc), C(*.retry), and
C(.git) are always filtered.
- Mutually exclusive with C(manifest)
type: list
version_added: '2.10'
- key: manifest
description:
- A dict controlling use of manifest directives used in building the collection artifact.
- The key C(directives) is a list of MANIFEST.in style L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands)
- The key C(omit_default_directives) is a boolean that controls whether the default directives are used
- Mutually exclusive with C(build_ignore)
type: sentinel
version_added: '2.14' | unknown | github | https://github.com/ansible/ansible | lib/ansible/galaxy/data/collections_galaxy_meta.yml |
from typing import Any
from langchain_core.document_loaders import Blob, BlobLoader
from langchain_classic._api import create_importer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Blob": "langchain_community.document_loaders",
"BlobLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Blob",
"BlobLoader",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/document_loaders/blob_loaders/schema.py |
# coding = utf-8
# !/usr/bin/env python
import os
import time
class PinYin(object):
path = "Files/rawdict_utf16_65105_freq.txt"
pinyin_list = []
keyboard_num = ['2', '2', '2', '3', '3', '3', '4', '4', '4', '5', '5', '5', '6', '6', '6', '7', '7', '7', '7', '8',
'8', '8', '9', '9', '9', '9']
keyboard_letter = ["a", 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't','u', 'v', 'w', 'x', 'y', 'z']
def __init__(self):
self.pinyin_file = open("pinyin.txt", 'w', encoding='utf-8')
self.keyboard_file = open("keyboard.txt", 'w', encoding='utf-8')
def do_file(self):
print("Starting...")
start_time = time.time()
with open(self.path, 'r', encoding='utf-8') as data:
for x in data:
self.change_format(x.split(' '))
self.write_file()
end_time = time.time()
return "Write done(%2.2fs). \nPlease check pinyin.txt and keyboard.txt." % (end_time - start_time)
'''
eg. change format from "xiexie" to "xie_xie".
'''
def change_format(self, data):
if len(data) == 4:
self.pinyin_list.append((data[0], "%s" % data[3]))
elif len(data) == 5:
self.pinyin_list.append((data[0], "%s_%s" % (data[3], data[4])))
elif len(data) == 6:
self.pinyin_list.append((data[0], "%s_%s_%s" % (data[3], data[4], data[5])))
elif len(data) == 7:
self.pinyin_list.append((data[0], "%s_%s_%s_%s" % (data[3], data[4], data[5], data[6])))
def write_file(self):
length = len(self.pinyin_list)
str_text = []
str_key = []
str_pinyin = ''
for index in range(length):
if index < length - 1 and self.pinyin_list[index][1] == self.pinyin_list[index + 1][1]:
str_text.append(self.pinyin_list[index][0])
else:
str_text.append(self.pinyin_list[index][0])
str_pinyin = self.pinyin_list[index][1]
self.pinyin_file.write("const unsigned char PY_mb_%s []= {\"%s\"};\n" % (str_pinyin.replace('\n', ''),
"".join(str_text)))
for num in str_pinyin.replace('\n', '').replace('_', ''):
str_key.append(self.keyboard_num[self.keyboard_letter.index(num)])
self.keyboard_file.write("{0x%s, \"%s\", PY_mb_%s};\n" % ("".join(str_key),
str_pinyin.replace('\n', ''),
str_pinyin.replace('\n', '')))
str_text.clear()
str_key.clear()
self.pinyin_file.close()
self.keyboard_file.close()
if __name__ == "__main__":
manager = PinYin()
if os.path.exists(manager.path):
print(manager.do_file())
else:
print("The file is not exist, please put the .txt in right directory!") | unknown | codeparrot/codeparrot-clean | ||
---
- name: notify inexistent handler
hosts: localhost
gather_facts: false
tasks:
- name: test notify an inexistent handler
command: uptime
notify:
- notify_inexistent_handler
register: result | unknown | github | https://github.com/ansible/ansible | test/integration/targets/handlers/test_handlers_inexistent_notify.yml |
"""
Visualization of a Histogram2D as a heaatmap
"""
import re
import logging
import base64
import io
import numpy as np
import scipy as sp
from PIL import Image
from progressivis.core.utils import indices_len
from progressivis.core.slot import SlotDescriptor
from progressivis.table import Table
from progressivis.table.module import TableModule
logger = logging.getLogger(__name__)
class Heatmap(TableModule):
"Heatmap module"
parameters = [('cmax', np.dtype(float), np.nan),
('cmin', np.dtype(float), np.nan),
('high', np.dtype(int), 65536),
('low', np.dtype(int), 0),
('filename', np.dtype(object), None),
('history', np.dtype(int), 3)]
inputs = [SlotDescriptor('array', type=Table)]
# schema = [('image', np.dtype(object), None),
# ('filename', np.dtype(object), None),
# UPDATE_COLUMN_DESC]
schema = "{filename: string, time: int64}"
def __init__(self, colormap=None, **kwds):
super(Heatmap, self).__init__(**kwds)
self.colormap = colormap
self.default_step_size = 1
name = self.generate_table_name('Heatmap')
# params = self.params
# if params.filename is None:
# params.filename = name+'%d.png'
self.result = Table(name, dshape=Heatmap.schema, create=True)
def predict_step_size(self, duration):
_ = duration
# Module sample is constant time (supposedly)
return 1
def run_step(self, run_number, step_size, howlong):
dfslot = self.get_input_slot('array')
input_df = dfslot.data()
# dfslot.update(run_number)
dfslot.deleted.next()
indices = dfslot.created.next()
steps = indices_len(indices)
if steps == 0:
indices = dfslot.updated.next()
steps = indices_len(indices)
if steps == 0:
return self._return_run_step(self.state_blocked, steps_run=1)
histo = input_df.last()['array']
if histo is None:
return self._return_run_step(self.state_blocked, steps_run=1)
params = self.params
cmax = params.cmax
if np.isnan(cmax):
cmax = None
cmin = params.cmin
if np.isnan(cmin):
cmin = None
high = params.high
low = params.low
try:
#import pdb;pdb.set_trace()
if cmin is None:
cmin = histo.min()
if cmax is None:
cmax = histo.max()
cscale = cmax - cmin
scale_hl = float(high - low)
scale = float(high - low) / cscale
#data = (sp.special.cbrt(histo) * 1.0 - cmin) * scale + 0.4999
data = (sp.special.cbrt(histo) * 1.0 - cmin) * scale_hl + 0.4999
data[data > high] = high
data[data < 0] = 0
data = np.cast[np.uint32](data)
if low != 0:
data += low
image = Image.fromarray(data, mode='I')
image = image.transpose(Image.FLIP_TOP_BOTTOM)
filename = params.filename
except:
image = None
filename = None
if filename is not None:
try:
if re.search(r'%(0[\d])?d', filename):
filename = filename % (run_number)
filename = self.storage.fullname(self, filename)
# TODO should do it atomically since it will be
# called 4 times with the same fn
image.save(filename, format='PNG') # bits=16)
logger.debug('Saved image %s', filename)
image = None
except:
logger.error('Cannot save image %s', filename)
raise
else:
buffered = io.BytesIO()
image.save(buffered, format='PNG', bits=16)
res = str(base64.b64encode(buffered.getvalue()), "ascii")
filename = "data:image/png;base64,"+res
if len(self.result) == 0 or self.result.last()['time'] != run_number:
values = {'filename': filename, 'time': run_number}
self.result.add(values)
return self._return_run_step(self.state_blocked, steps_run=1)
def is_visualization(self):
return True
def get_visualization(self):
return "heatmap"
def to_json(self, short=False):
json = super(Heatmap, self).to_json(short)
if short:
return json
return self.heatmap_to_json(json, short)
def heatmap_to_json(self, json, short):
dfslot = self.get_input_slot('array')
histo = dfslot.output_module
json['columns'] = [histo.x_column, histo.y_column]
histo_df = dfslot.data()
if histo_df is not None and len(histo_df) != 0:
row = histo_df.last()
if not (np.isnan(row['xmin']) or np.isnan(row['xmax'])
or np.isnan(row['ymin']) or np.isnan(row['ymax'])):
json['bounds'] = {
'xmin': row['xmin'],
'ymin': row['ymin'],
'xmax': row['xmax'],
'ymax': row['ymax']
}
df = self.result
if df is not None and self._last_update != 0:
row = df.last()
json['image'] = row['filename']
return json
def get_image(self, run_number=None):
if self.result is None or len(self.result) == 0:
return None
last = self.result.last()
if run_number is None or run_number >= last['time']:
run_number = last['time']
filename = last['filename']
else:
time = self.result['time']
idx = np.where(time == run_number)[0]
if len(idx) == 0:
filename = last['filename']
else:
filename = self.result['filename'][idx[0]]
return filename
def get_image_bin(self, run_number=None):
file_url = self.get_image(run_number)
payload = file_url.split(',',1)[1]
return base64.b64decode(payload) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# Unit Testing of pg commands
#
import os
import unittest
import tempfile
from gppylib.db import dbconn
from gppylib.db.test import skipIfDatabaseDown
from gppylib import gplog
from gppylib.commands import pg
from gppylib.gparray import GpArray
logger = gplog.get_default_logger()
gplog.enable_verbose_logging()
@skipIfDatabaseDown()
class PgCommandsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testReadPostmasterTempFile(self):
logger.info("testReadPostmasterTempFile")
url = dbconn.DbURL()
gpdb = GpArray.initFromCatalog(url)
logger.info("Search for valid master port: %s" % gpdb.master.port)
cmd = pg.ReadPostmasterTempFile.local('test pg tempfile read',gpdb.master.port)
(exists,PID,datadir)=cmd.getResults()
logger.info("exists:=%s PID=%d datadir='%s'" % (exists,PID,datadir))
self.assertTrue(exists)
self.assertTrue(PID > 0)
self.assertEquals(datadir,gpdb.master.datadir)
gpdb.master.port=4000
logger.info("Search for bogus master port: %s" % gpdb.master.port)
cmd = pg.ReadPostmasterTempFile.local('test pg tempfile read',gpdb.master.port)
(exists,PID,datadir)=cmd.getResults()
logger.info("exists:=%s PID=%d datadir='%s'" % (exists,PID,datadir))
self.assertFalse(exists)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from gzipped
compressed tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
description:
description:
- an optional description
required: false
default: null
family:
description:
- an optional family name
required: false
default: null
version_added: "2.2"
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
timeout:
description:
- timeout for the operation
required: false
default: 180
version_added: "2.0"
service_account_email:
description:
- service account email
required: false
default: null
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
project_id:
description:
- your GCE project ID
required: false
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Tom Melendez (supertom)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
has_libcloud = True
except ImportError:
has_libcloud = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
"""Create an image with the specified name."""
source = module.params.get('source')
zone = module.params.get('zone')
desc = module.params.get('description')
timeout = module.params.get('timeout')
family = module.params.get('family')
if not source:
module.fail_json(msg='Must supply a source', changed=False)
if source.startswith(GCS_URI):
# source is a Google Cloud Storage URI
volume = source
elif source.startswith('gs://'):
# libcloud only accepts https URI.
volume = source.replace('gs://', GCS_URI)
else:
try:
volume = gce.ex_get_volume(source, zone)
except ResourceNotFoundError:
module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
changed=False)
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
gce_extra_args = {}
if family is not None:
gce_extra_args['family'] = family
old_timeout = gce.connection.timeout
try:
gce.connection.timeout = timeout
gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
return True
except ResourceExistsError:
return False
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
finally:
gce.connection.timeout = old_timeout
def delete_image(gce, name, module):
"""Delete a specific image resource by name."""
try:
gce.ex_delete_image(name)
return True
except ResourceNotFoundError:
return False
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
family=dict(),
description=dict(),
source=dict(),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(type='path'),
project_id=dict(),
timeout=dict(type='int', default=180)
)
)
if not has_libcloud:
module.fail_json(msg='libcloud with GCE support is required.')
gce = gce_connect(module)
name = module.params.get('name')
state = module.params.get('state')
family = module.params.get('family')
changed = False
if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
changed=False)
# user wants to create an image.
if state == 'present':
changed = create_image(gce, name, module)
# user wants to delete the image.
if state == 'absent':
changed = delete_image(gce, name, module)
module.exit_json(changed=changed, name=name)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
{
"$schema": "https://json.schemastore.org/tsconfig",
"compilerOptions": {
"target": "ES2022",
"useDefineForClassFields": true,
"lib": ["ES2022", "DOM", "DOM.Iterable"],
"module": "ESNext",
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"isolatedModules": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "react-jsx",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
"baseUrl": ".",
"paths": {
"src/*": ["./src/*"],
"openapi/*": ["./openapi-gen/*"],
"tests/*": ["./tests/*"],
"playwright.config": ["./playwright.config.ts"]
}
},
"include": ["src", "tests/**/*.ts", "playwright.config.ts"]
} | json | github | https://github.com/apache/airflow | airflow-core/src/airflow/ui/tsconfig.app.json |
#!/bin/env python
# -*- coding: utf-8 -*- vim: set ts=4 et sw=4 fdm=indent :
import os, sys
try:
import simplejson
except ImportError:
sys.stderr.write('Unable to load simplejson python module.')
sys.exit(1)
class MCollectiveActionNoEnv(Exception):
pass
class MCollectiveActionFileError(Exception):
pass
class MCollectiveAction(object):
def __init__(self, *args, **kwargs):
try:
self.infile = os.environ['MCOLLECTIVE_REQUEST_FILE']
except KeyError:
raise MCollectiveActionNoEnv("No MCOLLECTIVE_REQUEST_FILE environment variable")
try:
self.outfile = os.environ['MCOLLECTIVE_REPLY_FILE']
except KeyError:
raise MCollectiveActionNoEnv("No MCOLLECTIVE_REPLY_FILE environment variable")
self.request = {}
self.reply = {}
self.load()
def load(self):
if not self.infile:
return False
try:
infile = open(self.infile, 'r')
self.request = simplejson.load(infile)
infile.close()
except IOError, e:
raise MCollectiveActionFileError("Could not read request file `%s`: %s" % (self.infile, e))
except simplejson.JSONDecodeError, e:
infile.close()
raise MCollectiveActionFileError("Could not parse JSON data in file `%s`: %s", (self.infile, e))
def send(self):
if not getattr(self, 'outfile', None): # if exception was raised during or before setting self.outfile
return False
try:
outfile = open(self.outfile, 'w')
simplejson.dump(self.reply, outfile)
outfile.close()
except IOError, e:
raise MCollectiveActionFileError("Could not write reply file `%s`: %s" % (self.outfile, e))
def error(self, msg):
"""Prints line to STDERR that will be logged at error level in the mcollectived log file"""
sys.stderr.write("%s\n" % msg)
def fail(self, msg):
"""Logs error message and exitst with RPCAborted"""
self.error(msg)
sys.exit(1)
def info(self, msg):
"""Prints line to STDOUT that will be logged at info level in the mcollectived log file"""
sys.stdout.write("%s\n" % msg)
def __del__(self):
self.send() | unknown | codeparrot/codeparrot-clean | ||
comment: false
coverage:
precision: 5
status:
patch:
default:
informational: true
project:
default:
informational: true
github_checks:
annotations: false | unknown | github | https://github.com/microsoft/TypeScript | .github/codecov.yml |
# Copyright 2013-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor microdescriptors, which contain a distilled version of a
relay's server descriptor. As of Tor version 0.2.3.3-alpha Tor no longer
downloads server descriptors by default, opting for microdescriptors instead.
Unlike most descriptor documents these aren't available on the metrics site
(since they don't contain any information that the server descriptors don't).
The limited information in microdescriptors make them rather clunky to use
compared with server descriptors. For instance microdescriptors lack the
relay's fingerprint, making it difficut to use them to look up the relay's
other descriptors.
To do so you need to match the microdescriptor's digest against its
corresponding router status entry. For added fun as of this writing the
controller doesn't even surface those router status entries
(:trac:`7953`).
For instance, here's an example that prints the nickname and fignerprints of
the exit relays.
::
import os
from stem.control import Controller
from stem.descriptor import parse_file
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
exit_digests = set()
data_dir = controller.get_conf('DataDirectory')
for desc in controller.get_microdescriptors():
if desc.exit_policy.is_exiting_allowed():
exit_digests.add(desc.digest)
print 'Exit Relays:'
for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')):
if desc.digest in exit_digests:
print ' %s (%s)' % (desc.nickname, desc.fingerprint)
Doing the same is trivial with server descriptors...
::
from stem.descriptor import parse_file
print 'Exit Relays:'
for desc in parse_file('/home/atagar/.tor/cached-descriptors'):
if desc.exit_policy.is_exiting_allowed():
print ' %s (%s)' % (desc.nickname, desc.fingerprint)
**Module Overview:**
::
Microdescriptor - Tor microdescriptor.
"""
import hashlib
import stem.exit_policy
from stem.descriptor import (
Descriptor,
_get_descriptor_components,
_read_until_keywords,
_value,
_parse_simple_line,
_parse_key_block,
)
from stem.descriptor.router_status_entry import (
_parse_a_line,
_parse_p_line,
)
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
# Keywords that must be present in every microdescriptor.
REQUIRED_FIELDS = (
  'onion-key',
)

# Keywords that may appear at most once per microdescriptor.
SINGLE_FIELDS = (
  'onion-key',
  'ntor-onion-key',
  'family',
  'p',
  'p6',
)
def _parse_file(descriptor_file, validate = False, **kwargs):
  """
  Iterates over the microdescriptors in a file.

  :param file descriptor_file: file with descriptor content
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param dict kwargs: additional arguments for the descriptor constructor

  :returns: iterator for Microdescriptor instances in the file

  :raises:
    * **ValueError** if the contents is malformed and validate is True
    * **IOError** if the file can't be read
  """

  while True:
    # Everything before the next 'onion-key' keyword is annotation content
    # (e.g. '@last-listed ...' lines from a cached-microdescs file).
    annotations = _read_until_keywords('onion-key', descriptor_file)
    descriptor_lines = []
    # read the onion-key line, done if we're at the end of the document
    onion_key_line = descriptor_file.readline()
    if onion_key_line:
      descriptor_lines.append(onion_key_line)
    else:
      break
    # Accumulate lines until the next descriptor ('onion-key') or annotation
    # ('@...') begins. We rewind via tell/seek so the boundary line is left
    # in the stream for the next iteration.
    while True:
      last_position = descriptor_file.tell()
      line = descriptor_file.readline()
      if not line:
        break # EOF
      elif line.startswith(b'@') or line.startswith(b'onion-key'):
        descriptor_file.seek(last_position)
        break
      else:
        descriptor_lines.append(line)
    if descriptor_lines:
      # drop a leading '@type' annotation if present
      if descriptor_lines[0].startswith(b'@type'):
        descriptor_lines = descriptor_lines[1:]
      # strip newlines from annotations
      annotations = list(map(bytes.strip, annotations))
      descriptor_text = bytes.join(b'', descriptor_lines)
      yield Microdescriptor(descriptor_text, validate, annotations, **kwargs)
    else:
      break # done parsing descriptors
def _parse_id_line(descriptor, entries):
  """
  Parses an 'id' line, populating the descriptor's identifier_type and
  identifier attributes.

  :raises: **ValueError** if the line lacks either the key type or the digest
  """

  value = _value('id', entries)
  components = value.split()

  if len(components) < 2:
    raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)

  descriptor.identifier_type = components[0]
  descriptor.identifier = components[1]
# Digest is the uppercase hex SHA-256 of the descriptor's raw bytes.
_parse_digest = lambda descriptor, entries: setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper())

# 'onion-key' is followed by an RSA public key block.
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')

# 'ntor-onion-key' is a single value on the keyword's line.
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')

# 'family' is a space separated list of nicknames or fingerprints.
_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', _value('family', entries).split(' '))

# 'p6' is the relay's IPv6 exit policy summary.
_parse_p6_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('p6', entries)))
class Microdescriptor(Descriptor):
  """
  Microdescriptor (`descriptor specification
  <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)

  :var str digest: **\*** hex digest for this microdescriptor, this can be used
    to match against the corresponding digest attribute of a
    :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`
  :var str onion_key: **\*** key used to encrypt EXTEND cells
  :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
  :var list or_addresses: **\*** alternative for our address/or_port attributes, each
    entry is a tuple of the form (address (**str**), port (**int**), is_ipv6
    (**bool**))
  :var list family: **\*** nicknames or fingerprints of declared family
  :var stem.exit_policy.MicroExitPolicy exit_policy: **\*** relay's exit policy
  :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
  :var str identifier_type: identity digest key type
  :var str identifier: base64 encoded identity digest, this is only used for collision prevention (:trac:`11743`)

  **\*** attribute is required when we're parsed with validation

  .. versionchanged:: 1.1.0
    Added the identifier and identifier_type attributes.
  """

  # attribute name => (default value, parsing function)
  ATTRIBUTES = {
    'onion_key': (None, _parse_onion_key_line),
    'ntor_onion_key': (None, _parse_ntor_onion_key_line),
    'or_addresses': ([], _parse_a_line),
    'family': ([], _parse_family_line),
    'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line),
    'exit_policy_v6': (None, _parse_p6_line),
    'identifier_type': (None, _parse_id_line),
    'identifier': (None, _parse_id_line),
    'digest': (None, _parse_digest),
  }

  # descriptor line keyword => parsing function
  PARSER_FOR_LINE = {
    'onion-key': _parse_onion_key_line,
    'ntor-onion-key': _parse_ntor_onion_key_line,
    'a': _parse_a_line,
    'family': _parse_family_line,
    'p': _parse_p_line,
    'p6': _parse_p6_line,
    'id': _parse_id_line,
  }

  def __init__(self, raw_contents, validate = False, annotations = None):
    # When validating we parse everything eagerly so malformed content raises
    # here; otherwise attributes are populated lazily on first access.
    super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
    self._annotation_lines = annotations if annotations else []
    entries = _get_descriptor_components(raw_contents, validate)
    if validate:
      self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()
      self._parse(entries, validate)
      self._check_constraints(entries)
    else:
      self._entries = entries

  @lru_cache()
  def get_annotations(self):
    """
    Provides content that appeared prior to the descriptor. If this comes from
    the cached-microdescs then this commonly contains content like...

    ::

      @last-listed 2013-02-24 00:18:30

    :returns: **dict** with the key/value pairs in our annotations
    """

    annotation_dict = {}
    for line in self._annotation_lines:
      if b' ' in line:
        key, value = line.split(b' ', 1)
        annotation_dict[key] = value
      else:
        # annotation without a value (no space separator)
        annotation_dict[line] = None
    return annotation_dict

  def get_annotation_lines(self):
    """
    Provides the lines of content that appeared prior to the descriptor. This
    is the same as the
    :func:`~stem.descriptor.microdescriptor.Microdescriptor.get_annotations`
    results, but with the unparsed lines and ordering retained.

    :returns: **list** with the lines of annotation that came before this descriptor
    """

    return self._annotation_lines

  def _check_constraints(self, entries):
    """
    Does a basic check that the entries conform to this descriptor type's
    constraints.

    :param dict entries: keyword => (value, pgp key) entries

    :raises: **ValueError** if an issue arises in validation
    """

    for keyword in REQUIRED_FIELDS:
      if keyword not in entries:
        raise ValueError("Microdescriptor must have a '%s' entry" % keyword)

    for keyword in SINGLE_FIELDS:
      if keyword in entries and len(entries[keyword]) > 1:
        raise ValueError("The '%s' entry can only appear once in a microdescriptor" % keyword)

    # per spec the descriptor must begin with the onion-key entry
    if 'onion-key' != list(entries.keys())[0]:
      raise ValueError("Microdescriptor must start with a 'onion-key' entry")

  def _name(self, is_plural = False):
    # human readable descriptor type, used in error messages
    return 'microdescriptors' if is_plural else 'microdescriptor'

  def _compare(self, other, method):
    # Shared helper for our rich comparison methods; instances of other
    # types never compare as equal (or ordered).
    if not isinstance(other, Microdescriptor):
      return False
    return method(str(self).strip(), str(other).strip())

  def __hash__(self):
    # hash on normalized string content, consistent with __eq__
    return hash(str(self).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import bisect
import random
import numpy as np
from ..io import DataIter, DataBatch
from .. import ndarray
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n', start_label=0):
    """Encode sentences and (optionally) build a mapping
    from string tokens to integer indices. Unknown keys
    will be added to vocabulary.

    Parameters
    ----------
    sentences : list of list of str
        A list of sentences to encode. Each sentence
        should be a list of string tokens.
    vocab : None or dict of str -> int
        Optional input Vocabulary. When given, unknown tokens
        trigger an assertion failure instead of being added.
    invalid_label : int, default -1
        Index for invalid token, like <end-of-sentence>
    invalid_key : str, default '\\n'
        Key for invalid token. Use '\\n' for end
        of sentence by default.
    start_label : int
        lowest index.

    Returns
    -------
    result : list of list of int
        encoded sentences
    vocab : dict of str -> int
        result vocabulary
    """
    building_vocab = vocab is None
    if building_vocab:
        vocab = {invalid_key: invalid_label}

    next_label = start_label
    encoded = []
    for sentence in sentences:
        row = []
        for token in sentence:
            if token not in vocab:
                assert building_vocab, "Unknown token %s"%token
                # never hand out the reserved invalid_label index
                if next_label == invalid_label:
                    next_label += 1
                vocab[token] = next_label
                next_label += 1
            row.append(vocab[token])
        encoded.append(row)
    return encoded, vocab
class BucketSentenceIter(DataIter):
    """Simple bucketing iterator for language model.
    The label at each sequence step is the following token
    in the sequence.

    Parameters
    ----------
    sentences : list of list of int
        Encoded sentences.
    batch_size : int
        Batch size of the data.
    invalid_label : int, optional
        Key for invalid label, e.g. <end-of-sentence>. The default is -1.
    dtype : str, optional
        Data type of the encoding. The default data type is 'float32'.
    buckets : list of int, optional
        Size of the data buckets. Automatically generated if None.
    data_name : str, optional
        Name of the data. The default name is 'data'.
    label_name : str, optional
        Name of the label. The default name is 'softmax_label'.
    layout : str, optional
        Format of data and label. 'NT' means (batch_size, length)
        and 'TN' means (length, batch_size).
    """
    def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 layout='NTC'):
        super(BucketSentenceIter, self).__init__()
        if not buckets:
            # Auto-generate buckets: one per sentence length that occurs at
            # least batch_size times.
            buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
                       if j >= batch_size]
        buckets.sort()

        # Assign each sentence to the smallest bucket it fits in, padding
        # with invalid_label; sentences longer than all buckets are dropped.
        ndiscard = 0
        self.data = [[] for _ in buckets]
        for i, sent in enumerate(sentences):
            buck = bisect.bisect_left(buckets, len(sent))
            if buck == len(buckets):
                ndiscard += 1
                continue
            buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
            buff[:len(sent)] = sent
            self.data[buck].append(buff)

        self.data = [np.asarray(i, dtype=dtype) for i in self.data]

        print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)

        self.batch_size = batch_size
        self.buckets = buckets
        self.data_name = data_name
        self.label_name = label_name
        self.dtype = dtype
        self.invalid_label = invalid_label
        self.nddata = []
        self.ndlabel = []
        # Position of the batch ('N') axis determines the data layout.
        self.major_axis = layout.find('N')
        self.default_bucket_key = max(buckets)

        if self.major_axis == 0:
            self.provide_data = [(data_name, (batch_size, self.default_bucket_key))]
            self.provide_label = [(label_name, (batch_size, self.default_bucket_key))]
        elif self.major_axis == 1:
            self.provide_data = [(data_name, (self.default_bucket_key, batch_size))]
            self.provide_label = [(label_name, (self.default_bucket_key, batch_size))]
        else:
            # BUGFIX: the original message contained an unbound '%s'
            # placeholder (no '% layout') and the typo "Must by".
            raise ValueError(
                "Invalid layout %s: Must be NT (batch major) or TN (time major)" % layout)

        # idx maps a linear batch index to (bucket, offset within bucket).
        self.idx = []
        for i, buck in enumerate(self.data):
            self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
        self.curr_idx = 0

        self.reset()

    def reset(self):
        """Resets the iterator to the beginning of the data."""
        self.curr_idx = 0
        random.shuffle(self.idx)
        for buck in self.data:
            np.random.shuffle(buck)

        # Rebuild NDArray copies; the label is the data shifted left by one
        # step, with invalid_label filling the final position.
        self.nddata = []
        self.ndlabel = []
        for buck in self.data:
            label = np.empty_like(buck)
            label[:, :-1] = buck[:, 1:]
            label[:, -1] = self.invalid_label
            self.nddata.append(ndarray.array(buck, dtype=self.dtype))
            self.ndlabel.append(ndarray.array(label, dtype=self.dtype))

    def next(self):
        """Returns the next batch of data.

        :raises: StopIteration once all batches have been served.
        """
        if self.curr_idx == len(self.idx):
            raise StopIteration
        i, j = self.idx[self.curr_idx]
        self.curr_idx += 1

        if self.major_axis == 1:
            # time-major layout: transpose to (length, batch_size)
            data = self.nddata[i][j:j+self.batch_size].T
            label = self.ndlabel[i][j:j+self.batch_size].T
        else:
            data = self.nddata[i][j:j+self.batch_size]
            label = self.ndlabel[i][j:j+self.batch_size]

        return DataBatch([data], [label], pad=0,
                         bucket_key=self.buckets[i],
                         provide_data=[(self.data_name, data.shape)],
                         provide_label=[(self.label_name, label.shape)])
# -*- coding: utf-8 -*-
#
# Cipher/DES.py : DES
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""DES symmetric cipher
DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_ . It has a fixed data block size of 8 bytes.
Its keys are 64 bits long, even though 8 bits were used for integrity (now they
are ignored) and do not contribute to security.
DES is cryptographically secure, but its key length is too short by nowadays
standards and it could be brute forced with some effort.
DES should not be used for new designs. Use `AES`.
As an example, encryption can be done as follows:
    >>> from Crypto.Cipher import DES
    >>> from Crypto import Random
    >>>
    >>> key = b'-8B Key.'
    >>> iv = Random.new().read(DES.block_size)
    >>> cipher = DES.new(key, DES.MODE_OFB, iv)
    >>> plaintext = b'sona si latine loqueris '
    >>> msg = iv + cipher.encrypt(plaintext)
.. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf
:undocumented: __revision__, __package__
"""
__revision__ = "$Id$"
from Crypto.Cipher import blockalgo
from Crypto.Cipher import _DES
class DESCipher(blockalgo.BlockAlgo):
    """DES cipher object"""

    def __init__(self, key, *args, **kwargs):
        """Initialize a DES cipher object

        See also `new()` at the module level."""
        # Delegate all cipher machinery to the generic BlockAlgo base,
        # binding it to the native _DES implementation.
        blockalgo.BlockAlgo.__init__(self, _DES, key, *args, **kwargs)
def new(key, *args, **kwargs):
    """Create a new DES cipher

    :Parameters:
      key : byte string
        The secret key to use in the symmetric cipher.
        It must be 8 byte long. The parity bits will be ignored.
    :Keywords:
      mode : a *MODE_** constant
        The chaining mode to use for encryption or decryption.
        Default is `MODE_ECB`.
      IV : byte string
        The initialization vector to use for encryption or decryption.

        It is ignored for `MODE_ECB` and `MODE_CTR`.

        For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption
        and `block_size` +2 bytes for decryption (in the latter case, it is
        actually the *encrypted* IV which was prefixed to the ciphertext).
        It is mandatory.

        For all other modes, it must be `block_size` bytes longs. It is optional and
        when not present it will be given a default value of all zeroes.
      counter : callable
        (*Only* `MODE_CTR`). A stateful function that returns the next
        *counter block*, which is a byte string of `block_size` bytes.
        For better performance, use `Crypto.Util.Counter`.
      segment_size : integer
        (*Only* `MODE_CFB`).The number of bits the plaintext and ciphertext
        are segmented in.
        It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.

    :Return: an `DESCipher` object
    """
    # All argument validation happens inside DESCipher / BlockAlgo.
    return DESCipher(key, *args, **kwargs)
#: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`.
MODE_CFB = 3
#: Obsolete PGP mode; kept only for backward compatibility. This mode
#: should not be used -- use `MODE_OPENPGP` instead.
MODE_PGP = 4
#: Output FeedBack (OFB). See `blockalgo.MODE_OFB`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `blockalgo.MODE_CTR`.
MODE_CTR = 6
#: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`.
MODE_OPENPGP = 7
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = 8
import http.client as http
import requests
import json
import sys
import os
import pycurl
from io import BytesIO
import logging
from acd import oauth
from acd.common import RequestError
import utils
logger = logging.getLogger(__name__)
def progress(total_to_download, total_downloaded, total_to_upload, total_uploaded):
    """curl progress indicator function

    Renders an in-place text progress bar on stdout for uploads; does
    nothing until the total upload size is known (non-zero).
    """
    if not total_to_upload:
        return
    fraction = float(total_uploaded) / total_to_upload
    percentage = round(fraction * 100, ndigits=2)
    bar = "#" * int(percentage / 2)
    padding = " " * (50 - len(bar))
    sys.stdout.write('[%s%s] %s%% of %s\r'
                     % (bar, padding, ('%05.2f' % percentage).rjust(6), utils.file_size_str(total_to_upload)))
    sys.stdout.flush()
def create_folder(name, parent=None):
    """Creates a remote FOLDER node.

    :param name: name of the new folder
    :param parent: optional parent node id
    :returns: parsed JSON metadata of the created node
    :raises: RequestError if the server does not answer 201 CREATED
    """
    metadata = {'kind': 'FOLDER', 'name': name}
    if parent:
        metadata['parents'] = [parent]

    response = requests.post(oauth.get_metadata_url() + 'nodes',
                             headers=oauth.get_auth_header(),
                             data=json.dumps(metadata))

    if response.status_code != http.CREATED:
        raise RequestError(response.status_code, response.text)

    return response.json()
# file must be valid, readable
def upload_file(file_name, parent=None):
    """Uploads a local file as a new node using a multipart POST via pycurl.

    :param file_name: path of the local file; its basename becomes the node name
    :param parent: optional parent node id
    :returns: parsed JSON metadata of the created node
    :raises: RequestError on curl failure or non-201 response
    """
    params = '?suppress=deduplication' # suppresses 409 response
    metadata = {'kind': 'FILE', 'name': os.path.basename(file_name)}
    if parent:
        metadata['parents'] = [parent]
    # response body is captured in an in-memory buffer
    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, oauth.get_content_url() + 'nodes' + params)
    c.setopt(c.HTTPHEADER, ['Authorization: ' + oauth.get_auth_token()])
    c.setopt(c.WRITEDATA, buffer)
    # multipart form: JSON metadata part plus the file content part
    c.setopt(c.HTTPPOST, [('metadata', json.dumps(metadata)),
                          ('content', (c.FORM_FILE, file_name.encode('UTF-8')))])
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, progress)
    if logger.getEffectiveLevel() == logging.DEBUG:
        c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error as e:
        # transport-level failure; no HTTP status available
        raise RequestError(0, e)
    status = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    print() # break progress line
    body = buffer.getvalue().decode('utf-8')
    if status != http.CREATED:
        # print('Uploading "%s" failed.' % file_name)
        raise RequestError(status, body)
    return json.loads(body)
def overwrite_file(node_id, file_name):
    """Replaces the content of an existing node with a local file (HTTP PUT).

    :param node_id: id of the node whose content is replaced
    :param file_name: path of the local file to upload
    :returns: parsed JSON metadata of the updated node
    :raises: RequestError on curl failure or non-200 response
    """
    params = '?suppress=deduplication' # suppresses 409 response
    # response body is captured in an in-memory buffer
    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, oauth.get_content_url() + 'nodes/' + node_id + '/content' + params)
    c.setopt(c.HTTPHEADER, ['Authorization: ' + oauth.get_auth_token()])
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPPOST, [('content', (c.FORM_FILE, file_name.encode('UTF-8')))])
    # pycurl posts with POST by default; force a PUT for content replacement
    c.setopt(c.CUSTOMREQUEST, 'PUT')
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, progress)
    if logger.getEffectiveLevel() == logging.DEBUG:
        c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error as e:
        # transport-level failure; no HTTP status available
        raise RequestError(0, e)
    status = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    print() # break progress line
    body = buffer.getvalue().decode('utf-8')
    if status != http.OK:
        # print('Overwriting "%s" failed.' % file_name)
        raise RequestError(status, body)
    return json.loads(body)
# local name must be checked prior to call
# existing file will be overwritten
def download_file(node_id, local_name, local_path=None, write_callback=None):
    """Streams a node's content to a local file.

    :param node_id: id of the node to download
    :param local_name: file name to write (must already be validated)
    :param local_path: optional directory to place the file in
    :param write_callback: optional callable invoked with each written chunk
    :raises: RequestError if the server does not answer 200 OK
    """
    r = requests.get(oauth.get_content_url() + 'nodes/' + node_id, headers=oauth.get_auth_header(), stream=True)
    if r.status_code != http.OK:
        print('Downloading %s failed.' % node_id)
        raise RequestError(r.status_code, r.text)

    dl_path = local_name
    if local_path:
        dl_path = os.path.join(local_path, local_name)
    with open(dl_path, 'wb') as f:
        # BUGFIX: 'content-length' may be absent (e.g. chunked transfer
        # encoding); int(None) raised a TypeError. Fall back to 0, which
        # makes progress() a no-op.
        total_ln = int(r.headers.get('content-length') or 0)
        curr_ln = 0
        for chunk in r.iter_content(chunk_size=8192):
            if chunk: # filter out keep-alive new chunks
                f.write(chunk)
                f.flush()
                if write_callback:
                    write_callback(chunk)
                curr_ln += len(chunk)
                progress(0, 0, total_ln, curr_ln)
    print() # break progress line
    return # no response text?
import os
import time
import re
import pprint
import json
import datetime
from django.conf import settings
import musicbrainzngs
from obp_legacy.models import *
from spf.models import Match, Request
import logging
log = logging.getLogger(__name__)
class MediaMatch(object):
    # Looks up candidate MusicBrainz recordings for a Request and stores each
    # candidate as a Match row. NOTE: this is Python 2 code (print statements,
    # 'except Exception, e' syntax).

    def __init__(self):
        # NOTE(review): this local shadows the module-level logger and is
        # never used afterwards.
        log = logging.getLogger('util.migrator.__init__')
        musicbrainzngs.set_useragent("Example music app", "0.1", "http://example.com/music")
        #musicbrainzngs.set_hostname("mb.anorg.net")
        #musicbrainzngs.set_rate_limit(limit_or_interval=False)
        self.pp = pprint.PrettyPrinter(indent=4)
        #self.pp.pprint = lambda d: None

    def match(self, obj):
        # For each MusicBrainz candidate id attached to the request, fetch the
        # full recording and persist a Match row; status 1 on success, 99 on
        # any error.
        log = logging.getLogger('util.match.match')
        log.info('matching: %s' % obj.title)
        for r in obj.results_mb:
            print '--'
            #print r
            mb_id = r['id']
            print mb_id
            match, created = Match.objects.get_or_create(request=obj, mb_id=mb_id)
            includes = [
                'artists',
                'releases',
                'artist-credits',
                'release-rels',
                'release-group-rels',
                'artist-rels',
                'annotation',
                'discids',
                'label-rels',
                'work-rels',
                'recording-rels',
                'media',
                'isrcs',
            ]
            try:
                mr = musicbrainzngs.get_recording_by_id(id=mb_id, includes=includes)
                mr = mr['recording']
                # self.pp.pprint(mr)
                match.title = mr['title']
                # match data as json
                match.results_mb = mr
                match.artist = mr['artist-credit-phrase']
                if 'length' in mr:
                    match.duration = mr['length']
                # compose credits string (one artist name per line)
                if 'artist-credit' in mr:
                    credits = ''
                    for c in mr['artist-credit']:
                        try:
                            astr = c['artist']['name']
                            credits += astr + "\n"
                        except:
                            pass
                    match.artist_credits = credits
                # compose secondary credits string (relation type/attributes)
                if 'artist-relation-list' in mr:
                    credits = ''
                    for c in mr['artist-relation-list']:
                        try:
                            astr = c['artist']['name']
                            astr = '%s - [%s: %s]' % (astr, c['type'], ', '.join(c['attribute-list']))
                            credits += astr + "\n"
                        except Exception, e:
                            print e
                            pass
                    match.artist_credits_secondary = credits
                if 'isrc-list' in mr:
                    self.pp.pprint(mr['isrc-list'])
                    try:
                        isrcs = "\n".join(mr['isrc-list'])
                        match.isrc_list = isrcs
                    except Exception, e:
                        print e
                        pass
                # compose release string
                if 'release-list' in mr:
                    releases = ''
                    for r in mr['release-list']:
                        """
                        print '*******************************'
                        self.pp.pprint(r)
                        print '*******************************'
                        """
                        includes = ['labels','release-rels', 'work-rels']
                        #mr = mr['recording']
                        try:
                            pass
                        except:
                            pass
                        try:
                            rstr = r['title']
                            rstr = '%s - [%s | %s]' % (rstr, r['country'], r['date'])
                            if 'medium-list' in r:
                                try:
                                    rstr += ' - %s - Track# %s' % (r['medium-list'][0]['format'], r['medium-list'][0]['track-list'][0]['number'])
                                except:
                                    pass
                            try:
                                tstr = ''
                                # NOTE(review): 'mrel' is assigned only further
                                # below (get_release_by_id); on the first release
                                # this raises NameError, silently swallowed by
                                # the except. Label info therefore comes from the
                                # PREVIOUS iteration's release -- likely a bug.
                                if 'label-info-list' in mrel:
                                    lil = mrel['label-info-list'][0]
                                    self.pp.pprint(lil)
                                    if 'label' in lil:
                                        tstr += ' %s ' % lil['label']['name']
                                        if 'label-code' in lil['label']:
                                            tstr += '(%s) ' % lil['label']['label-code']
                                    if 'catalog-number' in lil:
                                        tstr += 'catno: %s' % lil['catalog-number']
                                    print '****'
                                    print tstr
                                    rstr += ' [ ' + tstr + ' ] '
                            except Exception, e:
                                print e
                                pass
                            try:
                                mrel = musicbrainzngs.get_release_by_id(id=r['id'], includes=includes)
                                mrel = mrel['release']
                                # self.pp.pprint(mrel)
                                rstr += ' [barcode: %s]' % (mrel['barcode'])
                            except:
                                pass
                            releases += rstr + "\n"
                        except Exception, e:
                            print e
                            pass
                    match.release_list = releases
                # compose ISWC string from the first related work
                if 'work-relation-list' in mr:
                    try:
                        iswcs = "\n".join(mr['work-relation-list'][0]['work']['iswc-list'])
                        match.iswc_list = iswcs
                    except:
                        pass
                match.status = 1
            except Exception, e:
                print 'GOT ERROR!!!: '
                print e
                print
                match.status = 99
            match.save()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: getent
short_description: a wrapper to the unix getent utility
description:
    - Runs getent against one of its various databases and returns information into
the host's facts, in a getent_<database> prefixed variable
version_added: "1.8"
options:
database:
required: True
description:
- the name of a getent database supported by the target system (passwd, group,
hosts, etc).
key:
required: False
default: ''
description:
- key from which to return values from the specified database, otherwise the
full contents are returned.
split:
required: False
default: None
description:
- "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database"
fail_key:
required: False
default: True
description:
- If a supplied key is missing this will make the task fail if True
notes:
- "Not all databases support enumeration, check system documentation for details"
requirements: [ ]
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# get root user info
- getent: database=passwd key=root
- debug: var=getent_passwd
# get all groups
- getent: database=group split=':'
- debug: var=getent_group
# get all hosts, split by tab
- getent: database=hosts
- debug: var=getent_hosts
# get http service info, no error if missing
- getent: database=services key=http fail_key=False
- debug: var=getent_services
# get user password hash (requires sudo/root)
- getent: database=shadow key=www-data split=:
- debug: var=getent_shadow
'''
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def main():
    """Run getent against the requested database and register the parsed
    records as ansible_facts under getent_<database>."""
    module = AnsibleModule(
        argument_spec = dict(
            database = dict(required=True),
            key = dict(required=False, default=None),
            split = dict(required=False, default=None),
            fail_key = dict(required=False, type='bool', default=True),
        ),
        supports_check_mode = True,
    )

    # databases whose records are colon-delimited by convention
    colon = [ 'passwd', 'shadow', 'group', 'gshadow' ]

    database = module.params['database']
    key = module.params.get('key')
    split = module.params.get('split')
    fail_key = module.params.get('fail_key')

    getent_bin = module.get_bin_path('getent', True)

    if key is not None:
        cmd = [ getent_bin, database, key ]
    else:
        cmd = [ getent_bin, database ]

    if split is None and database in colon:
        split = ':'

    try:
        rc, out, err = module.run_command(cmd)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))

    msg = "Unexpected failure!"
    dbtree = 'getent_%s' % database
    results = { dbtree: {} }

    if rc == 0:
        for line in out.splitlines():
            # skip blank lines so they don't create a bogus '' key
            if not line:
                continue
            record = line.split(split)
            results[dbtree][record[0]] = record[1:]

        module.exit_json(ansible_facts=results)

    elif rc == 1:
        msg = "Missing arguments, or database unknown."
    elif rc == 2:
        msg = "One or more supplied key could not be found in the database."
        if not fail_key:
            results[dbtree][key] = None
            module.exit_json(ansible_facts=results, msg=msg)
    elif rc == 3:
        msg = "Enumeration not supported on this database."

    module.fail_json(msg=msg)


# Guard the entry point so importing this module (e.g. for testing or docs
# generation) does not execute it.
if __name__ == '__main__':
    main()
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import math
import pickle
from collections import defaultdict
from functools import partial
from itertools import combinations, product
from typing import Any, Dict
from unittest.mock import patch
import joblib
import numpy as np
import pytest
from scipy.special import comb
import sklearn
from sklearn import clone, datasets
from sklearn.datasets import make_classification, make_hastie_10_2
from sklearn.decomposition import TruncatedSVD
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
RandomTreesEmbedding,
)
from sklearn.ensemble._bootstrap import _get_n_samples_bootstrap
from sklearn.ensemble._forest import _generate_unsampled_indices
from sklearn.exceptions import NotFittedError
from sklearn.metrics import (
explained_variance_score,
f1_score,
mean_poisson_deviance,
mean_squared_error,
)
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.svm import LinearSVC
from sklearn.tree._classes import SPARSE_SPLITTERS
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
skip_if_no_parallel,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.parallel import Parallel
from sklearn.utils.validation import check_random_state
# toy sample: two linearly separable clusters in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# Larger classification sample used for testing feature importances
X_large, y_large = datasets.make_classification(
    n_samples=500,
    n_features=10,
    n_informative=3,
    n_redundant=0,
    n_repeated=0,
    shuffle=False,
    random_state=0,
)

# also load the iris dataset
# and randomly permute it (the raw dataset is ordered by class)
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# Make regression dataset
X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1)

# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)

# Get the default backend in joblib to test parallelism and interaction with
# different backends
DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__

# Estimator registries used by the parametrized tests below.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}

FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}

FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}

FOREST_ESTIMATORS: Dict[str, Any] = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)

FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy()
FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classification_toy(name):
    """Check classification on a toy dataset."""
    forest_cls = FOREST_CLASSIFIERS[name]

    # Fit with the default max_features and with max_features=1; both must
    # reproduce the known labels on the toy problem.
    for extra_kwargs in ({}, {"max_features": 1}):
        clf = forest_cls(n_estimators=10, random_state=1, **extra_kwargs)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert len(clf) == 10

    # also test apply
    leaf_indices = clf.apply(X)
    assert leaf_indices.shape == (len(X), clf.n_estimators)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
@pytest.mark.parametrize("criterion", ("gini", "log_loss"))
def test_iris_criterion(name, criterion):
    """Check consistency on the iris dataset."""
    forest_cls = FOREST_CLASSIFIERS[name]

    # (extra constructor kwargs, minimum accepted training accuracy)
    for extra_kwargs, min_score in (({}, 0.9), ({"max_features": 2}, 0.5)):
        clf = forest_cls(
            n_estimators=10, criterion=criterion, random_state=1, **extra_kwargs
        )
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert score > min_score, "Failed with criterion %s and score = %f" % (
            criterion,
            score,
        )
# TODO(1.11): remove the deprecated friedman_mse criterion parametrization
@pytest.mark.filterwarnings("ignore:.*friedman_mse.*:FutureWarning")
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
@pytest.mark.parametrize(
    "criterion", ("squared_error", "friedman_mse", "absolute_error")
)
def test_regression_criterion(name, criterion):
    """Check consistency on a regression dataset."""
    forest_cls = FOREST_REGRESSORS[name]

    # All features considered at each split.
    model = forest_cls(n_estimators=5, criterion=criterion, random_state=1)
    model.fit(X_reg, y_reg)
    r2 = model.score(X_reg, y_reg)
    assert r2 > 0.93, (
        "Failed with max_features=None, criterion %s and score = %f"
        % (
            criterion,
            r2,
        )
    )

    # Restricted to 6 of the 10 features at each split.
    model = forest_cls(
        n_estimators=5, criterion=criterion, max_features=6, random_state=1
    )
    model.fit(X_reg, y_reg)
    r2 = model.score(X_reg, y_reg)
    assert r2 > 0.92, "Failed with max_features=6, criterion %s and score = %f" % (
        criterion,
        r2,
    )
def test_poisson_vs_mse():
    """Test that random forest with poisson criterion performs better than
    mse for a poisson target.
    There is a similar test for DecisionTreeRegressor.
    """
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 500, 10
    X = datasets.make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # We create a log-linear Poisson model and downscale coef as it will get
    # exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    # We prevent some overfitting by setting min_samples_leaf=10.
    forest_poi = RandomForestRegressor(
        criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
    )
    forest_mse = RandomForestRegressor(
        criterion="squared_error",
        min_samples_leaf=10,
        max_features="sqrt",
        random_state=rng,
    )
    forest_poi.fit(X_train, y_train)
    forest_mse.fit(X_train, y_train)
    # Constant-mean baseline for the deviance comparison below.
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
    for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]:
        metric_poi = mean_poisson_deviance(y, forest_poi.predict(X))
        # squared_error forest might produce non-positive predictions => clip
        # If y = 0 for those, the poisson deviance gets too good.
        # If we drew more samples, we would eventually get y > 0 and the
        # poisson deviance would explode, i.e. be undefined. Therefore, we do
        # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a
        # small penalty to the non-positive predictions.
        metric_mse = mean_poisson_deviance(
            y, np.clip(forest_mse.predict(X), 1e-6, None)
        )
        metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
        # As squared_error might correctly predict 0 in train set, its train
        # score can be better than Poisson. This is no longer the case for the
        # test set. But keep the above comment for clipping in mind.
        if data_name == "test":
            assert metric_poi < metric_mse
            assert metric_poi < 0.8 * metric_dummy
@pytest.mark.parametrize("criterion", ("poisson", "squared_error"))
def test_balance_property_random_forest(criterion):
    """Test that sum(y_pred)==sum(y_true) on the training set."""
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 500, 10
    X = datasets.make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # Log-linear Poisson target; coefficients are downscaled because they
    # get exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    # bootstrap=False — presumably so every tree sees the full training set,
    # which the exact balance check below relies on; confirm before changing.
    reg = RandomForestRegressor(
        criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng
    )
    reg.fit(X, y)
    assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y))
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_regressor_attributes(name):
    """Regression forests must not expose classifier-only attributes."""
    reg = FOREST_REGRESSORS[name](random_state=0)
    # Absent on the unfitted estimator...
    assert not hasattr(reg, "classes_")
    assert not hasattr(reg, "n_classes_")
    # ...and still absent after fitting a tiny regression problem.
    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    assert not hasattr(reg, "classes_")
    assert not hasattr(reg, "n_classes_")
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_probability(name):
    """Predicted probabilities must sum to one and match the log-probas."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(
            n_estimators=10, random_state=1, max_features=1, max_depth=1
        )
        clf.fit(iris.data, iris.target)
        # Each row of predict_proba is a probability distribution (sums to 1).
        assert_array_almost_equal(
            np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])
        )
        # predict_log_proba is the elementwise log of predict_proba.
        assert_array_almost_equal(
            clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))
        )
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
@pytest.mark.parametrize(
    "name, criterion",
    itertools.chain(
        product(FOREST_CLASSIFIERS, ["gini", "log_loss"]),
        product(FOREST_REGRESSORS, ["squared_error", "absolute_error"]),
    ),
)
def test_importances(dtype, name, criterion):
    """Importances must flag the informative features and be stable under
    parallelism and under rescaled sample weights."""
    tolerance = 0.01
    if name in FOREST_REGRESSORS and criterion == "absolute_error":
        tolerance = 0.05
    # cast as dtype
    X = X_large.astype(dtype, copy=False)
    y = y_large.astype(dtype, copy=False)
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    # The forest estimator can detect that only the first 3 features of the
    # dataset are informative:
    n_important = np.sum(importances > 0.1)
    assert importances.shape[0] == 10
    assert n_important == 3
    assert np.all(importances[:3] > 0.1)
    # Check with parallel: n_jobs must not change the importances.
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert np.all(importances >= 0.0)
    # Rescaling all weights by a constant must not change importances beyond
    # the tolerance.
    for scale in [0.5, 100]:
        est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert np.abs(importances - importances_bis).mean() < tolerance
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).
    def binomial(k, n):
        """Binomial coefficient C(n, k); zero outside 0 <= k <= n."""
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
    def entropy(samples):
        """Shannon entropy (base 2) of a vector of non-negative int labels."""
        n_samples = len(samples)
        entropy = 0.0
        for count in np.bincount(samples):
            p = 1.0 * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)
        return entropy
    def mdi_importance(X_m, X, y):
        """Theoretical MDI importance of feature ``X_m``, computed by
        exhaustively conditioning on every subset B of the other features
        (per the Louppe et al. 2013 formula referenced above)."""
        n_samples, n_features = X.shape
        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]
        imp = 0.0
        for k in range(n_features):
            # Weight of each B of size k
            coef = 1.0 / (binomial(k, n_features) * (n_features - k))
            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    mask_b = np.ones(n_samples, dtype=bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]
                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)
                    if n_samples_b > 0:
                        children = []
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])
                        imp += (
                            coef
                            * (1.0 * n_samples_b / n_samples)  # P(B=b)
                            * (
                                entropy(y_)
                                - sum(
                                    [
                                        entropy(c) * len(c) / n_samples_b
                                        for c in children
                                    ]
                                )
                            )
                        )
        return imp
    # Tiny categorical dataset: 7 binary features, last column is the label.
    data = np.array(
        [
            [0, 0, 1, 0, 0, 1, 0, 1],
            [1, 0, 1, 1, 1, 0, 1, 2],
            [1, 0, 1, 1, 0, 1, 1, 3],
            [0, 1, 1, 1, 0, 1, 0, 4],
            [1, 1, 0, 1, 0, 1, 1, 5],
            [1, 1, 0, 1, 1, 1, 1, 6],
            [1, 0, 1, 0, 0, 1, 0, 7],
            [1, 1, 1, 1, 1, 1, 1, 8],
            [1, 1, 1, 1, 0, 1, 1, 9],
            [1, 1, 1, 0, 1, 1, 1, 0],
        ]
    )
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]
    # Compute true importances
    true_importances = np.zeros(n_features)
    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)
    # Estimate importances with totally randomized trees
    # (max_features=1 so the split feature is picked at random).
    clf = ExtraTreesClassifier(
        n_estimators=500, max_features=1, criterion="log_loss", random_state=0
    ).fit(X, y)
    # Average the unnormalized importances over all trees.
    importances = (
        sum(
            tree.tree_.compute_feature_importances(normalize=False)
            for tree in clf.estimators_
        )
        / clf.n_estimators
    )
    # Check correctness: total unnormalized importance equals the entropy of
    # the target, and per-feature values approach the theoretical ones.
    assert_almost_equal(entropy(y), sum(importances))
    assert np.abs(true_importances - importances).mean() < 0.01
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_unfitted_feature_importances(name):
    """Accessing feature_importances_ before fit raises NotFittedError."""
    err_msg = (
        "This {} instance is not fitted yet. Call 'fit' with "
        "appropriate arguments before using this estimator.".format(name)
    )
    unfitted = FOREST_ESTIMATORS[name]()
    with pytest.raises(NotFittedError, match=err_msg):
        unfitted.feature_importances_
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_accuracy",
    [
        (
            *datasets.make_classification(n_samples=300, n_classes=2, random_state=0),
            0.9,
        ),
        (
            *datasets.make_classification(
                n_samples=1000, n_classes=3, n_informative=6, random_state=0
            ),
            0.65,
        ),
        (
            # iris with remapped (non-contiguous) class labels
            iris.data,
            iris.target * 2 + 1,
            0.65,
        ),
        (
            *datasets.make_multilabel_classification(n_samples=300, random_state=0),
            0.18,
        ),
    ],
)
@pytest.mark.parametrize("oob_score", [True, partial(f1_score, average="micro")])
def test_forest_classifier_oob(
    ForestClassifier, X, y, X_type, lower_bound_accuracy, oob_score
):
    """Check that OOB score is close to score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.5,
        random_state=0,
    )
    classifier = ForestClassifier(
        n_estimators=40,
        bootstrap=True,
        oob_score=oob_score,
        random_state=0,
    )
    # OOB attributes must only appear after fitting.
    assert not hasattr(classifier, "oob_score_")
    assert not hasattr(classifier, "oob_decision_function_")
    classifier.fit(X_train, y_train)
    # `oob_score` may be a bool or a callable metric; score the held-out
    # split with the same metric used for the OOB estimate.
    if callable(oob_score):
        test_score = oob_score(y_test, classifier.predict(X_test))
    else:
        test_score = classifier.score(X_test, y_test)
    assert classifier.oob_score_ >= lower_bound_accuracy
    abs_diff = abs(test_score - classifier.oob_score_)
    assert abs_diff <= 0.11, f"{abs_diff=} is greater than 0.11"
    # Classifiers expose a decision function, not raw OOB predictions.
    assert hasattr(classifier, "oob_score_")
    assert not hasattr(classifier, "oob_prediction_")
    assert hasattr(classifier, "oob_decision_function_")
    if y.ndim == 1:
        expected_shape = (X_train.shape[0], len(set(y)))
    else:
        # multilabel target: one class dimension per output column
        expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1])
    assert classifier.oob_decision_function_.shape == expected_shape
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_r2",
    [
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=1, random_state=0
            ),
            0.7,
        ),
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=2, random_state=0
            ),
            0.55,
        ),
    ],
)
@pytest.mark.parametrize("oob_score", [True, explained_variance_score])
def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score):
    """Check that forest-based regressor provide an OOB score close to the
    score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.5,
        random_state=0,
    )
    regressor = ForestRegressor(
        n_estimators=50,
        bootstrap=True,
        oob_score=oob_score,
        random_state=0,
    )
    # OOB attributes must only appear after fitting.
    assert not hasattr(regressor, "oob_score_")
    assert not hasattr(regressor, "oob_prediction_")
    regressor.fit(X_train, y_train)
    # `oob_score` may be a bool or a callable metric; score the held-out
    # split with the same metric used for the OOB estimate.
    if callable(oob_score):
        test_score = oob_score(y_test, regressor.predict(X_test))
    else:
        test_score = regressor.score(X_test, y_test)
    assert regressor.oob_score_ >= lower_bound_r2
    assert abs(test_score - regressor.oob_score_) <= 0.1
    # Regressors expose raw OOB predictions, not a decision function.
    assert hasattr(regressor, "oob_score_")
    assert hasattr(regressor, "oob_prediction_")
    assert not hasattr(regressor, "oob_decision_function_")
    if y.ndim == 1:
        expected_shape = (X_train.shape[0],)
    else:
        expected_shape = (X_train.shape[0], y.ndim)
    assert regressor.oob_prediction_.shape == expected_shape
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_warning(ForestEstimator):
    """A one-tree bootstrap forest cannot give every sample an OOB estimate
    and must warn that those estimates will be inaccurate."""
    single_tree_forest = ForestEstimator(
        n_estimators=1,
        oob_score=True,
        bootstrap=True,
        random_state=0,
    )
    with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"):
        single_tree_forest.fit(iris.data, iris.target)
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_score_requires_bootstrap(ForestEstimator):
    """Check that we raise an error if OOB score is requested without
    activating bootstrapping.
    """
    err_msg = "Out of bag estimation only available if bootstrap=True"
    no_bootstrap = ForestEstimator(oob_score=True, bootstrap=False)
    with pytest.raises(ValueError, match=err_msg):
        no_bootstrap.fit(iris.data, iris.target)
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
def test_classifier_error_oob_score_multiclass_multioutput(ForestClassifier):
    """Check that we raise an error when requesting OOB score with a
    multiclass-multioutput classification target.
    """
    rng = np.random.RandomState(42)
    X = iris.data
    # Two output columns with up to 5 classes each -> multiclass-multioutput.
    y = rng.randint(low=0, high=5, size=(iris.data.shape[0], 2))
    y_type = type_of_target(y)
    assert y_type == "multiclass-multioutput"
    estimator = ForestClassifier(oob_score=True, bootstrap=True)
    err_msg = "The type of target cannot be used to compute OOB estimates"
    with pytest.raises(ValueError, match=err_msg):
        estimator.fit(X, y)
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
def test_forest_multioutput_integral_regression_target(ForestRegressor):
    """Check that multioutput regression with integral values is not interpreted
    as a multiclass-multioutput target and OOB score can be computed.
    """
    rng = np.random.RandomState(42)
    X = iris.data
    # Integer-valued, two-output regression target.
    y = rng.randint(low=0, high=10, size=(iris.data.shape[0], 2))
    estimator = ForestRegressor(
        n_estimators=30, oob_score=True, bootstrap=True, random_state=0
    )
    estimator.fit(X, y)
    # Recompute the OOB prediction by hand for a subset of the samples:
    # average each tree's prediction over the trees that did not draw the
    # sample in their bootstrap.
    n_samples_bootstrap = _get_n_samples_bootstrap(len(X), estimator.max_samples, None)
    n_samples_test = X.shape[0] // 4
    oob_pred = np.zeros([n_samples_test, 2])
    for sample_idx, sample in enumerate(X[:n_samples_test]):
        n_samples_oob = 0
        oob_pred_sample = np.zeros(2)
        for tree in estimator.estimators_:
            oob_unsampled_indices = _generate_unsampled_indices(
                tree.random_state, len(X), n_samples_bootstrap, None
            )
            if sample_idx in oob_unsampled_indices:
                n_samples_oob += 1
                oob_pred_sample += tree.predict(sample.reshape(1, -1)).squeeze()
        oob_pred[sample_idx] = oob_pred_sample / n_samples_oob
    assert_allclose(oob_pred, estimator.oob_prediction_[:n_samples_test])
@pytest.mark.parametrize("oob_score", [True, False])
def test_random_trees_embedding_raise_error_oob(oob_score):
    """RandomTreesEmbedding rejects the oob_score keyword and does not
    implement OOB scoring."""
    with pytest.raises(TypeError, match="got an unexpected keyword argument"):
        RandomTreesEmbedding(oob_score=oob_score)
    with pytest.raises(NotImplementedError, match="OOB score not supported"):
        RandomTreesEmbedding()._set_oob_score_and_attributes(X, y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_gridsearch(name):
    """Forest classifiers must be usable inside GridSearchCV."""
    param_grid = {"n_estimators": (1, 2), "max_depth": (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_parallel(name):
    """Check parallel computations in classification"""
    # Pick the dataset matching the estimator type.
    if name in FOREST_CLASSIFIERS:
        X = iris.data
        y = iris.target
    elif name in FOREST_REGRESSORS:
        X = X_reg
        y = y_reg
    ForestEstimator = FOREST_ESTIMATORS[name]
    forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(X, y)
    assert len(forest) == 10
    # Predictions must not depend on the number of workers used.
    forest.set_params(n_jobs=1)
    y1 = forest.predict(X)
    forest.set_params(n_jobs=2)
    y2 = forest.predict(X)
    assert_array_almost_equal(y1, y2, 3)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_pickle(name):
    """Check picklability: a pickle round-trip must preserve the estimator
    class and reproduce the same training score."""
    # Pick a thinned dataset matching the estimator type.
    if name in FOREST_CLASSIFIERS:
        X = iris.data[::2]
        y = iris.target[::2]
    elif name in FOREST_REGRESSORS:
        X = X_reg[::2]
        y = y_reg[::2]
    ForestEstimator = FOREST_ESTIMATORS[name]
    obj = ForestEstimator(random_state=0)
    obj.fit(X, y)
    score = obj.score(X, y)
    pickle_object = pickle.dumps(obj)
    obj2 = pickle.loads(pickle_object)
    # Use an identity check on the classes instead of `==` (flake8 E721).
    assert type(obj2) is obj.__class__
    score2 = obj2.score(X, y)
    assert score == score2
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_multioutput(name):
    """Check estimators on multi-output problems."""
    # Four clusters in the plane, each mapped to a distinct pair of outputs.
    X_train = [
        [-2, -1],
        [-1, -1],
        [-1, -2],
        [1, 1],
        [1, 2],
        [2, 1],
        [-2, 1],
        [-1, 1],
        [-1, 2],
        [2, -1],
        [1, -1],
        [1, -2],
    ]
    y_train = [
        [-1, 0],
        [-1, 0],
        [-1, 0],
        [1, 1],
        [1, 1],
        [1, 1],
        [-1, 2],
        [-1, 2],
        [-1, 2],
        [1, 3],
        [1, 3],
        [1, 3],
    ]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    # Classifiers return one probability array per output: output 0 has
    # 2 classes, output 1 has 4 classes.
    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            proba = est.predict_proba(X_test)
            assert len(proba) == 2
            assert proba[0].shape == (4, 2)
            assert proba[1].shape == (4, 4)
            log_proba = est.predict_log_proba(X_test)
            assert len(log_proba) == 2
            assert log_proba[0].shape == (4, 2)
            assert log_proba[1].shape == (4, 4)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_multioutput_string(name):
    """Check estimators on multi-output problems with string outputs."""
    # Four clusters in the plane, each mapped to a distinct label pair.
    X_train = [
        [-2, -1],
        [-1, -1],
        [-1, -2],
        [1, 1],
        [1, 2],
        [2, 1],
        [-2, 1],
        [-1, 1],
        [-1, 2],
        [2, -1],
        [1, -1],
        [1, -2],
    ]
    y_train = [
        ["red", "blue"],
        ["red", "blue"],
        ["red", "blue"],
        ["green", "green"],
        ["green", "green"],
        ["green", "green"],
        ["red", "purple"],
        ["red", "purple"],
        ["red", "purple"],
        ["green", "yellow"],
        ["green", "yellow"],
        ["green", "yellow"],
    ]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [
        ["red", "blue"],
        ["green", "green"],
        ["red", "purple"],
        ["green", "yellow"],
    ]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_equal(y_pred, y_test)
    # One probability array per output: 2 classes for output 0, 4 for output 1.
    with np.errstate(divide="ignore"):
        proba = est.predict_proba(X_test)
        assert len(proba) == 2
        assert proba[0].shape == (4, 2)
        assert proba[1].shape == (4, 4)
        log_proba = est.predict_log_proba(X_test)
        assert len(log_proba) == 2
        assert log_proba[0].shape == (4, 2)
        assert log_proba[1].shape == (4, 4)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classes_shape(name):
    """Test that n_classes_ and classes_ have proper shape."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Classification, single output
    clf = ForestClassifier(random_state=0).fit(X, y)
    assert clf.n_classes_ == 2
    assert_array_equal(clf.classes_, [-1, 1])
    # Classification, multi-output: the second output is y doubled, so the
    # per-output class sets are [-1, 1] and [-2, 2].
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, _y)
    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_random_trees_dense_type():
    """RandomTreesEmbedding with sparse_output=False yields a dense ndarray."""
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = hasher.fit_transform(X)
    # Expect a plain numpy array rather than a scipy sparse matrix.
    assert isinstance(transformed, np.ndarray)
def test_random_trees_dense_equal():
    """Dense and sparse outputs of RandomTreesEmbedding must coincide."""
    hasher_dense = RandomTreesEmbedding(
        n_estimators=10, sparse_output=False, random_state=0
    )
    hasher_sparse = RandomTreesEmbedding(
        n_estimators=10, sparse_output=True, random_state=0
    )
    X, y = datasets.make_circles(factor=0.5)
    dense_embedding = hasher_dense.fit_transform(X)
    sparse_embedding = hasher_sparse.fit_transform(X)
    # Same embedding regardless of the output container type.
    assert_array_equal(sparse_embedding.toarray(), dense_embedding)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)
    # test fit and transform: fit().transform() must equal fit_transform()
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray())
    # one leaf active per data point per forest
    assert X_transformed.shape[0] == X.shape[0]
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
    # The embedding should make the circles linearly separable after a
    # 2-component SVD projection.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert linear_clf.score(X_reduced, y) == 1.0
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_random_hasher_sparse_data(csc_container):
    """Hashing sparse input must match hashing the equivalent dense input."""
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_embedding = hasher.fit_transform(X)
    sparse_embedding = hasher.fit_transform(csc_container(X))
    assert_array_equal(sparse_embedding.toarray(), dense_embedding.toarray())
def test_parallel_train():
    """Training with different n_jobs must produce identical forests."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)
    # Fit the same seeded forest with a range of worker counts.
    clfs = [
        RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(
            X_train, y_train
        )
        for n_jobs in [1, 2, 3, 8, 16, 32]
    ]
    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in clfs]
    # All fits must agree pairwise regardless of the parallelism level.
    for proba1, proba2 in itertools.pairwise(probas):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    """Check the distribution of tree structures drawn by extra-trees."""
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    # Serialize each tree structure as a "feature,threshold/" string so that
    # identical structures can be counted.
    uniques = defaultdict(int)
    for tree in reg.estimators_:
        tree = "".join(
            ("%d,%d/" % (f, int(t)) if f >= 0 else "-")
            for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
        )
        uniques[tree] += 1
    uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert len(uniques) == 5
    assert 0.20 > uniques[0][0]  # Rough approximation of 1/6.
    assert 0.20 > uniques[1][0]
    assert 0.20 > uniques[2][0]
    assert 0.20 > uniques[3][0]
    assert uniques[4][0] > 0.3
    assert uniques[4][1] == "0,1/0,0/--0,2/--"
    # Two variables, one with 2 values, one with 3 values
    # NOTE(review): the next two lines use the unseeded global np.random
    # instead of `rng` — looks unintentional, but the expected structure
    # count below may depend on the sampled data; confirm before changing.
    X = np.empty((1000, 2))
    X[:, 0] = np.random.randint(0, 2, 1000)
    X[:, 1] = np.random.randint(0, 3, 1000)
    y = rng.rand(1000)
    reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in reg.estimators_:
        tree = "".join(
            ("%d,%d/" % (f, int(t)) if f >= 0 else "-")
            for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
        )
        uniques[tree] += 1
    # All 8 distinct tree structures are expected to occur.
    uniques = [(count, tree) for tree, count in uniques.items()]
    assert len(uniques) == 8
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_max_leaf_nodes_max_depth(name):
    """max_depth must cap tree depth whether or not max_leaf_nodes is set."""
    X, y = hastie_X, hastie_y
    # Test precedence of max_leaf_nodes over max_depth.
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(
        max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0
    ).fit(X, y)
    assert est.estimators_[0].get_depth() == 1
    # The same cap applies without max_leaf_nodes.
    est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y)
    assert est.estimators_[0].get_depth() == 1
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_min_samples_split(name):
    """Internal nodes must respect min_samples_split (int and fraction)."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # An integer threshold and the equivalent fraction of the sample count
    # must both keep every split node above the requested size.
    for min_samples_split in (10, 0.5):
        est = ForestEstimator(
            min_samples_split=min_samples_split, n_estimators=1, random_state=0
        )
        est.fit(X, y)
        # Internal nodes are those with a left child.
        node_idx = est.estimators_[0].tree_.children_left != -1
        node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
        assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_min_samples_leaf(name):
    """Leaves must contain more samples than the min_samples_leaf setting."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Check an absolute leaf size and a fractional one against the matching
    # lower bound on per-leaf sample counts.
    for min_samples_leaf, lower_bound in ((5, 4), (0.25, len(X) * 0.25 - 1)):
        est = ForestEstimator(
            min_samples_leaf=min_samples_leaf, n_estimators=1, random_state=0
        )
        est.fit(X, y)
        out = est.estimators_[0].tree_.apply(X)
        node_counts = np.bincount(out)
        # Inner nodes never appear in `apply` output, so they count as zero;
        # keep only the leaves.
        leaf_count = node_counts[node_counts != 0]
        assert np.min(leaf_count) > lower_bound, "Failed with {0}".format(name)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_min_weight_fraction_leaf(name):
    """Leaves must hold at least min_weight_fraction_leaf of the total
    sample weight."""
    X, y = hastie_X, hastie_y
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    # NOTE(review): this comment claims max_leaf_nodes is set to exercise
    # both tree builders, but the code below never sets it — likely stale.
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for frac in np.linspace(0, 0.5, 6):
        est = ForestEstimator(
            min_weight_fraction_leaf=frac, n_estimators=1, random_state=0
        )
        if "RandomForest" in name:
            # presumably disables bootstrap so each weight is seen exactly
            # once by the single tree — confirm
            est.bootstrap = False
        est.fit(X, y, sample_weight=weights)
        out = est.estimators_[0].tree_.apply(X)
        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf, (
            "Failed with {0} min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf
            )
        )
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_sparse_input(name, sparse_container):
    """Fitting on sparse input must match fitting on the dense equivalent."""
    X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50)
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(sparse_container(X), y)
    # Leaf assignments must agree for every estimator type.
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(
            sparse.feature_importances_, dense.feature_importances_
        )
    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X))
        assert_array_almost_equal(
            sparse.predict_log_proba(X), dense.predict_log_proba(X)
        )
    if name in FOREST_TRANSFORMERS:
        # Transformers are compared on their embeddings.
        assert_array_almost_equal(
            sparse.transform(X).toarray(), dense.transform(X).toarray()
        )
        assert_array_almost_equal(
            sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()
        )
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
def test_memory_layout(name, dtype):
    """Test that fitting works no matter the input memory layout."""
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # Dense layouts: default, C-order, F-order and forced-contiguous.
    for container, kwargs in (
        (np.asarray, {}),  # Nothing
        (np.asarray, {"order": "C"}),  # C-order
        (np.asarray, {"order": "F"}),  # F-order
        (np.ascontiguousarray, {}),  # Contiguous
    ):
        X = container(iris.data, dtype=dtype, **kwargs)
        y = iris.target
        assert_array_almost_equal(est.fit(X, y).predict(X), y)
    # Sparse (if applicable)
    if est.estimator.splitter in SPARSE_SPLITTERS:
        for sparse_container in COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS:
            X = sparse_container(iris.data, dtype=dtype)
            y = iris.target
            assert_array_almost_equal(est.fit(X, y).predict(X), y)
    # Strided (non-contiguous) input.
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_almost_equal(est.fit(X, y).predict(X), y)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_1d_input(name):
    """1d feature arrays must be rejected at fit and at predict time."""
    X = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target
    with ignore_warnings():
        ForestEstimator = FOREST_ESTIMATORS[name]
        # Rejected at fit time...
        with pytest.raises(ValueError):
            ForestEstimator(n_estimators=1, random_state=0).fit(X, y)
        est = ForestEstimator(random_state=0)
        est.fit(X_2d, y)
        # ...and at predict time for estimators that implement predict.
        if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
            with pytest.raises(ValueError):
                est.predict(X)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
@pytest.mark.parametrize("n_classes", [2, 3, 4])
def test_validate_y_class_weight(name, n_classes, global_random_seed):
    """Check _validate_y_class_weight for dict, 'balanced' and
    'balanced_subsample' class weights on a weighted toy target."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(random_state=0)
    # toy dataset with n_classes
    y = np.repeat(np.arange(n_classes), 3)
    rng = np.random.RandomState(global_random_seed)
    sw = rng.randint(1, 5, size=len(y))
    # Expected per-class weights under the "balanced" strategy.
    weighted_frequency = np.bincount(y, weights=sw) / sw.sum()
    balanced_class_weight = 1 / (n_classes * weighted_frequency)
    # validation in fit reshapes y as (n_samples, 1)
    y_reshaped = np.reshape(y, (-1, 1))
    # Manually set these attributes, as we are not calling `fit`
    clf._n_samples, clf.n_outputs_ = y_reshaped.shape
    # checking dict class_weight
    class_weight = rng.randint(1, 7, size=n_classes)
    class_weight_dict = dict(enumerate(class_weight))
    clf.set_params(class_weight=class_weight_dict)
    _, expanded_class_weight = clf._validate_y_class_weight(y_reshaped, sw)
    assert_allclose(expanded_class_weight, class_weight[y])
    # checking class_weight="balanced"
    clf.set_params(class_weight="balanced")
    _, expanded_class_weight = clf._validate_y_class_weight(y_reshaped, sw)
    assert_allclose(expanded_class_weight, balanced_class_weight[y])
    # checking class_weight="balanced_subsample" with bootstrap=False
    # (should be equivalent to "balanced")
    clf.set_params(class_weight="balanced_subsample", bootstrap=False)
    _, expanded_class_weight = clf._validate_y_class_weight(y_reshaped, sw)
    assert_allclose(expanded_class_weight, balanced_class_weight[y])
    # checking class_weight="balanced_subsample" with bootstrap=True
    # (should be None)
    clf.set_params(class_weight="balanced_subsample", bootstrap=True)
    _, expanded_class_weight = clf._validate_y_class_weight(y_reshaped, sw)
    assert expanded_class_weight is None
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
@pytest.mark.parametrize("bootstrap", [True, False])
def test_class_weights_forest(name, bootstrap, global_random_seed):
    """Check that class_weight mirrors the equivalent sample_weight fits."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(random_state=global_random_seed, bootstrap=bootstrap)
    # Iris is balanced, so no effect expected for using 'balanced' weights.
    # Using the class_weight="balanced" option is then equivalent to fit with
    # all ones sample_weight. However we cannot guarantee the same fit for
    # sample_weight = None vs all ones, because the indices are drawn by
    # different rng functions (choice vs randint). Thus we explicitly pass
    # the sample_weight as all ones in clf1 fit.
    clf1 = clone(clf)
    clf1.fit(iris.data, iris.target, sample_weight=np.ones_like(iris.target))
    clf2 = clone(clf).set_params(class_weight="balanced")
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf2._sample_weight, 1)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = clone(clf).set_params(
        class_weight=[
            {0: 2.0, 1: 2.0, 2: 1.0},
            {0: 2.0, 1: 1.0, 2: 2.0},
            {0: 1.0, 1: 2.0, 2: 2.0},
        ]
    )
    clf3.fit(iris.data, iris_multi)
    # for multi-output, weights are multiplied
    assert_almost_equal(clf3._sample_weight, 2 * 2 * 1)
    # FIXME why is this test brittle ?
    assert_allclose(clf2.feature_importances_, clf3.feature_importances_, atol=0.002)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = clone(clf).set_params(class_weight="balanced")
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf4._sample_weight, 1)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1.0, 1: 100.0, 2: 1.0}
    clf1 = clone(clf)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = clone(clf).set_params(class_weight=class_weight)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1._sample_weight, clf2._sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = clone(clf)
    clf1.fit(iris.data, iris.target, sample_weight**2)
    clf2 = clone(clf).set_params(class_weight=class_weight)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1._sample_weight, clf2._sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Two outputs: the original labels and the labels doubled.
    _y = np.vstack((y, np.array(y) * 2)).T
    # "balanced" must be accepted for multi-output targets.
    clf = ForestClassifier(class_weight="balanced", random_state=0)
    clf.fit(X, _y)
    # A per-output list of weight dicts must also be accepted.
    clf = ForestClassifier(
        class_weight=[{-1: 0.5, 1: 1.0}, {-2: 1.0, 2: 1.0}], random_state=0
    )
    clf.fit(X, _y)
    # smoke test for balanced subsample
    clf = ForestClassifier(class_weight="balanced_subsample", random_state=0)
    clf.fit(X, _y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_class_weight_errors(name):
    """Check that class_weight raises the expected errors and warnings."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    y_multi = np.vstack((y, np.array(y) * 2)).T

    # Refitting with warm_start and an unchanged n_estimators warns.
    clf = ForestClassifier(class_weight="balanced", warm_start=True, random_state=0)
    clf.fit(X, y)
    warn_msg = (
        "Warm-start fitting without increasing n_estimators does not fit new trees."
    )
    with pytest.warns(UserWarning, match=warn_msg):
        clf.fit(X, y_multi)

    # A class_weight list shorter than the number of outputs is invalid.
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0)
    with pytest.raises(ValueError):
        clf.fit(X, y_multi)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start(name):
    """Incremental fitting with warm_start grows the forest to the requested
    size and matches a one-shot fit with the same seed."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]

    # Grow the warm-started forest in two steps: 5 trees, then 10.
    est_ws = ForestEstimator(n_estimators=5, random_state=42, warm_start=True)
    est_ws.fit(X, y)
    assert len(est_ws) == 5
    est_ws.set_params(n_estimators=10)
    est_ws.fit(X, y)
    assert len(est_ws) == 10

    # A forest fit in one shot with the same seed must be equivalent.
    est_no_ws = ForestEstimator(n_estimators=10, random_state=42, warm_start=False)
    est_no_ws.fit(X, y)

    ws_seeds = {tree.random_state for tree in est_ws}
    no_ws_seeds = {tree.random_state for tree in est_no_ws}
    assert ws_seeds == no_ws_seeds
    assert_array_equal(
        est_ws.apply(X), est_no_ws.apply(X), err_msg=f"Failed with {name}"
    )
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start_clear(name):
    """fit() must discard previous trees and grow a fresh forest when
    warm_start is switched off."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]

    reference = ForestEstimator(
        n_estimators=5, max_depth=1, warm_start=False, random_state=1
    )
    reference.fit(X, y)

    candidate = ForestEstimator(
        n_estimators=5, max_depth=1, warm_start=True, random_state=2
    )
    candidate.fit(X, y)  # initializes state with a different seed
    candidate.set_params(warm_start=False, random_state=1)
    candidate.fit(X, y)  # clears the old state; must now equal `reference`
    assert_array_almost_equal(candidate.apply(X), reference.apply(X))
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start_smaller_n_estimators(name):
    """A warm-started refit with a smaller n_estimators must raise."""
    X, y = hastie_X, hastie_y
    forest = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1, warm_start=True)
    forest.fit(X, y)
    forest.set_params(n_estimators=4)  # shrinking the ensemble is unsupported
    with pytest.raises(ValueError):
        forest.fit(X, y)
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_warm_start_equal_n_estimators(name):
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1)
    est.fit(X, y)
    est_2 = ForestEstimator(
        n_estimators=5, max_depth=3, warm_start=True, random_state=1
    )
    est_2.fit(X, y)
    # Now est_2 equals est.
    # Changing only the seed must have no effect: with warm_start=True and an
    # unchanged n_estimators, the refit below fits no new trees.
    est_2.set_params(random_state=2)
    warn_msg = (
        "Warm-start fitting without increasing n_estimators does not fit new trees."
    )
    with pytest.warns(UserWarning, match=warn_msg):
        est_2.fit(X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(est.apply(X), est_2.apply(X))
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = hastie_X, hastie_y
    # NOTE: parametrized over FOREST_CLASSIFIERS_REGRESSORS but looked up in
    # FOREST_ESTIMATORS — presumably the latter is a superset; confirm the
    # name sets overlap as expected.
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    est = ForestEstimator(
        n_estimators=15,
        max_depth=3,
        warm_start=False,
        random_state=1,
        bootstrap=True,
        oob_score=True,
    )
    est.fit(X, y)
    # Fit 5 trees without OOB, then warm-start up to 15 with oob_score=True:
    # the OOB score must be computed and match the one-shot fit above.
    est_2 = ForestEstimator(
        n_estimators=5,
        max_depth=3,
        warm_start=False,
        random_state=1,
        bootstrap=True,
        oob_score=False,
    )
    est_2.fit(X, y)
    est_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    est_2.fit(X, y)
    assert hasattr(est_2, "oob_score_")
    assert est.oob_score_ == est_2.oob_score_
    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    est_3 = ForestEstimator(
        n_estimators=15,
        max_depth=3,
        warm_start=True,
        random_state=1,
        bootstrap=True,
        oob_score=False,
    )
    est_3.fit(X, y)
    assert not hasattr(est_3, "oob_score_")
    est_3.set_params(oob_score=True)
    # Refit with unchanged n_estimators warns (no new trees); suppress it.
    ignore_warnings(est_3.fit)(X, y)
    assert est.oob_score_ == est_3.oob_score_
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_oob_not_computed_twice(name):
    # Check that oob_score is not computed twice when warm_start=True.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(
        n_estimators=10, warm_start=True, bootstrap=True, oob_score=True
    )
    # Wrap the private OOB hook so we can count its invocations across the
    # two fits below.
    with patch.object(
        est, "_set_oob_score_and_attributes", wraps=est._set_oob_score_and_attributes
    ) as mock_set_oob_score_and_attributes:
        est.fit(X, y)
        # Second warm-start fit adds no trees (warns) and must not recompute OOB.
        with pytest.warns(UserWarning, match="Warm-start fitting without increasing"):
            est.fit(X, y)
        mock_set_oob_score_and_attributes.assert_called_once()
def test_dtype_convert(n_classes=15):
    """String class labels must round-trip through fit/predict unchanged."""
    clf = RandomForestClassifier(random_state=0, bootstrap=False)
    # One sample per class; the identity matrix makes them trivially separable.
    X = np.eye(n_classes)
    y = list("ABCDEFGHIJKLMNOPQRSTU"[:n_classes])
    predictions = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, y)
    assert_array_equal(predictions, y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_decision_path(name):
    # Check decision_path: shape of the node indicator, per-tree column
    # offsets, and agreement with apply() on leaf nodes.
    X, y = hastie_X, hastie_y
    n_samples = X.shape[0]
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
    est.fit(X, y)
    # indicator: (n_samples, total nodes over all trees);
    # n_nodes_ptr: column offset of each tree's first node (CSR-style pointer).
    indicator, n_nodes_ptr = est.decision_path(X)
    assert indicator.shape[1] == n_nodes_ptr[-1]
    assert indicator.shape[0] == n_samples
    assert_array_equal(
        np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_]
    )
    # Assert that leaves index are correct
    leaves = est.apply(X)
    for est_id in range(leaves.shape[1]):
        # Every sample must pass through the leaf apply() assigned it.
        leave_indicator = [
            indicator[i, n_nodes_ptr[est_id] + j]
            for i, j in enumerate(leaves[:, est_id])
        ]
        assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_min_impurity_decrease():
    """min_impurity_decrease must be forwarded to every underlying tree."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    forest_classes = (
        RandomForestClassifier,
        RandomForestRegressor,
        ExtraTreesClassifier,
        ExtraTreesRegressor,
    )
    for ForestClass in forest_classes:
        forest = ForestClass(min_impurity_decrease=0.1)
        forest.fit(X, y)
        # Simply check if the parameter is passed on correctly. Tree tests
        # will suffice for the actual working of this param
        for tree in forest.estimators_:
            assert tree.min_impurity_decrease == 0.1
def test_poisson_y_positive_check():
    """Poisson criterion rejects negative targets and an all-zero target."""
    reg = RandomForestRegressor(criterion="poisson")
    X = np.zeros((3, 3))

    # Any negative value in y is invalid for Poisson deviance.
    err_msg = (
        r"Some value\(s\) of y are negative which is "
        r"not allowed for Poisson regression."
    )
    with pytest.raises(ValueError, match=err_msg):
        reg.fit(X, [-1, 1, 3])

    # A target that sums to zero is likewise invalid.
    err_msg = (
        r"Sum of y is not strictly positive which "
        r"is necessary for Poisson regression."
    )
    with pytest.raises(ValueError, match=err_msg):
        reg.fit(X, [0, 0, 0])
# mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type
class MyBackend(DEFAULT_JOBLIB_BACKEND):  # type: ignore[valid-type,misc]
    """Joblib backend that counts how often it is started (test helper)."""

    def __init__(self, *args, **kwargs):
        self.count = 0
        super().__init__(*args, **kwargs)

    def start_call(self):
        # Increment on every parallel invocation so tests can assert usage.
        self.count += 1
        return super().start_call()


# Make the counting backend selectable via joblib.parallel_backend("testing").
joblib.register_parallel_backend("testing", MyBackend)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
@skip_if_no_parallel
def test_backend_respected():
    """The registered joblib backend is used by fit but not by predict_proba."""
    forest = RandomForestClassifier(n_estimators=10, n_jobs=2)
    with joblib.parallel_backend("testing") as (backend, n_jobs):
        forest.fit(X, y)
    assert backend.count > 0
    # predict_proba requires shared memory. Ensure that's honored.
    with joblib.parallel_backend("testing") as (backend, _):
        forest.predict_proba(X)
    assert backend.count == 0
def test_forest_feature_importances_sum():
    """Feature importances of a fitted forest must sum to one."""
    X, y = make_classification(
        n_samples=15, n_informative=3, random_state=1, n_classes=3
    )
    forest = RandomForestClassifier(
        min_samples_leaf=5, random_state=42, n_estimators=200
    )
    forest.fit(X, y)
    assert math.isclose(1, forest.feature_importances_.sum(), abs_tol=1e-7)
def test_forest_degenerate_feature_importances():
    """A forest of single-node trees reports all-zero importances (see #13636)."""
    X = np.zeros((10, 10))
    y = np.ones((10,))
    forest = RandomForestRegressor(n_estimators=10).fit(X, y)
    assert_array_equal(forest.feature_importances_, np.zeros(10, dtype=np.float64))
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_max_samples_geq_one(name):
    """`max_samples >= 1.0` / `max_samples >= n_samples` is allowed (#28507).

    A float fraction above one and the equivalent integer count must behave
    identically.
    """
    X, y = hastie_X, hastie_y
    frac = 1.5
    count = int(frac * X.shape[0])

    est_frac = FOREST_CLASSIFIERS_REGRESSORS[name](
        bootstrap=True, max_samples=frac, random_state=11
    )
    est_frac.fit(X, y)
    est_count = FOREST_CLASSIFIERS_REGRESSORS[name](
        bootstrap=True, max_samples=count, random_state=11
    )
    est_count.fit(X, y)

    assert est_frac._n_samples_bootstrap == est_count._n_samples_bootstrap
    assert_allclose(est_frac.score(X, y), est_count.score(X, y))
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_max_samples_bootstrap(name):
    """Setting max_samples while bootstrap=False must raise a ValueError."""
    forest = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=False, max_samples=0.5)
    err_msg = (
        r"`max_sample` cannot be set if `bootstrap=False`. "
        r"Either switch to `bootstrap=True` or set "
        r"`max_sample=None`."
    )
    with pytest.raises(ValueError, match=err_msg):
        forest.fit(X, y)
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_max_samples_boundary_regressors(name):
    """max_samples=1.0 and max_samples=None must yield the same regressor."""
    X_train, X_test, y_train, y_test = train_test_split(
        X_reg, y_reg, train_size=0.7, test_size=0.3, random_state=0
    )

    pred_full_float = (
        FOREST_REGRESSORS[name](bootstrap=True, max_samples=1.0, random_state=0)
        .fit(X_train, y_train)
        .predict(X_test)
    )
    pred_default = (
        FOREST_REGRESSORS[name](bootstrap=True, max_samples=None, random_state=0)
        .fit(X_train, y_train)
        .predict(X_test)
    )

    mse_full_float = mean_squared_error(pred_full_float, y_test)
    mse_default = mean_squared_error(pred_default, y_test)
    assert mse_full_float == pytest.approx(mse_default)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_max_samples_boundary_classifiers(name):
    """max_samples=1.0 and max_samples=None must yield the same classifier."""
    X_train, X_test, y_train, _ = train_test_split(
        X_large, y_large, random_state=0, stratify=y_large
    )

    proba_full_float = (
        FOREST_CLASSIFIERS[name](bootstrap=True, max_samples=1.0, random_state=0)
        .fit(X_train, y_train)
        .predict_proba(X_test)
    )
    proba_default = (
        FOREST_CLASSIFIERS[name](bootstrap=True, max_samples=None, random_state=0)
        .fit(X_train, y_train)
        .predict_proba(X_test)
    )
    np.testing.assert_allclose(proba_full_float, proba_default)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_forest_y_sparse(csr_container):
    """Sparse multilabel-indicator targets are rejected with a clear error."""
    X = [[1, 2, 3]]
    y_sparse = csr_container([[4, 5, 6]])
    forest = RandomForestClassifier()
    with pytest.raises(
        ValueError, match="sparse multilabel-indicator for y is not supported."
    ):
        forest.fit(X, y_sparse)
@pytest.mark.parametrize("ForestClass", [RandomForestClassifier, RandomForestRegressor])
def test_little_tree_with_small_max_samples(ForestClass):
    """Restricting max_samples to two rows must produce a smaller tree."""
    rng = np.random.RandomState(1)
    X = rng.randn(10000, 2)
    y = rng.randn(10000) > 0

    # Unrestricted forest (both estimators deliberately share `rng`, so the
    # construction and fit order below matches the original test exactly).
    est_unrestricted = ForestClass(
        n_estimators=1,
        random_state=rng,
        max_samples=None,
    )
    # Forest limited to just two bootstrap samples.
    est_restricted = ForestClass(
        n_estimators=1,
        random_state=rng,
        max_samples=2,
    )
    est_unrestricted.fit(X, y)
    est_restricted.fit(X, y)

    nodes_unrestricted = est_unrestricted.estimators_[0].tree_.node_count
    nodes_restricted = est_restricted.estimators_[0].tree_.node_count
    assert (
        nodes_unrestricted > nodes_restricted
    ), "Tree without `max_samples` restriction should have more nodes"
@pytest.mark.parametrize("Forest", FOREST_REGRESSORS)
def test_mse_criterion_object_segfault_smoke_test(Forest):
    # This is a smoke test to ensure that passing a mutable criterion
    # does not cause a segfault when fitting with concurrent threads.
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/12623
    from sklearn.tree._criterion import MSE

    y = y_reg.reshape(-1, 1)
    n_samples, n_outputs = y.shape
    # A single shared MSE criterion instance is handed to a forest fitted with
    # two parallel jobs; the test passes if fit completes without crashing.
    mse_criterion = MSE(n_outputs, n_samples)
    est = FOREST_REGRESSORS[Forest](n_estimators=2, n_jobs=2, criterion=mse_criterion)
    est.fit(X_reg, y)
def test_random_trees_embedding_feature_names_out():
    """Check feature names out for Random Trees Embedding."""
    random_state = np.random.RandomState(0)
    X = np.abs(random_state.randn(100, 4))
    hasher = RandomTreesEmbedding(
        n_estimators=2, max_depth=2, sparse_output=False, random_state=0
    ).fit(X)
    names = hasher.get_feature_names_out()
    # Expected names follow the pattern randomtreesembedding_<tree>_<leaf>,
    # one per leaf node of each of the two depth-2 trees.
    expected_names = [
        f"randomtreesembedding_{tree}_{leaf}"
        # Note: nodes with indices 0, 1 and 4 are internal split nodes and
        # therefore do not appear in the expected output feature names.
        for tree, leaf in [
            (0, 2),
            (0, 3),
            (0, 5),
            (0, 6),
            (1, 2),
            (1, 3),
            (1, 5),
            (1, 6),
        ]
    ]
    assert_array_equal(expected_names, names)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_read_only_buffer(csr_container, monkeypatch):
    """RandomForestClassifier must work on readonly sparse data.

    Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25333
    """
    # Force joblib to memmap even tiny inputs (max_nbytes=100) so the workers
    # receive read-only buffers, reproducing the conditions of the issue.
    monkeypatch.setattr(
        sklearn.ensemble._forest,
        "Parallel",
        partial(Parallel, max_nbytes=100),
    )
    rng = np.random.RandomState(seed=0)
    X, y = make_classification(n_samples=100, n_features=200, random_state=rng)
    X = csr_container(X, copy=True)
    clf = RandomForestClassifier(n_jobs=2, random_state=rng)
    # Passes if cross-validation completes without raising.
    cross_val_score(clf, X, y, cv=2)
@pytest.mark.parametrize("class_weight", ["balanced_subsample", None])
def test_round_samples_to_one_when_samples_too_low(class_weight):
    """A tiny max_samples fraction is rounded up to one sample.

    Non-regression test for gh-24037.
    """
    X, y = datasets.load_wine(return_X_y=True)
    forest = RandomForestClassifier(
        n_estimators=10,
        max_samples=1e-4,
        class_weight=class_weight,
        random_state=0,
    )
    # Must fit without raising even though 1e-4 * n_samples < 1.
    forest.fit(X, y)
@pytest.mark.parametrize("seed", [None, 1])
@pytest.mark.parametrize("bootstrap", [True, False])
@pytest.mark.parametrize("ForestClass", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_estimators_samples(ForestClass, bootstrap, seed):
    """Estimators_samples_ property should be consistent.

    Tests consistency across fits and whether or not the seed for the random generator
    is set.
    """
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    if bootstrap:
        max_samples = 0.5
    else:
        max_samples = None
    est = ForestClass(
        n_estimators=10,
        max_samples=max_samples,
        max_features=0.5,
        random_state=seed,
        bootstrap=bootstrap,
    )
    est.fit(X, y)
    estimators_samples = est.estimators_samples_.copy()
    # Test repeated calls result in same set of indices
    assert_array_equal(estimators_samples, est.estimators_samples_)
    estimators = est.estimators_
    assert isinstance(estimators_samples, list)
    assert len(estimators_samples) == len(estimators)
    assert estimators_samples[0].dtype == np.int32
    for i in range(len(estimators)):
        if bootstrap:
            # max_samples=0.5 means every bootstrap draw has half the rows.
            assert len(estimators_samples[i]) == len(X) // 2
            # the bootstrap should be a resampling with replacement
            assert len(np.unique(estimators_samples[i])) < len(estimators_samples[i])
        else:
            # Without bootstrap every sample appears exactly once.
            assert len(set(estimators_samples[i])) == len(X)
    # Refitting a clone of the first estimator on exactly the recorded samples
    # must rebuild the same tree, proving estimators_samples_ faithfully
    # records the training rows.
    estimator_index = 0
    estimator_samples = estimators_samples[estimator_index]
    estimator = estimators[estimator_index]
    X_train = X[estimator_samples]
    y_train = y[estimator_samples]
    orig_tree_values = estimator.tree_.value
    estimator = clone(estimator)
    estimator.fit(X_train, y_train)
    new_tree_values = estimator.tree_.value
    assert_allclose(orig_tree_values, new_tree_values)
@pytest.mark.parametrize(
    "make_data, Forest",
    [
        (datasets.make_regression, RandomForestRegressor),
        (datasets.make_classification, RandomForestClassifier),
        (datasets.make_regression, ExtraTreesRegressor),
        (datasets.make_classification, ExtraTreesClassifier),
    ],
)
def test_missing_values_is_resilient(make_data, Forest):
    """Check that forest can deal with missing values and has decent performance."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 1000, 10
    X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng)
    # Create dataset with missing values: ~5% of entries are set to NaN.
    X_missing = X.copy()
    X_missing[rng.choice([False, True], size=X.shape, p=[0.95, 0.05])] = np.nan
    assert np.isnan(X_missing).any()
    X_missing_train, X_missing_test, y_train, y_test = train_test_split(
        X_missing, y, random_state=0
    )
    # Train forest with missing values
    forest_with_missing = Forest(random_state=rng, n_estimators=50)
    forest_with_missing.fit(X_missing_train, y_train)
    score_with_missing = forest_with_missing.score(X_missing_test, y_test)
    # Train forest without missing values
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    forest = Forest(random_state=rng, n_estimators=50)
    forest.fit(X_train, y_train)
    score_without_missing = forest.score(X_test, y_test)
    # Score is still 80 percent of the forest's score that had no missing values
    assert score_with_missing >= 0.80 * score_without_missing
@pytest.mark.parametrize(
    "Forest",
    [
        RandomForestClassifier,
        RandomForestRegressor,
        ExtraTreesRegressor,
        ExtraTreesClassifier,
    ],
)
def test_missing_value_is_predictive(Forest):
    """Check that the forest learns when missing values are only present for
    a predictive feature."""
    rng = np.random.RandomState(0)
    n_samples = 300
    expected_score = 0.75
    X_non_predictive = rng.standard_normal(size=(n_samples, 10))
    y = rng.randint(0, high=2, size=n_samples)
    # Create a predictive feature using `y` and with some noise: flip the
    # label mask for ~5% of the samples.
    X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05])
    y_mask = y.astype(bool)
    y_mask[X_random_mask] = ~y_mask[X_random_mask]
    # NaN placement itself carries the label information.
    predictive_feature = rng.standard_normal(size=n_samples)
    predictive_feature[y_mask] = np.nan
    assert np.isnan(predictive_feature).any()
    X_predictive = X_non_predictive.copy()
    X_predictive[:, 5] = predictive_feature
    (
        X_predictive_train,
        X_predictive_test,
        X_non_predictive_train,
        X_non_predictive_test,
        y_train,
        y_test,
    ) = train_test_split(X_predictive, X_non_predictive, y, random_state=0)
    forest_predictive = Forest(random_state=0).fit(X_predictive_train, y_train)
    forest_non_predictive = Forest(random_state=0).fit(X_non_predictive_train, y_train)
    predictive_test_score = forest_predictive.score(X_predictive_test, y_test)
    # The NaN-informed forest must beat both the threshold and the forest
    # trained without the predictive feature.
    assert predictive_test_score >= expected_score
    assert predictive_test_score >= forest_non_predictive.score(
        X_non_predictive_test, y_test
    )
@pytest.mark.parametrize("Forest", FOREST_REGRESSORS.values())
def test_non_supported_criterion_raises_error_with_missing_values(Forest):
    """absolute_error cannot handle NaNs and must say so at fit time."""
    X = np.array([[0, 1, 2], [np.nan, 0, 2.0]])
    y = [0.5, 1.0]
    forest = Forest(criterion="absolute_error")
    with pytest.raises(ValueError, match=".*does not accept missing values"):
        forest.fit(X, y)
# TODO(1.11): remove test with the deprecation of friedman_mse criterion
@pytest.mark.parametrize("Forest", FOREST_REGRESSORS.values())
def test_friedman_mse_deprecation(Forest):
    """Constructing with the deprecated friedman_mse criterion warns."""
    with pytest.warns(FutureWarning, match="friedman_mse"):
        Forest(criterion="friedman_mse")
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A handler that exports various App Engine services over HTTP.
You can export this handler in your app by adding it directly to app.yaml's
list of handlers:
handlers:
- url: /remote_api
script: $PYTHON_LIB/google/appengine/ext/remote_api/handler.py
login: admin
Then, you can use remote_api_stub to remotely access services exported by this
handler. See the documentation in remote_api_stub.py for details on how to do
this.
Using this handler without specifying "login: admin" would be extremely unwise.
So unwise that the default handler insists on checking for itself.
"""
import google
import logging
import os
import pickle
import sha
import sys
import wsgiref.handlers
import yaml
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import mail_service_pb
from google.appengine.api import urlfetch_service_pb
from google.appengine.api import users
from google.appengine.api.capabilities import capability_service_pb
from google.appengine.api.images import images_service_pb
from google.appengine.api.memcache import memcache_service_pb
try:
__import__('google.appengine.api.labs.taskqueue.taskqueue_service_pb')
taskqueue_service_pb = sys.modules.get(
'google.appengine.api.labs.taskqueue.taskqueue_service_pb')
except ImportError:
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.api.xmpp import xmpp_service_pb
from google.appengine.datastore import datastore_pb
from google.appengine.ext import webapp
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.runtime import apiproxy_errors
class RemoteDatastoreStub(apiproxy_stub.APIProxyStub):
  """Provides a stub that permits execution of stateful datastore queries.

  Some operations aren't possible using the standard interface. Notably,
  datastore RunQuery operations internally store a cursor that is referenced in
  later Next calls, and cleaned up at the end of each request. Because every
  call to ApiCallHandler takes place in its own request, this isn't possible.

  To work around this, RemoteDatastoreStub provides its own implementation of
  RunQuery that immediately returns the query results.
  """

  def __init__(self, service='datastore_v3', _test_stub_map=None):
    """Constructor.

    Args:
      service: The name of the service
      _test_stub_map: An APIProxyStubMap to use for testing purposes.
    """
    super(RemoteDatastoreStub, self).__init__(service)
    if _test_stub_map:
      self.__call = _test_stub_map.MakeSyncCall
    else:
      # Default: dispatch through the process-wide API proxy stub map.
      self.__call = apiproxy_stub_map.MakeSyncCall

  def _Dynamic_RunQuery(self, request, response):
    """Handle a RunQuery request.

    We handle RunQuery by executing a Query and a Next and returning the result
    of the Next request.

    This method is DEPRECATED, but left in place for older clients.
    """
    runquery_response = datastore_pb.QueryResult()
    self.__call('datastore_v3', 'RunQuery', request, runquery_response)
    if runquery_response.result_size() > 0:
      # The query returned results inline; forward them unchanged.
      response.CopyFrom(runquery_response)
      return
    # No inline results: drain up to `limit` entities in one Next call using
    # the cursor RunQuery returned, so the caller never needs the cursor.
    next_request = datastore_pb.NextRequest()
    next_request.mutable_cursor().CopyFrom(runquery_response.cursor())
    next_request.set_count(request.limit())
    self.__call('datastore_v3', 'Next', next_request, response)

  def _Dynamic_Transaction(self, request, response):
    """Handle a Transaction request.

    We handle transactions by accumulating Put requests on the client end, as
    well as recording the key and hash of Get requests. When Commit is called,
    Transaction is invoked, which verifies that all the entities in the
    precondition list still exist and their hashes match, then performs a
    transaction of its own to make the updates.
    """
    # Open a real datastore transaction to cover verification and updates.
    begin_request = datastore_pb.BeginTransactionRequest()
    begin_request.set_app(os.environ['APPLICATION_ID'])
    tx = datastore_pb.Transaction()
    self.__call('datastore_v3', 'BeginTransaction', begin_request, tx)
    preconditions = request.precondition_list()
    if preconditions:
      # Re-fetch every precondition entity inside the transaction and compare
      # its SHA-1 digest against the hash the client recorded at read time.
      get_request = datastore_pb.GetRequest()
      get_request.mutable_transaction().CopyFrom(tx)
      for precondition in preconditions:
        key = get_request.add_key()
        key.CopyFrom(precondition.key())
      get_response = datastore_pb.GetResponse()
      self.__call('datastore_v3', 'Get', get_request, get_response)
      entities = get_response.entity_list()
      assert len(entities) == request.precondition_size()
      for precondition, entity in zip(preconditions, entities):
        # A precondition with no hash means "entity must not exist", so a
        # mismatch in existence is a conflict.
        if precondition.has_hash() != entity.has_entity():
          raise apiproxy_errors.ApplicationError(
              datastore_pb.Error.CONCURRENT_TRANSACTION,
              "Transaction precondition failed.")
        elif entity.has_entity():
          entity_hash = sha.new(entity.entity().Encode()).digest()
          if precondition.hash() != entity_hash:
            raise apiproxy_errors.ApplicationError(
                datastore_pb.Error.CONCURRENT_TRANSACTION,
                "Transaction precondition failed.")
    # Preconditions hold: apply the accumulated Puts/Deletes in-transaction.
    if request.has_puts():
      put_request = request.puts()
      put_request.mutable_transaction().CopyFrom(tx)
      self.__call('datastore_v3', 'Put', put_request, response)
    if request.has_deletes():
      delete_request = request.deletes()
      delete_request.mutable_transaction().CopyFrom(tx)
      self.__call('datastore_v3', 'Delete', delete_request,
                  api_base_pb.VoidProto())
    self.__call('datastore_v3', 'Commit', tx, api_base_pb.VoidProto())

  def _Dynamic_GetIDs(self, request, response):
    """Fetch unique IDs for a set of paths."""
    # Every entity must be a bare key path whose last element has neither an
    # id nor a name — the Put below will assign the ids.
    for entity in request.entity_list():
      assert entity.property_size() == 0
      assert entity.raw_property_size() == 0
      assert entity.entity_group().element_size() == 0
      lastpart = entity.key().path().element_list()[-1]
      assert lastpart.id() == 0 and not lastpart.has_name()
    # Put inside a transaction that is immediately rolled back: ids get
    # allocated and returned without persisting any entity.
    begin_request = datastore_pb.BeginTransactionRequest()
    begin_request.set_app(os.environ['APPLICATION_ID'])
    tx = datastore_pb.Transaction()
    self.__call('datastore_v3', 'BeginTransaction', begin_request, tx)
    self.__call('datastore_v3', 'Put', request, response)
    self.__call('datastore_v3', 'Rollback', tx, api_base_pb.VoidProto())
# Maps service name -> {method name: (request protobuf class, response
# protobuf class)}. ApiCallHandler.ExecuteRequest uses this table to decode
# incoming remote_api requests and to instantiate their responses; methods
# absent from the table are rejected with CallNotFoundError.
SERVICE_PB_MAP = {
    'capability_service': {
        'IsEnabled': (capability_service_pb.IsEnabledRequest,
                      capability_service_pb.IsEnabledResponse),
    },
    'datastore_v3': {
        'Get': (datastore_pb.GetRequest, datastore_pb.GetResponse),
        'Put': (datastore_pb.PutRequest, datastore_pb.PutResponse),
        'Delete': (datastore_pb.DeleteRequest, datastore_pb.DeleteResponse),
        'Count': (datastore_pb.Query, api_base_pb.Integer64Proto),
        'GetIndices': (api_base_pb.StringProto, datastore_pb.CompositeIndices),
        'AllocateIds': (datastore_pb.AllocateIdsRequest,
                        datastore_pb.AllocateIdsResponse),
        'GetSchema': (datastore_pb.GetSchemaRequest, datastore_pb.Schema),
        'RunQuery': (datastore_pb.Query,
                     datastore_pb.QueryResult),
        'RunCompiledQuery': (datastore_pb.RunCompiledQueryRequest,
                             datastore_pb.QueryResult),
    },
    'images': {
        'Transform': (images_service_pb.ImagesTransformRequest,
                      images_service_pb.ImagesTransformResponse),
        'Composite': (images_service_pb.ImagesCompositeRequest,
                      images_service_pb.ImagesCompositeResponse),
        'Histogram': (images_service_pb.ImagesHistogramRequest,
                      images_service_pb.ImagesHistogramResponse),
    },
    'mail': {
        'Send': (mail_service_pb.MailMessage, api_base_pb.VoidProto),
        'SendToAdmins': (mail_service_pb.MailMessage, api_base_pb.VoidProto),
    },
    'memcache': {
        'Get': (memcache_service_pb.MemcacheGetRequest,
                memcache_service_pb.MemcacheGetResponse),
        'Set': (memcache_service_pb.MemcacheSetRequest,
                memcache_service_pb.MemcacheSetResponse),
        'Delete': (memcache_service_pb.MemcacheDeleteRequest,
                   memcache_service_pb.MemcacheDeleteResponse),
        'Increment': (memcache_service_pb.MemcacheIncrementRequest,
                      memcache_service_pb.MemcacheIncrementResponse),
        'FlushAll': (memcache_service_pb.MemcacheFlushRequest,
                     memcache_service_pb.MemcacheFlushResponse),
        'Stats': (memcache_service_pb.MemcacheStatsRequest,
                  memcache_service_pb.MemcacheStatsResponse),
    },
    'taskqueue': {
        'Add': (taskqueue_service_pb.TaskQueueAddRequest,
                taskqueue_service_pb.TaskQueueAddResponse),
        'BulkAdd': (taskqueue_service_pb.TaskQueueBulkAddRequest,
                    taskqueue_service_pb.TaskQueueBulkAddResponse),
        'UpdateQueue': (taskqueue_service_pb.TaskQueueUpdateQueueRequest,
                        taskqueue_service_pb.TaskQueueUpdateQueueResponse),
        'FetchQueues': (taskqueue_service_pb.TaskQueueFetchQueuesRequest,
                        taskqueue_service_pb.TaskQueueFetchQueuesResponse),
        'FetchQueueStats': (
            taskqueue_service_pb.TaskQueueFetchQueueStatsRequest,
            taskqueue_service_pb.TaskQueueFetchQueueStatsResponse),
    },
    # Stateful datastore operations served locally by RemoteDatastoreStub
    # rather than forwarded to the real API proxy.
    'remote_datastore': {
        'RunQuery': (datastore_pb.Query, datastore_pb.QueryResult),
        'Transaction': (remote_api_pb.TransactionRequest,
                        datastore_pb.PutResponse),
        'GetIDs': (remote_api_pb.PutRequest, datastore_pb.PutResponse),
    },
    'urlfetch': {
        'Fetch': (urlfetch_service_pb.URLFetchRequest,
                  urlfetch_service_pb.URLFetchResponse),
    },
    'xmpp': {
        'GetPresence': (xmpp_service_pb.PresenceRequest,
                        xmpp_service_pb.PresenceResponse),
        'SendMessage': (xmpp_service_pb.XmppMessageRequest,
                        xmpp_service_pb.XmppMessageResponse),
        'SendInvite': (xmpp_service_pb.XmppInviteRequest,
                       xmpp_service_pb.XmppInviteResponse),
    },
}
class ApiCallHandler(webapp.RequestHandler):
"""A webapp handler that accepts API calls over HTTP and executes them."""
LOCAL_STUBS = {
'remote_datastore': RemoteDatastoreStub('remote_datastore'),
}
def CheckIsAdmin(self):
if not users.is_current_user_admin():
self.response.set_status(401)
self.response.out.write(
"You must be logged in as an administrator to access this.")
self.response.headers['Content-Type'] = 'text/plain'
return False
elif 'X-appcfg-api-version' not in self.request.headers:
self.response.set_status(403)
self.response.out.write("This request did not contain a necessary header")
self.response.headers['Content-Type'] = 'text/plain'
return False
return True
def get(self):
"""Handle a GET. Just show an info page."""
if not self.CheckIsAdmin():
return
rtok = self.request.get('rtok', '0')
app_info = {
'app_id': os.environ['APPLICATION_ID'],
'rtok': rtok
}
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(yaml.dump(app_info))
def post(self):
"""Handle POST requests by executing the API call."""
if not self.CheckIsAdmin():
return
self.response.headers['Content-Type'] = 'application/octet-stream'
response = remote_api_pb.Response()
try:
request = remote_api_pb.Request()
request.ParseFromString(self.request.body)
response_data = self.ExecuteRequest(request)
response.mutable_response().set_contents(response_data.Encode())
self.response.set_status(200)
except Exception, e:
logging.exception('Exception while handling %s', request)
self.response.set_status(200)
response.mutable_exception().set_contents(pickle.dumps(e))
if isinstance(e, apiproxy_errors.ApplicationError):
application_error = response.mutable_application_error()
application_error.set_code(e.application_error)
application_error.set_detail(e.error_detail)
self.response.out.write(response.Encode())
def ExecuteRequest(self, request):
"""Executes an API invocation and returns the response object."""
service = request.service_name()
method = request.method()
service_methods = SERVICE_PB_MAP.get(service, {})
request_class, response_class = service_methods.get(method, (None, None))
if not request_class:
raise apiproxy_errors.CallNotFoundError()
request_data = request_class()
request_data.ParseFromString(request.request().contents())
response_data = response_class()
if service in self.LOCAL_STUBS:
self.LOCAL_STUBS[service].MakeSyncCall(service, method, request_data,
response_data)
else:
apiproxy_stub_map.MakeSyncCall(service, method, request_data,
response_data)
return response_data
def InfoPage(self):
    """Renders an information page."""
    # Static XHTML; no request state is consulted.
    page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html><head>
<title>App Engine API endpoint.</title>
</head><body>
<h1>App Engine API endpoint.</h1>
<p>This is an endpoint for the App Engine remote API interface.
Point your stubs (google.appengine.ext.remote_api.remote_api_stub) here.</p>
</body>
</html>"""
    return page
def main():
    """CGI entry point: route every URL path to ApiCallHandler."""
    app = webapp.WSGIApplication([('.*', ApiCallHandler)])
    wsgiref.handlers.CGIHandler().run(app)
# Run as a CGI script under App Engine.
if __name__ == '__main__':
    main()
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_TAC_MODULE_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_TAC_MODULE_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project
#include "mlir/IR/BuiltinOps.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/tac_importer_exporter.h"
namespace mlir {
namespace TFL {
namespace tac {
// Main class for using Target Aware Conversion (TAC).
// To run TAC:
// 1) users should create object form this class, with desired options
// (TacModule::Options).
// 2) Use SetImporter/SetExporter to the desired importer
// and exporter.
// 3) Call Run()
//
// The module fetches all TargetHardware backends registered in the binary
// and only create TargetHardware requested in Options.
//
// This class is not thread safe.
class TacModule {
 public:
  // TAC options. Contains knobs to configure TAC as needed.
  struct Options {
    // List of names for the requested Target hardware.
    std::vector<std::string> hardware_backends;
    // Debug mode.
    // This will output different alternative subgraphs in mlir format for
    // debug purpose.
    bool debug_mode = false;
    // Whether to enable inliner passes or not.
    bool enable_inliner = false;
    // Whether to legalize ops to TFLite ops before exporting.
    bool legalize_to_tflite_ops = false;
  };

  virtual ~TacModule() = default;

  explicit TacModule(const Options& options) : options_(options) {}

  // Sets the importer used by Run() to load the input module.
  void SetImporter(std::unique_ptr<TacImporter> importer) {
    importer_ = std::move(importer);
  }

  // Sets the exporter used by Run() to write out the converted module.
  void SetExporter(std::unique_ptr<TacExporter> exporter) {
    exporter_ = std::move(exporter);
  }

  // Returns pointer to the TargetHardware that is identified by
  // 'hardware_name'. Returns NULL If no hardware with this name found.
  const tac::TargetHardware* GetTargetHardware(
      const std::string& hardware_name) const;

  // Runs the TAC workflow, configured as in the options provided during
  // construction.
  // SetImporter/SetExporter should be called prior to invoking `Run`.
  // Returns Status of the Run.
  virtual absl::Status Run();

  // Returns all available hardware backends registered in this module
  // instance.
  const std::vector<const tac::TargetHardware*>& GetAvailableHardwares() const {
    return const_backends_;
  }

  // Registers all dialects in 'registry' with the module.
  // This to allow clients to register extra dialects required.
  void RegisterExtraDialects(mlir::DialectRegistry& registry);

 protected:
  // Adds TAC passes to the 'pass_manager'.
  virtual void AddTACPass(mlir::OpPassManager* pass_manager,
                          llvm::ArrayRef<std::string> device_specs);

 private:
  // Runs all TAC passes on the provided module.
  absl::Status RunTacPasses(mlir::ModuleOp* module, bool debug_mode = false);

  // Create instances of all registered hardwares.
  std::vector<std::unique_ptr<tac::TargetHardware>> InstantiateBackends();

  std::unique_ptr<TacImporter> importer_;
  std::unique_ptr<TacExporter> exporter_;
  // Owned list of all target hardware backends.
  std::vector<std::unique_ptr<tac::TargetHardware>> backends_;
  // Holder for const pointers for the data in 'backends_'
  std::vector<const tac::TargetHardware*> const_backends_;
  // Extra dialects requested by the user.
  mlir::DialectRegistry registry_;

  const Options options_;
};
} // namespace tac
} // namespace TFL
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_TAC_MODULE_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/lite/experimental/tac/tac_module.h |
from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
# Module-level logger for freeze diagnostics.
logger = logging.getLogger(__name__)

# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
        requirement=None,
        find_links=None, local_only=None, user_only=None, skip_regex=None,
        find_tags=False,
        default_vcs=None,
        isolated=False,
        wheel_cache=None):
    """Yield the lines of ``pip freeze`` output, one per yielded string.

    Args:
        requirement: optional path to a requirements file; when given, its
            lines are echoed/annotated first and matching installed packages
            are emitted in file order, before the remaining packages.
        find_links: iterable of --find-links URLs, emitted as '-f' lines.
        local_only: restrict to distributions local to this environment.
        user_only: restrict to distributions in the user site.
        skip_regex: regex source; requirement-file lines matching it are
            passed through unmodified.
        find_tags: forwarded to FrozenRequirement.from_dist.
        default_vcs: default VCS scheme for editable (-e) requirements.
        isolated: forwarded to InstallRequirement constructors.
        wheel_cache: forwarded to InstallRequirement constructors.
    """
    find_links = find_links or []
    skip_match = None
    if skip_regex:
        skip_match = re.compile(skip_regex)
    dependency_links = []
    # Collect dependency links declared by installed distributions...
    for dist in pkg_resources.working_set:
        if dist.has_metadata('dependency_links.txt'):
            dependency_links.extend(
                dist.get_metadata_lines('dependency_links.txt')
            )
    # ...plus any find-links entries that pin a concrete egg.
    for link in find_links:
        if '#egg=' in link:
            dependency_links.append(link)
    for link in find_links:
        yield '-f %s' % link
    # Map of requirement name -> FrozenRequirement for everything installed.
    installations = {}
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=freeze_excludes,
                                            user_only=user_only):
        req = pip.FrozenRequirement.from_dist(
            dist,
            dependency_links,
            find_tags=find_tags,
        )
        installations[req.name] = req
    if requirement:
        with open(requirement) as req_file:
            for line in req_file:
                # Pass through blanks, comments, skip-regex matches and
                # option lines unmodified.
                if (not line.strip() or
                        line.strip().startswith('#') or
                        (skip_match and skip_match.search(line)) or
                        line.startswith((
                            '-r', '--requirement',
                            '-Z', '--always-unzip',
                            '-f', '--find-links',
                            '-i', '--index-url',
                            '--extra-index-url'))):
                    yield line.rstrip()
                    continue
                if line.startswith('-e') or line.startswith('--editable'):
                    # Strip the editable flag before parsing the URL/path.
                    if line.startswith('-e'):
                        line = line[2:].strip()
                    else:
                        line = line[len('--editable'):].strip().lstrip('=')
                    line_req = InstallRequirement.from_editable(
                        line,
                        default_vcs=default_vcs,
                        isolated=isolated,
                        wheel_cache=wheel_cache,
                    )
                else:
                    line_req = InstallRequirement.from_line(
                        line,
                        isolated=isolated,
                        wheel_cache=wheel_cache,
                    )
                if not line_req.name:
                    logger.info(
                        "Skipping line because it's not clear what it "
                        "would install: %s",
                        line.strip(),
                    )
                    logger.info(
                        " (add #egg=PackageName to the URL to avoid"
                        " this warning)"
                    )
                elif line_req.name not in installations:
                    logger.warning(
                        "Requirement file contains %s, but that package is"
                        " not installed",
                        line.strip(),
                    )
                else:
                    # Emit the pinned form and mark it handled so it is not
                    # repeated in the trailing section below.
                    yield str(installations[line_req.name]).rstrip()
                    del installations[line_req.name]
        yield(
            '## The following requirements were added by '
            'pip freeze:'
        )
    # Everything installed but not mentioned in the requirements file,
    # sorted case-insensitively by name.
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        yield str(installation).rstrip()
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.gauge_tests_new.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "Plain",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.4,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "Continuous color scheme ",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"max": 98,
"min": 5,
"noise": 22,
"scenarioId": "random_walk",
"seriesCount": 5,
"spread": 12,
"startValue": 50
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidth": 12,
"barWidthFactor": 0.4,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": true
},
"fieldConfig": {
"defaults": {
"unit": "percent",
"min": 1,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd",
"fixedColor": "blue"
}
},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "Active gateways",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"max": 8,
"min": 1,
"noise": 2,
"scenarioId": "random_walk",
"seriesCount": 1,
"spread": 6,
"startValue": 0
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidth": 12,
"barWidthFactor": 0.49,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 8,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"unit": "suffix:/ 8 ",
"decimals": 0,
"min": 0,
"max": 8,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd",
"fixedColor": "blue"
}
},
"overrides": []
}
}
}
}
},
"panel-14": {
"kind": "Panel",
"spec": {
"id": 14,
"title": "Active pods",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"max": 12,
"min": 1,
"noise": 2,
"scenarioId": "random_walk",
"seriesCount": 1,
"spread": 6,
"startValue": 0
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidth": 12,
"barWidthFactor": 0.49,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 12,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"unit": "suffix:/ 12",
"decimals": 0,
"min": 0,
"max": 12,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd",
"fixedColor": "blue"
}
},
"overrides": []
}
}
}
}
},
"panel-15": {
"kind": "Panel",
"spec": {
"id": 15,
"title": "Backend",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"max": 100,
"min": 10,
"noise": 22,
"scenarioId": "random_walk",
"seriesCount": 1,
"spread": 12,
"startValue": 10
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidth": 12,
"barWidthFactor": 0.84,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 39,
"segmentSpacing": 0.76,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"unit": "percent",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "fixed",
"fixedColor": "red"
}
},
"overrides": []
}
}
}
}
},
"panel-16": {
"kind": "Panel",
"spec": {
"id": 16,
"title": "Backend",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"max": 100,
"min": 10,
"noise": 22,
"scenarioId": "random_walk",
"seriesCount": 1,
"spread": 12,
"startValue": 10
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidth": 12,
"barWidthFactor": 0.66,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 81,
"segmentSpacing": 0.28,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"unit": "percent",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "fixed",
"fixedColor": "red"
}
},
"overrides": []
}
}
}
}
},
"panel-18": {
"kind": "Panel",
"spec": {
"id": 18,
"title": "Bar width 0,1",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.1,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-19": {
"kind": "Panel",
"spec": {
"id": 19,
"title": "Bar width 0,32",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.32,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-20": {
"kind": "Panel",
"spec": {
"id": 20,
"title": "Bar width 0,57",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.57,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-21": {
"kind": "Panel",
"spec": {
"id": 21,
"title": "Bar width 0,8",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.8,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-22": {
"kind": "Panel",
"spec": {
"id": 22,
"title": "Square butts",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.72,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-23": {
"kind": "Panel",
"spec": {
"id": 23,
"title": "Square butts gauge",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.72,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-25": {
"kind": "Panel",
"spec": {
"id": 25,
"title": "Plain thresholds",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.9,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-26": {
"kind": "Panel",
"spec": {
"id": 26,
"title": "Thresholds segmented",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.72,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 37,
"segmentSpacing": 0.31,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-28": {
"kind": "Panel",
"spec": {
"id": 28,
"title": "Thresholds circle segmented",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.72,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 62,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-29": {
"kind": "Panel",
"spec": {
"id": 29,
"title": "Segmented + gradient",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.72,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 46,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "Center and bar glow",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.4,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-30": {
"kind": "Panel",
"spec": {
"id": 30,
"title": "Plain thresholds",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.9,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-32": {
"kind": "Panel",
"spec": {
"id": 32,
"title": "Thresholds labels",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.9,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 62,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": true,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 40,
"color": "semi-dark-yellow"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-33": {
"kind": "Panel",
"spec": {
"id": 33,
"title": "Thresholds labels + circle",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.9,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 80,
"segmentSpacing": 0.59,
"shape": "circle",
"showThresholdLabels": true,
"showThresholdMarkers": true,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 40,
"color": "semi-dark-yellow"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-34": {
"kind": "Panel",
"spec": {
"id": 34,
"title": "Thresholds labels only",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,70"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "flat",
"barWidthFactor": 0.9,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 62,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": true,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 40,
"color": "semi-dark-yellow"
},
{
"value": 65,
"color": "orange"
},
{
"value": 85,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Center glow",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.4,
"effects": {
"barGlow": false,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "Sparkline",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "1",
"max": 100,
"min": 1,
"noise": 22,
"scenarioId": "random_walk",
"spread": 22,
"startValue": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidthFactor": 0.4,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": true
},
"fieldConfig": {
"defaults": {
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "Auto gradient + classic palette",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"max": 98,
"min": 5,
"noise": 22,
"scenarioId": "random_walk",
"seriesCount": 5,
"spread": 12,
"startValue": 50
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 20
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "gauge",
"version": "13.0.0-pre",
"spec": {
"options": {
"barShape": "rounded",
"barWidth": 12,
"barWidthFactor": 0.4,
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true
},
"endpointMarker": "glow",
"glow": "both",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "circle",
"showThresholdLabels": false,
"showThresholdMarkers": false,
"sparkline": false
},
"fieldConfig": {
"defaults": {
"unit": "percent",
"min": 1,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic",
"fixedColor": "blue"
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Basic single ",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 4,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 4,
"y": 0,
"width": 4,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 8,
"y": 0,
"width": 4,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 16,
"y": 0,
"width": 4,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 6,
"width": 4,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-22"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 4,
"y": 6,
"width": 4,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-23"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Bar width",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 5,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-18"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 5,
"y": 0,
"width": 5,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-19"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 10,
"y": 0,
"width": 5,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-20"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 15,
"y": 0,
"width": 5,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-21"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Thresholds",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-25"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 6,
"y": 0,
"width": 6,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-26"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 5,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-29"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 6,
"width": 6,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-30"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 6,
"y": 6,
"width": 6,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-28"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Threshold labels",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 7,
"height": 10,
"element": {
"kind": "ElementReference",
"name": "panel-32"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 7,
"y": 0,
"width": 7,
"height": 10,
"element": {
"kind": "ElementReference",
"name": "panel-34"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 14,
"y": 0,
"width": 6,
"height": 10,
"element": {
"kind": "ElementReference",
"name": "panel-33"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Gradient modes",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 24,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 6,
"width": 24,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Segmented",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 4,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 4,
"y": 0,
"width": 5,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-14"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 9,
"y": 0,
"width": 5,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-15"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 14,
"y": 0,
"width": 6,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-16"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "browser",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Panel tests - Gauge (new)",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-gauge/v0alpha1.gauge_tests_new.v42.v2beta1.json |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import unittest
from telemetry.core import browser_options
class BrowserOptionsTest(unittest.TestCase):
  """Tests for BrowserFinderOptions' optparse integration.

  BrowserFinderOptions.CreateParser() returns a parser that writes parsed
  values back onto the options object itself, so most tests assert on
  attributes of |options| rather than on the parser's return value.

  Note: uses the modern assertEqual name; assertEquals is a deprecated
  alias of the same method.
  """

  def testDefaults(self):
    # A parser-level default must be copied onto the options object.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.x, 3)  # pylint: disable=E1101

  def testDefaultsPlusOverride(self):
    # An explicitly passed value wins over the parser default.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any', '-x', 10])
    self.assertEqual(options.x, 10)  # pylint: disable=E1101

  def testDefaultsDontClobberPresetValue(self):
    # A value set on the options object before parsing must survive
    # the application of parser defaults.
    options = browser_options.BrowserFinderOptions()
    setattr(options, 'x', 7)
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.x, 7)  # pylint: disable=E1101

  def testCount0(self):
    # 'count' actions with no occurrences leave the dest as None.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='count', dest='v')
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.v, None)  # pylint: disable=E1101

  def testCount2(self):
    # '-xx' increments the counter twice.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='count', dest='v')
    parser.parse_args(['--browser', 'any', '-xx'])
    self.assertEqual(options.v, 2)  # pylint: disable=E1101

  def testOptparseMutabilityWhenSpecified(self):
    # parse_args must return the very same object it mutates.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.add_option('-x', dest='verbosity', action='store_true')
    options_ret, _ = parser.parse_args(['--browser', 'any', '-x'])
    self.assertEqual(options_ret, options)
    self.assertTrue(options.verbosity)

  def testOptparseMutabilityWhenNotSpecified(self):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.add_option('-x', dest='verbosity', action='store_true')
    options_ret, _ = parser.parse_args(['--browser', 'any'])
    self.assertEqual(options_ret, options)
    self.assertFalse(options.verbosity)

  def testProfileDirDefault(self):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.browser_options.profile_dir, None)

  def testProfileDir(self):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    # Need to use a directory that exists.
    current_dir = os.path.dirname(__file__)
    parser.parse_args(['--browser', 'any', '--profile-dir', current_dir])
    self.assertEqual(options.browser_options.profile_dir, current_dir)

  def testExtraBrowserArgs(self):
    # A single --extra-browser-args value is split on whitespace into a set.
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser()
    parser.parse_args(['--extra-browser-args=--foo --bar'])
    self.assertEqual(options.browser_options.extra_browser_args,
                     set(['--foo', '--bar']))

  def testMergeDefaultValues(self):
    # MergeDefaultValues must only fill in attributes that are unset;
    # any value already present on the options object wins.
    options = browser_options.BrowserFinderOptions()
    options.already_true = True
    options.already_false = False
    options.override_to_true = False
    options.override_to_false = True

    parser = optparse.OptionParser()
    parser.add_option('--already_true', action='store_true')
    parser.add_option('--already_false', action='store_true')
    parser.add_option('--unset', action='store_true')
    parser.add_option('--default_true', action='store_true', default=True)
    parser.add_option('--default_false', action='store_true', default=False)
    parser.add_option('--override_to_true', action='store_true', default=False)
    parser.add_option('--override_to_false', action='store_true', default=True)

    options.MergeDefaultValues(parser.get_default_values())

    self.assertTrue(options.already_true)
    self.assertFalse(options.already_false)
    self.assertTrue(options.unset is None)
    self.assertTrue(options.default_true)
    self.assertFalse(options.default_false)
    self.assertFalse(options.override_to_true)
    self.assertTrue(options.override_to_false)
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
import django.db.models.signals
import django.dispatch.dispatcher
import web.core
import re
import os
import uuid
class File(models.Model):
    # An uploaded file addressed by a short random slug, with an expiry time.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Random short identifier used as the public URL path component.
    slug = models.CharField(max_length=64, default=web.core.random_slug_default_length, editable=False, unique=True)
    # Uploading user.
    author = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Stored under a path computed by web.core.get_file_path.
    file = models.FileField(upload_to=web.core.get_file_path)
    created = models.DateTimeField(auto_now_add=True)
    # When the file should stop being served; default comes from web.core.
    expiry = models.DateTimeField(default=web.core.default_expiry, blank=True)
    def uri(self):
        """
        @returns The URI of this file, which can be used to retrieve it using a redirect.
        """
        # Strip the upload base directory prefix so the path is relative to FILES_DIR.
        filename = re.sub(r'^%s' % settings.UPLOAD_BASE_DIR, '', self.file.name).lstrip('/')
        return os.path.join(settings.FILES_DIR, filename)
    def url(self):
        """
        @returns The short URL of this file, including the protocol and domain.
        """
        return os.path.join(settings.SITE_URL, self.slug)
    def delete_file(self):
        # Remove the file from disk, then its containing directory.
        # NOTE(review): os.rmdir raises if the directory is non-empty —
        # presumably each upload lives in its own directory; confirm
        # against web.core.get_file_path.
        base_path = os.path.dirname(self.file.name)
        if os.path.exists(self.file.name):
            os.unlink(self.file.name)
        if os.path.exists(base_path):
            os.rmdir(base_path)
    def __unicode__(self):
        return self.file.name
class Option(models.Model):
    """
    Option is a key/value store that stores serialized options on specific users.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Owner of this option; each (key, author) pair is unique.
    author = models.ForeignKey(settings.AUTH_USER_MODEL)
    key = models.CharField(max_length=128)
    # Serialized value; may be empty/NULL.
    value = models.CharField(max_length=4096, null=True, blank=True)
    class Meta:
        unique_together = ('key', 'author')
    def __unicode__(self):
        return '%s=%s' % (self.key, self.value)
@django.dispatch.dispatcher.receiver(django.db.models.signals.post_delete, sender=File)
def file_delete(sender, instance, **kwargs):
    # Once the database row is gone, remove the file from disk as well.
    instance.delete_file()
from geopy.geocoders import GoogleV3
from s2sphere import CellId, LatLng
from pogo.custom_exceptions import GeneralPogoException
import gpxpy.geo
# Wrapper for location
class Location(object):
    """A geocoded position (latitude, longitude, altitude).

    Wraps a geopy GoogleV3 geocoder for address lookups and s2sphere for
    converting coordinates into S2 cell ids.
    """

    def __init__(self, locationLookup, geo_key, noop=False):
        # Blank location: all fields None until setCoordinates() is called.
        if noop:
            self.noop = True
            self.geo_key = None
            self.locator = None
            self.latitude = None
            self.longitude = None
            self.altitude = None
            return

        self.noop = False
        self.geo_key = geo_key
        self.locator = GoogleV3()
        if geo_key:
            self.locator = GoogleV3(api_key=geo_key)

        self.latitude, self.longitude, self.altitude = self.setLocation(locationLookup)

    def __str__(self):
        s = 'Coordinates: {} {} {}'.format(
            self.latitude,
            self.longitude,
            self.altitude
        )
        return s

    @staticmethod
    def getDistance(*coords):
        """Great-circle distance in meters between two lat/long pairs."""
        return gpxpy.geo.haversine_distance(*coords)

    @staticmethod
    def getLatLongIndex(latitude, longitude):
        """S2 cell id (as an integer) for the given coordinates."""
        return CellId.from_lat_lng(
            LatLng.from_degrees(
                latitude,
                longitude
            )
        ).id()

    @staticmethod
    def Noop():
        """Create an empty Location with no geocoder and no coordinates."""
        return Location(None, None, noop=True)

    def setLocation(self, search):
        """Geocode `search` and return (latitude, longitude, altitude).

        Raises GeneralPogoException when the geocoder fails or finds no
        result for the query.
        """
        try:
            geo = self.locator.geocode(search)
        except Exception as e:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt and dropped the underlying error entirely.
            raise GeneralPogoException('Error in Geo Request: %s' % e)
        if geo is None:
            # geopy returns None when nothing matched the query; without
            # this check the attribute accesses below raise AttributeError.
            raise GeneralPogoException('No location found for "%s"' % search)
        # 8 is arbitrary, but not as suspicious as 0
        return geo.latitude, geo.longitude, geo.altitude or 8

    def setCoordinates(self, latitude, longitude):
        """Overwrite latitude/longitude directly (altitude unchanged)."""
        self.latitude = latitude
        self.longitude = longitude

    def getCoordinates(self):
        return self.latitude, self.longitude, self.altitude

    def getCells(self, radius=10, bothDirections=True):
        """Sorted S2 cell ids (level 15) walking outward from the position.

        Walks `radius` cells to each side of the origin cell, or twice as
        far in one direction when bothDirections is False.
        """
        origin = CellId.from_lat_lng(
            LatLng.from_degrees(
                self.latitude,
                self.longitude
            )
        ).parent(15)

        # Create walk around area
        walk = [origin.id()]
        right = origin.next()
        left = origin.prev()

        # Double the radius if we're only walking one way
        if not bothDirections:
            radius *= 2

        # Search around provided radius
        for _ in range(radius):
            walk.append(right.id())
            right = right.next()
            if bothDirections:
                walk.append(left.id())
                left = left.prev()

        # Return everything
        return sorted(walk)
#include "builtin.h"
#include "gettext.h"
#include "parse-options.h"
#include "prune-packed.h"
static const char * const prune_packed_usage[] = {
	"git prune-packed [-n | --dry-run] [-q | --quiet]",
	NULL
};
/*
 * Entry point for "git prune-packed": remove loose object files that
 * are already present in a pack, so they take up no extra space.
 */
int cmd_prune_packed(int argc,
		     const char **argv,
		     const char *prefix,
		     struct repository *repo UNUSED)
{
	/* Show progress by default only when stderr is a terminal. */
	int opts = isatty(2) ? PRUNE_PACKED_VERBOSE : 0;
	const struct option prune_packed_options[] = {
		OPT_BIT('n', "dry-run", &opts, N_("dry run"),
			PRUNE_PACKED_DRY_RUN),
		OPT_NEGBIT('q', "quiet", &opts, N_("be quiet"),
			PRUNE_PACKED_VERBOSE),
		OPT_END()
	};
	argc = parse_options(argc, argv, prefix, prune_packed_options,
			     prune_packed_usage, 0);
	/* prune-packed accepts no positional arguments. */
	if (argc > 0)
		usage_msg_opt(_("too many arguments"),
			      prune_packed_usage,
			      prune_packed_options);
	prune_packed_objects(opts);
	return 0;
}
# Extension to format a paragraph
# Does basic, standard text formatting, and also understands Python
# comment blocks. Thus, for editing Python source code, this
# extension is really only suitable for reformatting these comment
# blocks or triple-quoted strings.
# Known problems with comment reformatting:
# * If there is a selection marked, and the first line of the
# selection is not complete, the block will probably not be detected
# as comments, and will have the normal "text formatting" rules
# applied.
# * If a comment block has leading whitespace that mixes tabs and
# spaces, they will not be considered part of the same block.
# * Fancy comments, like this bulleted list, aren't handled :-)
import string
import re
class FormatParagraph:
    """Editor extension that reflows the paragraph around the cursor (or
    the current selection) to a fixed width, preserving a leading '#'
    comment prefix when reformatting comment blocks.
    """

    menudefs = [
        ('edit', [
            ('Format Paragraph', '<<format-paragraph>>'),
        ])
    ]

    keydefs = {
        '<<format-paragraph>>': ['<Alt-q>'],
    }

    unix_keydefs = {
        '<<format-paragraph>>': ['<Meta-q>'],
    }

    def __init__(self, editwin):
        self.editwin = editwin

    def close(self):
        # Drop the editor reference so the window can be reclaimed.
        self.editwin = None

    def format_paragraph_event(self, event):
        """Reflow the selection, or the paragraph at the insert point.

        Uses str methods and comprehensions instead of the removed
        Python-2 `string` module functions; behavior is unchanged.
        """
        text = self.editwin.text
        first, last = self.editwin.get_selection_indices()
        if first and last:
            data = text.get(first, last)
            comment_header = ''
        else:
            first, last, comment_header, data = \
                find_paragraph(text, text.index("insert"))
        if comment_header:
            # Reformat the comment lines - convert to text sans header.
            prefix_len = len(comment_header)
            lines = [line[prefix_len:] for line in data.split("\n")]
            data = "\n".join(lines)
            # Reformat to 70 chars or a 20 char width, whichever is greater.
            format_width = max(70 - len(comment_header), 20)
            newdata = reformat_paragraph(data, format_width)
            # re-split and re-insert the comment header.
            newdata = newdata.split("\n")
            # If the block ends in a \n, we don't want the comment prefix
            # inserted after it. (It's unclear that reformatting a comment
            # block made of incomplete lines makes sense, but handle it.)
            block_suffix = ""
            if not newdata[-1]:
                block_suffix = "\n"
                newdata = newdata[:-1]
            newdata = "\n".join(
                [comment_header + line for line in newdata]) + block_suffix
        else:
            # Just a normal text format
            newdata = reformat_paragraph(data)
        text.tag_remove("sel", "1.0", "end")
        if newdata != data:
            text.mark_set("insert", first)
            text.undo_block_start()
            text.delete(first, last)
            text.insert(first, newdata)
            text.undo_block_stop()
        else:
            text.mark_set("insert", last)
        text.see("insert")
def find_paragraph(text, mark):
    """Locate the paragraph containing Tk text index `mark`.

    Returns (first, last, comment_header, data): the text indices
    bounding the paragraph, the shared '#' comment prefix ('' for plain
    text), and the paragraph's contents.

    Replaces the removed Python-2 `string.split` with str.split;
    behavior is unchanged.
    """
    lineno, col = map(int, mark.split("."))
    line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    # Skip blank lines forward to the first line of the paragraph.
    while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    first_lineno = lineno
    comment_header = get_comment_header(line)
    comment_header_len = len(comment_header)
    # Walk forward while lines share the same comment prefix and are
    # non-blank after that prefix.
    while get_comment_header(line) == comment_header and \
          not is_all_white(line[comment_header_len:]):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    last = "%d.0" % lineno
    # Search back to beginning of paragraph
    lineno = first_lineno - 1
    line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    while lineno > 0 and \
          get_comment_header(line) == comment_header and \
          not is_all_white(line[comment_header_len:]):
        lineno = lineno - 1
        line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    first = "%d.0" % (lineno + 1)
    return first, last, comment_header, text.get(first, last)
def reformat_paragraph(data, limit=70):
    """Rewrap `data` so no line exceeds `limit` columns (tabs expanded).

    The first non-blank line's indentation is kept for the first output
    line; the second line's indentation (if any) is used for all
    continuation lines. Leading blank lines and everything after the
    first paragraph are passed through untouched.

    Fixes: replaces the removed Python-2 `string` module functions with
    str methods, and makes the regex a raw string (`"(\\s+)"` contained
    an invalid escape sequence). Behavior is unchanged.
    """
    lines = data.split("\n")
    i = 0
    n = len(lines)
    while i < n and is_all_white(lines[i]):
        i = i + 1
    if i >= n:
        return data
    indent1 = get_indent(lines[i])
    if i + 1 < n and not is_all_white(lines[i + 1]):
        indent2 = get_indent(lines[i + 1])
    else:
        indent2 = indent1
    new = lines[:i]
    partial = indent1
    while i < n and not is_all_white(lines[i]):
        # XXX Should take double space after period (etc.) into account
        words = re.split(r"(\s+)", lines[i])
        for j in range(0, len(words), 2):
            word = words[j]
            if not word:
                continue  # Can happen when line ends in whitespace
            if len((partial + word).expandtabs()) > limit and \
               partial != indent1:
                new.append(partial.rstrip())
                partial = indent2
            partial = partial + word + " "
            # Preserve multi-space separators (e.g. two spaces after a
            # period) by adding one extra space.
            if j + 1 < len(words) and words[j + 1] != " ":
                partial = partial + " "
        i = i + 1
    new.append(partial.rstrip())
    # XXX Should reformat remaining paragraphs as well
    new.extend(lines[i:])
    return "\n".join(new)
def is_all_white(line):
    """True when `line` is empty or contains only whitespace."""
    return not line.strip()
def get_indent(line):
    """Return the leading whitespace of `line` (possibly empty)."""
    stripped = line.lstrip()
    return line[:len(line) - len(stripped)]
def get_comment_header(line):
    """Return the '#'-comment prefix of `line` (leading whitespace plus
    hash marks), or the empty string for a non-comment line."""
    match = re.match(r"^(\s*#*)", line)
    return "" if match is None else match.group(1)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
# Custom log level used for '-vv' (more detailed than logging.DEBUG).
VERBOSE_LEVEL = 5
# Module-level verbosity counter; set by get_config(), read by print_exception().
verbose = 0
def check_python():
    """Exit with an error message unless running on Python 2.6+/3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2:
        if minor < 6:
            print('Python 2.6+ required')
            sys.exit(1)
    elif major == 3:
        if minor < 3:
            print('Python 3.3+ required')
            sys.exit(1)
    else:
        print('Python version not supported')
        sys.exit(1)
def print_exception(e):
    """Log `e` as an error; dump the traceback too when verbosity > 0."""
    global verbose
    logging.error(e)
    if verbose <= 0:
        return
    import traceback
    traceback.print_exc()
def print_shadowsocks():
    """Print the installed shadowsocks version (blank when undetectable)."""
    try:
        import pkg_resources
        version = pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        # Best effort only: pkg_resources may be missing or the package
        # may not be installed as a distribution.
        version = ''
    print('Shadowsocks %s' % version)
def find_config():
    """Return the path of the first config.json found, or None.

    Checks the current directory first, then the package's parent
    directory.
    """
    candidates = [
        'config.json',
        os.path.join(os.path.dirname(__file__), '../', 'config.json'),
    ]
    for path in candidates:
        if os.path.exists(path):
            return path
    return None
def check_config(config, is_local):
    """Sanity-check a fully assembled config dict.

    Warns (via logging) about unsafe-but-legal settings and exits the
    process on fatal problems: missing password (exit 2), the sample
    default password, non-POSIX 'user', or an unusable cipher.
    """
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return
    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)
    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])
    # server_port may be a list (multi-port setups); only coerce scalars.
    if 'server_port' in config and type(config['server_port']) != list:
        config['server_port'] = int(config['server_port'])
    if config.get('local_address', '') in [b'0.0.0.0']:
        logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warn('warning: server set to listen on %s:%s, are you sure?' %
                     (to_str(config['server']), config['server_port']))
    if (config.get('method', '') or '').lower() == 'table':
        logging.warn('warning: table is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if (config.get('method', '') or '').lower() == 'rc4':
        logging.warn('warning: RC4 is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if config.get('timeout', 300) < 100:
        logging.warn('warning: your timeout %d seems too short' %
                     int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warn('warning: your timeout %d seems too long' %
                     int(config.get('timeout')))
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)
    # Fails fast if the chosen method/password combination is unusable.
    encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
    """Assemble the effective configuration dict for sslocal/ssserver.

    Reads the config file (found automatically or via -c), then applies
    command-line options on top (CLI wins), fills in defaults, sets up
    logging according to the -v/-q counter, and validates the result via
    check_config(). Exits the process on bad input or for --help/--version.
    """
    global verbose
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    # Local client and server accept slightly different option sets.
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
                    'version']
    else:
        shortopts = 'hd:s:p:k:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
                    'forbidden-ip=', 'user=', 'manager-address=', 'version']
    try:
        config_path = find_config()
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        # An explicit -c overrides the automatically discovered path.
        for key, value in optlist:
            if key == '-c':
                config_path = value
        if config_path:
            logging.info('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(f.read().decode('utf8'))
                except ValueError as e:
                    # NOTE(review): e.message is Python 2 only; under
                    # Python 3 this would need str(e) — confirm targets.
                    logging.error('found an error in config.json: %s',
                                  e.message)
                    sys.exit(1)
        else:
            config = {}
        # -v increments and -q decrements a shared verbosity counter.
        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value).split(',')
            elif key in ('-h', '--help'):
                if is_local:
                    print_local_help()
                else:
                    print_server_help()
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                v_count -= 1
                config['verbose'] = v_count
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)
    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)
    # Fill in defaults for everything not supplied by file or CLI.
    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['port_password'] = config.get('port_password', None)
    config['timeout'] = int(config.get('timeout', 300))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
    config['verbose'] = config.get('verbose', False)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        # The local client cannot run without knowing the server address.
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        config['server'] = to_str(config.get('server', '0.0.0.0'))
        try:
            config['forbidden_ip'] = \
                IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
    config['server_port'] = config.get('server_port', 8388)
    # Reset logging and map the verbosity counter onto a log level.
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    check_config(config, is_local)
    return config
def print_help(is_local):
    """Print the usage text for the variant being run.

    Args:
        is_local: True for the local client (sslocal), False for the
            server (ssserver).
    """
    helper = print_local_help if is_local else print_server_help
    helper()
def print_local_help():
    """Print command-line usage for the local client (sslocal) to stdout."""
    print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
    """Print command-line usage for the server (ssserver) to stdout."""
    print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address, default: 0.0.0.0
  -p SERVER_PORT         server port, default: 8388
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
  --workers WORKERS      number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma seperated IP list forbidden to connect
  --manager-address ADDR optional server manager UDP address, see wiki
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
    """Parse a JSON string, converting decoded text values to bytes.

    Uses ``_decode_dict`` as the ``object_hook`` so every decoded object's
    string values (recursively) are UTF-8 encoded.
    """
    # parse json and convert everything from unicode to str
    return json.loads(data, object_hook=_decode_dict)
See ["Contributing to Ruby"](https://docs.ruby-lang.org/en/master/contributing/contributing_md.html), which includes setup and build instructions. | unknown | github | https://github.com/ruby/ruby | CONTRIBUTING.md |
import new
import cairo
import rsvg
import pango
from random import random
from widgets.primitives import Primitives
from core.world import TheWorld
from ontology.thing import Thing
class ObjectViewer(Thing):
    """A Thing that renders a visual panel for another object.

    Shows the target's class name via a pango layout inside a rounded
    panel, and draws connector widgets for the target's declared inputs.
    """

    def __init__(self, target):
        Thing.__init__(self)
        self.target = target
        # set & lock position: random point in [-1, 1) on both axes
        self.x = (random() * 2.0) - 1
        self.y = (random() * 2.0) - 1
        self.width = 0.1
        self.height = 0.1
        # Cached pixel dimensions of the rendered text; filled in by
        # recalc_text_size().
        self.pixel_width = 0.0
        self.pixel_height = 0.0
        # pango layout used to render the caption text
        self.layout = pango.Layout(TheWorld.pango_context)
        fontDescription = pango.FontDescription("Sans 8")
        self.layout.set_font_description(fontDescription)
        # NOTE: this assignment goes through the `text` property setter
        # below, which pushes the markup into the layout and recalculates
        # the cached text size.
        self.text = self.target.__class__.__name__ + "\n" + self.target.code
        # calc text metrics
        self.recalc_text_size()

    # -- properties
    def __get_text(self):
        return self.__text

    def __set_text(self, text):
        # Keep the pango layout and cached pixel size in sync with the text.
        self.__text = text
        self.layout.set_markup(self.__text)
        self.recalc_text_size()  # recalc text size

    text = property(__get_text, __set_text)

    def recalc_text_size(self):
        # Cache the layout's rendered size in pixels.
        (self.pixel_width, self.pixel_height) = self.layout.get_pixel_size()  # bogus when called from init() !?
        #self.width = self.pixel_width / float(TheWorld.width) #+ self.padding * 2
        #self.height = self.pixel_height / float(TheWorld.height) #+ self.padding

    def draw(self, context):
        """Render the viewer: input connectors, caption text and panel."""
        Thing.draw(self, context)
        # Refresh the caption each frame (re-runs the property setter).
        self.text = self.target.__class__.__name__  # + "\n" + self.target.code
        # figure out scale factor to fit text to thing size
        scale = min(1.0 / self.pixel_width, 1.0 / self.pixel_height) / 2.0
        # draw connectors
        self.draw_inputs(context)
        #self.draw_outputs(context)
        # render the text
        context.save()
        context.move_to(-0.3, -0.5)
        context.scale(scale, scale)
        context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        context.show_layout(self.layout)
        context.restore()
        # render box
        Primitives.panel(context, -0.5, -0.5, 1.0, 1.0, 0.05)

    def draw_inputs(self, context):
        # One connector per declared input, spread evenly down the left edge.
        inputs = self.target.receives()
        i = 0
        for label in inputs:
            x = -0.55
            y = -0.45 + (i * (1.0 / len(inputs)))
            Primitives.connector(context, x, y, label, 0.0, 0.0, 1.0)
            i = i + 1

    def draw_outputs(self, context):
        # Currently unused: the call site in draw() is commented out.
        num_outputs = 2
        for i in range(num_outputs):
            x = 0.25
            y = -0.45 + (i * (1.0 / num_outputs))
            Primitives.connector(context, x, y, '', 1.0, 0.0, 0.0)

    def tick(self):
        # No per-frame behaviour for a viewer.
        pass
# frozen_string_literal: true
require "abstract_unit"
require "active_support/core_ext/hash/conversions"
# Minimal controller built on ActionController::Metal with just enough
# rendering modules mixed in to support calling +render+ from actions.
class MetalRenderingController < ActionController::Metal
  include AbstractController::Rendering
  include ActionController::Rendering
  include ActionController::Renderers
end
# Controller that registers only the :json renderer (via +use_renderers+),
# used to exercise rendering with and without a registered renderer.
class MetalRenderingJsonController < MetalRenderingController
  # Stand-in model that can serialize itself to both JSON and XML.
  class Model
    def to_json(options = {})
      { a: "b" }.to_json(options)
    end

    def to_xml(options = {})
      { a: "b" }.to_xml(options)
    end
  end

  use_renderers :json

  # Renders through the registered :json renderer.
  def one
    render json: Model.new
  end

  # :xml is not registered on this controller (only :json is).
  def two
    render xml: Model.new
  end
end
class RenderersMetalTest < ActionController::TestCase
  tests MetalRenderingJsonController

  def test_render_json
    get :one
    assert_response :success
    assert_equal({ a: "b" }.to_json, @response.body)
    assert_equal "application/json", @response.media_type
  end

  def test_render_xml
    get :two
    assert_response :success
    # Only :json was registered with use_renderers, so `render xml:` is not
    # dispatched to an XML renderer; the response ends up as plain text.
    assert_equal(" ", @response.body)
    assert_equal "text/plain", @response.media_type
  end
end
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.models.shelf_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from absl.testing import parameterized
import freezegun
import mock
from google.appengine.api import datastore_errors
from google.appengine.api import search
from google.appengine.ext import ndb
from loaner.web_app import constants
from loaner.web_app.backend.lib import events
from loaner.web_app.backend.models import config_model
from loaner.web_app.backend.models import shelf_model
from loaner.web_app.backend.testing import loanertest
class ShelfModelTest(loanertest.EndpointsTestCase, parameterized.TestCase):
  """Tests for the Shelf class."""

  def setUp(self):
    super(ShelfModelTest, self).setUp()
    self.original_location = 'US-NYC'
    self.original_friendly_name = 'NYC office'
    self.original_capacity = 18
    self.test_shelf = shelf_model.Shelf(
        enabled=True,
        friendly_name=self.original_friendly_name,
        location=self.original_location,
        capacity=self.original_capacity,
        audit_notification_enabled=True).put().get()

  def test_get_search_index(self):
    self.assertIsInstance(shelf_model.Shelf.get_index(), search.Index)

  @parameterized.parameters((-1,), (0,))
  def test_validate_capacity(self, capacity):
    """Test that validate capacity raises db.BadValueError for less than 1."""
    with self.assertRaisesWithLiteralMatch(
        datastore_errors.BadValueError, shelf_model._NEGATIVE_CAPACITY_MSG):
      shelf_model._validate_capacity('capacity', capacity)

  def create_shelf_list(self):
    """Convenience function to create extra shelves to test listing."""
    self.shelf2 = shelf_model.Shelf(
        enabled=True,
        location='US-NYC2',
        capacity=20).put().get()
    self.shelf3 = shelf_model.Shelf(
        enabled=False,
        location='US-NYC3',
        capacity=30).put().get()
    self.shelf4 = shelf_model.Shelf(
        enabled=False,
        location='US-NYC4',
        capacity=40).put().get()

  def test_audited_property_false(self):
    """Test that the audited property is False outside the interval."""
    now = datetime.datetime.utcnow()
    config_model.Config.set('audit_interval', 48)
    with freezegun.freeze_time(now):
      # Last audit is one hour past the 48-hour interval.
      self.test_shelf.last_audit_time = now - datetime.timedelta(hours=49)
      shelf_key = self.test_shelf.put()
      retrieved_shelf = shelf_model.Shelf.get_by_id(shelf_key.id())
      self.assertFalse(retrieved_shelf.audited)

  def test_audited_property_true(self):
    """Test that the audited property is True inside the interval."""
    now = datetime.datetime.utcnow()
    config_model.Config.set('audit_interval', 48)
    with freezegun.freeze_time(now):
      # Last audit is one hour within the 48-hour interval.
      self.test_shelf.last_audit_time = now - datetime.timedelta(hours=47)
      shelf_key = self.test_shelf.put()
      retrieved_shelf = shelf_model.Shelf.get_by_id(shelf_key.id())
      self.assertTrue(retrieved_shelf.audited)

  @mock.patch.object(shelf_model.Shelf, 'to_document', autospec=True)
  def test_identifier(self, mock_to_document):
    """Test the identifier property."""
    # Name is friendly name.
    self.assertEqual(self.test_shelf.identifier, self.original_friendly_name)
    # Name is location.
    self.test_shelf.friendly_name = None
    shelf_key = self.test_shelf.put()
    self.assertEqual(mock_to_document.call_count, 1)
    retrieved_shelf = shelf_model.Shelf.get_by_id(shelf_key.id())
    self.assertEqual(retrieved_shelf.identifier, self.original_location)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_enroll_new_shelf(self, mock_logging, mock_stream):
    """Test enrolling a new shelf."""
    new_location = 'US-NYC2'
    new_capacity = 16
    new_friendly_name = 'Statue of Liberty'
    lat = 40.6892534
    lon = -74.0466891
    new_shelf = shelf_model.Shelf.enroll(
        loanertest.USER_EMAIL, new_location, new_capacity, new_friendly_name,
        lat, lon, 1.0, loanertest.USER_EMAIL)
    self.assertEqual(new_shelf.location, new_location)
    self.assertEqual(new_shelf.capacity, new_capacity)
    self.assertEqual(new_shelf.friendly_name, new_friendly_name)
    self.assertEqual(new_shelf.lat_long, ndb.GeoPt(lat, lon))
    self.assertEqual(new_shelf.latitude, lat)
    self.assertEqual(new_shelf.longitude, lon)
    mock_logging.info.assert_called_once_with(
        shelf_model._CREATE_NEW_SHELF_MSG, new_shelf.identifier)
    mock_stream.assert_called_once_with(
        new_shelf, loanertest.USER_EMAIL,
        shelf_model._ENROLL_MSG % new_shelf.identifier)
    self.testbed.mock_raiseevent.assert_called_once_with(
        'shelf_enroll', shelf=new_shelf)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_enroll_new_shelf_no_lat_long_event_error(
      self, mock_logging, mock_stream):
    """Test enrolling a new shelf without latitude and longitude."""
    # Event errors should be logged but must not break enrollment.
    self.testbed.mock_raiseevent.side_effect = events.EventActionsError
    new_location = 'US-NYC2'
    new_capacity = 16
    new_friendly_name = 'Statue of Liberty'
    new_shelf = shelf_model.Shelf.enroll(
        loanertest.USER_EMAIL, new_location, new_capacity, new_friendly_name)
    self.assertEqual(new_shelf.location, new_location)
    self.assertEqual(new_shelf.capacity, new_capacity)
    self.assertEqual(new_shelf.friendly_name, new_friendly_name)
    self.assertIsNone(new_shelf.lat_long)
    self.assertIsNone(new_shelf.latitude)
    self.assertIsNone(new_shelf.longitude)
    mock_logging.info.assert_called_once_with(
        shelf_model._CREATE_NEW_SHELF_MSG, new_shelf.identifier)
    mock_stream.assert_called_once_with(
        new_shelf, loanertest.USER_EMAIL,
        shelf_model._ENROLL_MSG % new_shelf.identifier)
    self.assertEqual(mock_logging.error.call_count, 1)
    self.testbed.mock_raiseevent.assert_called_once_with(
        'shelf_enroll', shelf=new_shelf)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_enroll_shelf_exists(self, mock_logging, mock_stream):
    """Test enrolling an existing shelf reactivates the shelf."""
    new_capacity = 14
    lat = 40.6892534
    lon = -74.0466891
    self.test_shelf.enabled = False
    self.test_shelf.put()
    reactivated_shelf = shelf_model.Shelf.enroll(
        user_email=loanertest.USER_EMAIL,
        location=self.original_location,
        capacity=new_capacity, latitude=lat, longitude=lon)
    self.assertEqual(self.test_shelf.key, reactivated_shelf.key)
    self.assertEqual(new_capacity, reactivated_shelf.capacity)
    self.assertEqual(reactivated_shelf.lat_long, ndb.GeoPt(lat, lon))
    mock_logging.info.assert_called_once_with(
        shelf_model._REACTIVATE_MSG, self.test_shelf.identifier)
    mock_stream.assert_called_once_with(
        reactivated_shelf, loanertest.USER_EMAIL,
        shelf_model._ENROLL_MSG % reactivated_shelf.identifier)
    self.testbed.mock_raiseevent.assert_called_once_with(
        'shelf_enroll', shelf=reactivated_shelf)

  def test_enroll_latitude_no_longitude(self):
    """Test that enroll requires both lat and long, raises EnrollmentError."""
    with self.assertRaisesRegexp(
        shelf_model.EnrollmentError,
        shelf_model._LAT_LONG_MSG):
      shelf_model.Shelf.enroll(
          loanertest.USER_EMAIL, self.original_location, self.original_capacity,
          self.original_friendly_name, 40.6892534)

  def test_get_with_friendly_name(self):
    """Test the get method with a friendly_name provided."""
    self.assertEqual(
        self.test_shelf, shelf_model.Shelf.get(
            friendly_name=self.original_friendly_name))

  def test_get_with_location(self):
    """Test the get method with a location provided."""
    self.assertEqual(
        self.test_shelf, shelf_model.Shelf.get(location=self.original_location))

  def test_get_with_both(self):
    """Test the get method with a location and friendly_name provided."""
    self.assertEqual(
        self.test_shelf, shelf_model.Shelf.get(
            location=self.original_location,
            friendly_name=self.original_friendly_name))

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  def test_edit(self, mock_stream):
    """Test that a shelf edit changes the appropriate properties."""
    new_capacity = 10
    new_friendly_name = 'NYC is the best!'
    self.test_shelf.edit(
        user_email=loanertest.USER_EMAIL, capacity=new_capacity,
        friendly_name=new_friendly_name)
    retrieved_shelf = self.test_shelf.key.get()
    self.assertEqual(retrieved_shelf.capacity, new_capacity)
    self.assertEqual(retrieved_shelf.friendly_name, new_friendly_name)
    mock_stream.assert_called_once_with(
        retrieved_shelf, loanertest.USER_EMAIL,
        shelf_model._EDIT_MSG % retrieved_shelf.identifier)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_audit(self, mock_logging, mock_stream):
    """Test that an audit updates the appropriate properties."""
    # Event errors should be logged but must not break the audit.
    self.testbed.mock_raiseevent.side_effect = events.EventActionsError
    test_num_of_devices = '10'
    self.test_shelf.audit(loanertest.USER_EMAIL, test_num_of_devices)
    retrieved_shelf = self.test_shelf.key.get()
    self.assertFalse(retrieved_shelf.audit_requested)
    mock_logging.info.assert_called_once_with(
        shelf_model._AUDIT_MSG, self.test_shelf.identifier, test_num_of_devices)
    mock_stream.assert_called_once_with(
        self.test_shelf, loanertest.USER_EMAIL,
        shelf_model._AUDIT_MSG % (
            self.test_shelf.identifier, test_num_of_devices))
    self.assertEqual(mock_logging.error.call_count, 1)
    self.testbed.mock_raiseevent.assert_called_once_with(
        'shelf_audited', shelf=self.test_shelf)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_request_audit(self, mock_logging, mock_stream):
    """Test that an audit request occurs."""
    self.test_shelf.request_audit()
    retrieved_shelf = self.test_shelf.key.get()
    self.assertTrue(retrieved_shelf.audit_requested)
    mock_logging.info.assert_called_once_with(
        shelf_model._AUDIT_REQUEST_MSG, self.test_shelf.identifier)
    mock_stream.assert_called_once_with(
        self.test_shelf, constants.DEFAULT_ACTING_USER,
        shelf_model._AUDIT_REQUEST_MSG % self.test_shelf.identifier)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_enable(self, mock_logging, mock_stream):
    """Test the enabling of a shelf."""
    self.test_shelf.enabled = False
    shelf_key = self.test_shelf.put()
    retrieved_shelf = shelf_key.get()
    # Ensure that the shelf is now disabled
    self.assertFalse(retrieved_shelf.enabled)
    self.test_shelf.enable(loanertest.USER_EMAIL)
    retrieved_shelf = shelf_key.get()
    # Ensure that the shelf is now re-enabled
    self.assertTrue(retrieved_shelf.enabled)
    mock_logging.info.assert_called_once_with(
        shelf_model._ENABLE_MSG, self.test_shelf.identifier)
    mock_stream.assert_called_once_with(
        self.test_shelf, loanertest.USER_EMAIL,
        shelf_model._ENABLE_MSG % self.test_shelf.identifier)

  @parameterized.parameters(
      (True, True, True), (True, False, False), (False, False, False))
  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_audit_enabled(
      self, system_value, shelf_value, final_value, mock_logging, mock_stream):
    """Testing the audit_enabled property with different configurations."""
    config_model.Config.set('shelf_audit', system_value)
    self.test_shelf.audit_notification_enabled = shelf_value
    # Ensure the shelf audit notification status is equal to the expected value.
    self.assertEqual(self.test_shelf.audit_enabled, final_value)

  @mock.patch.object(shelf_model.Shelf, 'stream_to_bq', autospec=True)
  @mock.patch.object(shelf_model, 'logging', autospec=True)
  def test_disable(self, mock_logging, mock_stream):
    """Test the disabling of a shelf."""
    # Event errors should be logged but must not break the disable.
    self.testbed.mock_raiseevent.side_effect = events.EventActionsError
    self.test_shelf.disable(loanertest.USER_EMAIL)
    retrieved_shelf = self.test_shelf.key.get()
    self.assertFalse(retrieved_shelf.enabled)
    mock_logging.info.assert_called_once_with(
        shelf_model._DISABLE_MSG, self.test_shelf.identifier)
    mock_stream.assert_called_once_with(
        self.test_shelf, loanertest.USER_EMAIL,
        shelf_model._DISABLE_MSG % self.test_shelf.identifier)
    self.assertEqual(mock_logging.error.call_count, 1)
    self.testbed.mock_raiseevent.assert_called_once_with(
        'shelf_disable', shelf=self.test_shelf)
# Allow running this test module directly.
if __name__ == '__main__':
  loanertest.main()
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_webhook
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of Webhook Avi RESTful Object
description:
- This module is used to configure Webhook object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
callback_url:
description:
- Callback url for the webhook.
- Field introduced in 17.1.1.
description:
description:
- Field introduced in 17.1.1.
name:
description:
- The name of the webhook profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the webhook profile.
- Field introduced in 17.1.1.
verification_token:
description:
- Verification token sent back with the callback asquery parameters.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Webhook object
avi_webhook:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_webhook
"""
RETURN = '''
obj:
description: Webhook (api/webhook) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
    """Module entry point: declare the webhook argument spec and run the API."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        callback_url=dict(type='str',),
        description=dict(type='str',),
        name=dict(type='str', required=True),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
        verification_token=dict(type='str',),
    )
    # Merge in the common Avi connection options (controller, username, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail fast if the optional Avi SDK import at module load time failed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate CRUD handling for the 'webhook' object type to the shared helper.
    return avi_ansible_api(module, 'webhook',
                           set([]))


if __name__ == '__main__':
    main()
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)
// TestActionIsReversible verifies that every schema action returns a reverse
// action that restores the bucket to its prior state.
func TestActionIsReversible(t *testing.T) {
	tcs := []struct {
		name   string
		action action
		state  map[string]string
	}{
		{
			name: "setKeyAction empty state",
			action: setKeyAction{
				Bucket:     Meta,
				FieldName:  []byte("/test"),
				FieldValue: []byte("1"),
			},
		},
		{
			name: "setKeyAction with key",
			action: setKeyAction{
				Bucket:     Meta,
				FieldName:  []byte("/test"),
				FieldValue: []byte("1"),
			},
			state: map[string]string{"/test": "2"},
		},
		{
			name: "deleteKeyAction empty state",
			action: deleteKeyAction{
				Bucket:    Meta,
				FieldName: []byte("/test"),
			},
		},
		{
			name: "deleteKeyAction with key",
			action: deleteKeyAction{
				Bucket:    Meta,
				FieldName: []byte("/test"),
			},
			state: map[string]string{"/test": "2"},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10)
			defer be.Close()
			tx := be.BatchTx()
			require.NotNilf(t, tx, "batch tx is nil")
			tx.Lock()
			defer tx.Unlock()
			UnsafeCreateMetaBucket(tx)
			// Seed the bucket with the initial state and sanity-check it.
			putKeyValues(tx, Meta, tc.state)
			assertBucketState(t, tx, Meta, tc.state)
			// Apply the action, then its reverse; the original state must
			// be restored.
			// NOTE: using require (not t.Errorf) here: if unsafeDo fails,
			// `reverse` may be nil and dereferencing it would panic instead
			// of failing cleanly.
			reverse, err := tc.action.unsafeDo(tx)
			require.NoErrorf(t, err, "failed to upgrade")
			_, err = reverse.unsafeDo(tx)
			require.NoErrorf(t, err, "failed to downgrade")
			assertBucketState(t, tx, Meta, tc.state)
		})
	}
}
// TestActionListRevert verifies that executing an ActionList is all-or-nothing:
// either every action applies, or a failing action causes all previously
// applied actions to be rolled back.
func TestActionListRevert(t *testing.T) {
	tcs := []struct {
		name        string
		actions     ActionList
		expectState map[string]string
		expectError error
	}{
		{
			name: "Apply multiple actions",
			actions: ActionList{
				setKeyAction{Meta, []byte("/testKey1"), []byte("testValue1")},
				setKeyAction{Meta, []byte("/testKey2"), []byte("testValue2")},
			},
			expectState: map[string]string{"/testKey1": "testValue1", "/testKey2": "testValue2"},
		},
		{
			name: "Broken action should result in changes reverted",
			actions: ActionList{
				setKeyAction{Meta, []byte("/testKey1"), []byte("testValue1")},
				brokenAction{},
				setKeyAction{Meta, []byte("/testKey2"), []byte("testValue2")},
			},
			expectState: map[string]string{},
			expectError: errBrokenAction,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			lg := zaptest.NewLogger(t)
			be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10)
			defer be.Close()
			tx := be.BatchTx()
			require.NotNilf(t, tx, "batch tx is nil")
			tx.Lock()
			defer tx.Unlock()
			UnsafeCreateMetaBucket(tx)
			// Execute the whole list, then check both the returned error and
			// the resulting bucket contents.
			err := tc.actions.unsafeExecute(lg, tx)
			if !errors.Is(err, tc.expectError) {
				t.Errorf("Unexpected error or lack thereof, expected: %v, got: %v", tc.expectError, err)
			}
			assertBucketState(t, tx, Meta, tc.expectState)
		})
	}
}
// brokenAction is a test double whose unsafeDo always fails; it is used to
// trigger the revert path in ActionList execution.
type brokenAction struct{}

var errBrokenAction = fmt.Errorf("broken action error")

func (c brokenAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
	return nil, errBrokenAction
}
// putKeyValues writes every key/value pair from kvs into the given bucket.
func putKeyValues(tx backend.UnsafeWriter, bucket backend.Bucket, kvs map[string]string) {
	for k, v := range kvs {
		tx.UnsafePut(bucket, []byte(k), []byte(v))
	}
}
// assertBucketState reads the full contents of bucket and asserts that they
// equal expect; a nil expect is treated as an empty map.
func assertBucketState(t *testing.T, tx backend.UnsafeReadWriter, bucket backend.Bucket, expect map[string]string) {
	t.Helper()
	got := map[string]string{}
	// Range over the bucket's entire key space with no result limit.
	ks, vs := tx.UnsafeRange(bucket, []byte("\x00"), []byte("\xff"), 0)
	for i := 0; i < len(ks); i++ {
		got[string(ks[i])] = string(vs[i])
	}
	if expect == nil {
		expect = map[string]string{}
	}
	assert.Equal(t, expect, got)
}
//
// RequestCompression.swift
//
// Copyright (c) 2023 Alamofire Software Foundation (http://alamofire.org/)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#if canImport(zlib) && !os(Android)
import Foundation
import zlib
/// `RequestAdapter` which compresses outgoing `URLRequest` bodies using the `deflate` `Content-Encoding` and adds the
/// appropriate header.
///
/// - Note: Most requests to most APIs are small and so would only be slowed down by applying this adapter. Measure the
///         size of your request bodies and the performance impact of using this adapter before use. Using this adapter
///         with already compressed data, such as images, will, at best, have no effect. Additionally, body compression
///         is a synchronous operation, so measuring the performance impact may be important to determine whether you
///         want to use a dedicated `requestQueue` in your `Session` instance. Finally, not all servers support request
///         compression, so test with all of your server configurations before deploying.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public struct DeflateRequestCompressor: Sendable, RequestInterceptor {
    /// Type that determines the action taken when the `URLRequest` already has a `Content-Encoding` header.
    public enum DuplicateHeaderBehavior: Sendable {
        /// Throws a `DuplicateHeaderError`. The default.
        case error
        /// Replaces the existing header value with `deflate`.
        case replace
        /// Silently skips compression when the header exists.
        case skip
    }

    /// `Error` produced when the outgoing `URLRequest` already has a `Content-Encoding` header, when the instance has
    /// been configured to produce an error.
    public struct DuplicateHeaderError: Error {}

    /// Behavior to use when the outgoing `URLRequest` already has a `Content-Encoding` header.
    public let duplicateHeaderBehavior: DuplicateHeaderBehavior
    /// Closure which determines whether the outgoing body data should be compressed.
    public let shouldCompressBodyData: @Sendable (_ bodyData: Data) -> Bool

    /// Creates an instance with the provided parameters.
    ///
    /// - Parameters:
    ///   - duplicateHeaderBehavior: `DuplicateHeaderBehavior` to use. `.error` by default.
    ///   - shouldCompressBodyData:  Closure which determines whether the outgoing body data should be compressed. `true` by default.
    public init(duplicateHeaderBehavior: DuplicateHeaderBehavior = .error,
                shouldCompressBodyData: @escaping @Sendable (_ bodyData: Data) -> Bool = { _ in true }) {
        self.duplicateHeaderBehavior = duplicateHeaderBehavior
        self.shouldCompressBodyData = shouldCompressBodyData
    }

    public func adapt(_ urlRequest: URLRequest, for session: Session, completion: @escaping (Result<URLRequest, any Error>) -> Void) {
        // No need to compress unless we have body data. No support for compressing streams.
        guard let bodyData = urlRequest.httpBody else {
            completion(.success(urlRequest))
            return
        }

        // Caller-provided predicate can opt individual bodies out of compression.
        guard shouldCompressBodyData(bodyData) else {
            completion(.success(urlRequest))
            return
        }

        // Resolve conflicts with a preexisting Content-Encoding header per the configured behavior.
        if urlRequest.headers.value(for: "Content-Encoding") != nil {
            switch duplicateHeaderBehavior {
            case .error:
                completion(.failure(DuplicateHeaderError()))
                return
            case .replace:
                // Header will be replaced once the body data is compressed.
                break
            case .skip:
                completion(.success(urlRequest))
                return
            }
        }

        var compressedRequest = urlRequest

        do {
            compressedRequest.httpBody = try deflate(bodyData)
            compressedRequest.headers.update(.contentEncoding("deflate"))
            completion(.success(compressedRequest))
        } catch {
            completion(.failure(error))
        }
    }

    func deflate(_ data: Data) throws -> Data {
        // NSData's `.zlib` algorithm emits a raw DEFLATE stream, so the zlib (RFC 1950)
        // framing is added by hand: a two-byte header, the compressed payload, and a
        // big-endian Adler-32 checksum of the *uncompressed* data as the trailer.
        var output = Data([0x78, 0x5E]) // Header
        try output.append((data as NSData).compressed(using: .zlib) as Data)
        var checksum = adler32Checksum(of: data).bigEndian
        output.append(Data(bytes: &checksum, count: MemoryLayout<UInt32>.size))

        return output
    }

    func adler32Checksum(of data: Data) -> UInt32 {
        // Seed of 1 is the Adler-32 starting value required by zlib.
        data.withUnsafeBytes { buffer in
            UInt32(adler32(1, buffer.baseAddress, UInt32(buffer.count)))
        }
    }
}
// Convenience static accessors so interceptor call sites can use the terse
// `.deflateCompressor` / `.deflateCompressor(...)` spelling.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension RequestInterceptor where Self == DeflateRequestCompressor {
    /// Create a `DeflateRequestCompressor` with default `duplicateHeaderBehavior` and `shouldCompressBodyData` values.
    public static var deflateCompressor: DeflateRequestCompressor {
        DeflateRequestCompressor()
    }

    /// Creates a `DeflateRequestCompressor` with the provided `DuplicateHeaderBehavior` and `shouldCompressBodyData`
    /// closure.
    ///
    /// - Parameters:
    ///   - duplicateHeaderBehavior: `DuplicateHeaderBehavior` to use.
    ///   - shouldCompressBodyData:  Closure which determines whether the outgoing body data should be compressed. `true` by default.
    ///
    /// - Returns:                   The `DeflateRequestCompressor`.
    public static func deflateCompressor(
        duplicateHeaderBehavior: DeflateRequestCompressor.DuplicateHeaderBehavior = .error,
        shouldCompressBodyData: @escaping @Sendable (_ bodyData: Data) -> Bool = { _ in true }
    ) -> DeflateRequestCompressor {
        DeflateRequestCompressor(duplicateHeaderBehavior: duplicateHeaderBehavior,
                                 shouldCompressBodyData: shouldCompressBodyData)
    }
}
#endif | swift | github | https://github.com/Alamofire/Alamofire | Source/Features/RequestCompression.swift |
from __future__ import unicode_literals
import base64
import binascii
import hashlib
import importlib
import warnings
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
    """Return True if `encoded` looks like a hash some configured hasher accepts."""
    # Unusable passwords are stored as '!' followed by random noise.
    if encoded is None:
        return False
    if encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
        return False
    try:
        identify_hasher(encoded)
    except ValueError:
        # No configured hasher recognises this format.
        return False
    else:
        return True
def check_password(password, encoded, setter=None, preferred='default'):
    """
    Returns a boolean of whether the raw password matches the three
    part encoded digest.

    If setter is specified, it'll be called when you need to
    regenerate the password.
    """
    if password is None or not is_password_usable(encoded):
        return False

    preferred = get_hasher(preferred)
    hasher = identify_hasher(encoded)

    # An upgrade is needed when the stored hash uses a different algorithm
    # than the preferred one, or an outdated work factor.
    hasher_changed = hasher.algorithm != preferred.algorithm
    must_update = hasher_changed or preferred.must_update(encoded)
    is_correct = hasher.verify(password, encoded)

    # If the hasher didn't change (we don't protect against enumeration if it
    # does) and the password should get updated, try to close the timing gap
    # between the work factor of the current encoded password and the default
    # work factor.
    if not is_correct and not hasher_changed and must_update:
        hasher.harden_runtime(password, encoded)

    # Only re-hash after a successful check; the setter typically persists
    # the upgraded hash back to storage.
    if setter and is_correct and must_update:
        setter(password)
    return is_correct
def make_password(password, salt=None, hasher='default'):
    """
    Turn a plain-text password into a hash for database storage

    Same as encode() but generates a new random salt.
    If password is None then a concatenation of
    UNUSABLE_PASSWORD_PREFIX and a random string will be returned
    which disallows logins. Additional random string reduces chances
    of gaining access to staff or superuser accounts.
    See ticket #20079 for more info.
    """
    if password is None:
        return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
    hasher = get_hasher(hasher)
    # Let the hasher pick its own salt format when none is supplied.
    if not salt:
        salt = hasher.salt()
    return hasher.encode(password, salt)
@lru_cache.lru_cache()
def get_hashers():
    # Instantiate every hasher listed in settings.PASSWORD_HASHERS.  Cached
    # until reset_hashers() clears it on a settings change.
    hashers = []
    for hasher_path in settings.PASSWORD_HASHERS:
        hasher_cls = import_string(hasher_path)
        hasher = hasher_cls()
        # Each hasher must declare a non-empty algorithm identifier.
        if not getattr(hasher, 'algorithm'):
            raise ImproperlyConfigured("hasher doesn't specify an "
                                       "algorithm name: %s" % hasher_path)
        hashers.append(hasher)
    return hashers
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
    # Cached map of algorithm name -> hasher instance, derived from get_hashers().
    return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
    # Invalidate both hasher caches when PASSWORD_HASHERS is overridden,
    # e.g. by override_settings() in tests.
    if kwargs['setting'] == 'PASSWORD_HASHERS':
        get_hashers.cache_clear()
        get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
    """
    Returns an instance of a loaded password hasher.

    If algorithm is 'default', the default hasher will be returned.
    This function will also lazy import hashers specified in your
    settings file if needed.
    """
    # Anything exposing an ``algorithm`` attribute is already a hasher.
    if hasattr(algorithm, 'algorithm'):
        return algorithm

    if algorithm == 'default':
        # The first entry in PASSWORD_HASHERS is the preferred hasher.
        return get_hashers()[0]

    try:
        return get_hashers_by_algorithm()[algorithm]
    except KeyError:
        raise ValueError("Unknown password hashing algorithm '%s'. "
                         "Did you specify it in the PASSWORD_HASHERS "
                         "setting?" % algorithm)
def identify_hasher(encoded):
    """
    Returns an instance of a loaded password hasher.

    Identifies hasher algorithm by examining encoded hash, and calls
    get_hasher() to return hasher. Raises ValueError if
    algorithm cannot be identified, or if hasher is not loaded.
    """
    # Ancient versions of Django created plain MD5 passwords and accepted
    # MD5 passwords with an empty salt.
    if ((len(encoded) == 32 and '$' not in encoded) or
            (len(encoded) == 37 and encoded.startswith('md5$$'))):
        algorithm = 'unsalted_md5'
    # Ancient versions of Django accepted SHA1 passwords with an empty salt.
    elif len(encoded) == 46 and encoded.startswith('sha1$$'):
        algorithm = 'unsalted_sha1'
    else:
        # Modern format: the algorithm is the first '$'-separated field.
        algorithm = encoded.split('$', 1)[0]
    return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
    """
    Returns the given hash, with only the first ``show`` number shown. The
    rest are masked with ``char`` for security reasons.
    """
    visible = hash[:show]
    return visible + char * (len(hash) - len(visible))
class BasePasswordHasher(object):
    """
    Abstract base class for password hashers

    When creating your own hasher, you need to override algorithm,
    verify(), encode() and safe_summary().

    PasswordHasher objects are immutable.
    """
    # Identifier stored as the first '$'-separated field of encoded hashes.
    algorithm = None
    # Optional backing module: a dotted path string, or a (display name,
    # dotted path) pair. Imported lazily by _load_library().
    library = None

    def _load_library(self):
        # Import and return the configured backing library, raising a
        # descriptive ValueError if it is missing or not declared.
        if self.library is not None:
            if isinstance(self.library, (tuple, list)):
                name, mod_path = self.library
            else:
                mod_path = self.library
            try:
                module = importlib.import_module(mod_path)
            except ImportError as e:
                raise ValueError("Couldn't load %r algorithm library: %s" %
                                 (self.__class__.__name__, e))
            return module
        raise ValueError("Hasher %r doesn't specify a library attribute" %
                         self.__class__.__name__)

    def salt(self):
        """
        Generates a cryptographically secure nonce salt in ASCII
        """
        return get_random_string()

    def verify(self, password, encoded):
        """
        Checks if the given password is correct
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')

    def encode(self, password, salt):
        """
        Creates an encoded database value

        The result is normally formatted as "algorithm$salt$hash" and
        must be fewer than 128 characters.
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')

    def safe_summary(self, encoded):
        """
        Returns a summary of safe values

        The result is a dictionary and will be used where the password field
        must be displayed to construct a safe representation of the password.
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')

    def must_update(self, encoded):
        # Subclasses return True when `encoded` uses an outdated work factor.
        return False

    def harden_runtime(self, password, encoded):
        """
        Bridge the runtime gap between the work factor supplied in `encoded`
        and the work factor suggested by this hasher.

        Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
        `self.iterations` is 30000, this method should run password through
        another 10000 iterations of PBKDF2. Similar approaches should exist
        for any hasher that has a work factor. If not, this method should be
        defined as a no-op to silence the warning.
        """
        warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method')
class PBKDF2PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the PBKDF2 algorithm (recommended)

    Configured to use PBKDF2 + HMAC + SHA256.
    The result is a 64 byte binary string. Iterations may be changed
    safely but you must rename the algorithm if you change SHA256.
    """
    algorithm = "pbkdf2_sha256"
    iterations = 36000
    digest = hashlib.sha256

    def encode(self, password, salt, iterations=None):
        # Produces "algorithm$iterations$salt$b64hash"; iterations default to
        # the class-level work factor when not given explicitly.
        assert password is not None
        assert salt and '$' not in salt
        if not iterations:
            iterations = self.iterations
        hash = pbkdf2(password, salt, iterations, digest=self.digest)
        hash = base64.b64encode(hash).decode('ascii').strip()
        return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)

    def verify(self, password, encoded):
        # Re-encode with the stored salt/iterations and compare the full
        # strings in constant time to avoid timing attacks.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt, int(iterations))
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        # Display-safe breakdown: salt and hash are partially masked.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('iterations'), iterations),
            (_('salt'), mask_hash(salt)),
            (_('hash'), mask_hash(hash)),
        ])

    def must_update(self, encoded):
        # Re-hash whenever the stored iteration count differs from current.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        return int(iterations) != self.iterations

    def harden_runtime(self, password, encoded):
        # Burn the iteration difference so wrong-password checks against old
        # hashes take about as long as checks against current-strength ones.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        extra_iterations = self.iterations - int(iterations)
        if extra_iterations > 0:
            self.encode(password, salt, extra_iterations)
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
    """
    Alternate PBKDF2 hasher which uses SHA1, the default PRF
    recommended by PKCS #5. This is compatible with other
    implementations of PBKDF2, such as openssl's
    PKCS5_PBKDF2_HMAC_SHA1().
    """
    # Only the identifier and digest differ; all behavior is inherited.
    algorithm = "pbkdf2_sha1"
    digest = hashlib.sha1
class Argon2PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the argon2 algorithm.

    This is the winner of the Password Hashing Competition 2013-2015
    (https://password-hashing.net). It requires the argon2-cffi library which
    depends on native C code and might cause portability issues.
    """
    algorithm = 'argon2'
    library = 'argon2'

    # Work-factor parameters passed to argon2.low_level.hash_secret().
    time_cost = 2
    memory_cost = 512
    parallelism = 2

    def encode(self, password, salt):
        # Prefix argon2's own "$argon2i$..." output with our algorithm name,
        # yielding "argon2$argon2i$...".
        argon2 = self._load_library()
        data = argon2.low_level.hash_secret(
            force_bytes(password),
            force_bytes(salt),
            time_cost=self.time_cost,
            memory_cost=self.memory_cost,
            parallelism=self.parallelism,
            hash_len=argon2.DEFAULT_HASH_LENGTH,
            type=argon2.low_level.Type.I,
        )
        return self.algorithm + data.decode('ascii')

    def verify(self, password, encoded):
        # Strip our algorithm prefix and let argon2 verify its native format.
        argon2 = self._load_library()
        algorithm, rest = encoded.split('$', 1)
        assert algorithm == self.algorithm
        try:
            return argon2.low_level.verify_secret(
                force_bytes('$' + rest),
                force_bytes(password),
                type=argon2.low_level.Type.I,
            )
        except argon2.exceptions.VerificationError:
            return False

    def safe_summary(self, encoded):
        (algorithm, variety, version, time_cost, memory_cost, parallelism,
            salt, data) = self._decode(encoded)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('variety'), variety),
            (_('version'), version),
            (_('memory cost'), memory_cost),
            (_('time cost'), time_cost),
            (_('parallelism'), parallelism),
            (_('salt'), mask_hash(salt)),
            (_('hash'), mask_hash(data)),
        ])

    def must_update(self, encoded):
        # Re-hash if the library's argon2 version or any work-factor
        # parameter differs from the stored hash.
        (algorithm, variety, version, time_cost, memory_cost, parallelism,
            salt, data) = self._decode(encoded)
        assert algorithm == self.algorithm
        argon2 = self._load_library()
        return (
            argon2.low_level.ARGON2_VERSION != version or
            self.time_cost != time_cost or
            self.memory_cost != memory_cost or
            self.parallelism != parallelism
        )

    def harden_runtime(self, password, encoded):
        # The runtime for Argon2 is too complicated to implement a sensible
        # hardening algorithm.
        pass

    def _decode(self, encoded):
        """
        Split an encoded hash and return: (
            algorithm, variety, version, time_cost, memory_cost,
            parallelism, salt, data,
        ).
        """
        bits = encoded.split('$')
        if len(bits) == 5:
            # Argon2 < 1.3 omitted the "v=..." field; version defaults to 0x10.
            algorithm, variety, raw_params, salt, data = bits
            version = 0x10
        else:
            assert len(bits) == 6
            algorithm, variety, raw_version, raw_params, salt, data = bits
            assert raw_version.startswith('v=')
            version = int(raw_version[len('v='):])
        # raw_params looks like "m=512,t=2,p=2".
        params = dict(bit.split('=', 1) for bit in raw_params.split(','))
        assert len(params) == 3 and all(x in params for x in ('t', 'm', 'p'))
        time_cost = int(params['t'])
        memory_cost = int(params['m'])
        parallelism = int(params['p'])
        return (
            algorithm, variety, version, time_cost, memory_cost, parallelism,
            salt, data,
        )
class BCryptSHA256PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm (recommended)

    This is considered by many to be the most secure algorithm but you
    must first install the bcrypt library.  Please be warned that
    this library depends on native C code and might cause portability
    issues.
    """
    algorithm = "bcrypt_sha256"
    digest = hashlib.sha256
    library = ("bcrypt", "bcrypt")
    rounds = 12

    def salt(self):
        # bcrypt generates its own salt, which also embeds the work factor.
        bcrypt = self._load_library()
        return bcrypt.gensalt(self.rounds)

    def encode(self, password, salt):
        bcrypt = self._load_library()
        # Hash the password prior to using bcrypt to prevent password
        # truncation as described in #20138.
        if self.digest is not None:
            # Use binascii.hexlify() because a hex encoded bytestring is
            # Unicode on Python 3.
            password = binascii.hexlify(self.digest(force_bytes(password)).digest())
        else:
            password = force_bytes(password)

        data = bcrypt.hashpw(password, salt)
        return "%s$%s" % (self.algorithm, force_text(data))

    def verify(self, password, encoded):
        # bcrypt embeds salt and rounds in its output, so the stored data
        # doubles as the salt for re-encoding; compare in constant time.
        algorithm, data = encoded.split('$', 1)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, force_bytes(data))
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        # Layout: "bcrypt_sha256$$2b$12$<22-char salt><31-char checksum>".
        algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
        assert algorithm == self.algorithm
        salt, checksum = data[:22], data[22:]
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('work factor'), work_factor),
            (_('salt'), mask_hash(salt)),
            (_('checksum'), mask_hash(checksum)),
        ])

    def must_update(self, encoded):
        # Re-hash when the stored work factor differs from the current one.
        algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
        return int(rounds) != self.rounds

    def harden_runtime(self, password, encoded):
        _, data = encoded.split('$', 1)
        salt = data[:29]  # Length of the salt in bcrypt.
        rounds = data.split('$')[2]
        # work factor is logarithmic, adding one doubles the load.
        diff = 2**(self.rounds - int(rounds)) - 1
        while diff > 0:
            self.encode(password, force_bytes(salt))
            diff -= 1
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm

    This is considered by many to be the most secure algorithm but you
    must first install the bcrypt library.  Please be warned that
    this library depends on native C code and might cause portability
    issues.

    This hasher does not first hash the password which means it is subject to
    the 72 character bcrypt password truncation, most use cases should prefer
    the BCryptSHA256PasswordHasher.

    See: https://code.djangoproject.com/ticket/20138
    """
    # digest=None makes the parent's encode() skip the pre-hash step.
    algorithm = "bcrypt"
    digest = None
class SHA1PasswordHasher(BasePasswordHasher):
    """
    The SHA1 password hashing algorithm (not recommended)
    """
    algorithm = "sha1"

    def encode(self, password, salt):
        # Format: "sha1$<salt>$<hex digest of salt+password>".
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        # Re-encode with the stored salt and compare in constant time.
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])

    def harden_runtime(self, password, encoded):
        # SHA1 has no tunable work factor, so there is nothing to harden.
        pass
class MD5PasswordHasher(BasePasswordHasher):
    """
    The Salted MD5 password hashing algorithm (not recommended)
    """
    algorithm = "md5"

    def encode(self, password, salt):
        # Format: "md5$<salt>$<hex digest of salt+password>".
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        # Re-encode with the stored salt and compare in constant time.
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])

    def harden_runtime(self, password, encoded):
        # MD5 has no tunable work factor, so there is nothing to harden.
        pass
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
    """
    Very insecure algorithm that you should *never* use; stores SHA1 hashes
    with an empty salt.

    This class is implemented because Django used to accept such password
    hashes. Some older Django installs still have these values lingering
    around so we need to handle and upgrade them properly.
    """
    algorithm = "unsalted_sha1"

    def salt(self):
        # Legacy format: deliberately no salt.
        return ''

    def encode(self, password, salt):
        assert salt == ''
        hash = hashlib.sha1(force_bytes(password)).hexdigest()
        # Note the literal 'sha1$$' prefix, not this hasher's algorithm name.
        return 'sha1$$%s' % hash

    def verify(self, password, encoded):
        encoded_2 = self.encode(password, '')
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        assert encoded.startswith('sha1$$')
        hash = encoded[6:]
        return OrderedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(hash)),
        ])

    def harden_runtime(self, password, encoded):
        # No tunable work factor; nothing to harden.
        pass
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
    """
    Incredibly insecure algorithm that you should *never* use; stores unsalted
    MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
    empty salt.

    This class is implemented because Django used to store passwords this way
    and to accept such password hashes. Some older Django installs still have
    these values lingering around so we need to handle and upgrade them
    properly.
    """
    algorithm = "unsalted_md5"

    def salt(self):
        # Legacy format: deliberately no salt.
        return ''

    def encode(self, password, salt):
        assert salt == ''
        # Bare hex digest, no algorithm prefix at all.
        return hashlib.md5(force_bytes(password)).hexdigest()

    def verify(self, password, encoded):
        # Also accept the 'md5$$<digest>' variant by stripping its prefix.
        if len(encoded) == 37 and encoded.startswith('md5$$'):
            encoded = encoded[5:]
        encoded_2 = self.encode(password, '')
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        return OrderedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(encoded, show=3)),
        ])

    def harden_runtime(self, password, encoded):
        # No tunable work factor; nothing to harden.
        pass
class CryptPasswordHasher(BasePasswordHasher):
    """
    Password hashing using UNIX crypt (not recommended)

    The crypt module is not supported on all platforms.
    """
    algorithm = "crypt"
    library = "crypt"

    def salt(self):
        # Traditional crypt(3) uses a 2-character salt.
        return get_random_string(2)

    def encode(self, password, salt):
        crypt = self._load_library()
        assert len(salt) == 2
        data = crypt.crypt(force_str(password), salt)
        assert data is not None  # A platform like OpenBSD with a dummy crypt module.
        # we don't need to store the salt, but Django used to do this
        return "%s$%s$%s" % (self.algorithm, '', data)

    def verify(self, password, encoded):
        # crypt embeds the salt in its output, so `data` doubles as the salt.
        crypt = self._load_library()
        algorithm, salt, data = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return constant_time_compare(data, crypt.crypt(force_str(password), data))

    def safe_summary(self, encoded):
        algorithm, salt, data = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), salt),
            (_('hash'), mask_hash(data, show=3)),
        ])

    def harden_runtime(self, password, encoded):
        # crypt(3) has no tunable work factor; nothing to harden.
        pass
"""View to accept incoming websocket connection."""
import asyncio
from contextlib import suppress
import logging
from typing import Optional
from aiohttp import WSMsgType, web
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
from .auth import AuthPhase, auth_required_message
from .const import (
CANCELLATION_ERRORS,
DATA_CONNECTIONS,
MAX_PENDING_MSG,
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
URL,
)
from .error import Disconnect
from .messages import message_to_json
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
class WebsocketAPIView(HomeAssistantView):
    """View to serve a websockets endpoint."""

    name = "websocketapi"
    url = URL
    # Authentication is performed inside the websocket handshake by
    # AuthPhase, not by the HTTP layer.
    requires_auth = False

    async def get(self, request: web.Request) -> web.WebSocketResponse:
        """Handle an incoming websocket connection."""
        # Each connection gets its own handler instance for its lifetime.
        return await WebSocketHandler(request.app["hass"], request).async_handle()
class WebSocketHandler:
    """Handle an active websocket client connection."""

    def __init__(self, hass, request):
        """Initialize an active connection."""
        self.hass = hass
        self.request = request
        self.wsock: Optional[web.WebSocketResponse] = None
        # Outgoing messages are queued and drained by a dedicated writer task
        # so slow clients cannot block command handling.
        self._to_write: asyncio.Queue = asyncio.Queue(maxsize=MAX_PENDING_MSG)
        # Task running async_handle() (reader) and the writer task.
        self._handle_task = None
        self._writer_task = None
        self._logger = logging.getLogger("{}.connection.{}".format(__name__, id(self)))
        # Unsubscribe callback for the pending-message peak timer, if armed.
        self._peak_checker_unsub = None

    async def _writer(self):
        """Write outgoing messages."""
        # Exceptions if Socket disconnected or cancelled by connection handler
        with suppress(RuntimeError, ConnectionResetError, *CANCELLATION_ERRORS):
            while not self.wsock.closed:
                message = await self._to_write.get()
                if message is None:
                    # Sentinel pushed during shutdown; stop writing.
                    break
                self._logger.debug("Sending %s", message)

                if not isinstance(message, str):
                    message = message_to_json(message)
                await self.wsock.send_str(message)

        # Clean up the peaker checker when we shut down the writer
        if self._peak_checker_unsub:
            self._peak_checker_unsub()
            self._peak_checker_unsub = None

    @callback
    def _send_message(self, message):
        """Send a message to the client.

        Closes connection if the client is not reading the messages.

        Async friendly.
        """
        try:
            self._to_write.put_nowait(message)
        except asyncio.QueueFull:
            # Queue hit MAX_PENDING_MSG: the client is not keeping up at all,
            # so drop the connection.
            self._logger.error(
                "Client exceeded max pending messages [2]: %s", MAX_PENDING_MSG
            )
            self._cancel()

        if self._to_write.qsize() < PENDING_MSG_PEAK:
            # Below the soft threshold: disarm any pending peak check.
            if self._peak_checker_unsub:
                self._peak_checker_unsub()
                self._peak_checker_unsub = None
            return

        # Above the soft threshold: arm a delayed check; if the backlog is
        # still high after PENDING_MSG_PEAK_TIME, the connection is dropped.
        if self._peak_checker_unsub is None:
            self._peak_checker_unsub = async_call_later(
                self.hass, PENDING_MSG_PEAK_TIME, self._check_write_peak
            )

    @callback
    def _check_write_peak(self, _):
        """Check that we are no longer above the write peak."""
        self._peak_checker_unsub = None

        if self._to_write.qsize() < PENDING_MSG_PEAK:
            # Backlog drained in time; nothing to do.
            return

        self._logger.error(
            "Client unable to keep up with pending messages. Stayed over %s for %s seconds",
            PENDING_MSG_PEAK,
            PENDING_MSG_PEAK_TIME,
        )
        self._cancel()

    @callback
    def _cancel(self):
        """Cancel the connection."""
        # Cancelling the handle task unwinds async_handle() via
        # CancelledError, which runs its cleanup in the finally block.
        self._handle_task.cancel()
        self._writer_task.cancel()

    async def async_handle(self) -> web.WebSocketResponse:
        """Handle a websocket response."""
        request = self.request
        wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
        await wsock.prepare(request)
        self._logger.debug("Connected from %s", request.remote)
        self._handle_task = asyncio.current_task()

        @callback
        def handle_hass_stop(event):
            """Cancel this connection."""
            self._cancel()

        unsub_stop = self.hass.bus.async_listen(
            EVENT_HOMEASSISTANT_STOP, handle_hass_stop
        )

        # As the webserver is now started before the start
        # event we do not want to block for websocket responses
        self._writer_task = asyncio.create_task(self._writer())

        auth = AuthPhase(self._logger, self.hass, self._send_message, request)
        connection = None
        disconnect_warn = None

        try:
            self._send_message(auth_required_message())

            # Auth Phase: exactly one message is expected, within 10 seconds.
            try:
                with async_timeout.timeout(10):
                    msg = await wsock.receive()
            except asyncio.TimeoutError as err:
                disconnect_warn = "Did not receive auth message within 10 seconds"
                raise Disconnect from err

            if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
                raise Disconnect

            if msg.type != WSMsgType.TEXT:
                disconnect_warn = "Received non-Text message."
                raise Disconnect

            try:
                msg_data = msg.json()
            except ValueError as err:
                disconnect_warn = "Received invalid JSON."
                raise Disconnect from err

            self._logger.debug("Received %s", msg_data)
            # On success this returns an ActiveConnection; on failure
            # AuthPhase raises Disconnect.
            connection = await auth.async_handle(msg_data)
            self.hass.data[DATA_CONNECTIONS] = (
                self.hass.data.get(DATA_CONNECTIONS, 0) + 1
            )
            self.hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_WEBSOCKET_CONNECTED
            )

            # Command phase: loop until close or a malformed message.
            while not wsock.closed:
                msg = await wsock.receive()

                if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
                    break

                if msg.type != WSMsgType.TEXT:
                    disconnect_warn = "Received non-Text message."
                    break

                try:
                    msg_data = msg.json()
                except ValueError:
                    disconnect_warn = "Received invalid JSON."
                    break

                self._logger.debug("Received %s", msg_data)
                connection.async_handle(msg_data)

        except asyncio.CancelledError:
            self._logger.info("Connection closed by client")

        except Disconnect:
            pass

        except Exception:  # pylint: disable=broad-except
            self._logger.exception("Unexpected error inside websocket API")

        finally:
            unsub_stop()

            if connection is not None:
                connection.async_close()

            try:
                # Push the writer's shutdown sentinel.
                self._to_write.put_nowait(None)
                # Make sure all error messages are written before closing
                await self._writer_task
            except asyncio.QueueFull:
                # Queue is full; the writer can't drain it in time, so cancel.
                self._writer_task.cancel()

            await wsock.close()

            if disconnect_warn is None:
                self._logger.debug("Disconnected")
            else:
                self._logger.warning("Disconnected: %s", disconnect_warn)

            if connection is not None:
                self.hass.data[DATA_CONNECTIONS] -= 1
            self.hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_WEBSOCKET_DISCONNECTED
            )

        return wsock
""" Merkyl plugin for Artifactor
Add a stanza to the artifactor config like this,
artifactor:
log_dir: /home/username/outdir
per_run: test #test, run, None
overwrite: True
plugins:
merkyl:
enabled: False
plugin: merkyl
port: 8192
log_files:
- /var/www/miq/vmdb/log/evm.log
- /var/www/miq/vmdb/log/production.log
- /var/www/miq/vmdb/log/automation.log
"""
from artifactor import ArtifactorBasePlugin
import os.path
import requests
class Merkyl(ArtifactorBasePlugin):

    class Test(object):
        # Per-test state: the appliance the merkyl daemon runs on and any log
        # files registered beyond the globally configured ones.
        def __init__(self, ident, ip, port):
            self.ident = ident
            self.ip = ip
            self.port = port
            self.in_progress = False
            self.extra_files = set()

    def plugin_initialize(self):
        # Wire the artifactor lifecycle hooks to the merkyl session/test
        # handlers below.
        self.register_plugin_hook('setup_merkyl', self.start_session)
        self.register_plugin_hook('start_test', self.start_test)
        self.register_plugin_hook('finish_test', self.finish_test)
        self.register_plugin_hook('teardown_merkyl', self.finish_session)
        self.register_plugin_hook('get_log_merkyl', self.get_log)
        self.register_plugin_hook('add_log_merkyl', self.add_log)

    def configure(self):
        # `log_files` and `port` come from the plugin's artifactor config
        # stanza (see the module docstring).
        self.files = self.data.get('log_files', [])
        self.port = self.data.get('port', '8192')
        self.tests = {}
        self.configured = True

    @ArtifactorBasePlugin.check_configured
    def start_test(self, test_name, test_location, ip):
        """Tell the merkyl daemon to reset its tail buffers for a new test."""
        test_ident = "{}/{}".format(test_location, test_name)
        if test_ident in self.tests:
            if self.tests[test_ident].in_progress:
                print("Test already running, can't start another")
                return None
        else:
            self.tests[test_ident] = self.Test(test_ident, ip, self.port)
        url = "http://{}:{}/resetall".format(ip, self.port)
        requests.get(url, timeout=15)
        self.tests[test_ident].in_progress = True

    @ArtifactorBasePlugin.check_configured
    def get_log(self, test_name, test_location, filename):
        """Fetch the current tail of one monitored file from the daemon."""
        test_ident = "{}/{}".format(test_location, test_name)
        ip = self.tests[test_ident].ip
        # Merkyl addresses files by basename only.
        base, tail = os.path.split(filename)
        url = "http://{}:{}/get/{}".format(ip, self.port, tail)
        doc = requests.get(url, timeout=15)
        content = doc.content
        return {'merkyl_content': content}, None

    @ArtifactorBasePlugin.check_configured
    def add_log(self, test_name, test_location, filename):
        """Register an extra file (beyond the configured set) for this test."""
        test_ident = "{}/{}".format(test_location, test_name)
        ip = self.tests[test_ident].ip
        if filename not in self.files:
            if filename not in self.tests[test_ident].extra_files:
                self.tests[test_ident].extra_files.add(filename)
                url = "http://{}:{}/setup{}".format(ip, self.port, filename)
                requests.get(url, timeout=15)

    @ArtifactorBasePlugin.check_configured
    def finish_test(self, artifact_path, test_name, test_location, ip, slaveid):
        """Collect all tailed logs for the test and emit them as artifacts."""
        test_ident = "{}/{}".format(test_location, test_name)
        artifacts = []
        # Configured files are fetched but stay registered; they are only
        # removed from the daemon in finish_session().
        for filename in self.files:
            _, tail = os.path.split(filename)
            url = "http://{}:{}/get/{}".format(ip, self.port, tail)
            doc = requests.get(url, timeout=15)
            artifacts.append((tail, doc.content))
        # Extra files are fetched and then deleted from the daemon, since
        # they were registered for this test only.
        for filename in self.tests[test_ident].extra_files:
            _, tail = os.path.split(filename)
            url = "http://{}:{}/get/{}".format(ip, self.port, tail)
            doc = requests.get(url, timeout=15)
            artifacts.append((tail, doc.content))
            url = "http://{}:{}/delete/{}".format(ip, self.port, tail)
            requests.get(url, timeout=15)
        del self.tests[test_ident]
        for filename, contents in artifacts:
            self.fire_hook('filedump', test_location=test_location, test_name=test_name,
                description="Merkyl: {}".format(filename), slaveid=slaveid,
                contents=contents, file_type="log", display_type="danger",
                display_glyph="align-justify", group_id="merkyl")
        return None, None

    @ArtifactorBasePlugin.check_configured
    def start_session(self, ip):
        """Session started"""
        # Register every configured log file with the daemon for tailing.
        for file_name in self.files:
            url = "http://{}:{}/setup{}".format(ip, self.port, file_name)
            requests.get(url, timeout=15)

    @ArtifactorBasePlugin.check_configured
    def finish_session(self, ip):
        """Session finished"""
        # Unregister the configured log files from the daemon.
        for filename in self.files:
            base, tail = os.path.split(filename)
            url = "http://{}:{}/delete/{}".format(ip, self.port, tail)
            requests.get(url, timeout=15)
//===--- SILUndef.h - SIL Undef Value Representation ------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_SIL_UNDEF_H
#define SWIFT_SIL_UNDEF_H
#include "swift/Basic/Compiler.h"
#include "swift/SIL/SILValue.h"
namespace swift {
class SILArgument;
class SILInstruction;
class SILModule;
class SILUndef : public ValueBase {
  /// A back pointer to the function that this SILUndef is uniqued by.
  SILFunction *parent;

  // Private: clients obtain instances via the uniquing get() factories.
  SILUndef(SILFunction *parent, SILType type);

public:
  void operator=(const SILArgument &) = delete;
  void operator delete(void *, size_t) = delete;

  /// Return a SILUndef with the same type as the passed in value.
  static SILUndef *get(SILValue value) {
    return SILUndef::get(value->getFunction(), value->getType());
  }

  /// Return the unique SILUndef of type \p ty in function \p f.
  static SILUndef *get(SILFunction *f, SILType ty);

  /// Reference-taking convenience overload of get().
  static SILUndef *get(SILFunction &f, SILType ty) {
    return SILUndef::get(&f, ty);
  }

  /// This is an API only used by SILSSAUpdater... please do not use it anywhere
  /// else.
  template <class OwnerTy>
  static SILUndef *getSentinelValue(SILFunction *fn, OwnerTy owner,
                                    SILType type) {
    // Ownership kind isn't used here, the value just needs to have a unique
    // address.
    return new (*owner) SILUndef(fn, type);
  }

  SILFunction *getParent() const { return parent; }

  /// Undef values carry no ownership.
  ValueOwnershipKind getOwnershipKind() const { return OwnershipKind::None; }

  // SILUndef is neither an argument nor an instruction; deleting these
  // classof overloads makes such casts a compile-time error.
  static bool classof(const SILArgument *) = delete;
  static bool classof(const SILInstruction *) = delete;

  static bool classof(SILNodePointer node) {
    return node->getKind() == SILNodeKind::SILUndef;
  }
};
} // end swift namespace
#endif | c | github | https://github.com/apple/swift | include/swift/SIL/SILUndef.h |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from django.template import Context, Engine
from django.template.base import TemplateEncodingError
from django.utils import six
from django.utils.safestring import SafeData
class UnicodeTests(TestCase):
    def test_template(self):
        """Templates and Contexts accept unicode and UTF-8 bytestrings alike."""
        # Templates can be created from unicode strings.
        engine = Engine()
        t1 = engine.from_string('ŠĐĆŽćžšđ {{ var }}')
        # Templates can also be created from bytestrings. These are assumed to
        # be encoded using UTF-8.
        s = b'\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
        t2 = engine.from_string(s)
        # Non-UTF-8 bytes must be rejected, not silently mangled.
        with self.assertRaises(TemplateEncodingError):
            engine.from_string(b'\x80\xc5\xc0')

        # Contexts can be constructed from unicode or UTF-8 bytestrings.
        Context({b"var": b"foo"})
        Context({"var": b"foo"})
        c3 = Context({b"var": "Đđ"})
        Context({"var": b"\xc4\x90\xc4\x91"})

        # Since both templates and all four contexts represent the same thing,
        # they all render the same (and are returned as unicode objects and
        # "safe" objects as well, for auto-escaping purposes).
        self.assertEqual(t1.render(c3), t2.render(c3))
        self.assertIsInstance(t1.render(c3), six.text_type)
        self.assertIsInstance(t1.render(c3), SafeData)
#!/usr/bin/env python
"""Standalone console script"""
import sys
import os
import optparse
from AutoNetkit.internet import Internet
from AutoNetkit import config
import AutoNetkit as ank
import logging
import pkg_resources
LOG = logging.getLogger("ANK")
def main():
    """Command-line entry point: parse options, compile the topology for the
    selected target platforms, then optionally plot/deploy/verify and save.
    """
    version = "git-current"
    try:
        version = pkg_resources.get_distribution("AutoNetkit").version
    except Exception:
        # Best effort: keep the placeholder when AutoNetkit is not installed
        # as a distribution (e.g. running from a source checkout).
        pass

    # make it easy to turn on and off plotting and deploying from command line
    usage = ("\nNetkit: %prog -f filename.graphml --netkit\n"
             "Junosphere: %prog -f filename.graphml --junos\n"
             "Additional documentation at http://packages.python.org/AutoNetkit/")
    opt = optparse.OptionParser(usage, version="%prog " + str(version))

    opt.add_option('--plot', '-p', action="store_true", dest="plot",
                   default=False, help="Plot lab")
    opt.add_option('--deploy', action="store_true", default=False,
                   help="Deploy lab to hosts")
    opt.add_option('--verify', action="store_true", default=False,
                   help="Verify lab on hosts")
    opt.add_option('--save', action="store_true", default=False,
                   help="Save the network for future use (eg verification)")
    opt.add_option('--file', '-f', default=None,
                   help="Load configuration from FILE")
    opt.add_option('--bgp_policy', '-b', default=None,
                   help="Load BGP policy statements from FILE")
    opt.add_option('--debug', action="store_true", default=False,
                   help="Debugging output")

    # Deployment environments
    opt.add_option('--netkit', action="store_true", default=False,
                   help="Compile Netkit")
    opt.add_option('--libvirt', action="store_true", default=False,
                   help="Compile Libvirt")
    opt.add_option('--cbgp', action="store_true", default=False,
                   help="Compile cBGP")
    opt.add_option('--dynagen', action="store_true", default=False,
                   help="Compile dynagen")
    opt.add_option('--junos', action="store_true", default=False,
                   help="Compile Junosphere (legacy command)")

    # Juniper options
    opt.add_option('--junosphere', action="store_true", default=False,
                   help="Compile to Junosphere")
    opt.add_option('--junosphere_olive', action="store_true", default=False,
                   help="Compile to Olive-based Junosphere")
    opt.add_option('--olive', action="store_true", default=False,
                   help="Compile to Qemu-based Olive")
    opt.add_option('--olive_qemu_patched', action="store_true", default=False,
                   help="Custom Qemu install (6 interface count)")

    # IGP selection; if both are given, IS-IS wins (checked last below).
    opt.add_option('--isis', action="store_true", default=False,
                   help="Use IS-IS as IGP")
    opt.add_option('--ospf', action="store_true", default=False,
                   help="Use OSPF as IGP")

    options, arguments = opt.parse_args()
    config.add_logging(console_debug=options.debug)

    #### Main code
    if not options.file:
        # BUGFIX: exit non-zero -- a missing mandatory argument is an error;
        # previously sys.exit(0) reported success to the calling shell.
        LOG.warning("Please specify topology file")
        sys.exit(1)

    if options.debug:
        # BUGFIX: this used to be `logging.setLevel(logging.DEBUG)`, which
        # raises AttributeError (the logging *module* has no setLevel) and
        # ran unconditionally. Set the level on our logger, only for --debug.
        LOG.setLevel(logging.DEBUG)

    #TODO: if topology file doesn't exist, then try inside lib/examples/topologies/
    f_name = options.file

    igp = None
    if options.ospf:
        igp = "ospf"
    if options.isis:
        igp = "isis"

    use_junosphere = (options.junos or options.junosphere)

    inet = Internet(netkit=options.netkit,
                    cbgp=options.cbgp, dynagen=options.dynagen,
                    junosphere=use_junosphere,
                    junosphere_olive=options.junosphere_olive,
                    olive=options.olive,
                    policy_file=options.bgp_policy, deploy=options.deploy,
                    libvirt=options.libvirt,
                    olive_qemu_patched=options.olive_qemu_patched, igp=igp)

    inet.load(f_name)
    inet.add_dns()
    inet.compile()

    if options.plot:
        inet.plot()
    if options.deploy:
        inet.deploy()
    if options.verify:
        inet.verify()

    # finally, save the network (used by later verification runs)
    inet.save()
    inet.dump()
if __name__ == "__main__":
    # Swallow Ctrl-C so interactive interruption exits without a traceback;
    # any other exception propagates normally.
    try:
        main()
    except KeyboardInterrupt:
        pass
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP CERN service views."""
import ldap
from ldap import LDAPError
from flask import jsonify, request
from . import blueprint
from cap.modules.access.utils import login_required
# Public CERN LDAP endpoint used for read-only lookups.
LDAP_SERVER_URL = 'ldap://xldap.cern.ch'
# User attributes of interest. NOTE: _ldap() currently requests only
# ['mail'] for user queries; this list is the fuller alternative.
LDAP_USER_RESP_FIELDS = [
    'cn',
    'displayName',
    'mail',
    'memberOf',
    'company',
    'department',
    'cernAccountType',
    'objectClass'
]
# Attributes requested for e-group searches.
LDAP_EGROUP_RESP_FIELDS = [
    'cn',
    'displayName',
    'description',
    'mail',
    'member',
    'objectClass'
]
def _ldap(query, sf=None, by=None):
    """Query the CERN LDAP server.

    :param query: substring to match.
    :param sf: search field for e-group queries (defaults handled by caller).
    :param by: 'mail' searches primary-account users; anything else searches
               the e-groups tree.
    :returns: tuple of (results or error message, status code).
    """
    lc = ldap.initialize(LDAP_SERVER_URL)

    # different arguments depending on the query type
    if by == 'mail':
        ldap_fields = ['mail']  # LDAP_USER_RESP_FIELDS alternative
        search_at = 'OU=Users,OU=Organic Units,DC=cern,DC=ch'
        ldap_query = '(&(cernAccountType=Primary)(mail=*{}*))' \
            .format(query)
    else:
        ldap_fields = LDAP_EGROUP_RESP_FIELDS
        search_at = 'OU=e-groups,OU=Workgroups,DC=cern,DC=ch'
        ldap_query = '{}=*{}*'.format(sf, query)

    try:
        # Paged search capped at 7 entries (autocomplete-sized result list).
        lc.search_ext(
            search_at, ldap.SCOPE_ONELEVEL, ldap_query, ldap_fields,
            serverctrls=[ldap.controls.SimplePagedResultsControl(
                True, size=7, cookie='')
            ]
        )
        status = 200
        data = lc.result()[1]
    except LDAPError as err:
        status = 500
        # BUGFIX: LDAPError has no reliable `.message` attribute (a
        # Python-2-only convention); str(err) always yields a description.
        data = str(err)
    return data, status
@blueprint.route('/ldap/user/mail')
@login_required
def ldap_user_by_mail():
    """Return mail addresses of primary-account users matching the query."""
    query = request.args.get('query', None)
    if not query:
        return jsonify([])

    results, _ = _ldap(query, by='mail')
    # Each LDAP entry is (dn, attrs); keep the first 'mail' value.
    mails = [entry[1]['mail'][0] for entry in results]
    return jsonify(mails)
@blueprint.route('/ldap/egroup/mail')
@login_required
def ldap_egroup_mail():
    """Return mail addresses of e-groups matching the query."""
    query = request.args.get('query', None)
    sf = request.args.get('sf', 'cn')
    if not query:
        return jsonify([])

    results, _ = _ldap(query, sf, by='egroup')
    # Each LDAP entry is (dn, attrs); keep the first 'mail' value.
    mails = [entry[1]['mail'][0] for entry in results]
    return jsonify(mails)
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq cat --hostname`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.dbwrappers.host import hostname_to_host
from aquilon.worker.dbwrappers.resources import get_resource
from aquilon.worker.templates.base import Plenary
from aquilon.worker.templates.host import PlenaryToplevelHost, PlenaryHostData
class CommandCatHostname(BrokerCommand):
    """Show the plenary template of a host, a host resource, or the host's
    data plenary -- either the stored copy or freshly generated content."""

    required_parameters = ["hostname"]

    def render(self, session, logger, hostname, data, generate, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbresource = get_resource(session, dbhost, **arguments)
        if dbresource:
            plenary = Plenary.get_plenary(dbresource, logger=logger)
        elif data:
            plenary = PlenaryHostData(dbhost, logger=logger)
        else:
            plenary = PlenaryToplevelHost(dbhost, logger=logger)
        # --generate returns freshly built content, otherwise the on-disk copy.
        return plenary._generate_content() if generate else plenary.read()
'''
// Copyright (c) 2014 Anonymized. All rights reserved.
//
// Code submitted as supplementary material for manuscript:
// "Dependency Recurrent Neural Language Models for Sentence Completion"
// Do not redistribute.
Created on Aug 4, 2014
Take a corpus in the JSON format obtained from Stanford and convert it to the structure below:
arg1 = input
arg2 = output
corpus = (list of sentences)
sentence = (list of unrolls)
unroll = (list of tokens)
token = (map containing: index in sentence, string, discount, outDep)
outDep is the dependency going from the current token to the next word on the path
the last token on the path (leaf node) has a LEAF outEdge
'''
import networkx
import json
import sys
from collections import Counter
import glob
import os
import os.path
def extractUnrolls(sentenceDAG):
    """Turn one sentence dependency DAG into its list of unrolls.

    An unroll is one root-to-leaf dependency path, encoded as a list of
    [tokenNo, word, discount, outDep] entries where `discount` is the
    number of root-to-leaf paths the token appears on and `outDep` is the
    dependency label to the next token on the path ("LEAF" for the last).
    """
    unrolls = []
    # so each unroll is a path from ROOT to the leaves.
    root2leafPaths = []
    # this counts the number of times a node appears in the path
    discountFactors = Counter()
    # traverse all tokens to find the root and the leaves:
    leaves = []
    root = None
    for tokenNo in sentenceDAG.nodes():
        # if a token is a leaf (avoid punctuation which has no incoming ones):
        if sentenceDAG.out_degree(tokenNo) == 0 and sentenceDAG.in_degree(tokenNo) > 0:
            leaves.append(tokenNo)
        # the root has outgoing edges but no incoming ones
        if sentenceDAG.in_degree(tokenNo) == 0 and sentenceDAG.out_degree(tokenNo) > 0:
            root = tokenNo
    #print "leaves:" + str(leaves)
    #print "root:" + str(root)
    for leaf in leaves:
        # let's get the path from ROOT:
        try:
            path = networkx.shortest_path(sentenceDAG, source=root, target=leaf)
            root2leafPaths.append(path)
            # add the discounts:
            for tok in path:
                discountFactors[tok] += 1
        except networkx.exception.NetworkXNoPath:
            # disconnected parse fragment -- report and keep the other paths
            print "path did not exist among tokens " + str(root) + " and " + str(leaf) + " in sentence:"
            print str(sentenceDAG)
    #print root2leafPaths
    #print discountFactors
    for path in root2leafPaths:
        unroll = []
        for idx_in_path, tokenNo in enumerate(path):
            #print sentenceDAG[tokenNo]
            word = sentenceDAG.node[tokenNo]['word']
            # the last word has the dummy out edge
            if idx_in_path == len(path)-1:
                outDep = "LEAF"
            else:
                outDep = sentenceDAG[tokenNo][path[idx_in_path+1]]["label"]
            unroll.append([tokenNo, word, discountFactors[tokenNo], outDep])
        unrolls.append(unroll)
    return unrolls
def constructDAG(sentence):
    """Build a networkx DiGraph for one parsed sentence.

    Nodes are token positions carrying the lower-cased word; edges run
    head -> dependent and carry the dependency label.
    """
    graph = networkx.DiGraph()
    for token_no, token in enumerate(sentence["tokens"]):
        graph.add_node(token_no, word=token["word"].lower())
    for dep in sentence["dependencies"]:
        graph.add_edge(dep["head"], dep["dep"], label=dep["label"])
    return graph
# Create the output path
os.mkdir(sys.argv[2])
threshold = int(sys.argv[3])
tokensOnly = False
# check if we are generating the text for the RNNs
if len(sys.argv) == 5 and sys.argv[4] == "TOKENS":
tokensOnly = True
threshold = float("inf")
tokensKeptCounter = 0
wordTypesKept = []
for filename in glob.glob(sys.argv[1]+ "/*"):
allSentences = []
jsonFile = open(filename)
sentences = json.loads(jsonFile.read())
jsonFile.close()
for sentence in sentences:
sentenceDAG = constructDAG(sentence)
if (len(sentenceDAG.nodes()) < threshold):
gutenbergCheck = False
nodes = sentenceDAG.nodes(data=True)
for node in nodes:
if node[1]["word"] == "gutenberg":
#print nodes
gutenbergCheck = True
if not gutenbergCheck:
tokensKeptCounter += len(nodes)
for node in nodes:
if node[1]["word"] not in wordTypesKept:
wordTypesKept.append( node[1]["word"])
if tokensOnly:
tokens = []
for node in nodes:
tokens.append(node[1]["word"])
allSentences.append(" ".join(tokens))
else:
unrolls = extractUnrolls(sentenceDAG)
allSentences.append(unrolls)
print "unique word types kept=" + str(len(wordTypesKept))
if tokensOnly:
with open(sys.argv[2] + "/" + os.path.basename(filename) + ".tokens.txt", "wb") as out:
out.write(("\n".join(allSentences)).encode('utf-8') + "\n")
else:
with open(sys.argv[2] + "/" + os.path.basename(filename) + ".unrolls.json", "wb") as out:
json.dump(allSentences, out)
print "tokens kept=" + str(tokensKeptCounter)
print "unique word types kept=" + str(len(wordTypesKept)) | unknown | codeparrot/codeparrot-clean | ||
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from ranstring import randomByteString
from zope.interface import implements
from twisted.internet import reactor, interfaces
from autobahn.websocket import WebSocketProtocol, \
WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
# 2^63 - This is the maximum imposed by the WS protocol
FRAME_SIZE = 0x7FFFFFFFFFFFFFFF
class RandomByteStreamProducer:
   """
   A Twisted Push Producer generating a stream of random octets sending out data
   in a WebSockets message frame.

   One huge binary frame (FRAME_SIZE budget) is started lazily; random 1 kB
   chunks are written while unpaused and a new frame is begun whenever the
   current frame's budget is exhausted.
   """

   implements(interfaces.IPushProducer)

   def __init__(self, proto):
      # proto: the WebSocket protocol instance this producer writes into.
      self.proto = proto
      self.started = False
      self.paused = False

   def pauseProducing(self):
      # Called by Twisted when the transport buffer is full.
      self.paused = True

   def resumeProducing(self):
      self.paused = False

      # Lazily open the (single) binary message and its first frame.
      if not self.started:
         self.proto.beginMessage(opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY)
         self.proto.beginMessageFrame(FRAME_SIZE)
         self.started = True

      # Push data until Twisted pauses us again.
      while not self.paused:
         data = randomByteString(1024)
         # sendMessageFrameData returns <= 0 once the frame budget is used up.
         if self.proto.sendMessageFrameData(data) <= 0:
            self.proto.beginMessageFrame(FRAME_SIZE)
            print "new frame started!"

   def stopProducing(self):
      pass
class StreamingProducerHashClientProtocol(WebSocketClientProtocol):
   """
   Streaming WebSockets client that generates stream of random octets
   sent to streaming WebSockets server, which computes a running SHA-256,
   which it will send every BATCH_SIZE octets back to us. This example
   uses a Twisted producer to produce the byte stream as fast as the
   receiver can consume, but not faster. Therefore, we don't need the
   application-level flow control as with the other examples.
   """

   def onOpen(self):
      # Connection established: register a push producer (streaming=True)
      # so Twisted pauses/resumes our byte stream automatically.
      self.count = 0
      producer = RandomByteStreamProducer(self)
      self.registerProducer(producer, True)
      producer.resumeProducing()

   def onMessage(self, message, binary):
      # Each incoming message is the server's digest for one completed batch.
      print "Digest for batch %d computed by server: %s" % (self.count, message)
      self.count += 1
if __name__ == '__main__':
   # Connect the streaming client to the local hash server and run the loop.
   client_factory = WebSocketClientFactory("ws://localhost:9000")
   client_factory.protocol = StreamingProducerHashClientProtocol
   connectWS(client_factory)
   reactor.run()
#!/usr/bin/python -tt
# Copyright 2011-2012 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from subprocess import Popen, PIPE
import binascii
import hashlib
import re
def _verify_signature(cert_filename, signature, data, algorithm):
    '''
    Attempts to verify a signature by comparing the digest in the signature
    vs the calculated digest using the data and algorithm provided. This
    is currently done in separate stages to avoid writing temp files to disk.
    :param cert_filename: local file path to certificate containing pubkey
    :param signature: str buffer containing hex signature
    :param data: str buffer to build digest from
    :param algorithm: algorithm name, ie: md5, sha1, sha256, etc..
    :returns dict with hex 'sig_digest' and 'data_digest' on success
    :raises ValueError if digests do not match
    '''
    sig_digest = _get_digest_from_signature(cert_filename=cert_filename,
                                            signature=signature,
                                            algorithm=algorithm)
    data_digest = _get_data_digest(data=data, algorithm=algorithm)
    # Compare hex forms so both values are printable in the error message.
    sig_digest = binascii.hexlify(sig_digest)
    data_digest = binascii.hexlify(data_digest)
    if sig_digest != data_digest:
        # BUGFIX: the digests are already hex here; the previous
        # str(...).encode('hex') double-hexed the message (and crashes on
        # Python 3, where str has no 'hex' codec).
        raise ValueError('Signature not verified. Signature digest:'
                         + str(sig_digest) + ", data_digest:"
                         + str(data_digest))
    return {'sig_digest': sig_digest, 'data_digest': data_digest}
def _get_data_signature(data_to_sign, algorithm, key_filename):
    '''
    Creates a signature for 'data_to_sign' using the provided algorithm
    and private key file path. ie: /keys/cloud-pk.pem
    :param data_to_sign: string buffer with data to be signed
    :param algorithm: algorithm name to be used in signature, ie:md5, sha256
    :param key_filename: local path to private key to be used in signing
    :returns signature string (hex encoded).
    :raises ValueError if any argument is empty
    '''
    if not data_to_sign or not algorithm or not key_filename:
        raise ValueError("Bad value passed to _get_data_signature."
                         "data:'{0}', algorithm:'{1}', key:'{2}'"
                         .format(data_to_sign, algorithm, key_filename))
    # Hash locally, then sign only the digest with `openssl pkeyutl`;
    # the digest is piped via stdin to avoid writing temp files.
    digest = _get_digest_algorithm_from_string(algorithm)
    digest.update(data_to_sign)
    popen = Popen(['openssl', 'pkeyutl', '-sign', '-inkey',
                   key_filename, '-pkeyopt', 'digest:' + digest.name],
                  stdin=PIPE, stdout=PIPE)
    (stdout, _) = popen.communicate(digest.digest())
    # Hex-encode so the signature is a printable string.
    return binascii.hexlify(stdout)
def _get_digest_from_signature(cert_filename, signature, algorithm):
    '''
    Attempts to recover the original digest from the signature provided
    using the certificate provided. ie: /keys/cloud-cert.pem
    :param cert_filename: local file path to certificate containing pubkey
    :param signature: str buffer containing hex signature
    :param algorithm: algorithm name used to size the recovered digest
    :returns digest string (raw bytes)
    :raises ValueError if cert_filename or signature is empty
    '''
    digest_type = _get_digest_algorithm_from_string(algorithm)
    if not cert_filename or not signature:
        raise ValueError("Bad value passed to _get_digest_from_signature. "
                         "cert:'{0}', signature:'{1}'"
                         .format(cert_filename, signature))
    # `openssl rsautl -verify` recovers the signed blob using the cert's
    # public key; the signature is piped in as raw bytes.
    popen = Popen(['openssl', 'rsautl', '-verify', '-certin', '-inkey',
                   cert_filename],
                  stdin=PIPE, stdout=PIPE)
    (stdout, _) = popen.communicate(binascii.unhexlify(signature))
    #The digest is the last element in the returned output.
    #The size/offset will vary depending on the digest algorithm
    return stdout[-int(digest_type.digestsize):]
def _get_data_digest(data, algorithm):
    '''
    Returns digest for 'data' provided using the 'algorithm' provided.
    :param data: str buffer to build digest from
    :param algorithm: algorithm name, ie: md5, sha1, sha256, etc..
    :returns digest string (raw bytes)
    :raises ValueError if data is empty
    '''
    if not data:
        raise ValueError('No data provided to _get_data_digest, data:"{0}"'
                         .format(str(data)))
    hasher = _get_digest_algorithm_from_string(algorithm)
    hasher.update(data)
    return hasher.digest()
def _get_digest_algorithm_from_string(algorithm):
'''
Helper method to convert a string to a hashlib digest algorithm
:param algorithm: string representing algorithm, ie: md5, sha1, sha256
:returns hashlib builtin digest function ie: hashlib.md5()
'''
#Manifest prepends text to algorithm, remove it here
r_algorithm = re.search('(?:sha.*|md5.*)', algorithm.lower()).group()
try:
digest = getattr(hashlib, str(r_algorithm))
except AttributeError as AE:
AE.args = ['Invalid altorithm type:"' + str(r_algorithm)
+ '" from:"' + str(algorithm) + '". ' + AE.message]
raise AE
return digest()
def _decrypt_hex_key(hex_encrypted_key, key_filename):
    '''
    Attempts to decrypt 'hex_encrypted_key' using key at local
    file path 'key_filename'.
    :param hex_encrypted_key: hex to decrypt
    :param key_filename: local file path to key used to decrypt
    :returns decrypted key (hex string)
    :raises ValueError if inputs are empty or the result is not valid hex
    '''
    if not hex_encrypted_key:
        raise ValueError('Empty hex_encrypted_key passed to decrypt')
    if not key_filename:
        raise ValueError('Empty key_filename passed to decrypt')
    #borrowed from euca2ools...
    # RSA/PKCS#1 decrypt via openssl; ciphertext is piped in as raw bytes.
    popen = Popen(['openssl', 'rsautl', '-decrypt', '-pkcs',
                   '-inkey', key_filename], stdin=PIPE, stdout=PIPE)
    binary_encrypted_key = binascii.unhexlify(hex_encrypted_key)
    (decrypted_key, _) = popen.communicate(binary_encrypted_key)
    try:
        # Make sure it might actually be an encryption key.
        # (a valid key decrypts to a hex string; anything else means the
        # wrong private key or corrupted ciphertext)
        int(decrypted_key, 16)
        return decrypted_key
    except ValueError as VE:
        VE.args = ['Failed to decrypt:"' + str(hex_encrypted_key)
                   + '", and keyfile:"' + str(key_filename) + '".'
                   + VE.message]
        raise VE
def _calc_digest_for_fileobj(file_obj, algorithm, chunk_size=None):
    '''
    Calculate and return the hex digest of a file-like object using the
    hashlib 'algorithm' provided, reading in fixed-size chunks.
    :param file_obj: file like obj to read compute digest for
    :param algorithm: string representing hashlib type(sha1, md5, etc)
    :param chunk_size: # of bytes to read per read() (default 8192)
    :returns hex digest string
    '''
    chunk_size = chunk_size or 8192
    digest = _get_digest_algorithm_from_string(algorithm)
    chunk = file_obj.read(chunk_size)
    while chunk:
        digest.update(chunk)
        chunk = file_obj.read(chunk_size)
    return digest.hexdigest()
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_account_facts
short_description: Gather facts about the Vultr account.
description:
- Gather facts about account balance, charges and payments.
version_added: "2.5"
deprecated:
removed_in: "2.12"
why: Transformed into an info module.
alternative: Use M(vultr_account_info) instead.
author: "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr account facts
local_action:
module: vultr_account_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_account_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_account_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
balance:
description: Your account balance.
returned: success
type: float
sample: -214.69
pending_charges:
description: Charges pending.
returned: success
type: float
sample: 57.03
last_payment_date:
description: Date of the last payment.
returned: success
type: str
sample: "2017-08-26 12:47:48"
last_payment_amount:
description: The amount of the last payment transaction.
returned: success
type: float
sample: -250.0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrAccountFacts(Vultr):
    # Thin wrapper around the Vultr API for the account-info endpoint.

    def __init__(self, module):
        super(AnsibleVultrAccountFacts, self).__init__(module, "vultr_account_facts")
        # Map API fields into returned facts; monetary values are coerced
        # to float by the base class via 'convert_to'.
        self.returns = {
            'balance': dict(convert_to='float'),
            'pending_charges': dict(convert_to='float'),
            'last_payment_date': dict(),
            'last_payment_amount': dict(convert_to='float'),
        }

    def get_account_info(self):
        # GET /v1/account/info: balance, pending charges, last payment.
        return self.api_query(path="/v1/account/info")
def main():
    """Module entry point: gather Vultr account facts and exit via Ansible."""
    module = AnsibleModule(
        argument_spec=vultr_argument_spec(),
        supports_check_mode=True,
    )

    facts_gatherer = AnsibleVultrAccountFacts(module)
    result = facts_gatherer.get_result(facts_gatherer.get_account_info())
    module.exit_json(
        ansible_facts={'vultr_account_facts': result['vultr_account_facts']},
        **result
    )
# Run only when executed directly (the normal Ansible invocation path).
if __name__ == '__main__':
    main()
package kotlinx.coroutines
/**
 * A runnable task for [CoroutineDispatcher.dispatch].
 *
 * Equivalent to the type `() -> Unit`; being a `fun interface`, a lambda
 * converts to it via SAM conversion.
 */
public actual fun interface Runnable {
    /**
     * Executes the task.
     *
     * @suppress
     */
    public actual fun run()
}
// Kept only so previously compiled binaries that referenced this factory
// keep linking; DeprecationLevel.HIDDEN removes it from new source
// resolution (lambdas now convert via the `fun interface` SAM conversion).
@Deprecated(
    "Preserved for binary compatibility, see https://github.com/Kotlin/kotlinx.coroutines/issues/4309",
    level = DeprecationLevel.HIDDEN
)
public inline fun Runnable(crossinline block: () -> Unit): Runnable =
    object : Runnable {
        override fun run() {
            block()
        }
    }
import json
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from crits.core.handlers import get_item_names
from crits.core.user_tools import user_can_view_data
from crits.core.user_tools import user_is_admin
from crits.raw_data.forms import UploadRawDataFileForm, UploadRawDataForm
from crits.raw_data.forms import NewRawDataTypeForm
from crits.raw_data.handlers import update_raw_data_tool_details
from crits.raw_data.handlers import update_raw_data_tool_name
from crits.raw_data.handlers import update_raw_data_type
from crits.raw_data.handlers import handle_raw_data_file
from crits.raw_data.handlers import delete_raw_data, get_raw_data_details
from crits.raw_data.handlers import generate_raw_data_jtable
from crits.raw_data.handlers import generate_raw_data_csv, new_inline_comment
from crits.raw_data.handlers import generate_inline_comments
from crits.raw_data.handlers import generate_raw_data_versions
from crits.raw_data.handlers import get_id_from_link_and_version
from crits.raw_data.handlers import add_new_raw_data_type, new_highlight
from crits.raw_data.handlers import update_raw_data_highlight_comment
from crits.raw_data.handlers import delete_highlight
from crits.raw_data.handlers import update_raw_data_highlight_date
from crits.raw_data.raw_data import RawDataType
@user_passes_test(user_can_view_data)
def raw_data_listing(request,option=None):
    """
    Generate RawData Listing template.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param option: Whether or not we should generate a CSV (yes if option is "csv")
    :type option: str
    :returns: :class:`django.http.HttpResponse`
    """
    # CSV export short-circuits; any other option is handled by the jTable
    # generator (which also renders the listing page itself).
    if option == "csv":
        return generate_raw_data_csv(request)
    return generate_raw_data_jtable(request, option)
@user_passes_test(user_can_view_data)
def set_raw_data_tool_details(request, _id):
    """
    Set the RawData tool details. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_raw_data_tool_details(_id,
                                          request.POST['details'],
                                          request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def set_raw_data_tool_name(request, _id):
    """
    Set the RawData tool name. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_raw_data_tool_name(_id,
                                       request.POST['name'],
                                       request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def set_raw_data_type(request, _id):
    """
    Set the RawData datatype. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_raw_data_type(_id,
                                  request.POST['data_type'],
                                  request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def set_raw_data_highlight_comment(request, _id):
    """
    Set a highlight comment in RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_raw_data_highlight_comment(_id,
                                               request.POST['comment'],
                                               request.POST['line'],
                                               request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def set_raw_data_highlight_date(request, _id):
    """
    Set a highlight date in RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_raw_data_highlight_date(_id,
                                            request.POST['date'],
                                            request.POST['line'],
                                            request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def add_inline_comment(request, _id):
    """
    Add an inline comment to RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method == 'POST':
        comment = request.POST['comment']
        analyst = request.user.username
        # NOTE(review): unlike the sibling views (e.g. add_highlight), the
        # line number is read from the query string (GET) rather than the
        # POST body -- confirm the client really sends it that way.
        line_num = request.GET.get('line', 1)
        return HttpResponse(json.dumps(new_inline_comment(_id,
                                                          comment,
                                                          line_num,
                                                          analyst)),
                            mimetype="application/json")
    else:
        error = "Expected POST"
        return render_to_response("error.html",
                                  {"error" : error },
                                  RequestContext(request))
@user_passes_test(user_can_view_data)
def add_highlight(request, _id):
    """
    Set a line as highlighted for RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = new_highlight(_id,
                           request.POST.get('line', 1),
                           request.POST.get('line_data', None),
                           request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def remove_highlight(request, _id):
    """
    Remove a line highlight from RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = delete_highlight(_id,
                              request.POST.get('line', 1),
                              request.user.username)
    return HttpResponse(json.dumps(result), mimetype="application/json")
@user_passes_test(user_can_view_data)
def get_inline_comments(request, _id):
    """
    Get inline comments for RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    return HttpResponse(json.dumps(generate_inline_comments(_id)),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def get_raw_data_versions(request, _id):
    """
    Get a list of versions for RawData. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    return HttpResponse(json.dumps(generate_raw_data_versions(_id)),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def raw_data_details(request, _id):
    """
    Generate RawData details page.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    analyst = request.user.username
    (new_template, args) = get_raw_data_details(_id, analyst)
    # The details generator may override the default template.
    template = new_template if new_template else 'raw_data_details.html'
    return render_to_response(template,
                              args,
                              RequestContext(request))
@user_passes_test(user_can_view_data)
def details_by_link(request, link):
    """
    Generate RawData details page by link.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param link: The LinkId of the RawData.
    :type link: str
    :returns: :class:`django.http.HttpResponse`
    """
    # Default to version 1 when no explicit version is requested.
    version = request.GET.get('version', 1)
    raw_data_id = get_id_from_link_and_version(link, version)
    return raw_data_details(request, raw_data_id)
@user_passes_test(user_can_view_data)
def upload_raw_data(request, link_id=None):
    """
    Upload new RawData to CRITs.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param link_id: The LinkId of RawData if this is a new version upload.
    :type link_id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response('error.html',
                                  {'error': "Expected POST."},
                                  RequestContext(request))

    if 'filedata' in request.FILES:
        form = UploadRawDataFileForm(request.user,
                                     request.POST,
                                     request.FILES)
        filedata = request.FILES['filedata']
        data = filedata.read() # XXX: Should be using chunks here.
        has_file = True
    else:
        form = UploadRawDataForm(request.user, request.POST)
        data = request.POST.get('data', None)
        has_file = False

    def _respond(jdump):
        # Non-file (AJAX) uploads get raw JSON; file uploads are posted via
        # an iframe, so the JSON must be wrapped in an HTML page instead.
        if not has_file:
            return HttpResponse(jdump, mimetype="application/json")
        return render_to_response('file_upload_response.html',
                                  {'response': jdump},
                                  RequestContext(request))

    if not form.is_valid():
        return _respond(json.dumps({'success': False,
                                    'form': form.as_table()}))

    # NOTE: the original code contained a no-op self-assignment
    # "link_id = link_id" here; it has been removed.
    source = form.cleaned_data.get('source')
    user = request.user.username
    description = form.cleaned_data.get('description', '')
    title = form.cleaned_data.get('title', None)
    tool_name = form.cleaned_data.get('tool_name', '')
    tool_version = form.cleaned_data.get('tool_version', '')
    tool_details = form.cleaned_data.get('tool_details', '')
    data_type = form.cleaned_data.get('data_type', None)
    copy_rels = request.POST.get('copy_relationships', False)
    bucket_list = form.cleaned_data.get('bucket_list')
    ticket = form.cleaned_data.get('ticket')
    method = form.cleaned_data.get('method', '') or 'Upload'
    reference = form.cleaned_data.get('reference', '')
    status = handle_raw_data_file(data, source, user,
                                  description, title, data_type,
                                  tool_name, tool_version, tool_details,
                                  link_id,
                                  method=method,
                                  reference=reference,
                                  copy_rels=copy_rels,
                                  bucket_list=bucket_list,
                                  ticket=ticket)
    if status['success']:
        jdump = json.dumps({
            'message': 'raw_data uploaded successfully! <a href="%s">View raw_data</a>'
            % reverse('crits.raw_data.views.raw_data_details',
                      args=[status['_id']]), 'success': True})
    else:
        jdump = json.dumps({'success': False,
                            'message': status['message']})
    return _respond(jdump)
@user_passes_test(user_is_admin)
def remove_raw_data(request, _id):
    """
    Remove RawData from CRITs.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the RawData to remove.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`
    """
    result = delete_raw_data(_id, '%s' % request.user.username)
    if result:
        return HttpResponseRedirect(reverse('crits.raw_data.views.raw_data_listing'))
    else:
        # Pass RequestContext for consistency with the other error renders
        # in this module (the original omitted it here).
        return render_to_response('error.html',
                                  {'error': "Could not delete raw_data"},
                                  RequestContext(request))
@user_passes_test(user_can_view_data)
def new_raw_data_type(request):
    """
    Add a new RawData datatype to CRITs. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method == 'POST' and request.is_ajax():
        form = NewRawDataTypeForm(request.POST)
        analyst = request.user.username
        if form.is_valid():
            result = add_new_raw_data_type(form.cleaned_data['data_type'],
                                           analyst)
            if result:
                message = {'message': '<div>Raw Data Type added successfully!</div>',
                           'success': True}
            else:
                message = {'message': '<div>Raw Data Type addition failed!</div>',
                           'success': False}
        else:
            message = {'form': form.as_table()}
        return HttpResponse(json.dumps(message),
                            mimetype="application/json")
    # Pass RequestContext for consistency with the other error renders in
    # this module (the original omitted it here).
    return render_to_response('error.html',
                              {'error':'Expected AJAX POST'},
                              RequestContext(request))
@user_passes_test(user_can_view_data)
def get_raw_data_type_dropdown(request):
    """
    Generate RawData datetypes dropdown information. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method == 'POST' and request.is_ajax():
        # Collect just the type names for the dropdown widget.
        result = {'data': [dt.name for dt in get_item_names(RawDataType)]}
        return HttpResponse(json.dumps(result),
                            mimetype="application/json")
    error = "Expected AJAX POST"
    return render_to_response("error.html",
                              {'error': error},
                              RequestContext(request))
"""Tests for unix_events.py."""
import contextlib
import errno
import io
import multiprocessing
from multiprocessing.util import _cleanup_tests as multiprocessing_cleanup_tests
import os
import signal
import socket
import stat
import sys
import time
import unittest
from unittest import mock
from test import support
from test.support import os_helper, warnings_helper
from test.support import socket_helper
from test.support import wait_process
from test.support import hashlib_helper
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import unix_events
from test.test_asyncio import utils as test_utils
def tearDownModule():
    # Reset the global asyncio event loop policy so that later test modules
    # start from a clean state.
    asyncio.events._set_event_loop_policy(None)
MOCK_ANY = mock.ANY
def EXITCODE(exitcode):
    """Encode a plain exit code as the tests' synthetic return-status value."""
    return exitcode + 32768
def SIGNAL(signum):
    """Encode a termination signal as the tests' synthetic return-status value.

    ``signum`` must lie in the range 1..68; anything else is a test bug.
    """
    if not (1 <= signum <= 68):
        raise AssertionError(f'invalid signum {signum}')
    return 32768 - signum
def close_pipe_transport(transport):
    # Don't call transport.close() because the event loop and the selector
    # are mocked
    pipe = transport._pipe
    if pipe is not None:
        pipe.close()
        transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
    """Tests for add/remove_signal_handler() on SelectorEventLoop.

    Most cases patch ``asyncio.unix_events.signal`` with a mock so that no
    real signal handlers are installed; the mock must mirror ``NSIG`` and
    ``valid_signals`` for the loop's validation code to run.
    """

    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)

    def test_check_signal(self):
        # Non-int signum -> TypeError; out-of-range signum -> ValueError.
        self.assertRaises(
            TypeError, self.loop._check_signal, '1')
        self.assertRaises(
            ValueError, self.loop._check_signal, signal.NSIG + 1)

    def test_handle_signal_no_handler(self):
        # A signal with no registered handler is silently ignored.
        self.loop._handle_signal(signal.NSIG + 1)

    def test_handle_signal_cancelled_handler(self):
        # A cancelled handle triggers removal of the signal handler.
        h = asyncio.Handle(mock.Mock(), (),
                           loop=mock.Mock())
        h.cancel()
        self.loop._signal_handlers[signal.NSIG + 1] = h
        self.loop.remove_signal_handler = mock.Mock()
        self.loop._handle_signal(signal.NSIG + 1)
        self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_setup_error(self, m_signal):
        # set_wakeup_fd() failing is reported as RuntimeError.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        m_signal.set_wakeup_fd.side_effect = ValueError

        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_coroutine_error(self, m_signal):
        m_signal.NSIG = signal.NSIG

        async def simple_coroutine():
            pass

        # callback must not be a coroutine function
        coro_func = simple_coroutine
        coro_obj = coro_func()
        self.addCleanup(coro_obj.close)
        for func in (coro_func, coro_obj):
            self.assertRaisesRegex(
                TypeError, 'coroutines cannot be used with add_signal_handler',
                self.loop.add_signal_handler,
                signal.SIGINT, func)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler(self, m_signal):
        # Successful registration stores an asyncio.Handle for the signal.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        cb = lambda: True

        self.loop.add_signal_handler(signal.SIGHUP, cb)
        h = self.loop._signal_handlers.get(signal.SIGHUP)
        self.assertIsInstance(h, asyncio.Handle)
        self.assertEqual(h._callback, cb)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_install_error(self, m_signal):
        # A non-EINVAL OSError from signal.signal() propagates unchanged.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        def set_wakeup_fd(fd):
            if fd == -1:
                raise ValueError()
        m_signal.set_wakeup_fd = set_wakeup_fd

        class Err(OSError):
            errno = errno.EFAULT
        m_signal.signal.side_effect = Err

        self.assertRaises(
            Err,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error2(self, m_logging, m_signal):
        # EINVAL while other handlers exist: wakeup fd is NOT reset.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err

        self.loop._signal_handlers[signal.SIGHUP] = lambda: True
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(1, m_signal.set_wakeup_fd.call_count)

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error3(self, m_logging, m_signal):
        # EINVAL with no other handlers: wakeup fd IS reset (second call).
        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(2, m_signal.set_wakeup_fd.call_count)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler(self, m_signal):
        # Removing the last handler restores SIG_DFL and resets the wakeup fd.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGHUP))
        self.assertTrue(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_2(self, m_signal):
        # SIGINT restores default_int_handler; wakeup fd untouched while
        # another handler (SIGHUP) is still registered.
        m_signal.NSIG = signal.NSIG
        m_signal.SIGINT = signal.SIGINT
        m_signal.valid_signals = signal.valid_signals

        self.loop.add_signal_handler(signal.SIGINT, lambda: True)
        self.loop._signal_handlers[signal.SIGHUP] = object()
        m_signal.set_wakeup_fd.reset_mock()

        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGINT))
        self.assertFalse(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGINT, m_signal.default_int_handler),
            m_signal.signal.call_args[0])

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
        # Errors while resetting the wakeup fd are logged, not raised.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        m_signal.set_wakeup_fd.side_effect = ValueError

        self.loop.remove_signal_handler(signal.SIGHUP)
        self.assertTrue(m_logging.info)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error(self, m_signal):
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        m_signal.signal.side_effect = OSError

        self.assertRaises(
            OSError, self.loop.remove_signal_handler, signal.SIGHUP)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error2(self, m_signal):
        # EINVAL from signal.signal() is translated to RuntimeError.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err

        self.assertRaises(
            RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)

    @mock.patch('asyncio.unix_events.signal')
    def test_close(self, m_signal):
        # Closing the loop removes all registered signal handlers.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)

        self.assertEqual(len(self.loop._signal_handlers), 2)

        m_signal.set_wakeup_fd.reset_mock()

        self.loop.close()

        self.assertEqual(len(self.loop._signal_handlers), 0)
        m_signal.set_wakeup_fd.assert_called_once_with(-1)

    @mock.patch('asyncio.unix_events.sys')
    @mock.patch('asyncio.unix_events.signal')
    def test_close_on_finalizing(self, m_signal, m_sys):
        # During interpreter finalization, handler removal is skipped with a
        # ResourceWarning instead of touching the signal module.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        self.assertEqual(len(self.loop._signal_handlers), 1)
        m_sys.is_finalizing.return_value = True
        m_signal.signal.reset_mock()

        with self.assertWarnsRegex(ResourceWarning,
                                   "skipping signal handlers removal"):
            self.loop.close()

        self.assertEqual(len(self.loop._signal_handlers), 0)
        self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
                     'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
    """Argument validation and socket handling for create_unix_server() and
    create_unix_connection() on SelectorEventLoop."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)

    @socket_helper.skip_unless_bind_unix_socket
    def test_create_unix_server_existing_path_sock(self):
        # A stale socket file left behind by a previous listener is reused.
        with test_utils.unix_socket_path() as path:
            sock = socket.socket(socket.AF_UNIX)
            sock.bind(path)
            sock.listen(1)
            sock.close()

            coro = self.loop.create_unix_server(lambda: None, path)
            srv = self.loop.run_until_complete(coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())

    @socket_helper.skip_unless_bind_unix_socket
    def test_create_unix_server_pathlike(self):
        with test_utils.unix_socket_path() as path:
            path = os_helper.FakePath(path)
            srv_coro = self.loop.create_unix_server(lambda: None, path)
            srv = self.loop.run_until_complete(srv_coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())

    def test_create_unix_connection_pathlike(self):
        with test_utils.unix_socket_path() as path:
            path = os_helper.FakePath(path)
            coro = self.loop.create_unix_connection(lambda: None, path)
            with self.assertRaises(FileNotFoundError):
                # If path-like object weren't supported, the exception would be
                # different.
                self.loop.run_until_complete(coro)

    def test_create_unix_server_existing_path_nonsock(self):
        # A regular (non-socket) file at the path must not be clobbered.
        path = test_utils.gen_unix_socket_path()
        self.addCleanup(os_helper.unlink, path)
        # create the file
        open(path, "wb").close()

        coro = self.loop.create_unix_server(lambda: None, path)
        with self.assertRaisesRegex(OSError,
                                    'Address.*is already in use'):
            self.loop.run_until_complete(coro)

    def test_create_unix_server_ssl_bool(self):
        coro = self.loop.create_unix_server(lambda: None, path='spam',
                                            ssl=True)
        with self.assertRaisesRegex(TypeError,
                                    'ssl argument must be an SSLContext'):
            self.loop.run_until_complete(coro)

    def test_create_unix_server_nopath_nosock(self):
        coro = self.loop.create_unix_server(lambda: None, path=None)
        with self.assertRaisesRegex(ValueError,
                                    'path was not specified, and no sock'):
            self.loop.run_until_complete(coro)

    def test_create_unix_server_path_inetsock(self):
        # An AF_INET socket is rejected where a UNIX stream socket is needed.
        sock = socket.socket()
        with sock:
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)

    def test_create_unix_server_path_dgram(self):
        # A datagram UNIX socket is likewise rejected.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        with sock:
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'no socket.SOCK_NONBLOCK (linux only)')
    @socket_helper.skip_unless_bind_unix_socket
    def test_create_unix_server_path_stream_bittype(self):
        # SOCK_STREAM combined with extra bit flags must still be accepted.
        fn = test_utils.gen_unix_socket_path()
        self.addCleanup(os_helper.unlink, fn)

        sock = socket.socket(socket.AF_UNIX,
                             socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        with sock:
            sock.bind(fn)
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            srv = self.loop.run_until_complete(coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())

    def test_create_unix_server_ssl_timeout_with_plain_sock(self):
        coro = self.loop.create_unix_server(lambda: None, path='spam',
                                            ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_path_inetsock(self):
        sock = socket.socket()
        with sock:
            coro = self.loop.create_unix_connection(lambda: None,
                                                    sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)

    @mock.patch('asyncio.unix_events.socket')
    def test_create_unix_server_bind_error(self, m_socket):
        # Ensure that the socket is closed on any bind error
        sock = mock.Mock()
        m_socket.socket.return_value = sock
        sock.bind.side_effect = OSError
        coro = self.loop.create_unix_server(lambda: None, path="/test")
        with self.assertRaises(OSError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)

        sock.bind.side_effect = MemoryError
        coro = self.loop.create_unix_server(lambda: None, path="/test")
        with self.assertRaises(MemoryError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)

    def test_create_unix_connection_path_sock(self):
        # Giving both a path and a sock is an error.
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, sock=object())
        with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_nopath_nosock(self):
        coro = self.loop.create_unix_connection(
            lambda: None, None)
        with self.assertRaisesRegex(ValueError,
                                    'no path and sock were specified'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_nossl_serverhost(self):
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, server_hostname='spam')
        with self.assertRaisesRegex(ValueError,
                                    'server_hostname is only meaningful'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_ssl_noserverhost(self):
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, ssl=True)

        with self.assertRaisesRegex(
                ValueError, 'you have to pass server_hostname when using ssl'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
        coro = self.loop.create_unix_connection(lambda: None, path='spam',
                                                ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
                     'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
    """Tests for the os.sendfile()-based _sock_sendfile_native path of the
    selector event loop, including error propagation and cancellation."""

    # Payload written once per class into os_helper.TESTFN.
    DATA = b"12345abcde" * 16 * 1024  # 160 KiB

    class MyProto(asyncio.Protocol):
        """Minimal receiving protocol that records data and lifecycle."""

        def __init__(self, loop):
            self.started = False
            self.closed = False
            self.data = bytearray()
            self.fut = loop.create_future()
            self.transport = None
            self._ready = loop.create_future()

        def connection_made(self, transport):
            self.started = True
            self.transport = transport
            self._ready.set_result(None)

        def data_received(self, data):
            self.data.extend(data)

        def connection_lost(self, exc):
            self.closed = True
            self.fut.set_result(None)

        async def wait_closed(self):
            await self.fut

    @classmethod
    def setUpClass(cls):
        with open(os_helper.TESTFN, 'wb') as fp:
            fp.write(cls.DATA)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        os_helper.unlink(os_helper.TESTFN)
        super().tearDownClass()

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
        self.file = open(os_helper.TESTFN, 'rb')
        self.addCleanup(self.file.close)
        super().setUp()

    def make_socket(self, cleanup=True):
        # Small send/receive buffers so sendfile cannot complete in one call.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(False)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
        if cleanup:
            self.addCleanup(sock.close)
        return sock

    def run_loop(self, coro):
        return self.loop.run_until_complete(coro)

    def prepare(self):
        # Connect a client socket to a local echo server; returns the
        # connected client socket and the server-side protocol instance.
        sock = self.make_socket()
        proto = self.MyProto(self.loop)
        port = socket_helper.find_unused_port()
        srv_sock = self.make_socket(cleanup=False)
        srv_sock.bind((socket_helper.HOST, port))
        server = self.run_loop(self.loop.create_server(
            lambda: proto, sock=srv_sock))
        self.run_loop(self.loop.sock_connect(sock, (socket_helper.HOST, port)))
        self.run_loop(proto._ready)

        def cleanup():
            proto.transport.close()
            self.run_loop(proto.wait_closed())

            server.close()
            self.run_loop(server.wait_closed())

        self.addCleanup(cleanup)

        return sock, proto

    def test_sock_sendfile_not_available(self):
        sock, proto = self.prepare()
        with mock.patch('asyncio.unix_events.os', spec=[]):
            with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                        "os[.]sendfile[(][)] is not available"):
                self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
                                                              0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_a_file(self):
        sock, proto = self.prepare()
        f = object()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_iobuffer(self):
        sock, proto = self.prepare()
        f = io.BytesIO()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_regular_file(self):
        sock, proto = self.prepare()
        f = mock.Mock()
        f.fileno.return_value = -1
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_cancel1(self):
        # Cancelling the future unregisters the socket from the selector.
        sock, proto = self.prepare()

        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            self.run_loop(fut)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_cancel2(self):
        # Cancellation is also honoured on a re-entrant callback invocation.
        sock, proto = self.prepare()

        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
                                             0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_blocking_error(self):
        # BlockingIOError registers the socket for a retry when writable.
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = mock.Mock()
        fut.cancelled.return_value = False
        with mock.patch('os.sendfile', side_effect=BlockingIOError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        key = self.loop._selector.get_key(sock)
        self.assertIsNotNone(key)
        fut.add_done_callback.assert_called_once_with(mock.ANY)

    def test_sock_sendfile_os_error_first_call(self):
        # An OSError on the very first call means sendfile is unusable.
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = self.loop.create_future()
        with mock.patch('os.sendfile', side_effect=OSError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIsInstance(exc, asyncio.SendfileNotAvailableError)
        self.assertEqual(0, self.file.tell())

    def test_sock_sendfile_os_error_next_call(self):
        # An OSError after some data was sent is propagated as-is.
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = OSError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())

    def test_sock_sendfile_exception(self):
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = asyncio.SendfileNotAvailableError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
    """Tests for _UnixReadPipeTransport using a mocked pipe (fd 5) and
    patched os.read/os.fstat, so no real file descriptors are touched."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        # Make os.fstat report a FIFO so the transport accepts the mock pipe.
        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        st.st_mode = stat.S_IFIFO
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def read_pipe_transport(self, waiter=None):
        # Build a transport over the mocked pipe; cleaned up without close().
        transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
                                                       self.protocol,
                                                       waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        waiter = self.loop.create_future()
        tr = self.read_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertIsNone(waiter.result())

    @mock.patch('os.read')
    def test__read_ready(self, m_read):
        tr = self.read_pipe_transport()
        m_read.return_value = b'data'
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        self.protocol.data_received.assert_called_with(b'data')

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        # Empty read means EOF: reader removed, protocol notified.
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.eof_received.assert_called_with()
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        # BlockingIOError is swallowed; no data delivered.
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.data_received.called)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        # Any other OSError closes the transport and logs a fatal error.
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        tr._close.assert_called_with(err)
        m_logexc.assert_called_with(
            test_utils.MockPattern(
                'Fatal read error on pipe transport'
                '\nprotocol:.*\ntransport:.*'),
            exc_info=(OSError, MOCK_ANY, MOCK_ANY))

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
        self.assertFalse(self.loop.readers)

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        tr = self.read_pipe_transport()
        tr.pause_reading()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close = mock.Mock()
        tr.close()
        self.assertFalse(tr._close.called)

    @mock.patch('os.read')
    def test__close(self, m_read):
        tr = self.read_pipe_transport()
        err = object()
        tr._close(err)
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    def test__call_connection_lost(self):
        # connection_lost drops the protocol/loop references and closes pipe.
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test_pause_reading_on_closed_pipe(self):
        tr = self.read_pipe_transport()
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertIsNone(tr._loop)
        tr.pause_reading()

    def test_pause_reading_on_paused_pipe(self):
        tr = self.read_pipe_transport()
        tr.pause_reading()
        # the second call should do nothing
        tr.pause_reading()

    def test_resume_reading_on_closed_pipe(self):
        tr = self.read_pipe_transport()
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertIsNone(tr._loop)
        tr.resume_reading()

    def test_resume_reading_on_paused_pipe(self):
        tr = self.read_pipe_transport()
        # the pipe is not paused
        # resuming should do nothing
        tr.resume_reading()
class UnixWritePipeTransportTests(test_utils.TestCase):
    """Unit tests for asyncio.unix_events._UnixWritePipeTransport.

    The pipe is a mock RawIOBase reporting fd 5; os.set_blocking and
    os.fstat are patched so no real file descriptors are used.  os.write
    is patched per-test to simulate full/partial/blocked/failing writes.
    """

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5
        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)
        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        # Report the fd as a socket so the transport accepts it and
        # registers a reader to detect the peer closing the pipe.
        st.st_mode = stat.S_IFSOCK
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def write_pipe_transport(self, waiter=None):
        # Helper: build the transport under test and close it on teardown.
        transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                        self.protocol,
                                                        waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        # Construction calls connection_made, registers a reader for
        # peer-close detection, and resolves the waiter with None.
        waiter = self.loop.create_future()
        tr = self.write_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)
        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertEqual(None, waiter.result())

    def test_can_write_eof(self):
        tr = self.write_pipe_transport()
        self.assertTrue(tr.can_write_eof())

    @mock.patch('os.write')
    def test_write(self, m_write):
        # Full write: nothing buffered, no writer registered.
        tr = self.write_pipe_transport()
        m_write.return_value = 4
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test_write_no_data(self, m_write):
        # Writing b'' is a no-op: os.write is never called.
        tr = self.write_pipe_transport()
        tr.write(b'')
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(b''), tr._buffer)

    @mock.patch('os.write')
    def test_write_partial(self, m_write):
        # Partial write: remainder buffered and a writer registered.
        tr = self.write_pipe_transport()
        m_write.return_value = 2
        tr.write(b'data')
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'ta'), tr._buffer)

    @mock.patch('os.write')
    def test_write_buffer(self, m_write):
        # With data already pending, write() appends without calling
        # os.write (ordering must be preserved).
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'previous')
        tr.write(b'data')
        self.assertFalse(m_write.called)
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'previousdata'), tr._buffer)

    @mock.patch('os.write')
    def test_write_again(self, m_write):
        # EAGAIN (BlockingIOError): everything buffered, writer registered.
        tr = self.write_pipe_transport()
        m_write.side_effect = BlockingIOError()
        tr.write(b'data')
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.unix_events.logger')
    @mock.patch('os.write')
    def test_write_err(self, m_write, m_log):
        # OSError: fatal error reported once; subsequent writes only bump
        # _conn_lost, and after several attempts a warning is logged.
        tr = self.write_pipe_transport()
        err = OSError()
        m_write.side_effect = err
        tr._fatal_error = mock.Mock()
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)
        tr._fatal_error.assert_called_with(
            err,
            'Fatal write error on pipe transport')
        self.assertEqual(1, tr._conn_lost)
        tr.write(b'data')
        self.assertEqual(2, tr._conn_lost)
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        # This is a bit overspecified. :-(
        m_log.warning.assert_called_with(
            'pipe closed by peer or os.write(pipe, data) raised exception.')
        tr.close()

    @mock.patch('os.write')
    def test_write_close(self, m_write):
        # Writes after the peer closed the pipe only increment _conn_lost.
        tr = self.write_pipe_transport()
        tr._read_ready()  # pipe was closed by peer
        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 1)
        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 2)

    def test__read_ready(self):
        # Reader firing means the peer closed the pipe: tear down cleanly.
        tr = self.write_pipe_transport()
        tr._read_ready()
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.write')
    def test__write_ready(self, m_write):
        # Buffer fully flushed: writer removed.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_partial(self, m_write):
        # Partial flush: remainder stays buffered, writer kept.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 3
        tr._write_ready()
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'a'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_again(self, m_write):
        # EAGAIN during flush: buffer unchanged, writer kept.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = BlockingIOError()
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_empty(self, m_write):
        # os.write returning 0 is treated like "try again later".
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 0
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.write')
    def test__write_ready_err(self, m_write, m_logexc):
        # OSError during flush closes the transport and forwards the error,
        # without logging it as an unexpected exception.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = err = OSError()
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.assertTrue(tr.is_closing())
        m_logexc.assert_not_called()
        self.assertEqual(1, tr._conn_lost)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    @mock.patch('os.write')
    def test__write_ready_closing(self, m_write):
        # Flushing the last bytes of a closing transport completes the close.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._closing = True
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.protocol.connection_lost.assert_called_with(None)
        self.pipe.close.assert_called_with()

    @mock.patch('os.write')
    def test_abort(self, m_write):
        # abort() drops buffered data without writing it and closes.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        self.loop.add_reader(5, tr._read_ready)
        tr._buffer = [b'da', b'ta']
        tr.abort()
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertEqual([], tr._buffer)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test__call_connection_lost(self):
        # connection_lost(None) notifies the protocol, closes the pipe and
        # drops the protocol/loop references.
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        # Same as above but the error object is forwarded unchanged.
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test_close(self):
        # close() implies write_eof(); a second close() must be harmless.
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr.close()
        tr.write_eof.assert_called_with()
        # closing the transport twice must not fail
        tr.close()

    def test_close_closing(self):
        # close() on an already-closing transport must not re-send EOF.
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr._closing = True
        tr.close()
        self.assertFalse(tr.write_eof.called)

    def test_write_eof(self):
        # write_eof() with an empty buffer closes immediately.
        tr = self.write_pipe_transport()
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test_write_eof_pending(self):
        # write_eof() with buffered data marks closing but defers
        # connection_lost until the buffer drains.
        tr = self.write_pipe_transport()
        tr._buffer = [b'data']
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.protocol.connection_lost.called)
class TestFunctional(unittest.TestCase):
    """Functional tests against a real event loop and real sockets."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop(None)

    def test_add_reader_invalid_argument(self):
        # Non-fd objects must be rejected by all four (add/remove x
        # reader/writer) registration methods.
        def assert_raises():
            return self.assertRaisesRegex(ValueError, r'Invalid file object')

        cb = lambda: None

        with assert_raises():
            self.loop.add_reader(object(), cb)
        with assert_raises():
            self.loop.add_writer(object(), cb)
        with assert_raises():
            self.loop.remove_reader(object())
        with assert_raises():
            self.loop.remove_writer(object())

    def test_add_reader_or_writer_transport_fd(self):
        # An fd owned by a transport may not be registered manually,
        # whether passed as a socket object or as a raw fileno.
        def assert_raises():
            return self.assertRaisesRegex(
                RuntimeError,
                r'File descriptor .* is used by transport')

        async def runner():
            tr, pr = await self.loop.create_connection(
                lambda: asyncio.Protocol(), sock=rsock)
            try:
                cb = lambda: None
                with assert_raises():
                    self.loop.add_reader(rsock, cb)
                with assert_raises():
                    self.loop.add_reader(rsock.fileno(), cb)
                with assert_raises():
                    self.loop.remove_reader(rsock)
                with assert_raises():
                    self.loop.remove_reader(rsock.fileno())
                with assert_raises():
                    self.loop.add_writer(rsock, cb)
                with assert_raises():
                    self.loop.add_writer(rsock.fileno(), cb)
                with assert_raises():
                    self.loop.remove_writer(rsock)
                with assert_raises():
                    self.loop.remove_writer(rsock.fileno())
            finally:
                tr.close()

        rsock, wsock = socket.socketpair()
        try:
            self.loop.run_until_complete(runner())
        finally:
            rsock.close()
            wsock.close()
@support.requires_fork()
class TestFork(unittest.TestCase):
    """Tests that asyncio state is not shared across os.fork().

    The child communicates its observations back through a pipe and must
    always terminate via os._exit() so it never runs the parent's test
    machinery.
    """

    @warnings_helper.ignore_fork_in_thread_deprecation_warnings()
    def test_fork_not_share_current_task(self):
        # The parent's "current task" must not leak into the forked child.
        loop = object()
        task = object()
        asyncio._set_running_loop(loop)
        self.addCleanup(asyncio._set_running_loop, None)
        asyncio.tasks._enter_task(loop, task)
        self.addCleanup(asyncio.tasks._leave_task, loop, task)
        self.assertIs(asyncio.current_task(), task)
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        pid = os.fork()
        if pid == 0:
            # child
            try:
                asyncio._set_running_loop(loop)
                current_task = asyncio.current_task()
                if current_task is None:
                    os.write(w, b'NO TASK')
                else:
                    os.write(w, b'TASK:' + str(id(current_task)).encode())
            except BaseException as e:
                os.write(w, b'ERROR:' + ascii(e).encode())
            finally:
                asyncio._set_running_loop(None)
                os._exit(0)
        else:
            # parent
            result = os.read(r, 100)
            self.assertEqual(result, b'NO TASK')
            wait_process(pid, exitcode=0)

    @warnings_helper.ignore_fork_in_thread_deprecation_warnings()
    def test_fork_not_share_event_loop(self):
        # The forked process should not share the event loop with the parent
        loop = object()
        asyncio._set_running_loop(loop)
        self.assertIs(asyncio.get_running_loop(), loop)
        self.addCleanup(asyncio._set_running_loop, None)
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        pid = os.fork()
        if pid == 0:
            # child
            try:
                loop = asyncio.get_event_loop()
                os.write(w, b'LOOP:' + str(id(loop)).encode())
            except RuntimeError:
                os.write(w, b'NO LOOP')
            except BaseException as e:
                os.write(w, b'ERROR:' + ascii(e).encode())
            finally:
                os._exit(0)
        else:
            # parent
            result = os.read(r, 100)
            self.assertEqual(result, b'NO LOOP')
            wait_process(pid, exitcode=0)

    @warnings_helper.ignore_fork_in_thread_deprecation_warnings()
    @hashlib_helper.requires_hashdigest('md5')
    @support.skip_if_sanitizer("TSAN doesn't support threads after fork", thread=True)
    def test_fork_signal_handling(self):
        self.addCleanup(multiprocessing_cleanup_tests)

        # Sending signal to the forked process should not affect the parent
        # process
        ctx = multiprocessing.get_context('fork')
        manager = ctx.Manager()
        self.addCleanup(manager.shutdown)
        child_started = manager.Event()
        child_handled = manager.Event()
        parent_handled = manager.Event()

        def child_main():
            def on_sigterm(*args):
                child_handled.set()
                sys.exit()

            signal.signal(signal.SIGTERM, on_sigterm)
            child_started.set()
            while True:
                time.sleep(1)

        async def main():
            loop = asyncio.get_running_loop()
            loop.add_signal_handler(signal.SIGTERM, lambda *args: parent_handled.set())

            process = ctx.Process(target=child_main)
            process.start()
            child_started.wait()
            os.kill(process.pid, signal.SIGTERM)
            process.join(timeout=support.SHORT_TIMEOUT)

            async def func():
                await asyncio.sleep(0.1)
                return 42

            # Test parent's loop is still functional
            self.assertEqual(await asyncio.create_task(func()), 42)

        asyncio.run(main())

        child_handled.wait(timeout=support.SHORT_TIMEOUT)
        self.assertFalse(parent_handled.is_set())
        self.assertTrue(child_handled.is_set())

    @warnings_helper.ignore_fork_in_thread_deprecation_warnings()
    @hashlib_helper.requires_hashdigest('md5')
    @support.skip_if_sanitizer("TSAN doesn't support threads after fork", thread=True)
    def test_fork_asyncio_run(self):
        # asyncio.run() must work in a forked child process.
        self.addCleanup(multiprocessing_cleanup_tests)

        ctx = multiprocessing.get_context('fork')
        manager = ctx.Manager()
        self.addCleanup(manager.shutdown)
        result = manager.Value('i', 0)

        async def child_main():
            await asyncio.sleep(0.1)
            result.value = 42

        process = ctx.Process(target=lambda: asyncio.run(child_main()))
        process.start()
        process.join()

        self.assertEqual(result.value, 42)

    @warnings_helper.ignore_fork_in_thread_deprecation_warnings()
    @hashlib_helper.requires_hashdigest('md5')
    @support.skip_if_sanitizer("TSAN doesn't support threads after fork", thread=True)
    def test_fork_asyncio_subprocess(self):
        # Subprocess support must work in a forked child process.
        self.addCleanup(multiprocessing_cleanup_tests)

        ctx = multiprocessing.get_context('fork')
        manager = ctx.Manager()
        self.addCleanup(manager.shutdown)
        # Default 1 so a child that never runs the coroutine fails the test.
        result = manager.Value('i', 1)

        async def child_main():
            proc = await asyncio.create_subprocess_exec(sys.executable, '-c', 'pass')
            result.value = await proc.wait()

        process = ctx.Process(target=lambda: asyncio.run(child_main()))
        process.start()
        process.join()

        self.assertEqual(result.value, 0)
if __name__ == '__main__':
unittest.main() | python | github | https://github.com/python/cpython | Lib/test/test_asyncio/test_unix_events.py |
/* Lua CJSON floating point conversion routines */

/* Buffer required to store the largest string representation of a double.
 *
 * Longest double printed with %.14g is 21 characters long:
 * -1.7976931348623e+308 */
# define FPCONV_G_FMT_BUFSIZE 32

#ifdef USE_INTERNAL_FPCONV
/* Built-in conversion implementation: no runtime initialisation needed. */
static inline void fpconv_init()
{
    /* Do nothing - not required */
}
#else
/* External implementation (not visible in this header) requires a one-time
 * initialisation call at startup. */
extern void fpconv_init();
#endif

/* Format a double into the caller's buffer.
 * NOTE(review): arguments are presumably (buf, value, precision) and the
 * return value the formatted length — confirm against fpconv.c. */
extern int fpconv_g_fmt(char*, double, int);

/* Parse a double from a string; signature mirrors strtod(3)
 * (input string, out-pointer to first unparsed character). */
extern double fpconv_strtod(const char*, char**);
/* vi:ai et sw=4 ts=4:
*/ | c | github | https://github.com/redis/redis | deps/lua/src/fpconv.h |
# This file is part of KEmuFuzzer.
#
# KEmuFuzzer is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# KEmuFuzzer is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# KEmuFuzzer. If not, see <http://www.gnu.org/licenses/>.
import os, sys, hashlib
KEMUFUZZER_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(KEMUFUZZER_PATH)
import subprocess, signal, gzip, tempfile, shutil, struct
from gdb import GDBClient
from x86_cpustate import *
from elf import Elf
KERNEL_FILE = os.path.join(KEMUFUZZER_PATH, "kernel/kernel")
DISK_FILE = os.path.join(KEMUFUZZER_PATH, "kernel/floppy.img")
VMX_FILE = os.path.join(KEMUFUZZER_PATH, "../emulatori/vmware-workstation7/bee.vmx")
VMDK_FILE = os.path.join(KEMUFUZZER_PATH, "../emulatori/vmware-workstation7/bee.vmdk")
MEMORY_SIZE = 4 * 1024 * 1024
DEFAULT_PRESTATE_FILE = "/tmp/vmware-dump-pre.gz"
DEFAULT_POSTSTATE_FILE = "/tmp/vmware-dump-post.gz"
EXCEPTION_NONE = 0xFFFF
# Maps a DPL to a pair (name, selector) for its TSS.
DPL_TO_TSS = {0: ('tss3', (0x20|0)),
1: ('tss4', (0x28|1)),
2: ('tss5', (0x30|2)),
3: ('tss6', (0x38|3)),
4: ('tssVM', (0xd3))}
##################################################
def alarm_handler(signum, frame):
    # SIGALRM handler installed by run(): converts the watchdog alarm into
    # a Timeout exception.  (Timeout is defined elsewhere in the project —
    # not visible in this chunk.)
    raise Timeout()
def parse_seg(sel, cpu, mem):
    """Decode the GDT descriptor selected by `sel` into a segment_reg_t.

    sel -- 16-bit segment selector (index in bits 15:3)
    cpu -- CPU state object; only sregs_state.gdtr (base/limit) is read
    mem -- flat byte string holding guest physical memory

    Returns a populated segment_reg_t, or None when the selector's index
    lies outside the GDT limit (a warning is printed in that case).
    """
    unpack8 = lambda b: struct.unpack('B', b)[0]
    pack8 = lambda b: struct.pack('B', b)

    # Read GDT entry
    gdt_base = cpu.sregs_state.gdtr.base
    gdt_limit = cpu.sregs_state.gdtr.limit
    # Extract i-th entry (each descriptor is 8 bytes)
    gdt_idx = sel >> 3
    if not (gdt_idx*8 <= gdt_limit and (gdt_idx*8+8) < gdt_limit):
        # Invalid GDT index
        print "[W] Invalid GDT selector %.4x (index: %.4x, limit: %.4x)" % (sel, gdt_idx, gdt_limit)
        return None

    addr = gdt_idx*8 + gdt_base
    data = mem[addr:addr+8]
    seg = segment_reg_t()

    # Parse i-th descriptor: reassemble the base address from its three
    # scattered byte fields (bytes 2-4 are base 23:0, byte 7 is base 31:24).
    tmp = data[2] + data[3] + data[4] + data[7]
    seg.base = struct.unpack("I", tmp)[0]
    seg.selector = sel
    # Byte 5: type (3:0), S (4), DPL (6:5), P (7).
    seg.type = ((unpack8(data[5])) & 0xf)
    seg.s = ((unpack8(data[5]) >> 4) & 0x1)
    seg.dpl = ((unpack8(data[5]) >> 5) & 0x3)
    seg.present = ((unpack8(data[5]) >> 7) & 0x1)
    # Byte 6: AVL (4), L (5), D/B (6), G (7).
    seg.avl = ((unpack8(data[6]) >> 4) & 0x1)
    seg.l = ((unpack8(data[6]) >> 5) & 0x1)
    seg.db = ((unpack8(data[6]) >> 6) & 0x1)
    seg.g = ((unpack8(data[6]) >> 7) & 0x1)
    # Limit: bytes 0-1 plus the low nibble of byte 6 (bits 19:16).
    tmp = data[0] + data[1] + pack8(unpack8(data[6]) & 0xf) + '\x00'
    seg.limit = struct.unpack("I", tmp)[0]
    # Scale the limit according to 'granularity'
    # (deliberately disabled — the raw, unscaled limit is kept)
    # if seg.g:
    #    seg.limit = seg.limit << 12 | 0xfff
    seg.unusable = 0
    return seg
class Emulator:
    """Abstract base class for a controllable emulator/VM process.

    Subclasses must implement getGDBVersion(), getGDBPort() and
    dumpState(); kill() should be extended to actually terminate the
    process before calling the base implementation.
    """

    def __init__(self, cmdline, memorysz, kernel, disk):
        self.kernel = kernel        # path to the guest kernel image
        self.disk = disk            # path to the boot floppy/disk image
        self.cmdline = cmdline      # shell command used to launch the emulator
        self.memorysz = memorysz    # guest physical memory size, in bytes
        self.pid = None             # pid of the launched process (None = not running)

    def run(self):
        # Launch the emulator; the Popen object itself is discarded, only
        # the pid is kept for later kill()/isRunning() checks.
        p = subprocess.Popen(self.cmdline.split())
        self.pid = p.pid

    def kill(self):
        # Base implementation only forgets the pid; subclasses terminate
        # the actual process first.
        self.pid = None

    def isRunning(self):
        return self.pid is not None

    def getGDBVersion(self):
        # abstract: GDB stub flavour understood by GDBClient
        abstract()

    def getGDBPort(self):
        # abstract: TCP port of the emulator's GDB stub
        abstract()

    def dumpState(self, filename, typ, tcfinger):
        # abstract: serialize CPU + memory state to `filename`
        abstract()

    def __del__(self):
        self.kill()
class EmulatorQEMU(Emulator):
def __init__(self, memorysz = 0, kernel = KERNEL_FILE, disk = DISK_FILE):
Emulator.__init__(self, cmdline, memorysz, kernel, disk)
def getGDBVersion(self):
return "qemu"
def getGDBPort(self):
return 1234
def kill(self):
if self.isRunning():
print "[*] Killing emulator process #%d" % self.pid
os.kill(self.pid, signal.SIGTERM)
Emulator.kill(self)
class EmulatorVMware(Emulator):
    """Controls a VMware Workstation VM through `vmrun` commands.

    cmd_start/cmd_stop/cmd_suspend are the shell commands built by
    init_cmds().  State dumps are produced by suspending the VM and
    reading the .vmem memory image next to the .vmx file.
    """

    def __init__(self, cmd_start, cmd_stop, cmd_suspend, memorysz = 0,
                 vmx = VMX_FILE, kernel = KERNEL_FILE, disk = DISK_FILE):
        self.vmx = vmx
        self.cmd_start = cmd_start
        self.cmd_stop = cmd_stop
        self.cmd_suspend = cmd_suspend
        # cmd_start doubles as the generic launch command line.
        Emulator.__init__(self, cmd_start, memorysz, kernel, disk)

    def getGDBVersion(self):
        # Selects the VMware flavour of the GDB remote protocol in GDBClient.
        return "vmware"

    def getGDBPort(self):
        # Port opened by VMware's debugStub.listen.guest32 (see prepare_vmx).
        return 8832

    def kill(self):
        if self.isRunning():
            print "[*] Stopping vmware process #%d" % self.pid
            os.system(self.cmd_stop)
        Emulator.kill(self)

    def dumpState(self, gdb, filename, typ, tcfinger, exc = EXCEPTION_NONE, tasks = None):
        """Serialize header + CPU state + guest memory to a gzip file.

        gdb      -- connected GDBClient used to read registers
        filename -- output path (gzip-compressed)
        typ      -- PRE_TESTCASE or POST_TESTCASE
        tcfinger -- testcase checksum recorded in the header
        exc      -- exception vector that ended the testcase
        tasks    -- optional {dpl: [start, end]} map; RIPs inside a task
                    area are rebased to be task-relative
        """
        assert typ in [PRE_TESTCASE, POST_TESTCASE], "[!] Invalid dump type #%d" % typ
        hdr = header_t()
        hdr.magic = CPU_STATE_MAGIC
        hdr.version = CPU_STATE_VERSION
        hdr.emulator = EMULATOR_VMWARE
        hdr.kernel_version="protected mode +"
        hdr.kernel_checksum= hashlib.md5(open(KERNEL_FILE).read()).hexdigest()
        hdr.testcase_checksum = tcfinger
        hdr.type = typ
        hdr.cpusno = 1
        hdr.mem_size = self.memorysz
        hdr.ioports[0] = KEMUFUZZER_HYPERCALL_START_TESTCASE
        hdr.ioports[1] = KEMUFUZZER_HYPERCALL_STOP_TESTCASE

        # Read current CPU state
        cpu = gdb.getRegisters()

        if tasks is not None:
            # Check if RIP falls inside a task area; if so make it relative
            # to the task's base address.
            for n, v in tasks.iteritems():
                if v[0] <= cpu.regs_state.rip <= v[1]:
                    cpu.regs_state.rip -= v[0]

        # Update exception state
        cpu.exception_state.vector = c_uint32(exc)
        cpu.exception_state.error_code = c_uint32(0)

        # Read system memory (suspends and resumes the VM as a side effect)
        mem = self.readAllMemory(gdb)

        # Normalize state so dumps from different emulators are comparable
        hdr, cpus, mem = self.__normalize(typ, hdr, [cpu], mem)
        cpu = cpus[0]

        # Write data to file: header, then CPU struct, then raw memory
        f = gzip.open(filename, 'w')
        s = string_at(byref(hdr), sizeof(hdr))
        f.write(s)
        s = string_at(byref(cpu), sizeof(cpu))
        f.write(s)
        f.write(mem)
        f.close()

    def __normalize(self, typ, hdr, cpus, mem):
        # Canonicalize emulator-specific CPU state (TR, MSRs, segment
        # granularity/accessed bits) so pre/post dumps can be compared.
        elf = Elf(self.kernel)
        for i in range(hdr.cpusno):
            c = cpus[i]

            #### Normalize task-register (TR) ####
            c.sregs_state.tr.present = 0x1
            c.sregs_state.tr.type = 0xb
            c.sregs_state.tr.limit = 0x68
            if typ == PRE_TESTCASE:
                # Use 'main' TSS
                c.sregs_state.tr.base = elf.getSymbol('tss0').getAddress()
                c.sregs_state.tr.dpl = 0x0
                c.sregs_state.tr.selector = 0x8
            else:
                # Pop the y-th dword from the (interrupt) stack at x.
                pop = lambda x,y: struct.unpack("I", mem[x + 4 * y:(x + 4 * y) + 4])[0]
                # Choose the correct TSS for current CPL: read the CS
                # selector the CPU pushed on the exception stack frame.
                rsp = c.regs_state.rsp + c.sregs_state.ss.base
                if exception_has_error_code(c.exception_state.vector):
                    rsp += 4
                stack_cs_sel = pop(rsp, 2)
                stack_cs = parse_seg(stack_cs_sel, c, mem)
                if stack_cs is not None:
                    c.sregs_state.tr.dpl = stack_cs.dpl
                    c.sregs_state.tr.base = elf.getSymbol(DPL_TO_TSS[stack_cs.dpl][0]).getAddress()
                    c.sregs_state.tr.selector = DPL_TO_TSS[stack_cs.dpl][1]

            #### Normalize MSR registers ####
            # Force the three SYSENTER MSRs to the kernel's canonical values.
            c.msrs_state.n = 3
            c.msrs_state.msr_regs[0].idx = X86_MSR_IA32_SYSENTER_CS
            c.msrs_state.msr_regs[0].val = 0x68
            c.msrs_state.msr_regs[1].idx = X86_MSR_IA32_SYSENTER_ESP
            c.msrs_state.msr_regs[1].val = 0x800
            c.msrs_state.msr_regs[2].idx = X86_MSR_IA32_SYSENTER_EIP
            c.msrs_state.msr_regs[2].val = elf.getSection('.tcring0').getLowAddr()

            #### Normalize segment descriptors ####
            # Fix granularity bits (both in pre- and post-states)
            c.sregs_state.cs.g = 1
            c.sregs_state.ds.g = 1
            c.sregs_state.es.g = 1
            c.sregs_state.fs.g = 1
            c.sregs_state.gs.g = 1
            c.sregs_state.ss.g = 1

            if typ == PRE_TESTCASE:
                # PRE-normalization: mark data/stack segments as accessed,
                # both in the cached descriptors and in the in-memory GDT.
                gdt = elf.getSymbol('gdt').getAddress()
                for s in [c.sregs_state.ds, c.sregs_state.es, c.sregs_state.fs,
                          c.sregs_state.gs, c.sregs_state.ss]:
                    # Mark as accessed
                    s.type |= 1
                    # Fix GDT: set the 'accessed' bit in the descriptor too
                    gdt_index = s.selector >> 3
                    gdt_addr = gdt + (gdt_index * 8) + 4
                    data = struct.unpack("I", mem[gdt_addr:gdt_addr+4])[0]
                    data |= 0x100
                    data = struct.pack("I", data)
                    mem = mem[:gdt_addr] + data + mem[gdt_addr+4:]

        return (hdr, cpus, mem)

    def readAllMemory(self, gdb=None):
        """Return the guest's physical memory as a byte string.

        VMware only flushes the .vmem file while suspended, so the VM is
        suspended (debugger detached first), the image is read, and the VM
        is resumed with the debugger reconnected.
        """
        # 1. Suspend VMware
        os.system(self.cmd_suspend)
        # 2. Detach debugger
        if gdb:
            gdb.kill()
        # 3. Read memory image
        f = open(self.vmx.replace(".vmx", ".vmem"), 'r')
        data = f.read()
        f.close()
        # 4. Resume VMware
        self.run()
        # 5. Re-connect debugger
        if gdb:
            gdb.reconnect()
        return data
###########################################################################
def read_beefinger():
    """Parse beefinger.h and return the BEE fingerprint "<svn-rev>-<md5>".

    Scans #define lines for BEE_SVN and BEE_MD5 and strips the quotes
    from their values; asserts if either is missing.
    """
    rev, md5 = None, None
    f = open('beefinger.h', 'r')
    for l in f.readlines():
        l = l.strip(" \t\n\r")
        if not l.startswith("#define"):
            continue
        l = l.replace("#define", "").strip()
        if l.startswith("BEE_SVN"):
            rev = l.split()[-1].replace('"', "")
        elif l.startswith("BEE_MD5"):
            md5 = l.split()[-1].replace('"', "")
    f.close()
    assert rev is not None and md5 is not None, "[!] Error parsing BEE fingerprint file"
    s = "%s-%s" % (rev, md5)
    return s
def load_symbols(k):
    """Extract relevant symbols and task address ranges from kernel `k`.

    Runs `nm` on the kernel ELF and returns (symbols, tasks):
      symbols -- {name: address} for notify_int* handlers, the tcring*
                 entry points and testcase_start
      tasks   -- {index: [start, end]} address ranges of the per-ring
                 testcase areas (index 4 is the VM8086 ring, tcringvm)
    """
    symbols = {}
    tasks = {0: [0,0], 1: [0,0], 2: [0,0], 3: [0,0], 4:[0,0]}
    cmdline = "nm %s" % k
    p = subprocess.Popen(cmdline.split(), stdout=subprocess.PIPE)
    for l in p.stdout.readlines():
        # Symbols: nm prints "<addr> <type> <name>"
        l = l.strip().split()
        if l[-1].startswith("notify_int") or \
                l[-1] in ["tcring0", "tcring1", "tcring2", "tcring3", "tcringvm", "testcase_start"]:
            symbols[l[-1]] = int(l[0], 16)

        # Tasks: "<name>" marks a range start, "<name>end" its end.
        if l[-1].startswith("tcring"):
            # check tcringvm - VM8086
            if l[-1].startswith("tcringvm"):
                v= int(l[0], 16)
                if l[-1].endswith("end"):
                    tasks[4][1]=v-1;
                else:
                    tasks[4][0]=v;
            else:
                # tcring<N>[end]: the digit after "tcring" is the ring number
                n = int(l[-1][6])
                v = int(l[0], 16)
                if l[-1].endswith("end"):
                    tasks[n][1] = v-1
                else:
                    tasks[n][0] = v
    return symbols, tasks
def show_help():
    # Print usage. NOTE(review): the script also accepts "kernel:<path>"
    # and "disk:<path>" arguments (see __main__), not reflected here.
    print "Syntax: python %s" % sys.argv[0]
def compare_symbols(a,b):
    """cmp()-style comparator ordering breakpoint symbol names.

    notify_int<N> names compare by their interrupt number; tcring* and
    testcase_start are mapped to -1 so they sort first (highest priority
    when the debugger runs out of hardware breakpoints).
    """
    data = [a,b]
    for i in range(len(data)):
        v = data[i]
        if v.startswith("notify_int"):
            v = int(v.replace("notify_int", ""))
        elif v.startswith("tcring") or v == "testcase_start":
            # HACK: raise priority of V
            v = -1
        data[i] = v
    return cmp(data[0], data[1])
def guess_exit_reason(rip, symbol_name):
    """Map the symbol where execution stopped to a testcase exit reason.

    Returns EXCEPTION_NONE for a clean finish, the interrupt vector for a
    notify_int<N> handler, or None when the guest rebooted (treated as a
    crash by the caller).  Any other symbol is a bug -> assert.
    """
    if symbol_name == "tcringXend":
        # Testcase completed with no exception
        r = EXCEPTION_NONE
    elif symbol_name.startswith("notify_int"):
        r = int(symbol_name.replace("notify_int", ""))
    elif symbol_name == "testcase_start":
        # Reboot -- Simulate a crash
        print "[!] Reboot detected!"
        r = None
    else:
        assert False, "[!] Unknown post symbol '%s' (@%.8x)" % (symbol_name, rip)
    return r
def init_cmds(disk, vmx, gui):
    """Build the shell command lines used to drive the emulators.

    disk -- path to the floppy image booted by QEMU
    vmx  -- path to the VMware .vmx configuration
    gui  -- when true, vmrun starts the VM with a visible window

    Returns (qemu_start, vmware_start, vmware_stop, vmware_suspend).
    """
    display = "gui" if gui else "nogui"
    qemu_start = "qemu -fda %s -s -S" % disk
    vmware_start = "vmrun -T ws start %s %s" % (vmx, display)
    vmware_stop = "vmrun -T ws stop %s" % vmx
    vmware_suspend = "vmrun -T ws suspend %s" % vmx
    return qemu_start, vmware_start, vmware_stop, vmware_suspend
def run(kernel = KERNEL_FILE, disk = DISK_FILE, gui = False, timeout = None):
assert os.path.isfile(kernel) and os.path.isfile(disk)
vmx = prepare_vmx(disk)
assert os.path.isfile(vmx)
qemu_start, vmware_start, vmware_stop, vmware_suspend = init_cmds(disk, vmx, gui)
prestate = os.environ.get("KEMUFUZZER_PRE_STATE", DEFAULT_PRESTATE_FILE)
poststate = os.environ.get("KEMUFUZZER_POST_STATE", DEFAULT_POSTSTATE_FILE)
tcfinger = os.environ.get("KEMUFUZZER_TESTCASE_CHECKSUM", "????????????????")
assert tcfinger is not None
print "[*] Loading symbols from kernel image..."
symbols, tasks = load_symbols(kernel)
assert "tcring0" in symbols and "testcase_start" in symbols
# Build the symbol that marks the end of a testcase
symbols['tcringXend'] = symbols['notify_int31']
# 'tcringX' symbols are not needed anymore
for i in range(4):
n = "tcring%d" % i
if symbols.has_key(n):
del symbols[n]
# Build reverse symbols map
reverse_symbols = {}
for k,v in symbols.iteritems():
reverse_symbols[v] = k
print "[*] Starting emulator (vmx: %s, disk: %s) ..." % (vmx, disk)
emu = EmulatorVMware(cmd_start = vmware_start, cmd_stop = vmware_stop,
cmd_suspend = vmware_suspend,
memorysz = MEMORY_SIZE, vmx = vmx,
kernel = kernel, disk = disk)
emu.run()
gdb = GDBClient(host="127.0.0.1", port=emu.getGDBPort(), ver=emu.getGDBVersion())
gdb.connect()
print "[*] Debugger connected!"
# Set breakpoints
i = 0
kk = symbols.keys()
kk.sort(cmp=compare_symbols)
set_breakpoints = []
for k in kk:
v = symbols[k]
i += 1
r = gdb.setBreakpoint(v)
if not r:
print "[W] No more breakpoints available for symbol '%s'" % k
break
else:
set_breakpoints.append(k)
print "[*] A breakpoint has been set at the following symbols: %s" % set_breakpoints
# Set timeout
if timeout is not None:
print "[*] Setting timeout to %d seconds" % timeout
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(timeout)
# Continue until the testcase begins
print "[*] Resuming execution"
r = gdb.resume()
rip = gdb.getRegister('rip')
if reverse_symbols.get(rip) == "testcase_start" or reverse_symbols.get(rip).startswith("tcring"):
r = EXCEPTION_NONE
print "[*] Execution interrupted with reason %.2x" % r
print "\t- RIP: %.8x (%s)" % (rip, reverse_symbols.get(rip))
print "\t- raw data @%.8x: %s" % (rip, repr(gdb.readMemory(rip, 16)))
assert reverse_symbols.get(rip) == "testcase_start"
tmpfilename = tempfile.mktemp(prefix = "kemufuzzer-")
emu.dumpState(gdb, tmpfilename, PRE_TESTCASE, tcfinger, exc = r, tasks = tasks)
shutil.move(tmpfilename, prestate)
print "[*] Pre-execution state dumped to '%s'" % prestate
# Continue until next exception
print "[*] Resuming execution"
r = gdb.resume()
rip = gdb.getRegister('rip')
r = guess_exit_reason(rip, reverse_symbols.get(rip))
if r is None:
# Execution has crashed
if timeout is not None:
signal.alarm(0)
raise Crash()
print "[*] Execution interrupted with reason %.2x" % r
print "\t- RIP: %.8x (%s)" % (rip, reverse_symbols.get(rip))
print "\t- raw data @%.8x: %s" % (rip, repr(gdb.readMemory(rip, 16)))
emu.dumpState(gdb, tmpfilename, POST_TESTCASE, tcfinger, r, tasks = tasks)
shutil.move(tmpfilename, poststate)
print "[*] Post-execution state dumped to '%s'" % poststate
# All done, detach
gdb.detach()
print "[*] Debugger detached!"
# Disable the alarm
if timeout is not None:
signal.alarm(0)
# Stop emulator
del emu
# Delete temporary files
print "[*] Flushing temporary files.."
for x in ["vmxf", "vmsd", "vmx.lck", "vmx"] :
n = vmx.replace(".vmx", ".%s" % x)
if os.path.isdir(n):
shutil.rmtree(n)
elif os.path.isfile(n):
os.unlink(n)
def prepare_vmx(disk):
    """Write a temporary .vmx configuration for the given floppy image.

    The template enables VMware's GDB stub (debugStub.listen.guest32 on
    port 8832, matching EmulatorVMware.getGDBPort) and halts the guest on
    the first instruction so the debugger can take control.  Returns the
    path of the generated .vmx file; the caller is responsible for
    deleting it (see the cleanup loop at the end of run()).
    """
    template = """
#!/usr/bin/vmware
.encoding = "UTF-8"
config.version = "8"
virtualHW.version = "7"
maxvcpus = "4"
scsi0.present = "TRUE"
memsize = "4"
ide0:0.present = "TRUE"
ide0:0.fileName = "%s"
ide1:0.present = "FALSE"
ide1:0.fileName = "/dev/hdc"
ide1:0.deviceType = "cdrom-raw"
floppy0.startConnected = "TRUE"
floppy0.fileName = "%s"
floppy0.autodetect = "TRUE"
sound.present = "FALSE"
sound.fileName = "-1"
sound.autodetect = "TRUE"
pciBridge0.present = "TRUE"
pciBridge4.present = "TRUE"
pciBridge4.virtualDev = "pcieRootPort"
pciBridge4.functions = "8"
pciBridge5.present = "TRUE"
pciBridge5.virtualDev = "pcieRootPort"
pciBridge5.functions = "8"
pciBridge6.present = "TRUE"
pciBridge6.virtualDev = "pcieRootPort"
pciBridge6.functions = "8"
pciBridge7.present = "TRUE"
pciBridge7.virtualDev = "pcieRootPort"
pciBridge7.functions = "8"
vmci0.present = "TRUE"
roamingVM.exitBehavior = "go"
displayName = "bee"
guestOS = "other"
nvram = "bee.nvram"
virtualHW.productCompatibility = "hosted"
extendedConfigFile = "bee.vmxf"
ide1:0.startConnected = "FALSE"
floppy0.fileType = "file"
floppy0.clientDevice = "FALSE"
sound.startConnected = "FALSE"
uuid.location = "56 4d c9 20 8c 69 9a e3-59 d0 d0 3e 24 66 65 8e"
uuid.bios = "56 4d c9 20 8c 69 9a e3-59 d0 d0 3e 24 66 65 8e"
cleanShutdown = "TRUE"
replay.supported = "TRUE"
replay.filename = ""
ide0:0.redo = ""
pciBridge0.pciSlotNumber = "17"
pciBridge4.pciSlotNumber = "21"
pciBridge5.pciSlotNumber = "22"
pciBridge6.pciSlotNumber = "23"
pciBridge7.pciSlotNumber = "24"
scsi0.pciSlotNumber = "16"
sound.pciSlotNumber = "32"
vmci0.pciSlotNumber = "33"
vmotion.checkpointFBSize = "16777216"
vmci0.id = "-536728653"
logging = "FALSE"
debugStub.listen.guest32 = "1"
monitor.debugOnStartGuest32 = "TRUE" # halt on first instruction
debugStub.hideBreakpoints= "1"
checkpoint.vmState = ""
""" % (os.path.abspath(VMDK_FILE), os.path.abspath(disk))
    # Materialize the config in a uniquely-named temporary .vmx file.
    fd, name = tempfile.mkstemp(prefix="kemufuzzer-vmx-", suffix=".vmx")
    os.write(fd, template)
    os.close(fd)
    return name
if __name__ == "__main__":
kernel = KERNEL_FILE
disk = DISK_FILE
for a in sys.argv[1:]:
n,v = a.split(":")
if n == 'kernel':
kernel = v
elif n == 'disk':
disk = v
else:
assert False, "[!] Unknown option '%s'" % n
run(kernel = kernel, disk = disk) | unknown | codeparrot/codeparrot-clean | ||
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import NasaAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"NasaAction": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): delegates to the importer
    built from DEPRECATED_LOOKUP, which emits a deprecation warning and
    resolves the name from ``langchain_community.tools``.
    """
    return _import_attribute(name)
__all__ = [
"NasaAction",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/tools/nasa/tool.py |
/** @import { AST } from '#compiler' */
/** @import { ComponentContext } from '../types' */
import * as b from '../../../../utils/builders.js';
import { build_expression } from './shared/utils.js';
/**
* @param {AST.AttachTag} node
* @param {ComponentContext} context
*/
/**
 * Emits the client-side code for an `{@attach ...}` tag: a `$.attach`
 * call binding the (thunked) attachment expression to the current node.
 * Async expressions are deferred behind their blockers via
 * `$.run_after_blockers` before being pushed into the init block.
 * @param {AST.AttachTag} node
 * @param {ComponentContext} context
 */
export function AttachTag(node, context) {
	const expression = build_expression(context, node.expression, node.metadata.expression);
	// Thunked so the attachment re-evaluates reactively.
	let statement = b.stmt(b.call('$.attach', context.state.node, b.thunk(expression)));

	if (node.metadata.expression.is_async()) {
		// Wait for the expression's async dependencies before attaching.
		statement = b.stmt(
			b.call(
				'$.run_after_blockers',
				node.metadata.expression.blockers(),
				b.thunk(b.block([statement]))
			)
		);
	}

	context.state.init.push(statement);

	context.next();
} | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/src/compiler/phases/3-transform/client/visitors/AttachTag.js |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.test.rest.transform.length;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.elasticsearch.gradle.internal.test.rest.transform.AssertObjectNodes;
import org.elasticsearch.gradle.internal.test.rest.transform.TransformTests;
import org.junit.Test;
import java.util.Collections;
import java.util.List;
public class ReplaceKeyInLengthTests extends TransformTests {
@Test
public void testLengthKeyChange() throws Exception {
String test_original = "/rest/transform/length/length_replace_original.yml";
List<ObjectNode> tests = getTests(test_original);
String test_transformed = "/rest/transform/length/length_replace_transformed_key.yml";
List<ObjectNode> expectedTransformation = getTests(test_transformed);
List<ObjectNode> transformedTests = transformTests(
tests,
Collections.singletonList(new ReplaceKeyInLength("key.in_length_to_replace", "key.in_length_replaced", null))
);
AssertObjectNodes.areEqual(transformedTests, expectedTransformation);
}
} | java | github | https://github.com/elastic/elasticsearch | build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/length/ReplaceKeyInLengthTests.java |
# -*- coding: utf-8 -*-
import collections
import json
import bleach
def strip_html(unclean):
"""Sanitize a string, removing (as opposed to escaping) HTML tags
:param unclean: A string to be stripped of HTML tags
:return: stripped string
:rtype: str
"""
# We make this noop for non-string, non-collection inputs so this function can be used with higher-order
# functions, such as rapply (recursively applies a function to collections)
if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:
return unclean
return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])
# TODO: Not used anywhere except unit tests? Review for deletion
def clean_tag(data):
"""Format as a valid Tag
:param data: A string to be cleaned
:return: cleaned string
:rtype: str
"""
# TODO: make this a method of Tag?
return escape_html(data).replace('"', '"').replace("'", ''')
def is_iterable(obj):
return isinstance(obj, collections.Iterable)
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (is_iterable(obj) and not hasattr(obj, 'strip'))
def escape_html(data):
"""Escape HTML characters in data (as opposed to stripping them out entirely). Will ignore whitelisted tags.
:param data: A string, dict, or list to clean of HTML characters
:return: A cleaned object
:rtype: str or list or dict
"""
if isinstance(data, dict):
return {
key: escape_html(value)
for (key, value) in data.iteritems()
}
if is_iterable_but_not_string(data):
return [
escape_html(value)
for value in data
]
if isinstance(data, basestring):
return bleach.clean(data)
return data
# FIXME: Doesn't raise either type of exception expected, and can probably be deleted along with sole use
def assert_clean(data):
"""Ensure that data is cleaned
:raise: AssertionError
"""
def _ensure_clean(value):
if value != bleach.clean(value):
raise ValueError
return escape_html(data)
# TODO: Remove unescape_entities when mako html safe comes in
def unescape_entities(value):
"""
Convert HTML-encoded data (stored in the database) to literal characters.
Intended primarily for endpoints consumed by frameworks that handle their own escaping (eg Knockout)
:param value: A string, dict, or list
:return: A string or list or dict without html escape characters
"""
safe_characters = {
'&': '&',
}
if isinstance(value, dict):
return {
key: unescape_entities(value)
for (key, value) in value.iteritems()
}
if is_iterable_but_not_string(value):
return [
unescape_entities(each)
for each in value
]
if isinstance(value, basestring):
for escape_sequence, character in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
def temp_ampersand_fixer(s):
"""As a workaround for ampersands stored as escape sequences in database, unescape text before use on a safe page
Explicitly differentiate from safe_unescape_html in case use cases/behaviors diverge
"""
return s.replace('&', '&')
def safe_json(value):
"""
Dump a string to JSON in a manner that can be used for JS strings in mako templates.
Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
http://benalpert.com/2012/08/03/preventing-xss-json.html
:param value: A string to be converted
:return: A JSON-formatted string that explicitly escapes forward slashes when needed
"""
return json.dumps(value).replace('</', '<\\/') # Fix injection of closing markup in strings | unknown | codeparrot/codeparrot-clean | ||
#!python
# Copyright (c) 2009 John Hampton <pacopablo@pacopablo.com>
#
# Script to checksum a file and verify said checksum
#
# usage:
# checksum -m md5.hash -s sha1.hash filename
#
# Exits with a 0 return status if both hashes match. Otherwise it exits
# with a return status of 1
VERSION='1.0.0'
try:
import hashlib
md5 = lambda : hashlib.md5()
sha = lambda : hashlib.sha1()
except ImportError:
import md5, sha
md5 = lambda : md5.new()
sha = lambda : sha.new()
import pickle
import os
import os.path
import sys
import gzip
from optparse import OptionParser
def load_hashes(hash_path):
""" Load the hashes from the pickle """
data = {}
try:
data = pickle.load(gzip.open(hash_path, 'r'))
except IOError, e:
# Check the exception for presence of gzip error. If it's a gzip error
# try loading the pickle without gzip. Otherwise it means that it's
# either not there or we don't have access. Either way, returning
# an empty dict is sufficient
if isinstance(e.args[0], basetring) and (e.errno is None) and \
(e.strerror is None):
try:
data = pickle.load(open(hash_path, 'rb'))
except IOError:
pass
except (pickle.UnpicklingError, AttributeError, EOFError,
ImportError, IndexError):
pass
pass
except (pickle.UnpicklingError, AttributeError, EOFError, ImportError, IndexError):
# OK, What should be behavior be? If we can't load the pickle, should
# we bomb and mention the manifest is corrupt? Or do we simply ignore
# it and the checksum fails? In the case of creating a checksum, a
# fail and ignore would sipmly end up in the file being re-written
# with the new data
pass
return data
def checksum(filename, md5_hashes, sha_hashes, verbose=False):
s = sha()
m = md5()
m.update(file(filename, 'r').read())
fname = os.path.basename(filename)
rc = 1
if (fname in md5_hashes) and md5_hashes[fname] == m.hexdigest():
s.update(file(filename, 'r').read())
if (fname in sha_hashes) and sha_hashes[fname] == s.hexdigest():
rc = 0
if verbose:
print("File: %s" % fname)
print(" MD5 : %s" % m.hexdigest())
print(" SHA1: %s" % s.hexdigest())
print("Recipies:")
print(" MD5 : %s" % (fname in md5_hashes and md5_hashes[fname] or 'Not Found'))
print(" SHA1: %s" % (fname in sha_hashes and sha_hashes[fname] or 'Not Found'))
if rc == 0:
print("FOR THE WIN!")
else:
print("FAIL")
return rc
def create_checksum(filename, directory, md5file, shafile, verbose=False):
md5_hashes = load_hashes(md5file)
sha_hashes = load_hashes(shafile)
if directory:
abspath = os.path.abspath(directory)
files = [(f, os.path.join(abspath, f)) for f in os.listdir(abspath) \
if not f.startswith('.') and \
os.path.isfile(os.path.join(abspath, f))]
else:
abspath = os.path.abspath(filename)
files = [(os.path.basename(baspath), abspath)]
for f, fpath in files:
if os.path.isfile(fpath) and \
not f.startswith('.'):
# Create the MD5 Hash
m = md5()
m.update(open(fpath, 'rb').read())
md5_hashes[f] = m.hexdigest()
# Create the SHA Hash
s = sha()
s.update(open(fpath, 'rb').read())
sha_hashes[f] = s.hexdigest()
if verbose:
print("File: %s" % f)
print(" MD5 : %s" % md5_hashes[f])
print(" SHA1: %s" % sha_hashes[f])
continue
# Save pickle files
mfile = gzip.open(md5file, 'wb')
sfile = gzip.open(shafile, 'wb')
pickle.dump(md5_hashes, mfile, pickle.HIGHEST_PROTOCOL)
pickle.dump(sha_hashes, sfile, pickle.HIGHEST_PROTOCOL)
mfile.close()
sfile.close()
return 0
def verify_args(args):
""" Verify that the files referenced actually exist and stuff """
args.directory = None
if args.create:
try:
f = open(args.md5, 'a+')
os.utime(args.md5, None)
f.close()
f = open(args.sha, 'a+')
os.utime(args.sha, None)
f.close()
except IOError, err:
sys.stderr.write("Unable to write to %s. Please "
"verify the file is writable.\n" % err.filename)
return 1
if len(args.args) < 1:
sys.stderr.write("A file or directory to checksum must be "
"specified\n")
return 1
args.filename = args.args[0]
if os.path.isdir(args.filename):
args.directory = args.filename
elif not os.path.isfile(args.filename):
sys.stderr.write("A file or directory to checksum must be "
"specified\n")
return 1
else:
if not os.path.isfile(args.md5):
sys.stderr.write("The file containing the MD5 hashes does not exists."
" Please verify the file\nexists and can be read.\n")
return 1
if not os.path.isfile(args.sha):
sys.stderr.write("The file containing the SHA hashes does not exists."
" Please verify the file\nexists and can be read.\n")
return 1
if len(args.args) < 1:
sys.stderr.write("A file to checksum must be specified\n")
return 1
args.filename = args.args[0]
if not os.path.isfile(args.filename):
sys.stderr.write("The file to checksum does not exists."
" Please verify the file\nexists and can be read.\n")
return 1
return 0
def parseargs(argv):
""" Return a parsed optparse OptionParser object """
global VERSION
usage = "usage: %prog [options] file"
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option('-m', '--md5', dest='md5',
help='File containing the package md5 hashes',
metavar='FILE', default='recipies.md5.gz')
parser.add_option('-s', '--sha', dest='sha',
help='File containing the package sha hashes',
metavar='FILE', default='recipies.sha.gz')
parser.add_option('-c', '--create', dest='create', action='store_true',
help='Create hash files', default=False)
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='Produce a verbose output.', default=False)
options, args = parser.parse_args(argv)
options.args = args
return options
def main(argv):
args = parseargs(argv)
rc = verify_args(args)
if rc == 0:
if not args.create:
md5_hashes = load_hashes(args.md5)
sha_hashes = load_hashes(args.sha)
rc = checksum(args.filename, md5_hashes, sha_hashes, args.verbose)
else:
rc = create_checksum(args.filename, args.directory,
args.md5, args.sha, args.verbose)
return rc
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | unknown | codeparrot/codeparrot-clean | ||
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show() | unknown | codeparrot/codeparrot-clean | ||
import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# test_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
vm = load('voltmeter-0-0-4.dat')
figure(1)
clf()
plot(vm[:,0], vm[:,1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show() | unknown | codeparrot/codeparrot-clean | ||
import asyncio
import logging
from collections.abc import Sequence
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
from typing_extensions import override
from langchain_classic.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class LineListOutputParser(BaseOutputParser[list[str]]):
"""Output parser for a list of lines."""
@override
def parse(self, text: str) -> list[str]:
lines = text.strip().split("\n")
return list(filter(None, lines)) # Remove empty lines
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an AI language model assistant. Your task is
to generate 3 different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search. Provide these alternative
questions separated by newlines. Original question: {question}""",
)
def _unique_documents(documents: Sequence[Document]) -> list[Document]:
return [doc for i, doc in enumerate(documents) if doc not in documents[:i]]
class MultiQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to write a set of queries.
Retrieve docs for each query. Return the unique union of all retrieved docs.
"""
retriever: BaseRetriever
llm_chain: Runnable
verbose: bool = True
parser_key: str = "lines"
"""DEPRECATED. parser_key is no longer used and should not be specified."""
include_original: bool = False
"""Whether to include the original query in the list of generated queries."""
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT,
parser_key: str | None = None, # noqa: ARG003
include_original: bool = False, # noqa: FBT001,FBT002
) -> "MultiQueryRetriever":
"""Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: The prompt which aims to generate several different versions
of the given user query
parser_key: DEPRECATED. `parser_key` is no longer used and should not be
specified.
include_original: Whether to include the original query in the list of
generated queries.
Returns:
MultiQueryRetriever
"""
output_parser = LineListOutputParser()
llm_chain = prompt | llm | output_parser
return cls(
retriever=retriever,
llm_chain=llm_chain,
include_original=include_original,
)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get relevant documents given a user query.
Args:
query: user query
run_manager: the callback handler to use.
Returns:
Unique union of relevant documents from all generated queries
"""
queries = await self.agenerate_queries(query, run_manager)
if self.include_original:
queries.append(query)
documents = await self.aretrieve_documents(queries, run_manager)
return self.unique_union(documents)
async def agenerate_queries(
self,
question: str,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[str]:
"""Generate queries based upon user input.
Args:
question: user query
run_manager: the callback handler to use.
Returns:
List of LLM generated queries that are similar to the user input
"""
response = await self.llm_chain.ainvoke(
{"question": question},
config={"callbacks": run_manager.get_child()},
)
lines = response["text"] if isinstance(self.llm_chain, LLMChain) else response
if self.verbose:
logger.info("Generated queries: %s", lines)
return lines
async def aretrieve_documents(
self,
queries: list[str],
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""Run all LLM generated queries.
Args:
queries: query list
run_manager: the callback handler to use
Returns:
List of retrieved Documents
"""
document_lists = await asyncio.gather(
*(
self.retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child()},
)
for query in queries
),
)
return [doc for docs in document_lists for doc in docs]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get relevant documents given a user query.
Args:
query: user query
run_manager: the callback handler to use.
Returns:
Unique union of relevant documents from all generated queries
"""
queries = self.generate_queries(query, run_manager)
if self.include_original:
queries.append(query)
documents = self.retrieve_documents(queries, run_manager)
return self.unique_union(documents)
def generate_queries(
self,
question: str,
run_manager: CallbackManagerForRetrieverRun,
) -> list[str]:
"""Generate queries based upon user input.
Args:
question: user query
run_manager: run manager for callbacks
Returns:
List of LLM generated queries that are similar to the user input
"""
response = self.llm_chain.invoke(
{"question": question},
config={"callbacks": run_manager.get_child()},
)
lines = response["text"] if isinstance(self.llm_chain, LLMChain) else response
if self.verbose:
logger.info("Generated queries: %s", lines)
return lines
def retrieve_documents(
self,
queries: list[str],
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Run all LLM generated queries.
Args:
queries: query list
run_manager: run manager for callbacks
Returns:
List of retrieved Documents
"""
documents = []
for query in queries:
docs = self.retriever.invoke(
query,
config={"callbacks": run_manager.get_child()},
)
documents.extend(docs)
return documents
def unique_union(self, documents: list[Document]) -> list[Document]:
"""Get unique Documents.
Args:
documents: List of retrieved Documents
Returns:
List of unique retrieved Documents
"""
return _unique_documents(documents) | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/retrievers/multi_query.py |
//===- Error.h - system_error extensions for Object -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This declares a new error_category for the Object library.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ERROR_H
#define LLVM_OBJECT_ERROR_H
#include "llvm/Support/Error.h"
#include <system_error>
namespace llvm {
class Twine;
namespace object {
const std::error_category &object_category();
enum class object_error {
// Error code 0 is absent. Use std::error_code() instead.
arch_not_found = 1,
invalid_file_type,
parse_failed,
unexpected_eof,
string_table_non_null_end,
invalid_section_index,
bitcode_section_not_found,
invalid_symbol_index,
section_stripped,
};
inline std::error_code make_error_code(object_error e) {
return std::error_code(static_cast<int>(e), object_category());
}
/// Base class for all errors indicating malformed binary files.
///
/// Having a subclass for all malformed binary files allows archive-walking
/// code to skip malformed files without having to understand every possible
/// way that a binary file might be malformed.
///
/// Currently inherits from ECError for easy interoperability with
/// std::error_code, but this will be removed in the future.
class BinaryError : public ErrorInfo<BinaryError, ECError> {
void anchor() override;
public:
static char ID;
BinaryError() {
// Default to parse_failed, can be overridden with setErrorCode.
setErrorCode(make_error_code(object_error::parse_failed));
}
};
/// Generic binary error.
///
/// For errors that don't require their own specific sub-error (most errors)
/// this class can be used to describe the error via a string message.
class GenericBinaryError : public ErrorInfo<GenericBinaryError, BinaryError> {
public:
static char ID;
GenericBinaryError(const Twine &Msg);
GenericBinaryError(const Twine &Msg, object_error ECOverride);
const std::string &getMessage() const { return Msg; }
void log(raw_ostream &OS) const override;
private:
std::string Msg;
};
/// isNotObjectErrorInvalidFileType() is used when looping through the children
/// of an archive after calling getAsBinary() on the child and it returns an
/// llvm::Error. In the cases we want to loop through the children and ignore the
/// non-objects in the archive this is used to test the error to see if an
/// error() function needs to called on the llvm::Error.
Error isNotObjectErrorInvalidFileType(llvm::Error Err);
inline Error createError(const Twine &Err) {
return make_error<StringError>(Err, object_error::parse_failed);
}
} // end namespace object.
} // end namespace llvm.
namespace std {
template <>
struct is_error_code_enum<llvm::object::object_error> : std::true_type {};
}
#endif | c | github | https://github.com/apple/swift | include/swift/RemoteInspection/RuntimeHeaders/llvm/Object/Error.h |
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,traceback,urlparse,random,urllib
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
from resources.lib.modules import log_utils
class source:
def __init__(self):
self.priority = 0
self.language = ['en']
self.domains = ['icefilms.info','icefilms.unblocked.pro','icefilms.unblocked.vc','icefilms.unblocked.sh']
self.base_url = 'http://icefilms.unblocked.sh'
self.search_link = urlparse.urljoin(self.base_url, 'search.php?q=%s+%s&x=0&y=0')
self.list_url = urlparse.urljoin(self.base_url, 'membersonly/components/com_iceplayer/video.php?h=374&w=631&vid=%s&img=')
self.post = 'id=%s&s=%s&iqs=&url=&m=%s&cap= &sec=%s&t=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = self.search_link % (clean_title.replace('-','+'), year)
headers = {'Host': 'http://icefilms1.unblocked.sh',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8'}
r = client.request(search_url, headers=headers)
r = dom_parser2.parse_dom(r, 'td')
r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if title.lower() in i[0].content.lower() and year in i[0].content]
url = r[0]
url = url[:-1]
url = url.split('?v=')[1]
url = self.list_url % url
return url
except:
failure = traceback.format_exc()
log_utils.log('IceFilms - Exception: \n' + str(failure))
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
clean_title = cleantitle.geturl(tvshowtitle)
search_url = self.search_link % (clean_title.replace('-','+'), year)
r = client.request(search_url, headers=self.headers)
r = dom_parser2.parse_dom(r, 'td')
r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if tvshowtitle.lower() in i[0].content.lower() and year in i[0].content]
url = r[0]
return url
except:
failure = traceback.format_exc()
log_utils.log('IceFilms - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
sep = '%dx%02d' % (int(season), int(episode))
r = client.request(url, headers=self.headers)
r = dom_parser2.parse_dom(r, 'span', attrs={'class': 'list'})
r1 = dom_parser2.parse_dom(r, 'br')
r1 = [dom_parser2.parse_dom(i, 'a', req='href') for i in r1]
try:
if int(season) == 1 and int(episode) == 1:
url = dom_parser2.parse_dom(r, 'a', req='href')[1].attrs['href']
else:
for i in r1:
if sep in i[0].content:
url = urlparse.urljoin(self.base_url, i[0].attrs['href'])
except:
pass
url = url[:-1]
url = url.split('?v=')[1]
url = self.list_url % url
return url
except:
failure = traceback.format_exc()
log_utils.log('IceFilms - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
url_for_post = url
headers = {'Host': 'http://icefilms1.unblocked.lol',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8'}
cookie = client.request(url, close=False, headers=headers, output='cookie')
html = client.request(url, close=False, headers=headers, cookie=cookie)
match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', html)
secret = ''.join(match.groups(''))
match = re.search('"&t=([^"]+)', html)
t = match.group(1)
match = re.search('(?:\s+|,)s\s*=(\d+)', html)
s_start = int(match.group(1))
match = re.search('(?:\s+|,)m\s*=(\d+)', html)
m_start = int(match.group(1))
for fragment in dom_parser2.parse_dom(html, 'div', {'class': 'ripdiv'}):
match = re.match('<b>(.*?)</b>', fragment.content)
if match:
q_str = match.group(1).replace(' ', '').upper()
if '1080' in q_str: quality = '1080p'
elif '720' in q_str: quality = '720p'
elif '4k' in q_str.lower(): quality = '4K'
else: quality = 'SD'
else:
quality = 'SD'
pattern = '''onclick='go\((\d+)\)'>([^<]+)(<span.*?)</a>'''
for match in re.finditer(pattern, fragment.content):
link_id, label, host_fragment = match.groups()
host = re.sub('(</?[^>]*>)', '', host_fragment)
info = []
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', host)[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
size = '%.2f GB' % size
info.append(size)
except:
pass
host = re.search('([a-zA-Z]+)', host)
host = host.group(1)
info = ' | '.join(info)
s = s_start + random.randint(3, 1000)
m = m_start + random.randint(21, 1000)
post = self.post % (link_id, s, m, secret, t)
headers = {'Host': 'http://icefilms1.unblocked.lol',
'Connection': 'keep-alive',
'Content-Length': '65',
'Origin': 'http://icefilms1.unblocked.lol',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Content-type': 'application/x-www-form-urlencoded',
'Accept': '*/*',
'Referer': url_for_post,
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8'}
url = urlparse.urljoin(self.base_url, 'membersonly/components/com_iceplayer/video.phpAjaxResp.php?s=%s&t=%s' % (link_id, t))
r = client.request(url, cookie=cookie, close=False, headers=headers, post=post)
match = re.search('url=(http.*)', r)
if match:
if host.lower() in str(hostDict):
url = urllib.unquote_plus(match.group(1))
sources.append({
'source': host,
'info': info,
'quality': quality,
'language': 'en',
'url': url.replace('\/','/'),
'direct': False,
'debridonly': False
})
elif host.lower() in str(hostprDict):
url = urllib.unquote_plus(match.group(1))
sources.append({
'source': host,
'info': info,
'quality': quality,
'language': 'en',
'url': url.replace('\/','/'),
'direct': False,
'debridonly': True
})
return sources
except:
failure = traceback.format_exc()
log_utils.log('IceFilms - Exception: \n' + str(failure))
return
def resolve(self, url):
return url | unknown | codeparrot/codeparrot-clean | ||
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
import json
from .common import *
from logsettings import get_logger_config
import os
from path import path
from dealer.git import git
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
############### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'celery'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
    ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
    # collectstatic will fail if STATIC_URL is a unicode string
    STATIC_URL = STATIC_URL_BASE.encode('ascii')
    if not STATIC_URL.endswith("/"):
        STATIC_URL += "/"
    # Version static assets by git revision so deploys bust browser caches.
    STATIC_URL += git.revision + "/"
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
    STATIC_ROOT = path(STATIC_ROOT_BASE) / git.revision
# Email overrides from the environment file.
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
# These three keys are required (no default): startup fails fast if missing.
SITE_NAME = ENV_TOKENS['SITE_NAME']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
    CACHES['loc_cache'] = {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'edx_location_mem_cache',
    }
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
    # NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
    SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
# Feature flags: 'FEATURES' is the current key; 'MITX_FEATURES' is the legacy
# fallback name kept for older environment files.
ENV_FEATURES = ENV_TOKENS.get('FEATURES', ENV_TOKENS.get('MITX_FEATURES', {}))
for feature, value in ENV_FEATURES.items():
    FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
    INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
                            logging_env=ENV_TOKENS['LOGGING_ENV'],
                            debug=False,
                            service_variant=SERVICE_VARIANT)
# theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
    TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
    CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
    AUTHENTICATION_BACKENDS = (
        'django.contrib.auth.backends.ModelBackend',
        'django_cas.backends.CASBackend',
    )
    INSTALLED_APPS += ('django_cas',)
    MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
    CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
    if CAS_ATTRIBUTE_CALLBACK:
        # Resolve a dotted {'module': ..., 'function': ...} spec at startup.
        import importlib
        CAS_USER_DETAILS_RESOLVER = getattr(
            importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
            CAS_ATTRIBUTE_CALLBACK['function']
        )
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
    AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
    DJFS = AUTH_TOKENS['DJFS']
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
# If Segment.io key specified, load it and turn on Segment.io if the feature flag is set
# Note that this is the Studio key. There is a separate key for the LMS.
SEGMENT_IO_KEY = AUTH_TOKENS.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
    FEATURES['SEGMENT_IO'] = ENV_TOKENS.get('SEGMENT_IO', False)
# Empty-string AWS credentials are normalized to None (i.e. "not configured").
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
    AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
    AWS_SECRET_ACCESS_KEY = None
DATABASES = AUTH_TOKENS['DATABASES']
# Migrate legacy modulestore configuration shapes to the current format.
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
    DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
                                            CELERY_BROKER_USER,
                                            CELERY_BROKER_PASSWORD,
                                            CELERY_BROKER_HOSTNAME,
                                            CELERY_BROKER_VHOST)
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS.update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get('ADVANCED_COMPONENT_TYPES', ADVANCED_COMPONENT_TYPES)
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
###
#5 Card Poker
#Brandon Lee
"""
===Program Design===
The program consists of an implementation of the game "5 Card Poker", utilizing classes.
The program utilizes a deck class to create an array of Card objects in order to simulate a
realistic gaming environment with factors such as luck as well as skill. The program uses
other modules as well as built in modules to check for winnings hands. The game is designed
to entertain a maximum of four players.
===Amount of Effort===
I believe that I put in a good amount of work. Making a working program with classes in python
was a bit new to me and took longer than I had initially expected. However, this did provide me
with a lot of knowledge on how classes can be utilized and how powerful OO programming can be
with Python. Ultimately, I would say I put in a good amount of work into making this game
as my project was not modeled towards any other existing program, but completely designed from
the ground up.
"""
#---Description---
#This program is my implementation of the game "Poker" which utilizes the standard ruleset of the 5 Hand variant.
#The goal of poker is to obtain a better hand than your opponents through strategy and luck by redrawing cards.
#This game supports multiplayer from 1 to 4 players per match.
#---Application Directions (How to Play)---
#1) Start off by inputting the total number of players, after which the game will begin
#2) A fresh deck will be shuffled and the game will commence with Player 1's turn,
# which will progress to Player 2, Player 3, and Player 4.
#3) Player 1 will be able to initiate their turn by viewing their hand and respectively
# choosing how many and which cards to discard and redraw from the deck.
#4) Player 1 will redraw any cards selected and the new hand will be displayed.
#5) Player 1's turn ends and repeat steps 3 and 4 for all the other players.
#6) Once all players have ended their turns, all players show their hands and the
# player with the best hand wins.
#7) The winner will get +1 point added to their score and the game offers players to continue playing.
#8) Once users decide to finish game, scores are added up and displayed onto a text file.
#---Poker Hand Rankings--
#1) Royal Flush
#2) Straight Flush
#3) Four of a Kind
#4) Full House
#5) Flush
#6) Straight
#7) Three of a Kind
#8) Two Pair
#9) One Pair
#10)High Card
#---Module Citations---
# Random Library - Used for randint in shuffling and in AI
# Collections Library - Used for checking winning hand combos
###
from random import randint
import collections
###################################################################################################################################
class Card:
    """One playing card: a rank string ("2"-"10", or "11"-"14" for the
    face cards Jack/Queen/King/Ace) together with a suit name."""

    def __init__(self, rankCard="defaultRank", suitCard="defaultSuit"):
        # Rank is stored as a string; "11"-"14" stand for Jack/Queen/King/Ace.
        self.rank = rankCard
        self.suit = suitCard

    def printCard(self):
        """Build and return a human-readable "<rank> of <suit>" string."""
        faceNames = {"11": "Jack", "12": "Queen", "13": "King", "14": "Ace"}
        displayRank = faceNames.get(self.rank, self.rank)
        # Cache the rendered text on the instance before returning it.
        self.cardString = displayRank + " of " + self.suit
        return self.cardString

    def getRank(self):
        """Return the rank as an integer (11-14 for face cards)."""
        return int(self.rank)

    def getSuit(self):
        """Return the suit name (e.g. "Spades")."""
        return self.suit
###################################################################################################################################
class Deck:
    """A standard 52-card deck of Card objects.

    Attributes:
        deckArray:  remaining cards; index 0 is the top of the deck
        avaliCards: number of cards still available to draw
        deckTop:    retained for backward compatibility with older callers
                    (no longer used internally)
    """

    def __init__(self):
        # 11=Jack, 12=Queen, 13=King, 14=Ace
        self.ranks = ["14", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"]
        self.suits = ["Spades", "Clubs", "Hearts", "Diamonds"]
        # One card for every (suit, rank) combination: 4 * 13 = 52.
        self.deckArray = [Card(self.ranks[i % 13], self.suits[i // 13])
                          for i in range(52)]
        self.avaliCards = 52
        self.deckTop = -1

    def shuffleDeck(self):
        """Randomize the deck by swapping each position with a random one.

        Fix: the original iterated range(51) and never used the final index
        as a swap source; all 52 positions are now covered.
        """
        for i in range(52):
            j = randint(0, 51)
            self.deckArray[i], self.deckArray[j] = self.deckArray[j], self.deckArray[i]

    def printDeck(self):
        """Print every card still available in the deck."""
        for card in self.deckArray[:self.avaliCards]:
            print(card.printCard())

    def drawCardFromTop(self):
        """Remove and return the top card of the deck.

        Fix: the original shifted all 52 slots on every draw (O(n) with a
        stale duplicate left at the end) and would silently re-deal cards
        once the deck was exhausted; an empty deck now raises instead.

        Raises:
            IndexError: if no cards remain to be drawn.
        """
        if self.avaliCards <= 0:
            raise IndexError("cannot draw from an empty deck")
        self.avaliCards -= 1
        return self.deckArray.pop(0)
###################################################################################################################################
class Player:
    """Holds a five-card poker hand, supports interactive/AI redraws, and
    scores the hand against the standard 5-card poker rankings."""

    def __init__(self):
        # Hand slots are filled by drawStartingHand(); None marks an empty
        # slot (the original stored the Card *class* as a placeholder).
        self.hand = [None] * 5
        self.handSize = 0
        self.handValue = 0

    def drawStartingHand(self, deckObject):
        """Draw the opening five cards from deckObject."""
        for _ in range(5):
            self.hand[self.handSize] = deckObject.drawCardFromTop()
            self.handSize += 1

    def printHand(self):
        """Print the hand as a numbered list (1-5)."""
        for i in range(self.handSize):
            print(i + 1, ") ", self.hand[i].printCard(), sep="")

    def reDrawCard(self, deckObj):
        """Interactively let a human player discard and replace 0-5 cards.

        Each hand slot may be replaced at most once per turn.
        """
        alreadySelected = set()
        while True:
            userInput = input("How many cards would you like to redraw? (0-5):")
            if userInput.isdigit() and 0 <= int(userInput) <= 5:
                break
            print("Please input a valid number of cards")
        for _ in range(int(userInput)):
            while True:
                whichCardInput = input("Which card do you want to remove? (1-5):")
                if (whichCardInput.isdigit() and 1 <= int(whichCardInput) <= 5
                        and whichCardInput not in alreadySelected):
                    break
                print("Please input a valid card, (1-5 with no repeating numbers)")
            # Replace the chosen card and remember the slot so it cannot be
            # redrawn twice in the same turn.
            self.hand[int(whichCardInput) - 1] = deckObj.drawCardFromTop()
            alreadySelected.add(whichCardInput)

    def reDrawCardAI(self, deckObj):
        """Simulate an AI turn: choices are random; the input() calls only
        pace the game so the human can follow along."""
        alreadySelected = set()
        input("How many cards would you like to redraw? (0-5):")
        redrawCount = randint(0, 5)
        print(redrawCount)
        for _ in range(redrawCount):
            while True:
                input("Which card do you want to remove? (1-5):")
                slot = randint(1, 5)
                print(slot)
                if str(slot) not in alreadySelected:
                    break
                print("Please input a valid card, (1-5 with no repeating numbers)")
            self.hand[slot - 1] = deckObj.drawCardFromTop()
            alreadySelected.add(str(slot))

    def checkHandCombo(self):
        """Print and score the hand's best poker combination.

        Returns an integer where stronger combinations map to strictly
        higher values (the multipliers keep ranking tiers disjoint), so
        scores from different players can be compared directly.
        """
        rankList = [self.hand[i].getRank() for i in range(self.handSize)]
        # Counter yields (rank, frequency) pairs for pair/trip/quad checks.
        counter = collections.Counter(rankList)
        rankList.sort()  # sorted ranks are required by isStraight()
        mostFreq = counter.most_common(1)[0][1]        # count of most common rank
        secondMostFreq = counter.most_common(2)[1][1]  # count of runner-up rank
        freqRank = counter.most_common(2)[0][0]        # most common rank itself
        secondFreqRank = counter.most_common(2)[1][0]  # runner-up rank itself
        # Four of a kind
        if mostFreq == 4:
            print("Four of a kind of ", self.printFace(freqRank), "'s!", sep="")
            return int(freqRank) * 10000000
        # Full house
        if mostFreq == 3 and secondMostFreq == 2:
            print("Full House of ", self.print2Faces(freqRank), " and ", self.print2Faces(secondFreqRank), "'s!", sep="")
            return int(freqRank) * 1000000 + int(secondFreqRank)
        # Royal flush / straight flush / plain flush
        if self.isFlush():
            if self.isStraight(rankList):
                if rankList[0] == 10:
                    print("ROYAL FLUSH")
                    return 1000000000
                print("Straight Flush of ", rankList[0], " to ", rankList[4], sep="")
                return rankList[4] * 100000000
            print("Flush of ", self.hand[0].getSuit(), "!", sep="")
            return rankList[4] * 100000
        # Straight
        if self.isStraight(rankList):
            print("Straight with a max of ", rankList[4], "!", sep="")
            return rankList[4] * 10000
        # Three of a kind
        if mostFreq == 3:
            print("Three of a kind of ", self.printFace(freqRank), "'s!", sep="")
            return int(freqRank) * 1000
        # Two pair
        if mostFreq == secondMostFreq and secondMostFreq == 2:
            print("Two pairs of ", self.print2Faces(freqRank), " and ", self.print2Faces(secondFreqRank), "'s!", sep="")
            return int(freqRank) * 100 + int(secondFreqRank)
        # One pair
        if mostFreq == 2:
            print("Pair of ", self.print2Faces(freqRank), "'s!", sep="")
            return int(freqRank) * 10
        # High card (only remaining possibility: every rank unique)
        print("High Card of ", self.printFace(rankList[4]), "'s!", sep="")
        return self.getMax()

    def isFlush(self):
        """Return True when all five cards share one suit."""
        firstSuit = self.hand[0].getSuit()
        return all(self.hand[i].getSuit() == firstSuit for i in range(1, 5))

    def isStraight(self, rankList):
        """Return True when the (pre-sorted) ranks are consecutive.

        Note: Ace is always treated as high (14); the A-2-3-4-5 wheel is
        not recognized, matching the original behavior.
        """
        return all(rankList[i] + 1 == rankList[i + 1] for i in range(4))

    def getMax(self):
        """Return the highest rank currently held (-1 for an empty hand)."""
        theMax = -1
        for i in range(self.handSize):
            theMax = max(theMax, self.hand[i].getRank())
        return theMax

    def printFace(self, theRank):
        """Return the display name for theRank (e.g. 11 -> "Jack").

        Bug fix: the original inspected self.getMax() instead of theRank,
        so e.g. three 5s alongside an Ace were announced as "Ace's".
        """
        return self.print2Faces(theRank)

    def print2Faces(self, theRank):
        """Return the display name for theRank; face ranks become names,
        all other ranks are returned unchanged."""
        faceNames = {11: "Jack", 12: "Queen", 13: "King", 14: "Ace"}
        return faceNames.get(theRank, theRank)
###################################################################################################################################
class Game:
"""The Game Class - Stores player scores and starts the game depending on number of players."""
def __init__(self):
#Initilize all player scores
self.player1score = 0
self.player2score = 0
self.player3score = 0
self.player4score = 0
self.playerNum = self.intro()
#-----------------------------------------------------------------------------------------------------------------------------------
def clearScreen(self):
"""Clears the screen."""
print("\n" * 100)
#-----------------------------------------------------------------------------------------------------------------------------------
def intro(self):
"""Introduction/Instructions on how to play '5 Card Poker' Also prompts user for number of players and executes game."""
print(" WELCOME TO 5 CARD POKER")
print("\t===HOW TO PLAY===")
print("\t---Description---")
print("This program is my implementation of the game 'Poker' which utilizes the standard ruleset of the 5 Hand variant.")
print("The goal of poker is to obtain a better hand than your opponents through strategy and luck by redrawing cards.")
print("This game supports multiplayer from 1 to 4 players per match.")
print("")
while(True):
playerNum = input("Please enter the number of players(1-4), or 5 for hand rankings, or '0' to quit: ")
if(playerNum.isdigit() and playerNum =="5"):
print("\t---Hand Rankings---")
print("01) Royal Flush")
print("02) Straight Flush")
print("03) Four of a Kind")
print("04) Full House")
print("05) Flush")
print("06) Straight")
print("07) Three of a Kind")
print("08) Two Pair")
print("09) One Pair")
print("10) High Card")
if(playerNum.isdigit() and playerNum == "1" or playerNum == "2" or playerNum == "3" or playerNum == "4"):
self.startGame(int(playerNum))
if(playerNum.isdigit() and playerNum == "0"):
print(" ***Thanks for Playing!***")
print("Your scores will be displayed in pokerScores.txt!")
scoreFile = open("pokerScores.txt", "w")
content1 = "Poker Scores:\nPlayer 1 Score: " + str(self.player1score) + "\nPlayer 2 Score: " + str(self.player2score)
content2 = "\nPlayer 3 Score: " + str(self.player3score) + "\nPlayer 4 Score: " + str(self.player4score)
content3 = content1 + content2
scoreFile.write(content3)
return
else:
print("Your input was not valid.")
#-----------------------------------------------------------------------------------------------------------------------------------
def startGame(self, playerNum):
"""Initiates game according to the number of players."""
if(playerNum == 1):
self.onePlayerGame()
return
if(playerNum == 2):
self.twoPlayerGame()
return
if(playerNum == 3):
self.threePlayerGame()
return
if(playerNum == 4):
self.fourPlayerGame()
return
#-----------------------------------------------------------------------------------------------------------------------------------
def onePlayerGame(self):
"""One player Round."""
print("\t===ONE PLAYER GAME VS EASY AI===")
#Create and shuffle deck
theDeck = Deck()
theDeck.shuffleDeck()
#Initilize players
player1 = Player()
player2 = Player()
#Draw cards
player1.drawStartingHand(theDeck)
player2.drawStartingHand(theDeck)
#Player 1's turn
input("Player 1's Turn, please press enter to continue..")
self.clearScreen()
player1.printHand()
player1.reDrawCard(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player1.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#Player 2's Turn
input("Player 2's Turn (AI), please press enter to continue..")
player2.printHand()
player2.reDrawCardAI(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player2.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#End Round
input("Please press enter to showdown..")
self.clearScreen()
print("Player 1 has: ", end="")
p1Score = player1.checkHandCombo()
print("Player 2 has: ", end="")
p2Score = player2.checkHandCombo()
theWinner = self.showDown(p1Score, p2Score)
self.winScreen(theWinner)
return
#-----------------------------------------------------------------------------------------------------------------------------------
def twoPlayerGame(self):
"""Standard Two Player Round."""
print("\t===TWO PLAYER GAME===")
#Create and shuffle deck
theDeck = Deck()
theDeck.shuffleDeck()
#Initilize players
player1 = Player()
player2 = Player()
#Draw cards
player1.drawStartingHand(theDeck)
player2.drawStartingHand(theDeck)
#Player 1's turn
input("Player 1's Turn, please press enter to continue..")
self.clearScreen()
player1.printHand()
player1.reDrawCard(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player1.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#Player 2's Turn
input("Player 2's Turn, please press enter to continue..")
player2.printHand()
player2.reDrawCard(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player2.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#End Round
input("Please press enter to showdown..")
self.clearScreen()
print("Player 1 has: ", end="")
p1Score = player1.checkHandCombo()
print("Player 2 has: ", end="")
p2Score = player2.checkHandCombo()
theWinner = self.showDown(p1Score, p2Score)
self.winScreen(theWinner)
return
#-----------------------------------------------------------------------------------------------------------------------------------
def threePlayerGame(self):
"""Standard Three Player Round."""
print("\t===THREE PLAYER GAME===")
#Create and shuffle deck
theDeck = Deck()
theDeck.shuffleDeck()
#Initilize players
player1 = Player()
player2 = Player()
player3 = Player()
#Draw cards
player1.drawStartingHand(theDeck)
player2.drawStartingHand(theDeck)
player3.drawStartingHand(theDeck)
#Player 1's turn
input("Player 1's Turn, please press enter to continue..")
self.clearScreen()
player1.printHand()
player1.reDrawCard(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player1.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#Player 2's Turn
input("Player 2's Turn, please press enter to continue..")
player2.printHand()
player2.reDrawCard(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player2.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#Player 3's Turn
input("Player 3's Turn, please press enter to continue..")
player3.printHand()
player3.reDrawCard(theDeck)
print("----------------------------------------------------")
print("Your redrawn hand:")
player3.printHand()
input("Please press enter to finish turn")
self.clearScreen()
#End Round
input("Please press enter to showdown..")
self.clearScreen()
print("Player 1 has: ", end="")
p1Score = player1.checkHandCombo()
print("Player 2 has: ", end="")
p2Score = player2.checkHandCombo()
print("Player 3 has: ", end="")
p3Score = player3.checkHandCombo()
theWinner = self.showDown(p1Score, p2Score, p3Score)
self.winScreen(theWinner)
return
#-----------------------------------------------------------------------------------------------------------------------------------
def fourPlayerGame(self):
    """Standard four player round: deal, let each player redraw once, then showdown.

    Creates a fresh shuffled deck, deals starting hands to four players,
    gives each player one redraw turn, then scores every hand and reports
    the winner.  Unlike the copy-pasted original, every turn now clears
    the screen right after the turn prompt (the original only did this
    for player 1), so no player can see the previous player's output.
    """
    print("\t===FOUR PLAYER GAME===")
    # Create and shuffle deck
    theDeck = Deck()
    theDeck.shuffleDeck()
    # Initialize players and deal starting hands
    players = [Player(), Player(), Player(), Player()]
    for player in players:
        player.drawStartingHand(theDeck)

    def takeTurn(number, player):
        # One redraw turn for a single player; clearing the screen first
        # keeps prior output hidden from the player at the keyboard.
        input("Player %d's Turn, please press enter to continue.." % number)
        self.clearScreen()
        player.printHand()
        player.reDrawCard(theDeck)
        print("----------------------------------------------------")
        print("Your redrawn hand:")
        player.printHand()
        input("Please press enter to finish turn")
        self.clearScreen()

    for number, player in enumerate(players, start=1):
        takeTurn(number, player)
    # End round: score every hand and announce the winner
    input("Please press enter to showdown..")
    self.clearScreen()
    scores = []
    for number, player in enumerate(players, start=1):
        # checkHandCombo prints the combo name and returns its score.
        print("Player %d has: " % number, end="")
        scores.append(player.checkHandCombo())
    theWinner = self.showDown(*scores)
    self.winScreen(theWinner)
    return
#-----------------------------------------------------------------------------------------------------------------------------------
def showDown(self, p1Score=0, p2Score=0, p3Score=0, p4Score=0):
    """Pick the round winner from up to four hand scores.

    Returns the 0-based index of the highest score, or None when the
    highest score is shared by more than one player (a tie).  Unused
    player slots default to 0 so two- and three-player rounds can call
    this with fewer arguments.
    """
    scores = [p1Score, p2Score, p3Score, p4Score]
    best = max(scores)
    # A tie only matters when the *top* score is duplicated; duplicates
    # among the losing scores are irrelevant.
    if scores.count(best) > 1:
        return None
    return scores.index(best)
#-----------------------------------------------------------------------------------------------------------------------------------
def winScreen(self, theWinner):
    """Announce the round result and update the running score.

    theWinner is the 0-based player index returned by showDown(), or
    None when the round was a tie (no score is awarded).  Uses an
    if/elif chain and an identity check for None (the original compared
    with `== None` and re-tested every branch).
    """
    if theWinner is None:
        print("Tie! No winner will be awarded")
    elif theWinner == 0:
        self.player1score += 1
        print("Player 1 Wins! Total Score is: ", self.player1score)
    elif theWinner == 1:
        self.player2score += 1
        print("Player 2 Wins! Total Score is: ", self.player2score)
    elif theWinner == 2:
        self.player3score += 1
        print("Player 3 Wins! Total Score is: ", self.player3score)
    elif theWinner == 3:
        self.player4score += 1
        print("Player 4 Wins! Total Score is: ", self.player4score)
    return
###################################################################################################################################
def main():
    """Entry point: construct the Game object.

    NOTE(review): Game() presumably launches the menu/game loop from its
    constructor, since the result is never used — confirm against the
    Game class definition.
    """
    theGame = Game()


if __name__ == "__main__":
    # Guard the call so importing this module does not immediately
    # start the game (the original called main() unconditionally).
    main()
{
"Id": "sha256:1332879bc8e38793a45ebe5a750f2a1c35df07ec2aa9c18f694644a9de77359b",
"RepoTags": [
"cloudfoundry/run:base-cnb"
],
"RepoDigests": [
"cloudfoundry/run@sha256:fb5ecb90a42b2067a859aab23fc1f5e9d9c2589d07ba285608879e7baa415aad"
],
"Parent": "",
"Comment": "",
"Created": "2020-03-20T20:18:18.117972538Z",
"Container": "91d1af87c3bb6163cd9c7cb21e6891cd25f5fa3c7417779047776e288c0bc234",
"ContainerConfig": {
"Hostname": "91d1af87c3bb",
"Domainname": "",
"User": "1000:1000",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/sh",
"-c",
"#(nop) ",
"LABEL io.buildpacks.stack.id=io.buildpacks.stacks.bionic"
],
"ArgsEscaped": true,
"Image": "sha256:fbe314bcb23f15a2a09603b6620acd67c332fd08fbf2a7bc3db8fb2f5078d994",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
"io.buildpacks.stack.id": "io.buildpacks.stacks.bionic"
}
},
"DockerVersion": "18.09.6",
"Author": "",
"Config": {
"Hostname": "",
"Domainname": "",
"User": "1000:1000",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/bash"
],
"ArgsEscaped": true,
"Image": "sha256:fbe314bcb23f15a2a09603b6620acd67c332fd08fbf2a7bc3db8fb2f5078d994",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
"io.buildpacks.stack.id": "io.buildpacks.stacks.bionic"
}
},
"Architecture": "arm64",
"Os": "linux",
"Variant": "v1",
"Size": 71248531,
"VirtualSize": 71248531,
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/17f0a4530fbc3e2982f9dc8feb8c8ddc124473bdd50130dae20856ac597d82dd/diff:/var/lib/docker/overlay2/73dfd4e2075fccb239b3d5e9b33b32b8e410bdc3cd5a620b41346f44cc5c51f7/diff:/var/lib/docker/overlay2/b3924ed7c91730f6714d33c455db888604b59ab093033b3f59ac16ecdd777987/diff:/var/lib/docker/overlay2/e36a32cd0ab20b216a8db1a8a166b17464399e4d587d22504088a7a6ef0a68a4/diff:/var/lib/docker/overlay2/3334e94fe191333b65f571912c0fcfbbf31aeb090a2fb9b4cfdbc32a37c0fe5f/diff",
"MergedDir": "/var/lib/docker/overlay2/8d3f9e3c00bc5072f8051ec7884500ca394f2331d8bcc9452f68d04531f50f82/merged",
"UpperDir": "/var/lib/docker/overlay2/8d3f9e3c00bc5072f8051ec7884500ca394f2331d8bcc9452f68d04531f50f82/diff",
"WorkDir": "/var/lib/docker/overlay2/8d3f9e3c00bc5072f8051ec7884500ca394f2331d8bcc9452f68d04531f50f82/work"
},
"Name": "overlay2"
},
"RootFS": {
"Type": "layers",
"Layers": [
"sha256:c8be1b8f4d60d99c281fc2db75e0f56df42a83ad2f0b091621ce19357e19d853",
"sha256:977183d4e9995d9cd5ffdfc0f29e911ec9de777bcb0f507895daa1068477f76f",
"sha256:6597da2e2e52f4d438ad49a14ca79324f130a9ea08745505aa174a8db51cb79d",
"sha256:16542a8fc3be1bfaff6ed1daa7922e7c3b47b6c3a8d98b7fca58b9517bb99b75",
"sha256:c1daeb79beb276c7441d9a1d7281433e9a7edb9f652b8996ecc62b51e88a47b2",
"sha256:eb195d29dc1aa6e4239f00e7868deebc5ac12bebe76104e0b774c1ef29ca78e3"
]
},
"Metadata": {
"LastTagTime": "0001-01-01T00:00:00Z"
}
} | json | github | https://github.com/spring-projects/spring-boot | buildpack/spring-boot-buildpack-platform/src/test/resources/org/springframework/boot/buildpack/platform/build/run-image-with-platform.json |
from Components.PerServiceDisplay import PerServiceBase
from enigma import iPlayableService
from Source import Source
from Components.Element import cached
import NavigationInstance
class CurrentService(PerServiceBase, Source):
    """Source tracking the service currently played by the navigation core.

    Forwards a fixed set of iPlayableService events to listeners via
    changed() and exposes the current service / service reference as
    cached properties.
    """

    def __init__(self, navcore):
        Source.__init__(self)
        # Every event we care about is routed to the same handler, so
        # the event map is built with dict.fromkeys.
        # FIXME: we should check 'interesting_events'
        # which is not always provided.
        interesting = (
            iPlayableService.evStart,
            iPlayableService.evEnd,
            iPlayableService.evUpdatedInfo,
            iPlayableService.evUpdatedEventInfo,
            iPlayableService.evNewProgramInfo,
            iPlayableService.evCuesheetChanged,
            iPlayableService.evVideoSizeChanged,
            iPlayableService.evHBBTVInfo,
        )
        PerServiceBase.__init__(
            self, navcore,
            dict.fromkeys(interesting, self.serviceEvent),
            with_event=True)
        self.navcore = navcore

    def serviceEvent(self, event):
        # Propagate the raw event id to subscribers of this source.
        self.changed((self.CHANGED_SPECIFIC, event))

    @cached
    def getCurrentService(self):
        return self.navcore.getCurrentService()

    service = property(getCurrentService)

    @cached
    def getCurrentServiceRef(self):
        # Falls back to the global navigation instance; None before the
        # navigation subsystem is initialised.
        instance = NavigationInstance.instance
        if instance is None:
            return None
        return instance.getCurrentlyPlayingServiceOrGroup()

    serviceref = property(getCurrentServiceRef)

    def destroy(self):
        PerServiceBase.destroy(self)
        Source.destroy(self)
{
"PUBLISH": {
"summary": "Posts a message to a channel.",
"complexity": "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client).",
"group": "pubsub",
"since": "2.0.0",
"arity": 3,
"function": "publishCommand",
"command_flags": [
"PUBSUB",
"LOADING",
"STALE",
"FAST",
"MAY_REPLICATE",
"SENTINEL"
],
"arguments": [
{
"name": "channel",
"type": "string"
},
{
"name": "message",
"type": "string"
}
],
"reply_schema": {
"description": "the number of clients that received the message. Note that in a Redis Cluster, only clients that are connected to the same node as the publishing client are included in the count",
"type": "integer",
"minimum": 0
}
}
} | json | github | https://github.com/redis/redis | src/commands/publish.json |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_LLVM_INCLUDEORDERCHECK_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_LLVM_INCLUDEORDERCHECK_H
#include "../ClangTidyCheck.h"
namespace clang::tidy::llvm_check {
/// Checks the correct order of `#includes`.
///
/// See https://llvm.org/docs/CodingStandards.html#include-style
class IncludeOrderCheck : public ClangTidyCheck {
public:
  IncludeOrderCheck(StringRef Name, ClangTidyContext *Context)
      : ClangTidyCheck(Name, Context) {}
  /// Registers preprocessor callbacks for this check; note the check
  /// declares no registerMatchers, so it works purely at the
  /// preprocessing stage.
  void registerPPCallbacks(const SourceManager &SM, Preprocessor *PP,
                           Preprocessor *ModuleExpanderPP) override;
};
} // namespace clang::tidy::llvm_check
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_LLVM_INCLUDEORDERCHECK_H | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/llvm/IncludeOrderCheck.h |
import re
from django import template
from social.backends.oauth import OAuthAuth
# Template tag/filter registry for this module; all functions below are
# registered on it.
register = template.Library()
# Matches a non-'O' character followed by "Auth" (so "OAuth" is left
# alone) — used by backend_name to insert a space before "Auth".
name_re = re.compile(r'([^O])Auth')
@register.filter
def backend_name(backend):
    """Return a human-readable display name for an auth backend instance."""
    pretty = backend.__class__.__name__
    # Same replacement sequence as before, driven by a table.
    for old, new in (('OAuth', ' OAuth'), ('OpenId', ' OpenId'), ('Sandbox', '')):
        pretty = pretty.replace(old, new)
    # Split a trailing "Auth" off the name unless it is part of "OAuth".
    return name_re.sub(r'\1 Auth', pretty)
@register.filter
def backend_class(backend):
    """CSS-friendly form of the backend name (dashes become spaces)."""
    return backend.name.replace('-', ' ')
@register.filter
def icon_name(name):
    """Map a backend name to its icon name for templates."""
    overrides = {
        'stackoverflow': 'stack-overflow',
        'google-oauth': 'google',
        'google-oauth2': 'google',
        'google-openidconnect': 'google',
        'yahoo-oauth': 'yahoo',
        'facebook-app': 'facebook',
        'email': 'envelope',
        'vimeo': 'vimeo-square',
        'linkedin-oauth2': 'linkedin',
        'vk-oauth2': 'vk',
        'live': 'windows',
        'username': 'user',
    }
    # Backends without a special icon reuse their own name.
    return overrides.get(name, name)
@register.filter
def social_backends(backends):
    """All social (non username/email) backends, sorted by name, in rows of ten."""
    social = sorted(
        (name, backend) for name, backend in backends.items()
        if name not in ('username', 'email')
    )
    # Chunk into sub-lists of at most 10 entries for the template grid.
    return [social[start:start + 10] for start in range(0, len(social), 10)]
@register.filter
def legacy_backends(backends):
    """Only the username/email backends, sorted by name."""
    return sorted(
        (name, backend) for name, backend in backends.items()
        if name in ('username', 'email')
    )
@register.filter
def oauth_backends(backends):
    """Backends implemented on top of OAuth, sorted by name."""
    return sorted(
        (name, backend) for name, backend in backends.items()
        if issubclass(backend, OAuthAuth)
    )
@register.simple_tag(takes_context=True)
def associated(context, backend):
    """Store the user's association with `backend` (or None) in the context.

    Always returns an empty string so the tag itself renders nothing.
    """
    context['association'] = None
    user = context.get('user')
    if not (user and user.is_authenticated()):
        return ''
    matches = user.social_auth.filter(provider=backend.name)
    try:
        context['association'] = matches[0]
    except IndexError:
        # No association for this backend; leave the None default.
        pass
    return ''
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2
from google.cloud.dataproc_v1beta2.proto import jobs_pb2
from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2
from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import any_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
# Messages living in shared (non-dataproc) proto modules; re-exported
# here without rewriting their __module__.
_shared_modules = [
    longrunning_operations_pb2,
    any_pb2,
    duration_pb2,
    empty_pb2,
    field_mask_pb2,
    timestamp_pb2,
    status_pb2,
]
# Dataproc-specific proto modules whose messages are presented as part
# of this types namespace.
_local_modules = [
    autoscaling_policies_pb2,
    clusters_pb2,
    jobs_pb2,
    proto_operations_pb2,
    workflow_templates_pb2,
]
# Collected exported names; becomes __all__ below.
names = []
for module in _shared_modules:  # pragma: NO COVER
    # Re-export every message class from the shared modules at this
    # module's top level.
    for name, message in get_messages(module).items():
        setattr(sys.modules[__name__], name, message)
        names.append(name)
for module in _local_modules:
    for name, message in get_messages(module).items():
        # Rebrand local messages so reprs/docs point at this package
        # rather than the generated _pb2 module.
        message.__module__ = "google.cloud.dataproc_v1beta2.types"
        setattr(sys.modules[__name__], name, message)
        names.append(name)
__all__ = tuple(sorted(names))
/**
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import * as t from '@babel/types';
import {ZodError, z} from 'zod/v4';
import {fromZodError} from 'zod-validation-error/v4';
import {CompilerError} from '../CompilerError';
import {CompilerOutputMode, Logger, ProgramContext} from '../Entrypoint';
import {Err, Ok, Result} from '../Utils/Result';
import {
DEFAULT_GLOBALS,
DEFAULT_SHAPES,
Global,
GlobalRegistry,
getReanimatedModuleType,
installTypeConfig,
} from './Globals';
import {
BlockId,
BuiltInType,
Effect,
FunctionType,
GeneratedSource,
HIRFunction,
IdentifierId,
NonLocalBinding,
PolyType,
ScopeId,
SourceLocation,
Type,
ValidatedIdentifier,
ValueKind,
getHookKindForType,
makeBlockId,
makeIdentifierId,
makeIdentifierName,
makeScopeId,
} from './HIR';
import {
BuiltInMixedReadonlyId,
DefaultMutatingHook,
DefaultNonmutatingHook,
FunctionSignature,
ShapeRegistry,
addHook,
} from './ObjectShape';
import {Scope as BabelScope, NodePath} from '@babel/traverse';
import {TypeSchema} from './TypeSchema';
import {FlowTypeEnv} from '../Flood/Types';
import {defaultModuleTypeProvider} from './DefaultModuleTypeProvider';
import {assertExhaustive} from '../Utils/utils';
/**
 * Shape of the `inlineJsxTransform` option (see EnvironmentConfigSchema):
 * which ReactElement symbol to emit and the global dev-mode variable to
 * reference in generated code.
 */
export const ReactElementSymbolSchema = z.object({
  elementSymbol: z.union([
    z.literal('react.element'),
    z.literal('react.transitional.element'),
  ]),
  globalDevVar: z.string(),
});
/**
 * Reference to a function exported from an external module: the module
 * to import from and the name of the import specifier.
 */
export const ExternalFunctionSchema = z.object({
  // Source for the imported module that exports the `importSpecifierName` functions
  source: z.string(),
  // Unique name for the feature flag test condition, eg `isForgetEnabled_ProjectName`
  importSpecifierName: z.string(),
});
/**
 * Instrumentation codegen config (used by `enableEmitInstrumentForget`):
 * the instrumentation function plus at least one gating mechanism —
 * either an imported gating function or a global flag — is required,
 * enforced by the refinement below.
 */
export const InstrumentationSchema = z
  .object({
    fn: ExternalFunctionSchema,
    gating: ExternalFunctionSchema.nullable(),
    globalGating: z.string().nullable(),
  })
  .refine(
    opts => opts.gating != null || opts.globalGating != null,
    'Expected at least one of gating or globalGating',
  );
export type ExternalFunction = z.infer<typeof ExternalFunctionSchema>;
// NOTE(review): presumably the hook name recognized by the `enableFire`
// feature — confirm at use sites.
export const USE_FIRE_FUNCTION_NAME = 'useFire';
// Global flag name used to gate emitFreeze output to dev-mode builds.
export const EMIT_FREEZE_GLOBAL_GATING = '__DEV__';
// Macros are referenced by plain name (see the `customMacros` option).
export const MacroSchema = z.string();
export type CompilerMode = 'all_features' | 'no_inferred_memo';
export type Macro = z.infer<typeof MacroSchema>;
/**
 * Schema for a user-supplied description of a custom hook's behavior.
 * Values of this shape are provided through the `customHooks` environment
 * option (a map keyed by hook name).
 */
const HookSchema = z.object({
  /*
   * The effect of arguments to this hook. Describes whether the hook may or may
   * not mutate arguments, etc.
   */
  effectKind: z.nativeEnum(Effect),
  /*
   * The kind of value returned by the hook. Allows indicating that a hook returns
   * a primitive or already-frozen value, which can allow more precise memoization
   * of callers.
   */
  valueKind: z.nativeEnum(ValueKind),
  /*
   * Specifies whether hook arguments may be aliased by other arguments or by the
   * return value of the function. Defaults to false. When enabled, this allows the
   * compiler to avoid memoizing arguments.
   */
  noAlias: z.boolean().default(false),
  /*
   * Specifies whether the hook returns data that is composed of:
   * - undefined
   * - null
   * - boolean
   * - number
   * - string
   * - arrays whose items are also transitiveMixed
   * - objects whose values are also transitiveMixed
   *
   * Many state management and data-fetching APIs return data that meets
   * this criteria since this is JSON + undefined. Forget can compile
   * hooks that return transitively mixed data more optimally because it
   * can make inferences about some method calls (especially array methods
   * like `data.items.map(...)` since these builtin types have few built-in
   * methods.
   */
  transitiveMixedData: z.boolean().default(false),
});
export type Hook = z.infer<typeof HookSchema>;
/*
* TODO(mofeiZ): User defined global types (with corresponding shapes).
* User defined global types should have inline ObjectShapes instead of directly
* using ObjectShapes.ShapeRegistry, as a user-provided ShapeRegistry may be
* accidentally be not well formed.
* i.e.
* missing required shapes (BuiltInArray for [] and BuiltInObject for {})
* missing some recursive Object / Function shapeIds
*/
export const EnvironmentConfigSchema = z.object({
customHooks: z.map(z.string(), HookSchema).default(new Map()),
/**
* A function that, given the name of a module, can optionally return a description
* of that module's type signature.
*/
moduleTypeProvider: z.nullable(z.any()).default(null),
/**
* A list of functions which the application compiles as macros, where
* the compiler must ensure they are not compiled to rename the macro or separate the
* "function" from its argument.
*
* For example, Meta has some APIs such as `featureflag("name-of-feature-flag")` which
* are rewritten by a plugin. Assigning `featureflag` to a temporary would break the
* plugin since it looks specifically for the name of the function being invoked, not
* following aliases.
*/
customMacros: z.nullable(z.array(MacroSchema)).default(null),
/**
* Enable a check that resets the memoization cache when the source code of
* the file changes. This is intended to support hot module reloading (HMR),
* where the same runtime component instance will be reused across different
* versions of the component source.
*
* When set to
* - true: code for HMR support is always generated, regardless of NODE_ENV
* or `globalThis.__DEV__`
* - false: code for HMR support is not generated
* - null: (default) code for HMR support is conditionally generated dependent
* on `NODE_ENV` and `globalThis.__DEV__` at the time of compilation.
*/
enableResetCacheOnSourceFileChanges: z.nullable(z.boolean()).default(null),
/**
* Enable using information from existing useMemo/useCallback to understand when a value is done
* being mutated. With this mode enabled, Forget will still discard the actual useMemo/useCallback
* calls and may memoize slightly differently. However, it will assume that the values produced
* are not subsequently modified, guaranteeing that the value will be memoized.
*
* By preserving guarantees about when values are memoized, this option preserves any existing
* behavior that depends on referential equality in the original program. Notably, this preserves
* existing effect behavior (how often effects fire) for effects that rely on referential equality.
*
* When disabled, Forget will not only prune useMemo and useCallback calls but also completely ignore
* them, not using any information from them to guide compilation. Therefore, disabling this flag
* will produce output that mimics the result from removing all memoization.
*
* Our recommendation is to first try running your application with this flag enabled, then attempt
* to disable this flag and see what changes or breaks. This will mostly likely be effects that
* depend on referential equality, which can be refactored (TODO guide for this).
*
* NOTE: this mode treats freeze as a transitive operation for function expressions. This means
* that if a useEffect or useCallback references a function value, that function value will be
* considered frozen, and in turn all of its referenced variables will be considered frozen as well.
*/
enablePreserveExistingMemoizationGuarantees: z.boolean().default(true),
/**
* Validates that all useMemo/useCallback values are also memoized by Forget. This mode can be
* used with or without @enablePreserveExistingMemoizationGuarantees.
*
* With enablePreserveExistingMemoizationGuarantees, this validation enables automatically and
* verifies that Forget was able to preserve manual memoization semantics under that mode's
* additional assumptions about the input.
*
* With enablePreserveExistingMemoizationGuarantees off, this validation ignores manual memoization
* when determining program behavior, and only uses information from useMemo/useCallback to check
* that the memoization was preserved. This can be useful for determining where referential equalities
* may change under Forget.
*/
validatePreserveExistingMemoizationGuarantees: z.boolean().default(true),
/**
* Validate that dependencies supplied to manual memoization calls are exhaustive.
*/
validateExhaustiveMemoizationDependencies: z.boolean().default(true),
/**
* Validate that dependencies supplied to effect hooks are exhaustive.
* Can be:
* - 'off': No validation (default)
* - 'all': Validate and report both missing and extra dependencies
* - 'missing-only': Only report missing dependencies
* - 'extra-only': Only report extra/unnecessary dependencies
*/
validateExhaustiveEffectDependencies: z
.enum(['off', 'all', 'missing-only', 'extra-only'])
.default('off'),
/**
* When this is true, rather than pruning existing manual memoization but ensuring or validating
* that the memoized values remain memoized, the compiler will simply not prune existing calls to
* useMemo/useCallback.
*/
enablePreserveExistingManualUseMemo: z.boolean().default(false),
// 🌲
enableForest: z.boolean().default(false),
/**
* Enable use of type annotations in the source to drive type inference. By default
 * Forget attempts to infer types using only information that is guaranteed correct
* given the source, and does not trust user-supplied type annotations. This mode
* enables trusting user type annotations.
*/
enableUseTypeAnnotations: z.boolean().default(false),
/**
* Allows specifying a function that can populate HIR with type information from
* Flow
*/
flowTypeProvider: z.nullable(z.any()).default(null),
/**
* Enables inference of optional dependency chains. Without this flag
* a property chain such as `props?.items?.foo` will infer as a dep on
* just `props`. With this flag enabled, we'll infer that full path as
* the dependency.
*/
enableOptionalDependencies: z.boolean().default(true),
enableFire: z.boolean().default(false),
enableNameAnonymousFunctions: z.boolean().default(false),
/**
* Enables inference and auto-insertion of effect dependencies. Takes in an array of
* configurable module and import pairs to allow for user-land experimentation. For example,
* [
* {
* module: 'react',
* imported: 'useEffect',
* autodepsIndex: 1,
* },{
* module: 'MyExperimentalEffectHooks',
* imported: 'useExperimentalEffect',
* autodepsIndex: 2,
* },
* ]
* would insert dependencies for calls of `useEffect` imported from `react` and calls of
* useExperimentalEffect` from `MyExperimentalEffectHooks`.
*
* `autodepsIndex` tells the compiler which index we expect the AUTODEPS to appear in.
* With the configuration above, we'd insert dependencies for `useEffect` if it has two
* arguments, and the second is AUTODEPS.
*
* Still experimental.
*/
inferEffectDependencies: z
.nullable(
z.array(
z.object({
function: ExternalFunctionSchema,
autodepsIndex: z.number().min(1, 'autodepsIndex must be > 0'),
}),
),
)
.default(null),
/**
* Enables inlining ReactElement object literals in place of JSX
* An alternative to the standard JSX transform which replaces JSX with React's jsxProd() runtime
* Currently a prod-only optimization, requiring Fast JSX dependencies
*
 * The symbol configuration is set for backward compatibility with pre-React 19 transforms
*/
inlineJsxTransform: ReactElementSymbolSchema.nullable().default(null),
/*
* Enable validation of hooks to partially check that the component honors the rules of hooks.
* When disabled, the component is assumed to follow the rules (though the Babel plugin looks
* for suppressions of the lint rule).
*/
validateHooksUsage: z.boolean().default(true),
// Validate that ref values (`ref.current`) are not accessed during render.
validateRefAccessDuringRender: z.boolean().default(true),
/*
* Validates that setState is not unconditionally called during render, as it can lead to
* infinite loops.
*/
validateNoSetStateInRender: z.boolean().default(true),
/**
* When enabled, changes the behavior of validateNoSetStateInRender to recommend
* using useKeyedState instead of the manual pattern for resetting state.
*/
enableUseKeyedState: z.boolean().default(false),
/**
* Validates that setState is not called synchronously within an effect (useEffect and friends).
* Scheduling a setState (with an event listener, subscription, etc) is valid.
*/
validateNoSetStateInEffects: z.boolean().default(false),
/**
* Validates that effects are not used to calculate derived data which could instead be computed
* during render.
*/
validateNoDerivedComputationsInEffects: z.boolean().default(false),
/**
* Experimental: Validates that effects are not used to calculate derived data which could instead be computed
* during render. Generates a custom error message for each type of violation.
*/
validateNoDerivedComputationsInEffects_exp: z.boolean().default(false),
/**
* Validates against creating JSX within a try block and recommends using an error boundary
* instead.
*/
validateNoJSXInTryStatements: z.boolean().default(false),
/**
* Validates against dynamically creating components during render.
*/
validateStaticComponents: z.boolean().default(false),
/**
* Validates that the dependencies of all effect hooks are memoized. This helps ensure
* that Forget does not introduce infinite renders caused by a dependency changing,
* triggering an effect, which triggers re-rendering, which causes a dependency to change,
* triggering the effect, etc.
*
* Covers useEffect, useLayoutEffect, useInsertionEffect.
*/
validateMemoizedEffectDependencies: z.boolean().default(false),
/**
* Validates that there are no capitalized calls other than those allowed by the allowlist.
* Calls to capitalized functions are often functions that used to be components and may
* have lingering hook calls, which makes those calls risky to memoize.
*
* You can specify a list of capitalized calls to allowlist using this option. React Compiler
* always includes its known global functions, including common functions like Boolean and String,
* in this allowlist. You can enable this validation with no additional allowlisted calls by setting
* this option to the empty array.
*/
validateNoCapitalizedCalls: z.nullable(z.array(z.string())).default(null),
validateBlocklistedImports: z.nullable(z.array(z.string())).default(null),
/**
* Validates that AST nodes generated during codegen have proper source locations.
* This is useful for debugging issues with source maps and Istanbul coverage.
* When enabled, the compiler will error if important source locations are missing in the generated AST.
*/
validateSourceLocations: z.boolean().default(false),
/**
* Validate against impure functions called during render
*/
validateNoImpureFunctionsInRender: z.boolean().default(false),
/**
* Validate against passing mutable functions to hooks
*/
validateNoFreezingKnownMutableFunctions: z.boolean().default(false),
/*
* When enabled, the compiler assumes that hooks follow the Rules of React:
* - Hooks may memoize computation based on any of their parameters, thus
* any arguments to a hook are assumed frozen after calling the hook.
* - Hooks may memoize the result they return, thus the return value is
* assumed frozen.
*/
enableAssumeHooksFollowRulesOfReact: z.boolean().default(true),
/**
* When enabled, the compiler assumes that any values are not subsequently
* modified after they are captured by a function passed to React. For example,
* if a value `x` is referenced inside a function expression passed to `useEffect`,
 * then this flag will assume that `x` is not subsequently modified.
*/
enableTransitivelyFreezeFunctionExpressions: z.boolean().default(true),
/*
* Enables codegen mutability debugging. This emits a dev-mode only to log mutations
* to values that Forget assumes are immutable (for Forget compiled code).
* For example:
* emitFreeze: {
* source: 'ReactForgetRuntime',
* importSpecifierName: 'makeReadOnly',
* }
*
* produces:
* import {makeReadOnly} from 'ReactForgetRuntime';
*
* function Component(props) {
* if (c_0) {
* // ...
* $[0] = __DEV__ ? makeReadOnly(x) : x;
* } else {
* x = $[0];
* }
* }
*/
enableEmitFreeze: ExternalFunctionSchema.nullable().default(null),
enableEmitHookGuards: ExternalFunctionSchema.nullable().default(null),
/**
* Enable instruction reordering. See InstructionReordering.ts for the details
* of the approach.
*/
enableInstructionReordering: z.boolean().default(false),
/**
 * Enables function outlining, where anonymous functions that do not close over
* local variables can be extracted into top-level helper functions.
*/
enableFunctionOutlining: z.boolean().default(true),
/**
* If enabled, this will outline nested JSX into a separate component.
*
* This will enable the compiler to memoize the separate component, giving us
* the same behavior as compiling _within_ the callback.
*
* ```
* function Component(countries, onDelete) {
* const name = useFoo();
* return countries.map(() => {
* return (
* <Foo>
* <Bar>{name}</Bar>
* <Button onclick={onDelete}>delete</Button>
* </Foo>
* );
* });
* }
* ```
*
* will be transpiled to:
*
* ```
* function Component(countries, onDelete) {
* const name = useFoo();
* return countries.map(() => {
* return (
* <Temp name={name} onDelete={onDelete} />
* );
* });
* }
*
* function Temp({name, onDelete}) {
* return (
* <Foo>
* <Bar>{name}</Bar>
* <Button onclick={onDelete}>delete</Button>
* </Foo>
* );
* }
*
* Both, `Component` and `Temp` will then be memoized by the compiler.
*
* With this change, when `countries` is updated by adding one single value,
* only the newly added value is re-rendered and not the entire list.
*/
enableJsxOutlining: z.boolean().default(false),
/*
* Enables instrumentation codegen. This emits a dev-mode only call to an
* instrumentation function, for components and hooks that Forget compiles.
* For example:
* instrumentForget: {
* import: {
* source: 'react-compiler-runtime',
* importSpecifierName: 'useRenderCounter',
* }
* }
*
* produces:
* import {useRenderCounter} from 'react-compiler-runtime';
*
* function Component(props) {
* if (__DEV__) {
* useRenderCounter("Component", "/filepath/filename.js");
* }
* // ...
* }
*
*/
enableEmitInstrumentForget: InstrumentationSchema.nullable().default(null),
// Enable validation of mutable ranges
assertValidMutableRanges: z.boolean().default(false),
/*
* Enable emitting "change variables" which store the result of whether a particular
* reactive scope dependency has changed since the scope was last executed.
*
* Ex:
* ```
* const c_0 = $[0] !== input; // change variable
* let output;
* if (c_0) ...
* ```
*
* Defaults to false, where the comparison is inlined:
*
* ```
* let output;
* if ($[0] !== input) ...
* ```
*/
enableChangeVariableCodegen: z.boolean().default(false),
/**
* Enable emitting comments that explain Forget's output, and which
* values are being checked and which values produced by each memo block.
*
* Intended for use in demo purposes (incl playground)
*/
enableMemoizationComments: z.boolean().default(false),
/**
* [TESTING ONLY] Throw an unknown exception during compilation to
* simulate unexpected exceptions e.g. errors from babel functions.
*/
throwUnknownException__testonly: z.boolean().default(false),
/**
   * Enables deps of a function expression to be treated as conditional. This
* makes sure we don't load a dep when it's a property (to check if it has
* changed) and instead check the receiver.
*
   * This makes sure we don't end up throwing when the receiver is null. Consider
* this code:
*
* ```
* function getLength() {
* return props.bar.length;
* }
* ```
*
* It's only safe to memoize `getLength` against props, not props.bar, as
* props.bar could be null when this `getLength` function is created.
*
* This does cause the memoization to now be coarse grained, which is
* non-ideal.
*/
enableTreatFunctionDepsAsConditional: z.boolean().default(false),
/**
* When true, always act as though the dependencies of a memoized value
* have changed. This makes the compiler not actually perform any optimizations,
* but is useful for debugging. Implicitly also sets
* @enablePreserveExistingManualUseMemo, because otherwise memoization in the
* original source will be disabled as well.
*/
disableMemoizationForDebugging: z.boolean().default(false),
/**
   * When true, rather than using memoized values, the compiler will always re-compute
* values, and then use a heuristic to compare the memoized value to the newly
* computed one. This detects cases where rules of react violations may cause the
* compiled code to behave differently than the original.
*/
enableChangeDetectionForDebugging:
ExternalFunctionSchema.nullable().default(null),
/**
* The react native re-animated library uses custom Babel transforms that
* requires the calls to library API remain unmodified.
*
* If this flag is turned on, the React compiler will use custom type
   * definitions for the reanimated library to make its Babel plugin work
* with the compiler.
*/
enableCustomTypeDefinitionForReanimated: z.boolean().default(false),
/**
   * If specified, this value is used as a pattern for determining which global values should be
* treated as hooks. The pattern should have a single capture group, which will be used as
   * the hook name for the purposes of resolving hook definitions (for builtin hooks).
*
* For example, by default `React$useState` would not be treated as a hook. By specifying
* `hookPattern: 'React$(\w+)'`, the compiler will treat this value equivalently to `useState()`.
*
* This setting is intended for cases where Forget is compiling code that has been prebundled
* and identifiers have been changed.
*/
hookPattern: z.string().nullable().default(null),
/**
* If enabled, this will treat objects named as `ref` or if their names end with the substring `Ref`,
* and contain a property named `current`, as React refs.
*
* ```
* const ref = useMyRef();
* const myRef = useMyRef2();
* useEffect(() => {
* ref.current = ...;
* myRef.current = ...;
* })
* ```
*
* Here the variables `ref` and `myRef` will be typed as Refs.
*/
enableTreatRefLikeIdentifiersAsRefs: z.boolean().default(true),
/**
* Treat identifiers as SetState type if both
* - they are named with a "set-" prefix
* - they are called somewhere
*/
enableTreatSetIdentifiersAsStateSetters: z.boolean().default(false),
/*
* If specified a value, the compiler lowers any calls to `useContext` to use
* this value as the callee.
*
* A selector function is compiled and passed as an argument along with the
* context to this function call.
*
* The compiler automatically figures out the keys by looking for the immediate
* destructuring of the return value from the useContext call. In the future,
* this can be extended to different kinds of context access like property
* loads and accesses over multiple statements as well.
*
* ```
* // input
* const {foo, bar} = useContext(MyContext);
*
* // output
* const {foo, bar} = useCompiledContext(MyContext, (c) => [c.foo, c.bar]);
* ```
*/
lowerContextAccess: ExternalFunctionSchema.nullable().default(null),
/**
* If enabled, will validate useMemos that don't return any values:
*
* Valid:
* useMemo(() => foo, [foo]);
* useMemo(() => { return foo }, [foo]);
* Invalid:
* useMemo(() => { ... }, [...]);
*/
validateNoVoidUseMemo: z.boolean().default(true),
/**
* Validates that Components/Hooks are always defined at module level. This prevents scope
* reference errors that occur when the compiler attempts to optimize the nested component/hook
* while its parent function remains uncompiled.
*/
validateNoDynamicallyCreatedComponentsOrHooks: z.boolean().default(false),
/**
* When enabled, allows setState calls in effects based on valid patterns involving refs:
* - Allow setState where the value being set is derived from a ref. This is useful where
* state needs to take into account layer information, and a layout effect reads layout
* data from a ref and sets state.
* - Allow conditionally calling setState after manually comparing previous/new values
* for changes via a ref. Relying on effect deps is insufficient for non-primitive values,
* so a ref is generally required to manually track previous values and compare prev/next
* for meaningful changes before setting state.
*/
enableAllowSetStateFromRefsInEffects: z.boolean().default(true),
/**
* When enabled, provides verbose error messages for setState calls within effects,
* presenting multiple possible fixes to the user/agent since we cannot statically
* determine which specific use-case applies:
* 1. Non-local derived data - requires restructuring state ownership
* 2. Derived event pattern - detecting when a prop changes
* 3. Force update / external sync - should use useSyncExternalStore
*/
enableVerboseNoSetStateInEffect: z.boolean().default(false),
/**
* Enables inference of event handler types for JSX props on built-in DOM elements.
* When enabled, functions passed to event handler props (props starting with "on")
* on primitive JSX tags are inferred to have the BuiltinEventHandlerId type, which
* allows ref access within those functions since DOM event handlers are guaranteed
* by React to only execute in response to events, not during render.
*/
enableInferEventHandlers: z.boolean().default(false),
});
// Fully-resolved configuration: the schema with every default applied.
export type EnvironmentConfig = z.infer<typeof EnvironmentConfigSchema>;
// User-supplied configuration: all fields optional, validated on parse.
export type PartialEnvironmentConfig = Partial<EnvironmentConfig>;
// Classification of the function being compiled.
export type ReactFunctionType = 'Component' | 'Hook' | 'Other';
/**
 * Maps a ReactFunctionType to the lowercase noun used in user-facing output:
 * 'Component' -> 'component', 'Hook' -> 'hook', anything else -> 'function'.
 */
export function printFunctionType(type: ReactFunctionType): string {
  switch (type) {
    case 'Component':
      return 'component';
    case 'Hook':
      return 'hook';
    default:
      return 'function';
  }
}
/**
 * Per-function compilation context threaded through every pass of the React
 * Compiler. Owns the id counters for identifiers/blocks/scopes, the global and
 * shape registries, a per-module type cache, outlined-function storage, and an
 * optional Flow type environment.
 */
export class Environment {
  #globals: GlobalRegistry;
  #shapes: ShapeRegistry;
  // Cache of resolved module types; `null` records "looked up, nothing found"
  // so the type provider is not consulted twice for the same module.
  #moduleTypes: Map<string, Global | null> = new Map();
  // NOTE(review): "Identifer" is a typo in this private field name, kept as-is.
  #nextIdentifer: number = 0;
  #nextBlock: number = 0;
  #nextScope: number = 0;
  #scope: BabelScope;
  // Functions extracted by outlining passes, retrieved via getOutlinedFunctions().
  #outlinedFunctions: Array<{
    fn: HIRFunction;
    type: ReactFunctionType | null;
  }> = [];
  logger: Logger | null;
  filename: string | null;
  code: string | null;
  config: EnvironmentConfig;
  fnType: ReactFunctionType;
  outputMode: CompilerOutputMode;
  programContext: ProgramContext;
  hasFireRewrite: boolean;
  hasInferredEffect: boolean;
  inferredEffectLocations: Set<SourceLocation> = new Set();
  #contextIdentifiers: Set<t.Identifier>;
  #hoistedIdentifiers: Set<t.Identifier>;
  parentFunction: NodePath<t.Function>;
  // Non-null only when `config.flowTypeProvider` was supplied.
  #flowTypeEnvironment: FlowTypeEnv | null;

  constructor(
    scope: BabelScope,
    fnType: ReactFunctionType,
    outputMode: CompilerOutputMode,
    config: EnvironmentConfig,
    contextIdentifiers: Set<t.Identifier>,
    parentFunction: NodePath<t.Function>, // the outermost function being compiled
    logger: Logger | null,
    filename: string | null,
    code: string | null,
    programContext: ProgramContext,
  ) {
    this.#scope = scope;
    this.fnType = fnType;
    this.outputMode = outputMode;
    this.config = config;
    this.filename = filename;
    this.code = code;
    this.logger = logger;
    this.programContext = programContext;
    // Copy the built-in registries so per-environment additions (custom hooks,
    // module types) do not leak across compilations.
    this.#shapes = new Map(DEFAULT_SHAPES);
    this.#globals = new Map(DEFAULT_GLOBALS);
    this.hasFireRewrite = false;
    this.hasInferredEffect = false;
    // These two debugging options are mutually exclusive.
    if (
      config.disableMemoizationForDebugging &&
      config.enableChangeDetectionForDebugging != null
    ) {
      CompilerError.throwInvalidConfig({
        reason: `Invalid environment config: the 'disableMemoizationForDebugging' and 'enableChangeDetectionForDebugging' options cannot be used together`,
        description: null,
        loc: null,
        suggestions: null,
      });
    }
    // Register user-configured custom hooks in the global registry.
    for (const [hookName, hook] of this.config.customHooks) {
      CompilerError.invariant(!this.#globals.has(hookName), {
        reason: `[Globals] Found existing definition in global registry for custom hook ${hookName}`,
        loc: GeneratedSource,
      });
      this.#globals.set(
        hookName,
        addHook(this.#shapes, {
          positionalParams: [],
          restParam: hook.effectKind,
          returnType: hook.transitiveMixedData
            ? {kind: 'Object', shapeId: BuiltInMixedReadonlyId}
            : {kind: 'Poly'},
          returnValueKind: hook.valueKind,
          calleeEffect: Effect.Read,
          hookKind: 'Custom',
          noAlias: hook.noAlias,
        }),
      );
    }
    if (config.enableCustomTypeDefinitionForReanimated) {
      const reanimatedModuleType = getReanimatedModuleType(this.#shapes);
      this.#moduleTypes.set(REANIMATED_MODULE_NAME, reanimatedModuleType);
    }
    this.parentFunction = parentFunction;
    this.#contextIdentifiers = contextIdentifiers;
    this.#hoistedIdentifiers = new Set();
    if (config.flowTypeProvider != null) {
      this.#flowTypeEnvironment = new FlowTypeEnv();
      // The Flow type environment needs the raw source text to operate.
      CompilerError.invariant(code != null, {
        reason:
          'Expected Environment to be initialized with source code when a Flow type provider is specified',
        loc: GeneratedSource,
      });
      this.#flowTypeEnvironment.init(this, code);
    } else {
      this.#flowTypeEnvironment = null;
    }
  }

  // Flow type environment; only valid when a flowTypeProvider was configured.
  get typeContext(): FlowTypeEnv {
    CompilerError.invariant(this.#flowTypeEnvironment != null, {
      reason: 'Flow type environment not initialized',
      loc: GeneratedSource,
    });
    return this.#flowTypeEnvironment;
  }

  // Whether existing useMemo/useCallback should be dropped, per output mode.
  get enableDropManualMemoization(): boolean {
    switch (this.outputMode) {
      case 'lint': {
        // linting drops to be more compatible with compiler analysis
        return true;
      }
      case 'client':
      case 'ssr': {
        return true;
      }
      case 'client-no-memo': {
        return false;
      }
      default: {
        assertExhaustive(
          this.outputMode,
          `Unexpected output mode '${this.outputMode}'`,
        );
      }
    }
  }

  // Whether compiler-inserted memoization is emitted, per output mode.
  get enableMemoization(): boolean {
    switch (this.outputMode) {
      case 'client':
      case 'lint': {
        // linting also enables memoization so that we can check if manual memoization is preserved
        return true;
      }
      case 'ssr':
      case 'client-no-memo': {
        return false;
      }
      default: {
        assertExhaustive(
          this.outputMode,
          `Unexpected output mode '${this.outputMode}'`,
        );
      }
    }
  }

  // Whether validation passes run, per output mode.
  get enableValidations(): boolean {
    switch (this.outputMode) {
      case 'client':
      case 'lint':
      case 'ssr': {
        return true;
      }
      case 'client-no-memo': {
        return false;
      }
      default: {
        assertExhaustive(
          this.outputMode,
          `Unexpected output mode '${this.outputMode}'`,
        );
      }
    }
  }

  // Monotonically-increasing id generators; each read consumes an id.
  get nextIdentifierId(): IdentifierId {
    return makeIdentifierId(this.#nextIdentifer++);
  }

  get nextBlockId(): BlockId {
    return makeBlockId(this.#nextBlock++);
  }

  get nextScopeId(): ScopeId {
    return makeScopeId(this.#nextScope++);
  }

  get scope(): BabelScope {
    return this.#scope;
  }

  // Forwards each detail of a failed Result to the configured logger (no-op
  // when the result is Ok or no logger is set).
  logErrors(errors: Result<void, CompilerError>): void {
    if (errors.isOk() || this.logger == null) {
      return;
    }
    for (const error of errors.unwrapErr().details) {
      this.logger.logEvent(this.filename, {
        kind: 'CompileError',
        detail: error,
        fnLoc: null,
      });
    }
  }

  isContextIdentifier(node: t.Identifier): boolean {
    return this.#contextIdentifiers.has(node);
  }

  isHoistedIdentifier(node: t.Identifier): boolean {
    return this.#hoistedIdentifiers.has(node);
  }

  // Produces a program-unique identifier name via Babel's uid generator.
  generateGloballyUniqueIdentifierName(
    name: string | null,
  ): ValidatedIdentifier {
    const identifierNode = this.#scope.generateUidIdentifier(name ?? undefined);
    return makeIdentifierName(identifierNode.name);
  }

  // Records a function extracted by an outlining pass.
  outlineFunction(fn: HIRFunction, type: ReactFunctionType | null): void {
    this.#outlinedFunctions.push({fn, type});
  }

  getOutlinedFunctions(): Array<{
    fn: HIRFunction;
    type: ReactFunctionType | null;
  }> {
    return this.#outlinedFunctions;
  }

  // Resolves (and caches) the type of a module via the configured
  // moduleTypeProvider; returns null when no type information is available.
  #resolveModuleType(moduleName: string, loc: SourceLocation): Global | null {
    let moduleType = this.#moduleTypes.get(moduleName);
    if (moduleType === undefined) {
      /*
       * NOTE: Zod doesn't work when specifying a function as a default, so we have to
       * fallback to the default value here
       */
      const moduleTypeProvider =
        this.config.moduleTypeProvider ?? defaultModuleTypeProvider;
      if (moduleTypeProvider == null) {
        return null;
      }
      if (typeof moduleTypeProvider !== 'function') {
        CompilerError.throwInvalidConfig({
          reason: `Expected a function for \`moduleTypeProvider\``,
          loc,
        });
      }
      const unparsedModuleConfig = moduleTypeProvider(moduleName);
      if (unparsedModuleConfig != null) {
        const parsedModuleConfig = TypeSchema.safeParse(unparsedModuleConfig);
        if (!parsedModuleConfig.success) {
          CompilerError.throwInvalidConfig({
            reason: `Could not parse module type, the configured \`moduleTypeProvider\` function returned an invalid module description`,
            description: parsedModuleConfig.error.toString(),
            loc,
          });
        }
        const moduleConfig = parsedModuleConfig.data;
        moduleType = installTypeConfig(
          this.#globals,
          this.#shapes,
          moduleConfig,
          moduleName,
          loc,
        );
      } else {
        moduleType = null;
      }
      // Cache even the null result to avoid repeated provider calls.
      this.#moduleTypes.set(moduleName, moduleType);
    }
    return moduleType;
  }

  /**
   * Resolves the type of a non-local binding (global, module-local, or import),
   * falling back to a generic custom-hook type for hook-like names. Returns
   * null when nothing is known about the binding.
   */
  getGlobalDeclaration(
    binding: NonLocalBinding,
    loc: SourceLocation,
  ): Global | null {
    // hookPattern lets prebundled/renamed code still be recognized as hooks.
    if (this.config.hookPattern != null) {
      const match = new RegExp(this.config.hookPattern).exec(binding.name);
      if (
        match != null &&
        typeof match[1] === 'string' &&
        isHookName(match[1])
      ) {
        const resolvedName = match[1];
        return this.#globals.get(resolvedName) ?? this.#getCustomHookType();
      }
    }
    switch (binding.kind) {
      case 'ModuleLocal': {
        // don't resolve module locals
        return isHookName(binding.name) ? this.#getCustomHookType() : null;
      }
      case 'Global': {
        return (
          this.#globals.get(binding.name) ??
          (isHookName(binding.name) ? this.#getCustomHookType() : null)
        );
      }
      case 'ImportSpecifier': {
        if (this.#isKnownReactModule(binding.module)) {
          /**
           * For `import {imported as name} from "..."` form, we use the `imported`
           * name rather than the local alias. Because we don't have definitions for
           * every React builtin hook yet, we also check to see if the imported name
           * is hook-like (whereas the fall-through below is checking if the aliased
           * name is hook-like)
           */
          return (
            this.#globals.get(binding.imported) ??
            (isHookName(binding.imported) || isHookName(binding.name)
              ? this.#getCustomHookType()
              : null)
          );
        } else {
          const moduleType = this.#resolveModuleType(binding.module, loc);
          if (moduleType !== null) {
            const importedType = this.getPropertyType(
              moduleType,
              binding.imported,
            );
            if (importedType != null) {
              /*
               * Check that hook-like export names are hook types, and non-hook names are non-hook types.
               * The user-assigned alias isn't decidable by the type provider, so we ignore that for the check.
               * Thus we allow `import {fooNonHook as useFoo} from ...` because the name and type both say
               * that it's not a hook.
               */
              const expectHook = isHookName(binding.imported);
              const isHook = getHookKindForType(this, importedType) != null;
              if (expectHook !== isHook) {
                CompilerError.throwInvalidConfig({
                  reason: `Invalid type configuration for module`,
                  description: `Expected type for \`import {${binding.imported}} from '${binding.module}'\` ${expectHook ? 'to be a hook' : 'not to be a hook'} based on the exported name`,
                  loc,
                });
              }
              return importedType;
            }
          }
          /**
           * For modules we don't own, we look at whether the original name or import alias
           * are hook-like. Both of the following are likely hooks so we would return a hook
           * type for both:
           *
           * `import {useHook as foo} ...`
           * `import {foo as useHook} ...`
           */
          return isHookName(binding.imported) || isHookName(binding.name)
            ? this.#getCustomHookType()
            : null;
        }
      }
      case 'ImportDefault':
      case 'ImportNamespace': {
        if (this.#isKnownReactModule(binding.module)) {
          // only resolve imports to modules we know about
          return (
            this.#globals.get(binding.name) ??
            (isHookName(binding.name) ? this.#getCustomHookType() : null)
          );
        } else {
          const moduleType = this.#resolveModuleType(binding.module, loc);
          if (moduleType !== null) {
            let importedType: Type | null = null;
            if (binding.kind === 'ImportDefault') {
              // Default imports resolve through the module's 'default' property.
              const defaultType = this.getPropertyType(moduleType, 'default');
              if (defaultType !== null) {
                importedType = defaultType;
              }
            } else {
              importedType = moduleType;
            }
            if (importedType !== null) {
              /*
               * Check that the hook-like modules are defined as types, and non hook-like modules are not typed as hooks.
               * So `import Foo from 'useFoo'` is expected to be a hook based on the module name
               */
              const expectHook = isHookName(binding.module);
              const isHook = getHookKindForType(this, importedType) != null;
              if (expectHook !== isHook) {
                CompilerError.throwInvalidConfig({
                  reason: `Invalid type configuration for module`,
                  description: `Expected type for \`import ... from '${binding.module}'\` ${expectHook ? 'to be a hook' : 'not to be a hook'} based on the module name`,
                  loc,
                });
              }
              return importedType;
            }
          }
          return isHookName(binding.name) ? this.#getCustomHookType() : null;
        }
      }
    }
  }

  // Case-insensitive membership test mirroring `knownReactModules` below.
  #isKnownReactModule(moduleName: string): boolean {
    return (
      moduleName.toLowerCase() === 'react' ||
      moduleName.toLowerCase() === 'react-dom'
    );
  }

  static knownReactModules: ReadonlyArray<string> = ['react', 'react-dom'];

  // Looks up the wildcard ('*') property of the receiver's shape, if any.
  getFallthroughPropertyType(
    receiver: Type,
    _property: Type,
  ): BuiltInType | PolyType | null {
    let shapeId = null;
    if (receiver.kind === 'Object' || receiver.kind === 'Function') {
      shapeId = receiver.shapeId;
    }
    if (shapeId !== null) {
      const shape = this.#shapes.get(shapeId);
      CompilerError.invariant(shape !== undefined, {
        reason: `[HIR] Forget internal error: cannot resolve shape ${shapeId}`,
        loc: GeneratedSource,
      });
      return shape.properties.get('*') ?? null;
    }
    return null;
  }

  // Resolves the type of `receiver.property`, consulting the shape registry;
  // hook-like string properties fall back to the generic custom-hook type.
  getPropertyType(
    receiver: Type,
    property: string | number,
  ): BuiltInType | PolyType | null {
    let shapeId = null;
    if (receiver.kind === 'Object' || receiver.kind === 'Function') {
      shapeId = receiver.shapeId;
    }
    if (shapeId !== null) {
      /*
       * If an object or function has a shapeId, it must have been assigned
       * by Forget (and be present in a builtin or user-defined registry)
       */
      const shape = this.#shapes.get(shapeId);
      CompilerError.invariant(shape !== undefined, {
        reason: `[HIR] Forget internal error: cannot resolve shape ${shapeId}`,
        loc: GeneratedSource,
      });
      if (typeof property === 'string') {
        return (
          shape.properties.get(property) ??
          shape.properties.get('*') ??
          (isHookName(property) ? this.#getCustomHookType() : null)
        );
      } else {
        // Numeric (index) accesses only match the wildcard property.
        return shape.properties.get('*') ?? null;
      }
    } else if (typeof property === 'string' && isHookName(property)) {
      return this.#getCustomHookType();
    }
    return null;
  }

  // Returns the registered call signature for a function type, if any.
  getFunctionSignature(type: FunctionType): FunctionSignature | null {
    const {shapeId} = type;
    if (shapeId !== null) {
      const shape = this.#shapes.get(shapeId);
      CompilerError.invariant(shape !== undefined, {
        reason: `[HIR] Forget internal error: cannot resolve shape ${shapeId}`,
        loc: GeneratedSource,
      });
      return shape.functionType;
    }
    return null;
  }

  // Hoisted identifiers are also context identifiers, hence the double add.
  addHoistedIdentifier(node: t.Identifier): void {
    this.#contextIdentifiers.add(node);
    this.#hoistedIdentifiers.add(node);
  }

  // Generic hook type used when a binding merely looks like a hook; the
  // mutating/non-mutating variant depends on the rules-of-react assumption.
  #getCustomHookType(): Global {
    if (this.config.enableAssumeHooksFollowRulesOfReact) {
      return DefaultNonmutatingHook;
    } else {
      return DefaultMutatingHook;
    }
  }
}
// Module specifier that receives custom type definitions when
// `enableCustomTypeDefinitionForReanimated` is enabled.
const REANIMATED_MODULE_NAME = 'react-native-reanimated';
// From https://github.com/facebook/react/blob/main/packages/eslint-plugin-react-hooks/src/RulesOfHooks.js#LL18C1-L23C2
/**
 * Returns true when `name` follows the hook naming convention: a `use` prefix
 * immediately followed by an uppercase letter or a digit.
 */
export function isHookName(name: string): boolean {
  const hookNamePattern = /^use[A-Z0-9]/;
  return hookNamePattern.test(name);
}
/**
 * Validates a partial config against the environment schema, returning the
 * fully-defaulted config on success or the zod error on failure.
 */
export function parseEnvironmentConfig(
  partialConfig: PartialEnvironmentConfig,
): Result<EnvironmentConfig, ZodError<PartialEnvironmentConfig>> {
  const parsed = EnvironmentConfigSchema.safeParse(partialConfig);
  return parsed.success ? Ok(parsed.data) : Err(parsed.error);
}
/**
 * Validates a partial config against the environment schema, throwing an
 * InvalidConfig compiler error (with a human-readable zod message) on failure.
 */
export function validateEnvironmentConfig(
  partialConfig: PartialEnvironmentConfig,
): EnvironmentConfig {
  const parsed = EnvironmentConfigSchema.safeParse(partialConfig);
  if (!parsed.success) {
    // throwInvalidConfig never returns, so the fall-through below is safe.
    CompilerError.throwInvalidConfig({
      reason:
        'Could not validate environment config. Update React Compiler config to fix the error',
      description: `${fromZodError(parsed.error)}`,
      loc: null,
      suggestions: null,
    });
  }
  return parsed.data;
}
export function tryParseExternalFunction(
maybeExternalFunction: any,
): ExternalFunction {
const externalFunction = ExternalFunctionSchema.safeParse(
maybeExternalFunction,
);
if (externalFunction.success) {
return externalFunction.data;
}
CompilerError.throwInvalidConfig({
reason:
'Could not parse external function. Update React Compiler config to fix the error',
description: `${fromZodError(externalFunction.error)}`,
loc: null,
suggestions: null,
});
}
// Property name under which a module's default export is resolved.
export const DEFAULT_EXPORT = 'default';
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alexandre Fayolle
# Copyright 2012-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class Product(orm.Model):
    """Extend ``product.product`` with a warehouse-specific description."""
    _inherit = "product.product"
    _columns = {
        # Translatable free-text description intended for warehouse documents.
        'description_warehouse': fields.text('Warehouse Description',
                                             translate=True),
    }
#!/usr/bin/env python
# Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from googleapiclient.discovery import build
from googleapiclient.errors import Error
# Client for the Cloud Talent Solution (Jobs) API, version v3.
client_service = build('jobs', 'v3')
# Fully-qualified project resource name, e.g. "projects/my-project".
project_id = 'projects/' + os.environ['GOOGLE_CLOUD_PROJECT']
def list_everything():
    """Print every company in the project along with that company's jobs.

    Prints 'No companies' when the project has none; re-raises any API error
    after logging a short message.
    """
    try:
        companies_response = client_service.projects().companies().list(
            parent=project_id).execute()
        companies = companies_response.get('companies')
        if companies is None:
            print('No companies')
            return
        print('Companies and Jobs:')
        for company in companies:
            company_name = company.get('name')
            print('%s: %s' % (company.get('displayName'), company_name))
            jobs_response = client_service.projects().jobs().list(
                parent=project_id,
                filter='companyName="' + company_name + '"').execute()
            # A missing 'jobs' key means the company has no jobs to print.
            for job in jobs_response.get('jobs') or []:
                print('- %s: %s' % (job.get('title'), job.get('name')))
    except Error as e:
        print('Got exception while listing everything')
        raise e
# The sample runs the listing immediately when the module is executed.
list_everything()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BatchToSpace op.
Additional tests are included in spacetobatch_op_test.py, where the BatchToSpace
op is tested in tandem with its reverse SpaceToBatch op.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PythonOpImpl(object):
  """Mixin that routes batch_to_space through the public Python wrapper op."""

  @staticmethod
  def batch_to_space(*args, **kwargs):
    # Thin forwarder so test classes can be parameterized over the Python
    # and C++ op implementations via mixin inheritance.
    return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
  """Mixin that routes batch_to_space through the raw generated C++ op."""

  @staticmethod
  def batch_to_space(*args, **kwargs):
    # Calls the private generated op binding directly, bypassing the Python
    # wrapper's argument handling.
    return gen_array_ops._batch_to_space(*args, **kwargs)
class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):
  """Cross-checks batch_to_space against an equivalent depth_to_space form."""

  # Verifies that: batch_to_space(x) = transpose(depth_to_space(transpose(x)))
  def testDepthToSpaceTranspose(self):
    x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
    block_size = 2
    # The op should accept both int64 and int32 crops.
    for crops_dtype in [dtypes.int64, dtypes.int32]:
      crops = array_ops.zeros((2, 2), dtype=crops_dtype)
      y1 = self.batch_to_space(x, crops, block_size=block_size)
      # Move batch to the last axis, apply depth_to_space, then move it back.
      y2 = array_ops.transpose(
          array_ops.depth_to_space(
              array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
          [3, 1, 2, 0])
      with self.test_session():
        self.assertAllEqual(y1.eval(), y2.eval())
class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):
  # Re-runs the same tests through the C++ op implementation mixin.
  pass
class BatchToSpaceErrorHandlingTest(test.TestCase, PythonOpImpl):
  """Verifies that malformed arguments to batch_to_space raise ValueError."""

  @staticmethod
  def _zero_crops():
    # Every case below uses a 2x2 all-zero crops matrix.
    return np.zeros((2, 2), dtype=np.int32)

  def testInputWrongDimMissingBatch(self):
    # The input is 3-D: the leading "batch" dimension is absent.
    missing_batch = [[[1], [2]], [[3], [4]]]
    with self.assertRaises(ValueError):
      _ = self.batch_to_space(missing_batch, self._zero_crops(), 2)

  def testBlockSize0(self):
    # A block size of zero is rejected at graph-construction time.
    with self.assertRaises(ValueError):
      result = self.batch_to_space([[[[1], [2]], [[3], [4]]]],
                                   self._zero_crops(), 0)
      result.eval()

  def testBlockSizeOne(self):
    # The block size must be strictly greater than one.
    with self.assertRaises(ValueError):
      result = self.batch_to_space([[[[1], [2]], [[3], [4]]]],
                                   self._zero_crops(), 1)
      result.eval()

  def testBlockSizeLarger(self):
    # The block size is too large for this input.
    with self.assertRaises(ValueError):
      result = self.batch_to_space([[[[1], [2]], [[3], [4]]]],
                                   self._zero_crops(), 10)
      result.eval()

  def testBlockSizeSquaredNotDivisibleBatch(self):
    # The batch size (1) is not divisible by block_size squared (9).
    not_divisible = [[[[1], [2], [3]], [[3], [4], [7]]]]
    with self.assertRaises(ValueError):
      _ = self.batch_to_space(not_divisible, self._zero_crops(), 3)

  def testUnknownShape(self):
    # Even with fully unknown inputs, the output rank is inferred as 4.
    t = self.batch_to_space(
        array_ops.placeholder(dtypes.float32),
        array_ops.placeholder(dtypes.int32),
        block_size=4)
    self.assertEqual(4, t.get_shape().ndims)
class BatchToSpaceErrorHandlingCppTest(BatchToSpaceErrorHandlingTest,
                                       CppOpImpl):
  # Re-runs the error-handling tests through the C++ op implementation.
  pass
class BatchToSpaceNDErrorHandlingTest(test.TestCase):
  """Error-handling and shape-inference tests for batch_to_space_nd."""

  def _testStaticShape(self, input_shape, block_shape, paddings, error):
    # Asserts `error` is raised when all shapes are known while building the graph.
    block_shape = np.array(block_shape)
    paddings = np.array(paddings)
    # Try with sizes known at graph construction time.
    with self.assertRaises(error):
      _ = array_ops.batch_to_space_nd(
          np.zeros(input_shape, np.float32), block_shape, paddings)

  def _testDynamicShape(self, input_shape, block_shape, paddings):
    # Asserts ValueError is raised when shapes are only supplied via feed_dict.
    block_shape = np.array(block_shape)
    paddings = np.array(paddings)
    # Try with sizes unknown at graph construction time.
    input_placeholder = array_ops.placeholder(dtypes.float32)
    block_shape_placeholder = array_ops.placeholder(
        dtypes.int32, shape=block_shape.shape)
    paddings_placeholder = array_ops.placeholder(dtypes.int32)
    t = array_ops.batch_to_space_nd(input_placeholder, block_shape_placeholder,
                                    paddings_placeholder)
    with self.assertRaises(ValueError):
      _ = t.eval({
          input_placeholder: np.zeros(input_shape, np.float32),
          block_shape_placeholder: block_shape,
          paddings_placeholder: paddings
      })

  def _testShape(self, input_shape, block_shape, paddings, error):
    # Exercises both the static- and dynamic-shape error paths.
    self._testStaticShape(input_shape, block_shape, paddings, error)
    self._testDynamicShape(input_shape, block_shape, paddings)

  def testInputWrongDimMissingBatch(self):
    self._testShape([2, 2], [2, 2], [[0, 0], [0, 0]], ValueError)
    self._testShape([2, 2, 3], [2, 2, 3], [[0, 0], [0, 0]], ValueError)

  def testBlockSize0(self):
    # The block size is 0.
    self._testShape([1, 2, 2, 1], [0, 1], [[0, 0], [0, 0]], ValueError)

  def testBlockSizeNegative(self):
    self._testShape([1, 2, 2, 1], [-1, 1], [[0, 0], [0, 0]], ValueError)

  def testNegativePadding(self):
    self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)

  def testCropTooLarge(self):
    # The amount to crop exceeds the padded size.
    self._testShape([1 * 2 * 2, 2, 3, 1], [2, 2], [[3, 2], [0, 0]], ValueError)

  def testBlockSizeSquaredNotDivisibleBatch(self):
    # The batch dimension is not divisible by the product of the block_shape.
    self._testShape([3, 1, 1, 1], [2, 3], [[0, 0], [0, 0]], ValueError)

  def testUnknownShape(self):
    # Verify that input shape and paddings shape can be unknown.
    _ = array_ops.batch_to_space_nd(
        array_ops.placeholder(dtypes.float32),
        array_ops.placeholder(
            dtypes.int32, shape=(2,)),
        array_ops.placeholder(dtypes.int32))
    # Only number of input dimensions is known.
    t = array_ops.batch_to_space_nd(
        array_ops.placeholder(
            dtypes.float32, shape=(None, None, None, None)),
        array_ops.placeholder(
            dtypes.int32, shape=(2,)),
        array_ops.placeholder(dtypes.int32))
    self.assertEqual(4, t.get_shape().ndims)
    # Dimensions are partially known.
    t = array_ops.batch_to_space_nd(
        array_ops.placeholder(
            dtypes.float32, shape=(None, None, None, 2)),
        array_ops.placeholder(
            dtypes.int32, shape=(2,)),
        array_ops.placeholder(dtypes.int32))
    self.assertEqual([None, None, None, 2], t.get_shape().as_list())
    # Dimensions are partially known.
    t = array_ops.batch_to_space_nd(
        array_ops.placeholder(
            dtypes.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
        array_ops.placeholder(dtypes.int32))
    self.assertEqual([3, None, None, 2], t.get_shape().as_list())
    # Dimensions are partially known.
    t = array_ops.batch_to_space_nd(
        array_ops.placeholder(
            dtypes.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
        [[1, 1], [0, 1]])
    self.assertEqual([3, None, 5, 2], t.get_shape().as_list())
    # Dimensions are fully known.
    t = array_ops.batch_to_space_nd(
        array_ops.placeholder(
            dtypes.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
        [[1, 1], [0, 0]])
    self.assertEqual([3, 2, 3, 2], t.get_shape().as_list())
class BatchToSpaceGradientTest(test.TestCase, PythonOpImpl):
  """Numeric gradient checks for batch_to_space."""

  # Check the gradients.
  def _checkGrad(self, x, crops, block_size):
    assert 4 == x.ndim
    with self.test_session():
      tf_x = ops.convert_to_tensor(x)
      tf_y = self.batch_to_space(tf_x, crops, block_size)
      epsilon = 1e-5
      ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      # Theoretical and numeric Jacobians must agree within tolerance.
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

  # Tests a gradient for batch_to_space of x which is a four dimensional
  # tensor of shape [b * block_size * block_size, h, w, d].
  def _compare(self, b, h, w, d, block_size, crop_beg, crop_end):
    block_size_sq = block_size * block_size
    x = np.random.normal(0, 1, b * h * w * d *
                         block_size_sq).astype(np.float32).reshape(
                             [b * block_size * block_size, h, w, d])
    crops = np.array(
        [[crop_beg, crop_end], [crop_beg, crop_end]], dtype=np.int32)
    self._checkGrad(x, crops, block_size)

  # Don't use very large numbers as dimensions here as the result is tensor
  # with cartesian product of the dimensions.
  def testSmall(self):
    block_size = 2
    crop_beg = 0
    crop_end = 0
    self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)

  def testSmall2(self):
    block_size = 2
    crop_beg = 0
    crop_end = 0
    self._compare(2, 4, 3, 2, block_size, crop_beg, crop_end)

  def testSmallCrop1x1(self):
    # Same as testSmall, but with a 1-pixel crop on each spatial edge.
    block_size = 2
    crop_beg = 1
    crop_end = 1
    self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)
class BatchToSpaceGradientCppTest(BatchToSpaceGradientTest, CppOpImpl):
  # Re-runs every gradient test above with the CppOpImpl mixin, i.e. against
  # the C++ implementation of the op instead of the Python one.
  pass
class BatchToSpaceNDGradientTest(test.TestCase):
  """Checks analytic gradients of batch_to_space_nd against numeric estimates."""

  # Check the gradients.
  def _checkGrad(self, x, block_shape, crops, crops_dtype):
    """Compares the analytic and numeric Jacobians of batch_to_space_nd.

    Args:
      x: numpy input array.
      block_shape: 1-D block shape; crops is reshaped to (len(block_shape), 2).
      crops: crop amounts, converted to a constant of dtype `crops_dtype`.
      crops_dtype: TF dtype used for the crops tensor.
    """
    block_shape = np.array(block_shape)
    crops = constant_op.constant(
        np.array(crops).reshape((len(block_shape), 2)), crops_dtype)
    with self.test_session():
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
      epsilon = 1e-5
      # Fixed: removed the redundant double parentheses that previously
      # wrapped this tuple unpacking.
      x_jacob_t, x_jacob_n = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

  def _compare(self, input_shape, block_shape, crops, crops_dtype):
    """Scales the batch dim by prod(block_shape) and checks the gradient."""
    input_shape = list(input_shape)
    input_shape[0] *= np.prod(block_shape)
    x = np.random.normal(
        0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
    self._checkGrad(x, block_shape, crops, crops_dtype)

  # Don't use very large numbers as dimensions here as the result is tensor
  # with cartesian product of the dimensions.
  def testSmall(self):
    for dtype in [dtypes.int64, dtypes.int32]:
      self._compare([1, 2, 3, 5], [2, 2], [[0, 0], [0, 0]], dtype)

  def testSmall2(self):
    for dtype in [dtypes.int64, dtypes.int32]:
      self._compare([2, 4, 3, 2], [2, 2], [[0, 0], [0, 0]], dtype)

  def testSmallCrop1x1(self):
    for dtype in [dtypes.int64, dtypes.int32]:
      self._compare([1, 2, 3, 5], [2, 2], [[1, 1], [1, 1]], dtype)
# Standard TensorFlow test entry point: discovers and runs all test cases
# defined in this module.
if __name__ == "__main__":
  test.main()
//// [tests/cases/compiler/assignmentCompatability24.ts] ////
// NOTE(review): auto-generated TypeScript compiler baseline (the input .ts
// followed by the emitted .js). Regenerate via the compiler test harness
// rather than editing by hand.

//// [assignmentCompatability24.ts]
namespace __test1__ {
    export interface interfaceWithPublicAndOptional<T,U> { one: T; two?: U; };  var obj4: interfaceWithPublicAndOptional<number,string> = { one: 1 };;
    export var __val__obj4 = obj4;
}
namespace __test2__ {
    export var obj = function f<Tstring>(a: Tstring) { return a; };;
    export var __val__obj = obj;
}
__test2__.__val__obj = __test1__.__val__obj4

//// [assignmentCompatability24.js]
"use strict";
var __test1__;
(function (__test1__) {
    ;
    var obj4 = { one: 1 };
    ;
    __test1__.__val__obj4 = obj4;
})(__test1__ || (__test1__ = {}));
var __test2__;
(function (__test2__) {
    __test2__.obj = function f(a) { return a; };
    ;
    __test2__.__val__obj = __test2__.obj;
})(__test2__ || (__test2__ = {}));
__test2__.__val__obj = __test1__.__val__obj4;
use rustc_ast::token::{self, IdentIsRaw, MetaVarKind, Token, TokenKind};
use rustc_ast::util::case::Case;
use rustc_ast::{
self as ast, BoundAsyncness, BoundConstness, BoundPolarity, DUMMY_NODE_ID, FnPtrTy, FnRetTy,
GenericBound, GenericBounds, GenericParam, Generics, Lifetime, MacCall, MgcaDisambiguation,
MutTy, Mutability, Pinnedness, PolyTraitRef, PreciseCapturingArg, TraitBoundModifiers,
TraitObjectSyntax, Ty, TyKind, UnsafeBinderTy,
};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::{Applicability, Diag, E0516, PResult};
use rustc_span::{ErrorGuaranteed, Ident, Span, kw, sym};
use thin_vec::{ThinVec, thin_vec};
use super::{Parser, PathStyle, SeqSep, TokenType, Trailing};
use crate::errors::{
self, AttributeOnEmptyType, AttributeOnType, DynAfterMut, ExpectedFnPathFoundFnKeyword,
ExpectedMutOrConstInRawPointerType, FnPtrWithGenerics, FnPtrWithGenericsSugg,
HelpUseLatestEdition, InvalidCVariadicType, InvalidDynKeyword, LifetimeAfterMut,
NeedPlusAfterTraitObjectLifetime, NestedCVariadicType, ReturnTypesUseThinArrow,
};
use crate::parser::item::FrontMatterParsingMode;
use crate::parser::{FnContext, FnParseMode};
use crate::{exp, maybe_recover_from_interpolated_ty_qpath};
/// Signals whether parsing a type should allow `+`.
///
/// For example, let T be the type `impl Default + 'static`:
/// with `AllowPlus::Yes`, T will be parsed successfully;
/// with `AllowPlus::No`, parsing T will return a parse error.
#[derive(Copy, Clone, PartialEq)]
pub(super) enum AllowPlus {
    Yes,
    No,
}
/// Signals whether type parsing should attempt to recover erroneously written
/// qualified paths (see `maybe_recover_from_bad_qpath` and the
/// `maybe_recover_from_interpolated_ty_qpath!` macro in `parse_ty_common`).
#[derive(PartialEq)]
pub(super) enum RecoverQPath {
    Yes,
    No,
}
/// Signals whether type parsing should recover a trailing `?` (e.g. `T?`
/// written in type position) via `maybe_recover_from_question_mark`.
pub(super) enum RecoverQuestionMark {
    Yes,
    No,
}
/// Signals whether parsing a type should recover `->`.
///
/// More specifically, when parsing a function like:
/// ```compile_fail
/// fn foo() => u8 { 0 }
/// fn bar(): u8 { 0 }
/// ```
/// The compiler will try to recover interpreting `foo() => u8` as `foo() -> u8` when calling
/// `parse_ty` with anything except `RecoverReturnSign::No`, and it will try to recover `bar(): u8`
/// as `bar() -> u8` when passing `RecoverReturnSign::Yes` to `parse_ty`.
#[derive(Copy, Clone, PartialEq)]
pub(super) enum RecoverReturnSign {
    Yes,
    OnlyFatArrow,
    No,
}
impl RecoverReturnSign {
    /// Decides whether `token` may be recovered as a return-type arrow under
    /// this mode: `Yes` accepts both `=>` and `:` (recovering `fn foo() => u8`
    /// and `fn foo(): u8`), `OnlyFatArrow` accepts just `=>` (recovering
    /// colons causes problems when parsing where-clauses), and `No` performs
    /// no recovery at all.
    fn can_recover(self, token: &TokenKind) -> bool {
        let is_fat_arrow = matches!(token, token::FatArrow);
        let is_colon = matches!(token, token::Colon);
        match self {
            Self::Yes => is_fat_arrow || is_colon,
            Self::OnlyFatArrow => is_fat_arrow,
            Self::No => false,
        }
    }
}
/// Is `...` (`CVarArgs`) legal at this level of type parsing?
#[derive(PartialEq)]
enum AllowCVariadic {
    Yes,
    No,
}
/// Determine if the given token can begin a bound assuming it follows Rust 2015 identifier `dyn`.
///
/// In Rust 2015, `dyn` is only a contextual keyword, not a full one, so we must
/// decide from the token that follows whether `dyn` starts a trait object type.
fn can_begin_dyn_bound_in_edition_2015(t: Token) -> bool {
    if t.is_path_start() {
        // For backward compatibility, `dyn::x`, `dyn<X>` and `dyn<<X>::Y>` keep denoting a
        // regular path segment named `dyn`. The one exception is `dyn(X)`: it used to be a
        // path with parenthesized generic arguments, which can be semantically well-formed
        // (consider: `use std::ops::Fn as dyn;`), but we now treat it as a trait object type
        // whose first bound is parenthesized.
        let keeps_path_meaning = t == token::PathSep || t == token::Lt || t == token::Shl;
        return !keeps_path_meaning;
    }
    // Contrary to `Parser::can_begin_bound`, `!`, `const`, `[` and `async` are deliberately not
    // part of this list to contain the number of potential regressions esp. in MBE code.
    // `const` and `[` would regress UI test `macro-dyn-const-2015.rs` and
    // `!` would regress `dyn!(...)` macro calls in Rust 2015 for example.
    t.is_lifetime() || t.is_keyword(kw::For) || t == token::OpenParen || t == token::Question
}
impl<'a> Parser<'a> {
    /// Parses a type.
    ///
    /// This is the common entry point: it allows `+` bounds and recovers
    /// qualified paths and trailing `?`, but rejects a top-level `...`
    /// (C-variadic) with a dedicated error.
    pub fn parse_ty(&mut self) -> PResult<'a, Box<Ty>> {
        if self.token == token::DotDotDot {
            // We special case this so that we don't talk about "nested C-variadics" in types.
            // We still pass in `AllowCVariadic::No` so that `parse_ty_common` can complain about
            // things like `Vec<...>`.
            let span = self.token.span;
            self.bump();
            let kind = TyKind::Err(self.dcx().emit_err(InvalidCVariadicType { span }));
            return Ok(self.mk_ty(span, kind));
        }
        // Make sure deeply nested types don't overflow the stack.
        ensure_sufficient_stack(|| {
            self.parse_ty_common(
                AllowPlus::Yes,
                AllowCVariadic::No,
                RecoverQPath::Yes,
                RecoverReturnSign::Yes,
                None,
                RecoverQuestionMark::Yes,
            )
        })
    }
    /// Parses a type like [`Self::parse_ty`], additionally threading the
    /// surrounding item's generics (`ty_params`) through to path parsing
    /// (`parse_path_inner`) so it can use them for recovery/diagnostics.
    pub(super) fn parse_ty_with_generics_recovery(
        &mut self,
        ty_params: &Generics,
    ) -> PResult<'a, Box<Ty>> {
        self.parse_ty_common(
            AllowPlus::Yes,
            AllowCVariadic::No,
            RecoverQPath::Yes,
            RecoverReturnSign::Yes,
            Some(ty_params),
            RecoverQuestionMark::Yes,
        )
    }
    /// Parse a type suitable for a function or function pointer parameter.
    /// The difference from `parse_ty` is that this version allows `...`
    /// (`CVarArgs`) at the top level of the type.
    ///
    /// Additionally recovers a trailing `= EXPR` (an attempted parameter
    /// default), reporting that parameter defaults are not supported.
    pub(super) fn parse_ty_for_param(&mut self) -> PResult<'a, Box<Ty>> {
        let ty = self.parse_ty_common(
            AllowPlus::Yes,
            AllowCVariadic::Yes,
            RecoverQPath::Yes,
            RecoverReturnSign::Yes,
            None,
            RecoverQuestionMark::Yes,
        )?;
        // Recover a trailing `= EXPR` if present.
        if self.may_recover()
            && self.check_noexpect(&token::Eq)
            && self.look_ahead(1, |tok| tok.can_begin_expr())
        {
            let snapshot = self.create_snapshot_for_diagnostic();
            self.bump();
            let eq_span = self.prev_token.span;
            match self.parse_expr() {
                Ok(e) => {
                    self.dcx()
                        .struct_span_err(eq_span.to(e.span), "parameter defaults are not supported")
                        .emit();
                }
                Err(diag) => {
                    // Not actually an expression after the `=`: roll back and
                    // let the caller deal with whatever follows.
                    diag.cancel();
                    self.restore_snapshot(snapshot);
                }
            }
        }
        Ok(ty)
    }
    /// Parses a type in restricted contexts where `+` is not permitted.
    ///
    /// Example 1: `&'a TYPE`
    ///     `+` is prohibited to maintain operator priority (P(+) < P(&)).
    /// Example 2: `value1 as TYPE + value2`
    ///     `+` is prohibited to avoid interactions with expression grammar.
    pub(super) fn parse_ty_no_plus(&mut self) -> PResult<'a, Box<Ty>> {
        self.parse_ty_common(
            AllowPlus::No,
            AllowCVariadic::No,
            RecoverQPath::Yes,
            RecoverReturnSign::Yes,
            None,
            RecoverQuestionMark::Yes,
        )
    }
    /// Parses a type following an `as` cast. Similar to `parse_ty_no_plus`, but
    /// additionally disables trailing-`?` recovery for better diagnostics
    /// (a `?` after a cast is more likely part of the expression).
    pub(super) fn parse_as_cast_ty(&mut self) -> PResult<'a, Box<Ty>> {
        self.parse_ty_common(
            AllowPlus::No,
            AllowCVariadic::No,
            RecoverQPath::Yes,
            RecoverReturnSign::Yes,
            None,
            RecoverQuestionMark::No,
        )
    }
    /// Parses a type exactly like [`Self::parse_ty`] except that a trailing
    /// `?` is not recovered. Used e.g. when reparsing `ty` metavariable
    /// fragments and during C-style raw-pointer recovery.
    pub(super) fn parse_ty_no_question_mark_recover(&mut self) -> PResult<'a, Box<Ty>> {
        self.parse_ty_common(
            AllowPlus::Yes,
            AllowCVariadic::No,
            RecoverQPath::Yes,
            RecoverReturnSign::Yes,
            None,
            RecoverQuestionMark::No,
        )
    }
    /// Parse a type without recovering `:` as `->` to avoid breaking code such
    /// as `where fn() : for<'a>`. (`=>` is still recovered via
    /// `RecoverReturnSign::OnlyFatArrow`.)
    pub(super) fn parse_ty_for_where_clause(&mut self) -> PResult<'a, Box<Ty>> {
        self.parse_ty_common(
            AllowPlus::Yes,
            AllowCVariadic::No,
            RecoverQPath::Yes,
            RecoverReturnSign::OnlyFatArrow,
            None,
            RecoverQuestionMark::Yes,
        )
    }
    /// Parses an optional return type `[ -> TY ]` in a function declaration.
    ///
    /// Depending on `recover_return_sign`, also recovers `=> TY` and `: TY`
    /// as if `-> TY` had been written, emitting `ReturnTypesUseThinArrow`.
    pub(super) fn parse_ret_ty(
        &mut self,
        allow_plus: AllowPlus,
        recover_qpath: RecoverQPath,
        recover_return_sign: RecoverReturnSign,
    ) -> PResult<'a, FnRetTy> {
        let lo = self.prev_token.span;
        Ok(if self.eat(exp!(RArrow)) {
            // FIXME(Centril): Can we unconditionally `allow_plus`?
            let ty = self.parse_ty_common(
                allow_plus,
                AllowCVariadic::No,
                recover_qpath,
                recover_return_sign,
                None,
                RecoverQuestionMark::Yes,
            )?;
            FnRetTy::Ty(ty)
        } else if recover_return_sign.can_recover(&self.token.kind) {
            // Don't `eat` to prevent `=>` from being added as an expected token which isn't
            // actually expected and could only confuse users
            self.bump();
            self.dcx().emit_err(ReturnTypesUseThinArrow {
                span: self.prev_token.span,
                suggestion: lo.between(self.token.span),
            });
            let ty = self.parse_ty_common(
                allow_plus,
                AllowCVariadic::No,
                recover_qpath,
                recover_return_sign,
                None,
                RecoverQuestionMark::Yes,
            )?;
            FnRetTy::Ty(ty)
        } else {
            // No arrow at all: the function returns the unit type.
            FnRetTy::Default(self.prev_token.span.shrink_to_hi())
        })
    }
    /// Common implementation backing every `parse_ty*` entry point.
    ///
    /// The flag parameters select which tokens are permitted (`+`, `...`) and
    /// which erroneous forms are recovered (qualified paths, `=>`/`:` return
    /// arrows, trailing `?`). `ty_generics` carries the surrounding item's
    /// generics for path parsing, when available.
    fn parse_ty_common(
        &mut self,
        allow_plus: AllowPlus,
        allow_c_variadic: AllowCVariadic,
        recover_qpath: RecoverQPath,
        recover_return_sign: RecoverReturnSign,
        ty_generics: Option<&Generics>,
        recover_question_mark: RecoverQuestionMark,
    ) -> PResult<'a, Box<Ty>> {
        let allow_qpath_recovery = recover_qpath == RecoverQPath::Yes;
        maybe_recover_from_interpolated_ty_qpath!(self, allow_qpath_recovery);
        // Outer attributes are not legal on types: consume them, report the
        // error, and continue with a `TyKind::Err` placeholder.
        if self.token == token::Pound && self.look_ahead(1, |t| *t == token::OpenBracket) {
            let attrs_wrapper = self.parse_outer_attributes()?;
            let raw_attrs = attrs_wrapper.take_for_recovery(self.psess);
            let attr_span = raw_attrs[0].span.to(raw_attrs.last().unwrap().span);
            let (full_span, guar) = match self.parse_ty() {
                Ok(ty) => {
                    let full_span = attr_span.until(ty.span);
                    let guar = self
                        .dcx()
                        .emit_err(AttributeOnType { span: attr_span, fix_span: full_span });
                    // NOTE(review): this arm returns `attr_span` rather than
                    // the `full_span` computed above, so the resulting `Err`
                    // type only spans the attribute — confirm this is intended.
                    (attr_span, guar)
                }
                Err(err) => {
                    err.cancel();
                    let guar = self.dcx().emit_err(AttributeOnEmptyType { span: attr_span });
                    (attr_span, guar)
                }
            };
            return Ok(self.mk_ty(full_span, TyKind::Err(guar)));
        }
        // A `ty` metavariable fragment: reuse the pre-parsed type directly.
        if let Some(ty) = self.eat_metavar_seq_with_matcher(
            |mv_kind| matches!(mv_kind, MetaVarKind::Ty { .. }),
            |this| this.parse_ty_no_question_mark_recover(),
        ) {
            return Ok(ty);
        }
        let lo = self.token.span;
        let mut impl_dyn_multi = false;
        // Dispatch on the first token(s) to the specialized type parsers.
        let kind = if self.check(exp!(OpenParen)) {
            self.parse_ty_tuple_or_parens(lo, allow_plus)?
        } else if self.eat(exp!(Bang)) {
            // Never type `!`
            TyKind::Never
        } else if self.eat(exp!(Star)) {
            self.parse_ty_ptr()?
        } else if self.eat(exp!(OpenBracket)) {
            self.parse_array_or_slice_ty()?
        } else if self.check(exp!(And)) || self.check(exp!(AndAnd)) {
            // Reference
            self.expect_and()?;
            self.parse_borrowed_pointee()?
        } else if self.eat_keyword_noexpect(kw::Typeof) {
            self.parse_typeof_ty(lo)?
        } else if self.eat_keyword(exp!(Underscore)) {
            // A type to be inferred `_`
            TyKind::Infer
        } else if self.check_fn_front_matter(false, Case::Sensitive) {
            // Function pointer type
            self.parse_ty_fn_ptr(lo, ThinVec::new(), None, recover_return_sign)?
        } else if self.check_keyword(exp!(For)) {
            // Function pointer type or bound list (trait object type) starting with a poly-trait.
            //   `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
            //   `for<'lt> Trait1<'lt> + Trait2 + 'a`
            let (bound_vars, _) = self.parse_higher_ranked_binder()?;
            if self.check_fn_front_matter(false, Case::Sensitive) {
                self.parse_ty_fn_ptr(
                    lo,
                    bound_vars,
                    Some(self.prev_token.span.shrink_to_lo()),
                    recover_return_sign,
                )?
            } else {
                // Try to recover `for<'a> dyn Trait` or `for<'a> impl Trait`.
                if self.may_recover()
                    && (self.eat_keyword_noexpect(kw::Impl) || self.eat_keyword_noexpect(kw::Dyn))
                {
                    let kw = self.prev_token.ident().unwrap().0;
                    let removal_span = kw.span.with_hi(self.token.span.lo());
                    let path = self.parse_path(PathStyle::Type)?;
                    let parse_plus = allow_plus == AllowPlus::Yes && self.check_plus();
                    let kind = self.parse_remaining_bounds_path(
                        bound_vars,
                        path,
                        lo,
                        parse_plus,
                        ast::Parens::No,
                    )?;
                    // Suggest moving the keyword in front of the binder:
                    // `dyn for<'a> Trait` / `impl for<'a> Trait`.
                    let err = self.dcx().create_err(errors::TransposeDynOrImpl {
                        span: kw.span,
                        kw: kw.name.as_str(),
                        sugg: errors::TransposeDynOrImplSugg {
                            removal_span,
                            insertion_span: lo.shrink_to_lo(),
                            kw: kw.name.as_str(),
                        },
                    });
                    // Take the parsed bare trait object and turn it either
                    // into a `dyn` object or an `impl Trait`.
                    let kind = match (kind, kw.name) {
                        (TyKind::TraitObject(bounds, _), kw::Dyn) => {
                            TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
                        }
                        (TyKind::TraitObject(bounds, _), kw::Impl) => {
                            TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
                        }
                        _ => return Err(err),
                    };
                    err.emit();
                    kind
                } else {
                    let path = self.parse_path(PathStyle::Type)?;
                    let parse_plus = allow_plus == AllowPlus::Yes && self.check_plus();
                    self.parse_remaining_bounds_path(
                        bound_vars,
                        path,
                        lo,
                        parse_plus,
                        ast::Parens::No,
                    )?
                }
            }
        } else if self.eat_keyword(exp!(Impl)) {
            self.parse_impl_ty(&mut impl_dyn_multi)?
        } else if self.is_explicit_dyn_type() {
            self.parse_dyn_ty(&mut impl_dyn_multi)?
        } else if self.eat_lt() {
            // Qualified path
            let (qself, path) = self.parse_qpath(PathStyle::Type)?;
            TyKind::Path(Some(qself), path)
        } else if (self.token.is_keyword(kw::Const) || self.token.is_keyword(kw::Mut))
            && self.look_ahead(1, |t| *t == token::Star)
        {
            // `const* T` / `mut* T`: a C-style raw-pointer typo.
            self.parse_ty_c_style_pointer()?
        } else if self.check_path() {
            self.parse_path_start_ty(lo, allow_plus, ty_generics)?
        } else if self.can_begin_bound() {
            self.parse_bare_trait_object(lo, allow_plus)?
        } else if self.eat(exp!(DotDotDot)) {
            match allow_c_variadic {
                AllowCVariadic::Yes => TyKind::CVarArgs,
                AllowCVariadic::No => {
                    // FIXME(c_variadic): Should we just allow `...` syntactically
                    // anywhere in a type and use semantic restrictions instead?
                    // NOTE: This may regress certain MBE calls if done incorrectly.
                    let guar = self.dcx().emit_err(NestedCVariadicType { span: lo });
                    TyKind::Err(guar)
                }
            }
        } else if self.check_keyword(exp!(Unsafe))
            && self.look_ahead(1, |tok| tok.kind == token::Lt)
        {
            self.parse_unsafe_binder_ty()?
        } else {
            let msg = format!("expected type, found {}", super::token_descr(&self.token));
            let mut err = self.dcx().struct_span_err(lo, msg);
            err.span_label(lo, "expected type");
            return Err(err);
        };
        let span = lo.to(self.prev_token.span);
        let mut ty = self.mk_ty(span, kind);
        // Try to recover from use of `+` with incorrect priority.
        match allow_plus {
            AllowPlus::Yes => self.maybe_recover_from_bad_type_plus(&ty)?,
            AllowPlus::No => self.maybe_report_ambiguous_plus(impl_dyn_multi, &ty),
        }
        if let RecoverQuestionMark::Yes = recover_question_mark {
            ty = self.maybe_recover_from_question_mark(ty);
        }
        if allow_qpath_recovery { self.maybe_recover_from_bad_qpath(ty) } else { Ok(ty) }
    }
    /// Parses an unsafe binder type `unsafe<'a, ...> Ty`
    /// (feature `unsafe_binders`; the whole span is feature-gated below).
    fn parse_unsafe_binder_ty(&mut self) -> PResult<'a, TyKind> {
        let lo = self.token.span;
        // The caller only dispatches here after seeing `unsafe` followed by `<`.
        assert!(self.eat_keyword(exp!(Unsafe)));
        self.expect_lt()?;
        let generic_params = self.parse_generic_params()?;
        self.expect_gt()?;
        let inner_ty = self.parse_ty()?;
        let span = lo.to(self.prev_token.span);
        self.psess.gated_spans.gate(sym::unsafe_binders, span);
        Ok(TyKind::UnsafeBinder(Box::new(UnsafeBinderTy { generic_params, inner_ty })))
    }
    /// Parses either:
    /// - `(TYPE)`, a parenthesized type.
    /// - `(TYPE,)`, a tuple with a single field of type TYPE.
    /// - `(TYPE, TYPE, ...)`, a tuple of two or more fields.
    ///
    /// Also recovers `(BareTraitBound) + Bound + ...` as a trait object whose
    /// first bound is parenthesized.
    fn parse_ty_tuple_or_parens(&mut self, lo: Span, allow_plus: AllowPlus) -> PResult<'a, TyKind> {
        let mut trailing_plus = false;
        let (ts, trailing) = self.parse_paren_comma_seq(|p| {
            let ty = p.parse_ty()?;
            trailing_plus = p.prev_token == TokenKind::Plus;
            Ok(ty)
        })?;
        if ts.len() == 1 && matches!(trailing, Trailing::No) {
            let ty = ts.into_iter().next().unwrap();
            let maybe_bounds = allow_plus == AllowPlus::Yes && self.token.is_like_plus();
            match ty.kind {
                // `"(" BareTraitBound ")" "+" Bound "+" ...`.
                TyKind::Path(None, path) if maybe_bounds => self.parse_remaining_bounds_path(
                    ThinVec::new(),
                    path,
                    lo,
                    true,
                    ast::Parens::Yes,
                ),
                // For `('a) + …`, we know that `'a` in type position already lead to an error being
                // emitted. To reduce output, let's indirectly suppress E0178 (bad `+` in type) and
                // other irrelevant consequential errors.
                TyKind::TraitObject(bounds, TraitObjectSyntax::None)
                    if maybe_bounds && bounds.len() == 1 && !trailing_plus =>
                {
                    self.parse_remaining_bounds(bounds, true)
                }
                // `(TYPE)`
                _ => Ok(TyKind::Paren(ty)),
            }
        } else {
            Ok(TyKind::Tup(ts))
        }
    }
    /// Parses a bare (`dyn`-less) trait object type, i.e. a `+`-separated
    /// bound list appearing directly in type position.
    fn parse_bare_trait_object(&mut self, lo: Span, allow_plus: AllowPlus) -> PResult<'a, TyKind> {
        // A lifetime only begins a bare trait object type if it is followed by `+`!
        if self.token.is_lifetime() && !self.look_ahead(1, |t| t.is_like_plus()) {
            // In Rust 2021 and beyond, we assume that the user didn't intend to write a bare trait
            // object type with a leading lifetime bound since that seems very unlikely given the
            // fact that `dyn`-less trait objects are *semantically* invalid.
            if self.psess.edition.at_least_rust_2021() {
                let lt = self.expect_lifetime();
                let mut err = self.dcx().struct_span_err(lo, "expected type, found lifetime");
                err.span_label(lo, "expected type");
                // Perhaps the user wrote `'a T` meaning the reference `&'a T`.
                return Ok(match self.maybe_recover_ref_ty_no_leading_ampersand(lt, lo, err) {
                    Ok(ref_ty) => ref_ty,
                    Err(err) => TyKind::Err(err.emit()),
                });
            }
            self.dcx().emit_err(NeedPlusAfterTraitObjectLifetime {
                span: lo,
                suggestion: lo.shrink_to_hi(),
            });
        }
        Ok(TyKind::TraitObject(
            self.parse_generic_bounds_common(allow_plus)?,
            TraitObjectSyntax::None,
        ))
    }
    /// After a lone `'lifetime` appeared in type position (Rust 2021+ path in
    /// `parse_bare_trait_object`), tries to reparse the remainder as
    /// `[mut] TYPE` and suggests prepending `&` to form a reference type.
    /// On failure, the parser state is restored and the original diagnostic
    /// is handed back to the caller unemitted.
    fn maybe_recover_ref_ty_no_leading_ampersand<'cx>(
        &mut self,
        lt: Lifetime,
        lo: Span,
        mut err: Diag<'cx>,
    ) -> Result<TyKind, Diag<'cx>> {
        if !self.may_recover() {
            return Err(err);
        }
        let snapshot = self.create_snapshot_for_diagnostic();
        let mutbl = self.parse_mutability();
        match self.parse_ty_no_plus() {
            Ok(ty) => {
                err.span_suggestion_verbose(
                    lo.shrink_to_lo(),
                    "you might have meant to write a reference type here",
                    "&",
                    Applicability::MaybeIncorrect,
                );
                err.emit();
                Ok(TyKind::Ref(Some(lt), MutTy { ty, mutbl }))
            }
            Err(diag) => {
                diag.cancel();
                self.restore_snapshot(snapshot);
                Err(err)
            }
        }
    }
    /// Wraps an already-parsed (poly-)trait path as the first bound of a bare
    /// trait object and parses any remaining `+ Bound` items via
    /// [`Self::parse_remaining_bounds`].
    fn parse_remaining_bounds_path(
        &mut self,
        generic_params: ThinVec<GenericParam>,
        path: ast::Path,
        lo: Span,
        parse_plus: bool,
        parens: ast::Parens,
    ) -> PResult<'a, TyKind> {
        let poly_trait_ref = PolyTraitRef::new(
            generic_params,
            path,
            TraitBoundModifiers::NONE,
            lo.to(self.prev_token.span),
            parens,
        );
        let bounds = vec![GenericBound::Trait(poly_trait_ref)];
        self.parse_remaining_bounds(bounds, parse_plus)
    }
/// Parse the remainder of a bare trait object type given an already parsed list.
fn parse_remaining_bounds(
&mut self,
mut bounds: GenericBounds,
plus: bool,
) -> PResult<'a, TyKind> {
if plus {
self.eat_plus(); // `+`, or `+=` gets split and `+` is discarded
bounds.append(&mut self.parse_generic_bounds()?);
}
Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
}
    /// Recovers a raw pointer written with a C-style typo, i.e. `const* T` or
    /// `mut* T`, emitting a machine-applicable suggestion to move the `*`
    /// in front (`*const T` / `*mut T`).
    fn parse_ty_c_style_pointer(&mut self) -> PResult<'a, TyKind> {
        let kw_span = self.token.span;
        let mutbl = self.parse_const_or_mut();
        if let Some(mutbl) = mutbl
            && self.eat(exp!(Star))
        {
            let star_span = self.prev_token.span;
            let mutability = match mutbl {
                Mutability::Not => "const",
                Mutability::Mut => "mut",
            };
            let ty = self.parse_ty_no_question_mark_recover()?;
            self.dcx()
                .struct_span_err(
                    kw_span,
                    format!("raw pointer types must be written as `*{mutability} T`"),
                )
                .with_multipart_suggestion(
                    format!("put the `*` before `{mutability}`"),
                    vec![(star_span, String::new()), (kw_span.shrink_to_lo(), "*".to_string())],
                    Applicability::MachineApplicable,
                )
                .emit();
            return Ok(TyKind::Ptr(MutTy { ty, mutbl }));
        }
        // This is unreachable because we always get into if above and return from it
        // (the caller only dispatches here when `const`/`mut` is followed by `*`).
        unreachable!("this could never happen")
    }
    /// Parses a raw pointer type: `*[const | mut] $type`.
    /// A missing `const`/`mut` is reported and defaults to `*const`.
    fn parse_ty_ptr(&mut self) -> PResult<'a, TyKind> {
        let mutbl = self.parse_const_or_mut().unwrap_or_else(|| {
            let span = self.prev_token.span;
            self.dcx().emit_err(ExpectedMutOrConstInRawPointerType {
                span,
                after_asterisk: span.shrink_to_hi(),
            });
            Mutability::Not
        });
        let ty = self.parse_ty_no_plus()?;
        Ok(TyKind::Ptr(MutTy { ty, mutbl }))
    }
/// Parses an array (`[TYPE; EXPR]`) or slice (`[TYPE]`) type.
/// The opening `[` bracket is already eaten.
fn parse_array_or_slice_ty(&mut self) -> PResult<'a, TyKind> {
let elt_ty = match self.parse_ty() {
Ok(ty) => ty,
Err(err)
if self.look_ahead(1, |t| *t == token::CloseBracket)
| self.look_ahead(1, |t| *t == token::Semi) =>
{
// Recover from `[LIT; EXPR]` and `[LIT]`
self.bump();
let guar = err.emit();
self.mk_ty(self.prev_token.span, TyKind::Err(guar))
}
Err(err) => return Err(err),
};
let ty = if self.eat(exp!(Semi)) {
let mut length =
self.parse_expr_anon_const(|this, expr| this.mgca_direct_lit_hack(expr))?;
if let Err(e) = self.expect(exp!(CloseBracket)) {
// Try to recover from `X<Y, ...>` when `X::<Y, ...>` works
self.check_mistyped_turbofish_with_multiple_type_params(e, &mut length.value)?;
self.expect(exp!(CloseBracket))?;
}
TyKind::Array(elt_ty, length)
} else if self.eat(exp!(CloseBracket)) {
TyKind::Slice(elt_ty)
} else {
self.maybe_recover_array_ty_without_semi(elt_ty)?
};
Ok(ty)
}
    /// Recover from malformed array type syntax.
    ///
    /// This method attempts to recover from cases like:
    /// - `[u8, 5]` → suggests using `;`, returns an array type
    /// - `[u8 5]` → suggests using `;`, returns an array type
    /// Consider adding more cases in the future.
    fn maybe_recover_array_ty_without_semi(&mut self, elt_ty: Box<Ty>) -> PResult<'a, TyKind> {
        let span = self.token.span;
        let token_descr = super::token_descr(&self.token);
        let mut err =
            self.dcx().struct_span_err(span, format!("expected `;` or `]`, found {}", token_descr));
        err.span_label(span, "expected `;` or `]`");
        // If we cannot recover, return the error immediately.
        if !self.may_recover() {
            return Err(err);
        }
        let snapshot = self.create_snapshot_for_diagnostic();
        // Consume common erroneous separators.
        let hi = self.prev_token.span.hi();
        _ = self.eat(exp!(Comma)) || self.eat(exp!(Colon)) || self.eat(exp!(Star));
        let suggestion_span = self.prev_token.span.with_lo(hi);
        // FIXME(mgca): recovery is broken for `const {` args
        // we first try to parse pattern like `[u8 5]`
        let length = match self.parse_expr_anon_const(|_, _| MgcaDisambiguation::Direct) {
            Ok(length) => length,
            Err(e) => {
                // No length expression either: give up and report the original error.
                e.cancel();
                self.restore_snapshot(snapshot);
                return Err(err);
            }
        };
        if let Err(e) = self.expect(exp!(CloseBracket)) {
            e.cancel();
            self.restore_snapshot(snapshot);
            return Err(err);
        }
        err.span_suggestion_verbose(
            suggestion_span,
            "you might have meant to use `;` as the separator",
            ";",
            Applicability::MaybeIncorrect,
        );
        err.emit();
        Ok(TyKind::Array(elt_ty, length))
    }
    /// Parses the pointee of a reference type (`&`/`&&` already eaten):
    /// optional lifetime, optional `pin`/`mut`, then the type itself,
    /// recovering the misorderings `&mut 'a T` and `&dyn mut T`.
    fn parse_borrowed_pointee(&mut self) -> PResult<'a, TyKind> {
        let and_span = self.prev_token.span;
        let mut opt_lifetime = self.check_lifetime().then(|| self.expect_lifetime());
        let (pinned, mut mutbl) = self.parse_pin_and_mut();
        if self.token.is_lifetime() && mutbl == Mutability::Mut && opt_lifetime.is_none() {
            // A lifetime is invalid here: it would be part of a bare trait bound, which requires
            // it to be followed by a plus, but we disallow plus in the pointee type.
            // So we can handle this case as an error here, and suggest `'a mut`.
            // If there *is* a plus next though, handling the error later provides better suggestions
            // (like adding parentheses)
            if !self.look_ahead(1, |t| t.is_like_plus()) {
                let lifetime_span = self.token.span;
                let span = and_span.to(lifetime_span);
                let (suggest_lifetime, snippet) =
                    if let Ok(lifetime_src) = self.span_to_snippet(lifetime_span) {
                        (Some(span), lifetime_src)
                    } else {
                        (None, String::new())
                    };
                self.dcx().emit_err(LifetimeAfterMut { span, suggest_lifetime, snippet });
                opt_lifetime = Some(self.expect_lifetime());
            }
        } else if self.token.is_keyword(kw::Dyn)
            && mutbl == Mutability::Not
            && self.look_ahead(1, |t| t.is_keyword(kw::Mut))
        {
            // We have `&dyn mut ...`, which is invalid and should be `&mut dyn ...`.
            let span = and_span.to(self.look_ahead(1, |t| t.span));
            self.dcx().emit_err(DynAfterMut { span });
            // Recovery: swap `dyn` and `mut` by re-inserting the `dyn` token
            // after consuming both.
            mutbl = Mutability::Mut;
            let (dyn_tok, dyn_tok_sp) = (self.token, self.token_spacing);
            self.bump();
            self.bump_with((dyn_tok, dyn_tok_sp));
        }
        let ty = self.parse_ty_no_plus()?;
        Ok(match pinned {
            Pinnedness::Not => TyKind::Ref(opt_lifetime, MutTy { ty, mutbl }),
            Pinnedness::Pinned => TyKind::PinnedRef(opt_lifetime, MutTy { ty, mutbl }),
        })
    }
/// Parses `pin` and `mut` annotations on references, patterns, or borrow modifiers.
///
/// It must be either `pin const`, `pin mut`, `mut`, or nothing (immutable).
pub(crate) fn parse_pin_and_mut(&mut self) -> (Pinnedness, Mutability) {
if self.token.is_ident_named(sym::pin) && self.look_ahead(1, Token::is_mutability) {
self.psess.gated_spans.gate(sym::pin_ergonomics, self.token.span);
assert!(self.eat_keyword(exp!(Pin)));
let mutbl = self.parse_const_or_mut().unwrap();
(Pinnedness::Pinned, mutbl)
} else {
(Pinnedness::Not, self.parse_mutability())
}
}
    /// Parses the `typeof(EXPR)` for better diagnostics before returning
    /// an error type (`typeof` is a reserved keyword but unimplemented).
    fn parse_typeof_ty(&mut self, lo: Span) -> PResult<'a, TyKind> {
        self.expect(exp!(OpenParen))?;
        // The expression is parsed and discarded so the error span covers
        // the whole `typeof(...)` form.
        let _expr = self.parse_expr_anon_const(|_, _| MgcaDisambiguation::AnonConst)?;
        self.expect(exp!(CloseParen))?;
        let span = lo.to(self.prev_token.span);
        let guar = self
            .dcx()
            .struct_span_err(span, "`typeof` is a reserved keyword but unimplemented")
            .with_note("consider replacing `typeof(...)` with an actual type")
            .with_code(E0516)
            .emit();
        Ok(TyKind::Err(guar))
    }
    /// Parses a function pointer type (`TyKind::FnPtr`).
    /// ```ignore (illustrative)
    ///    [unsafe] [extern "ABI"] fn (S) -> T
    /// //  ^~~~~^          ^~~~^     ^~^    ^
    /// //    |               |        |     |
    /// //    |               |        |   Return type
    /// // Function Style    ABI  Parameter types
    /// ```
    /// We actually parse `FnHeader FnDecl`, but we error on `const` and `async` qualifiers.
    fn parse_ty_fn_ptr(
        &mut self,
        lo: Span,
        mut params: ThinVec<GenericParam>,
        param_insertion_point: Option<Span>,
        recover_return_sign: RecoverReturnSign,
    ) -> PResult<'a, TyKind> {
        // Function pointers carry no visibility of their own.
        let inherited_vis = rustc_ast::Visibility {
            span: rustc_span::DUMMY_SP,
            kind: rustc_ast::VisibilityKind::Inherited,
            tokens: None,
        };
        let span_start = self.token.span;
        let ast::FnHeader { ext, safety, .. } = self.parse_fn_front_matter(
            &inherited_vis,
            Case::Sensitive,
            FrontMatterParsingMode::FunctionPtrType,
        )?;
        // Recover `fn<'a>(...)`: generics belong in a leading `for<'a>`.
        if self.may_recover() && self.token == TokenKind::Lt {
            self.recover_fn_ptr_with_generics(lo, &mut params, param_insertion_point)?;
        }
        let mode = crate::parser::item::FnParseMode {
            req_name: |_, _| false,
            context: FnContext::Free,
            req_body: false,
        };
        let decl = self.parse_fn_decl(&mode, AllowPlus::No, recover_return_sign)?;
        let decl_span = span_start.to(self.prev_token.span);
        Ok(TyKind::FnPtr(Box::new(FnPtrTy {
            ext,
            safety,
            generic_params: params,
            decl,
            decl_span,
        })))
    }
    /// Recover from function pointer types with a generic parameter list (e.g. `fn<'a>(&'a str)`).
    ///
    /// Only lifetime parameters are salvaged: they are appended to `params`
    /// and suggested as a `for<...>` binder (inserted at
    /// `param_insertion_point` when an existing binder is present, otherwise
    /// at `lo`).
    fn recover_fn_ptr_with_generics(
        &mut self,
        lo: Span,
        params: &mut ThinVec<GenericParam>,
        param_insertion_point: Option<Span>,
    ) -> PResult<'a, ()> {
        let generics = self.parse_generics()?;
        let arity = generics.params.len();
        let mut lifetimes: ThinVec<_> = generics
            .params
            .into_iter()
            .filter(|param| matches!(param.kind, ast::GenericParamKind::Lifetime))
            .collect();
        let sugg = if !lifetimes.is_empty() {
            let snippet =
                lifetimes.iter().map(|param| param.ident.as_str()).intersperse(", ").collect();
            let (left, snippet) = if let Some(span) = param_insertion_point {
                (span, if params.is_empty() { snippet } else { format!(", {snippet}") })
            } else {
                (lo.shrink_to_lo(), format!("for<{snippet}> "))
            };
            Some(FnPtrWithGenericsSugg {
                left,
                snippet,
                right: generics.span,
                arity,
                for_param_list_exists: param_insertion_point.is_some(),
            })
        } else {
            None
        };
        self.dcx().emit_err(FnPtrWithGenerics { span: generics.span, sugg });
        params.append(&mut lifetimes);
        Ok(())
    }
    /// Parses an `impl B0 + ... + Bn` type (the `impl` keyword already eaten).
    ///
    /// `impl_dyn_multi` is set when more than one bound (or a trailing `+`)
    /// was parsed, so the caller can report an ambiguous `+` when needed.
    fn parse_impl_ty(&mut self, impl_dyn_multi: &mut bool) -> PResult<'a, TyKind> {
        if self.token.is_lifetime() {
            self.look_ahead(1, |t| {
                if let token::Ident(sym, _) = t.kind {
                    // parse pattern with "'a Sized" we're supposed to give suggestion like
                    // "'a + Sized"
                    self.dcx().emit_err(errors::MissingPlusBounds {
                        span: self.token.span,
                        hi: self.token.span.shrink_to_hi(),
                        sym,
                    });
                }
            })
        }
        // Always parse bounds greedily for better error recovery.
        let bounds = self.parse_generic_bounds()?;
        *impl_dyn_multi = bounds.len() > 1 || self.prev_token == TokenKind::Plus;
        Ok(TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds))
    }
/// Parse a use-bound aka precise capturing list.
///
/// ```ebnf
/// UseBound = "use" "<" (PreciseCapture ("," PreciseCapture)* ","?)? ">"
/// PreciseCapture = "Self" | Ident | Lifetime
/// ```
///
/// The `use` keyword has already been eaten by the caller; `lo` is the start of the
/// bound and `parens` says whether it was (erroneously) parenthesized.
fn parse_use_bound(&mut self, lo: Span, parens: ast::Parens) -> PResult<'a, GenericBound> {
    self.expect_lt()?;
    // Recovery: also stop before `>=` and `>>` so a mistyped closer still ends the list.
    // (The list previously contained `&TokenKind::Shr` twice; one entry suffices.)
    let (args, _, _) = self.parse_seq_to_before_tokens(
        &[exp!(Gt)],
        &[&TokenKind::Ge, &TokenKind::Shr],
        SeqSep::trailing_allowed(exp!(Comma)),
        |self_| {
            if self_.check_keyword(exp!(SelfUpper)) {
                // `Self` is not an ordinary ident, so it is eaten explicitly and
                // turned into a one-segment path.
                self_.bump();
                Ok(PreciseCapturingArg::Arg(
                    ast::Path::from_ident(self_.prev_token.ident().unwrap().0),
                    DUMMY_NODE_ID,
                ))
            } else if self_.check_ident() {
                Ok(PreciseCapturingArg::Arg(
                    ast::Path::from_ident(self_.parse_ident()?),
                    DUMMY_NODE_ID,
                ))
            } else if self_.check_lifetime() {
                Ok(PreciseCapturingArg::Lifetime(self_.expect_lifetime()))
            } else {
                self_.unexpected_any()
            }
        },
    )?;
    self.expect_gt()?;
    if let ast::Parens::Yes = parens {
        // Consume the stray `)` and report it; the bound itself is still produced.
        self.expect(exp!(CloseParen))?;
        self.report_parenthesized_bound(lo, self.prev_token.span, "precise capturing lists");
    }
    Ok(GenericBound::Use(args, lo.to(self.prev_token.span)))
}
/// Is a `dyn B0 + ... + Bn` type allowed here?
///
/// `dyn` is only unconditionally a keyword since Rust 2018; in edition 2015 it
/// introduces a trait object only when the next token could begin a bound.
fn is_explicit_dyn_type(&mut self) -> bool {
    if !self.check_keyword(exp!(Dyn)) {
        return false;
    }
    self.token_uninterpolated_span().at_least_rust_2018()
        || self.look_ahead(1, |&t| can_begin_dyn_bound_in_edition_2015(t))
}
/// Parses a `dyn B0 + ... + Bn` type.
///
/// Note that this does *not* parse bare trait objects.
fn parse_dyn_ty(&mut self, impl_dyn_multi: &mut bool) -> PResult<'a, TyKind> {
    self.bump(); // `dyn`
    // Always parse bounds greedily for better error recovery.
    let bounds = self.parse_generic_bounds()?;
    // Report whether the trait object spans several bounds (or ends in a stray `+`).
    let has_multiple_bounds = bounds.len() > 1 || self.prev_token == TokenKind::Plus;
    *impl_dyn_multi = has_multiple_bounds;
    Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn))
}
/// Parses a type starting with a path.
///
/// This can be:
/// 1. a type macro, `mac!(...)`,
/// 2. a bare trait object, `B0 + ... + Bn`,
/// 3. or a path, `path::to::MyType`.
fn parse_path_start_ty(
    &mut self,
    lo: Span,
    allow_plus: AllowPlus,
    ty_generics: Option<&Generics>,
) -> PResult<'a, TyKind> {
    // Simple path
    let path = self.parse_path_inner(PathStyle::Type, ty_generics)?;
    if self.eat(exp!(Bang)) {
        // Macro invocation in type position
        let args = self.parse_delim_args()?;
        return Ok(TyKind::MacCall(Box::new(MacCall { path, args })));
    }
    if allow_plus == AllowPlus::Yes && self.check_plus() {
        // `Trait1 + Trait2 + 'a`
        return self.parse_remaining_bounds_path(ThinVec::new(), path, lo, true, ast::Parens::No);
    }
    // Just a type path.
    Ok(TyKind::Path(None, path))
}
/// Parse a `+`-separated list of generic bounds (trailing `+` allowed).
///
/// Convenience wrapper around [`Self::parse_generic_bounds_common`] with `AllowPlus::Yes`.
pub(super) fn parse_generic_bounds(&mut self) -> PResult<'a, GenericBounds> {
    self.parse_generic_bounds_common(AllowPlus::Yes)
}
/// Parse generic bounds.
///
/// Only if `allow_plus` this parses a `+`-separated list of bounds (trailing `+` is admitted).
/// Otherwise, this only parses a single bound or none.
fn parse_generic_bounds_common(&mut self, allow_plus: AllowPlus) -> PResult<'a, GenericBounds> {
    let mut bounds = Vec::new();
    // In addition to looping while we find generic bounds:
    // We continue even if we find a keyword. This is necessary for error recovery on,
    // for example, `impl fn()`. The only keyword that can go after generic bounds is
    // `where`, so stop if it's it.
    // We also continue if we find types (not traits), again for error recovery.
    while self.can_begin_bound()
        || (self.may_recover()
            && (self.token.can_begin_type()
                || (self.token.is_reserved_ident() && !self.token.is_keyword(kw::Where))))
    {
        if self.token.is_keyword(kw::Dyn) {
            // Account for `&dyn Trait + dyn Other`.
            // Eat the stray `dyn` and suggest removing it before parsing the bound.
            self.bump();
            self.dcx().emit_err(InvalidDynKeyword {
                span: self.prev_token.span,
                suggestion: self.prev_token.span.until(self.token.span),
            });
        }
        bounds.push(self.parse_generic_bound()?);
        // Stop after one bound unless `+` lists are allowed and a `+` follows.
        if allow_plus == AllowPlus::No || !self.eat_plus() {
            break;
        }
    }
    Ok(bounds)
}
/// Can the current token begin a bound?
///
/// NOTE: each `check*`/`check_keyword` call also records its token as "expected here"
/// for diagnostics; the `||` chain short-circuits at the first match, so the checks
/// are order-sensitive with respect to which expected tokens get recorded.
fn can_begin_bound(&mut self) -> bool {
    self.check_path()
    || self.check_lifetime()
    || self.check(exp!(Bang))
    || self.check(exp!(Question))
    || self.check(exp!(Tilde))
    || self.check_keyword(exp!(For))
    || self.check(exp!(OpenParen))
    || self.can_begin_maybe_const_bound()
    || self.check_keyword(exp!(Const))
    || self.check_keyword(exp!(Async))
    || self.check_keyword(exp!(Use))
}
/// Does the upcoming token sequence look like a `[const]` bound modifier?
fn can_begin_maybe_const_bound(&mut self) -> bool {
    if !self.check(exp!(OpenBracket)) {
        return false;
    }
    // `[` followed by `const` followed by `]`.
    self.look_ahead(1, |t| t.is_keyword(kw::Const))
        && self.look_ahead(2, |t| *t == token::CloseBracket)
}
/// Parse a bound.
///
/// ```ebnf
/// Bound = LifetimeBound | UseBound | TraitBound
/// ```
fn parse_generic_bound(&mut self) -> PResult<'a, GenericBound> {
    // Remember the token before the bound (used by `parse_trait_bound` to special-case
    // a preceding `dyn` for diagnostics).
    let leading_token = self.prev_token;
    let lo = self.token.span;
    // We only admit parenthesized *trait* bounds. However, we want to gracefully recover from
    // other kinds of parenthesized bounds, so parse the opening parenthesis *here*.
    //
    // In the future we might want to lift this syntactic restriction and
    // introduce "`GenericBound::Paren(Box<GenericBound>)`".
    let parens = if self.eat(exp!(OpenParen)) { ast::Parens::Yes } else { ast::Parens::No };
    // Dispatch on the bound kind; each branch is responsible for consuming the
    // closing `)` when `parens == Yes`.
    if self.token.is_lifetime() {
        self.parse_lifetime_bound(lo, parens)
    } else if self.eat_keyword(exp!(Use)) {
        self.parse_use_bound(lo, parens)
    } else {
        self.parse_trait_bound(lo, parens, &leading_token)
    }
}
/// Parse a lifetime-bound aka outlives-bound.
///
/// ```ebnf
/// LifetimeBound = Lifetime
/// ```
fn parse_lifetime_bound(&mut self, lo: Span, parens: ast::Parens) -> PResult<'a, GenericBound> {
    let lifetime = self.expect_lifetime();
    // Lifetime bounds may not be parenthesized; eat the `)` and report it.
    match parens {
        ast::Parens::Yes => {
            self.expect(exp!(CloseParen))?;
            self.report_parenthesized_bound(lo, self.prev_token.span, "lifetime bounds");
        }
        ast::Parens::No => {}
    }
    Ok(GenericBound::Outlives(lifetime))
}
/// Report that a `kind` bound (e.g. "lifetime bounds") was parenthesized, with a
/// machine-applicable suggestion to drop both parentheses. `lo`/`hi` are the spans
/// of the opening and closing parenthesis.
fn report_parenthesized_bound(&self, lo: Span, hi: Span, kind: &str) -> ErrorGuaranteed {
    let full_span = lo.to(hi);
    let mut err =
        self.dcx().struct_span_err(full_span, format!("{kind} may not be parenthesized"));
    // Delete both parens by replacing each with the empty string.
    let removals = vec![(lo, String::new()), (hi, String::new())];
    err.multipart_suggestion("remove the parentheses", removals, Applicability::MachineApplicable);
    err.emit()
}
/// Emits an error if any trait bound modifiers were present.
///
/// Called when a lifetime bound appeared where a trait bound was being parsed:
/// constness, polarity, and asyncness modifiers as well as a `for<...>` binder are
/// all meaningless on a lifetime, so at least one of them must be present here —
/// the modifiers are checked in order and the first non-default one is reported.
fn error_lt_bound_with_modifiers(
    &self,
    modifiers: TraitBoundModifiers,
    binder_span: Option<Span>,
) -> ErrorGuaranteed {
    let TraitBoundModifiers { constness, asyncness, polarity } = modifiers;
    match constness {
        BoundConstness::Never => {}
        BoundConstness::Always(span) | BoundConstness::Maybe(span) => {
            return self
                .dcx()
                .emit_err(errors::ModifierLifetime { span, modifier: constness.as_str() });
        }
    }
    match polarity {
        BoundPolarity::Positive => {}
        BoundPolarity::Negative(span) | BoundPolarity::Maybe(span) => {
            return self
                .dcx()
                .emit_err(errors::ModifierLifetime { span, modifier: polarity.as_str() });
        }
    }
    match asyncness {
        BoundAsyncness::Normal => {}
        BoundAsyncness::Async(span) => {
            return self
                .dcx()
                .emit_err(errors::ModifierLifetime { span, modifier: asyncness.as_str() });
        }
    }
    if let Some(span) = binder_span {
        return self.dcx().emit_err(errors::ModifierLifetime { span, modifier: "for<...>" });
    }
    // The caller only invokes this when something preceded the lifetime, so one of
    // the branches above must have returned.
    unreachable!("lifetime bound intercepted in `parse_generic_ty_bound` but no modifiers?")
}
/// Parses the modifiers that may precede a trait in a bound, e.g. `?Trait` or `[const] Trait`.
///
/// If no modifiers are present, this does not consume any tokens.
///
/// ```ebnf
/// Constness = ("const" | "[" "const" "]")?
/// Asyncness = "async"?
/// Polarity = ("?" | "!")?
/// ```
///
/// See `parse_trait_bound` for more context.
fn parse_trait_bound_modifiers(&mut self) -> PResult<'a, TraitBoundModifiers> {
    // `modifier_lo..modifier_hi` spans the constness+asyncness modifiers only
    // (polarity is parsed afterwards and reported separately).
    let modifier_lo = self.token.span;
    let constness = self.parse_bound_constness()?;
    let asyncness = if self.token_uninterpolated_span().at_least_rust_2018()
        && self.eat_keyword(exp!(Async))
    {
        // `async` is only a valid bound modifier in edition 2018+ (feature-gated).
        self.psess.gated_spans.gate(sym::async_trait_bounds, self.prev_token.span);
        BoundAsyncness::Async(self.prev_token.span)
    } else if self.may_recover()
        && self.token_uninterpolated_span().is_rust_2015()
        && self.is_kw_followed_by_ident(kw::Async)
    {
        // Recovery: in edition 2015 `async` is a plain ident; only treat it as the
        // modifier when an ident (the trait name) follows, and suggest a newer edition.
        self.bump(); // eat `async`
        self.dcx().emit_err(errors::AsyncBoundModifierIn2015 {
            span: self.prev_token.span,
            help: HelpUseLatestEdition::new(),
        });
        self.psess.gated_spans.gate(sym::async_trait_bounds, self.prev_token.span);
        BoundAsyncness::Async(self.prev_token.span)
    } else {
        BoundAsyncness::Normal
    };
    let modifier_hi = self.prev_token.span;
    let polarity = if self.eat(exp!(Question)) {
        BoundPolarity::Maybe(self.prev_token.span)
    } else if self.eat(exp!(Bang)) {
        self.psess.gated_spans.gate(sym::negative_bounds, self.prev_token.span);
        BoundPolarity::Negative(self.prev_token.span)
    } else {
        BoundPolarity::Positive
    };
    // Enforce the mutual-exclusivity of `const`/`async` and `?`/`!`.
    match polarity {
        BoundPolarity::Positive => {
            // All trait bound modifiers allowed to combine with positive polarity
        }
        BoundPolarity::Maybe(polarity_span) | BoundPolarity::Negative(polarity_span) => {
            match (asyncness, constness) {
                (BoundAsyncness::Normal, BoundConstness::Never) => {
                    // Ok, no modifiers.
                }
                (_, _) => {
                    // Build a human-readable rendering of the offending modifiers for
                    // the diagnostic, e.g. "const async".
                    let constness = constness.as_str();
                    let asyncness = asyncness.as_str();
                    let glue =
                        if !constness.is_empty() && !asyncness.is_empty() { " " } else { "" };
                    let modifiers_concatenated = format!("{constness}{glue}{asyncness}");
                    self.dcx().emit_err(errors::PolarityAndModifiers {
                        polarity_span,
                        polarity: polarity.as_str(),
                        modifiers_span: modifier_lo.to(modifier_hi),
                        modifiers_concatenated,
                    });
                }
            }
        }
    }
    Ok(TraitBoundModifiers { constness, asyncness, polarity })
}
/// Parse the optional constness modifier of a bound: `~const`, `[const]`, or `const`.
///
/// Returns `BoundConstness::Never` (consuming nothing) when no modifier is present.
/// All three forms are gated behind `const_trait_impl`.
pub fn parse_bound_constness(&mut self) -> PResult<'a, BoundConstness> {
    // FIXME(const_trait_impl): remove `~const` parser support once bootstrap has the new syntax
    // in rustfmt
    Ok(if self.eat(exp!(Tilde)) {
        let tilde = self.prev_token.span;
        self.expect_keyword(exp!(Const))?;
        let span = tilde.to(self.prev_token.span);
        self.psess.gated_spans.gate(sym::const_trait_impl, span);
        BoundConstness::Maybe(span)
    } else if self.can_begin_maybe_const_bound() {
        let start = self.token.span;
        self.bump(); // `[`
        // `can_begin_maybe_const_bound` already looked ahead and saw `const`,
        // so this expect cannot fail.
        self.expect_keyword(exp!(Const)).unwrap();
        self.bump(); // `]`
        let span = start.to(self.prev_token.span);
        self.psess.gated_spans.gate(sym::const_trait_impl, span);
        BoundConstness::Maybe(span)
    } else if self.eat_keyword(exp!(Const)) {
        self.psess.gated_spans.gate(sym::const_trait_impl, self.prev_token.span);
        BoundConstness::Always(self.prev_token.span)
    } else {
        BoundConstness::Never
    })
}
/// Parse a trait bound.
///
/// ```ebnf
/// TraitBound = BareTraitBound | "(" BareTraitBound ")"
/// BareTraitBound =
///     (HigherRankedBinder Constness Asyncness | Polarity)
///     TypePath
/// ```
///
/// `lo` is the start of the bound (before any `(`), `parens` whether the caller
/// already ate an opening paren, and `leading_token` the token just before the
/// bound (used to special-case `&dyn (Trait + Other)` recovery).
fn parse_trait_bound(
    &mut self,
    lo: Span,
    parens: ast::Parens,
    leading_token: &Token,
) -> PResult<'a, GenericBound> {
    let (mut bound_vars, binder_span) = self.parse_higher_ranked_binder()?;
    let modifiers_lo = self.token.span;
    let modifiers = self.parse_trait_bound_modifiers()?;
    let modifiers_span = modifiers_lo.to(self.prev_token.span);
    // `for<...> ?Trait` / `for<...> !Trait` is rejected: a binder may not combine
    // with non-positive polarity.
    if let Some(binder_span) = binder_span {
        match modifiers.polarity {
            BoundPolarity::Negative(polarity_span) | BoundPolarity::Maybe(polarity_span) => {
                self.dcx().emit_err(errors::BinderAndPolarity {
                    binder_span,
                    polarity_span,
                    polarity: modifiers.polarity.as_str(),
                });
            }
            BoundPolarity::Positive => {}
        }
    }
    // Recover erroneous lifetime bound with modifiers or binder.
    // e.g. `T: for<'a> 'a` or `T: [const] 'a`.
    if self.token.is_lifetime() {
        let _: ErrorGuaranteed = self.error_lt_bound_with_modifiers(modifiers, binder_span);
        return self.parse_lifetime_bound(lo, parens);
    }
    // Recover a second binder written *after* the modifiers (e.g. `[const] for<'a> Trait`):
    // keep its params but report the misplacement.
    if let (more_bound_vars, Some(binder_span)) = self.parse_higher_ranked_binder()? {
        bound_vars.extend(more_bound_vars);
        self.dcx().emit_err(errors::BinderBeforeModifiers { binder_span, modifiers_span });
    }
    let mut path = if self.token.is_keyword(kw::Fn)
        && self.look_ahead(1, |t| *t == TokenKind::OpenParen)
        && let Some(path) = self.recover_path_from_fn()
    {
        // `fn(..)` written where `Fn(..)` was meant.
        path
    } else if !self.token.is_path_start() && self.token.can_begin_type() {
        let ty = self.parse_ty_no_plus()?;
        // Instead of finding a path (a trait), we found a type.
        let mut err = self.dcx().struct_span_err(ty.span, "expected a trait, found type");
        // If we can recover, try to extract a path from the type. Note
        // that we do not use the try operator when parsing the type because
        // if it fails then we get a parser error which we don't want (we're trying
        // to recover from errors, not make more).
        let path = if self.may_recover() {
            let (span, message, sugg, path, applicability) = match &ty.kind {
                TyKind::Ptr(..) | TyKind::Ref(..)
                    if let TyKind::Path(_, path) = &ty.peel_refs().kind =>
                {
                    (
                        ty.span.until(path.span),
                        "consider removing the indirection",
                        "",
                        path,
                        Applicability::MaybeIncorrect,
                    )
                }
                TyKind::ImplTrait(_, bounds)
                    if let [GenericBound::Trait(tr, ..), ..] = bounds.as_slice() =>
                {
                    (
                        ty.span.until(tr.span),
                        "use the trait bounds directly",
                        "",
                        &tr.trait_ref.path,
                        Applicability::MachineApplicable,
                    )
                }
                _ => return Err(err),
            };
            err.span_suggestion_verbose(span, message, sugg, applicability);
            path.clone()
        } else {
            return Err(err);
        };
        err.emit();
        path
    } else {
        self.parse_path(PathStyle::Type)?
    };
    // `Fn<'a>(..)` written with explicit lifetime args: lift them into `bound_vars`.
    if self.may_recover() && self.token == TokenKind::OpenParen {
        self.recover_fn_trait_with_lifetime_params(&mut path, &mut bound_vars)?;
    }
    if let ast::Parens::Yes = parens {
        // Someone has written something like `&dyn (Trait + Other)`. The correct code
        // would be `&(dyn Trait + Other)`
        if self.token.is_like_plus() && leading_token.is_keyword(kw::Dyn) {
            let bounds = vec![];
            self.parse_remaining_bounds(bounds, true)?;
            self.expect(exp!(CloseParen))?;
            self.dcx().emit_err(errors::IncorrectParensTraitBounds {
                span: vec![lo, self.prev_token.span],
                sugg: errors::IncorrectParensTraitBoundsSugg {
                    wrong_span: leading_token.span.shrink_to_hi().to(lo),
                    new_span: leading_token.span.shrink_to_lo(),
                },
            });
        } else {
            self.expect(exp!(CloseParen))?;
        }
    }
    let poly_trait =
        PolyTraitRef::new(bound_vars, path, modifiers, lo.to(self.prev_token.span), parens);
    Ok(GenericBound::Trait(poly_trait))
}
/// Recovers a `Fn(..)` parenthesized-style path from `fn(..)`.
///
/// Attempts to parse a full fn declaration after the `fn` keyword; on success it
/// emits `ExpectedFnPathFoundFnKeyword` and synthesizes a one-segment `Fn(..)` path
/// from the parsed inputs/output. On failure the diagnostic is cancelled and the
/// parser is restored to the snapshot, returning `None`.
fn recover_path_from_fn(&mut self) -> Option<ast::Path> {
    let fn_token_span = self.token.span;
    self.bump(); // eat `fn`
    let args_lo = self.token.span;
    // Snapshot so a failed speculative parse leaves the parser untouched.
    let snapshot = self.create_snapshot_for_diagnostic();
    let mode =
        FnParseMode { req_name: |_, _| false, context: FnContext::Free, req_body: false };
    match self.parse_fn_decl(&mode, AllowPlus::No, RecoverReturnSign::OnlyFatArrow) {
        Ok(decl) => {
            self.dcx().emit_err(ExpectedFnPathFoundFnKeyword { fn_token_span });
            Some(ast::Path {
                span: fn_token_span.to(self.prev_token.span),
                segments: thin_vec![ast::PathSegment {
                    ident: Ident::new(sym::Fn, fn_token_span),
                    id: DUMMY_NODE_ID,
                    args: Some(Box::new(ast::GenericArgs::Parenthesized(
                        ast::ParenthesizedArgs {
                            span: args_lo.to(self.prev_token.span),
                            inputs: decl.inputs.iter().map(|a| a.ty.clone()).collect(),
                            inputs_span: args_lo.until(decl.output.span()),
                            output: decl.output.clone(),
                        }
                    ))),
                }],
                tokens: None,
            })
        }
        Err(diag) => {
            // Recovery failed: suppress the speculative error and rewind.
            diag.cancel();
            self.restore_snapshot(snapshot);
            None
        }
    }
}
/// Parse an optional higher-ranked binder.
///
/// ```ebnf
/// HigherRankedBinder = ("for" "<" GenericParams ">")?
/// ```
///
/// Returns the bound generic params and the span of `<...>` (or an empty list and
/// `None` when no `for` keyword is present).
pub(super) fn parse_higher_ranked_binder(
    &mut self,
) -> PResult<'a, (ThinVec<GenericParam>, Option<Span>)> {
    if !self.eat_keyword(exp!(For)) {
        return Ok((ThinVec::new(), None));
    }
    let lo = self.token.span;
    self.expect_lt()?;
    let params = self.parse_generic_params()?;
    self.expect_gt()?;
    // We rely on AST validation to rule out invalid cases: There must not be
    // type or const parameters, and parameters must not have bounds.
    Ok((params, Some(lo.to(self.prev_token.span))))
}
/// Recover from `Fn`-family traits (Fn, FnMut, FnOnce) with lifetime arguments
/// (e.g. `FnOnce<'a>(&'a str) -> bool`). Up to generic arguments have already
/// been eaten.
///
/// On success: `fn_path`'s last segment is rewritten to use parenthesized args,
/// the lifetimes are appended to `lifetime_defs`, and an error with a `for<...>`
/// suggestion is emitted. If the speculative parse fails (or, while parsing an
/// `fn`'s generics, the following tokens don't look like the end of a bound list),
/// the parser is rewound and `Ok(())` is returned without modifying anything.
fn recover_fn_trait_with_lifetime_params(
    &mut self,
    fn_path: &mut ast::Path,
    lifetime_defs: &mut ThinVec<GenericParam>,
) -> PResult<'a, ()> {
    let fn_path_segment = fn_path.segments.last_mut().unwrap();
    let generic_args = if let Some(p_args) = &fn_path_segment.args {
        *p_args.clone()
    } else {
        // Normally it wouldn't come here because the upstream should have parsed
        // generic parameters (otherwise it's impossible to call this function).
        return Ok(());
    };
    // Collect the explicit lifetime arguments out of the `<...>` list.
    let lifetimes =
        if let ast::GenericArgs::AngleBracketed(ast::AngleBracketedArgs { span: _, args }) =
            &generic_args
        {
            args.into_iter()
                .filter_map(|arg| {
                    if let ast::AngleBracketedArg::Arg(generic_arg) = arg
                        && let ast::GenericArg::Lifetime(lifetime) = generic_arg
                    {
                        Some(lifetime)
                    } else {
                        None
                    }
                })
                .collect()
        } else {
            Vec::new()
        };
    // Only try to recover if the trait has lifetime params.
    if lifetimes.is_empty() {
        return Ok(());
    }
    let snapshot = if self.parsing_generics {
        // The snapshot is only relevant if we're parsing the generics of an `fn` to avoid
        // incorrect recovery.
        Some(self.create_snapshot_for_diagnostic())
    } else {
        None
    };
    // Parse `(T, U) -> R`.
    let inputs_lo = self.token.span;
    let mode =
        FnParseMode { req_name: |_, _| false, context: FnContext::Free, req_body: false };
    let params = match self.parse_fn_params(&mode) {
        Ok(params) => params,
        Err(err) => {
            // With a snapshot we can back out silently; otherwise propagate.
            if let Some(snapshot) = snapshot {
                self.restore_snapshot(snapshot);
                err.cancel();
                return Ok(());
            } else {
                return Err(err);
            }
        }
    };
    let inputs: ThinVec<_> = params.into_iter().map(|input| input.ty).collect();
    let inputs_span = inputs_lo.to(self.prev_token.span);
    let output = match self.parse_ret_ty(AllowPlus::No, RecoverQPath::No, RecoverReturnSign::No)
    {
        Ok(output) => output,
        Err(err) => {
            if let Some(snapshot) = snapshot {
                self.restore_snapshot(snapshot);
                err.cancel();
                return Ok(());
            } else {
                return Err(err);
            }
        }
    };
    let args = ast::ParenthesizedArgs {
        span: fn_path_segment.span().to(self.prev_token.span),
        inputs,
        inputs_span,
        output,
    }
    .into();
    if let Some(snapshot) = snapshot
        && ![token::Comma, token::Gt, token::Plus].contains(&self.token.kind)
    {
        // We would expect another bound or the end of type params by now. Most likely we've
        // encountered a `(` *not* representing `Trait()`, but rather the start of the `fn`'s
        // argument list where the generic param list wasn't properly closed.
        self.restore_snapshot(snapshot);
        return Ok(());
    }
    // Commit: replace `Fn<'a>` with `Fn(T, U) -> R` on the path segment.
    *fn_path_segment = ast::PathSegment {
        ident: fn_path_segment.ident,
        args: Some(args),
        id: ast::DUMMY_NODE_ID,
    };
    // Convert parsed `<'a>` in `Fn<'a>` into `for<'a>`.
    let mut generic_params = lifetimes
        .iter()
        .map(|lt| GenericParam {
            id: lt.id,
            ident: lt.ident,
            attrs: ast::AttrVec::new(),
            bounds: Vec::new(),
            is_placeholder: false,
            kind: ast::GenericParamKind::Lifetime,
            colon_span: None,
        })
        .collect::<ThinVec<GenericParam>>();
    lifetime_defs.append(&mut generic_params);
    let generic_args_span = generic_args.span();
    let snippet = format!(
        "for<{}> ",
        lifetimes.iter().map(|lt| lt.ident.as_str()).intersperse(", ").collect::<String>(),
    );
    let before_fn_path = fn_path.span.shrink_to_lo();
    self.dcx()
        .struct_span_err(generic_args_span, "`Fn` traits cannot take lifetime parameters")
        .with_multipart_suggestion(
            "consider using a higher-ranked trait bound instead",
            vec![(generic_args_span, "".to_owned()), (before_fn_path, snippet)],
            Applicability::MaybeIncorrect,
        )
        .emit();
    Ok(())
}
/// Is the current token a lifetime? Also records `Lifetime` as an expected token
/// type so "expected one of ..." diagnostics can mention it.
pub(super) fn check_lifetime(&mut self) -> bool {
    self.expected_token_types.insert(TokenType::Lifetime);
    let is_lifetime = self.token.is_lifetime();
    is_lifetime
}
/// Parses a single lifetime `'a` or panics.
///
/// Reserved lifetimes (e.g. `'static` misuse targets like `'crate`) that are not
/// raw get a `KeywordLifetime` error, but a `Lifetime` node is still produced.
pub(super) fn expect_lifetime(&mut self) -> Lifetime {
    match self.token.lifetime() {
        Some((ident, is_raw)) => {
            if is_raw == IdentIsRaw::No && ident.without_first_quote().is_reserved_lifetime() {
                self.dcx().emit_err(errors::KeywordLifetime { span: ident.span });
            }
            self.bump();
            Lifetime { ident, id: ast::DUMMY_NODE_ID }
        }
        // Callers must have checked `token.is_lifetime()` first.
        None => self.dcx().span_bug(self.token.span, "not a lifetime"),
    }
}
/// Build a boxed `Ty` node with a dummy id and no captured tokens.
pub(super) fn mk_ty(&self, span: Span, kind: TyKind) -> Box<Ty> {
    Box::new(Ty { id: ast::DUMMY_NODE_ID, kind, span, tokens: None })
}
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_parse/src/parser/ty.rs |
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
"context"
"errors"
"fmt"
"math"
"slices"
"github.com/oklog/ulid/v2"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
)
// checkContextEveryNIterations is used in some tight loops to check if the context is done.
// The check is amortized because calling ctx.Err() on every single iteration would be
// needlessly expensive.
const checkContextEveryNIterations = 100
// blockBaseQuerier bundles the per-block readers shared by blockQuerier and
// blockChunkQuerier, together with the queried time range and the block's ULID.
type blockBaseQuerier struct {
	blockID    ulid.ULID
	index      IndexReader
	chunks     ChunkReader
	tombstones tombstones.Reader
	// closed guards against double Close; there is no internal locking, so the
	// querier is not safe for concurrent Close calls.
	closed     bool
	mint, maxt int64
}
// newBlockBaseQuerier opens the index, chunk, and tombstone readers of b for the
// [mint, maxt] range. On failure, readers opened so far are closed before the error
// is returned. A nil tombstone reader is replaced with an empty in-memory one so
// callers never have to nil-check it.
func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, error) {
	indexr, err := b.Index()
	if err != nil {
		return nil, fmt.Errorf("open index reader: %w", err)
	}
	chunkr, err := b.Chunks()
	if err != nil {
		indexr.Close()
		return nil, fmt.Errorf("open chunk reader: %w", err)
	}
	tombsr, err := b.Tombstones()
	if err != nil {
		indexr.Close()
		chunkr.Close()
		return nil, fmt.Errorf("open tombstone reader: %w", err)
	}
	if tombsr == nil {
		tombsr = tombstones.NewMemTombstones()
	}
	return &blockBaseQuerier{
		blockID:    b.Meta().ULID,
		mint:       mint,
		maxt:       maxt,
		index:      indexr,
		chunks:     chunkr,
		tombstones: tombsr,
	}, nil
}
// LabelValues returns the sorted values for label name, restricted by matchers
// and hints. It never produces annotations.
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	values, err := q.index.SortedLabelValues(ctx, name, hints, matchers...)
	return values, nil, err
}
// LabelNames returns all label names in the block matching the given matchers.
// Hints are currently unused; no annotations are produced.
func (q *blockBaseQuerier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	names, err := q.index.LabelNames(ctx, matchers...)
	return names, nil, err
}
// Close releases the underlying index, chunk, and tombstone readers. All three are
// always closed, even if some fail; their errors are joined. Calling Close twice
// returns an error.
func (q *blockBaseQuerier) Close() error {
	if q.closed {
		return errors.New("block querier already closed")
	}
	var errs []error
	errs = append(errs, q.index.Close())
	errs = append(errs, q.chunks.Close())
	errs = append(errs, q.tombstones.Close())
	q.closed = true
	return errors.Join(errs...)
}
// blockQuerier provides sample-level querying access to a single block database,
// delegating reader management to the embedded blockBaseQuerier.
type blockQuerier struct {
	*blockBaseQuerier
}
// NewBlockQuerier returns a querier against the block reader and requested min and max time range.
func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
	base, err := newBlockBaseQuerier(b, mint, maxt)
	if err != nil {
		return nil, err
	}
	querier := &blockQuerier{blockBaseQuerier: base}
	return querier, nil
}
// Select returns the series matching ms within the querier's block, delegating to
// selectSeriesSet with this querier's readers and time range.
func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
	return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
// selectSeriesSet resolves ms to postings on index (optionally sharded and/or
// sorted) and returns a series set over the selected series. When hints are given
// they override mint/maxt; for metadata-only calls (hints.Func == "series") chunk
// loading is skipped entirely via a no-op chunk reader.
func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
	index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.SeriesSet {
	disableTrimming := false
	sharded := hints != nil && hints.ShardCount > 0
	p, err := PostingsForMatchers(ctx, index, ms...)
	if err != nil {
		return storage.ErrSeriesSet(err)
	}
	if sharded {
		p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
	}
	if sortSeries {
		p = index.SortedPostings(p)
	}
	// Hints are applied after sharding/sorting; they only affect the time range
	// and chunk loading, not which postings are selected.
	if hints != nil {
		mint = hints.Start
		maxt = hints.End
		disableTrimming = hints.DisableTrimming
		if hints.Func == "series" {
			// When you're only looking up metadata (for example series API), you don't need to load any chunks.
			return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming)
		}
	}
	return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// blockChunkQuerier provides chunk querying access to a single block database.
// It shares reader management with blockQuerier via the embedded blockBaseQuerier.
type blockChunkQuerier struct {
	*blockBaseQuerier
}
// NewBlockChunkQuerier returns a chunk querier against the block reader and requested min and max time range.
func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
	base, err := newBlockBaseQuerier(b, mint, maxt)
	if err != nil {
		return nil, err
	}
	querier := &blockChunkQuerier{blockBaseQuerier: base}
	return querier, nil
}
// Select returns the chunk series matching ms within the querier's block,
// delegating to selectChunkSeriesSet with this querier's readers and time range.
func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
	return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
// selectChunkSeriesSet resolves ms to postings on index (optionally sharded and/or
// sorted) and returns a chunk series set over the selected series. When hints are
// given they override mint/maxt. Unlike selectSeriesSet there is no metadata-only
// fast path here.
func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
	blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.ChunkSeriesSet {
	disableTrimming := false
	sharded := hints != nil && hints.ShardCount > 0
	if hints != nil {
		mint = hints.Start
		maxt = hints.End
		disableTrimming = hints.DisableTrimming
	}
	p, err := PostingsForMatchers(ctx, index, ms...)
	if err != nil {
		return storage.ErrChunkSeriesSet(err)
	}
	if sharded {
		p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
	}
	if sortSeries {
		p = index.SortedPostings(p)
	}
	return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers. The resulting postings are not ordered by series.
//
// The algorithm splits matchers into "intersecting" ones (which select a set of
// series) and "subtracting" ones (which remove series), intersects the former and
// subtracts the latter. NOTE: it may reorder the ms slice in place (see the
// SortStableFunc call below).
func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
	// A single empty matcher selects everything.
	if len(ms) == 1 && ms[0].Name == "" && ms[0].Value == "" {
		k, v := index.AllPostingsKey()
		return ix.Postings(ctx, k, v)
	}
	var its, notIts []index.Postings
	// See which label must be non-empty.
	// Optimization for case like {l=~".", l!="1"}.
	labelMustBeSet := make(map[string]bool, len(ms))
	for _, m := range ms {
		if !m.Matches("") {
			labelMustBeSet[m.Name] = true
		}
	}
	isSubtractingMatcher := func(m *labels.Matcher) bool {
		if !labelMustBeSet[m.Name] {
			return true
		}
		return (m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp) && m.Matches("")
	}
	hasSubtractingMatchers, hasIntersectingMatchers := false, false
	for _, m := range ms {
		if isSubtractingMatcher(m) {
			hasSubtractingMatchers = true
		} else {
			hasIntersectingMatchers = true
		}
	}
	if hasSubtractingMatchers && !hasIntersectingMatchers {
		// If there's nothing to subtract from, add in everything and remove the notIts later.
		// We prefer to get AllPostings so that the base of subtraction (i.e. allPostings)
		// doesn't include series that may be added to the index reader during this function call.
		k, v := index.AllPostingsKey()
		allPostings, err := ix.Postings(ctx, k, v)
		if err != nil {
			return nil, err
		}
		its = append(its, allPostings)
	}
	// Sort matchers to have the intersecting matchers first.
	// This way the base for subtraction is smaller and
	// there is no chance that the set we subtract from
	// contains postings of series that didn't exist when
	// we constructed the set we subtract by.
	slices.SortStableFunc(ms, func(i, j *labels.Matcher) int {
		if !isSubtractingMatcher(i) && isSubtractingMatcher(j) {
			return -1
		}
		return +1
	})
	for _, m := range ms {
		// Matchers can be expensive to evaluate, so honor cancellation between them.
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		switch {
		case m.Name == "" && m.Value == "":
			// We already handled the case at the top of the function,
			// and it is unexpected to get all postings again here.
			return nil, errors.New("unexpected all postings")
		case m.Type == labels.MatchRegexp && m.Value == ".*":
			// .* regexp matches any string: do nothing.
		case m.Type == labels.MatchNotRegexp && m.Value == ".*":
			return index.EmptyPostings(), nil
		case m.Type == labels.MatchRegexp && m.Value == ".+":
			// .+ regexp matches any non-empty string: get postings for all label values.
			it := ix.PostingsForAllLabelValues(ctx, m.Name)
			if index.IsEmptyPostingsType(it) {
				return index.EmptyPostings(), nil
			}
			its = append(its, it)
		case m.Type == labels.MatchNotRegexp && m.Value == ".+":
			// .+ regexp matches any non-empty string: get postings for all label values and remove them.
			notIts = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name))
		case labelMustBeSet[m.Name]:
			// If this matcher must be non-empty, we can be smarter.
			matchesEmpty := m.Matches("")
			isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
			switch {
			case isNot && matchesEmpty: // l!="foo"
				// If the label can't be empty and is a Not and the inner matcher
				// doesn't match empty, then subtract it out at the end.
				inverse, err := m.Inverse()
				if err != nil {
					return nil, err
				}
				it, err := postingsForMatcher(ctx, ix, inverse)
				if err != nil {
					return nil, err
				}
				notIts = append(notIts, it)
			case isNot && !matchesEmpty: // l!=""
				// If the label can't be empty and is a Not, but the inner matcher can
				// be empty we need to use inversePostingsForMatcher.
				inverse, err := m.Inverse()
				if err != nil {
					return nil, err
				}
				it, err := inversePostingsForMatcher(ctx, ix, inverse)
				if err != nil {
					return nil, err
				}
				if index.IsEmptyPostingsType(it) {
					// An empty intersection operand empties the whole result.
					return index.EmptyPostings(), nil
				}
				its = append(its, it)
			default: // l="a", l=~"a|b", l=~"a.b", etc.
				// Non-Not matcher, use normal postingsForMatcher.
				it, err := postingsForMatcher(ctx, ix, m)
				if err != nil {
					return nil, err
				}
				if index.IsEmptyPostingsType(it) {
					return index.EmptyPostings(), nil
				}
				its = append(its, it)
			}
		default: // l=""
			// If the matchers for a labelname selects an empty value, it selects all
			// the series which don't have the label name set too. See:
			// https://github.com/prometheus/prometheus/issues/3575 and
			// https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
			it, err := inversePostingsForMatcher(ctx, ix, m)
			if err != nil {
				return nil, err
			}
			notIts = append(notIts, it)
		}
	}
	// Intersect the selecting sets, then remove every subtracting set.
	it := index.Intersect(its...)
	for _, n := range notIts {
		it = index.Without(it, n)
	}
	return it, nil
}
// postingsForMatcher returns the postings of series whose label m.Name matches m.
// This method will not return postings for missing labels. Equality matchers and
// regexps that reduce to a finite set of values take a fast path through
// ix.Postings; everything else scans label values via PostingsForLabelMatching.
func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
	switch m.Type {
	case labels.MatchEqual:
		// Fast-path for equal matching.
		return ix.Postings(ctx, m.Name, m.Value)
	case labels.MatchRegexp:
		// Fast-path for set matching, e.g. a|b|c.
		if setMatches := m.SetMatches(); len(setMatches) > 0 {
			return ix.Postings(ctx, m.Name, setMatches...)
		}
	}
	it := ix.PostingsForLabelMatching(ctx, m.Name, m.Matches)
	return it, it.Err()
}
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
// Negated matchers take fast paths through their double negation (NotEqual ->
// Equal, NotRegexp set matches -> Regexp set matches); inverting =~"" or =""
// simply selects every value of the label.
func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
	switch m.Type {
	case labels.MatchNotRegexp:
		// Inverse of a MatchNotRegexp is MatchRegexp (double negation):
		// fast path when it reduces to a finite value set.
		if setMatches := m.SetMatches(); len(setMatches) > 0 {
			return ix.Postings(ctx, m.Name, setMatches...)
		}
	case labels.MatchNotEqual:
		// Inverse of a MatchNotEqual is MatchEqual (double negation).
		return ix.Postings(ctx, m.Name, m.Value)
	case labels.MatchRegexp, labels.MatchEqual:
		// If the matcher being inverted is =~"" or ="", we just want all the values.
		if m.Value == "" {
			it := ix.PostingsForAllLabelValues(ctx, m.Name)
			return it, it.Err()
		}
	}
	it := ix.PostingsForLabelMatching(ctx, m.Name, func(s string) bool {
		return !m.Matches(s)
	})
	return it, it.Err()
}
// labelValuesWithMatchers returns the values of label name that occur on series
// selected by matchers. Matchers on name itself are applied as a cheap pre-filter
// over the value list; only if matchers on *other* labels exist are postings
// fetched and intersected. hints.Limit (if set) is applied to the final result.
func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
	// Limit is applied at the end, after filtering.
	allValues, err := r.LabelValues(ctx, name, nil)
	if err != nil {
		return nil, fmt.Errorf("fetching values of label %s: %w", name, err)
	}
	// If we have a matcher for the label name, we can filter out values that don't match
	// before we fetch postings. This is especially useful for labels with many values.
	// e.g. __name__ with a selector like {__name__="xyz"}
	hasMatchersForOtherLabels := false
	for _, m := range matchers {
		if m.Name != name {
			hasMatchersForOtherLabels = true
			continue
		}
		// re-use the allValues slice to avoid allocations
		// this is safe because the iteration is always ahead of the append
		filteredValues := allValues[:0]
		count := 1
		for _, v := range allValues {
			// Amortized context check — every checkContextEveryNIterations values.
			if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
				return nil, ctx.Err()
			}
			count++
			if m.Matches(v) {
				filteredValues = append(filteredValues, v)
			}
		}
		allValues = filteredValues
	}
	if len(allValues) == 0 {
		return nil, nil
	}
	// If we don't have any matchers for other labels, then we're done.
	if !hasMatchersForOtherLabels {
		if hints != nil && hints.Limit > 0 && len(allValues) > hints.Limit {
			allValues = allValues[:hints.Limit]
		}
		return allValues, nil
	}
	p, err := PostingsForMatchers(ctx, r, matchers...)
	if err != nil {
		return nil, fmt.Errorf("fetching postings for matchers: %w", err)
	}
	// One postings iterator per candidate value; keep indexes aligned with allValues.
	valuesPostings := make([]index.Postings, len(allValues))
	for i, value := range allValues {
		valuesPostings[i], err = r.Postings(ctx, name, value)
		if err != nil {
			return nil, fmt.Errorf("fetching postings for %s=%q: %w", name, value, err)
		}
	}
	indexes, err := index.FindIntersectingPostings(p, valuesPostings)
	if err != nil {
		return nil, fmt.Errorf("intersecting postings: %w", err)
	}
	values := make([]string, 0, len(indexes))
	for _, idx := range indexes {
		values = append(values, allValues[idx])
		if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit {
			break
		}
	}
	return values, nil
}
// labelNamesWithMatchers returns the names of all labels present on series
// selected by the given matchers.
func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*labels.Matcher) ([]string, error) {
	postings, err := PostingsForMatchers(ctx, r, matchers...)
	if err != nil {
		return nil, err
	}
	return r.LabelNamesFor(ctx, postings)
}
// seriesData, used inside other iterators, are updated when we move from one series to another.
type seriesData struct {
	chks      []chunks.Meta         // Chunk metadata for the current series.
	intervals tombstones.Intervals  // Deletion intervals applying to the current series.
	labels    labels.Labels         // Label set of the current series.
}

// Labels implements part of storage.Series and storage.ChunkSeries.
func (s *seriesData) Labels() labels.Labels { return s.labels }
// blockBaseSeriesSet allows to iterate over all series in the single block.
// Iterated series are trimmed with given min and max time as well as tombstones.
// See newBlockSeriesSet and NewBlockChunkSeriesSet to use it for either sample or chunk iterating.
type blockBaseSeriesSet struct {
	blockID         ulid.ULID
	p               index.Postings   // Series references to iterate over.
	index           IndexReader
	chunks          ChunkReader
	tombstones      tombstones.Reader
	mint, maxt      int64            // Requested closed time range [mint, maxt].
	disableTrimming bool

	curr    seriesData            // Data of the series the set is currently positioned on.
	bufChks []chunks.Meta         // Scratch buffer reused across Next calls.
	builder labels.ScratchBuilder // Scratch builder reused across Next calls.
	err     error
}
// Next advances to the next series that has at least one chunk overlapping the
// requested time range and that is not entirely deleted by tombstones. It
// returns false when the postings are exhausted or an error occurred (see Err).
func (b *blockBaseSeriesSet) Next() bool {
	for b.p.Next() {
		if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil {
			// Postings may be stale. Skip if no underlying series exists.
			if errors.Is(err, storage.ErrNotFound) {
				continue
			}
			b.err = fmt.Errorf("get series %d: %w", b.p.At(), err)
			return false
		}
		if len(b.bufChks) == 0 {
			continue
		}
		intervals, err := b.tombstones.Get(b.p.At())
		if err != nil {
			b.err = fmt.Errorf("get tombstones: %w", err)
			return false
		}
		// NOTE:
		// * block time range is half-open: [meta.MinTime, meta.MaxTime).
		// * chunks are both closed: [chk.MinTime, chk.MaxTime].
		// * requested time ranges are closed: [req.Start, req.End].
		var trimFront, trimBack bool
		// Copy chunks as iterables are reusable.
		// Count those in range to size allocation (roughly - ignoring tombstones).
		nChks := 0
		for _, chk := range b.bufChks {
			if chk.MaxTime >= b.mint && chk.MinTime <= b.maxt {
				nChks++
			}
		}
		chks := make([]chunks.Meta, 0, nChks)
		// Prefilter chunks and pick those which are not entirely deleted or totally outside of the requested range.
		for _, chk := range b.bufChks {
			if chk.MaxTime < b.mint {
				continue
			}
			if chk.MinTime > b.maxt {
				continue
			}
			if (tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) {
				continue
			}
			chks = append(chks, chk)
			// If still not entirely deleted, check if trim is needed based on requested time range.
			if !b.disableTrimming {
				if chk.MinTime < b.mint {
					trimFront = true
				}
				if chk.MaxTime > b.maxt {
					trimBack = true
				}
			}
		}
		if len(chks) == 0 {
			continue
		}
		// Trimming is expressed as extra deletion intervals so that downstream
		// iterators drop the out-of-range samples.
		if trimFront {
			intervals = intervals.Add(tombstones.Interval{Mint: math.MinInt64, Maxt: b.mint - 1})
		}
		if trimBack {
			intervals = intervals.Add(tombstones.Interval{Mint: b.maxt + 1, Maxt: math.MaxInt64})
		}
		b.curr.labels = b.builder.Labels()
		b.curr.chks = chks
		b.curr.intervals = intervals
		return true
	}
	return false
}
// Err returns the first error hit by Next, falling back to the postings error.
func (b *blockBaseSeriesSet) Err() error {
	if b.err != nil {
		return b.err
	}
	return b.p.Err()
}

// Warnings implements storage.SeriesSet; this set never produces warnings.
func (*blockBaseSeriesSet) Warnings() annotations.Annotations { return nil }
// populateWithDelGenericSeriesIterator allows to iterate over given chunk
// metas. In each iteration it ensures that chunks are trimmed based on given
// tombstones interval if any.
//
// populateWithDelGenericSeriesIterator assumes that chunks that would be fully
// removed by intervals are filtered out in previous phase.
//
// On each iteration currMeta is available. If currDelIter is not nil, it
// means that the chunk in currMeta is invalid and a chunk rewrite is needed,
// for which currDelIter should be used.
type populateWithDelGenericSeriesIterator struct {
	blockID ulid.ULID
	cr      ChunkReader
	// metas are expected to be sorted by minTime and should be related to
	// the same, single series.
	// It's possible for a single chunks.Meta to refer to multiple chunks.
	// cr.ChunkOrIterator() would return an iterable and a nil chunk in this
	// case.
	metas []chunks.Meta

	i         int // Index into metas; -1 if not started yet.
	err       error
	bufIter   DeletedIterator // Retained for memory re-use. currDelIter may point here.
	intervals tombstones.Intervals

	currDelIter chunkenc.Iterator
	// currMeta is the current chunks.Meta from metas. currMeta.Chunk is set to
	// the chunk returned from cr.ChunkOrIterable(). As that can return a nil
	// chunk, currMeta.Chunk is not always guaranteed to be set.
	currMeta chunks.Meta
}
// reset re-initializes the iterator for a new series so the same allocation
// can be reused across series.
func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
	p.blockID = blockID
	p.cr = cr
	p.metas = chks
	p.i = -1
	p.err = nil
	// Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next().
	p.bufIter.Intervals = p.bufIter.Intervals[:0]
	p.intervals = intervals
	p.currDelIter = nil
	p.currMeta = chunks.Meta{}
}
// If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)
// is deep copied to avoid races between reads and copying chunk bytes.
// However, if the deletion intervals overlaps with the head chunk, then the head chunk is
// not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
//
// next advances to the next chunk meta, loading its chunk (or iterable) and
// setting currDelIter when a rewrite is needed. It returns false on exhaustion
// or error (see Err).
func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
	if p.err != nil || p.i >= len(p.metas)-1 {
		return false
	}
	p.i++
	p.currMeta = p.metas[p.i]

	// Collect only the deletion intervals that actually overlap this chunk.
	p.bufIter.Intervals = p.bufIter.Intervals[:0]
	for _, interval := range p.intervals {
		if p.currMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
			p.bufIter.Intervals = p.bufIter.Intervals.Add(interval)
		}
	}

	hcr, ok := p.cr.(ChunkReaderWithCopy)
	var iterable chunkenc.Iterable
	if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
		// ChunkOrIterableWithCopy will copy the head chunk, if it can.
		var maxt int64
		p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta)
		if p.currMeta.Chunk != nil {
			// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
			p.currMeta.MaxTime = maxt
		}
	} else {
		p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
	}
	if p.err != nil {
		p.err = fmt.Errorf("cannot populate chunk %d from block %s: %w", p.currMeta.Ref, p.blockID.String(), p.err)
		return false
	}

	// Use the single chunk if possible.
	if p.currMeta.Chunk != nil {
		if len(p.bufIter.Intervals) == 0 {
			// If there is no overlap with deletion intervals and a single chunk is
			// returned, we can take chunk as it is.
			p.currDelIter = nil
			return true
		}
		// Otherwise we need to iterate over the samples in the single chunk
		// and create new chunks.
		p.bufIter.Iter = p.currMeta.Chunk.Iterator(p.bufIter.Iter)
		p.currDelIter = &p.bufIter
		return true
	}

	// Otherwise, use the iterable to create an iterator.
	p.bufIter.Iter = iterable.Iterator(p.bufIter.Iter)
	p.currDelIter = &p.bufIter
	return true
}
// Err returns the first error encountered while advancing over the chunk metas.
func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err }
// blockSeriesEntry is a single series (storage.Series) backed by block chunks.
type blockSeriesEntry struct {
	chunks  ChunkReader
	blockID ulid.ULID
	seriesData // Embeds labels, chunk metas and deletion intervals.
}

// Iterator returns a sample iterator for the series, reusing `it` when it is
// already a *populateWithDelSeriesIterator.
func (s *blockSeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
	pi, ok := it.(*populateWithDelSeriesIterator)
	if !ok {
		pi = &populateWithDelSeriesIterator{}
	}
	pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
	return pi
}
// chunkSeriesEntry is a single series (storage.ChunkSeries) backed by block chunks.
type chunkSeriesEntry struct {
	chunks  ChunkReader
	blockID ulid.ULID
	seriesData // Embeds labels, chunk metas and deletion intervals.
}

// Iterator returns a chunk iterator for the series, reusing `it` when it is
// already a *populateWithDelChunkSeriesIterator.
func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator {
	pi, ok := it.(*populateWithDelChunkSeriesIterator)
	if !ok {
		pi = &populateWithDelChunkSeriesIterator{}
	}
	pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
	return pi
}
// populateWithDelSeriesIterator allows to iterate over samples for the single series.
type populateWithDelSeriesIterator struct {
	populateWithDelGenericSeriesIterator

	curr chunkenc.Iterator // Iterator over the current chunk's samples; nil before the first chunk.
}

// reset re-initializes the iterator for a new series.
func (p *populateWithDelSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
	p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
	p.curr = nil
}
// Next advances to the next sample, moving on to the next chunk when the
// current one is exhausted. It returns the value type of the sample, or
// chunkenc.ValNone when iteration is done.
func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
	if p.curr != nil {
		if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
			return valueType
		}
	}
	for p.next(false) {
		// Prefer the deletion-aware iterator when a rewrite is needed.
		if p.currDelIter != nil {
			p.curr = p.currDelIter
		} else {
			p.curr = p.currMeta.Chunk.Iterator(p.curr)
		}
		if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
			return valueType
		}
	}
	return chunkenc.ValNone
}
// Seek advances the iterator to the first sample with timestamp >= t.
// It first tries seeking within the current chunk, then walks subsequent
// chunks via Next.
func (p *populateWithDelSeriesIterator) Seek(t int64) chunkenc.ValueType {
	if p.curr != nil {
		if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
			return valueType
		}
	}
	for p.Next() != chunkenc.ValNone {
		if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
			return valueType
		}
	}
	return chunkenc.ValNone
}
// At returns the current float sample.
func (p *populateWithDelSeriesIterator) At() (int64, float64) {
	return p.curr.At()
}

// AtHistogram returns the current histogram sample.
func (p *populateWithDelSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
	return p.curr.AtHistogram(h)
}

// AtFloatHistogram returns the current float histogram sample.
func (p *populateWithDelSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	return p.curr.AtFloatHistogram(fh)
}

// AtT returns the timestamp of the current sample.
func (p *populateWithDelSeriesIterator) AtT() int64 {
	return p.curr.AtT()
}

// AtST TODO(krajorama): test AtST() when chunks support it.
func (p *populateWithDelSeriesIterator) AtST() int64 {
	return p.curr.AtST()
}

// Err returns the populating error, falling back to the current chunk
// iterator's error.
func (p *populateWithDelSeriesIterator) Err() error {
	if err := p.populateWithDelGenericSeriesIterator.Err(); err != nil {
		return err
	}
	if p.curr != nil {
		return p.curr.Err()
	}
	return nil
}
// populateWithDelChunkSeriesIterator iterates over whole chunks of a single
// series, re-encoding chunks whose samples are affected by deletion intervals.
type populateWithDelChunkSeriesIterator struct {
	populateWithDelGenericSeriesIterator

	// currMetaWithChunk is current meta with its chunk field set. This meta
	// is guaranteed to map to a single chunk. This differs from
	// populateWithDelGenericSeriesIterator.currMeta as that
	// could refer to multiple chunks.
	currMetaWithChunk chunks.Meta

	// chunksFromIterable stores the chunks created from iterating through
	// the iterable returned by cr.ChunkOrIterable() (with deleted samples
	// removed).
	chunksFromIterable    []chunks.Meta
	chunksFromIterableIdx int
}

// reset re-initializes the iterator for a new series, keeping allocations.
func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
	p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
	p.currMetaWithChunk = chunks.Meta{}
	p.chunksFromIterable = p.chunksFromIterable[:0]
	p.chunksFromIterableIdx = -1
}
// Next advances to the next complete chunk, draining any chunks previously
// materialized from an iterable before moving to the next chunk meta.
func (p *populateWithDelChunkSeriesIterator) Next() bool {
	if p.currMeta.Chunk == nil {
		// If we've been creating chunks from the iterable, check if there are
		// any more chunks to iterate through.
		if p.chunksFromIterableIdx < len(p.chunksFromIterable)-1 {
			p.chunksFromIterableIdx++
			p.currMetaWithChunk = p.chunksFromIterable[p.chunksFromIterableIdx]
			return true
		}
	}

	// Move to the next chunk/deletion iterator.
	// This is a for loop as if the current p.currDelIter returns no samples
	// (which means a chunk won't be created), there still might be more
	// samples/chunks from the rest of p.metas.
	for p.next(true) {
		if p.currDelIter == nil {
			p.currMetaWithChunk = p.currMeta
			return true
		}
		if p.currMeta.Chunk != nil {
			// If ChunkOrIterable() returned a non-nil chunk, the samples in
			// p.currDelIter will only form one chunk, as the only change
			// p.currDelIter might make is deleting some samples.
			if p.populateCurrForSingleChunk() {
				return true
			}
		} else {
			// If ChunkOrIterable() returned an iterable, multiple chunks may be
			// created from the samples in p.currDelIter.
			if p.populateChunksFromIterable() {
				return true
			}
		}
	}
	return false
}
// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This
// should be called if the samples in p.currDelIter only form one chunk.
// It re-encodes the surviving (non-deleted) samples into a fresh chunk of the
// appropriate encoding and returns false when no sample survives or an error
// occurred (recorded in p.err).
// TODO(krajorama): test ST when chunks support it.
func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
	valueType := p.currDelIter.Next()
	if valueType == chunkenc.ValNone {
		if err := p.currDelIter.Err(); err != nil {
			p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
		}
		return false
	}
	p.currMetaWithChunk.MinTime = p.currDelIter.AtT()

	// Re-encode the chunk if iterator is provided. This means that it has
	// some samples to be deleted or chunk is opened.
	var (
		newChunk chunkenc.Chunk
		app      chunkenc.Appender
		st, t    int64
		err      error
	)
	switch valueType {
	case chunkenc.ValHistogram:
		newChunk = chunkenc.NewHistogramChunk()
		if app, err = newChunk.Appender(); err != nil {
			break
		}
		for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
			// A single chunk can only hold one value type; anything else is corruption.
			if vt != chunkenc.ValHistogram {
				err = fmt.Errorf("found value type %v in histogram chunk", vt)
				break
			}
			var h *histogram.Histogram
			t, h = p.currDelIter.AtHistogram(nil)
			st = p.currDelIter.AtST()
			_, _, app, err = app.AppendHistogram(nil, st, t, h, true)
			if err != nil {
				break
			}
		}
	case chunkenc.ValFloat:
		newChunk = chunkenc.NewXORChunk()
		if app, err = newChunk.Appender(); err != nil {
			break
		}
		for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
			if vt != chunkenc.ValFloat {
				err = fmt.Errorf("found value type %v in float chunk", vt)
				break
			}
			var v float64
			t, v = p.currDelIter.At()
			st = p.currDelIter.AtST()
			app.Append(st, t, v)
		}
	case chunkenc.ValFloatHistogram:
		newChunk = chunkenc.NewFloatHistogramChunk()
		if app, err = newChunk.Appender(); err != nil {
			break
		}
		for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
			if vt != chunkenc.ValFloatHistogram {
				err = fmt.Errorf("found value type %v in histogram chunk", vt)
				break
			}
			var h *histogram.FloatHistogram
			t, h = p.currDelIter.AtFloatHistogram(nil)
			st = p.currDelIter.AtST()
			_, _, app, err = app.AppendFloatHistogram(nil, st, t, h, true)
			if err != nil {
				break
			}
		}
	default:
		err = fmt.Errorf("populateCurrForSingleChunk: value type %v unsupported", valueType)
	}

	if err != nil {
		p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
		return false
	}
	if err := p.currDelIter.Err(); err != nil {
		p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
		return false
	}

	p.currMetaWithChunk.Chunk = newChunk
	// t holds the timestamp of the last appended sample.
	p.currMetaWithChunk.MaxTime = t
	return true
}
// populateChunksFromIterable reads the samples from currDelIter to create
// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first
// chunk.
// A new chunk is started whenever the sample value type changes, or when a
// histogram append signals that a fresh chunk is required.
// TODO(krajorama): test ST when chunks support it.
func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
	p.chunksFromIterable = p.chunksFromIterable[:0]
	p.chunksFromIterableIdx = -1

	firstValueType := p.currDelIter.Next()
	if firstValueType == chunkenc.ValNone {
		if err := p.currDelIter.Err(); err != nil {
			p.err = fmt.Errorf("populateChunksFromIterable: no samples could be read: %w", err)
			return false
		}
		return false
	}

	var (
		// t is the timestamp for the current sample.
		st, t int64
		// cmint/cmaxt track the min/max time of the chunk being built.
		cmint int64
		cmaxt int64

		currentChunk chunkenc.Chunk
		app          chunkenc.Appender

		newChunk chunkenc.Chunk
		recoded  bool

		err error
	)

	prevValueType := chunkenc.ValNone

	for currentValueType := firstValueType; currentValueType != chunkenc.ValNone; currentValueType = p.currDelIter.Next() {
		// Check if the encoding has changed (i.e. we need to create a new
		// chunk as chunks can't have multiple encoding types).
		// For the first sample, the following condition will always be true as
		// ValNone != ValFloat | ValHistogram | ValFloatHistogram.
		if currentValueType != prevValueType {
			if prevValueType != chunkenc.ValNone {
				p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
			}
			cmint = p.currDelIter.AtT()
			if currentChunk, err = currentValueType.NewChunk(); err != nil {
				break
			}
			if app, err = currentChunk.Appender(); err != nil {
				break
			}
		}

		switch currentValueType {
		case chunkenc.ValFloat:
			{
				var v float64
				t, v = p.currDelIter.At()
				st = p.currDelIter.AtST()
				app.Append(st, t, v)
			}
		case chunkenc.ValHistogram:
			{
				var v *histogram.Histogram
				t, v = p.currDelIter.AtHistogram(nil)
				st = p.currDelIter.AtST()
				// No need to set prevApp as AppendHistogram will set the
				// counter reset header for the appender that's returned.
				newChunk, recoded, app, err = app.AppendHistogram(nil, st, t, v, false)
			}
		case chunkenc.ValFloatHistogram:
			{
				var v *histogram.FloatHistogram
				t, v = p.currDelIter.AtFloatHistogram(nil)
				st = p.currDelIter.AtST()
				// No need to set prevApp as AppendHistogram will set the
				// counter reset header for the appender that's returned.
				newChunk, recoded, app, err = app.AppendFloatHistogram(nil, st, t, v, false)
			}
		}

		if err != nil {
			break
		}

		if newChunk != nil {
			// The append produced a replacement chunk. Unless the samples were
			// recoded into it, the chunk built so far is complete: store it.
			if !recoded {
				p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
				cmint = t
			}
			currentChunk = newChunk
		}

		cmaxt = t
		prevValueType = currentValueType
	}

	if err != nil {
		p.err = fmt.Errorf("populateChunksFromIterable: error when writing new chunks: %w", err)
		return false
	}
	if err = p.currDelIter.Err(); err != nil {
		p.err = fmt.Errorf("populateChunksFromIterable: currDelIter error when writing new chunks: %w", err)
		return false
	}

	// Flush the last chunk being built, if any.
	if prevValueType != chunkenc.ValNone {
		p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
	}

	if len(p.chunksFromIterable) == 0 {
		return false
	}

	p.currMetaWithChunk = p.chunksFromIterable[0]
	p.chunksFromIterableIdx = 0
	return true
}
// At returns the current chunk meta (guaranteed to have its Chunk set).
func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.currMetaWithChunk }

// blockSeriesSet allows to iterate over sorted, populated series with applied tombstones.
// Series with all deleted chunks are still present as Series with no samples.
// Samples from chunks are also trimmed to requested min and max time.
type blockSeriesSet struct {
	blockBaseSeriesSet
}
// newBlockSeriesSet builds a storage.SeriesSet over the given block readers,
// postings and requested time range.
func newBlockSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.SeriesSet {
	base := blockBaseSeriesSet{
		index:           i,
		chunks:          c,
		tombstones:      t,
		p:               p,
		mint:            mint,
		maxt:            maxt,
		disableTrimming: disableTrimming,
	}
	return &blockSeriesSet{blockBaseSeriesSet: base}
}
// At returns the series the set is currently positioned on.
func (b *blockSeriesSet) At() storage.Series {
	// At can be looped over before iterating, so save the current values locally.
	return &blockSeriesEntry{
		chunks:     b.chunks,
		blockID:    b.blockID,
		seriesData: b.curr,
	}
}
// blockChunkSeriesSet allows to iterate over sorted, populated series with applied tombstones.
// Series with all deleted chunks are still present as Labelled iterator with no chunks.
// Chunks are also trimmed to requested [min and max] (keeping samples with min and max timestamps).
type blockChunkSeriesSet struct {
	blockBaseSeriesSet
}
// NewBlockChunkSeriesSet builds a storage.ChunkSeriesSet over the given block
// readers, postings and requested time range.
func NewBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet {
	base := blockBaseSeriesSet{
		blockID:         id,
		index:           i,
		chunks:          c,
		tombstones:      t,
		p:               p,
		mint:            mint,
		maxt:            maxt,
		disableTrimming: disableTrimming,
	}
	return &blockChunkSeriesSet{blockBaseSeriesSet: base}
}
// At returns the chunk series the set is currently positioned on.
func (b *blockChunkSeriesSet) At() storage.ChunkSeries {
	// At can be looped over before iterating, so save the current values locally.
	return &chunkSeriesEntry{
		chunks:     b.chunks,
		blockID:    b.blockID,
		seriesData: b.curr,
	}
}
// NewMergedStringIter returns string iterator that allows to merge symbols on demand and stream result.
func NewMergedStringIter(a, b index.StringIter) index.StringIter {
	return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()}
}

// mergedStringIter merges two sorted string iterators, de-duplicating values
// present in both.
type mergedStringIter struct {
	a        index.StringIter
	b        index.StringIter
	aok, bok bool   // Whether a/b still have a current element.
	cur      string // Current merged value.
	err      error
}
// Next advances to the next string in the sorted union of both iterators.
// Equal values are emitted once, advancing both sides.
func (m *mergedStringIter) Next() bool {
	if (!m.aok && !m.bok) || (m.Err() != nil) {
		return false
	}
	switch {
	case !m.aok:
		// Only b has elements left.
		m.cur = m.b.At()
		m.bok = m.b.Next()
		m.err = m.b.Err()
	case !m.bok:
		// Only a has elements left.
		m.cur = m.a.At()
		m.aok = m.a.Next()
		m.err = m.a.Err()
	case m.b.At() > m.a.At():
		m.cur = m.a.At()
		m.aok = m.a.Next()
		m.err = m.a.Err()
	case m.a.At() > m.b.At():
		m.cur = m.b.At()
		m.bok = m.b.Next()
		m.err = m.b.Err()
	default: // Equal.
		m.cur = m.b.At()
		m.aok = m.a.Next()
		m.err = m.a.Err()
		m.bok = m.b.Next()
		// Keep the first error seen; only record b's error if a had none.
		if m.err == nil {
			m.err = m.b.Err()
		}
	}
	return true
}
// At returns the current merged value.
func (m mergedStringIter) At() string { return m.cur }

// Err returns the first error observed on either underlying iterator.
func (m mergedStringIter) Err() error { return m.err }
// DeletedIterator wraps chunk Iterator and makes sure any deleted metrics are not returned.
type DeletedIterator struct {
	// Iter is an Iterator to be wrapped.
	Iter chunkenc.Iterator
	// Intervals are the deletion intervals. Fully-passed intervals are sliced
	// off the front as iteration advances.
	Intervals tombstones.Intervals
}
// At returns the current float sample of the wrapped iterator.
func (it *DeletedIterator) At() (int64, float64) {
	return it.Iter.At()
}

// AtHistogram returns the current histogram sample of the wrapped iterator.
func (it *DeletedIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
	return it.Iter.AtHistogram(h)
}

// AtFloatHistogram returns the current float histogram sample of the wrapped iterator.
func (it *DeletedIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	return it.Iter.AtFloatHistogram(fh)
}

// AtT returns the timestamp of the current sample of the wrapped iterator.
func (it *DeletedIterator) AtT() int64 {
	return it.Iter.AtT()
}

// AtST TODO(krajorama): test AtST() when chunks support it.
func (it *DeletedIterator) AtST() int64 {
	return it.Iter.AtST()
}
// Seek advances the wrapped iterator to the first sample with timestamp >= t,
// then skips forward again if that sample falls inside a deletion interval.
func (it *DeletedIterator) Seek(t int64) chunkenc.ValueType {
	if it.Iter.Err() != nil {
		return chunkenc.ValNone
	}
	valueType := it.Iter.Seek(t)
	if valueType == chunkenc.ValNone {
		return chunkenc.ValNone
	}

	// Now double check if the entry falls into a deleted interval.
	ts := it.AtT()
	for _, itv := range it.Intervals {
		if ts < itv.Mint {
			// Intervals are beyond ts; the sample survives.
			return valueType
		}

		if ts > itv.Maxt {
			// This interval is fully behind us; drop it.
			it.Intervals = it.Intervals[1:]
			continue
		}

		// We're in the middle of an interval, we can now call Next().
		return it.Next()
	}

	// The timestamp is greater than all the deleted intervals.
	return valueType
}
// Next advances to the next sample that is not covered by a deletion interval.
func (it *DeletedIterator) Next() chunkenc.ValueType {
Outer:
	for valueType := it.Iter.Next(); valueType != chunkenc.ValNone; valueType = it.Iter.Next() {
		ts := it.AtT()
		for _, tr := range it.Intervals {
			if tr.InBounds(ts) {
				// Sample is deleted; try the next one.
				continue Outer
			}

			if ts <= tr.Maxt {
				return valueType
			}
			// Interval fully behind us; drop it.
			it.Intervals = it.Intervals[1:]
		}
		return valueType
	}
	return chunkenc.ValNone
}
// Err returns the wrapped iterator's error, if any.
func (it *DeletedIterator) Err() error { return it.Iter.Err() }

// nopChunkReader is a ChunkReader that returns the same empty chunk for every
// request and never fails.
type nopChunkReader struct {
	emptyChunk chunkenc.Chunk
}

// newNopChunkReader returns a ChunkReader that always serves an empty XOR chunk.
func newNopChunkReader() ChunkReader {
	return nopChunkReader{
		emptyChunk: chunkenc.NewXORChunk(),
	}
}

// ChunkOrIterable always returns the shared empty chunk.
func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	return cr.emptyChunk, nil, nil
}

// Close is a no-op.
func (nopChunkReader) Close() error { return nil }
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import datetime
import os
import time
from azure import (
WindowsAzureError,
SERVICE_BUS_HOST_BASE,
_convert_response_to_feeds,
_dont_fail_not_exist,
_dont_fail_on_exist,
_encode_base64,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_sign_string,
_str,
_unicode_type,
_update_request_uri_query,
url_quote,
url_unquote,
_validate_not_none,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicebus import (
AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY,
AZURE_SERVICEBUS_ISSUER,
_convert_topic_to_xml,
_convert_response_to_topic,
_convert_queue_to_xml,
_convert_response_to_queue,
_convert_subscription_to_xml,
_convert_response_to_subscription,
_convert_rule_to_xml,
_convert_response_to_rule,
_convert_xml_to_queue,
_convert_xml_to_topic,
_convert_xml_to_subscription,
_convert_xml_to_rule,
_create_message,
_service_bus_error_handler,
)
class ServiceBusService(object):
    '''Client for the Azure Service Bus management REST API (queues, topics,
    subscriptions and rules), supporting SAS or ACS authentication.'''

    def __init__(self, service_namespace=None, account_key=None, issuer=None,
                 x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE,
                 shared_access_key_name=None, shared_access_key_value=None,
                 authentication=None):
        '''
        Initializes the service bus service for a namespace with the specified
        authentication settings (SAS or ACS).

        service_namespace:
            Service bus namespace, required for all operations. If None,
            the value is set to the AZURE_SERVICEBUS_NAMESPACE env variable.
        account_key:
            ACS authentication account key. If None, the value is set to the
            AZURE_SERVICEBUS_ACCESS_KEY env variable.
            Note that if both SAS and ACS settings are specified, SAS is used.
        issuer:
            ACS authentication issuer. If None, the value is set to the
            AZURE_SERVICEBUS_ISSUER env variable.
            Note that if both SAS and ACS settings are specified, SAS is used.
        x_ms_version: Unused. Kept for backwards compatibility.
        host_base:
            Optional. Live host base url. Defaults to Azure url. Override this
            for on-premise.
        shared_access_key_name:
            SAS authentication key name.
            Note that if both SAS and ACS settings are specified, SAS is used.
        shared_access_key_value:
            SAS authentication key value.
            Note that if both SAS and ACS settings are specified, SAS is used.
        authentication:
            Instance of authentication class. If this is specified, then
            ACS and SAS parameters are ignored.
        '''
        self.requestid = None
        self.service_namespace = service_namespace
        self.host_base = host_base

        # Fall back to the environment when the namespace isn't given explicitly.
        if not self.service_namespace:
            self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)

        if not self.service_namespace:
            raise WindowsAzureError('You need to provide servicebus namespace')

        if authentication:
            self.authentication = authentication
        else:
            if not account_key:
                account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)
            if not issuer:
                issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)

            # SAS takes precedence over ACS when both are configured.
            if shared_access_key_name and shared_access_key_value:
                self.authentication = ServiceBusSASAuthentication(
                    shared_access_key_name,
                    shared_access_key_value)
            elif account_key and issuer:
                self.authentication = ServiceBusWrapTokenAuthentication(
                    account_key,
                    issuer)
            else:
                raise WindowsAzureError(
                    'You need to provide servicebus access key and Issuer OR shared access key and value')

        self._httpclient = _HTTPClient(service_instance=self)
        self._filter = self._httpclient.perform_request
    # Backwards compatibility:
    # account_key and issuer used to be stored on the service class, they are
    # now stored on the authentication class.
    @property
    def account_key(self):
        # Delegates to the authentication object for backwards compatibility.
        return self.authentication.account_key

    @account_key.setter
    def account_key(self, value):
        self.authentication.account_key = value

    @property
    def issuer(self):
        # Delegates to the authentication object for backwards compatibility.
        return self.authentication.issuer

    @issuer.setter
    def issuer(self, value):
        self.authentication.issuer = value
    def with_filter(self, filter):
        '''
        Returns a new service which will process requests with the specified
        filter. Filtering operations can include logging, automatic retrying,
        etc... The filter is a lambda which receives the HTTPRequest and
        another lambda. The filter can perform any pre-processing on the
        request, pass it off to the next lambda, and then perform any
        post-processing on the response.
        '''
        # NOTE(review): 'filter' shadows the builtin of the same name, but it
        # is part of the public signature (callers may pass it by keyword),
        # so it is kept as-is.
        res = ServiceBusService(
            service_namespace=self.service_namespace,
            authentication=self.authentication)
        old_filter = self._filter

        # Chain the new filter in front of the existing one.
        def new_filter(request):
            return filter(request, old_filter)

        res._filter = new_filter
        return res
    def set_proxy(self, host, port, user=None, password=None):
        '''
        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

        host: Address of the proxy. Ex: '192.168.0.100'
        port: Port of the proxy. Ex: 6000
        user: User for proxy authorization.
        password: Password for proxy authorization.
        '''
        self._httpclient.set_proxy(host, port, user, password)
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
Creates a new queue. Once created, this queue's resource manifest is
immutable.
queue_name: Name of the queue to create.
queue: Queue object to create.
fail_on_exist:
Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.body = _get_request_body(_convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Deletes an existing queue. This operation will also remove all
associated state including messages in the queue.
queue_name: Name of the queue to delete.
fail_not_exist:
Specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue(self, queue_name):
'''
Retrieves an existing queue.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_queue(response)
def list_queues(self):
'''
Enumerates the queues in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_queue)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic resource manifest is
immutable.
topic_name: Name of the topic to create.
topic: Topic object to create.
fail_on_exist:
Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_topic(self, topic_name, fail_not_exist=False):
'''
Deletes an existing topic. This operation will also remove all
associated state including associated subscriptions.
topic_name: Name of the topic to delete.
fail_not_exist:
Specify whether throw exception when topic doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_topic(response)
def list_topics(self):
'''
Retrieves the topics in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_topic)
def create_rule(self, topic_name, subscription_name, rule_name, rule=None,
fail_on_exist=False):
'''
Creates a new rule. Once created, this rule's resource manifest is
immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
fail_on_exist:
Specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.body = _get_request_body(_convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_rule(self, topic_name, subscription_name, rule_name,
fail_not_exist=False):
'''
Deletes an existing rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name:
Name of the rule to delete. DEFAULT_RULE_NAME=$Default.
Use DEFAULT_RULE_NAME to delete default rule for the subscription.
fail_not_exist:
Specify whether throw exception when rule doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_rule(response)
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_rule)
def create_subscription(self, topic_name, subscription_name,
subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription resource
manifest is immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
fail_on_exist:
Specify whether throw exception when subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.body = _get_request_body(
_convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_subscription(self, topic_name, subscription_name,
fail_not_exist=False):
'''
Deletes an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription to delete.
fail_not_exist:
Specify whether to throw an exception when the subscription
doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_subscription(response)
def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response,
_convert_xml_to_subscription)
def send_topic_message(self, topic_name, message=None):
'''
Enqueues a message into the specified topic. The limit to the number
of messages which may be present in the topic is governed by the
message size in MaxTopicSizeInBytes. If this message causes the topic
to exceed its quota, a quota exceeded error is returned and the
message will be rejected.
topic_name: Name of the topic.
message: Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only(
'message.body', message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
This operation is used to atomically retrieve and lock a message for
processing. The message is guaranteed not to be delivered to other
receivers during the lock duration period specified in buffer
description. Once the lock expires, the message will be available to
other receivers (on the same subscription only) during the lock
duration period specified in the topic description. Once the lock
expires, the message will be available to other receivers. In order to
complete processing of the message, the receiver should issue a delete
command with the lock ID received from this operation. To abandon
processing of the message and unlock it for other receivers, an Unlock
Message command should be issued, or the lock duration period can
expire.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Unlock a message for processing by other receivers on a given
subscription. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
Read and delete a message from a subscription as an atomic operation.
This operation should be used when a best-effort guarantee is
sufficient for an application; that is, using this operation it is
possible for messages to be lost if processing fails.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Completes processing on a locked message and delete it from the
subscription. This operation should only be called after processing a
previously locked message is successful to maintain At-Least-Once
delivery assurances.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def send_queue_message(self, queue_name, message=None):
'''
Sends a message into the specified queue. The limit to the number of
messages which may be present in the topic is governed by the message
size the MaxTopicSizeInMegaBytes. If this message will cause the queue
to exceed its quota, a quota exceeded error is returned and the
message will be rejected.
queue_name: Name of the queue.
message: Message object containing message body and properties.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only('message.body',
message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_queue_message(self, queue_name, timeout='60'):
'''
Automically retrieves and locks a message from a queue for processing.
The message is guaranteed not to be delivered to other receivers (on
the same subscription only) during the lock duration period specified
in the queue description. Once the lock expires, the message will be
available to other receivers. In order to complete processing of the
message, the receiver should issue a delete command with the lock ID
received from this operation. To abandon processing of the message and
unlock it for other receivers, an Unlock Message command should be
issued, or the lock duration period can expire.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
'''
Unlocks a message for processing by other receivers on a given
subscription. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_queue_message(self, queue_name, timeout='60'):
'''
Reads and deletes a message from a queue as an atomic operation. This
operation should be used when a best-effort guarantee is sufficient
for an application; that is, using this operation it is possible for
messages to be lost if processing fails.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_queue_message(self, queue_name, sequence_number, lock_token):
'''
Completes processing on a locked message and delete it from the queue.
This operation should only be called after processing a previously
locked message is successful to maintain At-Least-Once delivery
assurances.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
'''
Receive a message from a queue for processing.
queue_name: Name of the queue.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
def receive_subscription_message(self, topic_name, subscription_name,
peek_lock=True, timeout=60):
'''
Receive a message from a subscription for processing.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_subscription_message(topic_name,
subscription_name,
timeout)
else:
return self.read_delete_subscription_message(topic_name,
subscription_name,
timeout)
def _get_host(self):
return self.service_namespace + self.host_base
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _service_bus_error_handler(ex)
return resp
def _update_service_bus_header(self, request):
''' Add additional headers for service bus. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
# if it is not GET or HEAD request, must set content-type.
if not request.method in ['GET', 'HEAD']:
for name, _ in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(
('Content-Type',
'application/atom+xml;type=entry;charset=utf-8'))
# Adds authorization header for authentication.
self.authentication.sign_request(request, self._httpclient)
return request.headers
# Token cache for Authentication.
# Maps a "wrap scope" string (http:// + host + path + issuer + account key,
# as built in _get_token) to the most recently fetched WRAP token.
# Shared by the different instances of ServiceBusWrapTokenAuthentication.
_tokens = {}
class ServiceBusWrapTokenAuthentication:
    '''Signs service bus requests with a WRAP (ACS) access token.

    Tokens are obtained from the access control service and cached in the
    module-level _tokens dict, which is shared across instances.
    '''

    def __init__(self, account_key, issuer):
        self.account_key = account_key
        self.issuer = issuer

    def sign_request(self, request, httpclient):
        '''Append the WRAP Authorization header to the request.'''
        auth = self._get_authorization(request, httpclient)
        request.headers.append(('Authorization', auth))

    def _get_authorization(self, request, httpclient):
        '''Return the signed authorization string containing the token.'''
        token = self._get_token(request.host, request.path, httpclient)
        return 'WRAP access_token="' + token + '"'

    def _token_is_expired(self, token):
        '''Return True when the token has expired or expires within 30s.'''
        begin = token.find('ExpiresOn=') + len('ExpiresOn=')
        end = token.find('&', begin)
        expires_at = int(token[begin:end])
        now = time.mktime(time.localtime())
        # The 30-second margin keeps the token valid while the request that
        # carries it is in flight to the server.
        return (expires_at - now) < 30

    def _get_token(self, host, path, httpclient):
        '''
        Returns token for the request, reusing the shared cache whenever a
        still-valid token is available.

        host: the service bus service request.
        path: the service bus service request.
        '''
        wrap_scope = 'http://' + host + path + self.issuer + self.account_key
        # Serve from cache when the stored token is still usable.
        if wrap_scope in _tokens:
            cached = _tokens[wrap_scope]
            if not self._token_is_expired(cached):
                return cached
        # Otherwise request a fresh token from the access control server.
        request = HTTPRequest()
        request.protocol_override = 'https'
        request.host = host.replace('.servicebus.', '-sb.accesscontrol.')
        request.method = 'POST'
        request.path = '/WRAPv0.9'
        request.body = ('wrap_name=' + url_quote(self.issuer) +
                        '&wrap_password=' + url_quote(self.account_key) +
                        '&wrap_scope=' +
                        url_quote('http://' + host + path)).encode('utf-8')
        request.headers.append(('Content-Length', str(len(request.body))))
        resp = httpclient.perform_request(request)
        token = resp.body.decode('utf-8')
        token = url_unquote(token[token.find('=') + 1:token.rfind('&')])
        _tokens[wrap_scope] = token
        return token
class ServiceBusSASAuthentication:
    '''Signs service bus requests with a Shared Access Signature (SAS).'''

    def __init__(self, key_name, key_value):
        self.key_name = key_name
        self.key_value = key_value

    def sign_request(self, request, httpclient):
        '''Append the SAS Authorization header to the request.'''
        request.headers.append(
            ('Authorization', self._get_authorization(request, httpclient)))

    def _get_authorization(self, request, httpclient):
        '''Build the SharedAccessSignature authorization string.'''
        # The signed resource is the fully URL-encoded, lower-cased URI.
        uri = url_quote(httpclient.get_uri(request), '').lower()
        expiry = str(self._get_expiry())
        signature = url_quote(
            _sign_string(self.key_value, uri + '\n' + expiry, False), '')
        return ('SharedAccessSignature sig={0}&se={1}&skn={2}&sr={3}'
                .format(signature, expiry, self.key_name, uri))

    def _get_expiry(self):
        '''Returns the UTC datetime, in seconds since Epoch, when this signed
        request expires (5 minutes from now).'''
        return int(round(time.time() + 300))
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package ephemeral
import (
"context"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// ResourceInstance is an interface that must be implemented for each
// active ephemeral resource instance to determine how it should be renewed
// and eventually closed.
type ResourceInstance interface {
	// Renew attempts to extend the life of the remote object associated with
	// this resource instance, optionally returning a new renewal request to be
	// passed to a subsequent call to this method.
	// (A nil nextRenew presumably means no further renewal is required —
	// confirm against the callers of this interface.)
	//
	// If the object's life is not extended successfully then Renew returns
	// error diagnostics explaining why not, and future requests that might
	// have made use of the object will fail.
	Renew(ctx context.Context, req providers.EphemeralRenew) (nextRenew *providers.EphemeralRenew, diags tfdiags.Diagnostics)

	// Close proactively ends the life of the remote object associated with
	// this resource instance, if possible. For example, if the remote object
	// is a temporary lease for a dynamically-generated secret then this
	// might end that lease and thus cause the secret to be promptly revoked.
	// Any problems encountered while closing are reported in the returned
	// diagnostics.
	Close(ctx context.Context) tfdiags.Diagnostics
}
# Copyright (c) 2014, Juergen Riegel (FreeCAD@juergen-riegel.net)
# All rights reserved.
# This file is part of the StepClassLibrary (SCL).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple Part21 STEP reader
Reads a given STEP file. Maps the enteties and instaciate the
corosbonding classes.
In addition it writes out a graphwiz file with the entity graph.
"""
import Part21,sys
__title__="Simple Part21 STEP reader"
__author__ = "Juergen Riegel"
__version__ = "0.1 (Jan 2014)"
class SimpleParser:
""" read the file
Part21.Part21Parser Loads all instances definition of a Part21 file into memory.
Two dicts are created:
Part21.Part21Parser._instance_definition : stores attibutes, key is the instance integer id
Part21.Part21Parser._number_of_ancestors : stores the number of ancestors of entity id. This enables
to define the order of instances creation.
"""
def __init__(self, filename):
import time
import sys
self._p21loader = Part21.Part21Parser(filename)
#self._p21loader._number_of_ancestors = {} # not needed, save memory
self.schemaModule = None
self.schemaClasses = None
self.instanceMape = {}
#for i in self._p21loader._instances_definition.keys():
# print i,self._p21loader._instances_definition[i][0],self._p21loader._instances_definition[i][1]
def _writeGraphVizEdge(self,num,attrList,file):
for i in attrList:
if isinstance(i,list):
self._writeGraphVizEdge(num,i,file)
elif isinstance(i,str):
if not i == '' and i[0] == '#':
key = int(i[1:])
file.write(' '+`num`+' -> '+`key`+'\n')
def writeGraphViz(self,fileName):
print "Writing GraphViz file %s..."%fileName,
gvFile = open(fileName,'w')
gvFile.write('digraph G {\n node [fontname=Verdana,fontsize=12]\n node [style=filled]\n node [fillcolor="#EEEEEE"]\n node [color="#EEEEEE"]\n edge [color="#31CEF0"]\n')
for i in self._p21loader._instances_definition.keys():
entityStr = '#'+`i`
nameStr = self._p21loader._instances_definition[i][0].lower()
sttrStr = `self._p21loader._instances_definition[i][1]`.replace('"','').replace("'",'').replace(" ",'')
if len (sttrStr) > 40:
sttrStr = sttrStr[:39]+'....'
gvFile.write(' '+`i`+' [label="'+entityStr+'\n'+nameStr+'\n'+sttrStr+'"]\n')
self._writeGraphVizEdge( i,self._p21loader._instances_definition[i][1],gvFile)
gvFile.write('}\n')
def instaciate(self):
"""Instaciate the python classe from the enteties"""
import inspect
# load the needed schema module
if self._p21loader.get_schema_name() == 'config_control_design':
import config_control_design
self.schemaModule = config_control_design
if self._p21loader.get_schema_name() == 'automotive_design':
import automotive_design
self.schemaModule = automotive_design
if self.schemaModule:
self.schemaClasses = dict(inspect.getmembers(self.schemaModule))
for i in self._p21loader._instances_definition.keys():
#print i
if not self.instanceMape.has_key(i):
self._create_entity_instance(i)
def _create_entity_instance(self, instance_id):
if self._p21loader._instances_definition.has_key(instance_id):
instance_definition = self._p21loader._instances_definition[instance_id]
#print "Instance definition to process",instance_definition
# first find class name
class_name = instance_definition[0].lower()
#print "Class name:%s"%class_name
if not class_name=='':
classDef = self.schemaClasses[class_name]
# then attributes
#print object_.__doc__
instance_attributes = instance_definition[1]
self._transformAttributes(instance_attributes)
print 'Attribute list after transform: ',instance_attributes
self.instanceMape[instance_id] = str('dummy#:'+str(instance_id)) # dummy instance to test
else:
print '############################# lost entity: ',instance_id
self.instanceMape[instance_id] = int(41) # dummy
#print "instance_attributes:",instance_attributes
#a = object_(*instance_attributes)
    def _transformAttributes(self,attrList):
        # Recursively walk attrList, replacing '#<id>' entity-reference
        # strings with the corresponding created instances (creating them
        # on demand). Mutates attrList in place.
        n = 0
        for i in attrList:
            if isinstance(i,list):
                # Nested aggregate: recurse.
                self._transformAttributes(i)
            elif isinstance(i,str):
                if i == '':
                    print 'empty string'
                elif i[0] == '#':
                    # '#123' references entity instance 123.
                    key = int(i[1:])
                    #print 'Item: ',int(i[1:])
                    if self.instanceMape.has_key(key):
                        attrList[n] = self.instanceMape[key]
                    else:
                        # Forward reference: build the target first.
                        self._create_entity_instance(key)
                        if not self.instanceMape.has_key(key):
                            raise NameError("Needed instance not instanciated: ",key)
                        else:
                            attrList[n] = self.instanceMape[key]
                elif i[0] == '$':
                    # '$' marks an unset attribute in STEP Part 21 files.
                    #print 'Dollar'
                    pass
                elif i[0] == "'":
                    print 'Dopelstring: ',i[1:-1]
                else:
                    print 'String: ',i
            else:
                raise NameError("Unknown attribute type")
            n = n+1
if __name__ == "__main__":
    # Quick manual smoke test: parse a sample STEP file and emit a GraphViz
    # graph of the entity references.
    sys.path.append('..') # path where config_control_design.py is found
    parser = SimpleParser("Aufspannung.stp") # simple test file
    #parser.instaciate()
    parser.writeGraphViz('TestGrap.gv')
    #dot.exe -Tsvg -o Test.svg e:\fem-dev\src\Mod\Import\App\SCL\TestGrap-geo.gv
# Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
# Printable names for the protocol constants; consumed by
# get_protocol_name() below.
_PROTOCOL_NAMES = {
    PROTOCOL_TLSv1: "TLSv1",
    PROTOCOL_SSLv23: "SSLv23",
    PROTOCOL_SSLv3: "SSLv3",
}
try:
    from _ssl import PROTOCOL_SSLv2
    _SSLv2_IF_EXISTS = PROTOCOL_SSLv2
except ImportError:
    # _ssl may be compiled without SSLv2 support; keep a sentinel so the
    # cipher-defaulting logic in SSLSocket.__init__ can compare against it.
    _SSLv2_IF_EXISTS = None
else:
    _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
from socket import socket, _fileobject, _delegate_methods, error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
_DEFAULT_CIPHERS = 'DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2'
class SSLSocket(socket):
    """This class implements a subtype of socket.socket that wraps
    the underlying OS socket in an SSL context when necessary, and
    provides read and write methods over that channel.

    Python 2 implementation: relies on the legacy ``_ssl.sslwrap`` C API
    and ``socket._sock`` delegation, and uses ``except Exc, name`` syntax.
    """

    def __init__(self, sock, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True, ciphers=None):
        socket.__init__(self, _sock=sock._sock)
        # The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
        # methods defined in SSLSocket.
        for attr in _delegate_methods:
            try:
                delattr(self, attr)
            except AttributeError:
                pass
        # Apply the hardened default cipher list unless the caller asked for
        # SSLv2 explicitly (SSLv2 would be excluded by the default string).
        if ciphers is None and ssl_version != _SSLv2_IF_EXISTS:
            ciphers = _DEFAULT_CIPHERS
        if certfile and not keyfile:
            keyfile = certfile
        # see if it's connected
        try:
            socket.getpeername(self)
        except socket_error, e:
            if e.errno != errno.ENOTCONN:
                raise
            # no, no connection yet
            self._connected = False
            self._sslobj = None
        else:
            # yes, create the SSL object
            self._connected = True
            self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                        keyfile, certfile,
                                        cert_reqs, ssl_version, ca_certs,
                                        ciphers)
            if do_handshake_on_connect:
                self.do_handshake()
        self.keyfile = keyfile
        self.certfile = certfile
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.ca_certs = ca_certs
        self.ciphers = ciphers
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Reference count of file-like objects handed out by makefile();
        # close() only tears the socket down once this drops below 1.
        self._makefile_refs = 0

    def read(self, len=1024):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        try:
            return self._sslobj.read(len)
        except SSLError, x:
            # A peer that closes the TCP connection without a proper SSL
            # shutdown raises SSL_ERROR_EOF; optionally treat that as EOF.
            if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                return ''
            else:
                raise

    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        return self._sslobj.write(data)

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        return self._sslobj.peer_certificate(binary_form)

    def cipher(self):
        # Returns the (name, protocol, secret_bits) tuple from the SSL
        # object, or None when the channel is not wrapped.
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()

    def send(self, data, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            while True:
                try:
                    v = self._sslobj.write(data)
                except SSLError, x:
                    # Non-blocking socket not ready: report 0 bytes sent,
                    # mirroring plain-socket semantics.
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        return 0
                    elif x.args[0] == SSL_ERROR_WANT_WRITE:
                        return 0
                    else:
                        raise
                else:
                    return v
        else:
            return self._sock.send(data, flags)

    def sendto(self, data, flags_or_addr, addr=None):
        # Datagram-style sends make no sense on an SSL-wrapped stream.
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        elif addr is None:
            return self._sock.sendto(data, flags_or_addr)
        else:
            return self._sock.sendto(data, flags_or_addr, addr)

    def sendall(self, data, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            # Loop until every byte is handed to the SSL layer; send() may
            # transmit only part of the buffer per call.
            amount = len(data)
            count = 0
            while (count < amount):
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)

    def recv(self, buflen=1024, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            return self.read(buflen)
        else:
            return self._sock.recv(buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        # Default nbytes to the buffer size (or 1024 for an empty buffer).
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            # Read into a temporary string and copy into the caller's
            # buffer, since the SSL layer has no scatter-read API here.
            tmp_buffer = self.read(nbytes)
            v = len(tmp_buffer)
            buffer[:v] = tmp_buffer
            return v
        else:
            return self._sock.recv_into(buffer, nbytes, flags)

    def recvfrom(self, buflen=1024, flags=0):
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return self._sock.recvfrom(buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return self._sock.recvfrom_into(buffer, nbytes, flags)

    def pending(self):
        # Bytes already decrypted and buffered inside the SSL object.
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0

    def unwrap(self):
        # Perform the SSL shutdown and return the bare socket object.
        if self._sslobj:
            s = self._sslobj.shutdown()
            self._sslobj = None
            return s
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def shutdown(self, how):
        # Drop the SSL object before shutting down the transport.
        self._sslobj = None
        socket.shutdown(self, how)

    def close(self):
        # Only really close once no makefile() file objects remain.
        if self._makefile_refs < 1:
            self._sslobj = None
            socket.close(self)
        else:
            self._makefile_refs -= 1

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        self._sslobj.do_handshake()

    def _real_connect(self, addr, return_errno):
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call.  We connect it, then wrap it.
        if self._connected:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
                                    self.cert_reqs, self.ssl_version,
                                    self.ca_certs, self.ciphers)
        try:
            # return_errno selects connect_ex() semantics (errno return)
            # versus connect() semantics (exception on failure).
            if return_errno:
                rc = socket.connect_ex(self, addr)
            else:
                rc = None
                socket.connect(self, addr)
            if not rc:
                if self.do_handshake_on_connect:
                    self.do_handshake()
                self._connected = True
            return rc
        except socket_error:
            # Leave the socket unwrapped on failure so a retry is possible.
            self._sslobj = None
            raise

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        self._real_connect(addr, False)

    def connect_ex(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        return self._real_connect(addr, True)

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        try:
            # Wrap with the listening socket's SSL parameters; close the
            # raw socket if wrapping (e.g. the handshake) fails.
            return (SSLSocket(newsock,
                              keyfile=self.keyfile,
                              certfile=self.certfile,
                              server_side=True,
                              cert_reqs=self.cert_reqs,
                              ssl_version=self.ssl_version,
                              ca_certs=self.ca_certs,
                              ciphers=self.ciphers,
                              do_handshake_on_connect=self.do_handshake_on_connect,
                              suppress_ragged_eofs=self.suppress_ragged_eofs),
                    addr)
        except socket_error as e:
            newsock.close()
            raise e

    def makefile(self, mode='r', bufsize=-1):
        """Make and return a file-like object that
        works with the SSL connection.  Just use the code
        from the socket module."""
        self._makefile_refs += 1
        # close=True so as to decrement the reference count when done with
        # the file-like object.
        return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True, ciphers=None):
    # Convenience wrapper: turn a plain socket into an SSLSocket with the
    # given parameters (see SSLSocket.__init__ for their meaning).
    return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
                     server_side=server_side, cert_reqs=cert_reqs,
                     ssl_version=ssl_version, ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs,
                     ciphers=ciphers)
# some utility functions
def cert_time_to_seconds(cert_time):
    """Takes a date-time string in standard ASN1_print form
    ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
    a Python time value in seconds past the epoch.

    The string is always expressed in GMT (it ends in "GMT"), so the
    parsed struct_time must be converted with calendar.timegm().  The
    previous implementation used time.mktime(), which interprets the
    struct in the *local* timezone and therefore returned a value offset
    by the host's UTC offset.
    """
    import time
    import calendar
    return calendar.timegm(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
# Armor delimiters for PEM-encoded certificates.
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
    """Takes a certificate in binary DER format and returns the
    PEM version of it as a string.

    Python 2 code: base64 output is a str, so textwrap.fill() can wrap
    it at the conventional 64-character PEM line length.
    """
    if hasattr(base64, 'standard_b64encode'):
        # preferred because older API gets line-length wrong
        f = base64.standard_b64encode(der_cert_bytes)
        return (PEM_HEADER + '\n' +
                textwrap.fill(f, 64) + '\n' +
                PEM_FOOTER + '\n')
    else:
        # Fallback for very old base64 modules; encodestring inserts its
        # own newlines (at 76 chars, hence "gets line-length wrong" above).
        return (PEM_HEADER + '\n' +
                base64.encodestring(der_cert_bytes) +
                PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
    """Takes a certificate in ASCII PEM format and returns the
    DER-encoded version of it as a byte sequence.

    Raises ValueError when the input is not wrapped in the standard
    BEGIN/END CERTIFICATE armor lines.
    """
    if not pem_cert_string.startswith(PEM_HEADER):
        raise ValueError("Invalid PEM encoding; must start with %s"
                         % PEM_HEADER)
    if not pem_cert_string.strip().endswith(PEM_FOOTER):
        raise ValueError("Invalid PEM encoding; must end with %s"
                         % PEM_FOOTER)
    # Strip the armor lines and base64-decode the body (decodestring
    # tolerates the embedded newlines).
    d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
    return base64.decodestring(d)
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt.

    Performs a live network connection to (host, port); no validation is
    done unless ca_certs is supplied.
    """
    host, port = addr
    # Validation is implied by providing a CA bundle.
    if (ca_certs is not None):
        cert_reqs = CERT_REQUIRED
    else:
        cert_reqs = CERT_NONE
    s = wrap_socket(socket(), ssl_version=ssl_version,
                    cert_reqs=cert_reqs, ca_certs=ca_certs)
    s.connect(addr)
    # Fetch the peer cert in binary (DER) form, then re-encode as PEM.
    dercert = s.getpeercert(True)
    s.close()
    return DER_cert_to_PEM_cert(dercert)
def get_protocol_name(protocol_code):
    """Return the printable name for *protocol_code* (e.g. "TLSv1"),
    or '<unknown>' when the code is not a recognized protocol constant."""
    try:
        return _PROTOCOL_NAMES[protocol_code]
    except KeyError:
        return '<unknown>'
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
    """A replacement for the old socket.ssl function.  Designed
    for compability with Python 2.5 and earlier.  Will disappear in
    Python 3.0.

    Returns the raw _ssl SSL object (not an SSLSocket); the handshake is
    performed immediately if the socket is already connected.
    """
    if hasattr(sock, "_sock"):
        # Unwrap a socket.socket to its underlying _socket.socket.
        sock = sock._sock
    ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE,
                            PROTOCOL_SSLv23, None)
    try:
        sock.getpeername()
    except socket_error:
        # no, no connection yet
        pass
    else:
        # yes, do the handshake
        ssl_sock.do_handshake()
    return ssl_sock
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
/**
* JUnit integration support classes.
*/
package org.mockito.internal.junit; | java | github | https://github.com/mockito/mockito | mockito-core/src/main/java/org/mockito/internal/junit/package-info.java |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import itertools
import mock
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
import six
from sqlalchemy import orm
from testtools import matchers
import webob.exc
import neutron
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import router
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import test_lib
from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import ipam_non_pluggable_backend as non_ipam
from neutron.db import models_v2
from neutron import manager
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit import testlib_api
# Default core plugin class used when a test does not supply its own.
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
# Device-owner strings used to exercise owner-dependent code paths.
DEVICE_OWNER_COMPUTE = 'compute:None'
DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP
def optional_ctx(obj, fallback):
    """Return a context manager yielding *obj*, or ``fallback()`` when
    *obj* is falsey.

    Lets test helpers accept an optional pre-built resource: a caller-
    supplied object is yielded back unchanged, otherwise the fallback
    factory (e.g. ``self.network``) provides the context manager.
    """
    if not obj:
        return fallback()

    @contextlib.contextmanager
    def _passthrough():
        yield obj

    return _passthrough()
def _fake_get_pagination_helper(self, request):
    # Test stub meant to be monkeypatched over the controller's helper:
    # always use the emulated pagination path regardless of plugin support.
    return api_common.PaginationEmulatedHelper(request, self._primary_key)


def _fake_get_sorting_helper(self, request):
    # Test stub: always use the emulated sorting path.
    return api_common.SortingEmulatedHelper(request, self._attr_info)
# TODO(banix): Move the following method to ML2 db test module when ML2
# mechanism driver unit tests are corrected to use Ml2PluginV2TestCase
# instead of directly using NeutronDbPluginV2TestCase
def _get_create_db_method(resource):
    """Return the name of the plugin method that creates *resource*.

    ML2 exposes a ``_create_<resource>_db`` variant; other plugins only
    provide the plain ``create_<resource>`` name.
    """
    ml2_name = '_create_%s_db' % resource
    plugin = manager.NeutronManager.get_plugin()
    if hasattr(plugin, ml2_name):
        return ml2_name
    return 'create_%s' % resource
class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
fmt = 'json'
resource_prefix_map = {}
    def setUp(self, plugin=None, service_plugins=None,
              ext_mgr=None):
        """Configure neutron, load the core plugin and build the test API.

        :param plugin: dotted path of the core plugin (defaults to
            DB_PLUGIN_KLASS).
        :param service_plugins: mapping of service-plugin keys to defaults,
            resolved through test_lib.test_config.
        :param ext_mgr: optional extension manager; when given, an
            extensions middleware API (self.ext_api) is also created.
        """
        super(NeutronDbPluginV2TestCase, self).setUp()
        cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
        cfg.CONF.set_override('allow_overlapping_ips', True)
        # Make sure the extensions matching the plugin are reloaded for
        # each test.
        extensions.PluginAwareExtensionManager._instance = None
        # Save the attributes map in case the plugin alters it while
        # loading extensions.
        self.useFixture(tools.AttributeMapMemento())
        self._tenant_id = 'test-tenant'
        if not plugin:
            plugin = DB_PLUGIN_KLASS
        # Update the plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override(
            'service_plugins',
            [test_lib.test_config.get(key, default)
             for key, default in six.iteritems(service_plugins or {})]
        )
        cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
        cfg.CONF.set_override('max_dns_nameservers', 2)
        cfg.CONF.set_override('max_subnet_host_routes', 2)
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        self.api = router.APIRouter()
        # Set the default status
        self.net_create_status = 'ACTIVE'
        self.port_create_status = 'ACTIVE'

        # The _skip_native_* flags below tell individual tests whether the
        # plugin lacks native support (name-mangled class attributes).
        def _is_native_bulk_supported():
            plugin_obj = manager.NeutronManager.get_plugin()
            native_bulk_attr_name = ("_%s__native_bulk_support"
                                     % plugin_obj.__class__.__name__)
            return getattr(plugin_obj, native_bulk_attr_name, False)

        self._skip_native_bulk = not _is_native_bulk_supported()

        def _is_native_pagination_support():
            native_pagination_attr_name = (
                "_%s__native_pagination_support" %
                manager.NeutronManager.get_plugin().__class__.__name__)
            return (cfg.CONF.allow_pagination and
                    getattr(manager.NeutronManager.get_plugin(),
                            native_pagination_attr_name, False))

        self._skip_native_pagination = not _is_native_pagination_support()

        def _is_native_sorting_support():
            native_sorting_attr_name = (
                "_%s__native_sorting_support" %
                manager.NeutronManager.get_plugin().__class__.__name__)
            return (cfg.CONF.allow_sorting and
                    getattr(manager.NeutronManager.get_plugin(),
                            native_sorting_attr_name, False))

        self.plugin = manager.NeutronManager.get_plugin()
        self._skip_native_sorting = not _is_native_sorting_support()
        if ext_mgr:
            self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def tearDown(self):
self.api = None
self._deserializers = None
self._skip_native_bulk = None
self._skip_native_pagination = None
self._skip_native_sortin = None
self.ext_api = None
super(NeutronDbPluginV2TestCase, self).tearDown()
    def setup_config(self):
        """Build the oslo.config argument list for the test run."""
        # Create the default configurations
        args = ['--config-file', base.etcdir('neutron.conf')]
        # If test_config specifies some config-file, use it, as well
        for config_file in test_lib.test_config.get('config_files', []):
            args.extend(['--config-file', config_file])
        super(NeutronDbPluginV2TestCase, self).setup_config(args=args)
    def _req(self, method, resource, data=None, fmt=None, id=None, params=None,
             action=None, subresource=None, sub_id=None, context=None):
        """Build a webob test request for the given REST operation.

        The path is assembled as
        ``[prefix]/<resource>[/<id>[/<subresource>[/<sub_id>]]][/<action>].<fmt>``
        where missing path pieces are simply skipped.
        """
        fmt = fmt or self.fmt
        path = '/%s.%s' % (
            '/'.join(p for p in
                     (resource, id, subresource, sub_id, action) if p),
            fmt
        )
        # Some resources (extensions) live under an API prefix.
        prefix = self.resource_prefix_map.get(resource)
        if prefix:
            path = prefix + path
        content_type = 'application/%s' % fmt
        body = None
        if data is not None:  # empty dict is valid
            body = self.serialize(data)
        return testlib_api.create_request(path, body, content_type, method,
                                          query_string=params, context=context)
    def new_create_request(self, resource, data, fmt=None, id=None,
                           subresource=None, context=None):
        # POST /<resource>[/<id>/<subresource>]
        return self._req('POST', resource, data, fmt, id=id,
                         subresource=subresource, context=context)

    def new_list_request(self, resource, fmt=None, params=None,
                         subresource=None):
        # GET collection; 'params' is a raw query string.
        return self._req(
            'GET', resource, None, fmt, params=params, subresource=subresource
        )

    def new_show_request(self, resource, id, fmt=None,
                         subresource=None, fields=None):
        # GET a single resource; 'fields' restricts returned attributes
        # via repeated fields=<name> query parameters.
        if fields:
            params = "&".join(["fields=%s" % x for x in fields])
        else:
            params = None
        return self._req('GET', resource, None, fmt, id=id,
                         params=params, subresource=subresource)

    def new_delete_request(self, resource, id, fmt=None, subresource=None,
                           sub_id=None):
        # DELETE /<resource>/<id>[/<subresource>/<sub_id>]
        return self._req(
            'DELETE',
            resource,
            None,
            fmt,
            id=id,
            subresource=subresource,
            sub_id=sub_id
        )

    def new_update_request(self, resource, data, id, fmt=None,
                           subresource=None, context=None):
        # PUT with a request body.
        return self._req(
            'PUT', resource, data, fmt, id=id, subresource=subresource,
            context=context
        )

    def new_action_request(self, resource, data, id, action, fmt=None,
                           subresource=None):
        # PUT /<resource>/<id>/<action> -- resource "actions".
        return self._req(
            'PUT',
            resource,
            data,
            fmt,
            id=id,
            action=action,
            subresource=subresource
        )

    def deserialize(self, content_type, response):
        # Unwrap the serialized response body for the given content type.
        ctype = 'application/%s' % content_type
        data = self._deserializers[ctype].deserialize(response.body)['body']
        return data
    def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
        """Creates a bulk request from a list of objects."""
        collection = "%ss" % resource
        req_data = {collection: objects}
        req = self.new_create_request(collection, req_data, fmt)
        # Optionally attach an auth context: either a tenant-scoped one
        # built here (set_context + tenant_id) or one passed in directly.
        if ('set_context' in kwargs and
                kwargs['set_context'] is True and
                'tenant_id' in kwargs):
            # create a specific auth context for this request
            req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        elif 'context' in kwargs:
            req.environ['neutron.context'] = kwargs['context']
        return req.get_response(self.api)
def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
"""Creates a bulk request for any kind of resource."""
objects = []
collection = "%ss" % resource
for i in range(number):
obj = copy.deepcopy(data)
obj[resource]['name'] = "%s_%s" % (name, i)
if 'override' in kwargs and i in kwargs['override']:
obj[resource].update(kwargs['override'][i])
objects.append(obj)
req_data = {collection: objects}
req = self.new_create_request(collection, req_data, fmt)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
elif 'context' in kwargs:
req.environ['neutron.context'] = kwargs['context']
return req.get_response(self.api)
    def _create_network(self, fmt, name, admin_state_up,
                        arg_list=None, **kwargs):
        """POST a network and return the raw webob response."""
        data = {'network': {'name': name,
                            'admin_state_up': admin_state_up,
                            'tenant_id': self._tenant_id}}
        # Copy whitelisted kwargs (plus caller-specified extras) into the
        # request body.
        for arg in (('admin_state_up', 'tenant_id', 'shared',
                     'vlan_transparent') + (arg_list or ())):
            # Arg must be present
            if arg in kwargs:
                data['network'][arg] = kwargs[arg]
        network_req = self.new_create_request('networks', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            network_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        return network_req.get_response(self.api)

    def _create_network_bulk(self, fmt, number, name,
                             admin_state_up, **kwargs):
        # Names are generated by _create_bulk; 'name' here is unused as a
        # prefix (the bulk helper uses its own default).
        base_data = {'network': {'admin_state_up': admin_state_up,
                                 'tenant_id': self._tenant_id}}
        return self._create_bulk(fmt, number, 'network', base_data, **kwargs)
    def _create_subnet(self, fmt, net_id, cidr,
                       expected_res_status=None, **kwargs):
        """POST a subnet; optionally assert the response status code."""
        data = {'subnet': {'network_id': net_id,
                           'cidr': cidr,
                           'ip_version': 4,
                           'tenant_id': self._tenant_id}}
        for arg in ('ip_version', 'tenant_id',
                    'enable_dhcp', 'allocation_pools',
                    'dns_nameservers', 'host_routes',
                    'shared', 'ipv6_ra_mode', 'ipv6_address_mode'):
            # Arg must be present and not null (but can be false)
            if kwargs.get(arg) is not None:
                data['subnet'][arg] = kwargs[arg]
        # gateway_ip is special-cased: the ATTR_NOT_SPECIFIED sentinel
        # means "omit from the request" (None is a meaningful value).
        if ('gateway_ip' in kwargs and
                kwargs['gateway_ip'] is not attributes.ATTR_NOT_SPECIFIED):
            data['subnet']['gateway_ip'] = kwargs['gateway_ip']
        subnet_req = self.new_create_request('subnets', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            subnet_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        subnet_res = subnet_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(subnet_res.status_int, expected_res_status)
        return subnet_res

    def _create_subnet_bulk(self, fmt, number, net_id, name,
                            ip_version=4, **kwargs):
        base_data = {'subnet': {'network_id': net_id,
                                'ip_version': ip_version,
                                'tenant_id': self._tenant_id}}
        # auto-generate cidrs as they should not overlap
        overrides = dict((k, v)
                         for (k, v) in zip(range(number),
                                           [{'cidr': "10.0.%s.0/24" % num}
                                            for num in range(number)]))
        kwargs.update({'override': overrides})
        return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
    def _create_subnetpool(self, fmt, prefixes,
                           expected_res_status=None, admin=False, **kwargs):
        """POST a subnetpool; optionally assert the response status.

        NOTE(review): when admin is False a 'tenant_id' kwarg is required
        (a KeyError is raised otherwise) -- confirm callers always pass it.
        """
        subnetpool = {'subnetpool': {'prefixes': prefixes}}
        # All extra kwargs are stringified into the request body.
        for k, v in kwargs.items():
            subnetpool['subnetpool'][k] = str(v)
        api = self._api_for_resource('subnetpools')
        subnetpools_req = self.new_create_request('subnetpools',
                                                  subnetpool, fmt)
        if not admin:
            neutron_context = context.Context('', kwargs['tenant_id'])
            subnetpools_req.environ['neutron.context'] = neutron_context
        subnetpool_res = subnetpools_req.get_response(api)
        if expected_res_status:
            self.assertEqual(subnetpool_res.status_int, expected_res_status)
        return subnetpool_res
    def _create_port(self, fmt, net_id, expected_res_status=None,
                     arg_list=None, **kwargs):
        """POST a port; optionally assert the response status code."""
        data = {'port': {'network_id': net_id,
                         'tenant_id': self._tenant_id}}
        for arg in (('admin_state_up', 'device_id',
                     'mac_address', 'name', 'fixed_ips',
                     'tenant_id', 'device_owner', 'security_groups') +
                    (arg_list or ())):
            # Arg must be present
            if arg in kwargs:
                data['port'][arg] = kwargs[arg]
        # create a dhcp port device id if one hasn't been supplied
        if ('device_owner' in kwargs and
                kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
                'host' in kwargs and
                'device_id' not in kwargs):
            device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
            data['port']['device_id'] = device_id
        port_req = self.new_create_request('ports', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            port_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        port_res = port_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(port_res.status_int, expected_res_status)
        return port_res

    def _list_ports(self, fmt, expected_res_status=None,
                    net_id=None, **kwargs):
        """GET the port collection, optionally filtered by network/owner."""
        query_params = []
        if net_id:
            query_params.append("network_id=%s" % net_id)
        if kwargs.get('device_owner'):
            query_params.append("device_owner=%s" % kwargs.get('device_owner'))
        port_req = self.new_list_request('ports', fmt, '&'.join(query_params))
        if ('set_context' in kwargs and
                kwargs['set_context'] is True and
                'tenant_id' in kwargs):
            # create a specific auth context for this request
            port_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        port_res = port_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(port_res.status_int, expected_res_status)
        return port_res

    def _create_port_bulk(self, fmt, number, net_id, name,
                          admin_state_up, **kwargs):
        base_data = {'port': {'network_id': net_id,
                              'admin_state_up': admin_state_up,
                              'tenant_id': self._tenant_id}}
        return self._create_bulk(fmt, number, 'port', base_data, **kwargs)
    def _make_network(self, fmt, name, admin_state_up, **kwargs):
        """Create a network and return it deserialized, or raise on error."""
        res = self._create_network(fmt, name, admin_state_up, **kwargs)
        # TODO(salvatore-orlando): do exception handling in this test module
        # in a uniform way (we do it differently for ports, subnets, and nets
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_subnet(self, fmt, network, gateway, cidr,
                     allocation_pools=None, ip_version=4, enable_dhcp=True,
                     dns_nameservers=None, host_routes=None, shared=None,
                     ipv6_ra_mode=None, ipv6_address_mode=None,
                     tenant_id=None, set_context=False):
        """Create a subnet on *network* and return it deserialized."""
        res = self._create_subnet(fmt,
                                  net_id=network['network']['id'],
                                  cidr=cidr,
                                  gateway_ip=gateway,
                                  tenant_id=(tenant_id or
                                             network['network']['tenant_id']),
                                  allocation_pools=allocation_pools,
                                  ip_version=ip_version,
                                  enable_dhcp=enable_dhcp,
                                  dns_nameservers=dns_nameservers,
                                  host_routes=host_routes,
                                  shared=shared,
                                  ipv6_ra_mode=ipv6_ra_mode,
                                  ipv6_address_mode=ipv6_address_mode,
                                  set_context=set_context)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs):
        """Create a subnetpool and return it deserialized."""
        res = self._create_subnetpool(fmt,
                                      prefixes,
                                      None,
                                      admin,
                                      **kwargs)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
        """Create a port and return it deserialized."""
        res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)
    def _api_for_resource(self, resource):
        # Core resources go through the main API router; everything else
        # is assumed to be served by the extensions middleware.
        if resource in ['networks', 'subnets', 'ports', 'subnetpools']:
            return self.api
        else:
            return self.ext_api

    def _delete(self, collection, id,
                expected_code=webob.exc.HTTPNoContent.code,
                neutron_context=None):
        """DELETE one resource and assert the response status."""
        req = self.new_delete_request(collection, id)
        if neutron_context:
            # create a specific auth context for this request
            req.environ['neutron.context'] = neutron_context
        res = req.get_response(self._api_for_resource(collection))
        self.assertEqual(res.status_int, expected_code)

    def _show_response(self, resource, id, neutron_context=None):
        """GET one resource and return the raw response (no assertion)."""
        req = self.new_show_request(resource, id)
        if neutron_context:
            # create a specific auth context for this request
            req.environ['neutron.context'] = neutron_context
        return req.get_response(self._api_for_resource(resource))
    def _show(self, resource, id,
              expected_code=webob.exc.HTTPOk.code,
              neutron_context=None):
        """GET one resource, assert the status and return it deserialized."""
        res = self._show_response(resource, id,
                                  neutron_context=neutron_context)
        self.assertEqual(expected_code, res.status_int)
        return self.deserialize(self.fmt, res)

    def _update(self, resource, id, new_data,
                expected_code=webob.exc.HTTPOk.code,
                neutron_context=None):
        """PUT new_data to one resource, assert status, return the result."""
        req = self.new_update_request(resource, new_data, id)
        if neutron_context:
            # create a specific auth context for this request
            req.environ['neutron.context'] = neutron_context
        res = req.get_response(self._api_for_resource(resource))
        self.assertEqual(res.status_int, expected_code)
        return self.deserialize(self.fmt, res)

    def _list(self, resource, fmt=None, neutron_context=None,
              query_params=None):
        """GET a collection, assert 200 and return it deserialized."""
        fmt = fmt or self.fmt
        req = self.new_list_request(resource, fmt, query_params)
        if neutron_context:
            req.environ['neutron.context'] = neutron_context
        res = req.get_response(self._api_for_resource(resource))
        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
        return self.deserialize(fmt, res)
    def _fail_second_call(self, patched_plugin, orig, *args, **kwargs):
        """Invoked by test cases for injecting failures in plugin."""
        # First invocation runs the real method; any subsequent call on the
        # patched plugin raises, simulating a mid-bulk failure.
        def second_call(*args, **kwargs):
            raise n_exc.NeutronException()
        patched_plugin.side_effect = second_call
        return orig(*args, **kwargs)

    def _validate_behavior_on_bulk_failure(
            self, res, collection,
            errcode=webob.exc.HTTPClientError.code):
        # A failed bulk create must roll back: the collection stays empty.
        self.assertEqual(res.status_int, errcode)
        req = self.new_list_request(collection)
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
        items = self.deserialize(self.fmt, res)
        self.assertEqual(len(items[collection]), 0)

    def _validate_behavior_on_bulk_success(self, res, collection,
                                           names=['test_0', 'test_1']):
        # NOTE(review): the 'names' parameter is accepted but the asserts
        # below hard-code 'test_0'/'test_1' -- callers relying on custom
        # names would not be validated.
        self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
        items = self.deserialize(self.fmt, res)[collection]
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0]['name'], 'test_0')
        self.assertEqual(items[1]['name'], 'test_1')

    def _test_list_resources(self, resource, items, neutron_context=None,
                             query_params=None):
        """List *resource* and assert the ids match the created *items*."""
        res = self._list('%ss' % resource,
                         neutron_context=neutron_context,
                         query_params=query_params)
        resource = resource.replace('-', '_')
        self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
                              [i[resource]['id'] for i in items])
@contextlib.contextmanager
def network(self, name='net1',
admin_state_up=True,
fmt=None,
**kwargs):
network = self._make_network(fmt or self.fmt, name,
admin_state_up, **kwargs)
yield network
    @contextlib.contextmanager
    def subnet(self, network=None,
               gateway_ip=attributes.ATTR_NOT_SPECIFIED,
               cidr='10.0.0.0/24',
               fmt=None,
               ip_version=4,
               allocation_pools=None,
               enable_dhcp=True,
               dns_nameservers=None,
               host_routes=None,
               shared=None,
               ipv6_ra_mode=None,
               ipv6_address_mode=None,
               tenant_id=None,
               set_context=False):
        """Context manager yielding a subnet created via the API.

        If ``network`` is None, optional_ctx creates (and owns) a fresh
        network; otherwise the supplied one is used as-is.  All other
        arguments are passed straight through to _make_subnet.
        """
        with optional_ctx(network, self.network) as network_to_use:
            subnet = self._make_subnet(fmt or self.fmt,
                                       network_to_use,
                                       gateway_ip,
                                       cidr,
                                       allocation_pools,
                                       ip_version,
                                       enable_dhcp,
                                       dns_nameservers,
                                       host_routes,
                                       shared=shared,
                                       ipv6_ra_mode=ipv6_ra_mode,
                                       ipv6_address_mode=ipv6_address_mode,
                                       tenant_id=tenant_id,
                                       set_context=set_context)
            yield subnet
@contextlib.contextmanager
def subnetpool(self, prefixes, admin=False, **kwargs):
subnetpool = self._make_subnetpool(self.fmt,
prefixes,
admin,
**kwargs)
yield subnetpool
@contextlib.contextmanager
def port(self, subnet=None, fmt=None, **kwargs):
with optional_ctx(subnet, self.subnet) as subnet_to_use:
net_id = subnet_to_use['subnet']['network_id']
port = self._make_port(fmt or self.fmt, net_id, **kwargs)
yield port
    def _test_list_with_sort(self, resource,
                             items, sorts, resources=None, query_params=''):
        """List ``resources`` with sort parameters and verify result order.

        ``items`` must already be given in the expected sorted order.
        """
        query_str = query_params
        for key, direction in sorts:
            query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
                                                                  direction)
        if not resources:
            resources = '%ss' % resource
        req = self.new_list_request(resources,
                                    params=query_str)
        api = self._api_for_resource(resources)
        res = self.deserialize(self.fmt, req.get_response(api))
        # Hyphens appear in URLs but underscores in response bodies.
        resource = resource.replace('-', '_')
        resources = resources.replace('-', '_')
        expected_res = [item[resource]['id'] for item in items]
        self.assertEqual(expected_res, [n['id'] for n in res[resources]])
    def _test_list_with_pagination(self, resource, items, sort,
                                   limit, expected_page_num,
                                   resources=None,
                                   query_params='',
                                   verify_key='id'):
        """Walk forward pagination links and verify page sizes and order.

        Follows rel='next' links until exhausted, asserting each page holds
        at most ``limit`` entries, that ``expected_page_num`` pages are
        visited, and that the concatenated pages match ``items`` in order.
        """
        if not resources:
            resources = '%ss' % resource
        query_str = query_params + '&' if query_params else ''
        query_str = query_str + ("limit=%s&sort_key=%s&"
                                 "sort_dir=%s") % (limit, sort[0], sort[1])
        req = self.new_list_request(resources, params=query_str)
        items_res = []
        page_num = 0
        api = self._api_for_resource(resources)
        # Hyphens appear in URLs but underscores in response bodies.
        resource = resource.replace('-', '_')
        resources = resources.replace('-', '_')
        while req:
            page_num = page_num + 1
            res = self.deserialize(self.fmt, req.get_response(api))
            self.assertThat(len(res[resources]),
                            matchers.LessThan(limit + 1))
            items_res = items_res + res[resources]
            req = None
            if '%s_links' % resources in res:
                for link in res['%s_links' % resources]:
                    if link['rel'] == 'next':
                        content_type = 'application/%s' % self.fmt
                        req = testlib_api.create_request(link['href'],
                                                         '', content_type)
                        # A page that links onwards must be exactly full.
                        self.assertEqual(len(res[resources]),
                                         limit)
        self.assertEqual(expected_page_num, page_num)
        self.assertEqual([item[resource][verify_key] for item in items],
                         [n[verify_key] for n in items_res])
    def _test_list_with_pagination_reverse(self, resource, items, sort,
                                           limit, expected_page_num,
                                           resources=None,
                                           query_params=''):
        """Walk reverse pagination ('previous' links) and verify the result.

        Starts from the last item as marker with page_reverse=True, follows
        rel='previous' links, and checks the re-assembled list equals
        ``items`` reversed.
        """
        if not resources:
            resources = '%ss' % resource
        resource = resource.replace('-', '_')
        api = self._api_for_resource(resources)
        # Seed the walk from the last expected item.
        marker = items[-1][resource]['id']
        query_str = query_params + '&' if query_params else ''
        query_str = query_str + ("limit=%s&page_reverse=True&"
                                 "sort_key=%s&sort_dir=%s&"
                                 "marker=%s") % (limit, sort[0], sort[1],
                                                 marker)
        req = self.new_list_request(resources, params=query_str)
        item_res = [items[-1][resource]]
        page_num = 0
        resources = resources.replace('-', '_')
        while req:
            page_num = page_num + 1
            res = self.deserialize(self.fmt, req.get_response(api))
            self.assertThat(len(res[resources]),
                            matchers.LessThan(limit + 1))
            # Pages arrive in reverse order; flip each before appending.
            res[resources].reverse()
            item_res = item_res + res[resources]
            req = None
            if '%s_links' % resources in res:
                for link in res['%s_links' % resources]:
                    if link['rel'] == 'previous':
                        content_type = 'application/%s' % self.fmt
                        req = testlib_api.create_request(link['href'],
                                                         '', content_type)
                        # A page that links onwards must be exactly full.
                        self.assertEqual(len(res[resources]),
                                         limit)
        self.assertEqual(expected_page_num, page_num)
        expected_res = [item[resource]['id'] for item in items]
        expected_res.reverse()
        self.assertEqual(expected_res, [n['id'] for n in item_res])
def _compare_resource(self, observed_res, expected_res, res_name):
'''
Compare the observed and expected resources (ie compare subnets)
'''
for k in expected_res:
self.assertIn(k, observed_res[res_name])
if isinstance(expected_res[k], list):
self.assertEqual(sorted(observed_res[res_name][k]),
sorted(expected_res[k]))
else:
self.assertEqual(observed_res[res_name][k], expected_res[k])
def _validate_resource(self, resource, keys, res_name):
for k in keys:
self.assertIn(k, resource[res_name])
if isinstance(keys[k], list):
self.assertEqual(sorted(resource[res_name][k]),
sorted(keys[k]))
else:
self.assertEqual(resource[res_name][k], keys[k])
class TestBasicGet(NeutronDbPluginV2TestCase):
    # Direct plugin _get_network lookups, bypassing the API layer.

    def test_single_get_admin(self):
        plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
        with self.network() as network:
            net_id = network['network']['id']
            ctx = context.get_admin_context()
            n = plugin._get_network(ctx, net_id)
            self.assertEqual(net_id, n.id)

    def test_single_get_tenant(self):
        # NOTE(review): despite the name this uses an admin context, making
        # it identical to test_single_get_admin -- confirm whether a tenant
        # context was intended here.
        plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
        with self.network() as network:
            net_id = network['network']['id']
            ctx = context.get_admin_context()
            n = plugin._get_network(ctx, net_id)
            self.assertEqual(net_id, n.id)
class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
    """Checks the HTTP status codes returned by the v2 API controllers.

    assertEqual calls consistently use (expected, actual) argument order,
    matching the convention already used elsewhere in this file, so failure
    messages read correctly.
    """

    def test_create_returns_201(self):
        res = self._create_network(self.fmt, 'net2', True)
        self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_list_returns_200(self):
        req = self.new_list_request('networks')
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPOk.code, res.status_int)

    def _check_list_with_fields(self, res, field_name):
        """Assert the list holds one network exposing only ``field_name``."""
        self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
        body = self.deserialize(self.fmt, res)
        # further checks: 1 networks
        self.assertEqual(1, len(body['networks']))
        # 1 field in the network record
        self.assertEqual(1, len(body['networks'][0]))
        # field is 'name'
        self.assertIn(field_name, body['networks'][0])

    def test_list_with_fields(self):
        self._create_network(self.fmt, 'some_net', True)
        req = self.new_list_request('networks', params="fields=name")
        res = req.get_response(self.api)
        self._check_list_with_fields(res, 'name')

    def test_list_with_fields_noadmin(self):
        tenant_id = 'some_tenant'
        self._create_network(self.fmt,
                             'some_net',
                             True,
                             tenant_id=tenant_id,
                             set_context=True)
        req = self.new_list_request('networks', params="fields=name")
        req.environ['neutron.context'] = context.Context('', tenant_id)
        res = req.get_response(self.api)
        self._check_list_with_fields(res, 'name')

    def test_list_with_fields_noadmin_and_policy_field(self):
        """If a field used by policy is selected, do not duplicate it.

        Verifies that if the field parameter explicitly specifies a field
        which is used by the policy engine, then it is not duplicated
        in the response.
        """
        tenant_id = 'some_tenant'
        self._create_network(self.fmt,
                             'some_net',
                             True,
                             tenant_id=tenant_id,
                             set_context=True)
        req = self.new_list_request('networks', params="fields=tenant_id")
        req.environ['neutron.context'] = context.Context('', tenant_id)
        res = req.get_response(self.api)
        self._check_list_with_fields(res, 'tenant_id')

    def test_show_returns_200(self):
        with self.network() as net:
            req = self.new_show_request('networks', net['network']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)

    def test_delete_returns_204(self):
        res = self._create_network(self.fmt, 'net1', True)
        net = self.deserialize(self.fmt, res)
        req = self.new_delete_request('networks', net['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_update_returns_200(self):
        with self.network() as net:
            req = self.new_update_request('networks',
                                          {'network': {'name': 'steve'}},
                                          net['network']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)

    def test_update_invalid_json_400(self):
        with self.network() as net:
            req = self.new_update_request('networks',
                                          '{{"name": "aaa"}}',
                                          net['network']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_bad_route_404(self):
        req = self.new_list_request('doohickeys')
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
class TestPortsV2(NeutronDbPluginV2TestCase):
    def test_create_port_json(self):
        """Basic port create: state, MAC, fixed IP and name are returned."""
        keys = [('admin_state_up', True), ('status', self.port_create_status)]
        with self.port(name='myname') as port:
            for k, v in keys:
                self.assertEqual(port['port'][k], v)
            self.assertIn('mac_address', port['port'])
            ips = port['port']['fixed_ips']
            self.assertEqual(len(ips), 1)
            # First allocatable address on the default 10.0.0.0/24 subnet.
            self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
            self.assertEqual('myname', port['port']['name'])
def test_create_port_as_admin(self):
with self.network() as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
set_context=False)
def test_create_port_bad_tenant(self):
with self.network() as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPNotFound.code,
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
set_context=True)
    def test_create_port_public_network(self):
        """A tenant may create a port on a shared network it does not own."""
        keys = [('admin_state_up', True), ('status', self.port_create_status)]
        with self.network(shared=True) as network:
            port_res = self._create_port(self.fmt,
                                         network['network']['id'],
                                         webob.exc.HTTPCreated.code,
                                         tenant_id='another_tenant',
                                         set_context=True)
            port = self.deserialize(self.fmt, port_res)
            for k, v in keys:
                self.assertEqual(port['port'][k], v)
            self.assertIn('mac_address', port['port'])
            # Explicit cleanup: the port belongs to another tenant and is not
            # torn down by the network context manager.
            self._delete('ports', port['port']['id'])
    def test_create_port_public_network_with_ip(self):
        """Port on a shared network gets the expected IP from its subnet."""
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                keys = [('admin_state_up', True),
                        ('status', self.port_create_status),
                        ('fixed_ips', [{'subnet_id': subnet['subnet']['id'],
                                        'ip_address': '10.0.0.2'}])]
                port_res = self._create_port(self.fmt,
                                             network['network']['id'],
                                             webob.exc.HTTPCreated.code,
                                             tenant_id='another_tenant',
                                             set_context=True)
                port = self.deserialize(self.fmt, port_res)
                for k, v in keys:
                    self.assertEqual(port['port'][k], v)
                self.assertIn('mac_address', port['port'])
                # Explicit cleanup: the port is owned by another tenant.
                self._delete('ports', port['port']['id'])
def test_create_port_anticipating_allocation(self):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
fixed_ips = [{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
self._create_port(self.fmt, network['network']['id'],
webob.exc.HTTPCreated.code,
fixed_ips=fixed_ips)
    def test_create_port_public_network_with_invalid_ip_no_subnet_id(self,
            expected_error='InvalidIpForNetwork'):
        """An IP outside every subnet of the network is rejected with 400."""
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24'):
                ips = [{'ip_address': '1.1.1.1'}]
                res = self._create_port(self.fmt,
                                        network['network']['id'],
                                        webob.exc.HTTPBadRequest.code,
                                        fixed_ips=ips,
                                        set_context=True)
                data = self.deserialize(self.fmt, res)
                msg = str(n_exc.InvalidIpForNetwork(ip_address='1.1.1.1'))
                self.assertEqual(expected_error, data['NeutronError']['type'])
                self.assertEqual(msg, data['NeutronError']['message'])
    def test_create_port_public_network_with_invalid_ip_and_subnet_id(self,
            expected_error='InvalidIpForSubnet'):
        """An IP outside the explicitly requested subnet is rejected (400)."""
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                ips = [{'subnet_id': subnet['subnet']['id'],
                        'ip_address': '1.1.1.1'}]
                res = self._create_port(self.fmt,
                                        network['network']['id'],
                                        webob.exc.HTTPBadRequest.code,
                                        fixed_ips=ips,
                                        set_context=True)
                data = self.deserialize(self.fmt, res)
                msg = str(n_exc.InvalidIpForSubnet(ip_address='1.1.1.1'))
                self.assertEqual(expected_error, data['NeutronError']['type'])
                self.assertEqual(msg, data['NeutronError']['message'])
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True)
self._validate_behavior_on_bulk_success(res, 'ports')
for p in self.deserialize(self.fmt, res)['ports']:
self._delete('ports', p['id'])
    def test_create_ports_bulk_emulated(self):
        """Bulk create via the emulated (one-by-one) code path succeeds."""
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        # Patching __builtin__ makes hasattr deny native bulk support
        # (Python 2 module name).
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with self.network() as net:
                res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
                                             'test', True)
                self._validate_behavior_on_bulk_success(res, 'ports')
                for p in self.deserialize(self.fmt, res)['ports']:
                    self._delete('ports', p['id'])
def test_create_ports_bulk_wrong_input(self):
with self.network() as net:
overrides = {1: {'admin_state_up': 'doh'}}
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True,
override=overrides)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
req = self.new_list_request('ports')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
ports = self.deserialize(self.fmt, res)
self.assertEqual(len(ports['ports']), 0)
    def test_create_ports_bulk_emulated_plugin_failure(self):
        """A plugin failure mid-bulk (emulated path) rolls everything back."""
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        # Patching __builtin__ makes hasattr deny native bulk support
        # (Python 2 module name).
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            orig = manager.NeutronManager.get_plugin().create_port
            method_to_patch = _get_create_db_method('port')
            with mock.patch.object(manager.NeutronManager.get_plugin(),
                                   method_to_patch) as patched_plugin:

                def side_effect(*args, **kwargs):
                    # First create succeeds, second raises (see helper).
                    return self._fail_second_call(patched_plugin, orig,
                                                  *args, **kwargs)

                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_port_bulk(self.fmt, 2,
                                                 net['network']['id'],
                                                 'test',
                                                 True)
                    # We expect a 500 as we injected a fault in the plugin
                    self._validate_behavior_on_bulk_failure(
                        res, 'ports', webob.exc.HTTPServerError.code
                    )
    def test_create_ports_bulk_native_plugin_failure(self):
        """A plugin failure mid-bulk (native path) rolls everything back."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")
        ctx = context.get_admin_context()
        with self.network() as net:
            plugin = manager.NeutronManager.get_plugin()
            orig = plugin.create_port
            method_to_patch = _get_create_db_method('port')
            with mock.patch.object(plugin, method_to_patch) as patched_plugin:

                def side_effect(*args, **kwargs):
                    # First create succeeds, second raises (see helper).
                    return self._fail_second_call(patched_plugin, orig,
                                                  *args, **kwargs)

                patched_plugin.side_effect = side_effect
                res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
                                             'test', True, context=ctx)
                # We expect a 500 as we injected a fault in the plugin
                self._validate_behavior_on_bulk_failure(
                    res, 'ports', webob.exc.HTTPServerError.code)
def test_list_ports(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port() as v1, self.port() as v2, self.port() as v3:
ports = (v1, v2, v3)
self._test_list_resources('port', ports)
    def test_list_ports_filtered_by_fixed_ip(self):
        """Filtering by fixed_ips matches only the port owning that IP."""
        # for this test we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port() as port1, self.port():
            fixed_ips = port1['port']['fixed_ips'][0]
            # %%3D is a literal URL-encoded '=' in the filter expressions.
            query_params = """
fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
""".strip() % (fixed_ips['ip_address'],
               '192.168.126.5',
               fixed_ips['subnet_id'])
            self._test_list_resources('port', [port1],
                                      query_params=query_params)
    def test_list_ports_public_network(self):
        """Admin sees all ports on a shared net; tenants see only their own."""
        with self.network(shared=True) as network:
            with self.subnet(network) as subnet:
                with self.port(subnet, tenant_id='tenant_1') as port1,\
                        self.port(subnet, tenant_id='tenant_2') as port2:
                    # Admin request - must return both ports
                    self._test_list_resources('port', [port1, port2])
                    # Tenant_1 request - must return single port
                    q_context = context.Context('', 'tenant_1')
                    self._test_list_resources('port', [port1],
                                              neutron_context=q_context)
                    # Tenant_2 request - must return single port
                    q_context = context.Context('', 'tenant_2')
                    self._test_list_resources('port', [port2],
                                              neutron_context=q_context)
    def test_list_ports_with_sort_native(self):
        """Native sorting orders by admin_state_up asc, mac_address desc."""
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(admin_state_up='True',
                       mac_address='00:00:00:00:00:01') as port1,\
                self.port(admin_state_up='False',
                          mac_address='00:00:00:00:00:02') as port2,\
                self.port(admin_state_up='False',
                          mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_sort('port', (port3, port2, port1),
                                      [('admin_state_up', 'asc'),
                                       ('mac_address', 'desc')])
def test_list_ports_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(admin_state_up='True',
mac_address='00:00:00:00:00:01') as port1,\
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:02') as port2,\
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_sort('port', (port3, port2, port1),
[('admin_state_up', 'asc'),
('mac_address', 'desc')])
    def test_list_ports_with_pagination_native(self):
        """Native pagination: 3 ports, limit 2, expects exactly 2 pages."""
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(mac_address='00:00:00:00:00:01') as port1,\
                self.port(mac_address='00:00:00:00:00:02') as port2,\
                self.port(mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_pagination('port',
                                            (port1, port2, port3),
                                            ('mac_address', 'asc'), 2, 2)
def test_list_ports_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(mac_address='00:00:00:00:00:01') as port1,\
self.port(mac_address='00:00:00:00:00:02') as port2,\
self.port(mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_pagination('port',
(port1, port2, port3),
('mac_address', 'asc'), 2, 2)
    def test_list_ports_with_pagination_reverse_native(self):
        """Native reverse pagination: 3 ports, limit 2, 2 pages expected."""
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(mac_address='00:00:00:00:00:01') as port1,\
                self.port(mac_address='00:00:00:00:00:02') as port2,\
                self.port(mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_pagination_reverse('port',
                                                    (port1, port2, port3),
                                                    ('mac_address', 'asc'),
                                                    2, 2)
def test_list_ports_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(mac_address='00:00:00:00:00:01') as port1,\
self.port(mac_address='00:00:00:00:00:02') as port2,\
self.port(mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_pagination_reverse('port',
(port1, port2, port3),
('mac_address', 'asc'),
2, 2)
def test_show_port(self):
with self.port() as port:
req = self.new_show_request('ports', port['port']['id'], self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port']['id'], sport['port']['id'])
def test_delete_port(self):
with self.port() as port:
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_delete_port_public_network(self):
with self.network(shared=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_update_port(self):
with self.port() as port:
data = {'port': {'admin_state_up': False}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
def update_port_mac(self, port, updated_fixed_ips=None):
orig_mac = port['mac_address']
mac = orig_mac.split(':')
mac[5] = '01' if mac[5] != '01' else '00'
new_mac = ':'.join(mac)
data = {'port': {'mac_address': new_mac}}
if updated_fixed_ips:
data['port']['fixed_ips'] = updated_fixed_ips
req = self.new_update_request('ports', data, port['id'])
return req.get_response(self.api), new_mac
    def _check_v6_auto_address_address(self, port, subnet):
        """For auto-address IPv6 subnets, the port IP must be EUI-64-derived
        from the port's MAC; no-op for other subnets."""
        if ipv6_utils.is_auto_address_subnet(subnet['subnet']):
            port_mac = port['port']['mac_address']
            subnet_cidr = subnet['subnet']['cidr']
            eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
                                                             port_mac))
            self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
                             eui_addr)
def check_update_port_mac(
self, expected_status=webob.exc.HTTPOk.code,
expected_error='StateInvalid', subnet=None,
device_owner=DEVICE_OWNER_COMPUTE, updated_fixed_ips=None,
host_arg={}, arg_list=[]):
with self.port(device_owner=device_owner, subnet=subnet,
arg_list=arg_list, **host_arg) as port:
self.assertIn('mac_address', port['port'])
res, new_mac = self.update_port_mac(
port['port'], updated_fixed_ips=updated_fixed_ips)
self.assertEqual(expected_status, res.status_int)
if expected_status == webob.exc.HTTPOk.code:
result = self.deserialize(self.fmt, res)
self.assertIn('port', result)
self.assertEqual(new_mac, result['port']['mac_address'])
if subnet and subnet['subnet']['ip_version'] == 6:
self._check_v6_auto_address_address(port, subnet)
else:
error = self.deserialize(self.fmt, res)
self.assertEqual(expected_error,
error['NeutronError']['type'])
    def test_update_port_mac(self):
        """Default MAC-update check: expects a successful update."""
        self.check_update_port_mac()
        # sub-classes for plugins/drivers that support mac address update
        # override this method
def test_update_port_mac_ip(self):
with self.subnet() as subnet:
updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.3'}]
self.check_update_port_mac(subnet=subnet,
updated_fixed_ips=updated_fixed_ips)
    def test_update_port_mac_v6_slaac(self):
        """On a SLAAC subnet a MAC update must also update the EUI-64 IP."""
        with self.subnet(gateway_ip='fe80::1',
                         cidr='2607:f0d0:1002:51::/64',
                         ip_version=6,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            self.assertTrue(
                ipv6_utils.is_auto_address_subnet(subnet['subnet']))
            self.check_update_port_mac(subnet=subnet)
def test_update_port_mac_bad_owner(self):
self.check_update_port_mac(
device_owner=DEVICE_OWNER_NOT_COMPUTE,
expected_status=webob.exc.HTTPConflict.code,
expected_error='UnsupportedPortDeviceOwner')
    def check_update_port_mac_used(self, expected_error='MacAddressInUse'):
        """Updating a port to another port's MAC must fail with 409 and
        the given error type."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                with self.port(subnet=subnet) as port2:
                    self.assertIn('mac_address', port['port'])
                    # Steal the second port's MAC for the first port.
                    new_mac = port2['port']['mac_address']
                    data = {'port': {'mac_address': new_mac}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPConflict.code,
                                     res.status_int)
                    error = self.deserialize(self.fmt, res)
                    self.assertEqual(expected_error,
                                     error['NeutronError']['type'])
    def test_update_port_mac_used(self):
        """Updating to an in-use MAC must fail with MacAddressInUse."""
        self.check_update_port_mac_used()
def test_update_port_not_admin(self):
res = self._create_network(self.fmt, 'net1', True,
tenant_id='not_admin',
set_context=True)
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
tenant_id='not_admin', set_context=True)
port = self.deserialize(self.fmt, res)
data = {'port': {'admin_state_up': False}}
neutron_context = context.Context('', 'not_admin')
port = self._update('ports', port['port']['id'], data,
neutron_context=neutron_context)
self.assertEqual(port['port']['admin_state_up'], False)
def test_update_device_id_unchanged(self):
with self.port() as port:
data = {'port': {'admin_state_up': True,
'device_id': port['port']['device_id']}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'], True)
def test_update_device_id_null(self):
with self.port() as port:
data = {'port': {'device_id': None}}
req = self.new_update_request('ports', data, port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_delete_network_if_port_exists(self):
with self.port() as port:
req = self.new_delete_request('networks',
port['port']['network_id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_delete_network_port_exists_owned_by_network(self):
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
self._create_port(self.fmt, network_id,
device_owner=constants.DEVICE_OWNER_DHCP)
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
    def test_update_port_delete_ip(self):
        """Updating fixed_ips to [] removes all addresses from the port."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                data = {'port': {'admin_state_up': False,
                                 'fixed_ips': []}}
                req = self.new_update_request('ports',
                                              data, port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(res['port']['admin_state_up'],
                                 data['port']['admin_state_up'])
                self.assertEqual(res['port']['fixed_ips'],
                                 data['port']['fixed_ips'])
def test_no_more_port_exception(self):
with self.subnet(cidr='10.0.0.0/32', enable_dhcp=False) as subnet:
id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, id)
data = self.deserialize(self.fmt, res)
msg = str(n_exc.IpAddressGenerationFailure(net_id=id))
self.assertEqual(data['NeutronError']['message'], msg)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_update_port_update_ip(self):
        """Test update of port IP.

        Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
        """
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                data = {'port': {'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id'],
                                                'ip_address': "10.0.0.10"}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
    def test_update_port_update_ip_address_only(self):
        """A fixed_ips entry giving only an ip_address (no subnet_id) is
        resolved to the right subnet and kept alongside the new IP."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                data = {'port': {'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id'],
                                                'ip_address': "10.0.0.10"},
                                               {'ip_address': "10.0.0.2"}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 2)
                self.assertIn({'ip_address': '10.0.0.2',
                               'subnet_id': subnet['subnet']['id']}, ips)
                self.assertIn({'ip_address': '10.0.0.10',
                               'subnet_id': subnet['subnet']['id']}, ips)
    def test_update_port_update_ips(self):
        """Update IP and associate new IP on port.

        Check a port update with the specified subnet_id's. A IP address
        will be allocated for each subnet_id.
        """
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                data = {'port': {'admin_state_up': False,
                                 'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id'],
                                                'ip_address': '10.0.0.3'}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(res['port']['admin_state_up'],
                                 data['port']['admin_state_up'])
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
    def test_update_port_add_additional_ip(self):
        """Test update of port with additional IP."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                # Two subnet_id-only entries request two fresh allocations.
                data = {'port': {'admin_state_up': False,
                                 'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id']},
                                               {'subnet_id':
                                                subnet['subnet']['id']}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(res['port']['admin_state_up'],
                                 data['port']['admin_state_up'])
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 2)
                # 10.0.0.2 was freed by the update; the next two allocations
                # are expected to be .3 and .4.
                self.assertIn({'ip_address': '10.0.0.3',
                               'subnet_id': subnet['subnet']['id']}, ips)
                self.assertIn({'ip_address': '10.0.0.4',
                               'subnet_id': subnet['subnet']['id']}, ips)
    def test_update_port_invalid_fixed_ip_address_v6_slaac(self):
        """On a SLAAC subnet a manually chosen fixed IP must be rejected
        (InvalidInput), since addresses are EUI-64-derived from the MAC."""
        with self.subnet(
            cidr='2607:f0d0:1002:51::/64',
            ip_version=6,
            ipv6_address_mode=constants.IPV6_SLAAC,
            gateway_ip=attributes.ATTR_NOT_SPECIFIED) as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                port_mac = port['port']['mac_address']
                subnet_cidr = subnet['subnet']['cidr']
                eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
                                                                 port_mac))
                self.assertEqual(ips[0]['ip_address'], eui_addr)
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])

                data = {'port': {'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id'],
                                                'ip_address':
                                                '2607:f0d0:1002:51::5'}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = req.get_response(self.api)
                err = self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPClientError.code)
                self.assertEqual(err['NeutronError']['type'], 'InvalidInput')
    def test_requested_duplicate_mac(self):
        """Creating a second port with an already-used MAC returns 409."""
        with self.port() as port:
            mac = port['port']['mac_address']
            # check that MAC address matches base MAC
            base_mac = cfg.CONF.base_mac[0:2]
            self.assertTrue(mac.startswith(base_mac))
            kwargs = {"mac_address": mac}
            net_id = port['port']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_mac_generation(self):
cfg.CONF.set_override('base_mac', "12:34:56:00:00:00")
with self.port() as port:
mac = port['port']['mac_address']
self.assertTrue(mac.startswith("12:34:56"))
def test_mac_generation_4octet(self):
cfg.CONF.set_override('base_mac', "12:34:56:78:00:00")
with self.port() as port:
mac = port['port']['mac_address']
self.assertTrue(mac.startswith("12:34:56:78"))
def test_bad_mac_format(self):
cfg.CONF.set_override('base_mac', "bad_mac")
try:
self.plugin._check_base_mac_format()
except Exception:
return
self.fail("No exception for illegal base_mac format")
    def test_mac_exhaustion(self):
        """Exhausted MAC retries surface as 503 Service Unavailable."""
        # rather than actually consuming all MAC (would take a LONG time)
        # we try to allocate an already allocated mac address
        cfg.CONF.set_override('mac_generation_retries', 3)
        res = self._create_network(fmt=self.fmt, name='net1',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        net_id = network['network']['id']
        error = n_exc.MacAddressInUse(net_id=net_id, mac='00:11:22:33:44:55')
        # Force every allocation attempt to fail with MacAddressInUse.
        with mock.patch.object(
            neutron.db.db_base_plugin_v2.NeutronDbPluginV2,
            '_create_port_with_mac', side_effect=error) as create_mock:
            res = self._create_port(self.fmt, net_id=net_id)
            self.assertEqual(res.status_int,
                             webob.exc.HTTPServiceUnavailable.code)
            # Exactly 'mac_generation_retries' attempts must be made.
            self.assertEqual(3, create_mock.call_count)
    def test_requested_duplicate_ip(self):
        """Requesting an already-allocated fixed IP returns 409."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Check configuring of duplicate IP
                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                         'ip_address': ips[0]['ip_address']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_requested_subnet_id(self):
        """A port requesting only a subnet_id gets the next free address."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Request a IP from specific subnet
                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                port2 = self.deserialize(self.fmt, res)
                ips = port2['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                # .2 was taken above, so the new port must get .3.
                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                self._delete('ports', port2['port']['id'])
def test_requested_subnet_id_not_on_network(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
# Create new network
res = self._create_network(fmt=self.fmt, name='net2',
admin_state_up=True)
network2 = self.deserialize(self.fmt, res)
subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1",
"1.1.1.0/24", ip_version=4)
net_id = port['port']['network_id']
# Request a IP from specific subnet
kwargs = {"fixed_ips": [{'subnet_id':
subnet2['subnet']['id']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_overlapping_subnets(self):
with self.subnet() as subnet:
tenant_id = subnet['subnet']['tenant_id']
net_id = subnet['subnet']['network_id']
res = self._create_subnet(self.fmt,
tenant_id=tenant_id,
net_id=net_id,
cidr='10.0.0.225/28',
ip_version=4,
gateway_ip=attributes.ATTR_NOT_SPECIFIED)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_requested_subnet_id_v4_and_v6(self):
        """A port may request one fixed IP from a v4 and a v6 subnet.

        Also verifies that a port created without fixed_ips on a
        dual-stack network receives one address from each subnet.
        """
        with self.subnet() as subnet:
            # Get a IPv4 and IPv6 address
            tenant_id = subnet['subnet']['tenant_id']
            net_id = subnet['subnet']['network_id']
            res = self._create_subnet(
                self.fmt,
                tenant_id=tenant_id,
                net_id=net_id,
                cidr='2607:f0d0:1002:51::/124',
                ip_version=6,
                gateway_ip=attributes.ATTR_NOT_SPECIFIED)
            subnet2 = self.deserialize(self.fmt, res)
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet2['subnet']['id']}]}
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            port3 = self.deserialize(self.fmt, res)
            ips = port3['port']['fixed_ips']
            self.assertEqual(len(ips), 2)
            self.assertIn({'ip_address': '10.0.0.2',
                           'subnet_id': subnet['subnet']['id']}, ips)
            self.assertIn({'ip_address': '2607:f0d0:1002:51::2',
                           'subnet_id': subnet2['subnet']['id']}, ips)
            res = self._create_port(self.fmt, net_id=net_id)
            port4 = self.deserialize(self.fmt, res)
            # Check that a v4 and a v6 address are allocated
            ips = port4['port']['fixed_ips']
            self.assertEqual(len(ips), 2)
            self.assertIn({'ip_address': '10.0.0.3',
                           'subnet_id': subnet['subnet']['id']}, ips)
            self.assertIn({'ip_address': '2607:f0d0:1002:51::3',
                           'subnet_id': subnet2['subnet']['id']}, ips)
            self._delete('ports', port3['port']['id'])
            self._delete('ports', port4['port']['id'])
    def test_requested_invalid_fixed_ip_address_v6_slaac(self):
        """An explicit fixed IP on a SLAAC subnet is rejected at create."""
        with self.subnet(gateway_ip='fe80::1',
                         cidr='2607:f0d0:1002:51::/64',
                         ip_version=6,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            # SLAAC addresses are MAC-derived; explicit ones are invalid.
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '2607:f0d0:1002:51::5'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int,
                             webob.exc.HTTPClientError.code)
    @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
                       '_allocate_specific_ip')
    def test_requested_fixed_ip_address_v6_slaac_router_iface(
            self, alloc_specific_ip):
        """A router interface may claim the SLAAC subnet's gateway address.

        The gateway IP is granted without going through
        _allocate_specific_ip (mocked and asserted not called).
        """
        with self.subnet(gateway_ip='fe80::1',
                         cidr='fe80::/64',
                         ip_version=6,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': 'fe80::1'}]}
            net_id = subnet['subnet']['network_id']
            device_owner = constants.DEVICE_OWNER_ROUTER_INTF
            res = self._create_port(self.fmt, net_id=net_id,
                                    device_owner=device_owner, **kwargs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(len(port['port']['fixed_ips']), 1)
            self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
                             'fe80::1')
            self.assertFalse(alloc_specific_ip.called)
    def test_requested_subnet_id_v6_slaac(self):
        """Requesting a SLAAC subnet by id allocates the EUI-64 address."""
        with self.subnet(gateway_ip='fe80::1',
                         cidr='2607:f0d0:1002:51::/64',
                         ip_version=6,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            with self.port(subnet,
                           fixed_ips=[{'subnet_id':
                                       subnet['subnet']['id']}]) as port:
                # The allocated address must equal the EUI-64 address
                # derived from the port MAC and the subnet CIDR.
                port_mac = port['port']['mac_address']
                subnet_cidr = subnet['subnet']['cidr']
                eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
                                                                 port_mac))
                self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
                                 eui_addr)
    def test_requested_subnet_id_v4_and_v6_slaac(self):
        """A port on a v4 subnet and a v6 SLAAC subnet gets both addresses."""
        with self.network() as network:
            with self.subnet(network) as subnet,\
                    self.subnet(
                        network,
                        cidr='2607:f0d0:1002:51::/64',
                        ip_version=6,
                        gateway_ip='fe80::1',
                        ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
                with self.port(
                    subnet,
                    fixed_ips=[{'subnet_id': subnet['subnet']['id']},
                               {'subnet_id': subnet2['subnet']['id']}]
                ) as port:
                    ips = port['port']['fixed_ips']
                    self.assertEqual(len(ips), 2)
                    self.assertIn({'ip_address': '10.0.0.2',
                                   'subnet_id': subnet['subnet']['id']}, ips)
                    # The v6 address must be the EUI-64 address derived
                    # from the port MAC and the SLAAC subnet CIDR.
                    port_mac = port['port']['mac_address']
                    subnet_cidr = subnet2['subnet']['cidr']
                    eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
                            subnet_cidr, port_mac))
                    self.assertIn({'ip_address': eui_addr,
                                   'subnet_id': subnet2['subnet']['id']}, ips)
    def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self):
        """A router port without fixed_ips skips SLAAC subnets entirely."""
        with self.network() as network:
            # Create an IPv4 and an IPv6 SLAAC subnet on the network
            with self.subnet(network),\
                    self.subnet(network,
                                cidr='2607:f0d0:1002:51::/64',
                                ip_version=6,
                                gateway_ip='fe80::1',
                                ipv6_address_mode=constants.IPV6_SLAAC):
                # Create a router port without specifying fixed_ips
                port = self._make_port(
                    self.fmt, network['network']['id'],
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
                # Router port should only have an IPv4 address
                fixed_ips = port['port']['fixed_ips']
                self.assertEqual(1, len(fixed_ips))
                self.assertEqual('10.0.0.2', fixed_ips[0]['ip_address'])
    def _make_v6_subnet(self, network, ra_addr_mode):
        """Create a fe80::/64 v6 subnet with RA and address mode both set
        to ra_addr_mode; returns the subnet API dict."""
        return (self._make_subnet(self.fmt, network, gateway='fe80::1',
                                  cidr='fe80::/64', ip_version=6,
                                  ipv6_ra_mode=ra_addr_mode,
                                  ipv6_address_mode=ra_addr_mode))
    @staticmethod
    def _calc_ipv6_addr_by_EUI64(port, subnet):
        """Return the EUI-64 address for the port's MAC on the subnet CIDR."""
        port_mac = port['port']['mac_address']
        subnet_cidr = subnet['subnet']['cidr']
        return str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac))
    def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self):
        """A port on a SLAAC-mode subnet auto-receives its EUI-64 address."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        port = self._make_port(self.fmt, network['network']['id'])
        self.assertEqual(1, len(port['port']['fixed_ips']))
        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet),
                         port['port']['fixed_ips'][0]['ip_address'])
    def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode):
        """Test port create with an IPv6 subnet incl in fixed IPs."""
        with self.network(name='net') as network:
            subnet = self._make_v6_subnet(network, addr_mode)
            subnet_id = subnet['subnet']['id']
            fixed_ips = [{'subnet_id': subnet_id}]
            with self.port(subnet=subnet, fixed_ips=fixed_ips) as port:
                # SLAAC subnets yield the MAC-derived EUI-64 address;
                # stateful DHCPv6 allocates sequentially from ::2.
                if addr_mode == constants.IPV6_SLAAC:
                    exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port, subnet)
                else:
                    exp_ip_addr = 'fe80::2'
                port_fixed_ips = port['port']['fixed_ips']
                self.assertEqual(1, len(port_fixed_ips))
                self.assertEqual(exp_ip_addr,
                                 port_fixed_ips[0]['ip_address'])
def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.IPV6_SLAAC)
def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self):
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.DHCPV6_STATEFUL)
    def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
        """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Two v4 subnets, two v6 SLAAC subnets, two stateful DHCPv6 subnets.
        sub_dicts = [
            {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
             'ip_version': 4, 'ra_addr_mode': None},
            {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
             'ip_version': 4, 'ra_addr_mode': None},
            {'gateway': 'fe80::1', 'cidr': 'fe80::/64',
             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
            {'gateway': 'fe81::1', 'cidr': 'fe81::/64',
             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
            {'gateway': 'fe82::1', 'cidr': 'fe82::/64',
             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
            {'gateway': 'fe83::1', 'cidr': 'fe83::/64',
             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
        subnets = {}
        for sub_dict in sub_dicts:
            subnet = self._make_subnet(
                self.fmt, network,
                gateway=sub_dict['gateway'],
                cidr=sub_dict['cidr'],
                ip_version=sub_dict['ip_version'],
                ipv6_ra_mode=sub_dict['ra_addr_mode'],
                ipv6_address_mode=sub_dict['ra_addr_mode'])
            subnets[subnet['subnet']['id']] = sub_dict
        res = self._create_port(self.fmt, net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        # Since the create port request was made without a list of fixed IPs,
        # the port should be associated with addresses for one of the
        # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
        # SLAAC subnets.
        self.assertEqual(4, len(port['port']['fixed_ips']))
        # Tally the allocations by the owning subnet's address mode.
        addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
                           constants.IPV6_SLAAC: 0}
        for fixed_ip in port['port']['fixed_ips']:
            subnet_id = fixed_ip['subnet_id']
            if subnet_id in subnets:
                addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
        self.assertEqual(1, addr_mode_count[None])
        self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
        self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
    def test_delete_port_with_ipv6_slaac_address(self):
        """Test that a port with an IPv6 SLAAC address can be deleted."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port that has an associated IPv6 SLAAC address
        self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt, net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(port['port']['fixed_ips']))
        # Confirm that the port can be deleted
        self._delete('ports', port['port']['id'])
        # A subsequent show must 404.
        self._show('ports', port['port']['id'],
                   expected_code=webob.exc.HTTPNotFound.code)
    def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
        """Test port update with an IPv6 SLAAC subnet in fixed IPs."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
        self._make_subnet(self.fmt, network, gateway='10.0.0.1',
                          cidr='10.0.0.0/24', ip_version=4)
        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt, net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(port['port']['fixed_ips']))
        # Update port including only the IPv6 SLAAC subnet
        data = {'port': {'fixed_ips': [{'subnet_id':
                                        subnet_v6['subnet']['id']}]}}
        req = self.new_update_request('ports', data,
                                      port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # Port should only have an address corresponding to IPv6 SLAAC subnet
        ips = res['port']['fixed_ips']
        self.assertEqual(1, len(ips))
        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet_v6),
                         ips[0]['ip_address'])
    def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self):
        """Test port update excluding IPv6 SLAAC subnet from fixed ips."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
        subnet_v4 = self._make_subnet(self.fmt, network, gateway='10.0.0.1',
                                      cidr='10.0.0.0/24', ip_version=4)
        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt, net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(port['port']['fixed_ips']))
        # Update port including only the IPv4 subnet
        data = {'port': {'fixed_ips': [{'subnet_id':
                                        subnet_v4['subnet']['id'],
                                        'ip_address': "10.0.0.10"}]}}
        req = self.new_update_request('ports', data,
                                      port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # Port should still have an addr corresponding to IPv6 SLAAC subnet
        ips = res['port']['fixed_ips']
        self.assertEqual(2, len(ips))
        eui_addr = self._calc_ipv6_addr_by_EUI64(port, subnet_v6)
        expected_v6_ip = {'subnet_id': subnet_v6['subnet']['id'],
                          'ip_address': eui_addr}
        self.assertIn(expected_v6_ip, ips)
    def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self):
        """A port on two SLAAC subnets gets one EUI-64 address from each."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        v6_subnet_1 = self._make_subnet(self.fmt, network,
                                        gateway='2001:100::1',
                                        cidr='2001:100::0/64',
                                        ip_version=6,
                                        ipv6_ra_mode=constants.IPV6_SLAAC)
        v6_subnet_2 = self._make_subnet(self.fmt, network,
                                        gateway='2001:200::1',
                                        cidr='2001:200::0/64',
                                        ip_version=6,
                                        ipv6_ra_mode=constants.IPV6_SLAAC)
        port = self._make_port(self.fmt, network['network']['id'])
        # Both addresses are derived from the same MAC, one per CIDR.
        port_mac = port['port']['mac_address']
        cidr_1 = v6_subnet_1['subnet']['cidr']
        cidr_2 = v6_subnet_2['subnet']['cidr']
        eui_addr_1 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_1,
                                                           port_mac))
        eui_addr_2 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_2,
                                                           port_mac))
        self.assertEqual({eui_addr_1, eui_addr_2},
                         {fixed_ip['ip_address'] for fixed_ip in
                          port['port']['fixed_ips']})
    def test_range_allocation(self):
        """Allocating 5 IPs from a /29 consumes the whole usable range.

        Runs the scenario twice: once with the gateway mid-range
        (10.0.0.3, which must be skipped) and once with the gateway at
        the end of the range (11.0.0.6).
        """
        with self.subnet(gateway_ip='10.0.0.3',
                         cidr='10.0.0.0/29') as subnet:
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            port = self.deserialize(self.fmt, res)
            ips = port['port']['fixed_ips']
            self.assertEqual(len(ips), 5)
            # Every usable address except the gateway (.3) must appear
            # exactly once.
            alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5',
                     '10.0.0.6']
            for ip in ips:
                self.assertIn(ip['ip_address'], alloc)
                self.assertEqual(ip['subnet_id'],
                                 subnet['subnet']['id'])
                alloc.remove(ip['ip_address'])
            self.assertEqual(len(alloc), 0)
            self._delete('ports', port['port']['id'])

        with self.subnet(gateway_ip='11.0.0.6',
                         cidr='11.0.0.0/29') as subnet:
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            port = self.deserialize(self.fmt, res)
            ips = port['port']['fixed_ips']
            self.assertEqual(len(ips), 5)
            # Gateway at .6, so .1 through .5 are allocatable.
            alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4',
                     '11.0.0.5']
            for ip in ips:
                self.assertIn(ip['ip_address'], alloc)
                self.assertEqual(ip['subnet_id'],
                                 subnet['subnet']['id'])
                alloc.remove(ip['ip_address'])
            self.assertEqual(len(alloc), 0)
            self._delete('ports', port['port']['id'])
def test_requested_invalid_fixed_ips(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Test invalid subnet_id
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id':
'00000000-ffff-ffff-ffff-000000000000'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
# Test invalid IP address on specified subnet_id
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '1.1.1.1'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
# Test invalid addresses - IP's not on subnet or network
# address or broadcast address
bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255']
net_id = port['port']['network_id']
for ip in bad_ips:
kwargs = {"fixed_ips": [{'ip_address': ip}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
# Enable allocation of gateway address
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.1'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.1')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._delete('ports', port2['port']['id'])
def test_invalid_ip(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '1011.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_requested_split(self):
        """Allocation fills the gaps around an explicitly requested IP.

        After .2 (implicit) and .5 (explicit) are taken, subsequent
        automatic allocations must hand out .3, .4 and then .6.
        """
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ports_to_delete = []
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Allocate specific IP
                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                         'ip_address': '10.0.0.5'}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                port2 = self.deserialize(self.fmt, res)
                ports_to_delete.append(port2)
                ips = port2['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.5')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Allocate specific IP's
                allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6']
                for a in allocated:
                    res = self._create_port(self.fmt, net_id=net_id)
                    port2 = self.deserialize(self.fmt, res)
                    ports_to_delete.append(port2)
                    ips = port2['port']['fixed_ips']
                    self.assertEqual(len(ips), 1)
                    self.assertEqual(ips[0]['ip_address'], a)
                    self.assertEqual(ips[0]['subnet_id'],
                                     subnet['subnet']['id'])
                for p in ports_to_delete:
                    self._delete('ports', p['port']['id'])
def test_duplicate_ips(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_fixed_ip_invalid_subnet_id(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid',
'ip_address': '10.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_fixed_ip_invalid_ip(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.55555'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_ips_only(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21',
'10.0.0.3', '10.0.0.17', '10.0.0.19']
ports_to_delete = []
for i in ips_only:
kwargs = {"fixed_ips": [{'ip_address': i}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ports_to_delete.append(port)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], i)
self.assertEqual(ips[0]['subnet_id'],
subnet['subnet']['id'])
for p in ports_to_delete:
self._delete('ports', p['port']['id'])
def test_invalid_admin_state(self):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id'],
'admin_state_up': 7,
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_mac_address(self):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id'],
'admin_state_up': 1,
'mac_address': 'mac',
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_max_fixed_ips_exceeded(self):
        """Requesting more fixed IPs than allowed on create is rejected."""
        with self.subnet(gateway_ip='10.0.0.3',
                         cidr='10.0.0.0/24') as subnet:
            # Six entries exceed the per-port fixed-ip limit.
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int,
                             webob.exc.HTTPClientError.code)
    def test_update_max_fixed_ips_exceeded(self):
        """Requesting more fixed IPs than allowed on update is rejected."""
        with self.subnet(gateway_ip='10.0.0.3',
                         cidr='10.0.0.0/24') as subnet:
            with self.port(subnet) as port:
                # Six entries exceed the per-port fixed-ip limit.
                data = {'port': {'fixed_ips':
                                 [{'subnet_id': subnet['subnet']['id'],
                                   'ip_address': '10.0.0.2'},
                                  {'subnet_id': subnet['subnet']['id'],
                                   'ip_address': '10.0.0.4'},
                                  {'subnet_id': subnet['subnet']['id']},
                                  {'subnet_id': subnet['subnet']['id']},
                                  {'subnet_id': subnet['subnet']['id']},
                                  {'subnet_id': subnet['subnet']['id']}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPClientError.code)
    def test_delete_ports_by_device_id(self):
        """delete_ports_by_device_id removes only the matching device's
        ports; ports owned by another device survive."""
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        with self.subnet() as subnet:
            with self.port(subnet=subnet, device_id='owner1') as p1,\
                    self.port(subnet=subnet, device_id='owner1') as p2,\
                    self.port(subnet=subnet, device_id='owner2') as p3:
                network_id = subnet['subnet']['network_id']
                plugin.delete_ports_by_device_id(ctx, 'owner1',
                                                 network_id)
                # owner1's ports are gone, owner2's port remains.
                self._show('ports', p1['port']['id'],
                           expected_code=webob.exc.HTTPNotFound.code)
                self._show('ports', p2['port']['id'],
                           expected_code=webob.exc.HTTPNotFound.code)
                self._show('ports', p3['port']['id'],
                           expected_code=webob.exc.HTTPOk.code)
    def _test_delete_ports_by_device_id_second_call_failure(self, plugin):
        """Scenario helper: the second delete_port call fails.

        Exactly one of the device's two ports must have been deleted
        before the failure propagates, and the unrelated port survives.
        """
        ctx = context.get_admin_context()
        with self.subnet() as subnet:
            with self.port(subnet=subnet, device_id='owner1') as p1,\
                    self.port(subnet=subnet, device_id='owner1') as p2,\
                    self.port(subnet=subnet, device_id='owner2') as p3:
                orig = plugin.delete_port
                with mock.patch.object(plugin, 'delete_port') as del_port:

                    def side_effect(*args, **kwargs):
                        # Delegates to the real delete_port, raising on
                        # the second invocation only.
                        return self._fail_second_call(del_port, orig,
                                                      *args, **kwargs)

                    del_port.side_effect = side_effect
                    network_id = subnet['subnet']['network_id']
                    self.assertRaises(n_exc.NeutronException,
                                      plugin.delete_ports_by_device_id,
                                      ctx, 'owner1', network_id)
                # One of p1/p2 was deleted (404), the other still exists.
                statuses = {
                    self._show_response('ports', p['port']['id']).status_int
                    for p in [p1, p2]}
                expected = {webob.exc.HTTPNotFound.code, webob.exc.HTTPOk.code}
                self.assertEqual(expected, statuses)
                self._show('ports', p3['port']['id'],
                           expected_code=webob.exc.HTTPOk.code)
def test_delete_ports_by_device_id_second_call_failure(self):
plugin = manager.NeutronManager.get_plugin()
self._test_delete_ports_by_device_id_second_call_failure(plugin)
    def _test_delete_ports_ignores_port_not_found(self, plugin):
        """Scenario helper: PortNotFound during bulk delete is swallowed."""
        ctx = context.get_admin_context()
        with self.subnet() as subnet:
            with self.port(subnet=subnet, device_id='owner1') as p,\
                    mock.patch.object(plugin, 'delete_port') as del_port:
                # Simulate a concurrent deletion of the same port.
                del_port.side_effect = n_exc.PortNotFound(
                    port_id=p['port']['id']
                )
                network_id = subnet['subnet']['network_id']
                try:
                    plugin.delete_ports_by_device_id(ctx, 'owner1',
                                                     network_id)
                except n_exc.PortNotFound:
                    self.fail("delete_ports_by_device_id unexpectedly raised "
                              "a PortNotFound exception. It should ignore "
                              "this exception because it is often called at "
                              "the same time other concurrent operations are "
                              "deleting some of the same ports.")
def test_delete_ports_ignores_port_not_found(self):
plugin = manager.NeutronManager.get_plugin()
self._test_delete_ports_ignores_port_not_found(plugin)
class TestNetworksV2(NeutronDbPluginV2TestCase):
# NOTE(cerberus): successful network update and delete are
# effectively tested above
    def test_create_network(self):
        """A plain network is created with the expected default attributes."""
        name = 'net1'
        keys = [('subnets', []), ('name', name), ('admin_state_up', True),
                ('status', self.net_create_status), ('shared', False)]
        with self.network(name=name) as net:
            for k, v in keys:
                self.assertEqual(net['network'][k], v)
def test_create_public_network(self):
name = 'public_net'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', True)]
with self.network(name=name, shared=True) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
    def test_create_public_network_no_admin_tenant(self):
        """A non-admin tenant may not create a shared network (403)."""
        name = 'public_net'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            with self.network(name=name,
                              shared=True,
                              tenant_id="another_tenant",
                              set_context=True):
                pass
        self.assertEqual(ctx_manager.exception.code,
                         webob.exc.HTTPForbidden.code)
    def test_update_network(self):
        """Updating a network's name is reflected in the response."""
        with self.network() as network:
            data = {'network': {'name': 'a_brand_new_name'}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['network']['name'],
                             data['network']['name'])
    def test_update_shared_network_noadmin_returns_403(self):
        """A non-owner may not update a shared network (masked as 404)."""
        with self.network(shared=True) as network:
            data = {'network': {'name': 'a_brand_new_name'}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            req.environ['neutron.context'] = context.Context('', 'somebody')
            res = req.get_response(self.api)
            # The API layer always returns 404 on updates in place of 403
            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_network_set_shared(self):
with self.network(shared=False) as network:
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
    def test_update_network_set_shared_owner_returns_403(self):
        """The owning (non-admin) tenant cannot make its network shared."""
        with self.network(shared=False) as network:
            net_owner = network['network']['tenant_id']
            data = {'network': {'shared': True}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            # Issue the update as the owner, not as admin.
            req.environ['neutron.context'] = context.Context('u', net_owner)
            res = req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPForbidden.code)
    def test_update_network_with_subnet_set_shared(self):
        """Sharing a network propagates the shared flag to its subnets."""
        with self.network(shared=False) as network:
            with self.subnet(network=network) as subnet:
                data = {'network': {'shared': True}}
                req = self.new_update_request('networks',
                                              data,
                                              network['network']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertTrue(res['network']['shared'])
                # must query db to see whether subnet's shared attribute
                # has been updated or not
                ctx = context.Context('', '', is_admin=True)
                subnet_db = manager.NeutronManager.get_plugin()._get_subnet(
                    ctx, subnet['subnet']['id'])
                self.assertEqual(subnet_db['shared'], True)
    def test_update_network_set_not_shared_single_tenant(self):
        """Un-sharing succeeds when only the owner tenant has ports."""
        with self.network(shared=True) as network:
            res1 = self._create_port(self.fmt,
                                     network['network']['id'],
                                     webob.exc.HTTPCreated.code,
                                     tenant_id=network['network']['tenant_id'],
                                     set_context=True)
            data = {'network': {'shared': False}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertFalse(res['network']['shared'])
            port1 = self.deserialize(self.fmt, res1)
            self._delete('ports', port1['port']['id'])
    def test_update_network_set_not_shared_other_tenant_returns_409(self):
        """Un-sharing fails with 409 while another tenant still has a port."""
        with self.network(shared=True) as network:
            res1 = self._create_port(self.fmt,
                                     network['network']['id'],
                                     webob.exc.HTTPCreated.code,
                                     tenant_id='somebody_else',
                                     set_context=True)
            data = {'network': {'shared': False}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            self.assertEqual(req.get_response(self.api).status_int,
                             webob.exc.HTTPConflict.code)
            port1 = self.deserialize(self.fmt, res1)
            self._delete('ports', port1['port']['id'])
    def test_update_network_set_not_shared_multi_tenants_returns_409(self):
        """Un-sharing fails with 409 when owner and another tenant both
        have ports on the network."""
        with self.network(shared=True) as network:
            res1 = self._create_port(self.fmt,
                                     network['network']['id'],
                                     webob.exc.HTTPCreated.code,
                                     tenant_id='somebody_else',
                                     set_context=True)
            res2 = self._create_port(self.fmt,
                                     network['network']['id'],
                                     webob.exc.HTTPCreated.code,
                                     tenant_id=network['network']['tenant_id'],
                                     set_context=True)
            data = {'network': {'shared': False}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            self.assertEqual(req.get_response(self.api).status_int,
                             webob.exc.HTTPConflict.code)
            port1 = self.deserialize(self.fmt, res1)
            port2 = self.deserialize(self.fmt, res2)
            self._delete('ports', port1['port']['id'])
            self._delete('ports', port2['port']['id'])
    def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
        """Un-sharing fails with 409 when another tenant has a port and the
        owner has a subnet on the network."""
        with self.network(shared=True) as network:
            res1 = self._create_port(self.fmt,
                                     network['network']['id'],
                                     webob.exc.HTTPCreated.code,
                                     tenant_id='somebody_else',
                                     set_context=True)
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPCreated.code,
                                tenant_id=network['network']['tenant_id'],
                                set_context=True)
            data = {'network': {'shared': False}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            self.assertEqual(req.get_response(self.api).status_int,
                             webob.exc.HTTPConflict.code)
            port1 = self.deserialize(self.fmt, res1)
            self._delete('ports', port1['port']['id'])
    def test_create_networks_bulk_native(self):
        """Bulk network create succeeds via the plugin's native bulk path."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        res = self._create_network_bulk(self.fmt, 2, 'test', True)
        self._validate_behavior_on_bulk_success(res, 'networks')
    def test_create_networks_bulk_native_quotas(self):
        """Native bulk create of quota+1 networks fails with 409."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        quota = 4
        cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
        # One network over quota: whole bulk request must be rejected.
        res = self._create_network_bulk(self.fmt, quota + 1, 'test', True)
        self._validate_behavior_on_bulk_failure(
            res, 'networks',
            errcode=webob.exc.HTTPConflict.code)
    def test_create_networks_bulk_tenants_and_quotas(self):
        """Quota is enforced per tenant: two nets per tenant under a
        quota of 2 must succeed for both tenants."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        quota = 2
        cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
        networks = [{'network': {'name': 'n1',
                                 'tenant_id': self._tenant_id}},
                    {'network': {'name': 'n2',
                                 'tenant_id': self._tenant_id}},
                    {'network': {'name': 'n1',
                                 'tenant_id': 't1'}},
                    {'network': {'name': 'n2',
                                 'tenant_id': 't1'}}]
        res = self._create_bulk_from_list(self.fmt, 'network', networks)
        self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
    def test_create_networks_bulk_tenants_and_quotas_fail(self):
        """Per-tenant quota violation inside a bulk request returns 409.

        The primary tenant requests three networks ('n1', 'n2', 'n3')
        against a quota of 2, so the whole bulk create must fail.
        """
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        quota = 2
        cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
        networks = [{'network': {'name': 'n1',
                                 'tenant_id': self._tenant_id}},
                    {'network': {'name': 'n2',
                                 'tenant_id': self._tenant_id}},
                    {'network': {'name': 'n1',
                                 'tenant_id': 't1'}},
                    {'network': {'name': 'n3',
                                 'tenant_id': self._tenant_id}},
                    {'network': {'name': 'n2',
                                 'tenant_id': 't1'}}]
        res = self._create_bulk_from_list(self.fmt, 'network', networks)
        self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_create_networks_bulk_emulated(self):
        """Bulk network create succeeds via the API-emulated bulk path."""
        real_has_attr = hasattr
        # Ensure the API chooses the emulation code path by hiding the
        # plugin's native bulk support attribute.
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            res = self._create_network_bulk(self.fmt, 2, 'test', True)
            self._validate_behavior_on_bulk_success(res, 'networks')
    def test_create_networks_bulk_wrong_input(self):
        """An invalid attribute in one bulk item fails the request and
        leaves no partially-created networks behind."""
        res = self._create_network_bulk(self.fmt, 2, 'test', True,
                                        override={1:
                                                  {'admin_state_up': 'doh'}})
        self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
        # Verify atomicity: neither network of the bulk request exists.
        req = self.new_list_request('networks')
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
        nets = self.deserialize(self.fmt, res)
        self.assertEqual(len(nets['networks']), 0)
    def test_create_networks_bulk_emulated_plugin_failure(self):
        """A plugin fault on the second item of an emulated bulk create
        surfaces as a 500 for the whole request."""
        real_has_attr = hasattr
        # Hide native bulk support so the API emulates bulk creation.
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)
        orig = manager.NeutronManager.get_plugin().create_network
        #ensures the API choose the emulation code path
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            method_to_patch = _get_create_db_method('network')
            with mock.patch.object(manager.NeutronManager.get_plugin(),
                                   method_to_patch) as patched_plugin:
                def side_effect(*args, **kwargs):
                    return self._fail_second_call(patched_plugin, orig,
                                                  *args, **kwargs)
                patched_plugin.side_effect = side_effect
                res = self._create_network_bulk(self.fmt, 2, 'test', True)
                # We expect a 500 as we injected a fault in the plugin
                self._validate_behavior_on_bulk_failure(
                    res, 'networks', webob.exc.HTTPServerError.code
                )
    def test_create_networks_bulk_native_plugin_failure(self):
        """A plugin fault on the second item of a native bulk create
        surfaces as a 500 for the whole request."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        orig = manager.NeutronManager.get_plugin().create_network
        method_to_patch = _get_create_db_method('network')
        with mock.patch.object(manager.NeutronManager.get_plugin(),
                               method_to_patch) as patched_plugin:
            def side_effect(*args, **kwargs):
                return self._fail_second_call(patched_plugin, orig,
                                              *args, **kwargs)
            patched_plugin.side_effect = side_effect
            res = self._create_network_bulk(self.fmt, 2, 'test', True)
            # We expect a 500 as we injected a fault in the plugin
            self._validate_behavior_on_bulk_failure(
                res, 'networks', webob.exc.HTTPServerError.code
            )
def test_list_networks(self):
with self.network() as v1, self.network() as v2, self.network() as v3:
networks = (v1, v2, v3)
self._test_list_resources('network', networks)
def test_list_networks_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.network(admin_status_up=True, name='net1') as net1,\
self.network(admin_status_up=False, name='net2') as net2,\
self.network(admin_status_up=False, name='net3') as net3:
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
def test_list_networks_with_sort_extended_attr_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.network(admin_status_up=True, name='net1'),\
self.network(admin_status_up=False, name='net2'),\
self.network(admin_status_up=False, name='net3'):
req = self.new_list_request(
'networks',
params='sort_key=provider:segmentation_id&sort_dir=asc')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_networks_with_sort_remote_key_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.network(admin_status_up=True, name='net1'),\
self.network(admin_status_up=False, name='net2'),\
self.network(admin_status_up=False, name='net3'):
req = self.new_list_request(
'networks', params='sort_key=subnets&sort_dir=asc')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_networks_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
with self.network(admin_status_up=True, name='net1') as net1,\
self.network(admin_status_up=False, name='net2') as net2,\
self.network(admin_status_up=False, name='net3') as net3:
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
    def test_list_networks_with_pagination_native(self):
        """Native pagination walks three networks in pages of two."""
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.network(name='net1') as net1,\
                self.network(name='net2') as net2,\
                self.network(name='net3') as net3:
            self._test_list_with_pagination('network',
                                            (net1, net2, net3),
                                            ('name', 'asc'), 2, 2)
    def test_list_networks_with_pagination_emulated(self):
        """Emulated (API-side) pagination walks three networks in pages
        of two."""
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.network(name='net1') as net1,\
                self.network(name='net2') as net2,\
                self.network(name='net3') as net3:
            self._test_list_with_pagination('network',
                                            (net1, net2, net3),
                                            ('name', 'asc'), 2, 2)
    def test_list_networks_without_pk_in_fields_pagination_emulated(self):
        """Emulated pagination works even when the requested fields omit
        the primary key ('id')."""
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.network(name='net1', shared=True) as net1,\
                self.network(name='net2', shared=False) as net2,\
                self.network(name='net3', shared=True) as net3:
            self._test_list_with_pagination('network',
                                            (net1, net2, net3),
                                            ('name', 'asc'), 2, 2,
                                            query_params="fields=name",
                                            verify_key='name')
    def test_list_networks_without_pk_in_fields_pagination_native(self):
        """Native pagination works even when the requested fields omit
        the primary key ('id')."""
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.network(name='net1') as net1,\
                self.network(name='net2') as net2,\
                self.network(name='net3') as net3:
            self._test_list_with_pagination('network',
                                            (net1, net2, net3),
                                            ('name', 'asc'), 2, 2,
                                            query_params="fields=shared",
                                            verify_key='shared')
    def test_list_networks_with_pagination_reverse_native(self):
        """Native reverse pagination walks three networks in pages of two."""
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.network(name='net1') as net1,\
                self.network(name='net2') as net2,\
                self.network(name='net3') as net3:
            self._test_list_with_pagination_reverse('network',
                                                    (net1, net2, net3),
                                                    ('name', 'asc'), 2, 2)
    def test_list_networks_with_pagination_reverse_emulated(self):
        """Emulated reverse pagination walks three networks in pages of
        two."""
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.network(name='net1') as net1,\
                self.network(name='net2') as net2,\
                self.network(name='net3') as net3:
            self._test_list_with_pagination_reverse('network',
                                                    (net1, net2, net3),
                                                    ('name', 'asc'), 2, 2)
    def test_list_networks_with_parameters(self):
        """Filtering by admin_state_up returns only the matching network."""
        with self.network(name='net1', admin_state_up=False) as net1,\
                self.network(name='net2') as net2:
            query_params = 'admin_state_up=False'
            self._test_list_resources('network', [net1],
                                      query_params=query_params)
            query_params = 'admin_state_up=True'
            self._test_list_resources('network', [net2],
                                      query_params=query_params)
    def test_list_networks_with_fields(self):
        """'fields=name' projects the response to only the name attribute."""
        with self.network(name='net1') as net1:
            req = self.new_list_request('networks',
                                        params='fields=name')
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(1, len(res['networks']))
            self.assertEqual(res['networks'][0]['name'],
                             net1['network']['name'])
            # 'id' was not requested, so it must be absent from the result.
            self.assertIsNone(res['networks'][0].get('id'))
    def test_list_networks_with_parameters_invalid_values(self):
        """A non-boolean admin_state_up filter value yields a 4xx error."""
        with self.network(name='net1', admin_state_up=False),\
                self.network(name='net2'):
            req = self.new_list_request('networks',
                                        params='admin_state_up=fake')
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
    def test_list_shared_networks_with_non_admin_user(self):
        """A non-admin sees its own networks plus shared ones, but not
        other tenants' private networks."""
        with self.network(shared=False,
                          name='net1',
                          tenant_id='tenant1') as net1,\
                self.network(shared=True,
                             name='net2',
                             tenant_id='another_tenant') as net2,\
                self.network(shared=False,
                             name='net3',
                             tenant_id='another_tenant'):
            ctx = context.Context(user_id='non_admin',
                                  tenant_id='tenant1',
                                  is_admin=False)
            # net3 (private, foreign tenant) must be filtered out.
            self._test_list_resources('network', (net1, net2), ctx)
    def test_show_network(self):
        """Showing a network returns the name it was created with."""
        with self.network(name='net1') as net:
            req = self.new_show_request('networks', net['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['network']['name'],
                             net['network']['name'])
    def test_show_network_with_subnet(self):
        """A network's show response lists the id of its subnet."""
        with self.network(name='net1') as net:
            with self.subnet(net) as subnet:
                req = self.new_show_request('networks', net['network']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(res['network']['subnets'][0],
                                 subnet['subnet']['id'])
    def test_invalid_admin_status(self):
        """admin_state_up accepts boolean-like values and rejects others.

        Each entry is [input value, canonical stored boolean, expected
        HTTP status]; non-boolean-like inputs (7, "7") must 4xx.
        """
        value = [[7, False, webob.exc.HTTPClientError.code],
                 [True, True, webob.exc.HTTPCreated.code],
                 ["True", True, webob.exc.HTTPCreated.code],
                 ["true", True, webob.exc.HTTPCreated.code],
                 [1, True, webob.exc.HTTPCreated.code],
                 ["False", False, webob.exc.HTTPCreated.code],
                 [False, False, webob.exc.HTTPCreated.code],
                 ["false", False, webob.exc.HTTPCreated.code],
                 ["7", False, webob.exc.HTTPClientError.code]]
        for v in value:
            data = {'network': {'name': 'net',
                                'admin_state_up': v[0],
                                'tenant_id': self._tenant_id}}
            network_req = self.new_create_request('networks', data)
            req = network_req.get_response(self.api)
            self.assertEqual(req.status_int, v[2])
            if v[2] == webob.exc.HTTPCreated.code:
                # On success the stored value must be the canonical boolean.
                res = self.deserialize(self.fmt, req)
                self.assertEqual(res['network']['admin_state_up'], v[1])
class TestSubnetsV2(NeutronDbPluginV2TestCase):
    def _test_create_subnet(self, network=None, expected=None, **kwargs):
        """Create a subnet, validate it, delete it, and return it.

        :param network: optional network fixture to attach the subnet to.
        :param expected: optional dict of attribute values to compare
            against the created subnet.
        :param kwargs: subnet attributes; cidr/ip_version/enable_dhcp
            get defaults if not supplied.
        :returns: the created (and already deleted) subnet dict.
        """
        keys = kwargs.copy()
        keys.setdefault('cidr', '10.0.0.0/24')
        keys.setdefault('ip_version', 4)
        keys.setdefault('enable_dhcp', True)
        with self.subnet(network=network, **keys) as subnet:
            # verify the response has each key with the correct value
            self._validate_resource(subnet, keys, 'subnet')
            # verify the configured validations are correct
            if expected:
                self._compare_resource(subnet, expected, 'subnet')
        self._delete('subnets', subnet['subnet']['id'])
        return subnet
def test_create_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
subnet = self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr)
self.assertEqual(4, subnet['subnet']['ip_version'])
self.assertIn('name', subnet['subnet'])
    def test_create_subnet_with_network_different_tenant(self):
        """A tenant cannot create a subnet on another tenant's private
        network; the network is invisible to it (404, not 403)."""
        with self.network(shared=False, tenant_id='tenant1') as network:
            ctx = context.Context(user_id='non_admin',
                                  tenant_id='tenant2',
                                  is_admin=False)
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': '4',
                               'gateway_ip': '10.0.2.1'}}
            req = self.new_create_request('subnets', data,
                                          self.fmt, context=ctx)
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
    def test_create_two_subnets(self):
        """Two non-overlapping subnets coexist on one network and both
        appear in the network's subnet list with correct attributes."""
        gateway_ips = ['10.0.0.1', '10.0.1.1']
        cidrs = ['10.0.0.0/24', '10.0.1.0/24']
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip=gateway_ips[0],
                             cidr=cidrs[0]):
                with self.subnet(network=network,
                                 gateway_ip=gateway_ips[1],
                                 cidr=cidrs[1]):
                    net_req = self.new_show_request('networks',
                                                    network['network']['id'])
                    raw_res = net_req.get_response(self.api)
                    net_res = self.deserialize(self.fmt, raw_res)
                    # Each listed subnet must carry one of the configured
                    # cidr/gateway pairs.
                    for subnet_id in net_res['network']['subnets']:
                        sub_req = self.new_show_request('subnets', subnet_id)
                        raw_res = sub_req.get_response(self.api)
                        sub_res = self.deserialize(self.fmt, raw_res)
                        self.assertIn(sub_res['subnet']['cidr'], cidrs)
                        self.assertIn(sub_res['subnet']['gateway_ip'],
                                      gateway_ips)
    def test_create_two_subnets_same_cidr_returns_400(self):
        """A second subnet with an identical CIDR on the same network is
        rejected with a 4xx client error."""
        gateway_ip_1 = '10.0.0.1'
        cidr_1 = '10.0.0.0/24'
        gateway_ip_2 = '10.0.0.10'
        cidr_2 = '10.0.0.0/24'
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip=gateway_ip_1,
                             cidr=cidr_1):
                with testlib_api.ExpectedException(
                        webob.exc.HTTPClientError) as ctx_manager:
                    with self.subnet(network=network,
                                     gateway_ip=gateway_ip_2,
                                     cidr=cidr_2):
                        pass
                self.assertEqual(ctx_manager.exception.code,
                                 webob.exc.HTTPClientError.code)
    def test_create_subnet_bad_V4_cidr(self):
        """An IPv4 CIDR without a prefix length is rejected with 4xx."""
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0',
                               'ip_version': '4',
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_no_ip_version(self):
        """Omitting ip_version with no default subnet pools configured
        is rejected with 4xx."""
        with self.network() as network:
            cfg.CONF.set_override('default_ipv4_subnet_pool', None)
            cfg.CONF.set_override('default_ipv6_subnet_pool', None)
            data = {'subnet': {'network_id': network['network']['id'],
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_only_ip_version_v6_no_pool(self):
        """ip_version=6 without a CIDR and with no default v6 pool is
        rejected with 4xx."""
        with self.network() as network:
            tenant_id = network['network']['tenant_id']
            cfg.CONF.set_override('default_ipv6_subnet_pool', None)
            data = {'subnet': {'network_id': network['network']['id'],
                               'ip_version': '6',
                               'tenant_id': tenant_id}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_only_ip_version_v4(self):
        """With a default IPv4 subnet pool configured, a CIDR-less v4
        create allocates a /27 from that pool."""
        with self.network() as network:
            tenant_id = network['network']['tenant_id']
            subnetpool_prefix = '10.0.0.0/8'
            with self.subnetpool(prefixes=[subnetpool_prefix],
                                 admin=False,
                                 name="My subnet pool",
                                 tenant_id=tenant_id,
                                 min_prefixlen='25') as subnetpool:
                subnetpool_id = subnetpool['subnetpool']['id']
                cfg.CONF.set_override('default_ipv4_subnet_pool',
                                      subnetpool_id)
                data = {'subnet': {'network_id': network['network']['id'],
                                   'ip_version': '4',
                                   'prefixlen': '27',
                                   'tenant_id': tenant_id}}
                subnet_req = self.new_create_request('subnets', data)
                res = subnet_req.get_response(self.api)
                subnet = self.deserialize(self.fmt, res)['subnet']
                ip_net = netaddr.IPNetwork(subnet['cidr'])
                # Allocation must come from the pool, at the requested
                # prefix length, and record the pool id.
                self.assertTrue(ip_net in netaddr.IPNetwork(subnetpool_prefix))
                self.assertEqual(27, ip_net.prefixlen)
                self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
    def test_create_subnet_only_ip_version_v6(self):
        """With a default IPv6 subnet pool configured, a CIDR-less v6
        create allocates a /64 from that pool."""
        with self.network() as network:
            tenant_id = network['network']['tenant_id']
            subnetpool_prefix = '2000::/56'
            with self.subnetpool(prefixes=[subnetpool_prefix],
                                 admin=False,
                                 name="My ipv6 subnet pool",
                                 tenant_id=tenant_id,
                                 min_prefixlen='64') as subnetpool:
                subnetpool_id = subnetpool['subnetpool']['id']
                cfg.CONF.set_override('default_ipv6_subnet_pool',
                                      subnetpool_id)
                data = {'subnet': {'network_id': network['network']['id'],
                                   'ip_version': '6',
                                   'tenant_id': tenant_id}}
                subnet_req = self.new_create_request('subnets', data)
                res = subnet_req.get_response(self.api)
                subnet = self.deserialize(self.fmt, res)['subnet']
                self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
                ip_net = netaddr.IPNetwork(subnet['cidr'])
                # Allocation must fall inside the pool prefix at the
                # pool's minimum prefix length.
                self.assertTrue(ip_net in netaddr.IPNetwork(subnetpool_prefix))
                self.assertEqual(64, ip_net.prefixlen)
    def test_create_subnet_bad_V4_cidr_prefix_len(self):
        """The unspecified-address CIDR (0.0.0.0/0) is rejected with 4xx."""
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': constants.IPv4_ANY,
                               'ip_version': '4',
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '0.0.0.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_bad_V6_cidr(self):
        """An IPv6 CIDR without a prefix length is rejected with 4xx."""
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'fe80::',
                               'ip_version': '6',
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': 'fe80::1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_V6_slaac_big_prefix(self):
        """SLAAC requires a /64; a longer prefix (/65) is rejected with
        4xx."""
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '2014::/65',
                               'ip_version': '6',
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': 'fe80::1',
                               'ipv6_address_mode': 'slaac'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self):
cidr_1 = '10.0.0.0/23'
cidr_2 = '10.0.0.0/24'
cfg.CONF.set_override('allow_overlapping_ips', True)
with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2):
pass
    def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self):
        """Overlapping subnet CIDRs are rejected with 4xx when overlap is
        disabled."""
        cidr_1 = '10.0.0.0/23'
        cidr_2 = '10.0.0.0/24'
        cfg.CONF.set_override('allow_overlapping_ips', False)
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2):
                pass
        self.assertEqual(ctx_manager.exception.code,
                         webob.exc.HTTPClientError.code)
    def test_create_subnets_bulk_native(self):
        """Bulk subnet create succeeds via the plugin's native bulk path."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk subnet create")
        with self.network() as net:
            res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'],
                                           'test')
            self._validate_behavior_on_bulk_success(res, 'subnets')
    def test_create_subnets_bulk_emulated(self):
        """Bulk subnet create succeeds via the API-emulated bulk path."""
        real_has_attr = hasattr
        # Ensure the API chooses the emulation code path by hiding the
        # plugin's native bulk support attribute.
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with self.network() as net:
                res = self._create_subnet_bulk(self.fmt, 2,
                                               net['network']['id'],
                                               'test')
                self._validate_behavior_on_bulk_success(res, 'subnets')
    def test_create_subnets_bulk_emulated_plugin_failure(self):
        """A plugin fault on the second item of an emulated bulk subnet
        create surfaces as a 500 for the whole request."""
        real_has_attr = hasattr
        # Ensure the API chooses the emulation code path by hiding the
        # plugin's native bulk support attribute.
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            orig = manager.NeutronManager.get_plugin().create_subnet
            method_to_patch = _get_create_db_method('subnet')
            with mock.patch.object(manager.NeutronManager.get_plugin(),
                                   method_to_patch) as patched_plugin:
                def side_effect(*args, **kwargs):
                    self._fail_second_call(patched_plugin, orig,
                                           *args, **kwargs)
                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_subnet_bulk(self.fmt, 2,
                                                   net['network']['id'],
                                                   'test')
                self._delete('networks', net['network']['id'])
                # We expect a 500 as we injected a fault in the plugin
                self._validate_behavior_on_bulk_failure(
                    res, 'subnets', webob.exc.HTTPServerError.code
                )
    def test_create_subnets_bulk_native_plugin_failure(self):
        """A plugin fault on the second item of a native bulk subnet
        create surfaces as a 500 for the whole request."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk subnet create")
        plugin = manager.NeutronManager.get_plugin()
        orig = plugin.create_subnet
        method_to_patch = _get_create_db_method('subnet')
        with mock.patch.object(plugin, method_to_patch) as patched_plugin:
            def side_effect(*args, **kwargs):
                return self._fail_second_call(patched_plugin, orig,
                                              *args, **kwargs)
            patched_plugin.side_effect = side_effect
            with self.network() as net:
                res = self._create_subnet_bulk(self.fmt, 2,
                                               net['network']['id'],
                                               'test')
                # We expect a 500 as we injected a fault in the plugin
                self._validate_behavior_on_bulk_failure(
                    res, 'subnets', webob.exc.HTTPServerError.code
                )
    def test_delete_subnet(self):
        """Deleting an unused subnet returns 204 No Content."""
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
    def test_delete_subnet_port_exists_owned_by_network(self):
        """A subnet with only network-owned (DHCP) ports can be deleted;
        such ports do not block deletion."""
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4)
        self._create_port(self.fmt,
                          network['network']['id'],
                          device_owner=constants.DEVICE_OWNER_DHCP)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
    def test_delete_subnet_dhcp_port_associated_with_other_subnets(self):
        """Deleting subnets only strips the matching fixed IPs from a
        DHCP port that spans multiple subnets; the port itself survives."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1',
                                    '10.0.0.0/24', ip_version=4)
        subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1',
                                    '10.0.1.0/24', ip_version=4)
        res = self._create_port(self.fmt,
                                network['network']['id'],
                                device_owner=constants.DEVICE_OWNER_DHCP,
                                fixed_ips=[
                                    {'subnet_id': subnet1['subnet']['id']},
                                    {'subnet_id': subnet2['subnet']['id']}
                                ])
        port = self.deserialize(self.fmt, res)
        expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']]
        self.assertEqual(expected_subnets,
                         [s['subnet_id'] for s in port['port']['fixed_ips']])
        # Deleting subnet1 removes only its fixed IP from the port.
        req = self.new_delete_request('subnets', subnet1['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)
        port = self._show('ports', port['port']['id'])
        expected_subnets = [subnet2['subnet']['id']]
        self.assertEqual(expected_subnets,
                         [s['subnet_id'] for s in port['port']['fixed_ips']])
        # Deleting subnet2 leaves the port with no fixed IPs at all.
        req = self.new_delete_request('subnets', subnet2['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)
        port = self._show('ports', port['port']['id'])
        self.assertFalse(port['port']['fixed_ips'])
    def test_delete_subnet_port_exists_owned_by_other(self):
        """A subnet with a tenant-owned port cannot be deleted: 409 with
        a SubnetInUse error body."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet):
                id = subnet['subnet']['id']
                req = self.new_delete_request('subnets', id)
                res = req.get_response(self.api)
                data = self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
                msg = str(n_exc.SubnetInUse(subnet_id=id))
                self.assertEqual(data['NeutronError']['message'], msg)
    def test_delete_subnet_with_other_subnet_on_network_still_in_use(self):
        """An unused subnet is deletable even while a sibling subnet on
        the same network has a port."""
        with self.network() as network:
            with self.subnet(network=network) as subnet1,\
                    self.subnet(network=network,
                                cidr='10.0.1.0/24') as subnet2:
                subnet1_id = subnet1['subnet']['id']
                subnet2_id = subnet2['subnet']['id']
                with self.port(
                        subnet=subnet1,
                        fixed_ips=[{'subnet_id': subnet1_id}]):
                    req = self.new_delete_request('subnets', subnet2_id)
                    res = req.get_response(self.api)
                    self.assertEqual(res.status_int,
                                     webob.exc.HTTPNoContent.code)
def _create_slaac_subnet_and_port(self, port_owner=None):
# Create an IPv6 SLAAC subnet and a port using that subnet
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway='fe80::1',
cidr='fe80::/64', ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC)
kwargs = {}
if port_owner:
kwargs['device_owner'] = port_owner
if port_owner in constants.ROUTER_INTERFACE_OWNERS:
kwargs['fixed_ips'] = [{'ip_address': 'fe80::1'}]
res = self._create_port(self.fmt, net_id=network['network']['id'],
**kwargs)
port = self.deserialize(self.fmt, res)
self.assertEqual(1, len(port['port']['fixed_ips']))
# The port should have an address from the subnet
req = self.new_show_request('ports', port['port']['id'], self.fmt)
res = req.get_response(self.api)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(1, len(sport['port']['fixed_ips']))
return subnet, port
def test_delete_subnet_ipv6_slaac_port_exists(self):
"""Test IPv6 SLAAC subnet delete when a port is still using subnet."""
subnet, port = self._create_slaac_subnet_and_port()
# Delete the subnet
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
# The port should no longer have an address from the deleted subnet
req = self.new_show_request('ports', port['port']['id'], self.fmt)
res = req.get_response(self.api)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(0, len(sport['port']['fixed_ips']))
def test_delete_subnet_ipv6_slaac_router_port_exists(self):
"""Test IPv6 SLAAC subnet delete with a router port using the subnet"""
subnet, port = self._create_slaac_subnet_and_port(
constants.DEVICE_OWNER_ROUTER_INTF)
# Delete the subnet and assert that we get a HTTP 409 error
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
# The subnet should still exist and the port should still have an
# address from the subnet
req = self.new_show_request('subnets', subnet['subnet']['id'],
self.fmt)
res = req.get_response(self.api)
ssubnet = self.deserialize(self.fmt, req.get_response(self.api))
self.assertIsNotNone(ssubnet)
req = self.new_show_request('ports', port['port']['id'], self.fmt)
res = req.get_response(self.api)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(1, len(sport['port']['fixed_ips']))
port_subnet_ids = [fip['subnet_id'] for fip in
sport['port']['fixed_ips']]
self.assertIn(subnet['subnet']['id'], port_subnet_ids)
    def test_delete_network(self):
        """Deleting a network cascades to its subnet (subnet show then
        returns 404)."""
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr,
                                   ip_version=4)
        req = self.new_delete_request('networks', network['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
        req = self.new_show_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
    def test_create_subnet_bad_tenant(self):
        """A foreign tenant acting as itself cannot create a subnet on
        someone else's network (404: network invisible)."""
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.2.0/24',
                                webob.exc.HTTPNotFound.code,
                                ip_version=4,
                                tenant_id='bad_tenant_id',
                                gateway_ip='10.0.2.1',
                                device_owner='fake_owner',
                                set_context=True)
    def test_create_subnet_as_admin(self):
        """With admin context (set_context=False) a subnet may be created
        on behalf of an arbitrary tenant."""
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.2.0/24',
                                webob.exc.HTTPCreated.code,
                                ip_version=4,
                                tenant_id='bad_tenant_id',
                                gateway_ip='10.0.2.1',
                                device_owner='fake_owner',
                                set_context=False)
    def test_create_subnet_nonzero_cidr(self):
        """CIDRs with host bits set are canonicalized to their network
        address (except /32, which is the host itself)."""
        with self.subnet(cidr='10.129.122.5/8') as v1,\
                self.subnet(cidr='11.129.122.5/15') as v2,\
                self.subnet(cidr='12.129.122.5/16') as v3,\
                self.subnet(cidr='13.129.122.5/18') as v4,\
                self.subnet(cidr='14.129.122.5/22') as v5,\
                self.subnet(cidr='15.129.122.5/24') as v6,\
                self.subnet(cidr='16.129.122.5/28') as v7,\
                self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) as v8:
            subs = (v1, v2, v3, v4, v5, v6, v7, v8)
            # the API should accept and correct these for users
            self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8')
            self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15')
            self.assertEqual(subs[2]['subnet']['cidr'], '12.129.0.0/16')
            self.assertEqual(subs[3]['subnet']['cidr'], '13.129.64.0/18')
            self.assertEqual(subs[4]['subnet']['cidr'], '14.129.120.0/22')
            self.assertEqual(subs[5]['subnet']['cidr'], '15.129.122.0/24')
            self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28')
            self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32')
def _test_create_subnet_with_invalid_netmask_returns_400(self, *args):
with self.network() as network:
for cidr in args:
ip_version = netaddr.IPNetwork(cidr).version
self._create_subnet(self.fmt,
network['network']['id'],
cidr,
webob.exc.HTTPClientError.code,
ip_version=ip_version)
def test_create_subnet_with_invalid_netmask_returns_400_ipv4(self):
self._test_create_subnet_with_invalid_netmask_returns_400(
'10.0.0.0/31', '10.0.0.0/32')
def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self):
self._test_create_subnet_with_invalid_netmask_returns_400(
'cafe:cafe::/127', 'cafe:cafe::/128')
    def test_create_subnet_bad_ip_version(self):
        """A non-numeric ip_version is rejected with 4xx."""
        with self.network() as network:
            # Check bad IP version
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 'abc',
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_bad_ip_version_null(self):
        """A null ip_version is rejected with 4xx."""
        with self.network() as network:
            # Check bad IP version
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': None,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_bad_uuid(self):
        """A null network_id (invalid UUID) is rejected with 4xx."""
        with self.network() as network:
            # Check invalid UUID
            data = {'subnet': {'network_id': None,
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def test_create_subnet_bad_boolean(self):
        """A null enable_dhcp (invalid boolean) is rejected with 4xx."""
        with self.network() as network:
            # Check invalid boolean
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': '4',
                               'enable_dhcp': None,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_pools(self):
with self.network() as network:
# Check allocation pools
allocation_pools = [[{'end': '10.0.0.254'}],
[{'start': '10.0.0.254'}],
[{'start': '1000.0.0.254'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254'},
{'end': '10.0.0.254'}],
None,
[{'start': '10.0.0.2', 'end': '10.0.0.3'},
{'start': '10.0.0.2', 'end': '10.0.0.3'}]]
tenant_id = network['network']['tenant_id']
for pool in allocation_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'allocation_pools': pool}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_nameserver(self):
with self.network() as network:
# Check nameservers
nameserver_pools = [['1100.0.0.2'],
['1.1.1.2', '1.1000.1.3'],
['1.1.1.2', '1.1.1.2']]
tenant_id = network['network']['tenant_id']
for nameservers in nameserver_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'dns_nameservers': nameservers}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_hostroutes(self):
with self.network() as network:
# Check hostroutes
hostroute_pools = [[{'destination': '100.0.0.0/24'}],
[{'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'}]]
tenant_id = network['network']['tenant_id']
for hostroutes in hostroute_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'host_routes': hostroutes}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
    def test_create_subnet_defaults(self):
        # Create a subnet with no explicit arguments and verify the
        # defaulted fields: cidr, gateway (first address), DHCP enabled,
        # and an allocation pool spanning .2-.254.
        gateway = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.254'}]
        enable_dhcp = True
        subnet = self._test_create_subnet()
        # verify cidr & gw have been correctly generated
        self.assertEqual(subnet['subnet']['cidr'], cidr)
        self.assertEqual(subnet['subnet']['gateway_ip'], gateway)
        self.assertEqual(subnet['subnet']['enable_dhcp'], enable_dhcp)
        self.assertEqual(subnet['subnet']['allocation_pools'],
                         allocation_pools)
def test_create_subnet_gw_values(self):
cidr = '10.0.0.0/24'
# Gateway is last IP in range
gateway = '10.0.0.254'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.253'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway)
# Gateway is first in subnet
gateway = '10.0.0.1'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected,
gateway_ip=gateway)
def test_create_subnet_ipv6_gw_values(self):
cidr = '2001::/64'
# Gateway is last IP in IPv6 DHCPv6 stateful subnet
gateway = '2001::ffff:ffff:ffff:fffe'
allocation_pools = [{'start': '2001::1',
'end': '2001::ffff:ffff:ffff:fffd'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway,
cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
# Gateway is first IP in IPv6 DHCPv6 stateful subnet
gateway = '2001::1'
allocation_pools = [{'start': '2001::2',
'end': '2001::ffff:ffff:ffff:fffe'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway,
cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
# If gateway_ip is not specified, allocate first IP from the subnet
expected = {'gateway_ip': gateway,
'cidr': cidr}
self._test_create_subnet(expected=expected,
cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC)
def test_create_subnet_gw_outside_cidr_returns_400(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='100.0.0.1')
def test_create_subnet_gw_of_network_returns_400(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='10.0.0.0')
def test_create_subnet_gw_bcast_returns_400(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='10.0.0.255')
def test_create_subnet_with_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_none_gateway(self):
cidr = '10.0.0.0/24'
self._test_create_subnet(gateway_ip=None,
cidr=cidr)
def test_create_subnet_with_none_gateway_fully_allocated(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.254'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
    def test_subnet_with_allocation_range(self):
        # Create a subnet whose allocation pool is a narrow slice
        # (.100-.120) of the CIDR, then verify that ports can still be
        # created with fixed IPs OUTSIDE the pool (.10) and even on the
        # gateway address (.1).
        with self.network() as network:
            net_id = network['network']['id']
            data = {'subnet': {'network_id': net_id,
                               'cidr': '10.0.0.0/24',
                               'ip_version': 4,
                               'gateway_ip': '10.0.0.1',
                               'tenant_id': network['network']['tenant_id'],
                               'allocation_pools': [{'start': '10.0.0.100',
                                                     'end': '10.0.0.120'}]}}
            subnet_req = self.new_create_request('subnets', data)
            subnet = self.deserialize(self.fmt,
                                      subnet_req.get_response(self.api))
            # Check fixed IP not in allocation range
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.10'}]}
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
            port = self.deserialize(self.fmt, res)
            # delete the port
            self._delete('ports', port['port']['id'])
            # Check when fixed IP is gateway
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.1'}]}
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
            port = self.deserialize(self.fmt, res)
            # delete the port
            self._delete('ports', port['port']['id'])
def test_create_subnet_with_none_gateway_allocation_pool(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_v6_allocation_pool(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
allocation_pools = [{'start': 'fe80::2',
'end': 'fe80::ffff:fffa:ffff'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
allocation_pools=allocation_pools)
def test_create_subnet_with_large_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/8'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.1.0.0',
'end': '10.200.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_multiple_allocation_pools(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.0.0.110',
'end': '10.0.0.150'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_dhcp_disabled(self):
enable_dhcp = False
self._test_create_subnet(enable_dhcp=enable_dhcp)
def test_create_subnet_default_gw_conflict_allocation_pool_returns_409(
self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.5'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
gateway_ip = '10.0.0.50'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.100'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_overlapping_allocation_pools_returns_409(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.150'},
{'start': '10.0.0.140',
'end': '10.0.0.180'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_invalid_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.256'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.1.6'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_shared_returns_400(self):
cidr = '10.0.0.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr,
shared=True)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_cidrv4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 6,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_cidrv6(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_gatewayv6(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'gateway_ip': 'fe80::1',
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 6,
'gateway_ip': '192.168.0.1',
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_dns_v4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 6,
'dns_nameservers': ['192.168.0.1'],
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
host_routes = [{'destination': 'fe80::0/48',
'nexthop': '10.0.2.20'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': 'fe80::1'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
    def _test_validate_subnet_ipv6_modes(self, cur_subnet=None,
                                         expect_success=True, **modes):
        # Helper: build a minimal IPv6 subnet dict, overlay the supplied
        # ipv6_ra_mode / ipv6_address_mode values from **modes, and run
        # the plugin's _validate_subnet — either expecting it to pass
        # silently (expect_success) or to raise n_exc.InvalidInput.
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        new_subnet = {'ip_version': 6,
                      'cidr': 'fe80::/64',
                      'enable_dhcp': True,
                      'ipv6_address_mode': None,
                      'ipv6_ra_mode': None}
        for mode, value in modes.items():
            new_subnet[mode] = value
        if expect_success:
            plugin._validate_subnet(ctx, new_subnet, cur_subnet)
        else:
            self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet,
                              ctx, new_subnet, cur_subnet)
def test_create_subnet_ipv6_ra_modes(self):
# Test all RA modes with no address mode specified
for ra_mode in constants.IPV6_MODES:
self._test_validate_subnet_ipv6_modes(
ipv6_ra_mode=ra_mode)
def test_create_subnet_ipv6_addr_modes(self):
# Test all address modes with no RA mode specified
for addr_mode in constants.IPV6_MODES:
self._test_validate_subnet_ipv6_modes(
ipv6_address_mode=addr_mode)
def test_create_subnet_ipv6_same_ra_and_addr_modes(self):
# Test all ipv6 modes with ra_mode==addr_mode
for ipv6_mode in constants.IPV6_MODES:
self._test_validate_subnet_ipv6_modes(
ipv6_ra_mode=ipv6_mode,
ipv6_address_mode=ipv6_mode)
def test_create_subnet_ipv6_different_ra_and_addr_modes(self):
# Test all ipv6 modes with ra_mode!=addr_mode
for ra_mode, addr_mode in itertools.permutations(
constants.IPV6_MODES, 2):
self._test_validate_subnet_ipv6_modes(
expect_success=not (ra_mode and addr_mode),
ipv6_ra_mode=ra_mode,
ipv6_address_mode=addr_mode)
def test_create_subnet_ipv6_out_of_cidr_global(self):
gateway_ip = '2000::1'
cidr = '2001::/64'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(
gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_ipv6_out_of_cidr_lla(self):
gateway_ip = 'fe80::1'
cidr = '2001::/64'
self._test_create_subnet(
gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC)
def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/64'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
for mode in constants.IPV6_MODES:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
enable_dhcp=False,
ipv6_ra_mode=mode,
ipv6_address_mode=mode)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_invalid_ipv6_ra_mode(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
ipv6_ra_mode='foo',
ipv6_address_mode='slaac')
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_invalid_ipv6_address_mode(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
ipv6_ra_mode='slaac',
ipv6_address_mode='baz')
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_ipv6_ra_mode_ip_version_4(self):
cidr = '10.0.2.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr, ip_version=4,
ipv6_ra_mode=constants.DHCPV6_STATEFUL)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_ipv6_address_mode_ip_version_4(self):
cidr = '10.0.2.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(
cidr=cidr, ip_version=4,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
    def _test_create_subnet_ipv6_auto_addr_with_port_on_network(
            self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE,
            insert_db_reference_error=False):
        """Add an IPv6 auto-address subnet and check port address updates.

        Builds a network with one IPv4 subnet and one port, then adds an
        IPv6 subnet in the given auto-address mode (``addr_mode``).
        Ordinary ports should gain a second fixed IP from the new
        subnet; DVR SNAT ports, router interfaces, or any run where a DB
        reference error is injected should keep their single IPv4 IP.
        """
        # Create a network with one IPv4 subnet and one port
        with self.network() as network,\
            self.subnet(network=network) as v4_subnet,\
            self.port(subnet=v4_subnet, device_owner=device_owner) as port:
            if insert_db_reference_error:
                # Simulate a DB foreign-key failure whenever an
                # IPAllocation row is added to the session.
                def db_ref_err_for_ipalloc(instance):
                    if instance.__class__.__name__ == 'IPAllocation':
                        raise db_exc.DBReferenceError(
                            'dummy_table', 'dummy_constraint',
                            'dummy_key', 'dummy_key_table')
                mock.patch.object(orm.Session, 'add',
                                  side_effect=db_ref_err_for_ipalloc).start()
            # Add an IPv6 auto-address subnet to the network
            v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1',
                                          'fe80::/64', ip_version=6,
                                          ipv6_ra_mode=addr_mode,
                                          ipv6_address_mode=addr_mode)
            if (insert_db_reference_error
                or device_owner == constants.DEVICE_OWNER_ROUTER_SNAT
                or device_owner in constants.ROUTER_INTERFACE_OWNERS):
                # DVR SNAT and router interfaces should not have been
                # updated with addresses from the new auto-address subnet
                self.assertEqual(1, len(port['port']['fixed_ips']))
            else:
                # Confirm that the port has been updated with an address
                # from the new auto-address subnet
                req = self.new_show_request('ports', port['port']['id'],
                                            self.fmt)
                sport = self.deserialize(self.fmt, req.get_response(self.api))
                fixed_ips = sport['port']['fixed_ips']
                self.assertEqual(2, len(fixed_ips))
                self.assertIn(v6_subnet['subnet']['id'],
                              [fixed_ip['subnet_id'] for fixed_ip
                               in fixed_ips])
def test_create_subnet_ipv6_slaac_with_port_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC)
def test_create_subnet_dhcpv6_stateless_with_port_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.DHCPV6_STATELESS)
def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC,
device_owner=constants.DEVICE_OWNER_DHCP)
def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC,
device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC,
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT)
def test_create_subnet_ipv6_slaac_with_db_reference_error(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC, insert_db_reference_error=True)
def test_update_subnet_no_gateway(self):
with self.subnet() as subnet:
data = {'subnet': {'gateway_ip': '10.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
data = {'subnet': {'gateway_ip': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertIsNone(data['subnet']['gateway_ip'])
def test_update_subnet(self):
with self.subnet() as subnet:
data = {'subnet': {'gateway_ip': '10.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
def test_update_subnet_adding_additional_host_routes_and_dns(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'dns_nameservers': ['192.168.0.1'],
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, subnet_req.get_response(self.api))
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'},
{'destination': '192.168.0.0/24',
'nexthop': '10.0.2.3'}]
dns_nameservers = ['192.168.0.1', '192.168.0.2']
data = {'subnet': {'host_routes': host_routes,
'dns_nameservers': dns_nameservers}}
req = self.new_update_request('subnets', data,
res['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(host_routes))
self.assertEqual(sorted(res['subnet']['dns_nameservers']),
sorted(dns_nameservers))
def test_update_subnet_shared_returns_400(self):
with self.network(shared=True) as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'shared': True}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gw_outside_cidr_returns_400(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'gateway_ip': '100.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gw_ip_in_use_returns_409(self):
with self.network() as network:
with self.subnet(
network=network,
allocation_pools=[{'start': '10.0.0.100',
'end': '10.0.0.253'}]) as subnet:
subnet_data = subnet['subnet']
with self.port(
subnet=subnet,
fixed_ips=[{'subnet_id': subnet_data['id'],
'ip_address': subnet_data['gateway_ip']}]):
data = {'subnet': {'gateway_ip': '10.0.0.99'}}
req = self.new_update_request('subnets', data,
subnet_data['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'gateway_ip': 'fe80::1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'gateway_ip': '10.1.1.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv4_dns_v6(self):
dns_nameservers = ['fe80::1']
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'dns_nameservers': dns_nameservers}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
host_routes = [{'destination': 'fe80::0/48',
'nexthop': '10.0.2.20'}]
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'host_routes': host_routes}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': 'fe80::1'}]
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'host_routes': host_routes}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='10.0.0.0/24') as subnet:
data = {'subnet': {'gateway_ip': '10.0.0.50'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPConflict.code)
def test_update_subnet_ipv6_attributes_fails(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL,
'ipv6_address_mode': constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_ra_mode_fails(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_ra_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_address_mode_fails(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'ipv6_address_mode': constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_cannot_disable_dhcp(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'enable_dhcp': False}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_ra_mode_ip_version_4(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'ipv6_ra_mode':
constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_address_mode_ip_version_4(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'ipv6_address_mode':
constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_allocation_pools(self):
"""Test that we can successfully update with sane params.
This will create a subnet with specified allocation_pools
Then issue an update (PUT) to update these using correct
(i.e. non erroneous) params. Finally retrieve the updated
subnet and verify.
"""
allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='192.168.0.0/24') as subnet:
data = {'subnet': {'allocation_pools': [
{'start': '192.168.0.10', 'end': '192.168.0.20'},
{'start': '192.168.0.30', 'end': '192.168.0.40'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
#check res code but then do GET on subnet for verification
res = req.get_response(self.api)
self.assertEqual(res.status_code, 200)
req = self.new_show_request('subnets', subnet['subnet']['id'],
self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(len(res['subnet']['allocation_pools']), 2)
res_vals = res['subnet']['allocation_pools'][0].values() +\
res['subnet']['allocation_pools'][1].values()
for pool_val in ['10', '20', '30', '40']:
self.assertTrue('192.168.0.%s' % (pool_val) in res_vals)
#updating alloc pool to something outside subnet.cidr
def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self):
"""Test update alloc pool to something outside subnet.cidr.
This makes sure that an erroneous allocation_pool specified
in a subnet update (outside subnet cidr) will result in an error.
"""
allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='192.168.0.0/24') as subnet:
data = {'subnet': {'allocation_pools': [
{'start': '10.0.0.10', 'end': '10.0.0.20'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
    def _test_subnet_update_enable_dhcp_no_ip_available_returns_409(
            self, allocation_pools, cidr):
        # Helper: create a DHCP-disabled subnet whose single-address pool
        # is consumed by one port, then verify that enabling DHCP (which
        # needs an IP for the DHCP port) is rejected with 409 Conflict.
        ip_version = netaddr.IPNetwork(cidr).version
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             enable_dhcp=False,
                             cidr=cidr,
                             ip_version=ip_version) as subnet:
                # NOTE(review): `id` shadows the builtin and actually
                # holds the NETWORK id, not the subnet id.
                id = subnet['subnet']['network_id']
                self._create_port(self.fmt, id)
                data = {'subnet': {'enable_dhcp': True}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPConflict.code)
def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv4(self):
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.2'}]
cidr = '10.0.0.0/30'
self._test_subnet_update_enable_dhcp_no_ip_available_returns_409(
allocation_pools, cidr)
def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv6(self):
allocation_pools = [{'start': '2001:db8::2', 'end': '2001:db8::2'}]
cidr = '2001:db8::/126'
self._test_subnet_update_enable_dhcp_no_ip_available_returns_409(
allocation_pools, cidr)
def test_show_subnet(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
req = self.new_show_request('subnets',
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['id'],
subnet['subnet']['id'])
self.assertEqual(res['subnet']['network_id'],
network['network']['id'])
def test_list_subnets(self):
with self.network() as network:
with self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24') as v1,\
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24') as v2,\
self.subnet(network=network,
gateway_ip='10.0.2.1',
cidr='10.0.2.0/24') as v3:
subnets = (v1, v2, v3)
self._test_list_resources('subnet', subnets)
    def test_list_subnets_shared(self):
        # Visibility: a plain tenant sees only subnets of shared networks;
        # the admin request (no tenant context set) sees all subnets.
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
                    # normal user should see only 1 subnet
                    req = self.new_list_request('subnets')
                    req.environ['neutron.context'] = context.Context(
                        '', 'some_tenant')
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(len(res['subnets']), 1)
                    self.assertEqual(res['subnets'][0]['cidr'],
                                     subnet['subnet']['cidr'])
                    # admin will see both subnets
                    admin_req = self.new_list_request('subnets')
                    admin_res = self.deserialize(
                        self.fmt, admin_req.get_response(self.api))
                    self.assertEqual(len(admin_res['subnets']), 2)
                    cidrs = [sub['cidr'] for sub in admin_res['subnets']]
                    self.assertIn(subnet['subnet']['cidr'], cidrs)
                    self.assertIn(priv_subnet['subnet']['cidr'], cidrs)
    def test_list_subnets_with_parameter(self):
        # Filtering: a repeated query parameter ORs its values together;
        # a single non-matching value yields an empty result.
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24') as v2:
                subnets = (v1, v2)
                query_params = 'ip_version=4&ip_version=6'
                self._test_list_resources('subnet', subnets,
                                          query_params=query_params)
                query_params = 'ip_version=6'
                self._test_list_resources('subnet', [],
                                          query_params=query_params)
    def test_list_subnets_with_sort_native(self):
        # Exercises DB-backed (native) sorting; skipped when the plugin
        # does not implement it.
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
                self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\
                self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3:
            # Expected order: enable_dhcp ascending, then cidr descending.
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])
def test_list_subnets_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\
self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3:
self._test_list_with_sort('subnet', (subnet3,
subnet2,
subnet1),
[('enable_dhcp', 'asc'),
('cidr', 'desc')])
def test_list_subnets_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented sorting feature")
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination('subnet',
(subnet1, subnet2, subnet3),
('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination('subnet',
(subnet1, subnet2, subnet3),
('cidr', 'asc'), 2, 2)
    def test_list_subnets_with_pagination_reverse_native(self):
        # NOTE(review): this pagination test guards on _skip_native_sorting
        # rather than _skip_native_pagination — possibly a copy-paste slip,
        # though reverse pagination also depends on sort support; confirm
        # before changing the guard.
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination_reverse('subnet',
(subnet1, subnet2,
subnet3),
('cidr', 'asc'), 2, 2)
def test_invalid_ip_version(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 7,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_subnet(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'invalid',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_ip_address(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'ipaddress'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_uuid(self):
with self.network() as network:
data = {'subnet': {'network_id': 'invalid-uuid',
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_with_one_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['1.2.3.4']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_two_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['1.2.3.4', '4.3.2.1']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
    def test_create_subnet_with_too_many_dns(self):
        # Three nameservers exceed the configured maximum (presumably the
        # max_dns_nameservers option — confirm), so creation must fail 4xx.
        with self.network() as network:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'dns_nameservers': dns_list}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_with_one_host_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
def test_create_subnet_with_two_host_routes(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'},
{'destination': '12.0.0.0/8',
'nexthop': '4.3.2.1'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
    def test_create_subnet_with_too_many_routes(self):
        # Three host routes exceed the configured maximum (presumably the
        # max_subnet_host_routes option — confirm), so creation fails 4xx.
        with self.network() as network:
            host_routes = [{'destination': '135.207.0.0/16',
                            'nexthop': '1.2.3.4'},
                           {'destination': '12.0.0.0/8',
                            'nexthop': '4.3.2.1'},
                           {'destination': '141.212.0.0/16',
                            'nexthop': '2.2.2.2'}]
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'host_routes': host_routes}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_update_subnet_dns(self):
with self.subnet() as subnet:
data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['dns_nameservers'],
data['subnet']['dns_nameservers'])
    def test_update_subnet_dns_to_None(self):
        # Setting dns_nameservers to None clears the list, and a later
        # update can populate it again.
        with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
            data = {'subnet': {'dns_nameservers': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['dns_nameservers'])
            data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])
def test_update_subnet_dns_with_too_many_entries(self):
with self.subnet() as subnet:
dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
data = {'subnet': {'dns_nameservers': dns_list}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_update_subnet_route(self):
with self.subnet() as subnet:
data = {'subnet': {'host_routes':
[{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['host_routes'],
data['subnet']['host_routes'])
    def test_update_subnet_route_to_None(self):
        # Setting host_routes to None clears the list, and a later update
        # can populate it again.
        with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
                                       'nexthop': '1.2.3.4'}]) as subnet:
            data = {'subnet': {'host_routes': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['host_routes'])
            data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8',
                                                'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['host_routes'],
                             res['subnet']['host_routes'])
    def test_update_subnet_route_with_too_many_entries(self):
        # Updating with more host routes than allowed yields a 4xx error.
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes': [
                    {'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
                    {'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
                    {'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_delete_subnet_with_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
dns_nameservers = ['1.2.3.4']
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
dns_nameservers=dns_nameservers)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
host_routes=host_routes)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_dns_and_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
dns_nameservers = ['1.2.3.4']
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
dns_nameservers=dns_nameservers,
host_routes=host_routes)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_callback(self):
with self.subnet() as subnet,\
mock.patch.object(registry, 'notify') as notify:
errors = [
exceptions.NotificationError(
'fake_id', n_exc.NeutronException()),
]
notify.side_effect = [
exceptions.CallbackFailure(errors=errors), None
]
# Make sure the delete request fails
delete_request = self.new_delete_request('subnets',
subnet['subnet']['id'])
delete_response = delete_request.get_response(self.api)
self.assertTrue('NeutronError' in delete_response.json)
self.assertEqual('SubnetInUse',
delete_response.json['NeutronError']['type'])
# Make sure the subnet wasn't deleted
list_request = self.new_list_request(
'subnets', params="id=%s" % subnet['subnet']['id'])
list_response = list_request.get_response(self.api)
self.assertEqual(subnet['subnet']['id'],
list_response.json['subnets'][0]['id'])
    def _helper_test_validate_subnet(self, option, exception):
        # Force the given quota-style config option to 0 and assert that
        # plugin._validate_subnet raises the expected exception.  Also
        # checks the message does not contain 'built-in function id'
        # (regression guard: the builtin id() leaking into the error text).
        cfg.CONF.set_override(option, 0)
        with self.network() as network:
            subnet = {'network_id': network['network']['id'],
                      'cidr': '10.0.2.0/24',
                      'ip_version': 4,
                      'tenant_id': network['network']['tenant_id'],
                      'gateway_ip': '10.0.2.1',
                      'dns_nameservers': ['8.8.8.8'],
                      'host_routes': [{'destination': '135.207.0.0/16',
                                       'nexthop': '1.2.3.4'}]}
            plugin = manager.NeutronManager.get_plugin()
            e = self.assertRaises(exception,
                                  plugin._validate_subnet,
                                  context.get_admin_context(),
                                  subnet)
            self.assertThat(
                str(e),
                matchers.Not(matchers.Contains('built-in function id')))
def test_validate_subnet_dns_nameservers_exhausted(self):
self._helper_test_validate_subnet(
'max_dns_nameservers',
n_exc.DNSNameServersExhausted)
def test_validate_subnet_host_routes_exhausted(self):
self._helper_test_validate_subnet(
'max_subnet_host_routes',
n_exc.HostRoutesExhausted)
def test_port_prevents_network_deletion(self):
with self.port() as p:
self._delete('networks', p['port']['network_id'],
expected_code=webob.exc.HTTPConflict.code)
def test_port_prevents_subnet_deletion(self):
with self.port() as p:
self._delete('subnets', p['port']['fixed_ips'][0]['subnet_id'],
expected_code=webob.exc.HTTPConflict.code)
class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
_POOL_NAME = 'test-pool'
    def _test_create_subnetpool(self, prefixes, expected=None,
                                admin=False, **kwargs):
        """Create a subnetpool, validate it, and return the created pool.

        :param prefixes: list of CIDR prefixes for the pool.
        :param expected: optional dict of attributes compared against the
            created resource.
        :param admin: passed through to the subnetpool fixture.
        :param kwargs: extra subnetpool attributes; tenant_id defaults to
            the test tenant.
        """
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
            self._validate_resource(subnetpool, keys, 'subnetpool')
            if expected:
                self._compare_resource(subnetpool, expected, 'subnetpool')
        # Note: returned after the context manager exits.
        return subnetpool
def _validate_default_prefix(self, prefix, subnetpool):
self.assertEqual(subnetpool['subnetpool']['default_prefixlen'], prefix)
def _validate_min_prefix(self, prefix, subnetpool):
self.assertEqual(subnetpool['subnetpool']['min_prefixlen'], prefix)
def _validate_max_prefix(self, prefix, subnetpool):
self.assertEqual(subnetpool['subnetpool']['max_prefixlen'], prefix)
def test_create_subnetpool_empty_prefix_list(self):
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[],
name=self._POOL_NAME,
tenant_id=self._tenant_id,
min_prefixlen='21')
def test_create_subnetpool_ipv4_24_with_defaults(self):
subnet = netaddr.IPNetwork('10.10.10.0/24')
subnetpool = self._test_create_subnetpool([subnet.cidr],
name=self._POOL_NAME,
tenant_id=self._tenant_id,
min_prefixlen='21')
self._validate_default_prefix('21', subnetpool)
self._validate_min_prefix('21', subnetpool)
def test_create_subnetpool_ipv4_21_with_defaults(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool([subnet.cidr],
name=self._POOL_NAME,
tenant_id=self._tenant_id,
min_prefixlen='21')
self._validate_default_prefix('21', subnetpool)
self._validate_min_prefix('21', subnetpool)
def test_create_subnetpool_ipv4_default_prefix_too_small(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_prefixlen='20')
def test_create_subnetpool_ipv4_default_prefix_too_large(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
max_prefixlen=24,
default_prefixlen='32')
def test_create_subnetpool_ipv4_default_prefix_bounds(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME)
self._validate_min_prefix('8', subnetpool)
self._validate_default_prefix('8', subnetpool)
self._validate_max_prefix('32', subnetpool)
def test_create_subnetpool_ipv6_default_prefix_bounds(self):
subnet = netaddr.IPNetwork('fe80::/48')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME)
self._validate_min_prefix('64', subnetpool)
self._validate_default_prefix('64', subnetpool)
self._validate_max_prefix('128', subnetpool)
def test_create_subnetpool_ipv4_supported_default_prefix(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_prefixlen='26')
self._validate_default_prefix('26', subnetpool)
def test_create_subnetpool_ipv4_supported_min_prefix(self):
subnet = netaddr.IPNetwork('10.10.10.0/24')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='26')
self._validate_min_prefix('26', subnetpool)
self._validate_default_prefix('26', subnetpool)
def test_create_subnetpool_ipv4_default_prefix_smaller_than_min(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
default_prefixlen='22',
min_prefixlen='23')
def test_create_subnetpool_mixed_ip_version(self):
subnet_v4 = netaddr.IPNetwork('10.10.10.0/21')
subnet_v6 = netaddr.IPNetwork('fe80::/48')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet_v4.cidr, subnet_v6.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
def test_create_subnetpool_ipv6_with_defaults(self):
subnet = netaddr.IPNetwork('fe80::/48')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='48')
self._validate_default_prefix('48', subnetpool)
self._validate_min_prefix('48', subnetpool)
def test_get_subnetpool(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
req = self.new_show_request('subnetpools',
subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(subnetpool['subnetpool']['id'],
res['subnetpool']['id'])
def test_get_subnetpool_different_tenants_not_shared(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
shared=False,
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
req = self.new_show_request('subnetpools',
subnetpool['subnetpool']['id'])
neutron_context = context.Context('', 'not-the-owner')
req.environ['neutron.context'] = neutron_context
res = req.get_response(self.api)
self.assertEqual(res.status_int, 404)
def test_get_subnetpool_different_tenants_shared(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
None,
True,
name=self._POOL_NAME,
min_prefixlen='24',
shared=True)
req = self.new_show_request('subnetpools',
subnetpool['subnetpool']['id'])
neutron_context = context.Context('', self._tenant_id)
req.environ['neutron.context'] = neutron_context
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(subnetpool['subnetpool']['id'],
res['subnetpool']['id'])
def test_list_subnetpools_different_tenants_shared(self):
self._test_create_subnetpool(['10.10.10.0/24'],
None,
True,
name=self._POOL_NAME,
min_prefixlen='24',
shared=True)
admin_res = self._list('subnetpools')
mortal_res = self._list('subnetpools',
neutron_context=context.Context('', 'not-the-owner'))
self.assertEqual(len(admin_res['subnetpools']), 1)
self.assertEqual(len(mortal_res['subnetpools']), 1)
def test_list_subnetpools_different_tenants_not_shared(self):
self._test_create_subnetpool(['10.10.10.0/24'],
None,
True,
name=self._POOL_NAME,
min_prefixlen='24',
shared=False)
admin_res = self._list('subnetpools')
mortal_res = self._list('subnetpools',
neutron_context=context.Context('', 'not-the-owner'))
self.assertEqual(len(admin_res['subnetpools']), 1)
self.assertEqual(len(mortal_res['subnetpools']), 0)
def test_delete_subnetpool(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
req = self.new_delete_request('subnetpools',
subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 204)
def test_delete_nonexistent_subnetpool(self):
req = self.new_delete_request('subnetpools',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
res = req.get_response(self._api_for_resource('subnetpools'))
self.assertEqual(res.status_int, 404)
def test_update_subnetpool_prefix_list_append(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['10.10.8.0/21', '3.3.3.0/24',
'2.2.2.0/24']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = self.deserialize(self.fmt, req.get_response(api))
self.assertItemsEqual(res['subnetpool']['prefixes'],
['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24'])
def test_update_subnetpool_prefix_list_compaction(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['10.10.10.0/24',
'10.10.11.0/24']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = self.deserialize(self.fmt, req.get_response(api))
self.assertItemsEqual(res['subnetpool']['prefixes'],
['10.10.10.0/23'])
def test_illegal_subnetpool_prefix_list_update(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['10.10.11.0/24']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = req.get_response(api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_prefix(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'default_prefixlen': '26'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = self.deserialize(self.fmt, req.get_response(api))
self.assertEqual(res['subnetpool']['default_prefixlen'], 26)
def test_update_subnetpool_min_prefix(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'min_prefixlen': '21'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnetpool']['min_prefixlen'], 21)
def test_update_subnetpool_min_prefix_larger_than_max(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='24')
data = {'subnetpool': {'min_prefixlen': '28'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_max_prefix(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='24')
data = {'subnetpool': {'max_prefixlen': '26'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnetpool']['max_prefixlen'], 26)
def test_update_subnetpool_max_prefix_less_than_min(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'max_prefixlen': '21'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_max_prefix_less_than_default(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_prefixlen='24')
data = {'subnetpool': {'max_prefixlen': '22'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_prefix_less_than_min(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnetpool': {'default_prefixlen': '20'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_prefix_larger_than_max(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='24')
data = {'subnetpool': {'default_prefixlen': '28'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_prefix_list_mixed_ip_version(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['fe80::/48']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_quota(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24',
default_quota=10)
self.assertEqual(initial_subnetpool['subnetpool']['default_quota'],
10)
data = {'subnetpool': {'default_quota': '1'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnetpool']['default_quota'], 1)
def test_allocate_any_subnet_with_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a subnet allocation (no CIDR)
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'prefixlen': 24,
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
subnet = netaddr.IPNetwork(res['subnet']['cidr'])
self.assertEqual(subnet.prefixlen, 24)
# Assert the allocated subnet CIDR is a subnet of our pool prefix
supernet = netaddr.smallest_matching_cidr(
subnet,
sp['subnetpool']['prefixes'])
self.assertEqual(supernet, netaddr.IPNetwork('10.10.0.0/16'))
def test_allocate_any_subnet_with_default_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request any subnet allocation using default prefix
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
subnet = netaddr.IPNetwork(res['subnet']['cidr'])
self.assertEqual(subnet.prefixlen,
int(sp['subnetpool']['default_prefixlen']))
def test_allocate_specific_subnet_with_mismatch_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'prefixlen': 26,
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet_with_matching_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'prefixlen': 24,
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
# Assert the allocated subnet CIDR is what we expect
subnet = netaddr.IPNetwork(res['subnet']['cidr'])
self.assertEqual(subnet, netaddr.IPNetwork('10.10.1.0/24'))
def test_allocate_specific_subnet_non_existent_prefix(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '192.168.1.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 500)
def test_allocate_specific_subnet_already_allocated(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.10.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
# Allocate the subnet
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
# Attempt to allocate it again
res = req.get_response(self.api)
# Assert error
self.assertEqual(res.status_int, 500)
def test_allocate_specific_subnet_prefix_too_small(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.0.0/20',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet_prefix_specific_gw(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'gateway_ip': '10.10.1.254',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'], '10.10.1.254')
def test_allocate_specific_subnet_prefix_allocation_pools(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
pools = [{'start': '10.10.1.2',
'end': '10.10.1.253'}]
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'gateway_ip': '10.10.1.1',
'ip_version': 4,
'allocation_pools': pools,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['allocation_pools'][0]['start'],
pools[0]['start'])
self.assertEqual(res['subnet']['allocation_pools'][0]['end'],
pools[0]['end'])
def test_allocate_any_subnet_prefix_allocation_pools(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request an any subnet allocation
pools = [{'start': '10.10.10.1',
'end': '10.10.10.254'}]
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'prefixlen': '24',
'ip_version': 4,
'allocation_pools': pools,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet_prefix_too_large(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.0.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_delete_subnetpool_existing_allocations(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.0.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
req.get_response(self.api)
req = self.new_delete_request('subnetpools',
sp['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_subnet_over_quota(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_quota=2048)
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'ip_version': 4,
'prefixlen': 21,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
# Allocate a subnet to fill the quota
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
# Attempt to allocate a /21 again
res = req.get_response(self.api)
# Assert error
self.assertEqual(res.status_int, 409)
def test_allocate_any_ipv4_subnet_ipv6_pool(self):
with self.network() as network:
sp = self._test_create_subnetpool(['2001:db8:1:2::/63'],
tenant_id=self._tenant_id,
name=self._POOL_NAME)
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
class DbModelTestCase(base.BaseTestCase):
    """DB model tests."""

    def test_repr(self):
        """Test the string representation of 'model' classes.

        The repr must embed the fully-qualified class name, the object
        identity and the (mostly unset) column values in a fixed order.
        """
        network = models_v2.Network(name="net_net", status="OK",
                                    admin_state_up=True)
        actual_repr_output = repr(network)
        exp_start_with = "<neutron.db.models_v2.Network"
        exp_middle = "[object at %x]" % id(network)
        exp_end_with = (" {tenant_id=None, id=None, "
                        "name='net_net', status='OK', "
                        "admin_state_up=True, shared=None, "
                        "mtu=None, vlan_transparent=None}>")
        final_exp = exp_start_with + exp_middle + exp_end_with
        # Expected value first, matching the assertEqual(expected, actual)
        # convention used elsewhere in this file for readable failures.
        self.assertEqual(final_exp, actual_repr_output)
class TestNeutronDbPluginV2(base.BaseTestCase):
    """Unit Tests for NeutronDbPluginV2 IPAM Logic."""

    def test_generate_ip(self):
        # When _try_generate_ip succeeds on the first attempt, the
        # availability ranges must not be rebuilt.
        with mock.patch.object(non_ipam.IpamNonPluggableBackend,
                               '_try_generate_ip') as generate:
            with mock.patch.object(non_ipam.IpamNonPluggableBackend,
                                   '_rebuild_availability_ranges') as rebuild:
                non_ipam.IpamNonPluggableBackend._generate_ip('c', 's')
                generate.assert_called_once_with('c', 's')
                self.assertEqual(0, rebuild.call_count)

    def test_generate_ip_exhausted_pool(self):
        # An exhausted pool triggers exactly one availability-range rebuild
        # followed by a successful retry of the IP generation.
        with mock.patch.object(non_ipam.IpamNonPluggableBackend,
                               '_try_generate_ip') as generate:
            with mock.patch.object(non_ipam.IpamNonPluggableBackend,
                                   '_rebuild_availability_ranges') as rebuild:
                exception = n_exc.IpAddressGenerationFailure(net_id='n')
                # fail first call but not second
                generate.side_effect = [exception, None]
                non_ipam.IpamNonPluggableBackend._generate_ip('c', 's')
                self.assertEqual(2, generate.call_count)
                rebuild.assert_called_once_with('c', 's')

    def _validate_rebuild_availability_ranges(self, pools, allocations,
                                              expected):
        # Build chained query stubs so _rebuild_availability_ranges sees the
        # given pools/allocations without a real database session.  Each
        # chained call (options/with_lockmode/filter_by) returns the stub
        # itself until the final filter_by yields the fixture data.
        ip_qry = mock.Mock()
        ip_qry.with_lockmode.return_value = ip_qry
        ip_qry.filter_by.return_value = allocations
        pool_qry = mock.Mock()
        pool_qry.options.return_value = pool_qry
        pool_qry.with_lockmode.return_value = pool_qry
        pool_qry.filter_by.return_value = pools

        def return_queries_side_effect(*args, **kwargs):
            # Route session.query(model) to the matching stub; returns None
            # (implicitly) for any other model.
            if args[0] == models_v2.IPAllocation:
                return ip_qry
            if args[0] == models_v2.IPAllocationPool:
                return pool_qry

        context = mock.Mock()
        context.session.query.side_effect = return_queries_side_effect
        subnets = [mock.MagicMock()]
        db_base_plugin_v2.NeutronDbPluginV2._rebuild_availability_ranges(
            context, subnets)
        # Collect the availability ranges the plugin re-added to the
        # session, in call order, and compare against the expectation.
        actual = [[args[0].allocation_pool_id,
                   args[0].first_ip, args[0].last_ip]
                  for _name, args, _kwargs in context.session.add.mock_calls]
        self.assertEqual(expected, actual)

    def test_rebuild_availability_ranges(self):
        # IPv4: the rebuilt ranges must exclude the allocated addresses and
        # anything outside the pools (e.g. .78 and .11 are out of pool 'a').
        pools = [{'id': 'a',
                  'first_ip': '192.168.1.3',
                  'last_ip': '192.168.1.10'},
                 {'id': 'b',
                  'first_ip': '192.168.1.100',
                  'last_ip': '192.168.1.120'}]
        allocations = [{'ip_address': '192.168.1.3'},
                       {'ip_address': '192.168.1.78'},
                       {'ip_address': '192.168.1.7'},
                       {'ip_address': '192.168.1.110'},
                       {'ip_address': '192.168.1.11'},
                       {'ip_address': '192.168.1.4'},
                       {'ip_address': '192.168.1.111'}]
        expected = [['a', '192.168.1.5', '192.168.1.6'],
                    ['a', '192.168.1.8', '192.168.1.10'],
                    ['b', '192.168.1.100', '192.168.1.109'],
                    ['b', '192.168.1.112', '192.168.1.120']]
        self._validate_rebuild_availability_ranges(pools, allocations,
                                                   expected)

    def test_rebuild_ipv6_availability_ranges(self):
        # IPv6 variant: exercises range splitting across very large
        # (128-bit) address spans.
        pools = [{'id': 'a',
                  'first_ip': '2001::1',
                  'last_ip': '2001::50'},
                 {'id': 'b',
                  'first_ip': '2001::100',
                  'last_ip': '2001::ffff:ffff:ffff:fffe'}]
        allocations = [{'ip_address': '2001::10'},
                       {'ip_address': '2001::45'},
                       {'ip_address': '2001::60'},
                       {'ip_address': '2001::111'},
                       {'ip_address': '2001::200'},
                       {'ip_address': '2001::ffff:ffff:ffff:ff10'},
                       {'ip_address': '2001::ffff:ffff:ffff:f2f0'}]
        expected = [['a', '2001::1', '2001::f'],
                    ['a', '2001::11', '2001::44'],
                    ['a', '2001::46', '2001::50'],
                    ['b', '2001::100', '2001::110'],
                    ['b', '2001::112', '2001::1ff'],
                    ['b', '2001::201', '2001::ffff:ffff:ffff:f2ef'],
                    ['b', '2001::ffff:ffff:ffff:f2f1',
                     '2001::ffff:ffff:ffff:ff0f'],
                    ['b', '2001::ffff:ffff:ffff:ff11',
                     '2001::ffff:ffff:ffff:fffe']]
        self._validate_rebuild_availability_ranges(pools, allocations,
                                                   expected)

    def _test__allocate_ips_for_port(self, subnets, port, expected):
        # Drive _allocate_ips_for_port with stubbed subnet lookup and
        # uniqueness check so only the allocation logic itself is tested.
        plugin = db_base_plugin_v2.NeutronDbPluginV2()
        with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
                               'get_subnets') as get_subnets:
            with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
                                   '_check_unique_ip') as check_unique:
                context = mock.Mock()
                get_subnets.return_value = subnets
                check_unique.return_value = True
                actual = plugin._allocate_ips_for_port(context, port)
                self.assertEqual(expected, actual)

    def test__allocate_ips_for_port_2_slaac_subnets(self):
        # A port on a network with two SLAAC subnets should receive one
        # EUI-64 derived address per subnet.
        subnets = [
            {
                'cidr': u'2001:100::/64',
                'enable_dhcp': True,
                'gateway_ip': u'2001:100::1',
                'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13',
                'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
                'ip_version': 6,
                'ipv6_address_mode': None,
                'ipv6_ra_mode': u'slaac'},
            {
                'cidr': u'2001:200::/64',
                'enable_dhcp': True,
                'gateway_ip': u'2001:200::1',
                'id': u'dc813d3d-ed66-4184-8570-7325c8195e28',
                'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
                'ip_version': 6,
                'ipv6_address_mode': None,
                'ipv6_ra_mode': u'slaac'}]
        port = {'port': {
            'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
            'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
            'mac_address': '12:34:56:78:44:ab',
            'device_owner': 'compute'}}
        # Expected addresses are derived from the MAC via EUI-64, one per
        # SLAAC subnet.
        expected = []
        for subnet in subnets:
            addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
                subnet['cidr'], port['port']['mac_address']))
            expected.append({'ip_address': addr, 'subnet_id': subnet['id']})
        self._test__allocate_ips_for_port(subnets, port, expected)
class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
                                       testlib_api.SqlTestCase):
    """Tests for NeutronDbPluginV2 as Mixin.

    While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as
    a complete plugin, this test case verifies abilities of NeutronDbPlugin
    which are provided to other plugins (e.g. DB operations). This test case
    may include tests only for NeutronDbPlugin, so this should not be used in
    unit tests for other plugins.
    """

    def setUp(self):
        super(NeutronDbPluginV2AsMixinTestCase, self).setUp()
        self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
        self.context = context.get_admin_context()
        # Baseline network body reused (and mutated) by the tests below.
        self.net_data = {'network': {'id': 'fake-id',
                                     'name': 'net1',
                                     'admin_state_up': True,
                                     'tenant_id': 'test-tenant',
                                     'shared': False}}

    def test_create_network_with_default_status(self):
        # A network created without an explicit status defaults to ACTIVE,
        # with all other requested attributes preserved.
        net = self.plugin.create_network(self.context, self.net_data)
        default_net_create_status = 'ACTIVE'
        expected = [('id', 'fake-id'), ('name', 'net1'),
                    ('admin_state_up', True), ('tenant_id', 'test-tenant'),
                    ('shared', False), ('status', default_net_create_status)]
        for k, v in expected:
            self.assertEqual(net[k], v)

    def test_create_network_with_status_BUILD(self):
        # An explicitly requested status must not be overridden.
        self.net_data['network']['status'] = 'BUILD'
        net = self.plugin.create_network(self.context, self.net_data)
        self.assertEqual(net['status'], 'BUILD')

    def test_get_user_allocation_for_dhcp_port_returns_none(self):
        plugin = manager.NeutronManager.get_plugin()
        with self.network() as net, self.network() as net1:
            with self.subnet(network=net, cidr='10.0.0.0/24') as subnet,\
                    self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1:
                with self.port(subnet=subnet, device_owner='network:dhcp'),\
                        self.port(subnet=subnet1):
                    # check that user allocations on another network don't
                    # affect _subnet_get_user_allocation method
                    res = plugin._subnet_get_user_allocation(
                        context.get_admin_context(),
                        subnet['subnet']['id'])
                    self.assertIsNone(res)

    def test__validate_network_subnetpools(self):
        # A network whose existing subnet came from a pool must reject a
        # new subnet with a mismatching (here: absent) pool affinity.
        network = models_v2.Network()
        network.subnets = [models_v2.Subnet(subnetpool_id='test_id',
                                            ip_version=4)]
        new_subnetpool_id = None
        self.assertRaises(n_exc.NetworkSubnetPoolAffinityError,
                          self.plugin._validate_network_subnetpools,
                          network, new_subnetpool_id, 4)
class TestNetworks(testlib_api.SqlTestCase):
    """Tests for updating the 'shared' flag on networks that are in use."""

    def setUp(self):
        super(TestNetworks, self).setUp()
        self._tenant_id = 'test-tenant'
        # Update the plugin
        self.setup_coreplugin(DB_PLUGIN_KLASS)

    def _create_network(self, plugin, ctx, shared=True):
        """Create a network; return the request body and the new network id."""
        body = {'network': {'name': 'net',
                            'shared': shared,
                            'admin_state_up': True,
                            'tenant_id': self._tenant_id}}
        created = plugin.create_network(ctx, body)
        return (body, created['id'])

    def _create_port(self, plugin, ctx, net_id, device_owner, tenant_id):
        """Create a port on ``net_id`` with the given owner and tenant."""
        body = {'port': {'name': 'port',
                         'network_id': net_id,
                         'mac_address': attributes.ATTR_NOT_SPECIFIED,
                         'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                         'admin_state_up': True,
                         'device_id': 'device_id',
                         'device_owner': device_owner,
                         'tenant_id': tenant_id}}
        plugin.create_port(ctx, body)

    def _test_update_shared_net_used(self,
                                     device_owner,
                                     expected_exception=None):
        """Un-share a network in use; optionally expect a specific error."""
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        network, net_id = self._create_network(plugin, ctx)
        # The port belongs to a *different* tenant, so un-sharing the
        # network may strand it.
        self._create_port(plugin,
                          ctx,
                          net_id,
                          device_owner,
                          self._tenant_id + '1')
        network['network']['shared'] = False
        if expected_exception:
            with testlib_api.ExpectedException(expected_exception):
                plugin.update_network(ctx, net_id, network)
        else:
            plugin.update_network(ctx, net_id, network)

    def test_update_shared_net_used_fails(self):
        self._test_update_shared_net_used('', n_exc.InvalidSharedSetting)

    def test_update_shared_net_used_as_router_gateway(self):
        self._test_update_shared_net_used(
            constants.DEVICE_OWNER_ROUTER_GW)

    def test_update_shared_net_used_by_floating_ip(self):
        self._test_update_shared_net_used(
            constants.DEVICE_OWNER_FLOATINGIP)
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Additional decorator for extending template context with new objects."""
import six
from flask import g, request, current_app, has_app_context
from werkzeug.utils import cached_property
def register_template_context_processor(f):
    """Register globally the context processor.

    Appends ``f`` to the per-request buffer on ``flask.g``; the buffer is
    created by ``setup_app``'s before-request hook, so this may only be
    called inside a request context after the extension is initialized.
    """
    g._template_context_processor.append(f)
class template_args(object):
    """Register a context processor function for given endpoint.

    If you need to pass some extra parameters to the template,
    you can override particular template context of any view function.

    Decorated function is executed for **every** single ``render_template``
    executed within you view. For any execution heavy functions use
    caching per request.

    These arguments are local for this request and will be discarded
    in the next request.

    Any value passed through ``_invenio_template_args`` will override whatever
    parent view function passed to the template.

    Example of usage in an extension:

    .. code-block:: python

        def setup_app(app):
            @template_args('search.index', app=app)
            def foo():
                return dict(foo='bar')

    Example of usage in an overlay ``views.py``:

    .. code-block:: python

        from invenio.modules.search.views.search import index

        @template_args(index)
        def bar():
            return {'baz': 'bar'}

    If you define endpoint as string then ``template_args`` should only be
    called from an extensions ``setup_app``.

    .. note::

        Make sure that each ``@template_args`` is called (initialized)
        **only** once.
    """

    def __init__(self, endpoint, app=None):
        """Initialize decorator.

        :param endpoint: endpoint name (string) or view function whose
            template context should be extended.
        :param app: optional Flask application; when omitted the
            application is resolved lazily from ``current_app``.
        """
        self._endpoint = endpoint
        self._app = app

    @cached_property
    def endpoint(self):
        """Return view function for given endpoint."""
        if isinstance(self._endpoint, six.string_types):
            return self.app.view_functions[self._endpoint]
        return self._endpoint

    @cached_property
    def app(self):
        """Return ``app`` from constructor or ``current_app``."""
        if self._app is None and has_app_context():
            return current_app._get_current_object()
        if self._app is None:
            raise Exception('Application context or app argument needed.')
        return self._app

    def _register(self, func):
        """Register a context processor function on the view function."""
        if not hasattr(self.endpoint, '_invenio_template_args'):
            setattr(self.endpoint, '_invenio_template_args', [])
        self.endpoint._invenio_template_args.append(func)

    def __call__(self, func):
        """Decorator."""
        if isinstance(self._endpoint, six.string_types):
            # Endpoint given by name: the view function may not be
            # registered yet, so defer the lookup until first request.
            @self.app.before_first_request
            def appender():
                self._register(func)
        else:
            self._register(func)
        # Fix: return the decorated function so the name it is bound to is
        # not silently rebound to ``None`` by the decorator.
        return func
def setup_app(app):
    """Initialize template context processor extension.

    Installs a before-request hook that creates the per-request processor
    buffer on ``flask.g`` and a Jinja2 context processor that merges in
    dynamic template arguments.  Returns the app to allow chaining.
    """
    @app.before_request
    def reset_template_context():
        """Reset custom template context buffer."""
        g._template_context_processor = []

    @app.context_processor
    def inject_template_context():
        """Update `Jinja2` context by dynamic context processors."""
        context = {}
        # Processors registered during this request via
        # ``register_template_context_processor``.
        for func in getattr(g, '_template_context_processor', []):
            context.update(func())
        # used by ``template_args`` decorator.
        endpoint = current_app.view_functions.get(request.endpoint)
        for func in getattr(endpoint, '_invenio_template_args', []):
            context.update(func())
        # Clear the per-request buffer after use: dynamically registered
        # processors apply only to the next template rendered after
        # registration, not to every render in the request.
        reset_template_context()
        return context
    return app
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.