code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
"""
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by
detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
# Build the test images: the original astronaut image, an
# affine-warped (scaled + translated) copy, and a rotated copy.
img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)

extractor = BRIEF()


def _detect_and_describe(image):
    """Return (keypoints, descriptors) for Harris corners of *image*.

    Keypoints for which BRIEF cannot compute a descriptor (e.g. too
    close to the image border) are dropped via ``extractor.mask``.
    """
    keypoints = corner_peaks(corner_harris(image), min_distance=5)
    extractor.extract(image, keypoints)
    return keypoints[extractor.mask], extractor.descriptors


keypoints1, descriptors1 = _detect_and_describe(img1)
keypoints2, descriptors2 = _detect_and_describe(img2)
keypoints3, descriptors3 = _detect_and_describe(img3)

# Match the binary descriptors (Hamming distance); cross-checking keeps
# only mutually-best pairs.
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()

plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')

plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')

plt.show()
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import MatchesFilterBase
#-------------------------------------------------------------------------
#
# MatchesFilter
#
#-------------------------------------------------------------------------
class MatchesFilter(MatchesFilterBase):
    """Rule that checks notes against another, named filter.

    All matching logic lives in MatchesFilterBase; this subclass only
    supplies the translatable UI strings and the object namespace.
    """
    # Rule title shown in the filter editor (translatable).
    name = _('Notes matching the <filter>')
    # Longer description shown in the UI (translatable).
    description = _("Matches notes matched "
                    "by the specified filter name")
    # Primary-object class this rule applies to.
    namespace = 'Note'
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Subtitle Decoder
Uses some library files from
http://xbmc-addon-repository.googlecode.com
Thanks!
"""
# import lxml
import os.path
import re
import shutil
import sys
#import HTMLParser
import altfuncs
from bs4 import BeautifulSoup
from crunchyDec import CrunchyDec
from unidecode import unidecode
# ----------
def decode(page_url):
print '''
--------------------------
---- Start New Export ----
--------------------------
CrunchyRoll Downloader Toolkit DX v0.98
Crunchyroll hasn't changed anything.
If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators.
----------
Booting up...
'''
if page_url == '':
page_url = raw_input('Please enter Crunchyroll video URL:\n')
lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
#player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)
#h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
if len(os.path.join('export', title+'.ass')) > 255:
title = re.findall('^(.+?) \- ', title)[0]
### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}
rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
### End stolen code ###
media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
try:
if '4' in xmlconfig.find_all('code')[0]:
print xmlconfig.find_all('msg')[0].text
sys.exit()
except IndexError:
pass
xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
xmllist = unidecode(xmllist).replace('><', '>\n<')
if '<media_id>None</media_id>' in xmllist:
print 'The video has hardcoded subtitles.'
hardcoded = True
sub_id = False
else:
try:
sub_id2 = re.findall("id=([0-9]+)", xmllist)
sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
hardcoded = False
except IndexError:
print "The video's subtitles cannot be found, or are region-locked."
hardcoded = True
sub_id = False
sub_id3 = [word.replace('[English (US)]','eng') for word in sub_id3]
sub_id3 = [word.replace('[Deutsch]','deu') for word in sub_id3]
sub_id3 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id3]
sub_id3 = [word.replace('[Francais (France)]','fre') for word in sub_id3]
sub_id3 = [word.replace('[Espanol (Espana)]','spa') for word in sub_id3]
sub_id3 = [word.replace('[Espanol]','spa') for word in sub_id3]
sub_id3 = [word.replace('[Italiano]','ita') for word in sub_id3]
sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
#sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
sub_id4 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id4]#else:
# try:
# sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
# hardcoded = False
# lang = lang1
# except IndexError:
# try:
# sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
# print 'Language not found, reverting to ' + lang2 + '.'
# hardcoded = False
# lang = lang2
# except IndexError:
# try:
# sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0] # default back to English
# print 'Backup language not found, reverting to English.'
# hardcoded = False
# lang = 'English'
# except IndexError:
# print "The video's subtitles cannot be found, or are region-locked."
# hardcoded = True
# sub_id = False
if not hardcoded:
for i in sub_id2:
#xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
formattedsubs = CrunchyDec().returnsubs(xmlsub)
if formattedsubs is None:
continue
#subfile = open(eptitle + '.ass', 'wb')
subfile = open(os.path.join('export', title+'['+sub_id3.pop(0)+']'+sub_id4.pop(0)+'.ass'), 'wb')
subfile.write(formattedsubs.encode('utf-8-sig'))
subfile.close()
#shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))
print 'Subtitles for '+title+' have been downloaded'
if __name__ == '__main__':
    # The page URL may be passed as the first CLI argument; an empty
    # string makes decode() prompt for it interactively.
    args = sys.argv[1:]
    decode(args[0] if args else '')
# OData Python Client and Server Libraries ver. 1.0.0
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from codegen_initializer import *
import codegen_template as TP
# Indentation unit prepended to generated class members.
INDENT = " "


class codegen_writer:
    """Emit Python client proxy code for an OData service model.

    Walks the per-schema model prepared by ``codegen_initializer``
    (``_code_gen_map``) and writes enum, complex-type, entity-type and
    entity-container classes — rendered through the string templates in
    ``codegen_template`` — into ``<config file_name>.py``.
    """

    def __init__(self, initializer):
        # ``initializer`` carries the parsed schemas and the output
        # configuration (file name, etc.).
        self._initializer = initializer

    def begin_generate_file(self):
        """Generate the complete output module for every schema."""
        if self._initializer is None:
            print "Initialize failed"
            return
        outf = open(self._initializer.get_config().file_name + ".py", "w")
        self._generate_import_files(outf)
        # Emission order matters: enums first (referenced by complex
        # types), then complex, entity, container, and finally the
        # derived-creator maps which reference all generated classes.
        for schema_name in self._initializer._code_gen_map:
            self._generate_enum_types(outf, self._initializer._code_gen_map[schema_name])
            self._generate_complex_types(outf, self._initializer._code_gen_map[schema_name])
            self._generate_entity_types(outf, self._initializer._code_gen_map[schema_name])
            self._generate_entity_container(outf, self._initializer._code_gen_map[schema_name])
            self._generate_derived_creator(outf, self._initializer._code_gen_map[schema_name])

    def _primitive_resolver(self, _info):
        """Return source text that converts ``primitive_value`` into the
        Python equivalent of ``_info.class_member_type`` (types are the
        C++ names used by the shared codegen model)."""
        ret = r"eval(primitive_value.to_string())"
        if _info.class_member_type in ("uint8_t", "int8_t", "int16_t", "int32_t", "int64_t"):
            ret = r"int(primitive_value.to_string())"
        elif _info.class_member_type in ("float", "double", "long double"):
            ret = r"float(primitive_value.to_string())"
        elif _info.class_member_type == "::utility::string_t":
            ret = r"primitive_value.to_string()"
        elif _info.class_member_type == "bool":
            ret = r"primitive_value.to_string() == 'true'"
        return ret

    def _generate_import_files(self, outf):
        """Write the fixed import header of the generated module."""
        outf.write(r"import odata_client_python" + '\n')
        outf.write(r"from odata_service_context import *" + '\n')
        outf.write(r"from codegen_base import *" + '\n')
        outf.write('\n')

    def _generate_class_begin(self, outf, class_info):
        """Write the ``class Name(Base):`` header line."""
        if class_info.base_class_name:
            outf.write(r"class {}({}):".format(class_info.class_name, class_info.base_class_name) + "\n")
        else:
            outf.write(r"class {}:".format(class_info.class_name) + "\n")

    def _generate_derived_creator(self, outf, _schema_info):
        """Write ``_derived_creator_map`` assignments mapping a base
        class to its derived-class constructors (used for polymorphic
        deserialization)."""
        for class_info in _schema_info.class_list:
            if class_info.type != CLASS_TYPE.E_CLASS_COMPLEX and class_info.type != CLASS_TYPE.E_CLASS_ENTITY:
                continue
            if class_info.class_name in _schema_info.derived_classes:
                derived_classes = _schema_info.derived_classes[class_info.class_name]
                outf.write(r"{}._derived_creator_map = ".format(class_info.class_name) + '{')
                for derived_class_name in derived_classes:
                    outf.write(r'"{0}" : {0}, '.format(derived_class_name))
                outf.write('}\n')

    def _generate_enum_types(self, outf, _schema_info):
        """Write one Python class per EDM enum type in the schema."""
        for class_info in _schema_info.class_list:
            if class_info.type != CLASS_TYPE.E_CLASS_ENUM:
                continue
            self._generate_class_begin(outf, class_info)
            class_property_map = _schema_info.class_property_map[class_info.class_name]
            for property_name in class_property_map:
                _info = class_property_map[property_name]
                self._generate_enum_member(outf, _info)
            self._generate_common_methods_in_enum(outf, class_info, class_property_map)
            outf.write("\n")

    def _generate_common_methods_in_enum(self, outf, class_info, class_property_map):
        """Write the namespace accessor and the enum<->string
        conversion helpers for an enum class."""
        outf.write(TP.GET_ENUM_TYPE_NAMESPACE.format(class_info.edm_namespace))
        outf.write(TP.BEGIN_GET_ENUM_TYPE_FROM_STRING)
        for property_name in class_property_map:
            _info = class_property_map[property_name]
            outf.write(TP.ON_GET_ENUM_TYPE_FROM_STRING.format(class_info.class_name, _info.edm_name, _info.class_member_name))
        outf.write(TP.END_GET_ENUM_TYPE_FROM_STRING)
        outf.write(TP.BEGIN_GET_STRING_FROM_ENUM_TYPE)
        for property_name in class_property_map:
            _info = class_property_map[property_name]
            outf.write(TP.ON_GET_STRING_FROM_ENUM_TYPE.format(class_info.class_name, _info.edm_name, _info.class_member_name))
        outf.write(TP.END_GET_STRING_FROM_ENUM_TYPE)

    def _generate_enum_member(self, outf, _info):
        """Write a single ``NAME = value`` enum member line."""
        outf.write(INDENT + _info.class_member_name + " = " + _info.default_value + "\n")

    def _generate_complex_types(self, outf, _schema_info):
        """Write one Python class per EDM complex type in the schema."""
        for class_info in _schema_info.class_list:
            if class_info.type != CLASS_TYPE.E_CLASS_COMPLEX:
                continue
            self._generate_class_begin(outf, class_info)
            class_property_map = _schema_info.class_property_map[class_info.class_name]
            self._generate_common_methods_in_complex(outf, class_info, class_property_map)
            for property in class_property_map:
                _info = class_property_map[property]
                self._generate_property_in_complex(outf, class_info, _info)
            self._generate_complex_instance_creator(outf, class_info, class_property_map)
            self._generate_to_complex_value(outf, class_info, class_property_map)
            outf.write("\n")

    def _generate_complex_constructor(self, outf, class_info, class_property_map):
        """Write ``__init__`` defaulting every data property; operation
        members (actions/functions) are methods, not fields."""
        outf.write(TP.BEGIN_COMPLEX_CONSTRUCTOR.format(class_info.base_class_name))
        for property in class_property_map:
            _info = class_property_map[property]
            if _info.type == PROPERTY_TYPE.E_FUNCTION or _info.type == PROPERTY_TYPE.E_ACTION:
                continue
            outf.write(TP.ON_PROPERTY_IN_COMPLEX_CONSTRUCTOR.format(_info.class_member_name, _info.default_value))
        outf.write('\n')

    def _generate_common_methods_in_complex(self, outf, class_info, class_property_map):
        """Write constructor, root-URL accessor and EDM metadata."""
        self._generate_complex_constructor(outf, class_info, class_property_map)
        outf.write(TP.GET_ROOT_URL)
        outf.write(TP.EDM_INFO.format(class_info.class_name, class_info.edm_namespace, class_info.edm_name))

    def _generate_property_in_complex(self, outf, class_info, _info):
        """Write the wire<->member mapping for one complex-type
        property, dispatching on the property kind."""
        if _info.type == PROPERTY_TYPE.E_PRIMITIVE:
            outf.write(TP.PRIMITIVE_PROPERTY_IN_COMPLEX_MAPPING.format(_info.class_member_name, _info.edm_name, self._primitive_resolver(_info)))
        elif _info.type == PROPERTY_TYPE.E_COMPLEX:
            outf.write(TP.COMPLEX_PROPERTY_IN_COMPLEX_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_ENUM:
            outf.write(TP.ENUM_PROPERTY_IN_COMPLEX_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_COLLECTION_PRIMITIVE:
            outf.write(TP.COLLECTION_PRIMITIVE_PROPERTY_IN_COMPLEX_MAPPING.format(_info.class_member_name, _info.edm_name, self._primitive_resolver(_info)))
        elif _info.type == PROPERTY_TYPE.E_COLLECTION_ENUM:
            outf.write(TP.COLLECTION_ENUM_PROPERTY_IN_COMPLEX_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        outf.write('\n')

    def _generate_complex_instance_creator(self, outf, class_info, class_property_map):
        """Write the factory that builds an instance from a payload,
        copying every data property."""
        outf.write(TP.BEGIN_COMPLEX_INSTANCE_CREATOR.format(class_info.class_name, class_info.base_class_name))
        for property in class_property_map:
            _info = class_property_map[property]
            if _info.type == PROPERTY_TYPE.E_ACTION or _info.type == PROPERTY_TYPE.E_FUNCTION:
                continue
            outf.write(TP.ON_PROPERTY_IN_COMPLEX_INSTANCE_CREATOR.format(_info.class_member_name))
        outf.write(TP.END_COMPLEX_INSTANCE_CREATOR)

    def _generate_to_complex_value(self, outf, class_info, class_property_map):
        """Write the serializer producing a complex value; navigation
        and operation members are excluded from the payload."""
        if class_info.base_class_name == "type_base":
            outf.write(TP.BEGIN_TO_COMPLEX_VALUE)
        else:
            # Derived types serialize their base-class properties first.
            outf.write(TP.BEGIN_TO_COMPLEX_VALUE_WITH_BASE_CLASS.format(class_info.base_class_name))
        for property in class_property_map:
            _info = class_property_map[property]
            if _info.type in (PROPERTY_TYPE.E_ACTION, PROPERTY_TYPE.E_FUNCTION, PROPERTY_TYPE.E_ENTITY, PROPERTY_TYPE.E_COLLECTION_ENTITY):
                continue
            outf.write(TP.ON_TO_COMPLEX_VALUE.format(_info.class_member_name))
        outf.write(TP.END_TO_COMPLEX_VALUE)

    def _generate_entity_types(self, outf, _schema_info):
        """Write one Python class per EDM entity type in the schema."""
        for class_info in _schema_info.class_list:
            if class_info.type != CLASS_TYPE.E_CLASS_ENTITY:
                continue
            self._generate_class_begin(outf, class_info)
            class_property_map = _schema_info.class_property_map[class_info.class_name]
            self._generate_common_methods_in_entity(outf, class_info, class_property_map)
            for property in class_property_map:
                _info = class_property_map[property]
                self._generate_property_in_entity(outf, class_info, _info)
            self._generate_entity_instance_creator(outf, class_info, class_property_map)
            self._generate_to_entity_value(outf, class_info, class_property_map)
            outf.write('\n')

    def _generate_entity_constructor(self, outf, class_info, class_property_map):
        """Write ``__init__`` defaulting every data property; operation
        members (actions/functions) are methods, not fields."""
        outf.write(TP.BEGIN_ENTITY_CONSTRUCTOR.format(class_info.base_class_name))
        for property in class_property_map:
            _info = class_property_map[property]
            if _info.type == PROPERTY_TYPE.E_FUNCTION or _info.type == PROPERTY_TYPE.E_ACTION:
                continue
            outf.write(TP.ON_PROPERTY_IN_ENTITY_CONSTRUCTOR.format(_info.class_member_name, _info.default_value))
        outf.write('\n')

    def _generate_common_methods_in_entity(self, outf, class_info, class_property_map):
        """Write constructor, root-URL accessor and EDM metadata."""
        self._generate_entity_constructor(outf, class_info, class_property_map)
        outf.write(TP.GET_ROOT_URL)
        outf.write(TP.EDM_INFO.format(class_info.class_name, class_info.edm_namespace, class_info.edm_name))

    def _generate_property_in_entity(self, outf, class_info, _info):
        """Write the wire<->member mapping for one entity property,
        dispatching on the property kind (operations are delegated to
        the operation generator)."""
        if _info.type == PROPERTY_TYPE.E_ACTION or _info.type == PROPERTY_TYPE.E_FUNCTION:
            self._generate_operation_in_entity(outf, class_info, _info)
            return
        if _info.type == PROPERTY_TYPE.E_PRIMITIVE:
            outf.write(TP.PRIMITIVE_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, self._primitive_resolver(_info)))
        elif _info.type == PROPERTY_TYPE.E_ENUM:
            outf.write(TP.ENUM_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_COMPLEX:
            outf.write(TP.COMPLEX_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_ENTITY:
            outf.write(TP.NAVIGATION_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_COLLECTION_PRIMITIVE:
            outf.write(TP.COLLECTION_PRIMITIVE_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, self._primitive_resolver(_info)))
        elif _info.type == PROPERTY_TYPE.E_COLLECTION_ENUM:
            outf.write(TP.COLLECTION_ENUM_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_COLLECTION_COMPLEX:
            outf.write(TP.COLLECTION_COMPLEX_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        elif _info.type == PROPERTY_TYPE.E_COLLECTION_ENTITY:
            outf.write(TP.COLLECTION_NAVIGATION_PROPERTY_IN_ENTITY_MAPPING.format(_info.class_member_name, _info.edm_name, _info.class_member_type))
        outf.write('\n')

    def _generate_operation_in_entity(self, outf, class_info, _info):
        """Write a bound action/function method on an entity class:
        signature, per-parameter marshalling, then the executor call."""
        if _info is None:
            return
        _operation_info = _info.operation_info
        if _operation_info is None:
            return
        arguments = "self"
        for param in _operation_info.params:
            arguments += ", " + param.member_name
        outf.write(TP.BEGIN_OPERATION.format(_info.class_member_name, arguments))
        for param in _operation_info.params:
            if param.member_type == PROPERTY_TYPE.E_PRIMITIVE:
                outf.write(TP.ON_PRIMITIVE_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name))
            elif param.member_type == PROPERTY_TYPE.E_COMPLEX or param.member_type == PROPERTY_TYPE.E_ENTITY:
                outf.write(TP.ON_CLASS_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
            elif param.member_type == PROPERTY_TYPE.E_ENUM:
                outf.write(TP.ON_ENUM_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
            elif param.member_type == PROPERTY_TYPE.E_COLLECTION_PRIMITIVE:
                outf.write(TP.ON_COLLECTION_PRIMITIVE_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name))
            elif param.member_type == PROPERTY_TYPE.E_COLLECTION_ENTITY or param.member_type == PROPERTY_TYPE.E_COLLECTION_COMPLEX:
                outf.write(TP.ON_COLLECTION_CLASS_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
            elif param.member_type == PROPERTY_TYPE.E_COLLECTION_ENUM:
                outf.write(TP.ON_COLLECTION_ENUM_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
        is_function = "True" if _info.type == PROPERTY_TYPE.E_FUNCTION else "False"
        if _operation_info.return_type == "void":
            outf.write(TP.END_OPERATION_VOID.format(_operation_info.executor_name, is_function))
        else:
            return_type = _operation_info.return_type
            # Primitive-returning operations take the type as a string
            # literal in the generated code.
            if _operation_info.executor_name == "operation_query_primitive":
                return_type = '"' + return_type + '"'
            outf.write(TP.END_OPERATION_WITH_RETURN_VALUE.format(_operation_info.executor_name, is_function, return_type))
        outf.write('\n')

    def _generate_entity_instance_creator(self, outf, class_info, class_property_map):
        """Write the factory that builds an entity instance from a
        payload, copying every data property."""
        outf.write(TP.BEGIN_ENTITY_INSTANCE_CREATOR.format(class_info.class_name, class_info.base_class_name))
        for property in class_property_map:
            _info = class_property_map[property]
            if _info.type == PROPERTY_TYPE.E_ACTION or _info.type == PROPERTY_TYPE.E_FUNCTION:
                continue
            outf.write(TP.ON_PROPERTY_IN_ENTITY_INSTANCE_CREATOR.format(_info.class_member_name))
        outf.write(TP.END_ENTITY_INSTANCE_CREATOR)

    def _generate_to_entity_value(self, outf, class_info, class_property_map):
        """Write the serializer producing an entity value; navigation
        and operation members are excluded from the payload."""
        if class_info.base_class_name == "type_base":
            outf.write(TP.BEGIN_TO_ENTITY_VALUE)
        else:
            # Derived types serialize their base-class properties first.
            outf.write(TP.BEGIN_TO_ENTITY_VALUE_WITH_BASE_CLASS.format(class_info.base_class_name))
        for property in class_property_map:
            _info = class_property_map[property]
            if _info.type in (PROPERTY_TYPE.E_ACTION, PROPERTY_TYPE.E_FUNCTION, PROPERTY_TYPE.E_ENTITY, PROPERTY_TYPE.E_COLLECTION_ENTITY):
                continue
            outf.write(TP.ON_TO_ENTITY_VALUE.format(_info.class_member_name))
        outf.write(TP.END_TO_ENTITY_VALUE)

    def _generate_entity_container(self, outf, _schema_info):
        """Write the entity-container class: entity-set and singleton
        query accessors plus unbound operation imports."""
        for class_info in _schema_info.class_list:
            if class_info.type != CLASS_TYPE.E_CLASS_CONTAINER:
                continue
            self._generate_class_begin(outf, class_info)
            outf.write(TP.ENTITY_CONTAINER_CONSTRUCTOR)
            class_property_map = _schema_info.class_property_map[class_info.class_name]
            for property_name in class_property_map:
                _info = class_property_map[property_name]
                if _info.type == PROPERTY_TYPE.E_CONTAINER_ENTITY_SET:
                    self._generate_entity_set_in_entity_container(outf, class_info, _info)
                elif _info.type == PROPERTY_TYPE.E_CONTAINER_SINGLETON:
                    self._generate_singleton_in_entity_conatiner(outf, class_info, _info)
                elif _info.type == PROPERTY_TYPE.E_ACTION or _info.type == PROPERTY_TYPE.E_FUNCTION:
                    self._generate_operation_imports_in_entity_container(outf, class_info, _info)
                else:
                    continue

    def _generate_entity_set_in_entity_container(self, outf, class_info, _info):
        """Write a query accessor for one entity set."""
        outf.write(TP.QUERY_ENTITY_SET_IN_ENTITY_CONTAINER.format(_info.class_member_name, _info.edm_name, _info.strong_type_name))

    def _generate_singleton_in_entity_conatiner(self, outf, class_info, _info):
        """Write a query accessor for one singleton.

        NOTE(review): "conatiner" is a typo kept for call-site
        compatibility.
        """
        outf.write(TP.QUERY_SINGLETON_IN_ENTITY_CONTAINER.format(_info.class_member_name, _info.edm_name, _info.strong_type_name))

    def _generate_operation_imports_in_entity_container(self, outf, class_info, _info):
        """Write an unbound action/function import on the container:
        signature, per-parameter marshalling, then the executor call."""
        if _info is None:
            return
        _operation_info = _info.operation_info
        if _operation_info is None:
            return
        arguments = "self"
        for param in _operation_info.params:
            arguments += ", " + param.member_name
        outf.write(TP.BEGIN_OPERATION_IMPORT.format(_info.class_member_name, arguments))
        for param in _operation_info.params:
            if param.member_type == PROPERTY_TYPE.E_PRIMITIVE:
                outf.write(TP.ON_PRIMITIVE_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name))
            elif param.member_type == PROPERTY_TYPE.E_COMPLEX or param.member_type == PROPERTY_TYPE.E_ENTITY:
                outf.write(TP.ON_CLASS_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
            elif param.member_type == PROPERTY_TYPE.E_ENUM:
                outf.write(TP.ON_ENUM_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
            elif param.member_type == PROPERTY_TYPE.E_COLLECTION_PRIMITIVE:
                outf.write(TP.ON_COLLECTION_PRIMITIVE_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name))
            elif param.member_type == PROPERTY_TYPE.E_COLLECTION_ENTITY or param.member_type == PROPERTY_TYPE.E_COLLECTION_COMPLEX:
                outf.write(TP.ON_COLLECTION_CLASS_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
            elif param.member_type == PROPERTY_TYPE.E_COLLECTION_ENUM:
                outf.write(TP.ON_COLLECTION_ENUM_PARAMETER_IN_OPERATION.format(param.member_name, param.edm_name, param.member_strong_type_name))
        is_function = "True" if _info.type == PROPERTY_TYPE.E_FUNCTION else "False"
        if _operation_info.return_type == "void":
            outf.write(TP.END_OPERATION_IMPORT_VOID.format(_operation_info.executor_name, is_function))
        else:
            return_type = _operation_info.return_type
            # Primitive-returning operations take the type as a string
            # literal in the generated code.
            if _operation_info.executor_name == "operation_query_primitive":
                return_type = '"' + return_type + '"'
            outf.write(TP.END_OPERATION_IMPORT_WITH_RETURN_VALUE.format(_operation_info.executor_name, is_function, return_type))
        outf.write('\n')
# frozen_string_literal: true
module FooHelper
  # Defines an empty #baz helper.  redefine_method (presumably Active
  # Support's — confirm) replaces any existing definition without a
  # "method redefined" warning.
  redefine_method(:baz) { }
end
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 holds the v1alpha1 (external, versioned) configuration
// API types for the ttlafterfinished controller.  The +k8s markers below
// drive deepcopy and conversion code generation.
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/controller/ttlafterfinished/config
// +k8s:conversion-gen-external-types=k8s.io/kube-controller-manager/config/v1alpha1
package v1alpha1
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
import numpy as np
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
    """Return ResNet Unit symbol for building ResNet
    Parameters
    ----------
    data : str
        Input data
    num_filter : int
        Number of output channels
    bnf : int
        Bottle neck channels factor with regard to num_filter
    stride : tuple
        Stride used in convolution
    dim_match : Boolean
        True means channel number between input and output is the same, otherwise means differ
    name : str
        Base name of the operators
    bottle_neck : Boolean
        Use the 1x1-3x3-1x1 bottleneck form of the unit when True
    bn_mom : float
        Momentum for the BatchNorm moving averages
    workspace : int
        Workspace used in convolution operator
    memonger : Boolean
        Tag the shortcut so memory-optimization passes may mirror it
    """
    if bottle_neck:
        # Pre-activation (BN -> ReLU -> Conv) ordering; the same as
        # https://github.com/facebook/fb.resnet.torch#notes, a bit
        # difference with origin paper.
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        # 1x1 reduce to a quarter of the output channels.
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        # 3x3 spatial convolution; carries the unit's stride.
        conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
        # 1x1 expand back to num_filter channels.
        conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
                                   workspace=workspace, name=name + '_conv3')
        if dim_match:
            # Identity shortcut: shapes already agree.
            shortcut = data
        else:
            # Projection shortcut (1x1 conv) matches channels/stride.
            shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                          workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv3 + shortcut
    else:
        # Basic (two 3x3 convolutions) pre-activation unit.
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        if dim_match:
            shortcut = data
        else:
            shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                          workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv2 + shortcut
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
    """Return ResNet symbol of
    Parameters
    ----------
    units : list
        Number of units in each stage
    num_stages : int
        Number of stage
    filter_list : list
        Channel size of each stage
    num_classes : int
        Ouput size of symbol
    image_shape : tuple
        (channels, height, width) of the input images
    bottle_neck : Boolean
        Use bottleneck residual units when True
    bn_mom : float
        Momentum for the BatchNorm moving averages
    workspace : int
        Workspace used in convolution operator
    dtype : str
        Precision (float32 or float16)
    memonger : Boolean
        Tag shortcuts for memory-optimization passes
    """
    num_unit = len(units)
    assert(num_unit == num_stages)
    data = mx.sym.Variable(name='data')
    if dtype == 'float32':
        data = mx.sym.identity(data=data, name='id')
    else:
        if dtype == 'float16':
            # Cast inputs once up front; compute runs in fp16.
            data = mx.sym.Cast(data=data, dtype=np.float16)
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    (nchannel, height, width) = image_shape
    # Small inputs (CIFAR-like) get a 3x3 stem; large inputs
    # (ImageNet-like) get the standard 7x7/s2 stem plus max-pooling.
    if height <= 32:            # such as cifar10
        body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
                                  no_bias=True, name="conv0", workspace=workspace)
    else:                       # often expected to be 224 such as imagenet
        body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
                                  no_bias=True, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
    for i in range(num_stages):
        # First unit of each stage (except stage 0) downsamples with
        # stride 2 and changes the channel count (dim_match=False).
        body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
                             name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
                             memonger=memonger)
        # Remaining units keep shape (stride 1, dim_match=True).
        for j in range(units[i]-1):
            body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
                                 bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
    relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # Although kernel is not used here when global_pool=True, we should put one
    pool1 = mx.sym.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.sym.Flatten(data=pool1)
    fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
    if dtype == 'float16':
        # Compute softmax in fp32 for numerical stability.
        fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
    return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, dtype='float32', **kwargs):
    """
    Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
    Original author Wei Wu

    Builds a ResNet symbol for the requested depth.

    Parameters
    ----------
    num_classes : int
        Number of output classes for the final fully-connected layer.
    num_layers : int
        Total network depth; must match one of the supported configurations.
    image_shape : str
        Comma-separated "channels,height,width" string.
    conv_workspace : int
        Workspace (MB) handed to the convolution operators.
    dtype : str
        'float32' or 'float16' compute precision.
    """
    image_shape = [int(dim) for dim in image_shape.split(',')]
    (nchannel, height, width) = image_shape
    if height <= 28:
        # Small-image (CIFAR-style) configurations: depth determines both the
        # per-stage unit count and whether bottleneck units are used.
        num_stages = 3
        if (num_layers - 2) % 9 == 0 and num_layers >= 164:
            bottle_neck = True
            filter_list = [16, 64, 128, 256]
            units = [(num_layers - 2) // 9] * num_stages
        elif (num_layers - 2) % 6 == 0 and num_layers < 164:
            bottle_neck = False
            filter_list = [16, 16, 32, 64]
            units = [(num_layers - 2) // 6] * num_stages
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
    else:
        # ImageNet-style configurations: bottleneck units from depth 50 up.
        num_stages = 4
        bottle_neck = num_layers >= 50
        filter_list = [64, 256, 512, 1024, 2048] if bottle_neck else [64, 64, 128, 256, 512]
        stage_units = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3],
            269: [3, 30, 48, 8],
        }
        if num_layers not in stage_units:
            raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
        units = stage_units[num_layers]

    return resnet(units = units,
                  num_stages = num_stages,
                  filter_list = filter_list,
                  num_classes = num_classes,
                  image_shape = image_shape,
                  bottle_neck = bottle_neck,
                  workspace = conv_workspace,
                  dtype = dtype)
//===--- RangeContains.swift ----------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2024 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import TestsUtils
// Registry consumed by the benchmark driver: the four pairwise combinations
// of Range/ClosedRange containment. All share the fixtures pre-built by
// buildRanges() so allocation stays out of the measured loops.
public let benchmarks = [
  BenchmarkInfo(
    name: "RangeContainsRange",
    runFunction: run_RangeContainsRange,
    tags: [.validation, .api],
    setUpFunction: buildRanges),
  BenchmarkInfo(
    name: "RangeContainsClosedRange",
    runFunction: run_RangeContainsClosedRange,
    tags: [.validation, .api],
    setUpFunction: buildRanges),
  BenchmarkInfo(
    name: "ClosedRangeContainsRange",
    runFunction: run_ClosedRangeContainsRange,
    tags: [.validation, .api],
    setUpFunction: buildRanges),
  BenchmarkInfo(
    name: "ClosedRangeContainsClosedRange",
    runFunction: run_ClosedRangeContainsClosedRange,
    tags: [.validation, .api],
    setUpFunction: buildRanges),
]
private func buildRanges() {
  // Force the lazily-initialized global fixtures to be materialized during
  // setup rather than inside the measured benchmark loop.
  blackHole(ranges)
  blackHole(closedRanges)
}

// 17 start points (-8...8) x 17 lengths (0...16) = 289 ranges per fixture.
private let ranges: [Range<Int>] = (-8...8).flatMap { a in (0...16).map { l in a..<(a+l) } }
private let closedRanges: [ClosedRange<Int>] = (-8...8).flatMap { a in (0...16).map { l in a...(a+l) } }
@inline(never)
public func run_RangeContainsRange(_ n: Int) {
  // Tally every ordered pair (outer, inner) of half-open ranges where the
  // outer range fully contains the inner one.
  var tally: UInt64 = 0
  for _ in 0..<n {
    for outer in ranges {
      for inner in ranges where outer.contains(inner) {
        tally += 1
      }
    }
  }
  check(tally == 15725 * UInt64(n))
}
@inline(never)
public func run_RangeContainsClosedRange(_ n: Int) {
  // Tally pairs where a half-open range fully contains a closed range.
  var tally: UInt64 = 0
  for _ in 0..<n {
    for outer in ranges {
      for inner in closedRanges where outer.contains(inner) {
        tally += 1
      }
    }
  }
  check(tally == 10812 * UInt64(n))
}
@inline(never)
public func run_ClosedRangeContainsRange(_ n: Int) {
  // Tally pairs where a closed range fully contains a half-open range.
  var tally: UInt64 = 0
  for _ in 0..<n {
    for outer in closedRanges {
      for inner in ranges where outer.contains(inner) {
        tally += 1
      }
    }
  }
  check(tally == 17493 * UInt64(n))
}
@inline(never)
public func run_ClosedRangeContainsClosedRange(_ n: Int) {
  // Tally every ordered pair of closed ranges where the outer contains the
  // inner.
  var tally: UInt64 = 0
  for _ in 0..<n {
    for outer in closedRanges {
      for inner in closedRanges where outer.contains(inner) {
        tally += 1
      }
    }
  }
  check(tally == 12597 * UInt64(n))
}
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
__author__ = 'calthorpe_analytics'
def core_end_state_revert_to_base_condition(end_state, base):
    """Reset an end-state feature so it mirrors its base-condition feature.

    Copies every demographic, employment, building-square-footage and
    parcel-acreage attribute verbatim from ``base`` onto ``end_state``,
    clears the two editing-only override fields, and persists the result
    via ``end_state.save()``.
    """
    # Attributes copied verbatim from the base condition feature.
    copied_fields = (
        'built_form_key',
        'built_form_id',
        'intersection_density_sqmi',
        # Core demographics and dwelling units.
        'pop', 'hh', 'du', 'du_detsf', 'du_attsf', 'du_mf',
        # Employment rollups.
        'emp', 'emp_ret', 'emp_off', 'emp_pub', 'emp_ind', 'emp_ag',
        'emp_military',
        # Dwelling-unit detail.
        'du_detsf_sl', 'du_detsf_ll', 'du_mf2to4', 'du_mf5p',
        # Employment detail.
        'emp_retail_services', 'emp_restaurant', 'emp_accommodation',
        'emp_arts_entertainment', 'emp_other_services',
        'emp_office_services', 'emp_public_admin', 'emp_education',
        'emp_medical_services', 'emp_manufacturing', 'emp_wholesale',
        'emp_transport_warehousing', 'emp_utilities', 'emp_construction',
        'emp_agriculture', 'emp_extraction',
        # Building square footage.
        'bldg_sqft_detsf_sl', 'bldg_sqft_detsf_ll', 'bldg_sqft_attsf',
        'bldg_sqft_mf', 'bldg_sqft_retail_services', 'bldg_sqft_restaurant',
        'bldg_sqft_accommodation', 'bldg_sqft_arts_entertainment',
        'bldg_sqft_other_services', 'bldg_sqft_office_services',
        'bldg_sqft_public_admin', 'bldg_sqft_education',
        'bldg_sqft_medical_services', 'bldg_sqft_wholesale',
        'bldg_sqft_transport_warehousing',
        # Irrigated square footage.
        'residential_irrigated_sqft', 'commercial_irrigated_sqft',
        # Parcel acreage by use.
        'acres_parcel_res', 'acres_parcel_res_detsf',
        'acres_parcel_res_detsf_sl', 'acres_parcel_res_detsf_ll',
        'acres_parcel_res_attsf', 'acres_parcel_res_mf',
        'acres_parcel_emp', 'acres_parcel_emp_off', 'acres_parcel_emp_ret',
        'acres_parcel_emp_ind', 'acres_parcel_emp_ag',
        'acres_parcel_emp_mixed', 'acres_parcel_emp_military',
        'acres_parcel_mixed', 'acres_parcel_mixed_w_off',
        'acres_parcel_mixed_no_off', 'acres_parcel_no_use',
    )
    for field in copied_fields:
        setattr(end_state, field, getattr(base, field))
    # Scenario-editing overrides that have no base-condition equivalent.
    end_state.built_form_base = None
    end_state.land_development_category = None
    end_state.save()
def core_increment_revert_to_base_condition(increment):
    """Zero an increment feature so it represents "no change" versus base.

    Clears the built-form references, resets the refill flag, zeroes every
    population/household/dwelling/employment counter and persists the row
    via ``increment.save()``.
    """
    # Built-form references and category have no meaning on a reverted row.
    for field in ('built_form_key', 'built_form_id', 'land_development_category'):
        setattr(increment, field, None)
    increment.refill_flag = False
    # All counters go back to zero.
    zeroed_fields = (
        'pop', 'hh', 'du', 'emp',
        'du_detsf', 'du_detsf_ll', 'du_detsf_sl', 'du_attsf', 'du_mf',
        'emp_ret', 'emp_retail_services', 'emp_restaurant',
        'emp_accommodation', 'emp_arts_entertainment', 'emp_other_services',
        'emp_off', 'emp_office_services', 'emp_medical_services',
        'emp_pub', 'emp_education', 'emp_public_admin',
        'emp_ind', 'emp_wholesale', 'emp_transport_warehousing',
        'emp_manufacturing', 'emp_utilities', 'emp_construction',
        'emp_ag', 'emp_agriculture', 'emp_extraction',
        'emp_military',
    )
    for field in zeroed_fields:
        setattr(increment, field, 0)
    increment.save()
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot;
/**
 * Public version {@link ApplicationEnvironment} for tests to use.
 *
 * <p>Exists only to widen the visibility of the package-private
 * {@code ApplicationEnvironment}; it adds no behavior of its own.
 *
 * @author Phillip Webb
 */
public class TestApplicationEnvironment extends ApplicationEnvironment {

}
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Blink.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import sys
_EXCLUDED_PATHS = ()
def _CheckForNonBlinkVariantMojomIncludes(input_api, output_api):
  """Flags includes of generated mojom headers that are not the -blink variant.

  Only headers matching ``*.mojom-blink.h`` are accepted; any other variant
  (captured by group 1 of the pattern) produces a presubmit error.
  """
  pattern = input_api.re.compile(r'#include\s+.+\.mojom(.*)\.h[>"]')
  errors = []
  for f in input_api.AffectedFiles():
    for line_num, line in f.ChangedContents():
      m = pattern.match(line)
      if m and m.group(1) != '-blink':
        errors.append(' %s:%d %s' % (
            f.LocalPath(), line_num, line))
  results = []
  if errors:
    results.append(output_api.PresubmitError(
        'Files that include non-Blink variant mojoms found:', errors))
  return results
def _CheckForVersionControlConflictsInFile(input_api, f):
  """Returns changed lines in *f* that still carry VCS conflict markers."""
  # Matches '<<<<<<< ', '>>>>>>> ' prefixes and a bare '=======' line.
  pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
  errors = []
  for line_num, line in f.ChangedContents():
    if pattern.match(line):
      errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
  return errors

def _CheckForVersionControlConflicts(input_api, output_api):
  """Usually this is not intentional and will cause a compile failure."""
  errors = []
  for f in input_api.AffectedFiles():
    errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
  results = []
  if errors:
    results.append(output_api.PresubmitError(
        'Version control conflict markers found, please resolve.', errors))
  return results
def _CheckWatchlist(input_api, output_api):
  """Check that the WATCHLIST file parses correctly."""
  errors = []
  for f in input_api.AffectedFiles():
    if f.LocalPath() != 'WATCHLISTS':
      continue
    # Python 2 era imports; done lazily so the cost is only paid when
    # WATCHLISTS is actually touched.
    import StringIO
    import logging
    import watchlists
    # Watchlists reports parse problems only via logging, so temporarily
    # attach a capturing handler to the root logger while constructing it.
    log_buffer = StringIO.StringIO()
    log_handler = logging.StreamHandler(log_buffer)
    log_handler.setFormatter(
        logging.Formatter('%(levelname)s: %(message)s'))
    logger = logging.getLogger()
    logger.addHandler(log_handler)
    wl = watchlists.Watchlists(input_api.change.RepositoryRoot())
    logger.removeHandler(log_handler)
    log_handler.flush()
    log_buffer.flush()
    # Any captured output means the file failed to parse.
    if log_buffer.getvalue():
      errors.append(output_api.PresubmitError(
          'Cannot parse WATCHLISTS file, please resolve.',
          log_buffer.getvalue().splitlines()))
  return errors
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # We should figure out what license checks we actually want to use.
  # r'.*' matches any header, i.e. license checking is effectively disabled.
  license_header = r'.*'
  results = []
  results.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS,
      maxlen=800, license_header=license_header))
  results.extend(_CheckForNonBlinkVariantMojomIncludes(input_api, output_api))
  results.extend(_CheckForVersionControlConflicts(input_api, output_api))
  results.extend(_CheckPatchFiles(input_api, output_api))
  results.extend(_CheckTestExpectations(input_api, output_api))
  results.extend(_CheckChromiumPlatformMacros(input_api, output_api))
  results.extend(_CheckWatchlist(input_api, output_api))
  results.extend(_CheckFilePermissions(input_api, output_api))
  return results
def _CheckPatchFiles(input_api, output_api):
  """Rejects leftover .orig/.rej files produced by patch(1)."""
  problems = [f.LocalPath() for f in input_api.AffectedFiles()
      if f.LocalPath().endswith(('.orig', '.rej'))]
  if problems:
    return [output_api.PresubmitError(
        "Don't commit .rej and .orig files.", problems)]
  else:
    return []

def _CheckTestExpectations(input_api, output_api):
  """Runs lint-test-expectations when any LayoutTests file is touched."""
  local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
  if any('LayoutTests' in path for path in local_paths):
    lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
        'Tools', 'Scripts', 'lint-test-expectations')
    _, errs = input_api.subprocess.Popen(
        [input_api.python_executable, lint_path],
        stdout=input_api.subprocess.PIPE,
        stderr=input_api.subprocess.PIPE).communicate()
    if not errs:
      # The linter reports everything (including success) on stderr, so an
      # empty stderr means it did not run properly.
      return [output_api.PresubmitError(
          "lint-test-expectations failed "
          "to produce output; check by hand. ")]
    if errs.strip() != 'Lint succeeded.':
      return [output_api.PresubmitError(errs)]
  return []
def _CheckStyle(input_api, output_api):
  """Runs check-webkit-style over affected files that use Blink style."""
  # Files that follow Chromium's coding style do not include capital letters.
  re_chromium_style_file = re.compile(r'\b[a-z_]+\.(cc|h)$')
  style_checker_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
      'Tools', 'Scripts', 'check-webkit-style')
  args = ([input_api.python_executable, style_checker_path, '--diff-files']
      + [input_api.os_path.join('..', '..', f.LocalPath())
         for f in input_api.AffectedFiles()
         # Filter out files that follow Chromium's coding style.
         if not re_chromium_style_file.search(f.LocalPath())])
  results = []
  try:
    child = input_api.subprocess.Popen(args,
                                       stderr=input_api.subprocess.PIPE)
    _, stderrdata = child.communicate()
    if child.returncode != 0:
      results.append(output_api.PresubmitError(
          'check-webkit-style failed', [stderrdata]))
  except Exception as e:
    # Best effort: a missing or broken checker should not block the
    # presubmit, only notify.
    results.append(output_api.PresubmitNotifyResult(
        'Could not run check-webkit-style', [str(e)]))
  return results
def _CheckChromiumPlatformMacros(input_api, output_api, source_file_filter=None):
  """Ensures that Blink code uses WTF's platform macros instead of
  Chromium's. Using the latter has resulted in at least one subtle
  build breakage."""
  # Catches '#if'/'#elif' preprocessor lines referencing an OS_* macro.
  os_macro_re = input_api.re.compile(r'^\s*#(el)?if.*\bOS_')
  errors = input_api.canned_checks._FindNewViolationsOfRule(
      lambda _, x: not os_macro_re.search(x),
      input_api, source_file_filter)
  errors = ['Found use of Chromium OS_* macro in %s. '
            'Use WTF platform macros instead.' % violation for violation in errors]
  if errors:
    return [output_api.PresubmitPromptWarning('\n'.join(errors))]
  return []

def _CheckForPrintfDebugging(input_api, output_api):
  """Generally speaking, we'd prefer not to land patches that printf
  debug output."""
  printf_re = input_api.re.compile(r'^\s*(printf\(|fprintf\(stderr,)')
  errors = input_api.canned_checks._FindNewViolationsOfRule(
      lambda _, x: not printf_re.search(x),
      input_api, None)
  errors = [' * %s' % violation for violation in errors]
  if errors:
    # Prompt/notify only: printf debugging is discouraged, not forbidden.
    return [output_api.PresubmitPromptOrNotify(
        'printf debugging is best debugging! That said, it might '
        'be a good idea to drop the following occurances from '
        'your patch before uploading:\n%s' % '\n'.join(errors))]
  return []
def _CheckForFailInFile(input_api, f):
  """Returns changed lines in *f* that start with 'FAIL'."""
  pattern = input_api.re.compile('^FAIL')
  errors = []
  for line_num, line in f.ChangedContents():
    if pattern.match(line):
      errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
  return errors

def _CheckFilePermissions(input_api, output_api):
  """Check that all files have their permissions properly set."""
  if input_api.platform == 'win32':
    # POSIX permission bits are meaningless on Windows.
    return []
  args = [input_api.python_executable,
          input_api.os_path.join(
              input_api.change.RepositoryRoot(),
              'tools/checkperms/checkperms.py'),
          '--root', input_api.change.RepositoryRoot()]
  for f in input_api.AffectedFiles():
    args += ['--file', f.LocalPath()]
  try:
    input_api.subprocess.check_output(args)
    return []
  except input_api.subprocess.CalledProcessError as error:
    return [output_api.PresubmitError(
        'checkperms.py failed:',
        long_text=error.output)]
def _CheckForInvalidPreferenceError(input_api, output_api):
  """Flags 'Invalid name for preference' messages baked into expected-result
  files, which indicate a test referencing a preference that doesn't exist."""
  pattern = input_api.re.compile('Invalid name for preference: (.+)')
  results = []
  for f in input_api.AffectedFiles():
    # Only layout-test baselines are relevant.
    if not f.LocalPath().endswith('-expected.txt'):
      continue
    for line_num, line in f.ChangedContents():
      error = pattern.search(line)
      if error:
        results.append(output_api.PresubmitError('Found an invalid preference %s in expected result %s:%s' % (error.group(1), f, line_num)))
  return results
def _CheckForForbiddenNamespace(input_api, output_api):
  """Checks that Blink uses Chromium namespaces only in permitted code."""
  # This list is not exhaustive, but covers likely ones.
  chromium_namespaces = ["base", "cc", "content", "gfx", "net", "ui"]
  chromium_forbidden_classes = ["scoped_refptr"]
  chromium_allowed_classes = ["gfx::CubicBezier"]
  def source_file_filter(path):
    # Blink core sources only; platform/wtf/web are allowed to use Chromium
    # types and so are blacklisted from this check.
    return input_api.FilterSourceFile(path,
                                      white_list=[r'third_party/WebKit/Source/.*\.(h|cpp)$'],
                                      black_list=[r'third_party/WebKit/Source/(platform|wtf|web)/'])
  comment_re = input_api.re.compile(r'^\s*//')
  result = []
  # Pass 1: qualified uses such as 'base::Foo' outside of comments.
  for namespace in chromium_namespaces:
    namespace_re = input_api.re.compile(r'\b{0}::([A-Za-z_][A-Za-z0-9_]*)'.format(input_api.re.escape(namespace)))
    def uses_namespace_outside_comments(line):
      if comment_re.search(line):
        return False
      re_result = namespace_re.search(line)
      if not re_result:
        return False
      parsed_class_name = namespace + "::" + re_result.group(1)
      return not (parsed_class_name in chromium_allowed_classes)
    # NOTE(review): these closures capture loop variables, but they are
    # consumed within the same iteration, so late binding is harmless here.
    errors = input_api.canned_checks._FindNewViolationsOfRule(lambda _, line: not uses_namespace_outside_comments(line),
                                                              input_api, source_file_filter)
    if errors:
      result += [output_api.PresubmitError('Do not use Chromium class from namespace {} inside Blink core:\n{}'.format(namespace, '\n'.join(errors)))]
  # Pass 2: 'using namespace X;' directives and 'namespace X {' openings.
  for namespace in chromium_namespaces:
    namespace_re = input_api.re.compile(r'^\s*using namespace {0};|^\s*namespace {0} \{{'.format(input_api.re.escape(namespace)))
    uses_namespace_outside_comments = lambda line: namespace_re.search(line) and not comment_re.search(line)
    errors = input_api.canned_checks._FindNewViolationsOfRule(lambda _, line: not uses_namespace_outside_comments(line),
                                                              input_api, source_file_filter)
    if errors:
      result += [output_api.PresubmitError('Do not use Chromium namespace {} inside Blink core:\n{}'.format(namespace, '\n'.join(errors)))]
  # Pass 3: un-namespaced but Chromium-specific class names.
  for class_name in chromium_forbidden_classes:
    class_re = input_api.re.compile(r'\b{0}\b'.format(input_api.re.escape(class_name)))
    uses_class_outside_comments = lambda line: class_re.search(line) and not comment_re.search(line)
    errors = input_api.canned_checks._FindNewViolationsOfRule(lambda _, line: not uses_class_outside_comments(line),
                                                              input_api, source_file_filter)
    if errors:
      result += [output_api.PresubmitError('Do not use Chromium class {} inside Blink core:\n{}'.format(class_name, '\n'.join(errors)))]
  return result
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point run at upload time."""
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  results.extend(_CheckStyle(input_api, output_api))
  results.extend(_CheckForPrintfDebugging(input_api, output_api))
  results.extend(_CheckForInvalidPreferenceError(input_api, output_api))
  results.extend(_CheckForForbiddenNamespace(input_api, output_api))
  return results

def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point run at commit time; also verifies tree status
  and that the change has a description."""
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  results.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api,
      json_url='http://chromium-status.appspot.com/current?format=json'))
  results.extend(input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api))
  return results
def GetPreferredTryMasters(project, change):
  """Derives the default try-bot configuration from the CQ config.

  Returns a dict of {master: {builder: ['defaulttests']}} for every
  non-presubmit CQ builder.
  """
  import json
  import os.path
  import platform
  import subprocess
  cq_config_path = os.path.join(
      change.RepositoryRoot(), 'infra', 'config', 'cq.cfg')
  # commit_queue.py below is a script in depot_tools directory, which has a
  # 'builders' command to retrieve a list of CQ builders from the CQ config.
  is_win = platform.system() == 'Windows'
  masters = json.loads(subprocess.check_output(
      ['commit_queue', 'builders', cq_config_path], shell=is_win))
  try_config = {}
  for master in masters:
    try_config.setdefault(master, {})
    for builder in masters[master]:
      # Do not trigger presubmit builders, since they're likely to fail
      # (e.g. OWNERS checks before finished code review), and we're
      # running local presubmit anyway.
      if 'presubmit' not in builder:
        try_config[master][builder] = ['defaulttests']
  return try_config
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.aspectj.annotation;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.reflect.AjType;
import org.aspectj.lang.reflect.AjTypeSystem;
import org.aspectj.lang.reflect.PerClauseKind;
import org.springframework.aop.Pointcut;
import org.springframework.aop.aspectj.AspectJExpressionPointcut;
import org.springframework.aop.aspectj.TypePatternClassFilter;
import org.springframework.aop.framework.AopConfigException;
import org.springframework.aop.support.ComposablePointcut;
/**
* Metadata for an AspectJ aspect class, with an additional Spring AOP pointcut
* for the per clause.
*
* <p>Uses AspectJ 5 AJType reflection API, enabling us to work with different
* AspectJ instantiation models such as "singleton", "pertarget" and "perthis".
*
* @author Rod Johnson
* @author Juergen Hoeller
* @since 2.0
* @see org.springframework.aop.aspectj.AspectJExpressionPointcut
*/
@SuppressWarnings("serial")
public class AspectMetadata implements Serializable {

    /**
     * The name of this aspect as defined to Spring (the bean name) -
     * allows us to determine if two pieces of advice come from the
     * same aspect and hence their relative precedence.
     */
    private final String aspectName;

    /**
     * The aspect class, stored separately for re-resolution of the
     * corresponding AjType on deserialization.
     */
    private final Class<?> aspectClass;

    /**
     * AspectJ reflection information.
     * <p>Re-resolved on deserialization since it isn't serializable itself.
     */
    private transient AjType<?> ajType;

    /**
     * Spring AOP pointcut corresponding to the per clause of the
     * aspect. Will be the {@code Pointcut.TRUE} canonical instance in the
     * case of a singleton, otherwise an AspectJExpressionPointcut.
     */
    private final Pointcut perClausePointcut;

    /**
     * Create a new AspectMetadata instance for the given aspect class.
     * @param aspectClass the aspect class
     * @param aspectName the name of the aspect
     */
    public AspectMetadata(Class<?> aspectClass, String aspectName) {
        this.aspectName = aspectName;

        // Walk up the class hierarchy: the @Aspect annotation may be declared
        // on a superclass rather than the concrete bean class.
        Class<?> currClass = aspectClass;
        AjType<?> ajType = null;
        while (currClass != Object.class) {
            AjType<?> ajTypeToCheck = AjTypeSystem.getAjType(currClass);
            if (ajTypeToCheck.isAspect()) {
                ajType = ajTypeToCheck;
                break;
            }
            currClass = currClass.getSuperclass();
        }
        if (ajType == null) {
            throw new IllegalArgumentException("Class '" + aspectClass.getName() + "' is not an @AspectJ aspect");
        }
        if (ajType.getDeclarePrecedence().length > 0) {
            throw new IllegalArgumentException("DeclarePrecedence not presently supported in Spring AOP");
        }
        this.aspectClass = ajType.getJavaClass();
        this.ajType = ajType;

        // Map the AspectJ instantiation model onto a Spring AOP pointcut.
        switch (this.ajType.getPerClause().getKind()) {
            case SINGLETON -> {
                this.perClausePointcut = Pointcut.TRUE;
            }
            case PERTARGET, PERTHIS -> {
                AspectJExpressionPointcut ajexp = new AspectJExpressionPointcut();
                ajexp.setLocation(aspectClass.getName());
                ajexp.setExpression(findPerClause(aspectClass));
                ajexp.setPointcutDeclarationScope(aspectClass);
                this.perClausePointcut = ajexp;
            }
            case PERTYPEWITHIN -> {
                // Works with a type pattern
                this.perClausePointcut = new ComposablePointcut(new TypePatternClassFilter(findPerClause(aspectClass)));
            }
            default -> throw new AopConfigException(
                    "PerClause " + ajType.getPerClause().getKind() + " not supported by Spring AOP for " + aspectClass);
        }
    }

    /**
     * Extract contents from String of form {@code pertarget(contents)}.
     */
    private String findPerClause(Class<?> aspectClass) {
        Aspect ann = aspectClass.getAnnotation(Aspect.class);
        if (ann == null) {
            return "";
        }
        String value = ann.value();
        // Strip the clause keyword and surrounding parentheses.
        int beginIndex = value.indexOf('(');
        if (beginIndex < 0) {
            return "";
        }
        return value.substring(beginIndex + 1, value.length() - 1);
    }

    /**
     * Return AspectJ reflection information.
     */
    public AjType<?> getAjType() {
        return this.ajType;
    }

    /**
     * Return the aspect class.
     */
    public Class<?> getAspectClass() {
        return this.aspectClass;
    }

    /**
     * Return the aspect name.
     */
    public String getAspectName() {
        return this.aspectName;
    }

    /**
     * Return a Spring pointcut expression for a singleton aspect.
     * (for example, {@code Pointcut.TRUE} if it's a singleton).
     */
    public Pointcut getPerClausePointcut() {
        return this.perClausePointcut;
    }

    /**
     * Return whether the aspect is defined as "perthis" or "pertarget".
     */
    public boolean isPerThisOrPerTarget() {
        PerClauseKind kind = getAjType().getPerClause().getKind();
        return (kind == PerClauseKind.PERTARGET || kind == PerClauseKind.PERTHIS);
    }

    /**
     * Return whether the aspect is defined as "pertypewithin".
     */
    public boolean isPerTypeWithin() {
        PerClauseKind kind = getAjType().getPerClause().getKind();
        return (kind == PerClauseKind.PERTYPEWITHIN);
    }

    /**
     * Return whether the aspect needs to be lazily instantiated.
     */
    public boolean isLazilyInstantiated() {
        return (isPerThisOrPerTarget() || isPerTypeWithin());
    }

    private void readObject(ObjectInputStream inputStream) throws IOException, ClassNotFoundException {
        inputStream.defaultReadObject();
        // Re-resolve the non-serializable AjType from the aspect class.
        this.ajType = AjTypeSystem.getAjType(this.aspectClass);
    }

}
# -*- coding: utf-8 -*-
"""
***************************************************************************
LinesIntersection.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QGis, QgsFeatureRequest, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class LinesIntersection(GeoAlgorithm):
    """Processing algorithm that outputs a point layer with the
    intersections between two line layers."""

    INPUT_A = 'INPUT_A'
    INPUT_B = 'INPUT_B'
    FIELD_A = 'FIELD_A'
    FIELD_B = 'FIELD_B'
    OUTPUT = 'OUTPUT'

    def getIcon(self):
        # Icon from the bundled ftools image set.
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'lines_intersection.png'))

    def defineCharacteristics(self):
        """Declares the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Line intersections')
        self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools')

        self.addParameter(ParameterVector(self.INPUT_A,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_LINE]))
        self.addParameter(ParameterVector(self.INPUT_B,
                                          self.tr('Intersect layer'), [ParameterVector.VECTOR_TYPE_LINE]))
        self.addParameter(ParameterTableField(
            self.FIELD_A,
            self.tr('Input unique ID field'),
            self.INPUT_A,
            optional=True))
        self.addParameter(ParameterTableField(
            self.FIELD_B,
            self.tr('Intersect unique ID field'),
            self.INPUT_B,
            optional=True))

        self.addOutput(OutputVector(self.OUTPUT, self.tr('Intersections')))

    def processAlgorithm(self, progress):
        """Computes pairwise line intersections, writing each intersection
        point with the two layers' ID attributes attached."""
        layerA = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_A))
        layerB = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_B))
        fieldA = self.getParameterValue(self.FIELD_A)
        fieldB = self.getParameterValue(self.FIELD_B)

        # NOTE(review): both fields are optional; fieldNameIndex returns -1
        # for a missing field, making attrsA[idxA] pick the LAST attribute.
        # Confirm that is the intended fallback.
        idxA = layerA.fieldNameIndex(fieldA)
        idxB = layerB.fieldNameIndex(fieldB)

        fieldList = [layerA.pendingFields()[idxA],
                     layerB.pendingFields()[idxB]]

        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fieldList,
                                                                     QGis.WKBPoint, layerA.dataProvider().crs())

        # Spatial index over layer B keeps the candidate search sub-linear.
        spatialIndex = vector.spatialindex(layerB)

        outFeat = QgsFeature()
        features = vector.features(layerA)
        total = 100.0 / len(features)
        hasIntersections = False

        for current, inFeatA in enumerate(features):
            inGeom = inFeatA.geometry()
            hasIntersections = False
            # Bounding-box candidates from the index; exact test done below.
            lines = spatialIndex.intersects(inGeom.boundingBox())

            if len(lines) > 0:
                hasIntersections = True

            if hasIntersections:
                for i in lines:
                    request = QgsFeatureRequest().setFilterFid(i)
                    inFeatB = layerB.getFeatures(request).next()
                    tmpGeom = QgsGeometry(inFeatB.geometry())

                    points = []
                    attrsA = inFeatA.attributes()
                    attrsB = inFeatB.attributes()

                    if inGeom.intersects(tmpGeom):
                        tempGeom = inGeom.intersection(tmpGeom)
                        # Only point-type intersections are written; overlap
                        # segments (line-type results) are skipped.
                        if tempGeom.type() == QGis.Point:
                            if tempGeom.isMultipart():
                                points = tempGeom.asMultiPoint()
                            else:
                                points.append(tempGeom.asPoint())

                            for j in points:
                                # fromPoint is a static factory invoked via the
                                # instance; builds a fresh point geometry.
                                outFeat.setGeometry(tempGeom.fromPoint(j))
                                outFeat.setAttributes([attrsA[idxA],
                                                       attrsB[idxB]])
                                writer.addFeature(outFeat)

            progress.setPercentage(int(current * total))

        del writer
// This file was automatically generated from flow.md by Knit tool. Do not edit.
package kotlinx.coroutines.guide.exampleFlow21
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.*
fun main() = runBlocking<Unit> {
    // Two finite source flows emitting on different cadences.
    val nums = (1..3).asFlow().onEach { delay(300) } // numbers 1..3 every 300 ms
    val strs = flowOf("one", "two", "three").onEach { delay(400) } // strings every 400 ms
    // NOTE(review): currentTimeMillis() is called unqualified here — presumably
    // an imported helper; confirm it resolves (System.currentTimeMillis() otherwise).
    val startTime = currentTimeMillis() // remember the start time
    // zip pairs elements one-to-one, so output is paced by the slower flow.
    nums.zip(strs) { a, b -> "$a -> $b" } // compose a single string with "zip"
        .collect { value -> // collect and print
            println("$value at ${currentTimeMillis() - startTime} ms from start")
        }
}
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class _Reasons(object):
    # Sentinel values identifying *why* an operation was unsupported.
    # Each attribute is a distinct anonymous object compared by identity,
    # presumably matched against ``UnsupportedAlgorithm._reason`` by callers —
    # confirm at call sites.
    BACKEND_MISSING_INTERFACE = object()
    UNSUPPORTED_HASH = object()
    UNSUPPORTED_CIPHER = object()
    UNSUPPORTED_PADDING = object()
    UNSUPPORTED_MGF = object()
    UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()
    UNSUPPORTED_ELLIPTIC_CURVE = object()
    UNSUPPORTED_SERIALIZATION = object()
    UNSUPPORTED_X509 = object()
class UnsupportedAlgorithm(Exception):
    """Raised when a requested algorithm or operation is not supported.

    An optional ``reason`` object may be attached; it is stored privately
    on the instance for callers that need to distinguish failure causes.
    """

    def __init__(self, message, reason=None):
        Exception.__init__(self, message)
        self._reason = reason
# Marker exception types; semantics follow directly from the class names.
class AlreadyFinalized(Exception):
    """The context was already finalized."""
    pass


class AlreadyUpdated(Exception):
    """The context was already updated."""
    pass


class NotYetFinalized(Exception):
    """The context has not been finalized yet."""
    pass


class InvalidTag(Exception):
    """The tag was invalid."""
    pass


class InvalidSignature(Exception):
    """The signature was invalid."""
    pass


class InternalError(Exception):
    """An unexpected internal error occurred."""
    pass


class InvalidKey(Exception):
    """The key was invalid."""
    pass


class InvalidToken(Exception):
    """The token was invalid."""
    pass
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify_agent.installer.runners import winrm_runner
from cloudify_agent.tests import BaseTest
##############################################################################
# note that this file only tests validation and defaults of the fabric runner.
# it does not test the actual functionality because that requires starting
# a vm. functional tests are executed as local workflow tests in the system
# tests framework
##############################################################################
class TestValidations(BaseTest):
    """Checks that winrm_runner.validate rejects incomplete session configs."""

    def _assert_invalid(self, session_config, expected_message):
        """Run validate() and assert it raises ValueError containing *expected_message*."""
        try:
            winrm_runner.validate(session_config)
            self.fail('Expected ValueError: {0}'.format(expected_message))
        except ValueError as e:
            # str(e) instead of e.message: BaseException.message was removed
            # in Python 3 (and deprecated since Python 2.6).
            self.assertIn(expected_message, str(e))

    def test_validate_host(self):
        # Missing host
        self._assert_invalid(
            {'user': 'test_user', 'password': 'test_password'},
            'Invalid host')

    def test_validate_user(self):
        # Missing user
        self._assert_invalid(
            {'host': 'test_host', 'password': 'test_password'},
            'Invalid user')

    def test_validate_password(self):
        # Missing password
        self._assert_invalid(
            {'host': 'test_host', 'user': 'test_user'},
            'Invalid password')
class TestDefaults(BaseTest):
    """Checks that WinRMRunner falls back to module defaults for optional settings."""

    def test_defaults(self):
        runner = winrm_runner.WinRMRunner(
            validate_connection=False,
            host='test_host',
            user='test_user',
            password='test_password')
        # assertEqual instead of the deprecated assertEquals alias
        # (the alias was removed in Python 3.12).
        self.assertEqual(
            runner.session_config['protocol'],
            winrm_runner.DEFAULT_WINRM_PROTOCOL)
        self.assertEqual(
            runner.session_config['uri'],
            winrm_runner.DEFAULT_WINRM_URI)
        self.assertEqual(
            runner.session_config['port'],
            winrm_runner.DEFAULT_WINRM_PORT)
import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class _BaseLoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super().__init__()
def startTest(self, test):
self._events.append('startTest')
super().startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super().startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super().stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super().stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super().addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super().addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super().addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super().addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super().addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super().addUnexpectedSuccess(*args)
class LegacyLoggingResult(_BaseLoggingResult):
    """A pre-subtest logging TestResult.

    Accessing ``addSubTest`` raises AttributeError, so
    ``hasattr(result, 'addSubTest')`` is False — mimicking TestResult
    implementations written before subtests existed, while all other
    method calls are still recorded by the base class.
    """

    @property
    def addSubTest(self):
        raise AttributeError
class LoggingResult(_BaseLoggingResult):
    """A TestResult which records its method calls, including subtests."""

    def addSubTest(self, test, subtest, err):
        # A subtest with no exception info counts as a success.
        outcome = 'addSubTestSuccess' if err is None else 'addSubTestFailure'
        self._events.append(outcome)
        super().addSubTest(test, subtest, err)
class ResultWithNoStartTestRunStopTestRun(object):
    """A TestResult lookalike predating startTestRun/stopTestRun.

    Deliberately does NOT define startTestRun/stopTestRun, so callers can
    verify the framework copes with result objects from before those
    hooks existed.
    """

    def __init__(self):
        self.testsRun = 0
        self.shouldStop = False
        # Outcome collections, all permanently empty.
        self.failures = []
        self.errors = []
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []

    def startTest(self, test):
        pass

    def stopTest(self, test):
        pass

    def addError(self, test):
        pass

    def addFailure(self, test):
        pass

    def addSuccess(self, test):
        pass

    def wasSuccessful(self):
        # Nothing is ever recorded, so the run always "succeeds".
        return True
class BufferedWriter:
    """A file-like object that holds writes in a buffer until flush()."""

    def __init__(self):
        self.result = ''   # everything flushed so far
        self.buffer = ''   # staged, not yet flushed

    def write(self, arg):
        # Stage the text; nothing reaches ``result`` until flush().
        self.buffer = self.buffer + arg

    def flush(self):
        # Move the staged text into the visible result.
        self.result = self.result + self.buffer
        self.buffer = ''

    def getvalue(self):
        return self.result
#!/usr/bin/env python
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points maked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Vivake Gupta (v@nano.com)
Release 1: January 2001
Further adjustments by Santiago Bruno (bananabruno@gmail.com)
to allow word input not restricted to one word per line, leading
to:
Release 2: July 2008
Optimizations and cleanup of the code by Lars Buitinck, July 2012.
"""
from six.moves import xrange
class PorterStemmer(object):
    """Porter (1980) suffix-stripping stemmer.

    Ported from the author's ANSI C version; see the module docstring for
    the deliberate departures from the published algorithm.
    """

    def __init__(self):
        """The main part of the stemming algorithm starts here.

        b is a buffer holding a word to be stemmed. The letters are in b[0],
        b[1] ... ending at b[k]. k is readjusted downwards as the stemming
        progresses.

        Note that only lower case sequences are stemmed. Forcing to lower case
        should be done before stem(...) is called.
        """
        self.b = ""  # buffer for word to be stemmed
        self.k = 0
        self.j = 0   # j is a general offset into the string

    def _cons(self, i):
        """True <=> b[i] is a consonant."""
        ch = self.b[i]
        if ch in "aeiou":
            return False
        if ch == 'y':
            # 'y' is a consonant at word start, otherwise it is a consonant
            # exactly when the previous letter is not one.
            return i == 0 or not self._cons(i - 1)
        return True

    def _m(self):
        """Returns the number of consonant sequences between 0 and j.

        If c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence,

           <c><v>       gives 0
           <c>vc<v>     gives 1
           <c>vcvc<v>   gives 2
           <c>vcvcvc<v> gives 3
           ....
        """
        # Skip the optional initial consonant sequence.
        i = 0
        while True:
            if i > self.j:
                return 0
            if not self._cons(i):
                break
            i += 1
        i += 1
        # Count each vowel-sequence/consonant-sequence transition.
        n = 0
        while True:
            while True:
                if i > self.j:
                    return n
                if self._cons(i):
                    break
                i += 1
            i += 1
            n += 1
            while True:  # normalized from ``while 1`` for consistency
                if i > self.j:
                    return n
                if not self._cons(i):
                    break
                i += 1
            i += 1

    def _vowelinstem(self):
        """True <=> 0,...j contains a vowel"""
        # ``range`` instead of six's ``xrange``: identical on Python 3 and
        # harmless for these tiny spans on Python 2.
        return not all(self._cons(i) for i in range(self.j + 1))

    def _doublec(self, j):
        """True <=> j,(j-1) contain a double consonant."""
        return j > 0 and self.b[j] == self.b[j-1] and self._cons(j)

    def _cvc(self, i):
        """True <=> i-2,i-1,i has the form consonant - vowel - consonant
        and also if the second c is not w,x or y. This is used when trying to
        restore an e at the end of a short word, e.g.

           cav(e), lov(e), hop(e), crim(e), but
           snow, box, tray.
        """
        if i < 2 or not self._cons(i) or self._cons(i-1) or not self._cons(i-2):
            return False
        return self.b[i] not in "wxy"

    def _ends(self, s):
        """True <=> 0,...k ends with the string s."""
        if s[-1] != self.b[self.k]:  # tiny speed-up
            return 0
        length = len(s)
        if length > (self.k + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        # Side effect: j marks the end of the stem (before the suffix).
        self.j = self.k - length
        return 1

    def _setto(self, s):
        """Set (j+1),...k to the characters in the string s, adjusting k."""
        self.b = self.b[:self.j+1] + s
        self.k = len(self.b) - 1

    def _r(self, s):
        # Replace the current suffix by s, but only when the stem has at
        # least one consonant sequence (m > 0).
        if self._m() > 0:
            self._setto(s)

    def _step1ab(self):
        """Get rid of plurals and -ed or -ing. E.g.,

           caresses  ->  caress
           ponies    ->  poni
           ties      ->  ti
           caress    ->  caress
           cats      ->  cat

           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable

           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess

           meetings  ->  meet
        """
        if self.b[self.k] == 's':
            if self._ends("sses"):
                self.k -= 2
            elif self._ends("ies"):
                self._setto("i")
            elif self.b[self.k - 1] != 's':
                self.k -= 1
        if self._ends("eed"):
            if self._m() > 0:
                self.k -= 1
        elif (self._ends("ed") or self._ends("ing")) and self._vowelinstem():
            self.k = self.j
            if self._ends("at"): self._setto("ate")
            elif self._ends("bl"): self._setto("ble")
            elif self._ends("iz"): self._setto("ize")
            elif self._doublec(self.k):
                if self.b[self.k - 1] not in "lsz":
                    self.k -= 1
            elif self._m() == 1 and self._cvc(self.k):
                self._setto("e")

    def _step1c(self):
        """Turn terminal y to i when there is another vowel in the stem."""
        if self._ends("y") and self._vowelinstem():
            self.b = self.b[:self.k] + 'i'

    def _step2(self):
        """Map double suffices to single ones.

        So, -ization ( = -ize plus -ation) maps to -ize etc. Note that the
        string before the suffix must give _m() > 0.
        """
        # Dispatch on the second-to-last character for speed.
        ch = self.b[self.k - 1]

        if ch == 'a':
            if self._ends("ational"): self._r("ate")
            elif self._ends("tional"): self._r("tion")
        elif ch == 'c':
            if self._ends("enci"): self._r("ence")
            elif self._ends("anci"): self._r("ance")
        elif ch == 'e':
            if self._ends("izer"): self._r("ize")
        elif ch == 'l':
            if self._ends("bli"): self._r("ble")  # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            #   if self._ends("abli"):      self._r("able")
            elif self._ends("alli"): self._r("al")
            elif self._ends("entli"): self._r("ent")
            elif self._ends("eli"): self._r("e")
            elif self._ends("ousli"): self._r("ous")
        elif ch == 'o':
            if self._ends("ization"): self._r("ize")
            elif self._ends("ation"): self._r("ate")
            elif self._ends("ator"): self._r("ate")
        elif ch == 's':
            if self._ends("alism"): self._r("al")
            elif self._ends("iveness"): self._r("ive")
            elif self._ends("fulness"): self._r("ful")
            elif self._ends("ousness"): self._r("ous")
        elif ch == 't':
            if self._ends("aliti"): self._r("al")
            elif self._ends("iviti"): self._r("ive")
            elif self._ends("biliti"): self._r("ble")
        elif ch == 'g':  # --DEPARTURE--
            if self._ends("logi"): self._r("log")
            # To match the published algorithm, delete this phrase

    def _step3(self):
        """Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
        ch = self.b[self.k]

        if ch == 'e':
            if self._ends("icate"): self._r("ic")
            elif self._ends("ative"): self._r("")
            elif self._ends("alize"): self._r("al")
        elif ch == 'i':
            if self._ends("iciti"): self._r("ic")
        elif ch == 'l':
            if self._ends("ical"): self._r("ic")
            elif self._ends("ful"): self._r("")
        elif ch == 's':
            if self._ends("ness"): self._r("")

    def _step4(self):
        """_step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        ch = self.b[self.k - 1]

        if ch == 'a':
            if not self._ends("al"): return
        elif ch == 'c':
            if not self._ends("ance") and not self._ends("ence"): return
        elif ch == 'e':
            if not self._ends("er"): return
        elif ch == 'i':
            if not self._ends("ic"): return
        elif ch == 'l':
            if not self._ends("able") and not self._ends("ible"): return
        elif ch == 'n':
            if self._ends("ant"): pass
            elif self._ends("ement"): pass
            elif self._ends("ment"): pass
            elif self._ends("ent"): pass
            else: return
        elif ch == 'o':
            if self._ends("ion") and self.b[self.j] in "st": pass
            elif self._ends("ou"): pass
            # takes care of -ous
            else: return
        elif ch == 's':
            if not self._ends("ism"): return
        elif ch == 't':
            if not self._ends("ate") and not self._ends("iti"): return
        elif ch == 'u':
            if not self._ends("ous"): return
        elif ch == 'v':
            if not self._ends("ive"): return
        elif ch == 'z':
            if not self._ends("ize"): return
        else:
            return
        # Strip the suffix only when the remaining stem has m > 1.
        if self._m() > 1:
            self.k = self.j

    def _step5(self):
        """Remove a final -e if _m() > 1, and change -ll to -l if m() > 1.
        """
        k = self.j = self.k
        if self.b[k] == 'e':
            a = self._m()
            if a > 1 or (a == 1 and not self._cvc(k - 1)):
                self.k -= 1
        if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
            self.k -= 1

    def stem(self, w):
        """Stem the word w, return the stemmed form."""
        w = w.lower()
        k = len(w) - 1
        if k <= 1:
            return w  # --DEPARTURE--
        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.

        self.b = w
        self.k = k

        self._step1ab()
        self._step1c()
        self._step2()
        self._step3()
        self._step4()
        self._step5()
        return self.b[:self.k+1]

    def stem_sentence(self, txt):
        """Stem every whitespace-separated token of txt and rejoin with spaces."""
        return " ".join(map(self.stem, txt.split()))

    def stem_documents(self, docs):
        """Apply stem_sentence to each document; returns a map object on Python 3."""
        return map(self.stem_sentence, docs)
if __name__ == '__main__':
    # Command-line usage: stem every line of each file given as an argument.
    import sys

    stemmer = PorterStemmer()
    for filename in sys.argv[1:]:
        with open(filename) as handle:
            for text_line in handle:
                print(stemmer.stem_sentence(text_line))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ModelFnOps tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.client import session
from tensorflow.python.estimator.export import export_output as core_export_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
class ModelFnopsTest(test.TestCase):
  """Tests conversion of contrib ModelFnOps into a core EstimatorSpec.

  Each test builds a ModelFnOps with a particular output-alternatives dict
  and checks the export_outputs of the resulting EstimatorSpec.
  """

  def create_predictions(self):
    # Fixed prediction tensors reused by every test below.
    probabilities = constant_op.constant([1., 1., 1.])
    scores = constant_op.constant([1., 2., 3.])
    classes = constant_op.constant([b"0", b"1", b"2"])
    return {
        "probabilities": probabilities,
        "scores": scores,
        "classes": classes}

  def create_model_fn_ops(self, predictions, output_alternatives,
                          mode=model_fn.ModeKeys.INFER):
    # Builds a fully populated ModelFnOps so every field of the resulting
    # EstimatorSpec can be compared.
    # NOTE(review): the ``mode`` parameter is ignored — ModeKeys.INFER is
    # always passed below; confirm whether that is intentional.
    return model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions=predictions,
        loss=constant_op.constant([1]),
        train_op=control_flow_ops.no_op(),
        eval_metric_ops={
            "metric_key": (constant_op.constant(1.), control_flow_ops.no_op()),
            "loss": (constant_op.constant(1.), control_flow_ops.no_op()),
        },
        training_chief_hooks=[basic_session_run_hooks.StepCounterHook()],
        training_hooks=[basic_session_run_hooks.StepCounterHook()],
        output_alternatives=output_alternatives,
        scaffold=monitored_session.Scaffold())

  def assertEquals_except_export_and_eval_loss(
      self, model_fn_ops, estimator_spec):
    # The "loss" eval metric is not carried over by estimator_spec(), so it
    # is excluded from the expected metric dict before comparing.
    expected_eval_metric_ops = {}
    for key, value in six.iteritems(model_fn_ops.eval_metric_ops):
      if key != "loss":
        expected_eval_metric_ops[key] = value
    self.assertEqual(model_fn_ops.predictions, estimator_spec.predictions)
    self.assertEqual(model_fn_ops.loss, estimator_spec.loss)
    self.assertEqual(model_fn_ops.train_op, estimator_spec.train_op)
    self.assertEqual(expected_eval_metric_ops,
                     estimator_spec.eval_metric_ops)
    self.assertAllEqual(model_fn_ops.training_chief_hooks,
                        estimator_spec.training_chief_hooks)
    self.assertAllEqual(model_fn_ops.training_hooks,
                        estimator_spec.training_hooks)
    self.assertEqual(model_fn_ops.scaffold, estimator_spec.scaffold)

  def testEstimatorSpec_except_export(self):
    # No output alternatives: only the common fields are checked.
    predictions = self.create_predictions()
    model_fn_ops = self.create_model_fn_ops(
        predictions, None, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

  def testEstimatorSpec_export_regression_with_scores(self):
    predictions = self.create_predictions()
    output_alternatives = {"regression_head": (
        constants.ProblemType.LINEAR_REGRESSION, predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      regression_output = estimator_spec.export_outputs["regression_head"]
      self.assertTrue(isinstance(
          regression_output, core_export_lib.RegressionOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          regression_output.value.eval())

  def testEstimatorSpec_export_regression_with_probabilities(self):
    # Without "scores", regression falls back to "probabilities".
    predictions = self.create_predictions()
    output_alternatives_predictions = predictions.copy()
    del output_alternatives_predictions["scores"]
    output_alternatives = {"regression_head": (
        constants.ProblemType.LINEAR_REGRESSION,
        output_alternatives_predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      regression_output = estimator_spec.export_outputs["regression_head"]
      self.assertTrue(isinstance(
          regression_output, core_export_lib.RegressionOutput))
      self.assertAllEqual(predictions["probabilities"].eval(),
                          regression_output.value.eval())

  def testEstimatorSpec_export_classification(self):
    predictions = self.create_predictions()
    output_alternatives = {"classification_head": (
        constants.ProblemType.CLASSIFICATION, predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      classification_output = estimator_spec.export_outputs[
          "classification_head"]
      self.assertTrue(isinstance(classification_output,
                                 core_export_lib.ClassificationOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          classification_output.scores.eval())
      self.assertAllEqual(predictions["classes"].eval(),
                          classification_output.classes.eval())

  def testEstimatorSpec_export_classification_with_missing_scores(self):
    # Without "scores", classification falls back to "probabilities".
    predictions = self.create_predictions()
    output_alternatives_predictions = predictions.copy()
    del output_alternatives_predictions["scores"]
    output_alternatives = {"classification_head": (
        constants.ProblemType.CLASSIFICATION, output_alternatives_predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      classification_output = estimator_spec.export_outputs[
          "classification_head"]
      self.assertTrue(isinstance(classification_output,
                                 core_export_lib.ClassificationOutput))
      self.assertAllEqual(predictions["probabilities"].eval(),
                          classification_output.scores.eval())
      self.assertAllEqual(predictions["classes"].eval(),
                          classification_output.classes.eval())

  def testEstimatorSpec_export_classification_with_missing_scores_proba(self):
    # With neither "scores" nor "probabilities", scores are omitted entirely.
    predictions = self.create_predictions()
    output_alternatives_predictions = predictions.copy()
    del output_alternatives_predictions["scores"]
    del output_alternatives_predictions["probabilities"]
    output_alternatives = {"classification_head": (
        constants.ProblemType.CLASSIFICATION, output_alternatives_predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      classification_output = estimator_spec.export_outputs[
          "classification_head"]
      self.assertTrue(isinstance(classification_output,
                                 core_export_lib.ClassificationOutput))
      self.assertIsNone(classification_output.scores)
      self.assertAllEqual(predictions["classes"].eval(),
                          classification_output.classes.eval())

  def testEstimatorSpec_export_classification_with_missing_classes(self):
    predictions = self.create_predictions()
    output_alternatives_predictions = predictions.copy()
    del output_alternatives_predictions["classes"]
    output_alternatives = {"classification_head": (
        constants.ProblemType.CLASSIFICATION, output_alternatives_predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      classification_output = estimator_spec.export_outputs[
          "classification_head"]
      self.assertTrue(isinstance(classification_output,
                                 core_export_lib.ClassificationOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          classification_output.scores.eval())
      self.assertIsNone(classification_output.classes)

  def testEstimatorSpec_export_classification_with_nonstring_classes(self):
    # Non-string classes are dropped from the classification output.
    predictions = self.create_predictions()
    output_alternatives_predictions = predictions.copy()
    output_alternatives_predictions["classes"] = constant_op.constant(
        [1, 2, 3])
    output_alternatives = {"classification_head": (
        constants.ProblemType.CLASSIFICATION, output_alternatives_predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      classification_output = estimator_spec.export_outputs[
          "classification_head"]
      self.assertTrue(isinstance(classification_output,
                                 core_export_lib.ClassificationOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          classification_output.scores.eval())
      self.assertIsNone(classification_output.classes)

  def testEstimatorSpec_export_logistic(self):
    # Logistic regression is exported as a classification output.
    predictions = self.create_predictions()
    output_alternatives = {"logistic_head": (
        constants.ProblemType.LOGISTIC_REGRESSION, predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      logistic_output = estimator_spec.export_outputs["logistic_head"]
      self.assertTrue(isinstance(logistic_output,
                                 core_export_lib.ClassificationOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          logistic_output.scores.eval())
      self.assertAllEqual(predictions["classes"].eval(),
                          logistic_output.classes.eval())

  def testEstimatorSpec_export_unspecified(self):
    # UNSPECIFIED problem type produces a generic PredictOutput.
    predictions = self.create_predictions()
    output_alternatives = {"unspecified_head": (
        constants.ProblemType.UNSPECIFIED, predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec()
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      unspecified_output = estimator_spec.export_outputs["unspecified_head"]
      self.assertTrue(isinstance(unspecified_output,
                                 core_export_lib.PredictOutput))
      self.assertEqual(predictions, unspecified_output.outputs)

  def testEstimatorSpec_export_multihead(self):
    # The head named in estimator_spec(...) also becomes the default
    # serving signature.
    predictions = self.create_predictions()
    output_alternatives = {
        "regression_head": (
            constants.ProblemType.LINEAR_REGRESSION, predictions),
        "classification_head": (
            constants.ProblemType.CLASSIFICATION, predictions)}
    model_fn_ops = self.create_model_fn_ops(
        predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)

    estimator_spec = model_fn_ops.estimator_spec("regression_head")
    self.assertEquals_except_export_and_eval_loss(model_fn_ops, estimator_spec)

    with session.Session():
      regression_output = estimator_spec.export_outputs["regression_head"]
      self.assertTrue(isinstance(
          regression_output, core_export_lib.RegressionOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          regression_output.value.eval())

      default_output = estimator_spec.export_outputs[
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
      self.assertTrue(isinstance(default_output,
                                 core_export_lib.RegressionOutput))
      self.assertAllEqual(predictions["scores"].eval(),
                          default_output.value.eval())
# Standard test entry point: run the test cases in this module when the file
# is executed directly.
if __name__ == "__main__":
  test.main()
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
#
# This is used for generating API documentation and the types used by the
# client/server. See api/README.md for more information.
#
# Some style notes:
# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
# descriptions.
# - There is no maximum line length, for ease of editing and pretty diffs.
# - operationIds are in the format "NounVerb", with a singular noun.
swagger: "2.0"
schemes:
- "http"
- "https"
produces:
- "application/json"
- "text/plain"
consumes:
- "application/json"
- "text/plain"
basePath: "/v1.49"
info:
title: "Docker Engine API"
version: "1.49"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
The Engine API is an HTTP API served by Docker Engine. It is the API the
Docker client uses to communicate with the Engine, so everything the Docker
client can do can be done with the API.
Most of the client's commands map directly to API endpoints (e.g. `docker ps`
is `GET /containers/json`). The notable exception is running containers,
which consists of several API calls.
# Errors
The API uses standard HTTP status codes to indicate the success or failure
of the API call. The body of the response will be JSON in the following
format:
```
{
"message": "page not found"
}
```
# Versioning
The API is usually changed in each release, so API calls are versioned to
ensure that clients don't break. To lock to a specific version of the API,
you prefix the URL with its version, for example, call `/v1.30/info` to use
the v1.30 version of the `/info` endpoint. If the API version specified in
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
If you omit the version-prefix, the current version of the API (v1.49) is used.
For example, calling `/info` is the same as calling `/v1.49/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
so your client will continue to work even if it is talking to a newer Engine.
The API uses an open schema model, which means the server may add extra properties
to responses. Likewise, the server will ignore any extra query parameters and
request body properties. When you write clients, you need to ignore additional
properties in responses to ensure they do not break when talking to newer
daemons.
# Authentication
Authentication for registries is handled client side. The client has to send
authentication details to various endpoints that need to communicate with
registries, such as `POST /images/(name)/push`. These are sent as
`X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
(JSON) string with the following structure:
```
{
"username": "string",
"password": "string",
"serveraddress": "string"
}
```
The `serveraddress` is a domain/IP without a protocol. Throughout this
structure, double quotes are required.
If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
you can just pass this instead of credentials:
```
{
"identitytoken": "9cbaf023786cd7..."
}
```
# The tags on paths define the menu sections in the ReDoc documentation, so
# the usage of tags must make sense for that:
# - They should be singular, not plural.
# - There should not be too many tags, or the menu becomes unwieldy. For
# example, it is preferable to add a path to the "System" tag instead of
# creating a tag with a single path in it.
# - The order of tags in this list defines the order in the menu.
tags:
# Primary objects
- name: "Container"
x-displayName: "Containers"
description: |
Create and manage containers.
- name: "Image"
x-displayName: "Images"
- name: "Network"
x-displayName: "Networks"
description: |
Networks are user-defined networks that containers can be attached to.
See the [networking documentation](https://docs.docker.com/network/)
for more information.
- name: "Volume"
x-displayName: "Volumes"
description: |
Create and manage persistent storage that can be attached to containers.
- name: "Exec"
x-displayName: "Exec"
description: |
Run new commands inside running containers. Refer to the
[command-line reference](https://docs.docker.com/engine/reference/commandline/exec/)
for more information.
To exec a command in a container, you first need to create an exec instance,
then start it. These two API endpoints are wrapped up in a single command-line
command, `docker exec`.
# Swarm things
- name: "Swarm"
x-displayName: "Swarm"
description: |
Engines can be clustered together in a swarm. Refer to the
[swarm mode documentation](https://docs.docker.com/engine/swarm/)
for more information.
- name: "Node"
x-displayName: "Nodes"
description: |
Nodes are instances of the Engine participating in a swarm. Swarm mode
must be enabled for these endpoints to work.
- name: "Service"
x-displayName: "Services"
description: |
Services are the definitions of tasks to run on a swarm. Swarm mode must
be enabled for these endpoints to work.
- name: "Task"
x-displayName: "Tasks"
description: |
A task is a container running on a swarm. It is the atomic scheduling unit
of swarm. Swarm mode must be enabled for these endpoints to work.
- name: "Secret"
x-displayName: "Secrets"
description: |
Secrets are sensitive data that can be used by services. Swarm mode must
be enabled for these endpoints to work.
- name: "Config"
x-displayName: "Configs"
description: |
Configs are application configurations that can be used by services. Swarm
mode must be enabled for these endpoints to work.
# System things
- name: "Plugin"
x-displayName: "Plugins"
- name: "System"
x-displayName: "System"
definitions:
ImageHistoryResponseItem:
type: "object"
x-go-name: HistoryResponseItem
title: "HistoryResponseItem"
description: "individual image layer information in response to ImageHistory operation"
required: [Id, Created, CreatedBy, Tags, Size, Comment]
properties:
Id:
type: "string"
x-nullable: false
Created:
type: "integer"
format: "int64"
x-nullable: false
CreatedBy:
type: "string"
x-nullable: false
Tags:
type: "array"
items:
type: "string"
Size:
type: "integer"
format: "int64"
x-nullable: false
Comment:
type: "string"
x-nullable: false
Port:
type: "object"
description: "An open port on a container"
required: [PrivatePort, Type]
properties:
IP:
type: "string"
format: "ip-address"
description: "Host IP address that the container's port is mapped to"
PrivatePort:
type: "integer"
format: "uint16"
x-nullable: false
description: "Port on the container"
PublicPort:
type: "integer"
format: "uint16"
description: "Port exposed on the host"
Type:
type: "string"
x-nullable: false
enum: ["tcp", "udp", "sctp"]
example:
PrivatePort: 8080
PublicPort: 80
Type: "tcp"
MountType:
description: |-
The mount type. Available types:
- `bind` a mount of a file or directory from the host into the container.
- `cluster` a Swarm cluster volume.
- `image` an OCI image.
- `npipe` a named pipe from the host into the container.
- `tmpfs` a `tmpfs`.
- `volume` a docker volume with the given `Name`.
type: "string"
enum:
- "bind"
- "cluster"
- "image"
- "npipe"
- "tmpfs"
- "volume"
example: "volume"
MountPoint:
type: "object"
description: |
MountPoint represents a mount point configuration inside the container.
This is used for reporting the mountpoints in use by a container.
properties:
Type:
description: |
The mount type:
- `bind` a mount of a file or directory from the host into the container.
- `cluster` a Swarm cluster volume.
- `image` an OCI image.
- `npipe` a named pipe from the host into the container.
- `tmpfs` a `tmpfs`.
- `volume` a docker volume with the given `Name`.
allOf:
- $ref: "#/definitions/MountType"
example: "volume"
Name:
description: |
Name is the name reference to the underlying data defined by `Source`
e.g., the volume name.
type: "string"
example: "myvolume"
Source:
description: |
Source location of the mount.
For volumes, this contains the storage location of the volume (within
`/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
the source (host) part of the bind-mount. For `tmpfs` mount points, this
field is empty.
type: "string"
example: "/var/lib/docker/volumes/myvolume/_data"
Destination:
description: |
Destination is the path relative to the container root (`/`) where
the `Source` is mounted inside the container.
type: "string"
example: "/usr/share/nginx/html/"
Driver:
description: |
Driver is the volume driver used to create the volume (if it is a volume).
type: "string"
example: "local"
Mode:
description: |
Mode is a comma separated list of options supplied by the user when
creating the bind/volume mount.
The default is platform-specific (`"z"` on Linux, empty on Windows).
type: "string"
example: "z"
RW:
description: |
Whether the mount is mounted writable (read-write).
type: "boolean"
example: true
Propagation:
description: |
Propagation describes how mounts are propagated from the host into the
mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)
for details. This field is not used on Windows.
type: "string"
example: ""
DeviceMapping:
type: "object"
description: "A device mapping between the host and container"
properties:
PathOnHost:
type: "string"
PathInContainer:
type: "string"
CgroupPermissions:
type: "string"
example:
PathOnHost: "/dev/deviceName"
PathInContainer: "/dev/deviceName"
CgroupPermissions: "mrw"
DeviceRequest:
type: "object"
description: "A request for devices to be sent to device drivers"
properties:
Driver:
type: "string"
example: "nvidia"
Count:
type: "integer"
example: -1
DeviceIDs:
type: "array"
items:
type: "string"
example:
- "0"
- "1"
- "GPU-fef8089b-4820-abfc-e83e-94318197576e"
Capabilities:
description: |
A list of capabilities; an OR list of AND lists of capabilities.
type: "array"
items:
type: "array"
items:
type: "string"
example:
# gpu AND nvidia AND compute
- ["gpu", "nvidia", "compute"]
Options:
description: |
Driver-specific options, specified as a key/value pairs. These options
are passed directly to the driver.
type: "object"
additionalProperties:
type: "string"
ThrottleDevice:
type: "object"
properties:
Path:
description: "Device path"
type: "string"
Rate:
description: "Rate"
type: "integer"
format: "int64"
minimum: 0
Mount:
type: "object"
properties:
Target:
description: "Container path."
type: "string"
Source:
description: |-
Mount source (e.g. a volume name, a host path). The source cannot be
specified when using `Type=tmpfs`. For `Type=bind`, the source path
must either exist, or the `CreateMountpoint` must be set to `true` to
create the source path on the host if missing.
For `Type=npipe`, the pipe must exist prior to creating the container.
type: "string"
Type:
description: |
The mount type. Available types:
- `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container.
- `cluster` a Swarm cluster volume
- `image` Mounts an image.
- `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container.
- `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs.
- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
allOf:
- $ref: "#/definitions/MountType"
ReadOnly:
description: "Whether the mount should be read-only."
type: "boolean"
Consistency:
description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`."
type: "string"
BindOptions:
description: "Optional configuration for the `bind` type."
type: "object"
properties:
Propagation:
description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
type: "string"
enum:
- "private"
- "rprivate"
- "shared"
- "rshared"
- "slave"
- "rslave"
NonRecursive:
description: "Disable recursive bind mount."
type: "boolean"
default: false
CreateMountpoint:
description: "Create mount point on host if missing"
type: "boolean"
default: false
ReadOnlyNonRecursive:
description: |
Make the mount non-recursively read-only, but still leave the mount recursive
(unless NonRecursive is set to `true` in conjunction).
Added in v1.44, before that version all read-only mounts were
non-recursive by default. To match the previous behaviour this
will default to `true` for clients on versions prior to v1.44.
type: "boolean"
default: false
ReadOnlyForceRecursive:
description: "Raise an error if the mount cannot be made recursively read-only."
type: "boolean"
default: false
VolumeOptions:
description: "Optional configuration for the `volume` type."
type: "object"
properties:
NoCopy:
description: "Populate volume with data from the target."
type: "boolean"
default: false
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
DriverConfig:
description: "Map of driver specific options"
type: "object"
properties:
Name:
description: "Name of the driver to use to create the volume."
type: "string"
Options:
description: "key/value map of driver specific options."
type: "object"
additionalProperties:
type: "string"
Subpath:
description: "Source path inside the volume. Must be relative without any back traversals."
type: "string"
example: "dir-inside-volume/subdirectory"
ImageOptions:
description: "Optional configuration for the `image` type."
type: "object"
properties:
Subpath:
description: "Source path inside the image. Must be relative without any back traversals."
type: "string"
example: "dir-inside-image/subdirectory"
TmpfsOptions:
description: "Optional configuration for the `tmpfs` type."
type: "object"
properties:
SizeBytes:
description: "The size for the tmpfs mount in bytes."
type: "integer"
format: "int64"
Mode:
description: |
The permission mode for the tmpfs mount in an integer.
The value must not be in octal format (e.g. 755) but rather
the decimal representation of the octal value (e.g. 493).
type: "integer"
Options:
description: |
The options to be passed to the tmpfs mount. An array of arrays.
Flag options should be provided as 1-length arrays. Other types
              should be provided as 2-length arrays, where the first item is
the key and the second the value.
type: "array"
items:
type: "array"
minItems: 1
maxItems: 2
items:
type: "string"
example:
[["noexec"]]
RestartPolicy:
description: |
The behavior to apply when the container exits. The default is not to
restart.
An ever increasing delay (double the previous delay, starting at 100ms) is
added before each restart to prevent flooding the server.
type: "object"
properties:
Name:
type: "string"
description: |
- Empty string means not to restart
- `no` Do not automatically restart
- `always` Always restart
- `unless-stopped` Restart always except when the user has manually stopped the container
- `on-failure` Restart only when the container exit code is non-zero
enum:
- ""
- "no"
- "always"
- "unless-stopped"
- "on-failure"
MaximumRetryCount:
type: "integer"
description: |
If `on-failure` is used, the number of times to retry before giving up.
Resources:
description: "A container's resources (cgroups config, ulimits, etc)"
type: "object"
properties:
# Applicable to all platforms
CpuShares:
description: |
An integer value representing this container's relative CPU weight
versus other containers.
type: "integer"
Memory:
description: "Memory limit in bytes."
type: "integer"
format: "int64"
default: 0
# Applicable to UNIX platforms
CgroupParent:
description: |
Path to `cgroups` under which the container's `cgroup` is created. If
the path is not absolute, the path is considered to be relative to the
`cgroups` path of the init process. Cgroups are created if they do not
already exist.
type: "string"
BlkioWeight:
description: "Block IO weight (relative weight)."
type: "integer"
minimum: 0
maximum: 1000
BlkioWeightDevice:
description: |
Block IO weight (relative device weight) in the form:
```
[{"Path": "device_path", "Weight": weight}]
```
type: "array"
items:
type: "object"
properties:
Path:
type: "string"
Weight:
type: "integer"
minimum: 0
BlkioDeviceReadBps:
description: |
Limit read rate (bytes per second) from a device, in the form:
```
[{"Path": "device_path", "Rate": rate}]
```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
BlkioDeviceWriteBps:
description: |
Limit write rate (bytes per second) to a device, in the form:
```
[{"Path": "device_path", "Rate": rate}]
```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
BlkioDeviceReadIOps:
description: |
Limit read rate (IO per second) from a device, in the form:
```
[{"Path": "device_path", "Rate": rate}]
```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
BlkioDeviceWriteIOps:
description: |
Limit write rate (IO per second) to a device, in the form:
```
[{"Path": "device_path", "Rate": rate}]
```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
CpuPeriod:
description: "The length of a CPU period in microseconds."
type: "integer"
format: "int64"
CpuQuota:
description: |
Microseconds of CPU time that the container can get in a CPU period.
type: "integer"
format: "int64"
CpuRealtimePeriod:
description: |
The length of a CPU real-time period in microseconds. Set to 0 to
allocate no time allocated to real-time tasks.
type: "integer"
format: "int64"
CpuRealtimeRuntime:
description: |
The length of a CPU real-time runtime in microseconds. Set to 0 to
allocate no time allocated to real-time tasks.
type: "integer"
format: "int64"
CpusetCpus:
description: |
CPUs in which to allow execution (e.g., `0-3`, `0,1`).
type: "string"
example: "0-3"
CpusetMems:
description: |
Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
effective on NUMA systems.
type: "string"
Devices:
description: "A list of devices to add to the container."
type: "array"
items:
$ref: "#/definitions/DeviceMapping"
DeviceCgroupRules:
description: "a list of cgroup rules to apply to the container"
type: "array"
items:
type: "string"
example: "c 13:* rwm"
DeviceRequests:
description: |
A list of requests for devices to be sent to device drivers.
type: "array"
items:
$ref: "#/definitions/DeviceRequest"
KernelMemoryTCP:
description: |
Hard limit for kernel TCP buffer memory (in bytes). Depending on the
OCI runtime in use, this option may be ignored. It is no longer supported
by the default (runc) runtime.
This field is omitted when empty.
type: "integer"
format: "int64"
MemoryReservation:
description: "Memory soft limit in bytes."
type: "integer"
format: "int64"
MemorySwap:
description: |
Total memory limit (memory + swap). Set as `-1` to enable unlimited
swap.
type: "integer"
format: "int64"
MemorySwappiness:
description: |
Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
type: "integer"
format: "int64"
minimum: 0
maximum: 100
NanoCpus:
description: "CPU quota in units of 10<sup>-9</sup> CPUs."
type: "integer"
format: "int64"
OomKillDisable:
description: "Disable OOM Killer for the container."
type: "boolean"
Init:
description: |
Run an init inside the container that forwards signals and reaps
processes. This field is omitted if empty, and the default (as
configured on the daemon) is used.
type: "boolean"
x-nullable: true
PidsLimit:
description: |
Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
to not change.
type: "integer"
format: "int64"
x-nullable: true
Ulimits:
description: |
A list of resource limits to set in the container. For example:
```
{"Name": "nofile", "Soft": 1024, "Hard": 2048}
```
type: "array"
items:
type: "object"
properties:
Name:
description: "Name of ulimit"
type: "string"
Soft:
description: "Soft limit"
type: "integer"
Hard:
description: "Hard limit"
type: "integer"
# Applicable to Windows
CpuCount:
description: |
The number of usable CPUs (Windows only).
On Windows Server containers, the processor resource controls are
mutually exclusive. The order of precedence is `CPUCount` first, then
`CPUShares`, and `CPUPercent` last.
type: "integer"
format: "int64"
CpuPercent:
description: |
The usable percentage of the available CPUs (Windows only).
On Windows Server containers, the processor resource controls are
mutually exclusive. The order of precedence is `CPUCount` first, then
`CPUShares`, and `CPUPercent` last.
type: "integer"
format: "int64"
IOMaximumIOps:
description: "Maximum IOps for the container system drive (Windows only)"
type: "integer"
format: "int64"
IOMaximumBandwidth:
description: |
Maximum IO in bytes per second for the container system drive
(Windows only).
type: "integer"
format: "int64"
Limit:
description: |
An object describing a limit on resources which can be requested by a task.
type: "object"
properties:
NanoCPUs:
type: "integer"
format: "int64"
example: 4000000000
MemoryBytes:
type: "integer"
format: "int64"
example: 8272408576
Pids:
description: |
Limits the maximum number of PIDs in the container. Set `0` for unlimited.
type: "integer"
format: "int64"
default: 0
example: 100
ResourceObject:
description: |
An object describing the resources which can be advertised by a node and
requested by a task.
type: "object"
properties:
NanoCPUs:
type: "integer"
format: "int64"
example: 4000000000
MemoryBytes:
type: "integer"
format: "int64"
example: 8272408576
GenericResources:
$ref: "#/definitions/GenericResources"
GenericResources:
description: |
User-defined resources can be either Integer resources (e.g, `SSD=3`) or
String resources (e.g, `GPU=UUID1`).
type: "array"
items:
type: "object"
properties:
NamedResourceSpec:
type: "object"
properties:
Kind:
type: "string"
Value:
type: "string"
DiscreteResourceSpec:
type: "object"
properties:
Kind:
type: "string"
Value:
type: "integer"
format: "int64"
example:
- DiscreteResourceSpec:
Kind: "SSD"
Value: 3
- NamedResourceSpec:
Kind: "GPU"
Value: "UUID1"
- NamedResourceSpec:
Kind: "GPU"
Value: "UUID2"
HealthConfig:
description: |
A test to perform to check that the container is healthy.
Healthcheck commands should be side-effect free.
type: "object"
properties:
Test:
description: |
The test to perform. Possible values are:
- `[]` inherit healthcheck from image or parent image
- `["NONE"]` disable healthcheck
- `["CMD", args...]` exec arguments directly
- `["CMD-SHELL", command]` run command with system's default shell
A non-zero exit code indicates a failed healthcheck:
- `0` healthy
- `1` unhealthy
- `2` reserved (treated as unhealthy)
- other values: error running probe
type: "array"
items:
type: "string"
Interval:
description: |
The time to wait between checks in nanoseconds. It should be 0 or at
least 1000000 (1 ms). 0 means inherit.
type: "integer"
format: "int64"
Timeout:
description: |
The time to wait before considering the check to have hung. It should
be 0 or at least 1000000 (1 ms). 0 means inherit.
If the health check command does not complete within this timeout,
the check is considered failed and the health check process is
forcibly terminated without a graceful shutdown.
type: "integer"
format: "int64"
Retries:
description: |
The number of consecutive failures needed to consider a container as
unhealthy. 0 means inherit.
type: "integer"
StartPeriod:
description: |
Start period for the container to initialize before starting
health-retries countdown in nanoseconds. It should be 0 or at least
1000000 (1 ms). 0 means inherit.
type: "integer"
format: "int64"
StartInterval:
description: |
The time to wait between checks in nanoseconds during the start period.
It should be 0 or at least 1000000 (1 ms). 0 means inherit.
type: "integer"
format: "int64"
Health:
description: |
Health stores information about the container's healthcheck results.
type: "object"
x-nullable: true
properties:
Status:
description: |
Status is one of `none`, `starting`, `healthy` or `unhealthy`
- "none" Indicates there is no healthcheck
- "starting" Starting indicates that the container is not yet ready
- "healthy" Healthy indicates that the container is running correctly
- "unhealthy" Unhealthy indicates that the container has a problem
type: "string"
enum:
- "none"
- "starting"
- "healthy"
- "unhealthy"
example: "healthy"
FailingStreak:
description: "FailingStreak is the number of consecutive failures"
type: "integer"
example: 0
Log:
type: "array"
description: |
Log contains the last few results (oldest first)
items:
$ref: "#/definitions/HealthcheckResult"
HealthcheckResult:
description: |
HealthcheckResult stores information about a single run of a healthcheck probe
type: "object"
x-nullable: true
properties:
Start:
description: |
Date and time at which this check started in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "date-time"
example: "2020-01-04T10:44:24.496525531Z"
End:
description: |
Date and time at which this check ended in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
        format: "date-time"
example: "2020-01-04T10:45:21.364524523Z"
ExitCode:
description: |
ExitCode meanings:
- `0` healthy
- `1` unhealthy
- `2` reserved (considered unhealthy)
- other values: error running probe
type: "integer"
example: 0
Output:
description: "Output from last check"
type: "string"
HostConfig:
description: "Container configuration that depends on the host we are running on"
allOf:
- $ref: "#/definitions/Resources"
- type: "object"
properties:
# Applicable to all platforms
Binds:
type: "array"
description: |
A list of volume bindings for this container. Each volume binding
is a string in one of these forms:
- `host-src:container-dest[:options]` to bind-mount a host path
into the container. Both `host-src`, and `container-dest` must
be an _absolute_ path.
- `volume-name:container-dest[:options]` to bind-mount a volume
managed by a volume driver into the container. `container-dest`
must be an _absolute_ path.
`options` is an optional, comma-delimited list of:
- `nocopy` disables automatic copying of data from the container
path to the volume. The `nocopy` flag only applies to named volumes.
- `[ro|rw]` mounts a volume read-only or read-write, respectively.
If omitted or set to `rw`, volumes are mounted read-write.
- `[z|Z]` applies SELinux labels to allow or deny multiple containers
to read and write to the same volume.
- `z`: a _shared_ content label is applied to the content. This
label indicates that multiple containers can share the volume
content, for both reading and writing.
- `Z`: a _private unshared_ label is applied to the content.
This label indicates that only the current container can use
a private volume. Labeling systems such as SELinux require
proper labels to be placed on volume content that is mounted
into a container. Without a label, the security system can
prevent a container's processes from using the content. By
default, the labels set by the host operating system are not
modified.
- `[[r]shared|[r]slave|[r]private]` specifies mount
[propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
This only applies to bind-mounted volumes, not internal volumes
or named volumes. Mount propagation requires the source mount
point (the location where the source directory is mounted in the
host operating system) to have the correct propagation properties.
For shared volumes, the source mount point must be set to `shared`.
For slave volumes, the mount must be set to either `shared` or
`slave`.
items:
type: "string"
ContainerIDFile:
type: "string"
description: "Path to a file where the container ID is written"
example: ""
LogConfig:
type: "object"
description: "The logging configuration for this container"
properties:
Type:
description: |-
Name of the logging driver used for the container or "none"
if logging is disabled.
type: "string"
enum:
- "local"
- "json-file"
- "syslog"
- "journald"
- "gelf"
- "fluentd"
- "awslogs"
- "splunk"
- "etwlogs"
- "none"
Config:
description: |-
Driver-specific configuration options for the logging driver.
type: "object"
additionalProperties:
type: "string"
example:
"max-file": "5"
"max-size": "10m"
NetworkMode:
type: "string"
description: |
Network mode to use for this container. Supported standard values
are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
other value is taken as a custom network's name to which this
container should connect to.
PortBindings:
$ref: "#/definitions/PortMap"
RestartPolicy:
$ref: "#/definitions/RestartPolicy"
AutoRemove:
type: "boolean"
description: |
Automatically remove the container when the container's process
exits. This has no effect if `RestartPolicy` is set.
VolumeDriver:
type: "string"
description: "Driver that this container uses to mount volumes."
VolumesFrom:
type: "array"
description: |
A list of volumes to inherit from another container, specified in
the form `<container name>[:<ro|rw>]`.
items:
type: "string"
Mounts:
description: |
Specification for mounts to be added to the container.
type: "array"
items:
$ref: "#/definitions/Mount"
ConsoleSize:
type: "array"
description: |
Initial console size, as an `[height, width]` array.
x-nullable: true
minItems: 2
maxItems: 2
items:
type: "integer"
minimum: 0
example: [80, 64]
Annotations:
type: "object"
description: |
Arbitrary non-identifying metadata attached to container and
provided to the runtime when the container is started.
additionalProperties:
type: "string"
# Applicable to UNIX platforms
CapAdd:
type: "array"
description: |
A list of kernel capabilities to add to the container. Conflicts
with option 'Capabilities'.
items:
type: "string"
CapDrop:
type: "array"
description: |
A list of kernel capabilities to drop from the container. Conflicts
with option 'Capabilities'.
items:
type: "string"
CgroupnsMode:
type: "string"
enum:
- "private"
- "host"
description: |
cgroup namespace mode for the container. Possible values are:
- `"private"`: the container runs in its own private cgroup namespace
- `"host"`: use the host system's cgroup namespace
If not specified, the daemon default is used, which can either be `"private"`
or `"host"`, depending on daemon version, kernel support and configuration.
Dns:
type: "array"
description: "A list of DNS servers for the container to use."
items:
type: "string"
DnsOptions:
type: "array"
description: "A list of DNS options."
items:
type: "string"
DnsSearch:
type: "array"
description: "A list of DNS search domains."
items:
type: "string"
ExtraHosts:
type: "array"
description: |
A list of hostnames/IP mappings to add to the container's `/etc/hosts`
file. Specified in the form `["hostname:IP"]`.
items:
type: "string"
GroupAdd:
type: "array"
description: |
A list of additional groups that the container process will run as.
items:
type: "string"
IpcMode:
type: "string"
description: |
IPC sharing mode for the container. Possible values are:
- `"none"`: own private IPC namespace, with /dev/shm not mounted
- `"private"`: own private IPC namespace
- `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
- `"container:<name|id>"`: join another (shareable) container's IPC namespace
- `"host"`: use the host system's IPC namespace
If not specified, daemon default is used, which can either be `"private"`
or `"shareable"`, depending on daemon version and configuration.
Cgroup:
type: "string"
description: "Cgroup to use for the container."
Links:
type: "array"
description: |
A list of links for the container in the form `container_name:alias`.
items:
type: "string"
OomScoreAdj:
type: "integer"
description: |
An integer value containing the score given to the container in
order to tune OOM killer preferences.
example: 500
PidMode:
type: "string"
description: |
Set the PID (Process) Namespace mode for the container. It can be
either:
- `"container:<name|id>"`: joins another container's PID namespace
- `"host"`: use the host's PID namespace inside the container
Privileged:
type: "boolean"
description: |-
Gives the container full access to the host.
PublishAllPorts:
type: "boolean"
description: |
Allocates an ephemeral host port for all of a container's
exposed ports.
Ports are de-allocated when the container stops and allocated when
the container starts. The allocated port might be changed when
restarting the container.
The port is selected from the ephemeral port range that depends on
the kernel. For example, on Linux the range is defined by
`/proc/sys/net/ipv4/ip_local_port_range`.
ReadonlyRootfs:
type: "boolean"
description: "Mount the container's root filesystem as read only."
SecurityOpt:
type: "array"
description: |
A list of string values to customize labels for MLS systems, such
as SELinux.
items:
type: "string"
StorageOpt:
type: "object"
description: |
Storage driver options for this container, in the form `{"size": "120G"}`.
additionalProperties:
type: "string"
Tmpfs:
type: "object"
description: |
A map of container directories which should be replaced by tmpfs
mounts, and their corresponding mount options. For example:
```
{ "/run": "rw,noexec,nosuid,size=65536k" }
```
additionalProperties:
type: "string"
UTSMode:
type: "string"
description: "UTS namespace to use for the container."
UsernsMode:
type: "string"
        description: |
          Sets the user namespace mode for the container when user namespace
          remapping option is enabled.
ShmSize:
type: "integer"
format: "int64"
description: |
Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
minimum: 0
Sysctls:
type: "object"
x-nullable: true
description: |-
          A map of kernel parameters (sysctls) to set in the container.
This field is omitted if not set.
additionalProperties:
type: "string"
example:
"net.ipv4.ip_forward": "1"
Runtime:
type: "string"
x-nullable: true
description: |-
Runtime to use with this container.
# Applicable to Windows
Isolation:
type: "string"
description: |
Isolation technology of the container. (Windows only)
enum:
- "default"
- "process"
- "hyperv"
- ""
MaskedPaths:
type: "array"
description: |
The list of paths to be masked inside the container (this overrides
the default set of paths).
items:
type: "string"
example:
- "/proc/asound"
- "/proc/acpi"
- "/proc/kcore"
- "/proc/keys"
- "/proc/latency_stats"
- "/proc/timer_list"
- "/proc/timer_stats"
- "/proc/sched_debug"
- "/proc/scsi"
- "/sys/firmware"
- "/sys/devices/virtual/powercap"
ReadonlyPaths:
type: "array"
description: |
The list of paths to be set as read-only inside the container
(this overrides the default set of paths).
items:
type: "string"
example:
- "/proc/bus"
- "/proc/fs"
- "/proc/irq"
- "/proc/sys"
- "/proc/sysrq-trigger"
ContainerConfig:
description: |
Configuration for a container that is portable between hosts.
type: "object"
properties:
Hostname:
description: |
The hostname to use for the container, as a valid RFC 1123 hostname.
type: "string"
example: "439f4e91bd1d"
Domainname:
description: |
The domain name to use for the container.
type: "string"
User:
description: |-
Commands run as this user inside the container. If omitted, commands
run as the user specified in the image the container was started from.
Can be either user-name or UID, and optional group-name or GID,
separated by a colon (`<user-name|UID>[<:group-name|GID>]`).
type: "string"
example: "123:456"
AttachStdin:
description: "Whether to attach to `stdin`."
type: "boolean"
default: false
AttachStdout:
description: "Whether to attach to `stdout`."
type: "boolean"
default: true
AttachStderr:
description: "Whether to attach to `stderr`."
type: "boolean"
default: true
ExposedPorts:
description: |
An object mapping ports to an empty object in the form:
`{"<port>/<tcp|udp|sctp>": {}}`
type: "object"
x-nullable: true
additionalProperties:
type: "object"
enum:
- {}
default: {}
example: {
"80/tcp": {},
"443/tcp": {}
}
Tty:
description: |
Attach standard streams to a TTY, including `stdin` if it is not closed.
type: "boolean"
default: false
OpenStdin:
description: "Open `stdin`"
type: "boolean"
default: false
StdinOnce:
description: "Close `stdin` after one attached client disconnects"
type: "boolean"
default: false
Env:
description: |
A list of environment variables to set inside the container in the
form `["VAR=value", ...]`. A variable without `=` is removed from the
environment, rather than to have an empty value.
type: "array"
items:
type: "string"
example:
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Cmd:
description: |
Command to run specified as a string or an array of strings.
type: "array"
items:
type: "string"
example: ["/bin/sh"]
Healthcheck:
$ref: "#/definitions/HealthConfig"
ArgsEscaped:
description: "Command is already escaped (Windows only)"
type: "boolean"
default: false
example: false
x-nullable: true
Image:
description: |
The name (or reference) of the image to use when creating the container,
or which was used when the container was created.
type: "string"
example: "example-image:1.0"
Volumes:
description: |
An object mapping mount point paths inside the container to empty
objects.
type: "object"
additionalProperties:
type: "object"
enum:
- {}
default: {}
WorkingDir:
description: "The working directory for commands to run in."
type: "string"
example: "/public/"
Entrypoint:
description: |
The entry point for the container as a string or an array of strings.
If the array consists of exactly one empty string (`[""]`) then the
entry point is reset to system default (i.e., the entry point used by
docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
type: "array"
items:
type: "string"
example: []
NetworkDisabled:
description: "Disable networking for the container."
type: "boolean"
x-nullable: true
MacAddress:
description: |
MAC address of the container.
Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead.
type: "string"
x-nullable: true
OnBuild:
description: |
`ONBUILD` metadata that were defined in the image's `Dockerfile`.
type: "array"
x-nullable: true
items:
type: "string"
example: []
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
StopSignal:
description: |
Signal to stop a container as a string or unsigned integer.
type: "string"
example: "SIGTERM"
x-nullable: true
StopTimeout:
description: "Timeout to stop a container in seconds."
type: "integer"
default: 10
x-nullable: true
Shell:
description: |
Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.
type: "array"
x-nullable: true
items:
type: "string"
example: ["/bin/sh", "-c"]
ImageConfig:
description: |
Configuration of the image. These fields are used as defaults
when starting a container from the image.
type: "object"
properties:
Hostname:
description: |
The hostname to use for the container, as a valid RFC 1123 hostname.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always empty. It must not be used, and will be removed in API v1.50.
type: "string"
example: ""
Domainname:
description: |
The domain name to use for the container.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always empty. It must not be used, and will be removed in API v1.50.
type: "string"
example: ""
User:
description: "The user that commands are run as inside the container."
type: "string"
example: "web:web"
AttachStdin:
description: |
Whether to attach to `stdin`.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always false. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
AttachStdout:
description: |
Whether to attach to `stdout`.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always false. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
AttachStderr:
description: |
Whether to attach to `stderr`.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always false. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
ExposedPorts:
description: |
An object mapping ports to an empty object in the form:
`{"<port>/<tcp|udp|sctp>": {}}`
type: "object"
x-nullable: true
additionalProperties:
type: "object"
enum:
- {}
default: {}
example: {
"80/tcp": {},
"443/tcp": {}
}
Tty:
description: |
Attach standard streams to a TTY, including `stdin` if it is not closed.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always false. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
OpenStdin:
description: |
Open `stdin`
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always false. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
StdinOnce:
description: |
Close `stdin` after one attached client disconnects.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always false. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
Env:
description: |
A list of environment variables to set inside the container in the
form `["VAR=value", ...]`. A variable without `=` is removed from the
environment, rather than to have an empty value.
type: "array"
items:
type: "string"
example:
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Cmd:
description: |
Command to run specified as a string or an array of strings.
type: "array"
items:
type: "string"
example: ["/bin/sh"]
Healthcheck:
$ref: "#/definitions/HealthConfig"
ArgsEscaped:
description: "Command is already escaped (Windows only)"
type: "boolean"
default: false
example: false
x-nullable: true
Image:
description: |
The name (or reference) of the image to use when creating the container,
or which was used when the container was created.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always empty. It must not be used, and will be removed in API v1.50.
type: "string"
default: ""
example: ""
Volumes:
description: |
An object mapping mount point paths inside the container to empty
objects.
type: "object"
additionalProperties:
type: "object"
enum:
- {}
default: {}
example:
"/app/data": {}
"/app/config": {}
WorkingDir:
description: "The working directory for commands to run in."
type: "string"
example: "/public/"
Entrypoint:
description: |
The entry point for the container as a string or an array of strings.
If the array consists of exactly one empty string (`[""]`) then the
entry point is reset to system default (i.e., the entry point used by
docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
type: "array"
items:
type: "string"
example: []
NetworkDisabled:
description: |
Disable networking for the container.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always omitted. It must not be used, and will be removed in API v1.50.
type: "boolean"
default: false
example: false
x-nullable: true
MacAddress:
description: |
MAC address of the container.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always omitted. It must not be used, and will be removed in API v1.50.
type: "string"
default: ""
example: ""
x-nullable: true
OnBuild:
description: |
`ONBUILD` metadata that were defined in the image's `Dockerfile`.
type: "array"
x-nullable: true
items:
type: "string"
example: []
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
StopSignal:
description: |
Signal to stop a container as a string or unsigned integer.
type: "string"
example: "SIGTERM"
x-nullable: true
StopTimeout:
description: |
Timeout to stop a container in seconds.
<p><br /></p>
> **Deprecated**: this field is not part of the image specification and is
> always omitted. It must not be used, and will be removed in API v1.50.
type: "integer"
default: 10
x-nullable: true
Shell:
description: |
Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.
type: "array"
x-nullable: true
items:
type: "string"
example: ["/bin/sh", "-c"]
# FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed.
example:
"Hostname": ""
"Domainname": ""
"User": "web:web"
"AttachStdin": false
"AttachStdout": false
"AttachStderr": false
"ExposedPorts": {
"80/tcp": {},
"443/tcp": {}
}
"Tty": false
"OpenStdin": false
"StdinOnce": false
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
"Cmd": ["/bin/sh"]
"Healthcheck": {
"Test": ["string"],
"Interval": 0,
"Timeout": 0,
"Retries": 0,
"StartPeriod": 0,
"StartInterval": 0
}
"ArgsEscaped": true
"Image": ""
"Volumes": {
"/app/data": {},
"/app/config": {}
}
"WorkingDir": "/public/"
"Entrypoint": []
"OnBuild": []
"Labels": {
"com.example.some-label": "some-value",
"com.example.some-other-label": "some-other-value"
}
"StopSignal": "SIGTERM"
"Shell": ["/bin/sh", "-c"]
NetworkingConfig:
description: |
NetworkingConfig represents the container's networking configuration for
each of its interfaces.
It is used for the networking configs specified in the `docker create`
and `docker network connect` commands.
type: "object"
properties:
EndpointsConfig:
description: |
A mapping of network name to endpoint configuration for that network.
The endpoint configuration can be left empty to connect to that
network with no particular endpoint configuration.
type: "object"
additionalProperties:
$ref: "#/definitions/EndpointSettings"
example:
# putting an example here, instead of using the example values from
# /definitions/EndpointSettings, because EndpointSettings contains
# operational data returned when inspecting a container that we don't
# accept here.
EndpointsConfig:
isolated_nw:
IPAMConfig:
IPv4Address: "172.20.30.33"
IPv6Address: "2001:db8:abcd::3033"
LinkLocalIPs:
- "169.254.34.68"
- "fe80::3468"
MacAddress: "02:42:ac:12:05:02"
Links:
- "container_1"
- "container_2"
Aliases:
- "server_x"
- "server_y"
database_nw: {}
NetworkSettings:
description: "NetworkSettings exposes the network settings in the API"
type: "object"
properties:
Bridge:
description: |
Name of the default bridge interface when dockerd's --bridge flag is set.
type: "string"
example: "docker0"
SandboxID:
description: SandboxID uniquely represents a container's network stack.
type: "string"
example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
HairpinMode:
description: |
Indicates if hairpin NAT should be enabled on the virtual interface.
Deprecated: This field is never set and will be removed in a future release.
type: "boolean"
example: false
LinkLocalIPv6Address:
description: |
IPv6 unicast address using the link-local prefix.
Deprecated: This field is never set and will be removed in a future release.
type: "string"
example: ""
LinkLocalIPv6PrefixLen:
description: |
Prefix length of the IPv6 unicast address.
Deprecated: This field is never set and will be removed in a future release.
type: "integer"
        example: 64
Ports:
$ref: "#/definitions/PortMap"
SandboxKey:
description: SandboxKey is the full path of the netns handle
type: "string"
example: "/var/run/docker/netns/8ab54b426c38"
SecondaryIPAddresses:
description: "Deprecated: This field is never set and will be removed in a future release."
type: "array"
items:
$ref: "#/definitions/Address"
x-nullable: true
SecondaryIPv6Addresses:
description: "Deprecated: This field is never set and will be removed in a future release."
type: "array"
items:
$ref: "#/definitions/Address"
x-nullable: true
# TODO properties below are part of DefaultNetworkSettings, which is
# marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
EndpointID:
description: |
EndpointID uniquely represents a service endpoint in a Sandbox.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "string"
example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
Gateway:
description: |
Gateway address for the default "bridge" network.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "string"
example: "172.17.0.1"
GlobalIPv6Address:
description: |
Global IPv6 address for the default "bridge" network.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "string"
example: "2001:db8::5689"
GlobalIPv6PrefixLen:
description: |
Mask length of the global IPv6 address.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "integer"
example: 64
IPAddress:
description: |
IPv4 address for the default "bridge" network.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "string"
example: "172.17.0.4"
IPPrefixLen:
description: |
Mask length of the IPv4 address.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "integer"
example: 16
IPv6Gateway:
description: |
IPv6 gateway address for this network.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "string"
example: "2001:db8:2::100"
MacAddress:
description: |
MAC address for the container on the default "bridge" network.
<p><br /></p>
> **Deprecated**: This field is only propagated when attached to the
> default "bridge" network. Use the information from the "bridge"
> network inside the `Networks` map instead, which contains the same
> information. This field was deprecated in Docker 1.9 and is scheduled
> to be removed in Docker 17.12.0
type: "string"
example: "02:42:ac:11:00:04"
Networks:
description: |
Information about all networks that the container is connected to.
type: "object"
additionalProperties:
$ref: "#/definitions/EndpointSettings"
Address:
description: Address represents an IPv4 or IPv6 IP address.
type: "object"
properties:
Addr:
description: IP address.
type: "string"
PrefixLen:
description: Mask length of the IP address.
type: "integer"
PortMap:
description: |
PortMap describes the mapping of container ports to host ports, using the
container's port-number and protocol as key in the format `<port>/<protocol>`,
for example, `80/udp`.
If a container's port is mapped for multiple protocols, separate entries
are added to the mapping table.
type: "object"
additionalProperties:
type: "array"
x-nullable: true
items:
$ref: "#/definitions/PortBinding"
example:
"443/tcp":
- HostIp: "127.0.0.1"
HostPort: "4443"
"80/tcp":
- HostIp: "0.0.0.0"
HostPort: "80"
- HostIp: "0.0.0.0"
HostPort: "8080"
"80/udp":
- HostIp: "0.0.0.0"
HostPort: "80"
"53/udp":
- HostIp: "0.0.0.0"
HostPort: "53"
"2377/tcp": null
PortBinding:
description: |
PortBinding represents a binding between a host IP address and a host
port.
type: "object"
properties:
HostIp:
description: "Host IP address that the container's port is mapped to."
type: "string"
example: "127.0.0.1"
HostPort:
description: "Host port number that the container's port is mapped to."
type: "string"
example: "4443"
DriverData:
description: |
Information about the storage driver used to store the container's and
image's filesystem.
type: "object"
required: [Name, Data]
properties:
Name:
description: "Name of the storage driver."
type: "string"
x-nullable: false
example: "overlay2"
Data:
description: |
Low-level storage metadata, provided as key/value pairs.
This information is driver-specific, and depends on the storage-driver
in use, and should be used for informational purposes only.
type: "object"
x-nullable: false
additionalProperties:
type: "string"
example: {
"MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
"UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
"WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
}
FilesystemChange:
description: |
Change in the container's filesystem.
type: "object"
required: [Path, Kind]
properties:
Path:
description: |
Path to file or directory that has changed.
type: "string"
x-nullable: false
Kind:
$ref: "#/definitions/ChangeType"
ChangeType:
description: |
Kind of change
Can be one of:
- `0`: Modified ("C")
- `1`: Added ("A")
- `2`: Deleted ("D")
type: "integer"
format: "uint8"
enum: [0, 1, 2]
x-nullable: false
ImageInspect:
description: |
Information about an image in the local image cache.
type: "object"
properties:
Id:
description: |
ID is the content-addressable ID of an image.
This identifier is a content-addressable digest calculated from the
image's configuration (which includes the digests of layers used by
the image).
Note that this digest differs from the `RepoDigests` below, which
holds digests of image manifests that reference the image.
type: "string"
x-nullable: false
example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
Descriptor:
description: |
Descriptor is an OCI descriptor of the image target.
In case of a multi-platform image, this descriptor points to the OCI index
or a manifest list.
This field is only present if the daemon provides a multi-platform image store.
WARNING: This is experimental and may change at any time without any backward
compatibility.
x-nullable: true
$ref: "#/definitions/OCIDescriptor"
Manifests:
description: |
Manifests is a list of image manifests available in this image. It
provides a more detailed view of the platform-specific image manifests or
other image-attached data like build attestations.
Only available if the daemon provides a multi-platform image store
and the `manifests` option is set in the inspect request.
WARNING: This is experimental and may change at any time without any backward
compatibility.
type: "array"
x-nullable: true
items:
$ref: "#/definitions/ImageManifestSummary"
RepoTags:
description: |
List of image names/tags in the local image cache that reference this
image.
Multiple image tags can refer to the same image, and this list may be
empty if no tags reference the image, in which case the image is
"untagged", in which case it can still be referenced by its ID.
type: "array"
items:
type: "string"
example:
- "example:1.0"
- "example:latest"
- "example:stable"
- "internal.registry.example.com:5000/example:1.0"
RepoDigests:
description: |
List of content-addressable digests of locally available image manifests
that the image is referenced from. Multiple manifests can refer to the
same image.
These digests are usually only available if the image was either pulled
from a registry, or if the image was pushed to a registry, which is when
the manifest is generated and its digest calculated.
type: "array"
items:
type: "string"
example:
- "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
- "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
Parent:
description: |
ID of the parent image.
Depending on how the image was created, this field may be empty and
is only set for images that were built/created locally. This field
is empty if the image was pulled from an image registry.
type: "string"
x-nullable: false
example: ""
Comment:
description: |
Optional message that was set when committing or importing the image.
type: "string"
x-nullable: false
example: ""
Created:
description: |
Date and time at which the image was created, formatted in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
This information is only available if present in the image,
and omitted otherwise.
type: "string"
format: "dateTime"
x-nullable: true
example: "2022-02-04T21:20:12.497794809Z"
DockerVersion:
description: |
The version of Docker that was used to build the image.
Depending on how the image was created, this field may be empty.
type: "string"
x-nullable: false
example: "27.0.1"
Author:
description: |
Name of the author that was specified when committing the image, or as
specified through MAINTAINER (deprecated) in the Dockerfile.
type: "string"
x-nullable: false
example: ""
Config:
$ref: "#/definitions/ImageConfig"
Architecture:
description: |
Hardware CPU architecture that the image runs on.
type: "string"
x-nullable: false
example: "arm"
Variant:
description: |
CPU architecture variant (presently ARM-only).
type: "string"
x-nullable: true
example: "v7"
Os:
description: |
Operating System the image is built to run on.
type: "string"
x-nullable: false
example: "linux"
OsVersion:
description: |
Operating System version the image is built to run on (especially
for Windows).
type: "string"
example: ""
x-nullable: true
Size:
description: |
Total size of the image including all layers it is composed of.
type: "integer"
format: "int64"
x-nullable: false
example: 1239828
GraphDriver:
$ref: "#/definitions/DriverData"
RootFS:
description: |
Information about the image's RootFS, including the layer IDs.
type: "object"
required: [Type]
properties:
Type:
type: "string"
x-nullable: false
example: "layers"
Layers:
type: "array"
items:
type: "string"
example:
- "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6"
- "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
Metadata:
description: |
Additional metadata of the image in the local cache. This information
is local to the daemon, and not part of the image itself.
type: "object"
properties:
LastTagTime:
description: |
Date and time at which the image was last tagged in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
This information is only available if the image was tagged locally,
and omitted otherwise.
type: "string"
format: "dateTime"
example: "2022-02-28T14:40:02.623929178Z"
x-nullable: true
ImageSummary:
type: "object"
x-go-name: "Summary"
required:
- Id
- ParentId
- RepoTags
- RepoDigests
- Created
- Size
- SharedSize
- Labels
- Containers
properties:
Id:
description: |
ID is the content-addressable ID of an image.
This identifier is a content-addressable digest calculated from the
image's configuration (which includes the digests of layers used by
the image).
Note that this digest differs from the `RepoDigests` below, which
holds digests of image manifests that reference the image.
type: "string"
x-nullable: false
example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
ParentId:
description: |
ID of the parent image.
Depending on how the image was created, this field may be empty and
is only set for images that were built/created locally. This field
is empty if the image was pulled from an image registry.
type: "string"
x-nullable: false
example: ""
RepoTags:
description: |
List of image names/tags in the local image cache that reference this
image.
Multiple image tags can refer to the same image, and this list may be
empty if no tags reference the image, in which case the image is
"untagged", in which case it can still be referenced by its ID.
type: "array"
x-nullable: false
items:
type: "string"
example:
- "example:1.0"
- "example:latest"
- "example:stable"
- "internal.registry.example.com:5000/example:1.0"
RepoDigests:
description: |
List of content-addressable digests of locally available image manifests
that the image is referenced from. Multiple manifests can refer to the
same image.
These digests are usually only available if the image was either pulled
from a registry, or if the image was pushed to a registry, which is when
the manifest is generated and its digest calculated.
type: "array"
x-nullable: false
items:
type: "string"
example:
- "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
- "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
Created:
description: |
Date and time at which the image was created as a Unix timestamp
(number of seconds since EPOCH).
type: "integer"
x-nullable: false
      example: 1644009612
Size:
description: |
Total size of the image including all layers it is composed of.
type: "integer"
format: "int64"
x-nullable: false
example: 172064416
SharedSize:
description: |
Total size of image layers that are shared between this image and other
images.
This size is not calculated by default. `-1` indicates that the value
has not been set / calculated.
type: "integer"
format: "int64"
x-nullable: false
example: 1239828
Labels:
description: "User-defined key/value metadata."
type: "object"
x-nullable: false
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
Containers:
description: |
        Number of containers using this image. Includes both stopped and running
        containers.
        This count is not calculated by default, and depends on which API endpoint
        is used. `-1` indicates that the value has not been set / calculated.
x-nullable: false
type: "integer"
example: 2
Manifests:
description: |
Manifests is a list of manifests available in this image.
It provides a more detailed view of the platform-specific image manifests
or other image-attached data like build attestations.
WARNING: This is experimental and may change at any time without any backward
compatibility.
type: "array"
x-nullable: false
x-omitempty: true
items:
$ref: "#/definitions/ImageManifestSummary"
Descriptor:
description: |
Descriptor is an OCI descriptor of the image target.
In case of a multi-platform image, this descriptor points to the OCI index
or a manifest list.
This field is only present if the daemon provides a multi-platform image store.
WARNING: This is experimental and may change at any time without any backward
compatibility.
x-nullable: true
$ref: "#/definitions/OCIDescriptor"
AuthConfig:
type: "object"
properties:
username:
type: "string"
password:
type: "string"
email:
description: |
Email is an optional value associated with the username.
> **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release.
type: "string"
serveraddress:
type: "string"
example:
username: "hannibal"
password: "xxxx"
serveraddress: "https://index.docker.io/v1/"
ProcessConfig:
type: "object"
properties:
privileged:
type: "boolean"
user:
type: "string"
tty:
type: "boolean"
entrypoint:
type: "string"
arguments:
type: "array"
items:
type: "string"
Volume:
type: "object"
required: [Name, Driver, Mountpoint, Labels, Scope, Options]
properties:
Name:
type: "string"
description: "Name of the volume."
x-nullable: false
example: "tardis"
Driver:
type: "string"
description: "Name of the volume driver used by the volume."
x-nullable: false
example: "custom"
Mountpoint:
type: "string"
description: "Mount path of the volume on the host."
x-nullable: false
example: "/var/lib/docker/volumes/tardis"
CreatedAt:
type: "string"
format: "dateTime"
description: "Date/Time the volume was created."
example: "2016-06-07T20:31:11.853781916Z"
Status:
type: "object"
description: |
Low-level details about the volume, provided by the volume driver.
Details are returned as a map with key/value pairs:
`{"key":"value","key2":"value2"}`.
The `Status` field is optional, and is omitted if the volume driver
does not support this feature.
additionalProperties:
type: "object"
example:
hello: "world"
Labels:
type: "object"
description: "User-defined key/value metadata."
x-nullable: false
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
Scope:
type: "string"
description: |
The level at which the volume exists. Either `global` for cluster-wide,
or `local` for machine level.
default: "local"
x-nullable: false
enum: ["local", "global"]
example: "local"
ClusterVolume:
$ref: "#/definitions/ClusterVolume"
Options:
type: "object"
description: |
The driver specific options used when creating the volume.
additionalProperties:
type: "string"
example:
device: "tmpfs"
o: "size=100m,uid=1000"
type: "tmpfs"
UsageData:
type: "object"
x-nullable: true
x-go-name: "UsageData"
required: [Size, RefCount]
description: |
Usage details about the volume. This information is used by the
`GET /system/df` endpoint, and omitted in other endpoints.
properties:
Size:
type: "integer"
format: "int64"
default: -1
description: |
Amount of disk space used by the volume (in bytes). This information
is only available for volumes created with the `"local"` volume
driver. For volumes created with other volume drivers, this field
is set to `-1` ("not available")
x-nullable: false
RefCount:
type: "integer"
format: "int64"
default: -1
description: |
The number of containers referencing this volume. This field
is set to `-1` if the reference-count is not available.
x-nullable: false
VolumeCreateOptions:
description: "Volume configuration"
type: "object"
title: "VolumeConfig"
x-go-name: "CreateOptions"
properties:
Name:
description: |
The new volume's name. If not specified, Docker generates a name.
type: "string"
x-nullable: false
example: "tardis"
Driver:
description: "Name of the volume driver to use."
type: "string"
default: "local"
x-nullable: false
example: "custom"
DriverOpts:
description: |
A mapping of driver options and values. These options are
passed directly to the driver and are driver specific.
type: "object"
additionalProperties:
type: "string"
example:
device: "tmpfs"
o: "size=100m,uid=1000"
type: "tmpfs"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
ClusterVolumeSpec:
$ref: "#/definitions/ClusterVolumeSpec"
VolumeListResponse:
type: "object"
title: "VolumeListResponse"
x-go-name: "ListResponse"
description: "Volume list response"
properties:
Volumes:
type: "array"
description: "List of volumes"
items:
$ref: "#/definitions/Volume"
Warnings:
type: "array"
description: |
Warnings that occurred when fetching the list of volumes.
items:
type: "string"
example: []
Network:
type: "object"
properties:
Name:
description: |
Name of the network.
type: "string"
example: "my_network"
Id:
description: |
ID that uniquely identifies a network on a single machine.
type: "string"
example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
Created:
description: |
Date and time at which the network was created in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2016-10-19T04:33:30.360899459Z"
Scope:
description: |
The level at which the network exists (e.g. `swarm` for cluster-wide
or `local` for machine level)
type: "string"
example: "local"
Driver:
description: |
The name of the driver used to create the network (e.g. `bridge`,
`overlay`).
type: "string"
example: "overlay"
EnableIPv4:
description: |
Whether the network was created with IPv4 enabled.
type: "boolean"
example: true
EnableIPv6:
description: |
Whether the network was created with IPv6 enabled.
type: "boolean"
example: false
IPAM:
$ref: "#/definitions/IPAM"
Internal:
description: |
Whether the network is created to only allow internal networking
connectivity.
type: "boolean"
default: false
example: false
Attachable:
description: |
Whether a global / swarm scope network is manually attachable by regular
containers from workers in swarm mode.
type: "boolean"
default: false
example: false
Ingress:
description: |
Whether the network is providing the routing-mesh for the swarm cluster.
type: "boolean"
default: false
example: false
ConfigFrom:
$ref: "#/definitions/ConfigReference"
ConfigOnly:
description: |
Whether the network is a config-only network. Config-only networks are
placeholder networks for network configurations to be used by other
networks. Config-only networks cannot be used directly to run containers
or services.
type: "boolean"
default: false
Containers:
description: |
Contains endpoints attached to the network.
type: "object"
additionalProperties:
$ref: "#/definitions/NetworkContainer"
example:
19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
Name: "test"
EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
MacAddress: "02:42:ac:13:00:02"
IPv4Address: "172.19.0.2/16"
IPv6Address: ""
Options:
description: |
        Network-specific options used when creating the network.
type: "object"
additionalProperties:
type: "string"
example:
com.docker.network.bridge.default_bridge: "true"
com.docker.network.bridge.enable_icc: "true"
com.docker.network.bridge.enable_ip_masquerade: "true"
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
com.docker.network.bridge.name: "docker0"
com.docker.network.driver.mtu: "1500"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
Peers:
description: |
List of peer nodes for an overlay network. This field is only present
for overlay networks, and omitted for other network types.
type: "array"
items:
$ref: "#/definitions/PeerInfo"
x-nullable: true
# TODO: Add Services (only present when "verbose" is set).
ConfigReference:
description: |
The config-only network source to provide the configuration for
this network.
type: "object"
properties:
Network:
description: |
The name of the config-only network that provides the network's
configuration. The specified network must be an existing config-only
network. Only network names are allowed, not network IDs.
type: "string"
example: "config_only_network_01"
IPAM:
type: "object"
properties:
Driver:
description: "Name of the IPAM driver to use."
type: "string"
default: "default"
example: "default"
Config:
description: |
List of IPAM configuration options, specified as a map:
```
{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
```
type: "array"
items:
$ref: "#/definitions/IPAMConfig"
Options:
description: "Driver-specific options, specified as a map."
type: "object"
additionalProperties:
type: "string"
example:
foo: "bar"
IPAMConfig:
type: "object"
properties:
Subnet:
type: "string"
example: "172.20.0.0/16"
IPRange:
type: "string"
example: "172.20.10.0/24"
Gateway:
type: "string"
example: "172.20.10.11"
AuxiliaryAddresses:
type: "object"
additionalProperties:
type: "string"
NetworkContainer:
type: "object"
properties:
Name:
type: "string"
example: "container_1"
EndpointID:
type: "string"
example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
MacAddress:
type: "string"
example: "02:42:ac:13:00:02"
IPv4Address:
type: "string"
example: "172.19.0.2/16"
IPv6Address:
type: "string"
example: ""
PeerInfo:
description: |
PeerInfo represents one peer of an overlay network.
type: "object"
properties:
Name:
description:
ID of the peer-node in the Swarm cluster.
type: "string"
example: "6869d7c1732b"
IP:
description:
IP-address of the peer-node in the Swarm cluster.
type: "string"
example: "10.133.77.91"
NetworkCreateResponse:
description: "OK response to NetworkCreate operation"
type: "object"
title: "NetworkCreateResponse"
x-go-name: "CreateResponse"
required: [Id, Warning]
properties:
Id:
description: "The ID of the created network."
type: "string"
x-nullable: false
example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d"
Warning:
      description: "Warnings encountered when creating the network"
type: "string"
x-nullable: false
example: ""
BuildInfo:
type: "object"
properties:
id:
type: "string"
stream:
type: "string"
error:
type: "string"
x-nullable: true
description: |-
errors encountered during the operation.
> **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
errorDetail:
$ref: "#/definitions/ErrorDetail"
status:
type: "string"
progress:
type: "string"
x-nullable: true
description: |-
Progress is a pre-formatted presentation of progressDetail.
> **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
progressDetail:
$ref: "#/definitions/ProgressDetail"
aux:
$ref: "#/definitions/ImageID"
BuildCache:
type: "object"
description: |
BuildCache contains information about a build cache record.
properties:
ID:
type: "string"
description: |
Unique ID of the build cache record.
example: "ndlpt0hhvkqcdfkputsk4cq9c"
Parents:
description: |
List of parent build cache record IDs.
type: "array"
items:
type: "string"
x-nullable: true
example: ["hw53o5aio51xtltp5xjp8v7fx"]
Type:
type: "string"
description: |
Cache record type.
example: "regular"
# see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
enum:
- "internal"
- "frontend"
- "source.local"
- "source.git.checkout"
- "exec.cachemount"
- "regular"
Description:
type: "string"
description: |
Description of the build-step that produced the build cache.
example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
InUse:
type: "boolean"
description: |
Indicates if the build cache is in use.
example: false
Shared:
type: "boolean"
description: |
Indicates if the build cache is shared.
example: true
Size:
description: |
Amount of disk space used by the build cache (in bytes).
type: "integer"
example: 51
CreatedAt:
description: |
Date and time at which the build cache was created in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2016-08-18T10:44:24.496525531Z"
LastUsedAt:
description: |
Date and time at which the build cache was last used in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
x-nullable: true
example: "2017-08-09T07:09:37.632105588Z"
UsageCount:
type: "integer"
example: 26
ImageID:
type: "object"
description: "Image ID or Digest"
properties:
ID:
type: "string"
example:
ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
CreateImageInfo:
type: "object"
properties:
id:
type: "string"
error:
type: "string"
x-nullable: true
description: |-
errors encountered during the operation.
> **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
errorDetail:
$ref: "#/definitions/ErrorDetail"
status:
type: "string"
progress:
type: "string"
x-nullable: true
description: |-
Progress is a pre-formatted presentation of progressDetail.
> **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
progressDetail:
$ref: "#/definitions/ProgressDetail"
PushImageInfo:
type: "object"
properties:
error:
type: "string"
x-nullable: true
description: |-
errors encountered during the operation.
> **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
errorDetail:
$ref: "#/definitions/ErrorDetail"
status:
type: "string"
progress:
type: "string"
x-nullable: true
description: |-
Progress is a pre-formatted presentation of progressDetail.
> **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
progressDetail:
$ref: "#/definitions/ProgressDetail"
ErrorDetail:
type: "object"
properties:
code:
type: "integer"
message:
type: "string"
ProgressDetail:
type: "object"
properties:
current:
type: "integer"
total:
type: "integer"
ErrorResponse:
description: "Represents an error."
type: "object"
required: ["message"]
properties:
message:
description: "The error message."
type: "string"
x-nullable: false
example:
message: "Something went wrong."
IDResponse:
description: "Response to an API call that returns just an Id"
type: "object"
x-go-name: "IDResponse"
required: ["Id"]
properties:
Id:
description: "The id of the newly created object."
type: "string"
x-nullable: false
EndpointSettings:
description: "Configuration for a network endpoint."
type: "object"
properties:
# Configurations
IPAMConfig:
$ref: "#/definitions/EndpointIPAMConfig"
Links:
type: "array"
items:
type: "string"
example:
- "container_1"
- "container_2"
MacAddress:
description: |
MAC address for the endpoint on this network. The network driver might ignore this parameter.
type: "string"
example: "02:42:ac:11:00:04"
Aliases:
type: "array"
items:
type: "string"
example:
- "server_x"
- "server_y"
DriverOpts:
description: |
DriverOpts is a mapping of driver options and values. These options
are passed directly to the driver and are driver specific.
type: "object"
x-nullable: true
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
GwPriority:
description: |
This property determines which endpoint will provide the default
gateway for a container. The endpoint with the highest priority will
be used. If multiple endpoints have the same priority, endpoints are
lexicographically sorted based on their network name, and the one
that sorts first is picked.
type: "integer"
format: "int64"
      example: 10
# Operational data
NetworkID:
description: |
Unique ID of the network.
type: "string"
example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
EndpointID:
description: |
Unique ID for the service endpoint in a Sandbox.
type: "string"
example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
Gateway:
description: |
Gateway address for this network.
type: "string"
example: "172.17.0.1"
IPAddress:
description: |
IPv4 address.
type: "string"
example: "172.17.0.4"
IPPrefixLen:
description: |
Mask length of the IPv4 address.
type: "integer"
example: 16
IPv6Gateway:
description: |
IPv6 gateway address.
type: "string"
example: "2001:db8:2::100"
GlobalIPv6Address:
description: |
Global IPv6 address.
type: "string"
example: "2001:db8::5689"
GlobalIPv6PrefixLen:
description: |
Mask length of the global IPv6 address.
type: "integer"
format: "int64"
example: 64
DNSNames:
description: |
List of all DNS names an endpoint has on a specific network. This
list is based on the container name, network aliases, container short
ID, and hostname.
These DNS names are non-fully qualified but can contain several dots.
You can get fully qualified DNS names by appending `.<network-name>`.
For instance, if container name is `my.ctr` and the network is named
`testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be
`my.ctr.testnet`.
type: array
items:
type: string
example: ["foobar", "server_x", "server_y", "my.ctr"]
EndpointIPAMConfig:
description: |
EndpointIPAMConfig represents an endpoint's IPAM configuration.
type: "object"
x-nullable: true
properties:
IPv4Address:
type: "string"
example: "172.20.30.33"
IPv6Address:
type: "string"
example: "2001:db8:abcd::3033"
LinkLocalIPs:
type: "array"
items:
type: "string"
example:
- "169.254.34.68"
- "fe80::3468"
PluginMount:
type: "object"
x-nullable: false
required: [Name, Description, Settable, Source, Destination, Type, Options]
properties:
Name:
type: "string"
x-nullable: false
example: "some-mount"
Description:
type: "string"
x-nullable: false
example: "This is a mount that's used by the plugin."
Settable:
type: "array"
items:
type: "string"
Source:
type: "string"
example: "/var/lib/docker/plugins/"
Destination:
type: "string"
x-nullable: false
example: "/mnt/state"
Type:
type: "string"
x-nullable: false
example: "bind"
Options:
type: "array"
items:
type: "string"
example:
- "rbind"
- "rw"
PluginDevice:
type: "object"
required: [Name, Description, Settable, Path]
x-nullable: false
properties:
Name:
type: "string"
x-nullable: false
Description:
type: "string"
x-nullable: false
Settable:
type: "array"
items:
type: "string"
Path:
type: "string"
example: "/dev/fuse"
PluginEnv:
type: "object"
x-nullable: false
required: [Name, Description, Settable, Value]
properties:
Name:
x-nullable: false
type: "string"
Description:
x-nullable: false
type: "string"
Settable:
type: "array"
items:
type: "string"
Value:
type: "string"
PluginInterfaceType:
type: "object"
x-nullable: false
required: [Prefix, Capability, Version]
properties:
Prefix:
type: "string"
x-nullable: false
Capability:
type: "string"
x-nullable: false
Version:
type: "string"
x-nullable: false
PluginPrivilege:
description: |
Describes a permission the user has to accept upon installing
the plugin.
type: "object"
x-go-name: "PluginPrivilege"
properties:
Name:
type: "string"
example: "network"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
example:
- "host"
Plugin:
description: "A plugin for the Engine API"
type: "object"
required: [Settings, Enabled, Config, Name]
properties:
Id:
type: "string"
example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
Name:
type: "string"
x-nullable: false
example: "tiborvass/sample-volume-plugin"
Enabled:
description:
True if the plugin is running. False if the plugin is not running,
only installed.
type: "boolean"
x-nullable: false
example: true
Settings:
description: "Settings that can be modified by users."
type: "object"
x-nullable: false
required: [Args, Devices, Env, Mounts]
properties:
Mounts:
type: "array"
items:
$ref: "#/definitions/PluginMount"
Env:
type: "array"
items:
type: "string"
example:
- "DEBUG=0"
Args:
type: "array"
items:
type: "string"
Devices:
type: "array"
items:
$ref: "#/definitions/PluginDevice"
PluginReference:
description: "plugin remote reference used to push/pull the plugin"
type: "string"
x-nullable: false
example: "localhost:5000/tiborvass/sample-volume-plugin:latest"
Config:
description: "The config of a plugin."
type: "object"
x-nullable: false
required:
- Description
- Documentation
- Interface
- Entrypoint
- WorkDir
- Network
- Linux
- PidHost
- PropagatedMount
- IpcHost
- Mounts
- Env
- Args
properties:
DockerVersion:
description: "Docker Version used to create the plugin"
type: "string"
x-nullable: false
example: "17.06.0-ce"
Description:
type: "string"
x-nullable: false
example: "A sample volume plugin for Docker"
Documentation:
type: "string"
x-nullable: false
example: "https://docs.docker.com/engine/extend/plugins/"
Interface:
description: "The interface between Docker and the plugin"
x-nullable: false
type: "object"
required: [Types, Socket]
properties:
Types:
type: "array"
items:
$ref: "#/definitions/PluginInterfaceType"
example:
- "docker.volumedriver/1.0"
Socket:
type: "string"
x-nullable: false
example: "plugins.sock"
ProtocolScheme:
type: "string"
example: "some.protocol/v1.0"
description: "Protocol to use for clients connecting to the plugin."
enum:
- ""
- "moby.plugins.http/v1"
Entrypoint:
type: "array"
items:
type: "string"
example:
- "/usr/bin/sample-volume-plugin"
- "/data"
WorkDir:
type: "string"
x-nullable: false
example: "/bin/"
User:
type: "object"
x-nullable: false
properties:
UID:
type: "integer"
format: "uint32"
example: 1000
GID:
type: "integer"
format: "uint32"
example: 1000
Network:
type: "object"
x-nullable: false
required: [Type]
properties:
Type:
x-nullable: false
type: "string"
example: "host"
Linux:
type: "object"
x-nullable: false
required: [Capabilities, AllowAllDevices, Devices]
properties:
Capabilities:
type: "array"
items:
type: "string"
example:
- "CAP_SYS_ADMIN"
- "CAP_SYSLOG"
AllowAllDevices:
type: "boolean"
x-nullable: false
example: false
Devices:
type: "array"
items:
$ref: "#/definitions/PluginDevice"
PropagatedMount:
type: "string"
x-nullable: false
example: "/mnt/volumes"
IpcHost:
type: "boolean"
x-nullable: false
example: false
PidHost:
type: "boolean"
x-nullable: false
example: false
Mounts:
type: "array"
items:
$ref: "#/definitions/PluginMount"
Env:
type: "array"
items:
$ref: "#/definitions/PluginEnv"
example:
- Name: "DEBUG"
Description: "If set, prints debug messages"
Settable: null
Value: "0"
Args:
type: "object"
x-nullable: false
required: [Name, Description, Settable, Value]
properties:
Name:
x-nullable: false
type: "string"
example: "args"
Description:
x-nullable: false
type: "string"
example: "command line arguments"
Settable:
type: "array"
items:
type: "string"
Value:
type: "array"
items:
type: "string"
rootfs:
type: "object"
properties:
type:
type: "string"
example: "layers"
diff_ids:
type: "array"
items:
type: "string"
example:
- "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887"
- "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
ObjectVersion:
description: |
The version number of the object such as node, service, etc. This is needed
to avoid conflicting writes. The client must send the version number along
with the modified specification when updating these objects.
This approach ensures safe concurrency and determinism in that the change
on the object may not be applied if the version number has changed from the
last read. In other words, if two update requests specify the same base
version, only one of the requests can succeed. As a result, two separate
update requests that happen at the same time will not unintentionally
overwrite each other.
type: "object"
properties:
Index:
type: "integer"
format: "uint64"
example: 373531
NodeSpec:
type: "object"
properties:
Name:
description: "Name for the node."
type: "string"
example: "my-node"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
Role:
description: "Role of the node."
type: "string"
enum:
- "worker"
- "manager"
example: "manager"
Availability:
description: "Availability of the node."
type: "string"
enum:
- "active"
- "pause"
- "drain"
example: "active"
example:
Availability: "active"
Name: "node-name"
Role: "manager"
Labels:
foo: "bar"
Node:
type: "object"
properties:
ID:
type: "string"
example: "24ifsmvkjbyhk"
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
description: |
Date and time at which the node was added to the swarm in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2016-08-18T10:44:24.496525531Z"
UpdatedAt:
description: |
Date and time at which the node was last updated in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2017-08-09T07:09:37.632105588Z"
Spec:
$ref: "#/definitions/NodeSpec"
Description:
$ref: "#/definitions/NodeDescription"
Status:
$ref: "#/definitions/NodeStatus"
ManagerStatus:
$ref: "#/definitions/ManagerStatus"
NodeDescription:
description: |
NodeDescription encapsulates the properties of the Node as reported by the
agent.
type: "object"
properties:
Hostname:
type: "string"
example: "bf3067039e47"
Platform:
$ref: "#/definitions/Platform"
Resources:
$ref: "#/definitions/ResourceObject"
Engine:
$ref: "#/definitions/EngineDescription"
TLSInfo:
$ref: "#/definitions/TLSInfo"
Platform:
description: |
Platform represents the platform (Arch/OS).
type: "object"
properties:
Architecture:
description: |
Architecture represents the hardware architecture (for example,
`x86_64`).
type: "string"
example: "x86_64"
OS:
description: |
OS represents the Operating System (for example, `linux` or `windows`).
type: "string"
example: "linux"
EngineDescription:
description: "EngineDescription provides information about an engine."
type: "object"
properties:
EngineVersion:
type: "string"
example: "17.06.0"
Labels:
type: "object"
additionalProperties:
type: "string"
example:
foo: "bar"
Plugins:
type: "array"
items:
type: "object"
properties:
Type:
type: "string"
Name:
type: "string"
example:
- Type: "Log"
Name: "awslogs"
- Type: "Log"
Name: "fluentd"
- Type: "Log"
Name: "gcplogs"
- Type: "Log"
Name: "gelf"
- Type: "Log"
Name: "journald"
- Type: "Log"
Name: "json-file"
- Type: "Log"
Name: "splunk"
- Type: "Log"
Name: "syslog"
- Type: "Network"
Name: "bridge"
- Type: "Network"
Name: "host"
- Type: "Network"
Name: "ipvlan"
- Type: "Network"
Name: "macvlan"
- Type: "Network"
Name: "null"
- Type: "Network"
Name: "overlay"
- Type: "Volume"
Name: "local"
- Type: "Volume"
Name: "localhost:5000/vieux/sshfs:latest"
- Type: "Volume"
Name: "vieux/sshfs:latest"
TLSInfo:
description: |
Information about the issuer of leaf TLS certificates and the trusted root
CA certificate.
type: "object"
properties:
TrustRoot:
description: |
The root CA certificate(s) that are used to validate leaf TLS
certificates.
type: "string"
CertIssuerSubject:
description:
The base64-url-safe-encoded raw subject bytes of the issuer.
type: "string"
CertIssuerPublicKey:
description: |
The base64-url-safe-encoded raw public key bytes of the issuer.
type: "string"
example:
TrustRoot: |
-----BEGIN CERTIFICATE-----
MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw
EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0
MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf
3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO
PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz
pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H
-----END CERTIFICATE-----
CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh"
CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="
NodeStatus:
description: |
NodeStatus represents the status of a node.
It provides the current status of the node, as seen by the manager.
type: "object"
properties:
State:
$ref: "#/definitions/NodeState"
Message:
type: "string"
example: ""
Addr:
description: "IP address of the node."
type: "string"
example: "172.17.0.2"
NodeState:
description: "NodeState represents the state of a node."
type: "string"
enum:
- "unknown"
- "down"
- "ready"
- "disconnected"
example: "ready"
ManagerStatus:
description: |
ManagerStatus represents the status of a manager.
It provides the current status of a node's manager component, if the node
is a manager.
x-nullable: true
type: "object"
properties:
Leader:
type: "boolean"
default: false
example: true
Reachability:
$ref: "#/definitions/Reachability"
Addr:
description: |
The IP address and port at which the manager is reachable.
type: "string"
example: "10.0.0.46:2377"
Reachability:
description: "Reachability represents the reachability of a node."
type: "string"
enum:
- "unknown"
- "unreachable"
- "reachable"
example: "reachable"
SwarmSpec:
description: "User modifiable swarm configuration."
type: "object"
properties:
Name:
description: "Name of the swarm."
type: "string"
example: "default"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.corp.type: "production"
com.example.corp.department: "engineering"
Orchestration:
description: "Orchestration configuration."
type: "object"
x-nullable: true
properties:
TaskHistoryRetentionLimit:
description: |
The number of historic tasks to keep per instance or node. If
negative, never remove completed or failed tasks.
type: "integer"
format: "int64"
example: 10
Raft:
description: "Raft configuration."
type: "object"
properties:
SnapshotInterval:
description: "The number of log entries between snapshots."
type: "integer"
format: "uint64"
example: 10000
KeepOldSnapshots:
description: |
The number of snapshots to keep beyond the current snapshot.
type: "integer"
format: "uint64"
LogEntriesForSlowFollowers:
description: |
The number of log entries to keep around to sync up slow followers
after a snapshot is created.
type: "integer"
format: "uint64"
example: 500
ElectionTick:
description: |
The number of ticks that a follower will wait for a message from
the leader before becoming a candidate and starting an election.
`ElectionTick` must be greater than `HeartbeatTick`.
A tick currently defaults to one second, so these translate
directly to seconds currently, but this is NOT guaranteed.
type: "integer"
example: 3
HeartbeatTick:
description: |
The number of ticks between heartbeats. Every HeartbeatTick ticks,
the leader will send a heartbeat to the followers.
A tick currently defaults to one second, so these translate
directly to seconds currently, but this is NOT guaranteed.
type: "integer"
example: 1
Dispatcher:
description: "Dispatcher configuration."
type: "object"
x-nullable: true
properties:
HeartbeatPeriod:
description: |
The delay for an agent to send a heartbeat to the dispatcher.
type: "integer"
format: "int64"
example: 5000000000
CAConfig:
description: "CA configuration."
type: "object"
x-nullable: true
properties:
NodeCertExpiry:
description: "The duration node certificates are issued for."
type: "integer"
format: "int64"
example: 7776000000000000
ExternalCAs:
description: |
Configuration for forwarding signing requests to an external
certificate authority.
type: "array"
items:
type: "object"
properties:
Protocol:
description: |
Protocol for communication with the external CA (currently
only `cfssl` is supported).
type: "string"
enum:
- "cfssl"
default: "cfssl"
URL:
description: |
URL where certificate signing requests should be sent.
type: "string"
Options:
description: |
An object with key/value pairs that are interpreted as
protocol-specific options for the external CA driver.
type: "object"
additionalProperties:
type: "string"
CACert:
description: |
The root CA certificate (in PEM format) this external CA uses
to issue TLS certificates (assumed to be to the current swarm
root CA certificate if not provided).
type: "string"
SigningCACert:
description: |
The desired signing CA certificate for all swarm node TLS leaf
certificates, in PEM format.
type: "string"
SigningCAKey:
description: |
The desired signing CA key for all swarm node TLS leaf certificates,
in PEM format.
type: "string"
ForceRotate:
description: |
An integer whose purpose is to force swarm to generate a new
signing CA certificate and key, if none have been specified in
            `SigningCACert` and `SigningCAKey`.
format: "uint64"
type: "integer"
EncryptionConfig:
description: "Parameters related to encryption-at-rest."
type: "object"
properties:
AutoLockManagers:
description: |
If set, generate a key and use it to lock data stored on the
managers.
type: "boolean"
example: false
TaskDefaults:
description: "Defaults for creating tasks in this cluster."
type: "object"
properties:
LogDriver:
description: |
The log driver to use for tasks created in the orchestrator if
unspecified by a service.
Updating this value only affects new tasks. Existing tasks continue
to use their previously configured log driver until recreated.
type: "object"
properties:
Name:
description: |
The log driver to use as a default for new tasks.
type: "string"
example: "json-file"
Options:
description: |
Driver-specific options for the selected log driver, specified
as key/value pairs.
type: "object"
additionalProperties:
type: "string"
example:
"max-file": "10"
"max-size": "100m"
# The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
# without `JoinTokens`.
ClusterInfo:
description: |
ClusterInfo represents information about the swarm as is returned by the
"/info" endpoint. Join-tokens are not included.
x-nullable: true
type: "object"
properties:
ID:
description: "The ID of the swarm."
type: "string"
example: "abajmipo7b4xz5ip2nrla6b11"
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
description: |
Date and time at which the swarm was initialised in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2016-08-18T10:44:24.496525531Z"
UpdatedAt:
description: |
Date and time at which the swarm was last updated in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2017-08-09T07:09:37.632105588Z"
Spec:
$ref: "#/definitions/SwarmSpec"
TLSInfo:
$ref: "#/definitions/TLSInfo"
RootRotationInProgress:
description: |
Whether there is currently a root CA rotation in progress for the swarm
type: "boolean"
example: false
DataPathPort:
description: |
DataPathPort specifies the data path port number for data traffic.
Acceptable port range is 1024 to 49151.
If no port is set or is set to 0, the default port (4789) is used.
type: "integer"
format: "uint32"
default: 4789
example: 4789
DefaultAddrPool:
description: |
Default Address Pool specifies default subnet pools for global scope
networks.
type: "array"
items:
type: "string"
format: "CIDR"
example: ["10.10.0.0/16", "20.20.0.0/16"]
SubnetSize:
description: |
SubnetSize specifies the subnet size of the networks created from the
default subnet pool.
type: "integer"
format: "uint32"
maximum: 29
default: 24
example: 24
JoinTokens:
description: |
JoinTokens contains the tokens workers and managers need to join the swarm.
type: "object"
properties:
Worker:
description: |
The token workers can use to join the swarm.
type: "string"
example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
Manager:
description: |
The token managers can use to join the swarm.
type: "string"
example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
Swarm:
type: "object"
allOf:
- $ref: "#/definitions/ClusterInfo"
- type: "object"
properties:
JoinTokens:
$ref: "#/definitions/JoinTokens"
TaskSpec:
description: "User modifiable task configuration."
type: "object"
properties:
PluginSpec:
type: "object"
description: |
Plugin spec for the service. *(Experimental release only.)*
<p><br /></p>
> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
> mutually exclusive. PluginSpec is only used when the Runtime field
> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
> field is set to `attachment`.
properties:
Name:
description: "The name or 'alias' to use for the plugin."
type: "string"
Remote:
description: "The plugin image reference to use."
type: "string"
Disabled:
description: "Disable the plugin once scheduled."
type: "boolean"
PluginPrivilege:
type: "array"
items:
$ref: "#/definitions/PluginPrivilege"
ContainerSpec:
type: "object"
description: |
Container spec for the service.
<p><br /></p>
> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
> mutually exclusive. PluginSpec is only used when the Runtime field
> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
> field is set to `attachment`.
properties:
Image:
description: "The image name to use for the container"
type: "string"
Labels:
description: "User-defined key/value data."
type: "object"
additionalProperties:
type: "string"
Command:
description: "The command to be run in the image."
type: "array"
items:
type: "string"
Args:
description: "Arguments to the command."
type: "array"
items:
type: "string"
Hostname:
description: |
The hostname to use for the container, as a valid
[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.
type: "string"
Env:
description: |
A list of environment variables in the form `VAR=value`.
type: "array"
items:
type: "string"
Dir:
description: "The working directory for commands to run in."
type: "string"
User:
description: "The user inside the container."
type: "string"
Groups:
type: "array"
description: |
A list of additional groups that the container process will run as.
items:
type: "string"
Privileges:
type: "object"
description: "Security options for the container"
properties:
CredentialSpec:
type: "object"
description: "CredentialSpec for managed service account (Windows only)"
properties:
Config:
type: "string"
example: "0bt9dmxjvjiqermk6xrop3ekq"
description: |
Load credential spec from a Swarm Config with the given ID.
The specified config must also be present in the Configs
field with the Runtime property set.
<p><br /></p>
> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
> and `CredentialSpec.Config` are mutually exclusive.
File:
type: "string"
example: "spec.json"
description: |
Load credential spec from this file. The file is read by
the daemon, and must be present in the `CredentialSpecs`
subdirectory in the docker data directory, which defaults
to `C:\ProgramData\Docker\` on Windows.
For example, specifying `spec.json` loads
`C:\ProgramData\Docker\CredentialSpecs\spec.json`.
<p><br /></p>
> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
> and `CredentialSpec.Config` are mutually exclusive.
Registry:
type: "string"
description: |
Load credential spec from this value in the Windows
registry. The specified registry value must be located in:
`HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
<p><br /></p>
> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
> and `CredentialSpec.Config` are mutually exclusive.
SELinuxContext:
type: "object"
description: "SELinux labels of the container"
properties:
Disable:
type: "boolean"
description: "Disable SELinux"
User:
type: "string"
description: "SELinux user label"
Role:
type: "string"
description: "SELinux role label"
Type:
type: "string"
description: "SELinux type label"
Level:
type: "string"
description: "SELinux level label"
Seccomp:
type: "object"
description: "Options for configuring seccomp on the container"
properties:
Mode:
type: "string"
enum:
- "default"
- "unconfined"
- "custom"
Profile:
description: "The custom seccomp profile as a json object"
type: "string"
AppArmor:
type: "object"
description: "Options for configuring AppArmor on the container"
properties:
Mode:
type: "string"
enum:
- "default"
- "disabled"
NoNewPrivileges:
type: "boolean"
description: "Configuration of the no_new_privs bit in the container"
TTY:
description: "Whether a pseudo-TTY should be allocated."
type: "boolean"
OpenStdin:
description: "Open `stdin`"
type: "boolean"
ReadOnly:
description: "Mount the container's root filesystem as read only."
type: "boolean"
Mounts:
description: |
Specification for mounts to be added to containers created as part
of the service.
type: "array"
items:
$ref: "#/definitions/Mount"
StopSignal:
description: "Signal to stop the container."
type: "string"
StopGracePeriod:
description: |
Amount of time to wait for the container to terminate before
forcefully killing it.
type: "integer"
format: "int64"
HealthCheck:
$ref: "#/definitions/HealthConfig"
Hosts:
type: "array"
description: |
A list of hostname/IP mappings to add to the container's `hosts`
file. The format of extra hosts is specified in the
[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)
man page:
IP_address canonical_hostname [aliases...]
items:
type: "string"
DNSConfig:
description: |
Specification for DNS related configurations in resolver configuration
file (`resolv.conf`).
type: "object"
properties:
Nameservers:
description: "The IP addresses of the name servers."
type: "array"
items:
type: "string"
Search:
description: "A search list for host-name lookup."
type: "array"
items:
type: "string"
Options:
description: |
A list of internal resolver variables to be modified (e.g.,
`debug`, `ndots:3`, etc.).
type: "array"
items:
type: "string"
Secrets:
description: |
Secrets contains references to zero or more secrets that will be
exposed to the service.
type: "array"
items:
type: "object"
properties:
File:
description: |
File represents a specific target that is backed by a file.
type: "object"
properties:
Name:
description: |
Name represents the final filename in the filesystem.
type: "string"
UID:
description: "UID represents the file UID."
type: "string"
GID:
description: "GID represents the file GID."
type: "string"
Mode:
description: "Mode represents the FileMode of the file."
type: "integer"
format: "uint32"
SecretID:
description: |
SecretID represents the ID of the specific secret that we're
referencing.
type: "string"
SecretName:
description: |
SecretName is the name of the secret that this references,
but this is just provided for lookup/display purposes. The
secret in the reference will be identified by its ID.
type: "string"
OomScoreAdj:
type: "integer"
format: "int64"
description: |
An integer value containing the score given to the container in
order to tune OOM killer preferences.
example: 0
Configs:
description: |
Configs contains references to zero or more configs that will be
exposed to the service.
type: "array"
items:
type: "object"
properties:
File:
description: |
File represents a specific target that is backed by a file.
                  <p><br /></p>
> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
type: "object"
properties:
Name:
description: |
Name represents the final filename in the filesystem.
type: "string"
UID:
description: "UID represents the file UID."
type: "string"
GID:
description: "GID represents the file GID."
type: "string"
Mode:
description: "Mode represents the FileMode of the file."
type: "integer"
format: "uint32"
Runtime:
description: |
Runtime represents a target that is not mounted into the
container but is used by the task
                  <p><br /></p>
> **Note**: `Configs.File` and `Configs.Runtime` are mutually
> exclusive
type: "object"
ConfigID:
description: |
ConfigID represents the ID of the specific config that we're
referencing.
type: "string"
ConfigName:
description: |
ConfigName is the name of the config that this references,
but this is just provided for lookup/display purposes. The
config in the reference will be identified by its ID.
type: "string"
Isolation:
type: "string"
description: |
Isolation technology of the containers running the service.
(Windows only)
enum:
- "default"
- "process"
- "hyperv"
- ""
Init:
description: |
Run an init inside the container that forwards signals and reaps
processes. This field is omitted if empty, and the default (as
configured on the daemon) is used.
type: "boolean"
x-nullable: true
Sysctls:
description: |
            Set kernel namespaced parameters (sysctls) in the container.
            The Sysctls option on services accepts the same sysctls as
            are supported on containers. Note that while the same sysctls are
supported, no guarantees or checks are made about their
suitability for a clustered environment, and it's up to the user
to determine whether a given sysctl will work properly in a
Service.
type: "object"
additionalProperties:
type: "string"
# This option is not used by Windows containers
CapabilityAdd:
type: "array"
description: |
A list of kernel capabilities to add to the default set
for the container.
items:
type: "string"
example:
- "CAP_NET_RAW"
- "CAP_SYS_ADMIN"
- "CAP_SYS_CHROOT"
- "CAP_SYSLOG"
CapabilityDrop:
type: "array"
description: |
A list of kernel capabilities to drop from the default set
for the container.
items:
type: "string"
example:
- "CAP_NET_RAW"
Ulimits:
description: |
            A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
type: "array"
items:
type: "object"
properties:
Name:
description: "Name of ulimit"
type: "string"
Soft:
description: "Soft limit"
type: "integer"
Hard:
description: "Hard limit"
type: "integer"
NetworkAttachmentSpec:
description: |
Read-only spec type for non-swarm containers attached to swarm overlay
networks.
<p><br /></p>
> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
> mutually exclusive. PluginSpec is only used when the Runtime field
> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
> field is set to `attachment`.
type: "object"
properties:
ContainerID:
description: "ID of the container represented by this task"
type: "string"
Resources:
description: |
Resource requirements which apply to each individual container created
as part of the service.
type: "object"
properties:
Limits:
description: "Define resources limits."
$ref: "#/definitions/Limit"
Reservations:
description: "Define resources reservation."
$ref: "#/definitions/ResourceObject"
RestartPolicy:
description: |
Specification for the restart policy which applies to containers
created as part of this service.
type: "object"
properties:
Condition:
description: "Condition for restart."
type: "string"
enum:
- "none"
- "on-failure"
- "any"
Delay:
description: "Delay between restart attempts."
type: "integer"
format: "int64"
MaxAttempts:
description: |
Maximum attempts to restart a given container before giving up
(default value is 0, which is ignored).
type: "integer"
format: "int64"
default: 0
Window:
description: |
        Window is the time window used to evaluate the restart policy
(default value is 0, which is unbounded).
type: "integer"
format: "int64"
default: 0
Placement:
type: "object"
properties:
Constraints:
description: |
An array of constraint expressions to limit the set of nodes where
a task can be scheduled. Constraint expressions can either use a
_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
nodes that satisfy every expression (AND match). Constraints can
match node or Docker Engine labels as follows:
node attribute | matches | example
---------------------|--------------------------------|-----------------------------------------------
`node.id` | Node ID | `node.id==2ivku8v2gvtg4`
`node.hostname` | Node hostname | `node.hostname!=node-2`
`node.role` | Node role (`manager`/`worker`) | `node.role==manager`
`node.platform.os` | Node operating system | `node.platform.os==windows`
`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
`node.labels` | User-defined node labels | `node.labels.security==high`
`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04`
`engine.labels` apply to Docker Engine labels like operating system,
drivers, etc. Swarm administrators add `node.labels` for operational
purposes by using the [`node update endpoint`](#operation/NodeUpdate).
type: "array"
items:
type: "string"
example:
- "node.hostname!=node3.corp.example.com"
- "node.role!=manager"
- "node.labels.type==production"
- "node.platform.os==linux"
- "node.platform.arch==x86_64"
Preferences:
description: |
Preferences provide a way to make the scheduler aware of factors
such as topology. They are provided in order from highest to
lowest precedence.
type: "array"
items:
type: "object"
properties:
Spread:
type: "object"
properties:
SpreadDescriptor:
description: |
label descriptor, such as `engine.labels.az`.
type: "string"
example:
- Spread:
SpreadDescriptor: "node.labels.datacenter"
- Spread:
SpreadDescriptor: "node.labels.rack"
MaxReplicas:
description: |
        Maximum number of replicas per node (default value is 0, which
is unlimited)
type: "integer"
format: "int64"
default: 0
Platforms:
description: |
Platforms stores all the platforms that the service's image can
run on. This field is used in the platform filter for scheduling.
If empty, then the platform filter is off, meaning there are no
scheduling restrictions.
type: "array"
items:
$ref: "#/definitions/Platform"
ForceUpdate:
description: |
A counter that triggers an update even if no relevant parameters have
been changed.
type: "integer"
format: "uint64"
Runtime:
description: |
Runtime is the type of runtime specified for the task executor.
type: "string"
Networks:
description: "Specifies which networks the service should attach to."
type: "array"
items:
$ref: "#/definitions/NetworkAttachmentConfig"
LogDriver:
description: |
Specifies the log driver to use for tasks created from this spec. If
not present, the default one for the swarm will be used, finally
falling back to the engine default if not specified.
type: "object"
properties:
Name:
type: "string"
Options:
type: "object"
additionalProperties:
type: "string"
TaskState:
type: "string"
enum:
- "new"
- "allocated"
- "pending"
- "assigned"
- "accepted"
- "preparing"
- "ready"
- "starting"
- "running"
- "complete"
- "shutdown"
- "failed"
- "rejected"
- "remove"
- "orphaned"
ContainerStatus:
type: "object"
description: "represents the status of a container."
properties:
ContainerID:
type: "string"
PID:
type: "integer"
ExitCode:
type: "integer"
PortStatus:
type: "object"
description: "represents the port status of a task's host ports whose service has published host ports"
properties:
Ports:
type: "array"
items:
$ref: "#/definitions/EndpointPortConfig"
TaskStatus:
type: "object"
description: "represents the status of a task."
properties:
Timestamp:
type: "string"
format: "dateTime"
State:
$ref: "#/definitions/TaskState"
Message:
type: "string"
Err:
type: "string"
ContainerStatus:
$ref: "#/definitions/ContainerStatus"
PortStatus:
$ref: "#/definitions/PortStatus"
Task:
type: "object"
properties:
ID:
description: "The ID of the task."
type: "string"
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
type: "string"
format: "dateTime"
UpdatedAt:
type: "string"
format: "dateTime"
Name:
description: "Name of the task."
type: "string"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
Spec:
$ref: "#/definitions/TaskSpec"
ServiceID:
description: "The ID of the service this task is part of."
type: "string"
Slot:
type: "integer"
NodeID:
description: "The ID of the node that this task is on."
type: "string"
AssignedGenericResources:
$ref: "#/definitions/GenericResources"
Status:
$ref: "#/definitions/TaskStatus"
DesiredState:
$ref: "#/definitions/TaskState"
JobIteration:
description: |
If the Service this Task belongs to is a job-mode service, contains
the JobIteration of the Service this Task was created for. Absent if
the Task was created for a Replicated or Global Service.
$ref: "#/definitions/ObjectVersion"
example:
ID: "0kzzo1i0y4jz6027t0k7aezc7"
Version:
Index: 71
CreatedAt: "2016-06-07T21:07:31.171892745Z"
UpdatedAt: "2016-06-07T21:07:31.376370513Z"
Spec:
ContainerSpec:
Image: "redis"
Resources:
Limits: {}
Reservations: {}
RestartPolicy:
Condition: "any"
MaxAttempts: 0
Placement: {}
ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
Slot: 1
NodeID: "60gvrl6tm78dmak4yl7srz94v"
Status:
Timestamp: "2016-06-07T21:07:31.290032978Z"
State: "running"
Message: "started"
ContainerStatus:
ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
PID: 677
DesiredState: "running"
NetworksAttachments:
- Network:
ID: "4qvuz4ko70xaltuqbt8956gd1"
Version:
Index: 18
CreatedAt: "2016-06-07T20:31:11.912919752Z"
UpdatedAt: "2016-06-07T21:07:29.955277358Z"
Spec:
Name: "ingress"
Labels:
com.docker.swarm.internal: "true"
DriverConfiguration: {}
IPAMOptions:
Driver: {}
Configs:
- Subnet: "10.255.0.0/16"
Gateway: "10.255.0.1"
DriverState:
Name: "overlay"
Options:
com.docker.network.driver.overlay.vxlanid_list: "256"
IPAMOptions:
Driver:
Name: "default"
Configs:
- Subnet: "10.255.0.0/16"
Gateway: "10.255.0.1"
Addresses:
- "10.255.0.10/16"
AssignedGenericResources:
- DiscreteResourceSpec:
Kind: "SSD"
Value: 3
- NamedResourceSpec:
Kind: "GPU"
Value: "UUID1"
- NamedResourceSpec:
Kind: "GPU"
Value: "UUID2"
ServiceSpec:
description: "User modifiable configuration for a service."
type: object
properties:
Name:
description: "Name of the service."
type: "string"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
TaskTemplate:
$ref: "#/definitions/TaskSpec"
Mode:
description: "Scheduling mode for the service."
type: "object"
properties:
Replicated:
type: "object"
properties:
Replicas:
type: "integer"
format: "int64"
Global:
type: "object"
ReplicatedJob:
description: |
The mode used for services with a finite number of tasks that run
to a completed state.
type: "object"
properties:
MaxConcurrent:
description: |
The maximum number of replicas to run simultaneously.
type: "integer"
format: "int64"
default: 1
TotalCompletions:
description: |
The total number of replicas desired to reach the Completed
state. If unset, will default to the value of `MaxConcurrent`
type: "integer"
format: "int64"
GlobalJob:
description: |
The mode used for services which run a task to the completed state
on each valid node.
type: "object"
UpdateConfig:
description: "Specification for the update strategy of the service."
type: "object"
properties:
Parallelism:
description: |
Maximum number of tasks to be updated in one iteration (0 means
unlimited parallelism).
type: "integer"
format: "int64"
Delay:
description: "Amount of time between updates, in nanoseconds."
type: "integer"
format: "int64"
FailureAction:
description: |
Action to take if an updated task fails to run, or stops running
during the update.
type: "string"
enum:
- "continue"
- "pause"
- "rollback"
Monitor:
description: |
Amount of time to monitor each updated task for failures, in
nanoseconds.
type: "integer"
format: "int64"
MaxFailureRatio:
description: |
The fraction of tasks that may fail during an update before the
failure action is invoked, specified as a floating point number
between 0 and 1.
type: "number"
default: 0
Order:
description: |
The order of operations when rolling out an updated task. Either
the old task is shut down before the new task is started, or the
new task is started before the old task is shut down.
type: "string"
enum:
- "stop-first"
- "start-first"
RollbackConfig:
description: "Specification for the rollback strategy of the service."
type: "object"
properties:
Parallelism:
description: |
Maximum number of tasks to be rolled back in one iteration (0 means
unlimited parallelism).
type: "integer"
format: "int64"
Delay:
description: |
Amount of time between rollback iterations, in nanoseconds.
type: "integer"
format: "int64"
FailureAction:
description: |
          Action to take if a rolled back task fails to run, or stops
running during the rollback.
type: "string"
enum:
- "continue"
- "pause"
Monitor:
description: |
Amount of time to monitor each rolled back task for failures, in
nanoseconds.
type: "integer"
format: "int64"
MaxFailureRatio:
description: |
The fraction of tasks that may fail during a rollback before the
failure action is invoked, specified as a floating point number
between 0 and 1.
type: "number"
default: 0
Order:
description: |
The order of operations when rolling back a task. Either the old
task is shut down before the new task is started, or the new task
is started before the old task is shut down.
type: "string"
enum:
- "stop-first"
- "start-first"
Networks:
description: |
Specifies which networks the service should attach to.
Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
type: "array"
items:
$ref: "#/definitions/NetworkAttachmentConfig"
EndpointSpec:
$ref: "#/definitions/EndpointSpec"
EndpointPortConfig:
type: "object"
properties:
Name:
type: "string"
Protocol:
type: "string"
enum:
- "tcp"
- "udp"
- "sctp"
TargetPort:
description: "The port inside the container."
type: "integer"
PublishedPort:
description: "The port on the swarm hosts."
type: "integer"
PublishMode:
description: |
The mode in which port is published.
<p><br /></p>
- "ingress" makes the target port accessible on every node,
regardless of whether there is a task for the service running on
that node or not.
        - "host" bypasses the routing mesh and publishes the port directly on
the swarm node where that service is running.
type: "string"
enum:
- "ingress"
- "host"
default: "ingress"
example: "ingress"
EndpointSpec:
description: "Properties that can be configured to access and load balance a service."
type: "object"
properties:
Mode:
description: |
The mode of resolution to use for internal load balancing between tasks.
type: "string"
enum:
- "vip"
- "dnsrr"
default: "vip"
Ports:
description: |
List of exposed ports that this service is accessible on from the
outside. Ports can only be provided if `vip` resolution mode is used.
type: "array"
items:
$ref: "#/definitions/EndpointPortConfig"
Service:
type: "object"
properties:
ID:
type: "string"
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
type: "string"
format: "dateTime"
UpdatedAt:
type: "string"
format: "dateTime"
Spec:
$ref: "#/definitions/ServiceSpec"
Endpoint:
type: "object"
properties:
Spec:
$ref: "#/definitions/EndpointSpec"
Ports:
type: "array"
items:
$ref: "#/definitions/EndpointPortConfig"
VirtualIPs:
type: "array"
items:
type: "object"
properties:
NetworkID:
type: "string"
Addr:
type: "string"
UpdateStatus:
description: "The status of a service update."
type: "object"
properties:
State:
type: "string"
enum:
- "updating"
- "paused"
- "completed"
StartedAt:
type: "string"
format: "dateTime"
CompletedAt:
type: "string"
format: "dateTime"
Message:
type: "string"
ServiceStatus:
description: |
The status of the service's tasks. Provided only when requested as
part of a ServiceList operation.
type: "object"
properties:
RunningTasks:
description: |
The number of tasks for the service currently in the Running state.
type: "integer"
format: "uint64"
example: 7
DesiredTasks:
description: |
The number of tasks for the service desired to be running.
For replicated services, this is the replica count from the
service spec. For global services, this is computed by taking
count of all tasks for the service with a Desired State other
than Shutdown.
type: "integer"
format: "uint64"
example: 10
CompletedTasks:
description: |
The number of tasks for a job that are in the Completed state.
This field must be cross-referenced with the service type, as the
value of 0 may mean the service is not in a job mode, or it may
mean the job-mode service has no tasks yet Completed.
type: "integer"
format: "uint64"
JobStatus:
description: |
The status of the service when it is in one of ReplicatedJob or
GlobalJob modes. Absent on Replicated and Global mode services. The
JobIteration is an ObjectVersion, but unlike the Service's version,
does not need to be sent with an update request.
type: "object"
properties:
JobIteration:
description: |
JobIteration is a value increased each time a Job is executed,
successfully or otherwise. "Executed", in this case, means the
job as a whole has been started, not that an individual Task has
been launched. A job is "Executed" when its ServiceSpec is
updated. JobIteration can be used to disambiguate Tasks belonging
to different executions of a job. Though JobIteration will
increase with each subsequent execution, it may not necessarily
          increase by 1, and so JobIteration should not be used to keep
          track of the number of times the job has been executed.
$ref: "#/definitions/ObjectVersion"
LastExecution:
description: |
The last time, as observed by the server, that this job was
started.
type: "string"
format: "dateTime"
example:
ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
Version:
Index: 19
CreatedAt: "2016-06-07T21:05:51.880065305Z"
UpdatedAt: "2016-06-07T21:07:29.962229872Z"
Spec:
Name: "hopeful_cori"
TaskTemplate:
ContainerSpec:
Image: "redis"
Resources:
Limits: {}
Reservations: {}
RestartPolicy:
Condition: "any"
MaxAttempts: 0
Placement: {}
ForceUpdate: 0
Mode:
Replicated:
Replicas: 1
UpdateConfig:
Parallelism: 1
Delay: 1000000000
FailureAction: "pause"
Monitor: 15000000000
MaxFailureRatio: 0.15
RollbackConfig:
Parallelism: 1
Delay: 1000000000
FailureAction: "pause"
Monitor: 15000000000
MaxFailureRatio: 0.15
EndpointSpec:
Mode: "vip"
Ports:
-
Protocol: "tcp"
TargetPort: 6379
PublishedPort: 30001
Endpoint:
Spec:
Mode: "vip"
Ports:
-
Protocol: "tcp"
TargetPort: 6379
PublishedPort: 30001
Ports:
-
Protocol: "tcp"
TargetPort: 6379
PublishedPort: 30001
VirtualIPs:
-
NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
Addr: "10.255.0.2/16"
-
NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
Addr: "10.255.0.3/16"
ImageDeleteResponseItem:
type: "object"
x-go-name: "DeleteResponse"
properties:
Untagged:
description: "The image ID of an image that was untagged"
type: "string"
Deleted:
description: "The image ID of an image that was deleted"
type: "string"
ServiceCreateResponse:
type: "object"
description: |
contains the information returned to a client on the
creation of a new service.
properties:
ID:
description: "The ID of the created service."
type: "string"
x-nullable: false
example: "ak7w3gjqoa3kuz8xcpnyy0pvl"
Warnings:
description: |
Optional warning message.
FIXME(thaJeztah): this should have "omitempty" in the generated type.
type: "array"
x-nullable: true
items:
type: "string"
example:
- "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
ServiceUpdateResponse:
type: "object"
properties:
Warnings:
description: "Optional warning messages"
type: "array"
items:
type: "string"
example:
Warnings:
- "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
ContainerInspectResponse:
type: "object"
title: "ContainerInspectResponse"
x-go-name: "InspectResponse"
properties:
Id:
description: |-
The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes).
type: "string"
x-go-name: "ID"
minLength: 64
maxLength: 64
pattern: "^[0-9a-fA-F]{64}$"
example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
Created:
description: |-
Date and time at which the container was created, formatted in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
x-nullable: true
example: "2025-02-17T17:43:39.64001363Z"
Path:
description: |-
The path to the command being run
type: "string"
example: "/bin/sh"
Args:
description: "The arguments to the command being run"
type: "array"
items:
type: "string"
example:
- "-c"
- "exit 9"
State:
$ref: "#/definitions/ContainerState"
Image:
description: |-
The ID (digest) of the image that this container was created from.
type: "string"
example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
ResolvConfPath:
description: |-
Location of the `/etc/resolv.conf` generated for the container on the
host.
This file is managed through the docker daemon, and should not be
accessed or modified by other tools.
type: "string"
example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf"
HostnamePath:
description: |-
Location of the `/etc/hostname` generated for the container on the
host.
This file is managed through the docker daemon, and should not be
accessed or modified by other tools.
type: "string"
example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname"
HostsPath:
description: |-
Location of the `/etc/hosts` generated for the container on the
host.
This file is managed through the docker daemon, and should not be
accessed or modified by other tools.
type: "string"
example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts"
LogPath:
description: |-
Location of the file used to buffer the container's logs. Depending on
the logging-driver used for the container, this field may be omitted.
This file is managed through the docker daemon, and should not be
accessed or modified by other tools.
type: "string"
x-nullable: true
example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log"
Name:
description: |-
The name associated with this container.
For historic reasons, the name may be prefixed with a forward-slash (`/`).
type: "string"
example: "/funny_chatelet"
RestartCount:
description: |-
Number of times the container was restarted since it was created,
or since daemon was started.
type: "integer"
example: 0
Driver:
description: |-
The storage-driver used for the container's filesystem (graph-driver
or snapshotter).
type: "string"
example: "overlayfs"
Platform:
description: |-
The platform (operating system) for which the container was created.
This field was introduced for the experimental "LCOW" (Linux Containers
On Windows) feature, which has been removed. In most cases, this field
is equal to the host's operating system (`linux` or `windows`).
type: "string"
example: "linux"
ImageManifestDescriptor:
$ref: "#/definitions/OCIDescriptor"
description: |-
OCI descriptor of the platform-specific manifest of the image
the container was created from.
Note: Only available if the daemon provides a multi-platform
image store.
MountLabel:
description: |-
SELinux mount label set for the container.
type: "string"
example: ""
ProcessLabel:
description: |-
SELinux process label set for the container.
type: "string"
example: ""
AppArmorProfile:
description: |-
The AppArmor profile set for the container.
type: "string"
example: ""
ExecIDs:
description: |-
IDs of exec instances that are running in the container.
type: "array"
items:
type: "string"
x-nullable: true
example:
- "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
- "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
HostConfig:
$ref: "#/definitions/HostConfig"
GraphDriver:
$ref: "#/definitions/DriverData"
SizeRw:
description: |-
The size of files that have been created or changed by this container.
This field is omitted by default, and only set when size is requested
in the API request.
type: "integer"
format: "int64"
x-nullable: true
example: "122880"
SizeRootFs:
description: |-
The total size of all files in the read-only layers from the image
that the container uses. These layers can be shared between containers.
This field is omitted by default, and only set when size is requested
in the API request.
type: "integer"
format: "int64"
x-nullable: true
example: "1653948416"
Mounts:
description: |-
List of mounts used by the container.
type: "array"
items:
$ref: "#/definitions/MountPoint"
Config:
$ref: "#/definitions/ContainerConfig"
NetworkSettings:
$ref: "#/definitions/NetworkSettings"
ContainerSummary:
type: "object"
properties:
Id:
description: |-
The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes).
type: "string"
x-go-name: "ID"
minLength: 64
maxLength: 64
pattern: "^[0-9a-fA-F]{64}$"
example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
Names:
description: |-
The names associated with this container. Most containers have a single
name, but when using legacy "links", the container can have multiple
names.
For historic reasons, names are prefixed with a forward-slash (`/`).
type: "array"
items:
type: "string"
example:
- "/funny_chatelet"
Image:
description: |-
The name or ID of the image used to create the container.
This field shows the image reference as was specified when creating the container,
which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest`
or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`),
short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`).
The content of this field can be updated at runtime if the image used to
create the container is untagged, in which case the field is updated to
contain the image ID (digest) it was resolved to in its canonical,
non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`).
type: "string"
example: "docker.io/library/ubuntu:latest"
ImageID:
description: |-
The ID (digest) of the image that this container was created from.
type: "string"
example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
ImageManifestDescriptor:
$ref: "#/definitions/OCIDescriptor"
x-nullable: true
description: |
OCI descriptor of the platform-specific manifest of the image
the container was created from.
Note: Only available if the daemon provides a multi-platform
image store.
This field is not populated in the `GET /system/df` endpoint.
Command:
description: "Command to run when starting the container"
type: "string"
example: "/bin/bash"
Created:
description: |-
Date and time at which the container was created as a Unix timestamp
(number of seconds since EPOCH).
type: "integer"
format: "int64"
example: "1739811096"
Ports:
description: |-
Port-mappings for the container.
type: "array"
items:
$ref: "#/definitions/Port"
SizeRw:
description: |-
The size of files that have been created or changed by this container.
This field is omitted by default, and only set when size is requested
in the API request.
type: "integer"
format: "int64"
x-nullable: true
example: "122880"
SizeRootFs:
description: |-
The total size of all files in the read-only layers from the image
that the container uses. These layers can be shared between containers.
This field is omitted by default, and only set when size is requested
in the API request.
type: "integer"
format: "int64"
x-nullable: true
example: "1653948416"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.vendor: "Acme"
com.example.license: "GPL"
com.example.version: "1.0"
State:
description: |
The state of this container.
type: "string"
enum:
- "created"
- "running"
- "paused"
- "restarting"
- "exited"
- "removing"
- "dead"
example: "running"
Status:
description: |-
Additional human-readable status of this container (e.g. `Exit 0`)
type: "string"
example: "Up 4 days"
HostConfig:
type: "object"
description: |-
Summary of host-specific runtime information of the container. This
is a reduced set of information in the container's "HostConfig" as
available in the container "inspect" response.
properties:
NetworkMode:
description: |-
Networking mode (`host`, `none`, `container:<id>`) or name of the
primary network the container is using.
This field is primarily for backward compatibility. The container
can be connected to multiple networks for which information can be
found in the `NetworkSettings.Networks` field, which enumerates
settings per network.
type: "string"
example: "mynetwork"
Annotations:
description: |-
Arbitrary key-value metadata attached to the container.
type: "object"
x-nullable: true
additionalProperties:
type: "string"
example:
io.kubernetes.docker.type: "container"
io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3"
NetworkSettings:
description: |-
Summary of the container's network settings
type: "object"
properties:
Networks:
type: "object"
description: |-
Summary of network-settings for each network the container is
attached to.
additionalProperties:
$ref: "#/definitions/EndpointSettings"
Mounts:
type: "array"
description: |-
List of mounts used by the container.
items:
$ref: "#/definitions/MountPoint"
Driver:
description: "Driver represents a driver (network, logging, secrets)."
type: "object"
required: [Name]
properties:
Name:
description: "Name of the driver."
type: "string"
x-nullable: false
example: "some-driver"
Options:
description: "Key/value map of driver-specific options."
type: "object"
x-nullable: false
additionalProperties:
type: "string"
example:
OptionA: "value for driver-specific option A"
OptionB: "value for driver-specific option B"
SecretSpec:
type: "object"
properties:
Name:
description: "User-defined name of the secret."
type: "string"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
Data:
description: |
Data is the data to store as a secret, formatted as a standard base64-encoded
([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string.
It must be empty if the Driver field is set, in which case the data is
loaded from an external secret store. The maximum allowed size is 500KB,
as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize).
This field is only used to _create_ a secret, and is not returned by
other endpoints.
type: "string"
example: ""
Driver:
description: |
Name of the secrets driver used to fetch the secret's value from an
external secret store.
$ref: "#/definitions/Driver"
Templating:
description: |
Templating driver, if applicable
Templating controls whether and how to evaluate the config payload as
a template. If no driver is set, no templating is used.
$ref: "#/definitions/Driver"
Secret:
type: "object"
properties:
ID:
type: "string"
example: "blt1owaxmitz71s9v5zh81zun"
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
type: "string"
format: "dateTime"
example: "2017-07-20T13:55:28.678958722Z"
UpdatedAt:
type: "string"
format: "dateTime"
example: "2017-07-20T13:55:28.678958722Z"
Spec:
$ref: "#/definitions/SecretSpec"
ConfigSpec:
type: "object"
properties:
Name:
description: "User-defined name of the config."
type: "string"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
Data:
description: |
Data is the data to store as a config, formatted as a standard base64-encoded
([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string.
The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize).
type: "string"
Templating:
description: |
Templating driver, if applicable
Templating controls whether and how to evaluate the config payload as
a template. If no driver is set, no templating is used.
$ref: "#/definitions/Driver"
Config:
type: "object"
properties:
ID:
type: "string"
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
type: "string"
format: "dateTime"
UpdatedAt:
type: "string"
format: "dateTime"
Spec:
$ref: "#/definitions/ConfigSpec"
ContainerState:
description: |
ContainerState stores container's running state. It's part of ContainerJSONBase
and will be returned by the "inspect" command.
type: "object"
x-nullable: true
properties:
Status:
description: |
String representation of the container state. Can be one of "created",
"running", "paused", "restarting", "removing", "exited", or "dead".
type: "string"
enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
example: "running"
Running:
description: |
Whether this container is running.
Note that a running container can be _paused_. The `Running` and `Paused`
booleans are not mutually exclusive:
When pausing a container (on Linux), the freezer cgroup is used to suspend
all processes in the container. Freezing the process requires the process to
be running. As a result, paused containers are both `Running` _and_ `Paused`.
Use the `Status` field instead to determine if a container's state is "running".
type: "boolean"
example: true
Paused:
description: "Whether this container is paused."
type: "boolean"
example: false
Restarting:
description: "Whether this container is restarting."
type: "boolean"
example: false
OOMKilled:
description: |
Whether a process within this container has been killed because it ran
out of memory since the container was last started.
type: "boolean"
example: false
Dead:
type: "boolean"
example: false
Pid:
description: "The process ID of this container"
type: "integer"
example: 1234
ExitCode:
description: "The last exit code of this container"
type: "integer"
example: 0
Error:
type: "string"
StartedAt:
description: "The time when this container was last started."
type: "string"
example: "2020-01-06T09:06:59.461876391Z"
FinishedAt:
description: "The time when this container last exited."
type: "string"
example: "2020-01-06T09:07:59.461876391Z"
Health:
$ref: "#/definitions/Health"
ContainerCreateResponse:
description: "OK response to ContainerCreate operation"
type: "object"
title: "ContainerCreateResponse"
x-go-name: "CreateResponse"
required: [Id, Warnings]
properties:
Id:
description: "The ID of the created container"
type: "string"
x-nullable: false
example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
Warnings:
description: "Warnings encountered when creating the container"
type: "array"
x-nullable: false
items:
type: "string"
example: []
ContainerUpdateResponse:
type: "object"
title: "ContainerUpdateResponse"
x-go-name: "UpdateResponse"
description: |-
Response for a successful container-update.
properties:
Warnings:
type: "array"
description: |-
Warnings encountered when updating the container.
items:
type: "string"
example: ["Published ports are discarded when using host network mode"]
ContainerStatsResponse:
description: |
Statistics sample for a container.
type: "object"
x-go-name: "StatsResponse"
title: "ContainerStatsResponse"
properties:
name:
description: "Name of the container"
type: "string"
x-nullable: true
example: "boring_wozniak"
id:
description: "ID of the container"
type: "string"
x-nullable: true
example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
read:
description: |
Date and time at which this sample was collected.
The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
with nano-seconds.
type: "string"
format: "date-time"
example: "2025-01-16T13:55:22.165243637Z"
preread:
description: |
Date and time at which this first sample was collected. This field
is not propagated if the "one-shot" option is set; in that case it
may be omitted, empty, or set to a default date
(`0001-01-01T00:00:00Z`).
The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
with nano-seconds.
type: "string"
format: "date-time"
example: "2025-01-16T13:55:21.160452595Z"
pids_stats:
$ref: "#/definitions/ContainerPidsStats"
blkio_stats:
$ref: "#/definitions/ContainerBlkioStats"
num_procs:
description: |
The number of processors on the system.
This field is Windows-specific and always zero for Linux containers.
type: "integer"
format: "uint32"
example: 16
storage_stats:
$ref: "#/definitions/ContainerStorageStats"
cpu_stats:
$ref: "#/definitions/ContainerCPUStats"
precpu_stats:
$ref: "#/definitions/ContainerCPUStats"
memory_stats:
$ref: "#/definitions/ContainerMemoryStats"
networks:
description: |
Network statistics for the container per interface.
This field is omitted if the container has no networking enabled.
x-nullable: true
additionalProperties:
$ref: "#/definitions/ContainerNetworkStats"
example:
eth0:
rx_bytes: 5338
rx_dropped: 0
rx_errors: 0
rx_packets: 36
tx_bytes: 648
tx_dropped: 0
tx_errors: 0
tx_packets: 8
eth5:
rx_bytes: 4641
rx_dropped: 0
rx_errors: 0
rx_packets: 26
tx_bytes: 690
tx_dropped: 0
tx_errors: 0
tx_packets: 9
ContainerBlkioStats:
description: |
BlkioStats stores all IO service stats for data read and write.
This type is Linux-specific and holds many fields that are specific to cgroups v1.
On a cgroup v2 host, all fields other than `io_service_bytes_recursive`
are omitted or `null`.
This type is only populated on Linux and omitted for Windows containers.
type: "object"
x-go-name: "BlkioStats"
x-nullable: true
properties:
io_service_bytes_recursive:
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
io_serviced_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
io_queue_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
io_service_time_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
io_wait_time_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
io_merged_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
io_time_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
sectors_recursive:
description: |
This field is only available when using Linux containers with
cgroups v1. It is omitted or `null` when using cgroups v2.
x-nullable: true
type: "array"
items:
$ref: "#/definitions/ContainerBlkioStatEntry"
example:
io_service_bytes_recursive: [
{"major": 254, "minor": 0, "op": "read", "value": 7593984},
{"major": 254, "minor": 0, "op": "write", "value": 100}
]
io_serviced_recursive: null
io_queue_recursive: null
io_service_time_recursive: null
io_wait_time_recursive: null
io_merged_recursive: null
io_time_recursive: null
sectors_recursive: null
ContainerBlkioStatEntry:
description: |
Blkio stats entry.
This type is Linux-specific and omitted for Windows containers.
type: "object"
x-go-name: "BlkioStatEntry"
x-nullable: true
properties:
major:
type: "integer"
format: "uint64"
example: 254
minor:
type: "integer"
format: "uint64"
example: 0
op:
type: "string"
example: "read"
value:
type: "integer"
format: "uint64"
example: 7593984
ContainerCPUStats:
description: |
CPU related info of the container
type: "object"
x-go-name: "CPUStats"
x-nullable: true
properties:
cpu_usage:
$ref: "#/definitions/ContainerCPUUsage"
system_cpu_usage:
description: |
System Usage.
This field is Linux-specific and omitted for Windows containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 5
online_cpus:
description: |
Number of online CPUs.
This field is Linux-specific and omitted for Windows containers.
type: "integer"
format: "uint32"
x-nullable: true
example: 5
throttling_data:
$ref: "#/definitions/ContainerThrottlingData"
ContainerCPUUsage:
description: |
All CPU stats aggregated since container inception.
type: "object"
x-go-name: "CPUUsage"
x-nullable: true
properties:
total_usage:
description: |
Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows).
type: "integer"
format: "uint64"
example: 29912000
percpu_usage:
description: |
Total CPU time (in nanoseconds) consumed per core (Linux).
This field is Linux-specific when using cgroups v1. It is omitted
when using cgroups v2 and for Windows containers.
type: "array"
x-nullable: true
items:
type: "integer"
format: "uint64"
example: 29912000
usage_in_kernelmode:
description: |
Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux),
or time spent (in 100's of nanoseconds) by all container processes in
kernel mode (Windows).
Not populated for Windows containers using Hyper-V isolation.
type: "integer"
format: "uint64"
example: 21994000
usage_in_usermode:
description: |
Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux),
or time spent (in 100's of nanoseconds) by all container processes in
user mode (Windows).
Not populated for Windows containers using Hyper-V isolation.
type: "integer"
format: "uint64"
example: 7918000
ContainerPidsStats:
description: |
PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
This type is Linux-specific and omitted for Windows containers.
type: "object"
x-go-name: "PidsStats"
x-nullable: true
properties:
current:
description: |
Current is the number of PIDs in the cgroup.
type: "integer"
format: "uint64"
x-nullable: true
example: 5
limit:
description: |
Limit is the hard limit on the number of pids in the cgroup.
A "Limit" of 0 means that there is no limit.
type: "integer"
format: "uint64"
x-nullable: true
example: "18446744073709551615"
ContainerThrottlingData:
description: |
CPU throttling stats of the container.
This type is Linux-specific and omitted for Windows containers.
type: "object"
x-go-name: "ThrottlingData"
x-nullable: true
properties:
periods:
description: |
Number of periods with throttling active.
type: "integer"
format: "uint64"
example: 0
throttled_periods:
description: |
Number of periods when the container hit its throttling limit.
type: "integer"
format: "uint64"
example: 0
throttled_time:
description: |
Aggregated time (in nanoseconds) the container was throttled for.
type: "integer"
format: "uint64"
example: 0
ContainerMemoryStats:
description: |
Aggregates all memory stats since container inception on Linux.
Windows returns stats for commit and private working set only.
type: "object"
x-go-name: "MemoryStats"
properties:
usage:
description: |
Current `res_counter` usage for memory.
This field is Linux-specific and omitted for Windows containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 0
max_usage:
description: |
Maximum usage ever recorded.
This field is Linux-specific and only supported on cgroups v1.
It is omitted when using cgroups v2 and for Windows containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 0
stats:
description: |
All the stats exported via memory.stat.
The fields in this object differ between cgroups v1 and v2.
On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available.
On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available.
This field is Linux-specific and omitted for Windows containers.
type: "object"
additionalProperties:
type: "integer"
format: "uint64"
x-nullable: true
example:
{
"active_anon": 1572864,
"active_file": 5115904,
"anon": 1572864,
"anon_thp": 0,
"file": 7626752,
"file_dirty": 0,
"file_mapped": 2723840,
"file_writeback": 0,
"inactive_anon": 0,
"inactive_file": 2510848,
"kernel_stack": 16384,
"pgactivate": 0,
"pgdeactivate": 0,
"pgfault": 2042,
"pglazyfree": 0,
"pglazyfreed": 0,
"pgmajfault": 45,
"pgrefill": 0,
"pgscan": 0,
"pgsteal": 0,
"shmem": 0,
"slab": 1180928,
"slab_reclaimable": 725576,
"slab_unreclaimable": 455352,
"sock": 0,
"thp_collapse_alloc": 0,
"thp_fault_alloc": 1,
"unevictable": 0,
"workingset_activate": 0,
"workingset_nodereclaim": 0,
"workingset_refault": 0
}
failcnt:
description: |
Number of times memory usage hits limits.
This field is Linux-specific and only supported on cgroups v1.
It is omitted when using cgroups v2 and for Windows containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 0
limit:
description: |
This field is Linux-specific and omitted for Windows containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 8217579520
commitbytes:
description: |
Committed bytes.
This field is Windows-specific and omitted for Linux containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 0
commitpeakbytes:
description: |
Peak committed bytes.
This field is Windows-specific and omitted for Linux containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 0
privateworkingset:
description: |
Private working set.
This field is Windows-specific and omitted for Linux containers.
type: "integer"
format: "uint64"
x-nullable: true
example: 0
ContainerNetworkStats:
description: |
Aggregates the network stats of one container
type: "object"
x-go-name: "NetworkStats"
x-nullable: true
properties:
rx_bytes:
description: |
Bytes received. Windows and Linux.
type: "integer"
format: "uint64"
example: 5338
rx_packets:
description: |
Packets received. Windows and Linux.
type: "integer"
format: "uint64"
example: 36
rx_errors:
description: |
Received errors. Not used on Windows.
This field is Linux-specific and always zero for Windows containers.
type: "integer"
format: "uint64"
example: 0
rx_dropped:
description: |
Incoming packets dropped. Windows and Linux.
type: "integer"
format: "uint64"
example: 0
tx_bytes:
description: |
Bytes sent. Windows and Linux.
type: "integer"
format: "uint64"
example: 1200
tx_packets:
description: |
Packets sent. Windows and Linux.
type: "integer"
format: "uint64"
example: 12
tx_errors:
description: |
Sent errors. Not used on Windows.
This field is Linux-specific and always zero for Windows containers.
type: "integer"
format: "uint64"
example: 0
tx_dropped:
description: |
Outgoing packets dropped. Windows and Linux.
type: "integer"
format: "uint64"
example: 0
endpoint_id:
description: |
Endpoint ID. Not used on Linux.
This field is Windows-specific and omitted for Linux containers.
type: "string"
x-nullable: true
instance_id:
description: |
Instance ID. Not used on Linux.
This field is Windows-specific and omitted for Linux containers.
type: "string"
x-nullable: true
ContainerStorageStats:
description: |
StorageStats is the disk I/O stats for read/write on Windows.
This type is Windows-specific and omitted for Linux containers.
type: "object"
x-go-name: "StorageStats"
x-nullable: true
properties:
read_count_normalized:
type: "integer"
format: "uint64"
x-nullable: true
example: 7593984
read_size_bytes:
type: "integer"
format: "uint64"
x-nullable: true
example: 7593984
write_count_normalized:
type: "integer"
format: "uint64"
x-nullable: true
example: 7593984
write_size_bytes:
type: "integer"
format: "uint64"
x-nullable: true
example: 7593984
ContainerTopResponse:
type: "object"
x-go-name: "TopResponse"
title: "ContainerTopResponse"
description: |-
Container "top" response.
properties:
Titles:
description: "The ps column titles"
type: "array"
items:
type: "string"
example:
Titles:
- "UID"
- "PID"
- "PPID"
- "C"
- "STIME"
- "TTY"
- "TIME"
- "CMD"
Processes:
description: |-
Each process running in the container, where each process
is an array of values corresponding to the titles.
type: "array"
items:
type: "array"
items:
type: "string"
example:
Processes:
-
- "root"
- "13642"
- "882"
- "0"
- "17:03"
- "pts/0"
- "00:00:00"
- "/bin/bash"
-
- "root"
- "13735"
- "13642"
- "0"
- "17:06"
- "pts/0"
- "00:00:00"
- "sleep 10"
ContainerWaitResponse:
description: "OK response to ContainerWait operation"
type: "object"
x-go-name: "WaitResponse"
title: "ContainerWaitResponse"
required: [StatusCode]
properties:
StatusCode:
description: "Exit code of the container"
type: "integer"
format: "int64"
x-nullable: false
Error:
$ref: "#/definitions/ContainerWaitExitError"
ContainerWaitExitError:
description: "container waiting error, if any"
type: "object"
x-go-name: "WaitExitError"
properties:
Message:
description: "Details of an error"
type: "string"
SystemVersion:
type: "object"
description: |
Response of Engine API: GET "/version"
properties:
Platform:
type: "object"
required: [Name]
properties:
Name:
type: "string"
Components:
type: "array"
description: |
Information about system components
items:
type: "object"
x-go-name: ComponentVersion
required: [Name, Version]
properties:
Name:
description: |
Name of the component
type: "string"
example: "Engine"
Version:
description: |
Version of the component
type: "string"
x-nullable: false
example: "27.0.1"
Details:
description: |
Key/value pairs of strings with additional information about the
component. These values are intended for informational purposes
only, and their content is not defined, and not part of the API
specification.
These messages can be printed by the client as information to the user.
type: "object"
x-nullable: true
Version:
description: "The version of the daemon"
type: "string"
example: "27.0.1"
ApiVersion:
description: |
The default (and highest) API version that is supported by the daemon
type: "string"
example: "1.47"
MinAPIVersion:
description: |
The minimum API version that is supported by the daemon
type: "string"
example: "1.24"
GitCommit:
description: |
The Git commit of the source code that was used to build the daemon
type: "string"
example: "48a66213fe"
GoVersion:
description: |
The version of Go used to compile the daemon, and the version of the Go
runtime in use.
type: "string"
example: "go1.22.7"
Os:
description: |
The operating system that the daemon is running on ("linux" or "windows")
type: "string"
example: "linux"
Arch:
description: |
Architecture of the daemon, as returned by the Go runtime (`GOARCH`).
A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).
type: "string"
example: "amd64"
KernelVersion:
description: |
The kernel version (`uname -r`) that the daemon is running on.
This field is omitted when empty.
type: "string"
example: "6.8.0-31-generic"
Experimental:
description: |
Indicates if the daemon is started with experimental features enabled.
This field is omitted when empty / false.
type: "boolean"
example: true
BuildTime:
description: |
The date and time that the daemon was compiled.
type: "string"
example: "2020-06-22T15:49:27.000000000+00:00"
SystemInfo:
type: "object"
properties:
ID:
description: |
Unique identifier of the daemon.
<p><br /></p>
> **Note**: The format of the ID itself is not part of the API, and
> should not be considered stable.
type: "string"
example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
Containers:
description: "Total number of containers on the host."
type: "integer"
example: 14
ContainersRunning:
description: |
Number of containers with status `"running"`.
type: "integer"
example: 3
ContainersPaused:
description: |
Number of containers with status `"paused"`.
type: "integer"
example: 1
ContainersStopped:
description: |
Number of containers with status `"stopped"`.
type: "integer"
example: 10
Images:
description: |
Total number of images on the host.
Both _tagged_ and _untagged_ (dangling) images are counted.
type: "integer"
example: 508
Driver:
description: "Name of the storage driver in use."
type: "string"
example: "overlay2"
DriverStatus:
description: |
Information specific to the storage driver, provided as
"label" / "value" pairs.
This information is provided by the storage driver, and formatted
in a way consistent with the output of `docker info` on the command
line.
<p><br /></p>
> **Note**: The information returned in this field, including the
> formatting of values and labels, should not be considered stable,
> and may change without notice.
type: "array"
items:
type: "array"
items:
type: "string"
example:
- ["Backing Filesystem", "extfs"]
- ["Supports d_type", "true"]
- ["Native Overlay Diff", "true"]
DockerRootDir:
description: |
Root directory of persistent Docker state.
Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
on Windows.
type: "string"
example: "/var/lib/docker"
Plugins:
$ref: "#/definitions/PluginsInfo"
MemoryLimit:
description: "Indicates if the host has memory limit support enabled."
type: "boolean"
example: true
SwapLimit:
description: "Indicates if the host has memory swap limit support enabled."
type: "boolean"
example: true
KernelMemoryTCP:
description: |
Indicates if the host has kernel memory TCP limit support enabled. This
field is omitted if not supported.
Kernel memory TCP limits are not supported when using cgroups v2, which
does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup.
type: "boolean"
example: true
CpuCfsPeriod:
description: |
Indicates if CPU CFS(Completely Fair Scheduler) period is supported by
the host.
type: "boolean"
example: true
CpuCfsQuota:
description: |
Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by
the host.
type: "boolean"
example: true
CPUShares:
description: |
Indicates if CPU Shares limiting is supported by the host.
type: "boolean"
example: true
CPUSet:
description: |
Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
type: "boolean"
example: true
PidsLimit:
description: "Indicates if the host kernel has PID limit support enabled."
type: "boolean"
example: true
OomKillDisable:
description: "Indicates if OOM killer disable is supported on the host."
type: "boolean"
IPv4Forwarding:
description: "Indicates IPv4 forwarding is enabled."
type: "boolean"
example: true
BridgeNfIptables:
description: |
Indicates if `bridge-nf-call-iptables` is available on the host when
the daemon was started.
<p><br /></p>
> **Deprecated**: netfilter module is now loaded on-demand and no longer
> during daemon startup, making this field obsolete. This field is always
          > `false` and will be removed in API v1.50.
type: "boolean"
example: false
BridgeNfIp6tables:
description: |
Indicates if `bridge-nf-call-ip6tables` is available on the host.
<p><br /></p>
> **Deprecated**: netfilter module is now loaded on-demand, and no longer
> during daemon startup, making this field obsolete. This field is always
          > `false` and will be removed in API v1.50.
type: "boolean"
example: false
Debug:
description: |
Indicates if the daemon is running in debug-mode / with debug-level
logging enabled.
type: "boolean"
example: true
NFd:
description: |
The total number of file Descriptors in use by the daemon process.
This information is only returned if debug-mode is enabled.
type: "integer"
example: 64
NGoroutines:
description: |
The number of goroutines that currently exist.
This information is only returned if debug-mode is enabled.
type: "integer"
example: 174
SystemTime:
description: |
Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
format with nano-seconds.
type: "string"
example: "2017-08-08T20:28:29.06202363Z"
LoggingDriver:
description: |
The logging driver to use as a default for new containers.
type: "string"
CgroupDriver:
description: |
The driver to use for managing cgroups.
type: "string"
enum: ["cgroupfs", "systemd", "none"]
default: "cgroupfs"
example: "cgroupfs"
CgroupVersion:
description: |
The version of the cgroup.
type: "string"
enum: ["1", "2"]
default: "1"
example: "1"
NEventsListener:
description: "Number of event listeners subscribed."
type: "integer"
example: 30
KernelVersion:
description: |
Kernel version of the host.
          On Linux, this information is obtained from `uname`. On Windows this
information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd>
registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
type: "string"
example: "6.8.0-31-generic"
OperatingSystem:
description: |
Name of the host's operating system, for example: "Ubuntu 24.04 LTS"
or "Windows Server 2016 Datacenter"
type: "string"
example: "Ubuntu 24.04 LTS"
OSVersion:
description: |
Version of the host's operating system
<p><br /></p>
> **Note**: The information returned in this field, including its
> very existence, and the formatting of values, should not be considered
> stable, and may change without notice.
type: "string"
example: "24.04"
OSType:
description: |
Generic type of the operating system of the host, as returned by the
Go runtime (`GOOS`).
Currently returned values are "linux" and "windows". A full list of
possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).
type: "string"
example: "linux"
Architecture:
description: |
Hardware architecture of the host, as returned by the operating system.
This is equivalent to the output of `uname -m` on Linux.
Unlike `Arch` (from `/version`), this reports the machine's native
architecture, which can differ from the Go runtime architecture when
running a binary compiled for a different architecture (for example,
a 32-bit binary running on 64-bit hardware).
type: "string"
example: "x86_64"
NCPU:
description: |
The number of logical CPUs usable by the daemon.
The number of available CPUs is checked by querying the operating
system when the daemon starts. Changes to operating system CPU
allocation after the daemon is started are not reflected.
type: "integer"
example: 4
MemTotal:
description: |
Total amount of physical memory available on the host, in bytes.
type: "integer"
format: "int64"
example: 2095882240
IndexServerAddress:
description: |
Address / URL of the index server that is used for image search,
and as a default for user authentication for Docker Hub and Docker Cloud.
default: "https://index.docker.io/v1/"
type: "string"
example: "https://index.docker.io/v1/"
RegistryConfig:
$ref: "#/definitions/RegistryServiceConfig"
GenericResources:
$ref: "#/definitions/GenericResources"
HttpProxy:
description: |
HTTP-proxy configured for the daemon. This value is obtained from the
[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
are masked in the API response.
Containers do not automatically inherit this configuration.
type: "string"
example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080"
HttpsProxy:
description: |
HTTPS-proxy configured for the daemon. This value is obtained from the
[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
are masked in the API response.
Containers do not automatically inherit this configuration.
type: "string"
example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443"
NoProxy:
description: |
Comma-separated list of domain extensions for which no proxy should be
used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)
environment variable.
Containers do not automatically inherit this configuration.
type: "string"
example: "*.local, 169.254/16"
Name:
description: "Hostname of the host."
type: "string"
example: "node5.corp.example.com"
Labels:
description: |
User-defined labels (key/value metadata) as set on the daemon.
<p><br /></p>
> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
> set through the daemon configuration, and _node_ labels, set from a
> manager node in the Swarm. Node labels are not included in this
> field. Node labels can be retrieved using the `/nodes/(id)` endpoint
> on a manager node in the Swarm.
type: "array"
items:
type: "string"
example: ["storage=ssd", "production"]
ExperimentalBuild:
description: |
Indicates if experimental features are enabled on the daemon.
type: "boolean"
example: true
ServerVersion:
description: |
Version string of the daemon.
type: "string"
example: "27.0.1"
Runtimes:
description: |
List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
runtimes configured on the daemon. Keys hold the "name" used to
reference the runtime.
The Docker daemon relies on an OCI compliant runtime (invoked via the
`containerd` daemon) as its interface to the Linux kernel namespaces,
cgroups, and SELinux.
The default runtime is `runc`, and automatically configured. Additional
runtimes can be configured by the user and will be listed here.
type: "object"
additionalProperties:
$ref: "#/definitions/Runtime"
default:
runc:
path: "runc"
example:
runc:
path: "runc"
runc-master:
path: "/go/bin/runc"
custom:
path: "/usr/local/bin/my-oci-runtime"
runtimeArgs: ["--debug", "--systemd-cgroup=false"]
DefaultRuntime:
description: |
Name of the default OCI runtime that is used when starting containers.
The default can be overridden per-container at create time.
type: "string"
default: "runc"
example: "runc"
Swarm:
$ref: "#/definitions/SwarmInfo"
LiveRestoreEnabled:
description: |
Indicates if live restore is enabled.
If enabled, containers are kept running when the daemon is shutdown
or upon daemon start if running containers are detected.
type: "boolean"
default: false
example: false
Isolation:
description: |
Represents the isolation technology to use as a default for containers.
The supported values are platform-specific.
If no isolation value is specified on daemon start, on Windows client,
the default is `hyperv`, and on Windows server, the default is `process`.
This option is currently not used on other platforms.
default: "default"
type: "string"
enum:
- "default"
- "hyperv"
- "process"
- ""
InitBinary:
description: |
Name and, optional, path of the `docker-init` binary.
If the path is omitted, the daemon searches the host's `$PATH` for the
binary and uses the first result.
type: "string"
example: "docker-init"
ContainerdCommit:
$ref: "#/definitions/Commit"
RuncCommit:
$ref: "#/definitions/Commit"
InitCommit:
$ref: "#/definitions/Commit"
SecurityOptions:
description: |
List of security features that are enabled on the daemon, such as
apparmor, seccomp, SELinux, user-namespaces (userns), rootless and
no-new-privileges.
Additional configuration options for each security feature may
be present, and are included as a comma-separated list of key/value
pairs.
type: "array"
items:
type: "string"
example:
- "name=apparmor"
- "name=seccomp,profile=default"
- "name=selinux"
- "name=userns"
- "name=rootless"
ProductLicense:
description: |
Reports a summary of the product license on the daemon.
If a commercial license has been applied to the daemon, information
such as number of nodes, and expiration are included.
type: "string"
example: "Community Engine"
DefaultAddressPools:
description: |
List of custom default address pools for local networks, which can be
specified in the daemon.json file or dockerd option.
Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
10.10.[0-255].0/24 address pools.
type: "array"
items:
type: "object"
properties:
Base:
description: "The network address in CIDR format"
type: "string"
example: "10.10.0.0/16"
Size:
description: "The network pool size"
type: "integer"
example: "24"
FirewallBackend:
$ref: "#/definitions/FirewallInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
issues related to the daemon configuration.
These messages can be printed by the client as information to the user.
type: "array"
items:
type: "string"
example:
- "WARNING: No memory limit support"
CDISpecDirs:
description: |
List of directories where (Container Device Interface) CDI
specifications are located.
These specifications define vendor-specific modifications to an OCI
runtime specification for a container being created.
An empty list indicates that CDI device injection is disabled.
          Note that using CDI device injection requires the daemon to have
          experimental features enabled. For non-experimental daemons an empty
          list will always be returned.
type: "array"
items:
type: "string"
example:
- "/etc/cdi"
- "/var/run/cdi"
Containerd:
$ref: "#/definitions/ContainerdInfo"
ContainerdInfo:
description: |
Information for connecting to the containerd instance that is used by the daemon.
This is included for debugging purposes only.
type: "object"
x-nullable: true
properties:
Address:
description: "The address of the containerd socket."
type: "string"
example: "/run/containerd/containerd.sock"
Namespaces:
description: |
The namespaces that the daemon uses for running containers and
plugins in containerd. These namespaces can be configured in the
daemon configuration, and are considered to be used exclusively
          by the daemon. Tampering with the containerd instance may cause
unexpected behavior.
As these namespaces are considered to be exclusively accessed
by the daemon, it is not recommended to change these values,
or to change them to a value that is used by other systems,
such as cri-containerd.
type: "object"
properties:
Containers:
description: |
The default containerd namespace used for containers managed
by the daemon.
The default namespace for containers is "moby", but will be
suffixed with the `<uid>.<gid>` of the remapped `root` if
user-namespaces are enabled and the containerd image-store
is used.
type: "string"
default: "moby"
example: "moby"
Plugins:
description: |
The default containerd namespace used for plugins managed by
the daemon.
The default namespace for plugins is "plugins.moby", but will be
suffixed with the `<uid>.<gid>` of the remapped `root` if
user-namespaces are enabled and the containerd image-store
is used.
type: "string"
default: "plugins.moby"
example: "plugins.moby"
FirewallInfo:
description: |
Information about the daemon's firewalling configuration.
This field is currently only used on Linux, and omitted on other platforms.
type: "object"
x-nullable: true
properties:
Driver:
description: |
The name of the firewall backend driver.
type: "string"
example: "nftables"
Info:
description: |
Information about the firewall backend, provided as
"label" / "value" pairs.
<p><br /></p>
> **Note**: The information returned in this field, including the
> formatting of values and labels, should not be considered stable,
> and may change without notice.
type: "array"
items:
type: "array"
items:
type: "string"
example:
- ["ReloadedAt", "2025-01-01T00:00:00Z"]
# PluginsInfo is a temp struct holding Plugins name
# registered with docker daemon. It is used by Info struct
PluginsInfo:
description: |
Available plugins per type.
<p><br /></p>
> **Note**: Only unmanaged (V1) plugins are included in this list.
> V1 plugins are "lazily" loaded, and are not returned in this list
> if there is no resource using the plugin.
type: "object"
properties:
Volume:
description: "Names of available volume-drivers, and network-driver plugins."
type: "array"
items:
type: "string"
example: ["local"]
Network:
description: "Names of available network-drivers, and network-driver plugins."
type: "array"
items:
type: "string"
example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
Authorization:
description: "Names of available authorization plugins."
type: "array"
items:
type: "string"
example: ["img-authz-plugin", "hbm"]
Log:
description: "Names of available logging-drivers, and logging-driver plugins."
type: "array"
items:
type: "string"
example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
RegistryServiceConfig:
description: |
RegistryServiceConfig stores daemon registry services configuration.
type: "object"
x-nullable: true
properties:
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
from unknown CAs) communication.
By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
insecure. All other registries are secure. Communicating with an
insecure registry is not possible if the daemon assumes that registry
is secure.
          This configuration overrides this behavior, allowing insecure
          communication with registries whose resolved IP address is within
          the subnet described by the CIDR syntax.
Registries can also be marked insecure by hostname. Those registries
are listed under `IndexConfigs` and have their `Secure` field set to
`false`.
> **Warning**: Using this option can be useful when running a local
> registry, but introduces security vulnerabilities. This option
> should therefore ONLY be used for testing purposes. For increased
> security, users should add their CA to their system's list of trusted
> CAs instead of enabling this option.
type: "array"
items:
type: "string"
example: ["::1/128", "127.0.0.0/8"]
IndexConfigs:
type: "object"
additionalProperties:
$ref: "#/definitions/IndexInfo"
example:
"127.0.0.1:5000":
"Name": "127.0.0.1:5000"
"Mirrors": []
"Secure": false
"Official": false
"[2001:db8:a0b:12f0::1]:80":
"Name": "[2001:db8:a0b:12f0::1]:80"
"Mirrors": []
"Secure": false
"Official": false
"docker.io":
Name: "docker.io"
Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
Secure: true
Official: true
"registry.internal.corp.example.com:3000":
Name: "registry.internal.corp.example.com:3000"
Mirrors: []
Secure: false
Official: false
Mirrors:
description: |
List of registry URLs that act as a mirror for the official
(`docker.io`) registry.
type: "array"
items:
type: "string"
example:
- "https://hub-mirror.corp.example.com:5000/"
- "https://[2001:db8:a0b:12f0::1]/"
IndexInfo:
description:
IndexInfo contains information about a registry.
type: "object"
x-nullable: true
properties:
Name:
description: |
Name of the registry, such as "docker.io".
type: "string"
example: "docker.io"
Mirrors:
description: |
List of mirrors, expressed as URIs.
type: "array"
items:
type: "string"
example:
- "https://hub-mirror.corp.example.com:5000/"
- "https://registry-2.docker.io/"
- "https://registry-3.docker.io/"
Secure:
description: |
Indicates if the registry is part of the list of insecure
registries.
If `false`, the registry is insecure. Insecure registries accept
un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
unknown CAs) communication.
> **Warning**: Insecure registries can be useful when running a local
> registry. However, because its use creates security vulnerabilities
> it should ONLY be enabled for testing purposes. For increased
> security, users should add their CA to their system's list of
> trusted CAs instead of enabling this option.
type: "boolean"
example: true
Official:
description: |
Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
type: "boolean"
example: true
Runtime:
description: |
Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
runtime.
The runtime is invoked by the daemon via the `containerd` daemon. OCI
runtimes act as an interface to the Linux kernel namespaces, cgroups,
and SELinux.
type: "object"
properties:
path:
description: |
Name and, optional, path, of the OCI executable binary.
If the path is omitted, the daemon searches the host's `$PATH` for the
binary and uses the first result.
type: "string"
example: "/usr/local/bin/my-oci-runtime"
runtimeArgs:
description: |
List of command-line arguments to pass to the runtime when invoked.
type: "array"
x-nullable: true
items:
type: "string"
example: ["--debug", "--systemd-cgroup=false"]
status:
description: |
Information specific to the runtime.
While this API specification does not define data provided by runtimes,
the following well-known properties may be provided by runtimes:
`org.opencontainers.runtime-spec.features`: features structure as defined
in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md),
in a JSON string representation.
<p><br /></p>
> **Note**: The information returned in this field, including the
> formatting of values and labels, should not be considered stable,
> and may change without notice.
type: "object"
x-nullable: true
additionalProperties:
type: "string"
example:
"org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}"
Commit:
description: |
Commit holds the Git-commit (SHA1) that a binary was built from, as
reported in the version-string of external tools, such as `containerd`,
or `runC`.
type: "object"
properties:
ID:
description: "Actual commit ID of external tool."
type: "string"
example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
SwarmInfo:
description: |
Represents generic information about swarm.
type: "object"
properties:
NodeID:
description: "Unique identifier of for this node in the swarm."
type: "string"
default: ""
example: "k67qz4598weg5unwwffg6z1m1"
NodeAddr:
description: |
IP address at which this node can be reached by other nodes in the
swarm.
type: "string"
default: ""
example: "10.0.0.46"
LocalNodeState:
$ref: "#/definitions/LocalNodeState"
ControlAvailable:
type: "boolean"
default: false
example: true
Error:
type: "string"
default: ""
RemoteManagers:
description: |
          List of IDs and addresses of other managers in the swarm.
type: "array"
default: null
x-nullable: true
items:
$ref: "#/definitions/PeerNode"
example:
- NodeID: "71izy0goik036k48jg985xnds"
Addr: "10.0.0.158:2377"
- NodeID: "79y6h1o4gv8n120drcprv5nmc"
Addr: "10.0.0.159:2377"
- NodeID: "k67qz4598weg5unwwffg6z1m1"
Addr: "10.0.0.46:2377"
Nodes:
description: "Total number of nodes in the swarm."
type: "integer"
x-nullable: true
example: 4
Managers:
description: "Total number of managers in the swarm."
type: "integer"
x-nullable: true
example: 3
Cluster:
$ref: "#/definitions/ClusterInfo"
LocalNodeState:
description: "Current local status of this node."
type: "string"
default: ""
enum:
- ""
- "inactive"
- "pending"
- "active"
- "error"
- "locked"
example: "active"
PeerNode:
description: "Represents a peer-node in the swarm"
type: "object"
properties:
NodeID:
description: "Unique identifier of for this node in the swarm."
type: "string"
Addr:
description: |
IP address and ports at which this node can be reached.
type: "string"
NetworkAttachmentConfig:
description: |
Specifies how a service should be attached to a particular network.
type: "object"
properties:
Target:
description: |
The target network for attachment. Must be a network name or ID.
type: "string"
Aliases:
description: |
Discoverable alternate names for the service on this network.
type: "array"
items:
type: "string"
DriverOpts:
description: |
Driver attachment options for the network target.
type: "object"
additionalProperties:
type: "string"
EventActor:
description: |
Actor describes something that generates events, like a container, network,
or a volume.
type: "object"
properties:
ID:
description: "The ID of the object emitting the event"
type: "string"
example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
Attributes:
description: |
Various key/value attributes of the object, depending on its type.
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-label-value"
image: "alpine:latest"
name: "my-container"
EventMessage:
description: |
EventMessage represents the information an event contains.
type: "object"
title: "SystemEventsResponse"
properties:
Type:
description: "The type of object emitting the event"
type: "string"
enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"]
example: "container"
Action:
description: "The type of event"
type: "string"
example: "create"
Actor:
$ref: "#/definitions/EventActor"
scope:
description: |
Scope of the event. Engine events are `local` scope. Cluster (Swarm)
events are `swarm` scope.
type: "string"
enum: ["local", "swarm"]
time:
description: "Timestamp of event"
type: "integer"
format: "int64"
example: 1629574695
timeNano:
description: "Timestamp of event, with nanosecond accuracy"
type: "integer"
format: "int64"
example: 1629574695515050031
OCIDescriptor:
type: "object"
x-go-name: Descriptor
description: |
A descriptor struct containing digest, media type, and size, as defined in
the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md).
properties:
mediaType:
description: |
The media type of the object this schema refers to.
type: "string"
example: "application/vnd.oci.image.manifest.v1+json"
digest:
description: |
The digest of the targeted content.
type: "string"
example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
size:
description: |
The size in bytes of the blob.
type: "integer"
format: "int64"
example: 424
urls:
description: |-
List of URLs from which this object MAY be downloaded.
type: "array"
items:
type: "string"
format: "uri"
x-nullable: true
annotations:
description: |-
Arbitrary metadata relating to the targeted content.
type: "object"
x-nullable: true
additionalProperties:
type: "string"
example:
"com.docker.official-images.bashbrew.arch": "amd64"
"org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8"
"org.opencontainers.image.base.name": "scratch"
"org.opencontainers.image.created": "2025-01-27T00:00:00Z"
"org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79"
"org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base"
"org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu"
"org.opencontainers.image.version": "24.04"
data:
type: string
x-nullable: true
description: |-
Data is an embedding of the targeted content. This is encoded as a base64
string when marshalled to JSON (automatically, by encoding/json). If
present, Data can be used directly to avoid fetching the targeted content.
example: null
platform:
$ref: "#/definitions/OCIPlatform"
artifactType:
description: |-
ArtifactType is the IANA media type of this artifact.
type: "string"
x-nullable: true
example: null
OCIPlatform:
type: "object"
x-go-name: Platform
x-nullable: true
description: |
Describes the platform which the image in the manifest runs on, as defined
in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md).
properties:
architecture:
description: |
The CPU architecture, for example `amd64` or `ppc64`.
type: "string"
example: "arm"
os:
description: |
The operating system, for example `linux` or `windows`.
type: "string"
example: "windows"
os.version:
description: |
Optional field specifying the operating system version, for example on
Windows `10.0.19041.1165`.
type: "string"
example: "10.0.19041.1165"
os.features:
description: |
Optional field specifying an array of strings, each listing a required
OS feature (for example on Windows `win32k`).
type: "array"
items:
type: "string"
example:
- "win32k"
variant:
description: |
Optional field specifying a variant of the CPU, for example `v7` to
specify ARMv7 when architecture is `arm`.
type: "string"
example: "v7"
DistributionInspect:
type: "object"
x-go-name: DistributionInspect
title: "DistributionInspectResponse"
required: [Descriptor, Platforms]
description: |
Describes the result obtained from contacting the registry to retrieve
image metadata.
properties:
Descriptor:
$ref: "#/definitions/OCIDescriptor"
Platforms:
type: "array"
description: |
An array containing all platforms supported by the image.
items:
$ref: "#/definitions/OCIPlatform"
ClusterVolume:
type: "object"
description: |
Options and information specific to, and only present on, Swarm CSI
cluster volumes.
properties:
ID:
type: "string"
description: |
The Swarm ID of this volume. Because cluster volumes are Swarm
objects, they have an ID, unlike non-cluster volumes. This ID can
be used to refer to the Volume instead of the name.
Version:
$ref: "#/definitions/ObjectVersion"
CreatedAt:
type: "string"
format: "dateTime"
UpdatedAt:
type: "string"
format: "dateTime"
Spec:
$ref: "#/definitions/ClusterVolumeSpec"
Info:
type: "object"
description: |
Information about the global status of the volume.
properties:
CapacityBytes:
type: "integer"
format: "int64"
description: |
The capacity of the volume in bytes. A value of 0 indicates that
the capacity is unknown.
VolumeContext:
type: "object"
description: |
A map of strings to strings returned from the storage plugin when
the volume is created.
additionalProperties:
type: "string"
VolumeID:
type: "string"
description: |
The ID of the volume as returned by the CSI storage plugin. This
is distinct from the volume's ID as provided by Docker. This ID
is never used by the user when communicating with Docker to refer
to this volume. If the ID is blank, then the Volume has not been
successfully created in the plugin yet.
AccessibleTopology:
type: "array"
description: |
The topology this volume is actually accessible from.
items:
$ref: "#/definitions/Topology"
PublishStatus:
type: "array"
description: |
The status of the volume as it pertains to its publishing and use on
specific nodes
items:
type: "object"
properties:
NodeID:
type: "string"
description: |
The ID of the Swarm node the volume is published on.
State:
type: "string"
description: |
The published state of the volume.
* `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed.
* `published` The volume is published successfully to the node.
* `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so.
* `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller.
enum:
- "pending-publish"
- "published"
- "pending-node-unpublish"
- "pending-controller-unpublish"
PublishContext:
type: "object"
description: |
A map of strings to strings returned by the CSI controller
plugin when a volume is published.
additionalProperties:
type: "string"
ClusterVolumeSpec:
type: "object"
description: |
Cluster-specific options used to create the volume.
properties:
Group:
type: "string"
description: |
Group defines the volume group of this volume. Volumes belonging to
the same group can be referred to by group name when creating
Services. Referring to a volume by group instructs Swarm to treat
volumes in that group interchangeably for the purpose of scheduling.
Volumes with an empty string for a group technically all belong to
          the same, empty-string group.
AccessMode:
type: "object"
description: |
Defines how the volume is used by tasks.
properties:
Scope:
type: "string"
description: |
The set of nodes this volume can be used on at one time.
- `single` The volume may only be scheduled to one node at a time.
- `multi` the volume may be scheduled to any supported number of nodes at a time.
default: "single"
enum: ["single", "multi"]
x-nullable: false
Sharing:
type: "string"
description: |
The number and way that different tasks can use this volume
at one time.
- `none` The volume may only be used by one task at a time.
- `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly
- `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write.
- `all` The volume may have any number of readers and writers.
default: "none"
enum: ["none", "readonly", "onewriter", "all"]
x-nullable: false
MountVolume:
type: "object"
description: |
Options for using this volume as a Mount-type volume.
Either MountVolume or BlockVolume, but not both, must be
present.
properties:
FsType:
type: "string"
description: |
Specifies the filesystem type for the mount volume.
Optional.
MountFlags:
type: "array"
description: |
Flags to pass when mounting the volume. Optional.
items:
type: "string"
BlockVolume:
type: "object"
description: |
Options for using this volume as a Block-type volume.
Intentionally empty.
Secrets:
type: "array"
description: |
Swarm Secrets that are passed to the CSI storage plugin when
operating on this volume.
items:
type: "object"
description: |
One cluster volume secret entry. Defines a key-value pair that
is passed to the plugin.
properties:
Key:
type: "string"
description: |
Key is the name of the key of the key-value pair passed to
the plugin.
Secret:
type: "string"
description: |
Secret is the swarm Secret object from which to read data.
This can be a Secret name or ID. The Secret data is
retrieved by swarm and used as the value of the key-value
pair passed to the plugin.
AccessibilityRequirements:
type: "object"
description: |
Requirements for the accessible topology of the volume. These
fields are optional. For an in-depth description of what these
fields mean, see the CSI specification.
properties:
Requisite:
type: "array"
description: |
A list of required topologies, at least one of which the
volume must be accessible from.
items:
$ref: "#/definitions/Topology"
Preferred:
type: "array"
description: |
A list of topologies that the volume should attempt to be
provisioned in.
items:
$ref: "#/definitions/Topology"
CapacityRange:
type: "object"
description: |
The desired capacity that the volume should be created with. If
empty, the plugin will decide the capacity.
properties:
RequiredBytes:
type: "integer"
format: "int64"
description: |
The volume must be at least this big. The value of 0
indicates an unspecified minimum
LimitBytes:
type: "integer"
format: "int64"
description: |
The volume must not be bigger than this. The value of 0
indicates an unspecified maximum.
Availability:
type: "string"
description: |
The availability of the volume for use in tasks.
- `active` The volume is fully available for scheduling on the cluster
- `pause` No new workloads should use the volume, but existing workloads are not stopped.
- `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started.
default: "active"
x-nullable: false
enum:
- "active"
- "pause"
- "drain"
Topology:
description: |
A map of topological domains to topological segments. For in depth
details, see documentation for the Topology object in the CSI
specification.
type: "object"
additionalProperties:
type: "string"
ImageManifestSummary:
x-go-name: "ManifestSummary"
description: |
ImageManifestSummary represents a summary of an image manifest.
type: "object"
required: ["ID", "Descriptor", "Available", "Size", "Kind"]
properties:
ID:
description: |
ID is the content-addressable ID of an image and is the same as the
digest of the image manifest.
type: "string"
example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
Descriptor:
$ref: "#/definitions/OCIDescriptor"
Available:
description: Indicates whether all the child content (image config, layers) is fully available locally.
type: "boolean"
example: true
Size:
type: "object"
x-nullable: false
required: ["Content", "Total"]
properties:
Total:
type: "integer"
format: "int64"
example: 8213251
description: |
Total is the total size (in bytes) of all the locally present
data (both distributable and non-distributable) that's related to
this manifest and its children.
              This is equal to the sum of [Content] size AND all the sizes in the
[Size] struct present in the Kind-specific data struct.
For example, for an image kind (Kind == "image")
this would include the size of the image content and unpacked
image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
Content:
description: |
Content is the size (in bytes) of all the locally present
content in the content store (e.g. image config, layers)
referenced by this manifest and its children.
This only includes blobs in the content store.
type: "integer"
format: "int64"
example: 3987495
Kind:
type: "string"
example: "image"
enum:
- "image"
- "attestation"
- "unknown"
description: |
The kind of the manifest.
kind | description
-------------|-----------------------------------------------------------
image | Image manifest that can be used to start a container.
attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest.
ImageData:
description: |
The image data for the image manifest.
This field is only populated when Kind is "image".
type: "object"
x-nullable: true
x-omitempty: true
required: ["Platform", "Containers", "Size", "UnpackedSize"]
properties:
Platform:
$ref: "#/definitions/OCIPlatform"
description: |
OCI platform of the image. This will be the platform specified in the
manifest descriptor from the index/manifest list.
If it's not available, it will be obtained from the image config.
Containers:
description: |
The IDs of the containers that are using this image.
type: "array"
items:
type: "string"
example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"]
Size:
type: "object"
x-nullable: false
required: ["Unpacked"]
properties:
Unpacked:
type: "integer"
format: "int64"
example: 3987495
description: |
Unpacked is the size (in bytes) of the locally unpacked
(uncompressed) image content that's directly usable by the containers
running this image.
It's independent of the distributable content - e.g.
the image might still have an unpacked data that's still used by
some container even when the distributable/compressed content is
already gone.
AttestationData:
description: |
The image data for the attestation manifest.
This field is only populated when Kind is "attestation".
type: "object"
x-nullable: true
x-omitempty: true
required: ["For"]
properties:
For:
description: |
The digest of the image manifest that this attestation is for.
type: "string"
example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
paths:
/containers/json:
get:
summary: "List containers"
description: |
Returns a list of containers. For details on the format, see the
[inspect endpoint](#operation/ContainerInspect).
Note that it uses a different, smaller representation of a container
than inspecting a single container. For example, the list of linked
        containers is not propagated.
operationId: "ContainerList"
produces:
- "application/json"
parameters:
- name: "all"
in: "query"
description: |
Return all containers. By default, only running containers are shown.
type: "boolean"
default: false
- name: "limit"
in: "query"
description: |
Return this number of most recently created containers, including
non-running ones.
type: "integer"
- name: "size"
in: "query"
description: |
Return the size of container as fields `SizeRw` and `SizeRootFs`.
type: "boolean"
default: false
- name: "filters"
in: "query"
description: |
Filters to process on the container list, encoded as JSON (a
`map[string][]string`). For example, `{"status": ["paused"]}` will
only return paused containers.
Available filters:
- `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
- `before`=(`<container id>` or `<container name>`)
- `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
- `exited=<int>` containers with exit code of `<int>`
- `health`=(`starting`|`healthy`|`unhealthy`|`none`)
- `id=<ID>` a container's ID
- `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
- `is-task=`(`true`|`false`)
- `label=key` or `label="key=value"` of a container label
- `name=<name>` a container's name
- `network`=(`<network id>` or `<network name>`)
- `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
- `since`=(`<container id>` or `<container name>`)
- `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
- `volume`=(`<volume name>` or `<mount point destination>`)
type: "string"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/ContainerSummary"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Container"]
/containers/create:
post:
summary: "Create a container"
operationId: "ContainerCreate"
consumes:
- "application/json"
- "application/octet-stream"
produces:
- "application/json"
parameters:
- name: "name"
in: "query"
description: |
Assign the specified name to the container. Must match
`/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
type: "string"
pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
- name: "platform"
in: "query"
description: |
Platform in the format `os[/arch[/variant]]` used for image lookup.
When specified, the daemon checks if the requested image is present
in the local image cache with the given OS and Architecture, and
otherwise returns a `404` status.
If the option is not set, the host's native OS and Architecture are
used to look up the image in the image cache. However, if no platform
is passed and the given image does exist in the local image cache,
but its OS or architecture does not match, the container is created
with the available image, and a warning is added to the `Warnings`
field in the response, for example;
WARNING: The requested image's platform (linux/arm64/v8) does not
match the detected host platform (linux/amd64) and no
specific platform was requested
type: "string"
default: ""
- name: "body"
in: "body"
description: "Container to create"
schema:
allOf:
- $ref: "#/definitions/ContainerConfig"
- type: "object"
properties:
HostConfig:
$ref: "#/definitions/HostConfig"
NetworkingConfig:
$ref: "#/definitions/NetworkingConfig"
example:
Hostname: ""
Domainname: ""
User: ""
AttachStdin: false
AttachStdout: true
AttachStderr: true
Tty: false
OpenStdin: false
StdinOnce: false
Env:
- "FOO=bar"
- "BAZ=quux"
Cmd:
- "date"
Entrypoint: ""
Image: "ubuntu"
Labels:
com.example.vendor: "Acme"
com.example.license: "GPL"
com.example.version: "1.0"
Volumes:
/volumes/data: {}
WorkingDir: ""
NetworkDisabled: false
MacAddress: "12:34:56:78:9a:bc"
ExposedPorts:
22/tcp: {}
StopSignal: "SIGTERM"
StopTimeout: 10
HostConfig:
Binds:
- "/tmp:/tmp"
Links:
- "redis3:redis"
Memory: 0
MemorySwap: 0
MemoryReservation: 0
NanoCpus: 500000
CpuPercent: 80
CpuShares: 512
CpuPeriod: 100000
CpuRealtimePeriod: 1000000
CpuRealtimeRuntime: 10000
CpuQuota: 50000
CpusetCpus: "0,1"
CpusetMems: "0,1"
MaximumIOps: 0
MaximumIOBps: 0
BlkioWeight: 300
BlkioWeightDevice:
- {}
BlkioDeviceReadBps:
- {}
BlkioDeviceReadIOps:
- {}
BlkioDeviceWriteBps:
- {}
BlkioDeviceWriteIOps:
- {}
DeviceRequests:
- Driver: "nvidia"
Count: -1
                    DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
Capabilities: [["gpu", "nvidia", "compute"]]
Options:
property1: "string"
property2: "string"
MemorySwappiness: 60
OomKillDisable: false
OomScoreAdj: 500
PidMode: ""
PidsLimit: 0
PortBindings:
22/tcp:
- HostPort: "11022"
PublishAllPorts: false
Privileged: false
ReadonlyRootfs: false
Dns:
- "8.8.8.8"
DnsOptions:
- ""
DnsSearch:
- ""
VolumesFrom:
- "parent"
- "other:ro"
CapAdd:
- "NET_ADMIN"
CapDrop:
- "MKNOD"
GroupAdd:
- "newgroup"
RestartPolicy:
Name: ""
MaximumRetryCount: 0
AutoRemove: true
NetworkMode: "bridge"
Devices: []
Ulimits:
- {}
LogConfig:
Type: "json-file"
Config: {}
SecurityOpt: []
StorageOpt: {}
CgroupParent: ""
VolumeDriver: ""
ShmSize: 67108864
NetworkingConfig:
EndpointsConfig:
isolated_nw:
IPAMConfig:
IPv4Address: "172.20.30.33"
IPv6Address: "2001:db8:abcd::3033"
LinkLocalIPs:
- "169.254.34.68"
- "fe80::3468"
Links:
- "container_1"
- "container_2"
Aliases:
- "server_x"
- "server_y"
database_nw: {}
required: true
responses:
201:
description: "Container created successfully"
schema:
$ref: "#/definitions/ContainerCreateResponse"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such image"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such image: c2ada9df5af8"
409:
description: "conflict"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Container"]
/containers/{id}/json:
get:
summary: "Inspect a container"
description: "Return low-level information about a container."
operationId: "ContainerInspect"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/ContainerInspectResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "size"
in: "query"
type: "boolean"
default: false
description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
tags: ["Container"]
/containers/{id}/top:
get:
summary: "List processes running inside a container"
description: |
On Unix systems, this is done by running the `ps` command. This endpoint
is not supported on Windows.
operationId: "ContainerTop"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/ContainerTopResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "ps_args"
in: "query"
description: "The arguments to pass to `ps`. For example, `aux`"
type: "string"
default: "-ef"
tags: ["Container"]
/containers/{id}/logs:
get:
summary: "Get container logs"
description: |
Get `stdout` and `stderr` logs from a container.
Note: This endpoint works only for containers with the `json-file` or
`journald` logging driver.
produces:
- "application/vnd.docker.raw-stream"
- "application/vnd.docker.multiplexed-stream"
operationId: "ContainerLogs"
responses:
200:
description: |
logs returned as a stream in response body.
For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
Note that unlike the attach endpoint, the logs endpoint does not
upgrade the connection and does not set Content-Type.
schema:
type: "string"
format: "binary"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "follow"
in: "query"
description: "Keep connection after returning logs."
type: "boolean"
default: false
- name: "stdout"
in: "query"
description: "Return logs from `stdout`"
type: "boolean"
default: false
- name: "stderr"
in: "query"
description: "Return logs from `stderr`"
type: "boolean"
default: false
- name: "since"
in: "query"
description: "Only return logs since this time, as a UNIX timestamp"
type: "integer"
default: 0
- name: "until"
in: "query"
description: "Only return logs before this time, as a UNIX timestamp"
type: "integer"
default: 0
- name: "timestamps"
in: "query"
description: "Add timestamps to every log line"
type: "boolean"
default: false
- name: "tail"
in: "query"
description: |
Only return this number of log lines from the end of the logs.
Specify as an integer or `all` to output all log lines.
type: "string"
default: "all"
tags: ["Container"]
/containers/{id}/changes:
get:
summary: "Get changes on a container’s filesystem"
description: |
Returns which files in a container's filesystem have been added, deleted,
or modified. The `Kind` of modification can be one of:
- `0`: Modified ("C")
- `1`: Added ("A")
- `2`: Deleted ("D")
operationId: "ContainerChanges"
produces: ["application/json"]
responses:
200:
description: "The list of changes"
schema:
type: "array"
items:
$ref: "#/definitions/FilesystemChange"
examples:
application/json:
- Path: "/dev"
Kind: 0
- Path: "/dev/kmsg"
Kind: 1
- Path: "/test"
Kind: 1
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
tags: ["Container"]
/containers/{id}/export:
get:
summary: "Export a container"
description: "Export the contents of a container as a tarball."
operationId: "ContainerExport"
produces:
- "application/octet-stream"
responses:
200:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
tags: ["Container"]
/containers/{id}/stats:
get:
summary: "Get container stats based on resource usage"
description: |
This endpoint returns a live stream of a container’s resource usage
statistics.
The `precpu_stats` is the CPU statistic of the *previous* read, and is
used to calculate the CPU usage percentage. It is not an exact copy
of the `cpu_stats` field.
If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
nil then for compatibility with older daemons the length of the
corresponding `cpu_usage.percpu_usage` array should be used.
On a cgroup v2 host, the following fields are not set
* `blkio_stats`: all fields other than `io_service_bytes_recursive`
* `cpu_stats`: `cpu_usage.percpu_usage`
* `memory_stats`: `max_usage` and `failcnt`
Also, `memory_stats.stats` fields are incompatible with cgroup v1.
To calculate the values shown by the `stats` command of the docker cli tool
the following formulas can be used:
* used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1)
* used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2)
* available_memory = `memory_stats.limit`
* Memory usage % = `(used_memory / available_memory) * 100.0`
* cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
* system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
* number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
* CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
operationId: "ContainerStats"
produces: ["application/json"]
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/ContainerStatsResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "stream"
in: "query"
description: |
Stream the output. If false, the stats will be output once and then
it will disconnect.
type: "boolean"
default: true
- name: "one-shot"
in: "query"
description: |
Only get a single stat instead of waiting for 2 cycles. Must be used
with `stream=false`.
type: "boolean"
default: false
tags: ["Container"]
/containers/{id}/resize:
post:
summary: "Resize a container TTY"
description: "Resize the TTY for a container."
operationId: "ContainerResize"
consumes:
- "application/octet-stream"
produces:
- "text/plain"
responses:
200:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "cannot resize container"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "h"
in: "query"
required: true
description: "Height of the TTY session in characters"
type: "integer"
- name: "w"
in: "query"
required: true
description: "Width of the TTY session in characters"
type: "integer"
tags: ["Container"]
/containers/{id}/start:
post:
summary: "Start a container"
operationId: "ContainerStart"
responses:
204:
description: "no error"
304:
description: "container already started"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "detachKeys"
in: "query"
description: |
Override the key sequence for detaching a container. Format is a
single character `[a-Z]` or `ctrl-<value>` where `<value>` is one
of: `a-z`, `@`, `^`, `[`, `,` or `_`.
type: "string"
tags: ["Container"]
/containers/{id}/stop:
post:
summary: "Stop a container"
operationId: "ContainerStop"
responses:
204:
description: "no error"
304:
description: "container already stopped"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "signal"
in: "query"
description: |
Signal to send to the container as an integer or string (e.g. `SIGINT`).
type: "string"
- name: "t"
in: "query"
description: "Number of seconds to wait before killing the container"
type: "integer"
tags: ["Container"]
/containers/{id}/restart:
post:
summary: "Restart a container"
operationId: "ContainerRestart"
responses:
204:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "signal"
in: "query"
description: |
Signal to send to the container as an integer or string (e.g. `SIGINT`).
type: "string"
- name: "t"
in: "query"
description: "Number of seconds to wait before killing the container"
type: "integer"
tags: ["Container"]
/containers/{id}/kill:
post:
summary: "Kill a container"
description: |
        Send a POSIX signal to a container, defaulting to killing the
        container.
operationId: "ContainerKill"
responses:
204:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
409:
description: "container is not running"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "signal"
in: "query"
description: |
Signal to send to the container as an integer or string (e.g. `SIGINT`).
type: "string"
default: "SIGKILL"
tags: ["Container"]
/containers/{id}/update:
post:
summary: "Update a container"
description: |
Change various configuration options of a container without having to
recreate it.
operationId: "ContainerUpdate"
consumes: ["application/json"]
produces: ["application/json"]
responses:
200:
description: "The container has been updated."
schema:
$ref: "#/definitions/ContainerUpdateResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "update"
in: "body"
required: true
schema:
allOf:
- $ref: "#/definitions/Resources"
- type: "object"
properties:
RestartPolicy:
$ref: "#/definitions/RestartPolicy"
example:
BlkioWeight: 300
CpuShares: 512
CpuPeriod: 100000
CpuQuota: 50000
CpuRealtimePeriod: 1000000
CpuRealtimeRuntime: 10000
CpusetCpus: "0,1"
CpusetMems: "0"
Memory: 314572800
MemorySwap: 514288000
MemoryReservation: 209715200
RestartPolicy:
MaximumRetryCount: 4
Name: "on-failure"
tags: ["Container"]
/containers/{id}/rename:
post:
summary: "Rename a container"
operationId: "ContainerRename"
responses:
204:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
409:
description: "name already in use"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "name"
in: "query"
required: true
description: "New name for the container"
type: "string"
tags: ["Container"]
/containers/{id}/pause:
post:
summary: "Pause a container"
description: |
Use the freezer cgroup to suspend all processes in a container.
Traditionally, when suspending a process the `SIGSTOP` signal is used,
which is observable by the process being suspended. With the freezer
cgroup the process is unaware, and unable to capture, that it is being
suspended, and subsequently resumed.
operationId: "ContainerPause"
responses:
204:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
tags: ["Container"]
/containers/{id}/unpause:
post:
summary: "Unpause a container"
description: "Resume a container which has been paused."
operationId: "ContainerUnpause"
responses:
204:
description: "no error"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
tags: ["Container"]
/containers/{id}/attach:
post:
summary: "Attach to a container"
description: |
Attach to a container to read its output or send it input. You can attach
to the same container multiple times and you can reattach to containers
that have been detached.
Either the `stream` or `logs` parameter must be `true` for this endpoint
to do anything.
See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
for more details.
### Hijacking
This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
and `stderr` on the same socket.
This is the response from the daemon for an attach request:
```
HTTP/1.1 200 OK
Content-Type: application/vnd.docker.raw-stream
[STREAM]
```
After the headers and two new lines, the TCP connection can now be used
for raw, bidirectional communication between the client and server.
To hint potential proxies about connection hijacking, the Docker client
can also optionally send connection upgrade headers.
For example, the client sends this request to upgrade the connection:
```
POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
Upgrade: tcp
Connection: Upgrade
```
The Docker daemon will respond with a `101 UPGRADED` response, and will
similarly follow with the raw stream:
```
HTTP/1.1 101 UPGRADED
Content-Type: application/vnd.docker.raw-stream
Connection: Upgrade
Upgrade: tcp
[STREAM]
```
### Stream format
When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream
        and the stream over the hijacked connection is multiplexed to separate out
`stdout` and `stderr`. The stream consists of a series of frames, each
containing a header and a payload.
The header contains the information which the stream writes (`stdout` or
`stderr`). It also contains the size of the associated frame encoded in
the last four bytes (`uint32`).
It is encoded on the first eight bytes like this:
```go
header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
```
`STREAM_TYPE` can be:
- 0: `stdin` (is written on `stdout`)
- 1: `stdout`
- 2: `stderr`
`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
encoded as big endian.
Following the header is the payload, which is the specified number of
bytes of `STREAM_TYPE`.
The simplest way to implement this protocol is the following:
1. Read 8 bytes.
2. Choose `stdout` or `stderr` depending on the first byte.
3. Extract the frame size from the last four bytes.
4. Read the extracted size and output it on the correct output.
5. Goto 1.
### Stream format when using a TTY
When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
the stream is not multiplexed. The data exchanged over the hijacked
connection is simply the raw data from the process PTY and client's
`stdin`.
operationId: "ContainerAttach"
produces:
- "application/vnd.docker.raw-stream"
- "application/vnd.docker.multiplexed-stream"
responses:
101:
description: "no error, hints proxy about hijacking"
200:
description: "no error, no upgrade header found"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "detachKeys"
in: "query"
description: |
Override the key sequence for detaching a container. Format is a single
character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
`@`, `^`, `[`, `,` or `_`.
type: "string"
- name: "logs"
in: "query"
description: |
Replay previous logs from the container.
This is useful for attaching to a container that has started and you
want to output everything since the container started.
If `stream` is also enabled, once all the previous output has been
returned, it will seamlessly transition into streaming current
output.
type: "boolean"
default: false
- name: "stream"
in: "query"
description: |
Stream attached streams from the time the request was made onwards.
type: "boolean"
default: false
- name: "stdin"
in: "query"
description: "Attach to `stdin`"
type: "boolean"
default: false
- name: "stdout"
in: "query"
description: "Attach to `stdout`"
type: "boolean"
default: false
- name: "stderr"
in: "query"
description: "Attach to `stderr`"
type: "boolean"
default: false
tags: ["Container"]
/containers/{id}/attach/ws:
get:
summary: "Attach to a container via a websocket"
operationId: "ContainerAttachWebsocket"
responses:
101:
description: "no error, hints proxy about hijacking"
200:
description: "no error, no upgrade header found"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "detachKeys"
in: "query"
description: |
Override the key sequence for detaching a container. Format is a single
character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
`@`, `^`, `[`, `,`, or `_`.
type: "string"
- name: "logs"
in: "query"
description: "Return logs"
type: "boolean"
default: false
- name: "stream"
in: "query"
description: "Return stream"
type: "boolean"
default: false
- name: "stdin"
in: "query"
description: "Attach to `stdin`"
type: "boolean"
default: false
- name: "stdout"
in: "query"
description: "Attach to `stdout`"
type: "boolean"
default: false
- name: "stderr"
in: "query"
description: "Attach to `stderr`"
type: "boolean"
default: false
tags: ["Container"]
/containers/{id}/wait:
post:
summary: "Wait for a container"
description: "Block until a container stops, then returns the exit code."
operationId: "ContainerWait"
produces: ["application/json"]
responses:
200:
description: "The container has exited."
schema:
$ref: "#/definitions/ContainerWaitResponse"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "condition"
in: "query"
description: |
Wait until a container state reaches the given condition.
Defaults to `not-running` if omitted or empty.
type: "string"
enum:
- "not-running"
- "next-exit"
- "removed"
default: "not-running"
tags: ["Container"]
/containers/{id}:
delete:
summary: "Remove a container"
operationId: "ContainerDelete"
responses:
204:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
409:
description: "conflict"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: |
You cannot remove a running container: c2ada9df5af8. Stop the
container before attempting removal or force remove
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "v"
in: "query"
description: "Remove anonymous volumes associated with the container."
type: "boolean"
default: false
- name: "force"
in: "query"
description: "If the container is running, kill it before removing it."
type: "boolean"
default: false
- name: "link"
in: "query"
description: "Remove the specified link associated with the container."
type: "boolean"
default: false
tags: ["Container"]
/containers/{id}/archive:
head:
summary: "Get information about files in a container"
description: |
A response header `X-Docker-Container-Path-Stat` is returned, containing
a base64-encoded JSON object with some filesystem header information
about the path.
operationId: "ContainerArchiveInfo"
responses:
200:
description: "no error"
headers:
X-Docker-Container-Path-Stat:
type: "string"
description: |
A base64-encoded JSON object with some filesystem header
information about the path
400:
description: "Bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "Container or path does not exist"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "path"
in: "query"
required: true
description: "Resource in the container’s filesystem to archive."
type: "string"
tags: ["Container"]
get:
summary: "Get an archive of a filesystem resource in a container"
description: "Get a tar archive of a resource in the filesystem of container id."
operationId: "ContainerArchive"
produces: ["application/x-tar"]
responses:
200:
description: "no error"
400:
description: "Bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "Container or path does not exist"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "path"
in: "query"
required: true
description: "Resource in the container’s filesystem to archive."
type: "string"
tags: ["Container"]
put:
summary: "Extract an archive of files or folders to a directory in a container"
description: |
Upload a tar archive to be extracted to a path in the filesystem of container id.
`path` parameter is asserted to be a directory. If it exists as a file, 400 error
will be returned with message "not a directory".
operationId: "PutContainerArchive"
consumes: ["application/x-tar", "application/octet-stream"]
responses:
200:
description: "The content was extracted successfully"
400:
description: "Bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "not a directory"
403:
description: "Permission denied, the volume or container rootfs is marked as read-only."
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "No such container or path does not exist inside the container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "path"
in: "query"
required: true
description: "Path to a directory in the container to extract the archive’s contents into. "
type: "string"
- name: "noOverwriteDirNonDir"
in: "query"
description: |
If `1`, `true`, or `True` then it will be an error if unpacking the
given content would cause an existing directory to be replaced with
a non-directory and vice versa.
type: "string"
- name: "copyUIDGID"
in: "query"
description: |
If `1`, `true`, then it will copy UID/GID maps to the dest file or
dir
type: "string"
- name: "inputStream"
in: "body"
required: true
description: |
The input stream must be a tar archive compressed with one of the
following algorithms: `identity` (no compression), `gzip`, `bzip2`,
or `xz`.
schema:
type: "string"
format: "binary"
tags: ["Container"]
/containers/prune:
post:
summary: "Delete stopped containers"
produces:
- "application/json"
operationId: "ContainerPrune"
parameters:
- name: "filters"
in: "query"
description: |
Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
Available filters:
- `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
type: "string"
responses:
200:
description: "No error"
schema:
type: "object"
title: "ContainerPruneResponse"
properties:
ContainersDeleted:
description: "Container IDs that were deleted"
type: "array"
items:
type: "string"
SpaceReclaimed:
description: "Disk space reclaimed in bytes"
type: "integer"
format: "int64"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Container"]
/images/json:
get:
summary: "List Images"
description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
operationId: "ImageList"
produces:
- "application/json"
responses:
200:
description: "Summary image data for the images matching the query"
schema:
type: "array"
items:
$ref: "#/definitions/ImageSummary"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "all"
in: "query"
description: "Show all images. Only images from a final layer (no children) are shown by default."
type: "boolean"
default: false
- name: "filters"
in: "query"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the images list.
Available filters:
- `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- `dangling=true`
- `label=key` or `label="key=value"` of an image label
- `reference`=(`<image-name>[:<tag>]`)
- `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- `until=<timestamp>`
type: "string"
- name: "shared-size"
in: "query"
description: "Compute and show shared size as a `SharedSize` field on each image."
type: "boolean"
default: false
- name: "digests"
in: "query"
description: "Show digest information as a `RepoDigests` field on each image."
type: "boolean"
default: false
- name: "manifests"
in: "query"
description: "Include `Manifests` in the image summary."
type: "boolean"
default: false
tags: ["Image"]
/build:
post:
summary: "Build an image"
description: |
Build an image from a tar archive with a `Dockerfile` in it.
The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
The build is canceled if the client drops the connection by quitting or being killed.
operationId: "ImageBuild"
consumes:
- "application/octet-stream"
produces:
- "application/json"
parameters:
- name: "inputStream"
in: "body"
description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
schema:
type: "string"
format: "binary"
- name: "dockerfile"
in: "query"
description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
type: "string"
default: "Dockerfile"
- name: "t"
in: "query"
description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
type: "string"
- name: "extrahosts"
in: "query"
description: "Extra hosts to add to /etc/hosts"
type: "string"
- name: "remote"
in: "query"
description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
type: "string"
- name: "q"
in: "query"
description: "Suppress verbose build output."
type: "boolean"
default: false
- name: "nocache"
in: "query"
description: "Do not use the cache when building the image."
type: "boolean"
default: false
- name: "cachefrom"
in: "query"
description: "JSON array of images used for build cache resolution."
type: "string"
- name: "pull"
in: "query"
description: "Attempt to pull the image even if an older image exists locally."
type: "string"
- name: "rm"
in: "query"
description: "Remove intermediate containers after a successful build."
type: "boolean"
default: true
- name: "forcerm"
in: "query"
description: "Always remove intermediate containers, even upon failure."
type: "boolean"
default: false
- name: "memory"
in: "query"
description: "Set memory limit for build."
type: "integer"
- name: "memswap"
in: "query"
description: "Total memory (memory + swap). Set as `-1` to disable swap."
type: "integer"
- name: "cpushares"
in: "query"
description: "CPU shares (relative weight)."
type: "integer"
- name: "cpusetcpus"
in: "query"
description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
type: "string"
- name: "cpuperiod"
in: "query"
description: "The length of a CPU period in microseconds."
type: "integer"
- name: "cpuquota"
in: "query"
description: "Microseconds of CPU time that the container can get in a CPU period."
type: "integer"
- name: "buildargs"
in: "query"
description: >
JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
passing secret values.
For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
[Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
type: "string"
- name: "shmsize"
in: "query"
description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
type: "integer"
- name: "squash"
in: "query"
description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
type: "boolean"
- name: "labels"
in: "query"
description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
type: "string"
- name: "networkmode"
in: "query"
description: |
Sets the networking mode for the run commands during build. Supported
standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
Any other value is taken as a custom network's name or ID to which this
container should connect to.
type: "string"
- name: "Content-type"
in: "header"
type: "string"
enum:
- "application/x-tar"
default: "application/x-tar"
- name: "X-Registry-Config"
in: "header"
description: |
This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
```
{
"docker.example.com": {
"username": "janedoe",
"password": "hunter2"
},
"https://index.docker.io/v1/": {
"username": "mobydock",
"password": "conta1n3rize14"
}
}
```
Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
type: "string"
- name: "platform"
in: "query"
description: "Platform in the format os[/arch[/variant]]"
type: "string"
default: ""
- name: "target"
in: "query"
description: "Target build stage"
type: "string"
default: ""
- name: "outputs"
in: "query"
description: |
BuildKit output configuration in the format of a stringified JSON array of objects.
Each object must have two top-level properties: `Type` and `Attrs`.
The `Type` property must be set to 'moby'.
The `Attrs` property is a map of attributes for the BuildKit output configuration.
See https://docs.docker.com/build/exporters/oci-docker/ for more information.
Example:
```
[{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}]
```
type: "string"
default: ""
- name: "version"
in: "query"
type: "string"
default: "1"
enum: ["1", "2"]
description: |
Version of the builder backend to use.
- `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
- `2` is [BuildKit](https://github.com/moby/buildkit)
responses:
200:
description: "no error"
400:
description: "Bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Image"]
/build/prune:
post:
summary: "Delete builder cache"
produces:
- "application/json"
operationId: "BuildPrune"
parameters:
- name: "keep-storage"
in: "query"
description: |
Amount of disk space in bytes to keep for cache
> **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space".
> It is kept for backward compatibility and will be removed in API v1.52.
type: "integer"
format: "int64"
- name: "reserved-space"
in: "query"
description: "Amount of disk space in bytes to keep for cache"
type: "integer"
format: "int64"
- name: "max-used-space"
in: "query"
description: "Maximum amount of disk space allowed to keep for cache"
type: "integer"
format: "int64"
- name: "min-free-space"
in: "query"
description: "Target amount of free disk space after pruning"
type: "integer"
format: "int64"
- name: "all"
in: "query"
type: "boolean"
description: "Remove all types of build cache"
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the list of build cache objects.
Available filters:
- `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time.
- `id=<id>`
- `parent=<id>`
- `type=<string>`
- `description=<string>`
- `inuse`
- `shared`
- `private`
responses:
200:
description: "No error"
schema:
type: "object"
title: "BuildPruneResponse"
properties:
CachesDeleted:
type: "array"
items:
description: "ID of build cache object"
type: "string"
SpaceReclaimed:
description: "Disk space reclaimed in bytes"
type: "integer"
format: "int64"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Image"]
/images/create:
post:
summary: "Create an image"
description: "Pull or import an image."
operationId: "ImageCreate"
consumes:
- "text/plain"
- "application/octet-stream"
produces:
- "application/json"
responses:
200:
description: "no error"
404:
description: "repository does not exist or no read access"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "fromImage"
in: "query"
description: |
Name of the image to pull. If the name includes a tag or digest, specific behavior applies:
- If only `fromImage` includes a tag, that tag is used.
- If both `fromImage` and `tag` are provided, `tag` takes precedence.
- If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored.
- If neither a tag nor digest is specified, all tags are pulled.
type: "string"
- name: "fromSrc"
in: "query"
description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
type: "string"
- name: "repo"
in: "query"
description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
type: "string"
- name: "tag"
in: "query"
description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
type: "string"
- name: "message"
in: "query"
description: "Set commit message for imported image."
type: "string"
- name: "inputImage"
in: "body"
description: "Image content if the value `-` has been specified in fromSrc query parameter"
schema:
type: "string"
required: false
- name: "X-Registry-Auth"
in: "header"
description: |
A base64url-encoded auth configuration.
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
- name: "changes"
in: "query"
description: |
Apply `Dockerfile` instructions to the image that is created,
for example: `changes=ENV DEBUG=true`.
Note that `ENV DEBUG=true` should be URI component encoded.
Supported `Dockerfile` instructions:
`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
type: "array"
items:
type: "string"
- name: "platform"
in: "query"
description: |
Platform in the format os[/arch[/variant]].
When used in combination with the `fromImage` option, the daemon checks
if the given image is present in the local image cache with the given
OS and Architecture, and otherwise attempts to pull the image. If the
option is not set, the host's native OS and Architecture are used.
If the given image does not exist in the local image cache, the daemon
attempts to pull the image with the host's native OS and Architecture.
If the given image does exist in the local image cache, but its OS or
architecture does not match, a warning is produced.
When used with the `fromSrc` option to import an image from an archive,
this option sets the platform information for the imported image. If
the option is not set, the host's native OS and Architecture are used
for the imported image.
type: "string"
default: ""
tags: ["Image"]
/images/{name}/json:
get:
summary: "Inspect an image"
description: "Return low-level information about an image."
operationId: "ImageInspect"
produces:
- "application/json"
responses:
200:
description: "No error"
schema:
$ref: "#/definitions/ImageInspect"
404:
description: "No such image"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such image: someimage (tag: latest)"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "Image name or id"
type: "string"
required: true
- name: "manifests"
in: "query"
description: "Include Manifests in the image summary."
type: "boolean"
default: false
required: false
tags: ["Image"]
/images/{name}/history:
get:
summary: "Get the history of an image"
description: "Return parent layers of an image."
operationId: "ImageHistory"
produces: ["application/json"]
responses:
200:
description: "List of image layers"
schema:
type: "array"
items:
$ref: "#/definitions/ImageHistoryResponseItem"
examples:
application/json:
- Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710"
Created: 1398108230
CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /"
Tags:
- "ubuntu:lucid"
- "ubuntu:10.04"
Size: 182964289
Comment: ""
- Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8"
Created: 1398108222
CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/"
Tags: []
Size: 0
Comment: ""
- Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
Created: 1371157430
CreatedBy: ""
Tags:
- "scratch12:latest"
- "scratch:latest"
Size: 0
Comment: "Imported from -"
404:
description: "No such image"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "Image name or ID"
type: "string"
required: true
- name: "platform"
type: "string"
in: "query"
description: |
JSON-encoded OCI platform to select the platform-variant.
If omitted, it defaults to any locally available platform,
prioritizing the daemon's host platform.
If the daemon provides a multi-platform image store, this selects
the platform-variant to show the history for. If the image is
a single-platform image, or if the multi-platform image does not
provide a variant matching the given platform, an error is returned.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/{name}/push:
post:
summary: "Push an image"
description: |
Push an image to a registry.
If you wish to push an image on to a private registry, that image must
already have a tag which references the registry. For example,
`registry.example.com/myimage:latest`.
The push is cancelled if the HTTP connection is closed.
operationId: "ImagePush"
consumes:
- "application/octet-stream"
responses:
200:
description: "No error"
404:
description: "No such image"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: |
Name of the image to push. For example, `registry.example.com/myimage`.
The image must be present in the local image store with the same name.
The name should be provided without tag; if a tag is provided, it
is ignored. For example, `registry.example.com/myimage:latest` is
considered equivalent to `registry.example.com/myimage`.
Use the `tag` parameter to specify the tag to push.
type: "string"
required: true
- name: "tag"
in: "query"
description: |
Tag of the image to push. For example, `latest`. If no tag is provided,
all tags of the given image that are present in the local image store
are pushed.
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON-encoded OCI platform to select the platform-variant to push.
If not provided, all available variants will attempt to be pushed.
If the daemon provides a multi-platform image store, this selects
the platform-variant to push to the registry. If the image is
a single-platform image, or if the multi-platform image does not
provide a variant matching the given platform, an error is returned.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
- name: "X-Registry-Auth"
in: "header"
description: |
A base64url-encoded auth configuration.
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
required: true
tags: ["Image"]
/images/{name}/tag:
post:
summary: "Tag an image"
description: |
Create a tag that refers to a source image.
This creates an additional reference (tag) to the source image. The tag
can include a different repository name and/or tag. If the repository
or tag already exists, it will be overwritten.
operationId: "ImageTag"
responses:
201:
description: "No error"
400:
description: "Bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "No such image"
schema:
$ref: "#/definitions/ErrorResponse"
409:
description: "Conflict"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "Image name or ID to tag."
type: "string"
required: true
- name: "repo"
in: "query"
description: "The repository to tag in. For example, `someuser/someimage`."
type: "string"
- name: "tag"
in: "query"
description: "The name of the new tag."
type: "string"
tags: ["Image"]
/images/{name}:
delete:
summary: "Remove an image"
description: |
Remove an image, along with any untagged parent images that were
referenced by that image.
Images can't be removed if they have descendant images, are being
used by a running container or are being used by a build.
operationId: "ImageDelete"
produces: ["application/json"]
responses:
200:
description: "The image was deleted successfully"
schema:
type: "array"
items:
$ref: "#/definitions/ImageDeleteResponseItem"
examples:
application/json:
- Untagged: "3e2f21a89f"
- Deleted: "3e2f21a89f"
- Deleted: "53b4f83ac9"
404:
description: "No such image"
schema:
$ref: "#/definitions/ErrorResponse"
409:
description: "Conflict"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "Image name or ID"
type: "string"
required: true
- name: "force"
in: "query"
description: "Remove the image even if it is being used by stopped containers or has other tags"
type: "boolean"
default: false
- name: "noprune"
in: "query"
description: "Do not delete untagged parent images"
type: "boolean"
default: false
tags: ["Image"]
/images/search:
get:
summary: "Search images"
description: "Search for an image on Docker Hub."
operationId: "ImageSearch"
produces:
- "application/json"
responses:
200:
description: "No error"
schema:
type: "array"
items:
type: "object"
title: "ImageSearchResponseItem"
properties:
description:
type: "string"
is_official:
type: "boolean"
is_automated:
description: |
Whether this repository has automated builds enabled.
<p><br /></p>
> **Deprecated**: This field is deprecated and will always be "false".
type: "boolean"
example: false
name:
type: "string"
star_count:
type: "integer"
examples:
application/json:
- description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!"
is_official: true
is_automated: false
name: "alpine"
star_count: 10093
- description: "Busybox base image."
is_official: true
is_automated: false
name: "busybox"
star_count: 3037
- description: "The PostgreSQL object-relational database system provides reliability and data integrity."
is_official: true
is_automated: false
name: "postgres"
star_count: 12408
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "term"
in: "query"
description: "Term to search"
type: "string"
required: true
- name: "limit"
in: "query"
description: "Maximum number of results to return"
type: "integer"
- name: "filters"
in: "query"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
- `is-official=(true|false)`
- `stars=<number>` Matches images that have at least 'number' stars.
type: "string"
tags: ["Image"]
/images/prune:
post:
summary: "Delete unused images"
produces:
- "application/json"
operationId: "ImagePrune"
parameters:
- name: "filters"
in: "query"
description: |
Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:
- `dangling=<boolean>` When set to `true` (or `1`), prune only
unused *and* untagged images. When set to `false`
(or `0`), all unused images are pruned.
- `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels.
type: "string"
responses:
200:
description: "No error"
schema:
type: "object"
title: "ImagePruneResponse"
properties:
ImagesDeleted:
description: "Images that were deleted"
type: "array"
items:
$ref: "#/definitions/ImageDeleteResponseItem"
SpaceReclaimed:
description: "Disk space reclaimed in bytes"
type: "integer"
format: "int64"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Image"]
/auth:
post:
summary: "Check auth configuration"
description: |
Validate credentials for a registry and, if available, get an identity
token for accessing the registry without password.
operationId: "SystemAuth"
consumes: ["application/json"]
produces: ["application/json"]
responses:
200:
description: "An identity token was generated successfully."
schema:
type: "object"
title: "SystemAuthResponse"
required: [Status]
properties:
Status:
description: "The status of the authentication"
type: "string"
x-nullable: false
IdentityToken:
description: "An opaque token used to authenticate a user after a successful login"
type: "string"
x-nullable: false
examples:
application/json:
Status: "Login Succeeded"
IdentityToken: "9cbaf023786cd7..."
204:
description: "No error"
401:
description: "Auth error"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "authConfig"
in: "body"
description: "Authentication to check"
schema:
$ref: "#/definitions/AuthConfig"
tags: ["System"]
/info:
get:
summary: "Get system information"
operationId: "SystemInfo"
produces:
- "application/json"
responses:
200:
description: "No error"
schema:
$ref: "#/definitions/SystemInfo"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["System"]
/version:
get:
summary: "Get version"
description: "Returns the version of Docker that is running and various information about the system that Docker is running on."
operationId: "SystemVersion"
produces: ["application/json"]
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/SystemVersion"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["System"]
/_ping:
get:
summary: "Ping"
description: "This is a dummy endpoint you can use to test if the server is accessible."
operationId: "SystemPing"
produces: ["text/plain"]
responses:
200:
description: "no error"
schema:
type: "string"
example: "OK"
headers:
Api-Version:
type: "string"
description: "Max API Version the server supports"
Builder-Version:
type: "string"
description: |
Default version of docker image builder
The default on Linux is version "2" (BuildKit), but the daemon
can be configured to recommend version "1" (classic Builder).
Windows does not yet support BuildKit for native Windows images,
and uses "1" (classic builder) as a default.
This value is a recommendation as advertised by the daemon, and
it is up to the client to choose which builder to use.
default: "2"
Docker-Experimental:
type: "boolean"
description: "If the server is running with experimental mode enabled"
Swarm:
type: "string"
enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"]
description: |
Contains information about Swarm status of the daemon,
and if the daemon is acting as a manager or worker node.
default: "inactive"
Cache-Control:
type: "string"
default: "no-cache, no-store, must-revalidate"
Pragma:
type: "string"
default: "no-cache"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
headers:
Cache-Control:
type: "string"
default: "no-cache, no-store, must-revalidate"
Pragma:
type: "string"
default: "no-cache"
tags: ["System"]
head:
summary: "Ping"
description: "This is a dummy endpoint you can use to test if the server is accessible."
operationId: "SystemPingHead"
produces: ["text/plain"]
responses:
200:
description: "no error"
schema:
type: "string"
example: "(empty)"
headers:
Api-Version:
type: "string"
description: "Max API Version the server supports"
Builder-Version:
type: "string"
description: "Default version of docker image builder"
Docker-Experimental:
type: "boolean"
description: "If the server is running with experimental mode enabled"
Swarm:
type: "string"
enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"]
description: |
Contains information about Swarm status of the daemon,
and if the daemon is acting as a manager or worker node.
default: "inactive"
Cache-Control:
type: "string"
default: "no-cache, no-store, must-revalidate"
Pragma:
type: "string"
default: "no-cache"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["System"]
/commit:
post:
summary: "Create a new image from a container"
operationId: "ImageCommit"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "no error"
schema:
$ref: "#/definitions/IDResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "containerConfig"
in: "body"
description: "The container configuration"
schema:
$ref: "#/definitions/ContainerConfig"
- name: "container"
in: "query"
description: "The ID or name of the container to commit"
type: "string"
- name: "repo"
in: "query"
description: "Repository name for the created image"
type: "string"
- name: "tag"
in: "query"
          description: "Tag name for the created image"
type: "string"
- name: "comment"
in: "query"
description: "Commit message"
type: "string"
- name: "author"
in: "query"
description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)"
type: "string"
- name: "pause"
in: "query"
description: "Whether to pause the container before committing"
type: "boolean"
default: true
- name: "changes"
in: "query"
description: "`Dockerfile` instructions to apply while committing"
type: "string"
tags: ["Image"]
/events:
get:
summary: "Monitor events"
description: |
Stream real-time events from the server.
Various objects within Docker report events when something happens to them.
Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune`
Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune`
Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune`
The Docker daemon reports these events: `reload`
Services report these events: `create`, `update`, and `remove`
Nodes report these events: `create`, `update`, and `remove`
Secrets report these events: `create`, `update`, and `remove`
Configs report these events: `create`, `update`, and `remove`
The Builder reports `prune` events
operationId: "SystemEvents"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/EventMessage"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "since"
in: "query"
          description: "Show events created since this timestamp, then stream new events."
type: "string"
- name: "until"
in: "query"
          description: "Show events created until this timestamp, then stop streaming."
type: "string"
- name: "filters"
in: "query"
description: |
A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
- `config=<string>` config name or ID
- `container=<string>` container name or ID
- `daemon=<string>` daemon name or ID
- `event=<string>` event type
- `image=<string>` image name or ID
- `label=<string>` image or container label
- `network=<string>` network name or ID
- `node=<string>` node ID
          - `plugin=<string>` plugin name or ID
          - `scope=<string>` local or swarm
- `secret=<string>` secret name or ID
- `service=<string>` service name or ID
- `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
- `volume=<string>` volume name
type: "string"
tags: ["System"]
/system/df:
get:
summary: "Get data usage information"
operationId: "SystemDataUsage"
responses:
200:
description: "no error"
schema:
type: "object"
title: "SystemDataUsageResponse"
properties:
LayersSize:
type: "integer"
format: "int64"
Images:
type: "array"
items:
$ref: "#/definitions/ImageSummary"
Containers:
type: "array"
items:
$ref: "#/definitions/ContainerSummary"
Volumes:
type: "array"
items:
$ref: "#/definitions/Volume"
BuildCache:
type: "array"
items:
$ref: "#/definitions/BuildCache"
example:
LayersSize: 1092588
Images:
-
Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
ParentId: ""
RepoTags:
- "busybox:latest"
RepoDigests:
- "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
Created: 1466724217
Size: 1092588
SharedSize: 0
Labels: {}
Containers: 1
Containers:
-
Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
Names:
- "/top"
Image: "busybox"
ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
Command: "top"
Created: 1472592424
Ports: []
SizeRootFs: 1092588
Labels: {}
State: "exited"
Status: "Exited (0) 56 minutes ago"
HostConfig:
NetworkMode: "default"
NetworkSettings:
Networks:
bridge:
IPAMConfig: null
Links: null
Aliases: null
NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
Gateway: "172.18.0.1"
IPAddress: "172.18.0.2"
IPPrefixLen: 16
IPv6Gateway: ""
GlobalIPv6Address: ""
GlobalIPv6PrefixLen: 0
MacAddress: "02:42:ac:12:00:02"
Mounts: []
Volumes:
-
Name: "my-volume"
Driver: "local"
Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
Labels: null
Scope: "local"
Options: null
UsageData:
Size: 10920104
RefCount: 2
BuildCache:
-
ID: "hw53o5aio51xtltp5xjp8v7fx"
Parents: []
Type: "regular"
Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0"
InUse: false
Shared: true
Size: 0
CreatedAt: "2021-06-28T13:31:01.474619385Z"
LastUsedAt: "2021-07-07T22:02:32.738075951Z"
UsageCount: 26
-
ID: "ndlpt0hhvkqcdfkputsk4cq9c"
                Parents: ["hw53o5aio51xtltp5xjp8v7fx"]
Type: "regular"
Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
InUse: false
Shared: true
Size: 51
CreatedAt: "2021-06-28T13:31:03.002625487Z"
LastUsedAt: "2021-07-07T22:02:32.773909517Z"
UsageCount: 26
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "type"
in: "query"
description: |
            Object types for which to compute and return data.
type: "array"
collectionFormat: multi
items:
type: "string"
enum: ["container", "image", "volume", "build-cache"]
tags: ["System"]
/images/{name}/get:
get:
summary: "Export an image"
description: |
Get a tarball containing all images and metadata for a repository.
If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.
### Image tarball format
An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content).
Additionally, includes the manifest.json file associated with a backwards compatible docker save format.
If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
```json
{
"hello-world": {
"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"
}
}
```
operationId: "ImageGet"
produces:
- "application/x-tar"
responses:
200:
description: "no error"
schema:
type: "string"
format: "binary"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "Image name or ID"
type: "string"
required: true
- name: "platform"
type: "string"
in: "query"
description: |
JSON encoded OCI platform describing a platform which will be used
to select a platform-specific image to be saved if the image is
multi-platform.
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/get:
get:
summary: "Export several images"
description: |
Get a tarball containing all images and metadata for several image
repositories.
For each value of the `names` parameter: if it is a specific name and
tag (e.g. `ubuntu:latest`), then only that image (and its parents) are
returned; if it is an image ID, similarly only that image (and its parents)
are returned and there would be no names referenced in the 'repositories'
file for this image ID.
For details on the format, see the [export image endpoint](#operation/ImageGet).
operationId: "ImageGetAll"
produces:
- "application/x-tar"
responses:
200:
description: "no error"
schema:
type: "string"
format: "binary"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "names"
in: "query"
description: "Image names to filter by"
type: "array"
items:
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON encoded OCI platform describing a platform which will be used
to select a platform-specific image to be saved if the image is
multi-platform.
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:
summary: "Import images"
description: |
Load a set of images and tags into a repository.
For details on the format, see the [export image endpoint](#operation/ImageGet).
operationId: "ImageLoad"
consumes:
- "application/x-tar"
produces:
- "application/json"
responses:
200:
description: "no error"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "imagesTarball"
in: "body"
description: "Tar archive containing images"
schema:
type: "string"
format: "binary"
- name: "quiet"
in: "query"
description: "Suppress progress details during load."
type: "boolean"
default: false
- name: "platform"
type: "string"
in: "query"
description: |
JSON encoded OCI platform describing a platform which will be used
            to select a platform-specific image to be loaded if the image is
multi-platform.
If not provided, the full multi-platform image will be loaded.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/containers/{id}/exec:
post:
summary: "Create an exec instance"
description: "Run a command inside a running container."
operationId: "ContainerExec"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "no error"
schema:
$ref: "#/definitions/IDResponse"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
409:
description: "container is paused"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "execConfig"
in: "body"
description: "Exec configuration"
schema:
type: "object"
title: "ExecConfig"
properties:
AttachStdin:
type: "boolean"
description: "Attach to `stdin` of the exec command."
AttachStdout:
type: "boolean"
description: "Attach to `stdout` of the exec command."
AttachStderr:
type: "boolean"
description: "Attach to `stderr` of the exec command."
ConsoleSize:
type: "array"
description: "Initial console size, as an `[height, width]` array."
x-nullable: true
minItems: 2
maxItems: 2
items:
type: "integer"
minimum: 0
example: [80, 64]
DetachKeys:
type: "string"
description: |
Override the key sequence for detaching a container. Format is
a single character `[a-Z]` or `ctrl-<value>` where `<value>`
is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
Tty:
type: "boolean"
description: "Allocate a pseudo-TTY."
Env:
description: |
A list of environment variables in the form `["VAR=value", ...]`.
type: "array"
items:
type: "string"
Cmd:
type: "array"
description: "Command to run, as a string or array of strings."
items:
type: "string"
Privileged:
type: "boolean"
description: "Runs the exec process with extended privileges."
default: false
User:
type: "string"
description: |
The user, and optionally, group to run the exec process inside
the container. Format is one of: `user`, `user:group`, `uid`,
or `uid:gid`.
WorkingDir:
type: "string"
description: |
The working directory for the exec process inside the container.
example:
AttachStdin: false
AttachStdout: true
AttachStderr: true
DetachKeys: "ctrl-p,ctrl-q"
Tty: false
Cmd:
- "date"
Env:
- "FOO=bar"
- "BAZ=quux"
required: true
- name: "id"
in: "path"
description: "ID or name of container"
type: "string"
required: true
tags: ["Exec"]
/exec/{id}/start:
post:
summary: "Start an exec instance"
description: |
Starts a previously set up exec instance. If detach is true, this endpoint
returns immediately after starting the command. Otherwise, it sets up an
interactive session with the command.
operationId: "ExecStart"
consumes:
- "application/json"
produces:
- "application/vnd.docker.raw-stream"
- "application/vnd.docker.multiplexed-stream"
responses:
200:
description: "No error"
404:
description: "No such exec instance"
schema:
$ref: "#/definitions/ErrorResponse"
409:
description: "Container is stopped or paused"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "execStartConfig"
in: "body"
schema:
type: "object"
title: "ExecStartConfig"
properties:
Detach:
type: "boolean"
description: "Detach from the command."
example: false
Tty:
type: "boolean"
description: "Allocate a pseudo-TTY."
example: true
ConsoleSize:
type: "array"
description: "Initial console size, as an `[height, width]` array."
x-nullable: true
minItems: 2
maxItems: 2
items:
type: "integer"
minimum: 0
example: [80, 64]
- name: "id"
in: "path"
description: "Exec instance ID"
required: true
type: "string"
tags: ["Exec"]
/exec/{id}/resize:
post:
summary: "Resize an exec instance"
description: |
Resize the TTY session used by an exec instance. This endpoint only works
if `tty` was specified as part of creating and starting the exec instance.
operationId: "ExecResize"
responses:
200:
description: "No error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "No such exec instance"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "Exec instance ID"
required: true
type: "string"
- name: "h"
in: "query"
required: true
description: "Height of the TTY session in characters"
type: "integer"
- name: "w"
in: "query"
required: true
description: "Width of the TTY session in characters"
type: "integer"
tags: ["Exec"]
/exec/{id}/json:
get:
summary: "Inspect an exec instance"
description: "Return low-level information about an exec instance."
operationId: "ExecInspect"
produces:
- "application/json"
responses:
200:
description: "No error"
schema:
type: "object"
title: "ExecInspectResponse"
properties:
CanRemove:
type: "boolean"
DetachKeys:
type: "string"
ID:
type: "string"
Running:
type: "boolean"
ExitCode:
type: "integer"
ProcessConfig:
$ref: "#/definitions/ProcessConfig"
OpenStdin:
type: "boolean"
OpenStderr:
type: "boolean"
OpenStdout:
type: "boolean"
ContainerID:
type: "string"
Pid:
type: "integer"
description: "The system process ID for the exec process."
examples:
application/json:
CanRemove: false
ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
DetachKeys: ""
ExitCode: 2
ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
OpenStderr: true
OpenStdin: true
OpenStdout: true
ProcessConfig:
arguments:
- "-c"
- "exit 2"
entrypoint: "sh"
privileged: false
tty: true
user: "1000"
Running: false
Pid: 42000
404:
description: "No such exec instance"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "Exec instance ID"
required: true
type: "string"
tags: ["Exec"]
/volumes:
get:
summary: "List volumes"
operationId: "VolumeList"
produces: ["application/json"]
responses:
200:
description: "Summary volume data that matches the query"
schema:
$ref: "#/definitions/VolumeListResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
description: |
JSON encoded value of the filters (a `map[string][]string`) to
process on the volumes list. Available filters:
- `dangling=<boolean>` When set to `true` (or `1`), returns all
volumes that are not in use by a container. When set to `false`
(or `0`), only volumes that are in use by one or more
containers are returned.
- `driver=<volume-driver-name>` Matches volumes based on their driver.
- `label=<key>` or `label=<key>:<value>` Matches volumes based on
the presence of a `label` alone or a `label` and a value.
- `name=<volume-name>` Matches all or part of a volume name.
type: "string"
format: "json"
tags: ["Volume"]
/volumes/create:
post:
summary: "Create a volume"
operationId: "VolumeCreate"
consumes: ["application/json"]
produces: ["application/json"]
responses:
201:
description: "The volume was created successfully"
schema:
$ref: "#/definitions/Volume"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "volumeConfig"
in: "body"
required: true
description: "Volume configuration"
schema:
$ref: "#/definitions/VolumeCreateOptions"
tags: ["Volume"]
/volumes/{name}:
get:
summary: "Inspect a volume"
operationId: "VolumeInspect"
produces: ["application/json"]
responses:
200:
description: "No error"
schema:
$ref: "#/definitions/Volume"
404:
description: "No such volume"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
required: true
description: "Volume name or ID"
type: "string"
tags: ["Volume"]
put:
      summary: "Update a volume. Valid only for Swarm cluster volumes"
operationId: "VolumeUpdate"
consumes: ["application/json"]
produces: ["application/json"]
responses:
200:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such volume"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "The name or ID of the volume"
type: "string"
required: true
- name: "body"
in: "body"
schema:
            # though the schema for this request is an object that contains only a
# ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object
# means that if, later on, we support things like changing the
# labels, we can do so without duplicating that information to the
# ClusterVolumeSpec.
type: "object"
description: "Volume configuration"
properties:
Spec:
$ref: "#/definitions/ClusterVolumeSpec"
description: |
The spec of the volume to update. Currently, only Availability may
change. All other fields must remain unchanged.
- name: "version"
in: "query"
description: |
The version number of the volume being updated. This is required to
avoid conflicting writes. Found in the volume's `ClusterVolume`
field.
type: "integer"
format: "int64"
required: true
tags: ["Volume"]
delete:
summary: "Remove a volume"
description: "Instruct the driver to remove the volume."
operationId: "VolumeDelete"
responses:
204:
description: "The volume was removed"
404:
description: "No such volume or volume driver"
schema:
$ref: "#/definitions/ErrorResponse"
409:
description: "Volume is in use and cannot be removed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
required: true
description: "Volume name or ID"
type: "string"
- name: "force"
in: "query"
description: "Force the removal of the volume"
type: "boolean"
default: false
tags: ["Volume"]
/volumes/prune:
post:
summary: "Delete unused volumes"
produces:
- "application/json"
operationId: "VolumePrune"
parameters:
- name: "filters"
in: "query"
description: |
Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
Available filters:
- `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
- `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes.
type: "string"
responses:
200:
description: "No error"
schema:
type: "object"
title: "VolumePruneResponse"
properties:
VolumesDeleted:
description: "Volumes that were deleted"
type: "array"
items:
type: "string"
SpaceReclaimed:
description: "Disk space reclaimed in bytes"
type: "integer"
format: "int64"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Volume"]
/networks:
get:
summary: "List networks"
description: |
Returns a list of networks. For details on the format, see the
[network inspect endpoint](#operation/NetworkInspect).
Note that it uses a different, smaller representation of a network than
inspecting a single network. For example, the list of containers attached
to the network is not propagated in API versions 1.28 and up.
operationId: "NetworkList"
produces:
- "application/json"
responses:
200:
description: "No error"
schema:
type: "array"
items:
$ref: "#/definitions/Network"
examples:
application/json:
- Name: "bridge"
Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
Created: "2016-10-19T06:21:00.416543526Z"
Scope: "local"
Driver: "bridge"
EnableIPv4: true
EnableIPv6: false
Internal: false
Attachable: false
Ingress: false
IPAM:
Driver: "default"
Config:
-
Subnet: "172.17.0.0/16"
Options:
com.docker.network.bridge.default_bridge: "true"
com.docker.network.bridge.enable_icc: "true"
com.docker.network.bridge.enable_ip_masquerade: "true"
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
com.docker.network.bridge.name: "docker0"
com.docker.network.driver.mtu: "1500"
- Name: "none"
Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
Created: "0001-01-01T00:00:00Z"
Scope: "local"
Driver: "null"
EnableIPv4: false
EnableIPv6: false
Internal: false
Attachable: false
Ingress: false
IPAM:
Driver: "default"
Config: []
Containers: {}
Options: {}
- Name: "host"
Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
Created: "0001-01-01T00:00:00Z"
Scope: "local"
Driver: "host"
EnableIPv4: false
EnableIPv6: false
Internal: false
Attachable: false
Ingress: false
IPAM:
Driver: "default"
Config: []
Containers: {}
Options: {}
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
description: |
JSON encoded value of the filters (a `map[string][]string`) to process
on the networks list.
Available filters:
- `dangling=<boolean>` When set to `true` (or `1`), returns all
networks that are not in use by a container. When set to `false`
(or `0`), only networks that are in use by one or more
containers are returned.
- `driver=<driver-name>` Matches a network's driver.
- `id=<network-id>` Matches all or part of a network ID.
- `label=<key>` or `label=<key>=<value>` of a network label.
- `name=<network-name>` Matches all or part of a network name.
- `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
- `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
type: "string"
tags: ["Network"]
/networks/{id}:
get:
summary: "Inspect a network"
operationId: "NetworkInspect"
produces:
- "application/json"
responses:
200:
description: "No error"
schema:
$ref: "#/definitions/Network"
404:
description: "Network not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "Network ID or name"
required: true
type: "string"
- name: "verbose"
in: "query"
description: "Detailed inspect output for troubleshooting"
type: "boolean"
default: false
- name: "scope"
in: "query"
description: "Filter the network by scope (swarm, global, or local)"
type: "string"
tags: ["Network"]
delete:
summary: "Remove a network"
operationId: "NetworkDelete"
responses:
204:
description: "No error"
403:
description: "operation not supported for pre-defined networks"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such network"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "Network ID or name"
required: true
type: "string"
tags: ["Network"]
/networks/create:
post:
summary: "Create a network"
operationId: "NetworkCreate"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "Network created successfully"
schema:
$ref: "#/definitions/NetworkCreateResponse"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
403:
description: |
Forbidden operation. This happens when trying to create a network named after a pre-defined network,
or when trying to create an overlay network on a daemon which is not part of a Swarm cluster.
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "plugin not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "networkConfig"
in: "body"
description: "Network configuration"
required: true
schema:
type: "object"
title: "NetworkCreateRequest"
required: ["Name"]
properties:
Name:
description: "The network's name."
type: "string"
example: "my_network"
Driver:
description: "Name of the network driver plugin to use."
type: "string"
default: "bridge"
example: "bridge"
Scope:
description: |
The level at which the network exists (e.g. `swarm` for cluster-wide
or `local` for machine level).
type: "string"
Internal:
description: "Restrict external access to the network."
type: "boolean"
Attachable:
description: |
Globally scoped network is manually attachable by regular
containers from workers in swarm mode.
type: "boolean"
example: true
Ingress:
description: |
Ingress network is the network which provides the routing-mesh
in swarm mode.
type: "boolean"
example: false
ConfigOnly:
description: |
Creates a config-only network. Config-only networks are placeholder
networks for network configurations to be used by other networks.
Config-only networks cannot be used directly to run containers
or services.
type: "boolean"
default: false
example: false
ConfigFrom:
description: |
Specifies the source which will provide the configuration for
this network. The specified network must be an existing
config-only network; see ConfigOnly.
$ref: "#/definitions/ConfigReference"
IPAM:
description: "Optional custom IP scheme for the network."
$ref: "#/definitions/IPAM"
EnableIPv4:
description: "Enable IPv4 on the network."
type: "boolean"
example: true
EnableIPv6:
description: "Enable IPv6 on the network."
type: "boolean"
example: true
Options:
description: "Network specific options to be used by the drivers."
type: "object"
additionalProperties:
type: "string"
example:
com.docker.network.bridge.default_bridge: "true"
com.docker.network.bridge.enable_icc: "true"
com.docker.network.bridge.enable_ip_masquerade: "true"
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
com.docker.network.bridge.name: "docker0"
com.docker.network.driver.mtu: "1500"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
tags: ["Network"]
/networks/{id}/connect:
post:
summary: "Connect a container to a network"
description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container"
operationId: "NetworkConnect"
consumes:
- "application/json"
responses:
200:
description: "No error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
403:
description: "Operation forbidden"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "Network or container not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "Network ID or name"
required: true
type: "string"
- name: "container"
in: "body"
required: true
schema:
type: "object"
title: "NetworkConnectRequest"
properties:
Container:
type: "string"
description: "The ID or name of the container to connect to the network."
EndpointConfig:
$ref: "#/definitions/EndpointSettings"
example:
Container: "3613f73ba0e4"
EndpointConfig:
IPAMConfig:
IPv4Address: "172.24.56.89"
IPv6Address: "2001:db8::5689"
MacAddress: "02:42:ac:12:05:02"
Priority: 100
tags: ["Network"]
/networks/{id}/disconnect:
post:
summary: "Disconnect a container from a network"
operationId: "NetworkDisconnect"
consumes:
- "application/json"
responses:
200:
description: "No error"
403:
description: "Operation not supported for swarm scoped networks"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "Network or container not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "Network ID or name"
required: true
type: "string"
- name: "container"
in: "body"
required: true
schema:
type: "object"
title: "NetworkDisconnectRequest"
properties:
Container:
type: "string"
description: |
The ID or name of the container to disconnect from the network.
Force:
type: "boolean"
description: |
Force the container to disconnect from the network.
tags: ["Network"]
/networks/prune:
post:
summary: "Delete unused networks"
produces:
- "application/json"
operationId: "NetworkPrune"
parameters:
- name: "filters"
in: "query"
description: |
Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
Available filters:
- `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
type: "string"
responses:
200:
description: "No error"
schema:
type: "object"
title: "NetworkPruneResponse"
properties:
NetworksDeleted:
description: "Networks that were deleted"
type: "array"
items:
type: "string"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Network"]
/plugins:
get:
summary: "List plugins"
operationId: "PluginList"
description: "Returns information about installed plugins."
produces: ["application/json"]
responses:
200:
description: "No error"
schema:
type: "array"
items:
$ref: "#/definitions/Plugin"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the plugin list.
Available filters:
- `capability=<capability name>`
- `enable=<true>|<false>`
tags: ["Plugin"]
/plugins/privileges:
get:
summary: "Get plugin privileges"
operationId: "GetPluginPrivileges"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/PluginPrivilege"
example:
- Name: "network"
Description: ""
Value:
- "host"
- Name: "mount"
Description: ""
Value:
- "/data"
- Name: "device"
Description: ""
Value:
- "/dev/cpu_dma_latency"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "remote"
in: "query"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
tags:
- "Plugin"
/plugins/pull:
post:
summary: "Install a plugin"
operationId: "PluginPull"
description: |
Pulls and installs a plugin. After the plugin is installed, it can be
enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
produces:
- "application/json"
responses:
204:
description: "no error"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "remote"
in: "query"
description: |
Remote reference for plugin to install.
The `:latest` tag is optional, and is used as the default if omitted.
required: true
type: "string"
- name: "name"
in: "query"
description: |
Local name for the pulled plugin.
The `:latest` tag is optional, and is used as the default if omitted.
required: false
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: |
A base64url-encoded auth configuration to use when pulling a plugin
from a registry.
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
- name: "body"
in: "body"
schema:
type: "array"
items:
$ref: "#/definitions/PluginPrivilege"
example:
- Name: "network"
Description: ""
Value:
- "host"
- Name: "mount"
Description: ""
Value:
- "/data"
- Name: "device"
Description: ""
Value:
- "/dev/cpu_dma_latency"
tags: ["Plugin"]
/plugins/{name}/json:
get:
summary: "Inspect a plugin"
operationId: "PluginInspect"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Plugin"
404:
description: "plugin is not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
tags: ["Plugin"]
/plugins/{name}:
delete:
summary: "Remove a plugin"
operationId: "PluginDelete"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Plugin"
404:
description: "plugin is not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
- name: "force"
in: "query"
description: |
Disable the plugin before removing. This may result in issues if the
plugin is in use by a container.
type: "boolean"
default: false
tags: ["Plugin"]
/plugins/{name}/enable:
post:
summary: "Enable a plugin"
operationId: "PluginEnable"
responses:
200:
description: "no error"
404:
description: "plugin is not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
- name: "timeout"
in: "query"
description: "Set the HTTP client timeout (in seconds)"
type: "integer"
default: 0
tags: ["Plugin"]
/plugins/{name}/disable:
post:
summary: "Disable a plugin"
operationId: "PluginDisable"
responses:
200:
description: "no error"
404:
description: "plugin is not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
- name: "force"
in: "query"
description: |
Force disable a plugin even if still in use.
required: false
type: "boolean"
tags: ["Plugin"]
/plugins/{name}/upgrade:
post:
summary: "Upgrade a plugin"
operationId: "PluginUpgrade"
responses:
204:
description: "no error"
404:
description: "plugin not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
- name: "remote"
in: "query"
description: |
Remote reference to upgrade to.
The `:latest` tag is optional, and is used as the default if omitted.
required: true
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: |
A base64url-encoded auth configuration to use when pulling a plugin
from a registry.
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
- name: "body"
in: "body"
schema:
type: "array"
items:
$ref: "#/definitions/PluginPrivilege"
example:
- Name: "network"
Description: ""
Value:
- "host"
- Name: "mount"
Description: ""
Value:
- "/data"
- Name: "device"
Description: ""
Value:
- "/dev/cpu_dma_latency"
tags: ["Plugin"]
/plugins/create:
post:
summary: "Create a plugin"
operationId: "PluginCreate"
consumes:
- "application/x-tar"
responses:
204:
description: "no error"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "query"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
- name: "tarContext"
in: "body"
description: "Path to tar containing plugin rootfs and manifest"
schema:
type: "string"
format: "binary"
tags: ["Plugin"]
/plugins/{name}/push:
post:
summary: "Push a plugin"
operationId: "PluginPush"
description: |
Push a plugin to the registry.
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
responses:
200:
description: "no error"
404:
description: "plugin not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Plugin"]
/plugins/{name}/set:
post:
summary: "Configure a plugin"
operationId: "PluginSet"
consumes:
- "application/json"
parameters:
- name: "name"
in: "path"
description: |
The name of the plugin. The `:latest` tag is optional, and is the
default if omitted.
required: true
type: "string"
- name: "body"
in: "body"
schema:
type: "array"
items:
type: "string"
example: ["DEBUG=1"]
responses:
204:
description: "No error"
404:
description: "Plugin not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Plugin"]
/nodes:
get:
summary: "List nodes"
operationId: "NodeList"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/Node"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
description: |
Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).
Available filters:
- `id=<node id>`
- `label=<engine label>`
- `membership=(accepted|pending)`
- `name=<node name>`
- `node.label=<node label>`
- `role=(manager|worker)`
type: "string"
tags: ["Node"]
/nodes/{id}:
get:
summary: "Inspect a node"
operationId: "NodeInspect"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Node"
404:
description: "no such node"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "The ID or name of the node"
type: "string"
required: true
tags: ["Node"]
delete:
summary: "Delete a node"
operationId: "NodeDelete"
responses:
200:
description: "no error"
404:
description: "no such node"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "The ID or name of the node"
type: "string"
required: true
- name: "force"
in: "query"
description: "Force remove a node from the swarm"
default: false
type: "boolean"
tags: ["Node"]
/nodes/{id}/update:
post:
summary: "Update a node"
operationId: "NodeUpdate"
responses:
200:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such node"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "The ID of the node"
type: "string"
required: true
- name: "body"
in: "body"
schema:
$ref: "#/definitions/NodeSpec"
- name: "version"
in: "query"
description: |
The version number of the node object being updated. This is required
to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
tags: ["Node"]
/swarm:
get:
summary: "Inspect swarm"
operationId: "SwarmInspect"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Swarm"
404:
description: "no such swarm"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Swarm"]
/swarm/init:
post:
summary: "Initialize a new swarm"
operationId: "SwarmInit"
produces:
- "application/json"
- "text/plain"
responses:
200:
description: "no error"
schema:
description: "The node ID"
type: "string"
example: "7v2t30z9blmxuhnyo6s4cpenp"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is already part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
required: true
schema:
type: "object"
title: "SwarmInitRequest"
properties:
ListenAddr:
description: |
Listen address used for inter-manager communication, as well
as determining the networking interface used for the VXLAN
Tunnel Endpoint (VTEP). This can either be an address/port
combination in the form `192.168.1.1:4567`, or an interface
followed by a port number, like `eth0:4567`. If the port number
is omitted, the default swarm listening port is used.
type: "string"
AdvertiseAddr:
description: |
Externally reachable address advertised to other nodes. This
can either be an address/port combination in the form
`192.168.1.1:4567`, or an interface followed by a port number,
like `eth0:4567`. If the port number is omitted, the port
number from the listen address is used. If `AdvertiseAddr` is
not specified, it will be automatically detected when possible.
type: "string"
DataPathAddr:
description: |
Address or interface to use for data path traffic (format:
`<ip|interface>`), for example, `192.168.1.1`, or an interface,
like `eth0`. If `DataPathAddr` is unspecified, the same address
as `AdvertiseAddr` is used.
The `DataPathAddr` specifies the address that global scope
network drivers will publish towards other nodes in order to
reach the containers running on this node. Using this parameter
it is possible to separate the container data traffic from the
management traffic of the cluster.
type: "string"
DataPathPort:
description: |
DataPathPort specifies the data path port number for data traffic.
Acceptable port range is 1024 to 49151.
If no port is set, or it is set to 0, the default port 4789 will be used.
type: "integer"
format: "uint32"
DefaultAddrPool:
description: |
Default Address Pool specifies default subnet pools for global
scope networks.
type: "array"
items:
type: "string"
example: ["10.10.0.0/16", "20.20.0.0/16"]
ForceNewCluster:
description: "Force creation of a new swarm."
type: "boolean"
SubnetSize:
description: |
SubnetSize specifies the subnet size of the networks created
from the default subnet pool.
type: "integer"
format: "uint32"
Spec:
$ref: "#/definitions/SwarmSpec"
example:
ListenAddr: "0.0.0.0:2377"
AdvertiseAddr: "192.168.1.1:2377"
DataPathPort: 4789
DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"]
SubnetSize: 24
ForceNewCluster: false
Spec:
Orchestration: {}
Raft: {}
Dispatcher: {}
CAConfig: {}
EncryptionConfig:
AutoLockManagers: false
tags: ["Swarm"]
/swarm/join:
post:
summary: "Join an existing swarm"
operationId: "SwarmJoin"
responses:
200:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is already part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
required: true
schema:
type: "object"
title: "SwarmJoinRequest"
properties:
ListenAddr:
description: |
Listen address used for inter-manager communication if the node
gets promoted to manager, as well as determining the networking
interface used for the VXLAN Tunnel Endpoint (VTEP).
type: "string"
AdvertiseAddr:
description: |
Externally reachable address advertised to other nodes. This
can either be an address/port combination in the form
`192.168.1.1:4567`, or an interface followed by a port number,
like `eth0:4567`. If the port number is omitted, the port
number from the listen address is used. If `AdvertiseAddr` is
not specified, it will be automatically detected when possible.
type: "string"
DataPathAddr:
description: |
Address or interface to use for data path traffic (format:
`<ip|interface>`), for example, `192.168.1.1`, or an interface,
like `eth0`. If `DataPathAddr` is unspecified, the same address
as `AdvertiseAddr` is used.
The `DataPathAddr` specifies the address that global scope
network drivers will publish towards other nodes in order to
reach the containers running on this node. Using this parameter
it is possible to separate the container data traffic from the
management traffic of the cluster.
type: "string"
RemoteAddrs:
description: |
Addresses of manager nodes already participating in the swarm.
type: "array"
items:
type: "string"
JoinToken:
description: "Secret token for joining this swarm."
type: "string"
example:
ListenAddr: "0.0.0.0:2377"
AdvertiseAddr: "192.168.1.1:2377"
DataPathAddr: "192.168.1.1"
RemoteAddrs:
- "node1:2377"
JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
tags: ["Swarm"]
/swarm/leave:
post:
summary: "Leave a swarm"
operationId: "SwarmLeave"
responses:
200:
description: "no error"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "force"
description: |
Force leave swarm, even if this is the last manager or if leaving
will break the cluster.
in: "query"
type: "boolean"
default: false
tags: ["Swarm"]
/swarm/update:
post:
summary: "Update a swarm"
operationId: "SwarmUpdate"
responses:
200:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
required: true
schema:
$ref: "#/definitions/SwarmSpec"
- name: "version"
in: "query"
description: |
The version number of the swarm object being updated. This is
required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
- name: "rotateWorkerToken"
in: "query"
description: "Rotate the worker join token."
type: "boolean"
default: false
- name: "rotateManagerToken"
in: "query"
description: "Rotate the manager join token."
type: "boolean"
default: false
- name: "rotateManagerUnlockKey"
in: "query"
description: "Rotate the manager unlock key."
type: "boolean"
default: false
tags: ["Swarm"]
/swarm/unlockkey:
get:
summary: "Get the unlock key"
operationId: "SwarmUnlockkey"
consumes:
- "application/json"
responses:
200:
description: "no error"
schema:
type: "object"
title: "UnlockKeyResponse"
properties:
UnlockKey:
description: "The swarm's unlock key."
type: "string"
example:
UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Swarm"]
/swarm/unlock:
post:
summary: "Unlock a locked manager"
operationId: "SwarmUnlock"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- name: "body"
in: "body"
required: true
schema:
type: "object"
title: "SwarmUnlockRequest"
properties:
UnlockKey:
description: "The swarm's unlock key."
type: "string"
example:
UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
responses:
200:
description: "no error"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Swarm"]
/services:
get:
summary: "List services"
operationId: "ServiceList"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/Service"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the services list.
Available filters:
- `id=<service id>`
- `label=<service label>`
- `mode=["replicated"|"global"]`
- `name=<service name>`
- name: "status"
in: "query"
type: "boolean"
description: |
Include service status, with count of running and desired tasks.
tags: ["Service"]
/services/create:
post:
summary: "Create a service"
operationId: "ServiceCreate"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "no error"
schema:
$ref: "#/definitions/ServiceCreateResponse"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
403:
description: "network is not eligible for services"
schema:
$ref: "#/definitions/ErrorResponse"
409:
description: "name conflicts with an existing service"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
required: true
schema:
allOf:
- $ref: "#/definitions/ServiceSpec"
- type: "object"
example:
Name: "web"
TaskTemplate:
ContainerSpec:
Image: "nginx:alpine"
Mounts:
-
ReadOnly: true
Source: "web-data"
Target: "/usr/share/nginx/html"
Type: "volume"
VolumeOptions:
DriverConfig: {}
Labels:
com.example.something: "something-value"
Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"]
User: "33"
DNSConfig:
Nameservers: ["8.8.8.8"]
Search: ["example.org"]
Options: ["timeout:3"]
Secrets:
-
File:
Name: "www.example.org.key"
UID: "33"
GID: "33"
Mode: 384
SecretID: "fpjqlhnwb19zds35k8wn80lq9"
SecretName: "example_org_domain_key"
OomScoreAdj: 0
LogDriver:
Name: "json-file"
Options:
max-file: "3"
max-size: "10M"
Placement: {}
Resources:
Limits:
MemoryBytes: 104857600
Reservations: {}
RestartPolicy:
Condition: "on-failure"
Delay: 10000000000
MaxAttempts: 10
Mode:
Replicated:
Replicas: 4
UpdateConfig:
Parallelism: 2
Delay: 1000000000
FailureAction: "pause"
Monitor: 15000000000
MaxFailureRatio: 0.15
RollbackConfig:
Parallelism: 1
Delay: 1000000000
FailureAction: "pause"
Monitor: 15000000000
MaxFailureRatio: 0.15
EndpointSpec:
Ports:
-
Protocol: "tcp"
PublishedPort: 8080
TargetPort: 80
Labels:
foo: "bar"
- name: "X-Registry-Auth"
in: "header"
description: |
A base64url-encoded auth configuration for pulling from private
registries.
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
tags: ["Service"]
/services/{id}:
get:
summary: "Inspect a service"
operationId: "ServiceInspect"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Service"
404:
description: "no such service"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "ID or name of service."
required: true
type: "string"
- name: "insertDefaults"
in: "query"
description: "Fill empty fields with default values."
type: "boolean"
default: false
tags: ["Service"]
delete:
summary: "Delete a service"
operationId: "ServiceDelete"
responses:
200:
description: "no error"
404:
description: "no such service"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "ID or name of service."
required: true
type: "string"
tags: ["Service"]
/services/{id}/update:
post:
summary: "Update a service"
operationId: "ServiceUpdate"
consumes: ["application/json"]
produces: ["application/json"]
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/ServiceUpdateResponse"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such service"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "ID or name of service."
required: true
type: "string"
- name: "body"
in: "body"
required: true
schema:
allOf:
- $ref: "#/definitions/ServiceSpec"
- type: "object"
example:
Name: "top"
TaskTemplate:
ContainerSpec:
Image: "busybox"
Args:
- "top"
OomScoreAdj: 0
Resources:
Limits: {}
Reservations: {}
RestartPolicy:
Condition: "any"
MaxAttempts: 0
Placement: {}
ForceUpdate: 0
Mode:
Replicated:
Replicas: 1
UpdateConfig:
Parallelism: 2
Delay: 1000000000
FailureAction: "pause"
Monitor: 15000000000
MaxFailureRatio: 0.15
RollbackConfig:
Parallelism: 1
Delay: 1000000000
FailureAction: "pause"
Monitor: 15000000000
MaxFailureRatio: 0.15
EndpointSpec:
Mode: "vip"
- name: "version"
in: "query"
description: |
The version number of the service object being updated. This is
required to avoid conflicting writes.
This version number should be the value as currently set on the
service *before* the update. You can find the current version by
calling `GET /services/{id}`
required: true
type: "integer"
- name: "registryAuthFrom"
in: "query"
description: |
If the `X-Registry-Auth` header is not specified, this parameter
indicates where to find registry authorization credentials.
type: "string"
enum: ["spec", "previous-spec"]
default: "spec"
- name: "rollback"
in: "query"
description: |
Set this parameter to `previous` to cause a server-side rollback
to the previous service spec. The supplied spec will be ignored in
this case.
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: |
A base64url-encoded auth configuration for pulling from private
registries.
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
tags: ["Service"]
/services/{id}/logs:
get:
summary: "Get service logs"
description: |
Get `stdout` and `stderr` logs from a service. See also
[`/containers/{id}/logs`](#operation/ContainerLogs).
**Note**: This endpoint works only for services with the `local`,
`json-file` or `journald` logging drivers.
produces:
- "application/vnd.docker.raw-stream"
- "application/vnd.docker.multiplexed-stream"
operationId: "ServiceLogs"
responses:
200:
description: "logs returned as a stream in response body"
schema:
type: "string"
format: "binary"
404:
description: "no such service"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such service: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the service"
type: "string"
- name: "details"
in: "query"
description: "Show service context and extra details provided to logs."
type: "boolean"
default: false
- name: "follow"
in: "query"
description: "Keep connection after returning logs."
type: "boolean"
default: false
- name: "stdout"
in: "query"
description: "Return logs from `stdout`"
type: "boolean"
default: false
- name: "stderr"
in: "query"
description: "Return logs from `stderr`"
type: "boolean"
default: false
- name: "since"
in: "query"
description: "Only return logs since this time, as a UNIX timestamp"
type: "integer"
default: 0
- name: "timestamps"
in: "query"
description: "Add timestamps to every log line"
type: "boolean"
default: false
- name: "tail"
in: "query"
description: |
Only return this number of log lines from the end of the logs.
Specify as an integer or `all` to output all log lines.
type: "string"
default: "all"
tags: ["Service"]
/tasks:
get:
summary: "List tasks"
operationId: "TaskList"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/Task"
example:
- ID: "0kzzo1i0y4jz6027t0k7aezc7"
Version:
Index: 71
CreatedAt: "2016-06-07T21:07:31.171892745Z"
UpdatedAt: "2016-06-07T21:07:31.376370513Z"
Spec:
ContainerSpec:
Image: "redis"
Resources:
Limits: {}
Reservations: {}
RestartPolicy:
Condition: "any"
MaxAttempts: 0
Placement: {}
ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
Slot: 1
NodeID: "60gvrl6tm78dmak4yl7srz94v"
Status:
Timestamp: "2016-06-07T21:07:31.290032978Z"
State: "running"
Message: "started"
ContainerStatus:
ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
PID: 677
DesiredState: "running"
NetworksAttachments:
- Network:
ID: "4qvuz4ko70xaltuqbt8956gd1"
Version:
Index: 18
CreatedAt: "2016-06-07T20:31:11.912919752Z"
UpdatedAt: "2016-06-07T21:07:29.955277358Z"
Spec:
Name: "ingress"
Labels:
com.docker.swarm.internal: "true"
DriverConfiguration: {}
IPAMOptions:
Driver: {}
Configs:
- Subnet: "10.255.0.0/16"
Gateway: "10.255.0.1"
DriverState:
Name: "overlay"
Options:
com.docker.network.driver.overlay.vxlanid_list: "256"
IPAMOptions:
Driver:
Name: "default"
Configs:
- Subnet: "10.255.0.0/16"
Gateway: "10.255.0.1"
Addresses:
- "10.255.0.10/16"
- ID: "1yljwbmlr8er2waf8orvqpwms"
Version:
Index: 30
CreatedAt: "2016-06-07T21:07:30.019104782Z"
UpdatedAt: "2016-06-07T21:07:30.231958098Z"
Name: "hopeful_cori"
Spec:
ContainerSpec:
Image: "redis"
Resources:
Limits: {}
Reservations: {}
RestartPolicy:
Condition: "any"
MaxAttempts: 0
Placement: {}
ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
Slot: 1
NodeID: "60gvrl6tm78dmak4yl7srz94v"
Status:
Timestamp: "2016-06-07T21:07:30.202183143Z"
State: "shutdown"
Message: "shutdown"
ContainerStatus:
ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
DesiredState: "shutdown"
NetworksAttachments:
- Network:
ID: "4qvuz4ko70xaltuqbt8956gd1"
Version:
Index: 18
CreatedAt: "2016-06-07T20:31:11.912919752Z"
UpdatedAt: "2016-06-07T21:07:29.955277358Z"
Spec:
Name: "ingress"
Labels:
com.docker.swarm.internal: "true"
DriverConfiguration: {}
IPAMOptions:
Driver: {}
Configs:
- Subnet: "10.255.0.0/16"
Gateway: "10.255.0.1"
DriverState:
Name: "overlay"
Options:
com.docker.network.driver.overlay.vxlanid_list: "256"
IPAMOptions:
Driver:
Name: "default"
Configs:
- Subnet: "10.255.0.0/16"
Gateway: "10.255.0.1"
Addresses:
- "10.255.0.5/16"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the tasks list.
Available filters:
- `desired-state=(running | shutdown | accepted)`
- `id=<task id>`
- `label=key` or `label="key=value"`
- `name=<task name>`
- `node=<node id or name>`
- `service=<service name>`
tags: ["Task"]
/tasks/{id}:
get:
summary: "Inspect a task"
operationId: "TaskInspect"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Task"
404:
description: "no such task"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "ID of the task"
required: true
type: "string"
tags: ["Task"]
/tasks/{id}/logs:
get:
summary: "Get task logs"
description: |
Get `stdout` and `stderr` logs from a task.
See also [`/containers/{id}/logs`](#operation/ContainerLogs).
**Note**: This endpoint works only for services with the `local`,
`json-file` or `journald` logging drivers.
operationId: "TaskLogs"
produces:
- "application/vnd.docker.raw-stream"
- "application/vnd.docker.multiplexed-stream"
responses:
200:
description: "logs returned as a stream in response body"
schema:
type: "string"
format: "binary"
404:
description: "no such task"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such task: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID of the task"
type: "string"
- name: "details"
in: "query"
description: "Show task context and extra details provided to logs."
type: "boolean"
default: false
- name: "follow"
in: "query"
description: "Keep connection after returning logs."
type: "boolean"
default: false
- name: "stdout"
in: "query"
description: "Return logs from `stdout`"
type: "boolean"
default: false
- name: "stderr"
in: "query"
description: "Return logs from `stderr`"
type: "boolean"
default: false
- name: "since"
in: "query"
description: "Only return logs since this time, as a UNIX timestamp"
type: "integer"
default: 0
- name: "timestamps"
in: "query"
description: "Add timestamps to every log line"
type: "boolean"
default: false
- name: "tail"
in: "query"
description: |
Only return this number of log lines from the end of the logs.
Specify as an integer or `all` to output all log lines.
type: "string"
default: "all"
tags: ["Task"]
/secrets:
get:
summary: "List secrets"
operationId: "SecretList"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/Secret"
example:
- ID: "blt1owaxmitz71s9v5zh81zun"
Version:
Index: 85
CreatedAt: "2017-07-20T13:55:28.678958722Z"
UpdatedAt: "2017-07-20T13:55:28.678958722Z"
Spec:
Name: "mysql-passwd"
Labels:
some.label: "some.value"
Driver:
Name: "secret-bucket"
Options:
OptionA: "value for driver option A"
OptionB: "value for driver option B"
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
Version:
Index: 11
CreatedAt: "2016-11-05T01:20:17.327670065Z"
UpdatedAt: "2016-11-05T01:20:17.327670065Z"
Spec:
Name: "app-dev.crt"
Labels:
foo: "bar"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the secrets list.
Available filters:
- `id=<secret id>`
- `label=<key> or label=<key>=value`
- `name=<secret name>`
- `names=<secret name>`
tags: ["Secret"]
/secrets/create:
post:
summary: "Create a secret"
operationId: "SecretCreate"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "no error"
schema:
$ref: "#/definitions/IDResponse"
409:
description: "name conflicts with an existing object"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
schema:
allOf:
- $ref: "#/definitions/SecretSpec"
- type: "object"
example:
Name: "app-key.crt"
Labels:
foo: "bar"
Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
Driver:
Name: "secret-bucket"
Options:
OptionA: "value for driver option A"
OptionB: "value for driver option B"
tags: ["Secret"]
/secrets/{id}:
get:
summary: "Inspect a secret"
operationId: "SecretInspect"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Secret"
examples:
application/json:
ID: "ktnbjxoalbkvbvedmg1urrz8h"
Version:
Index: 11
CreatedAt: "2016-11-05T01:20:17.327670065Z"
UpdatedAt: "2016-11-05T01:20:17.327670065Z"
Spec:
Name: "app-dev.crt"
Labels:
foo: "bar"
Driver:
Name: "secret-bucket"
Options:
OptionA: "value for driver option A"
OptionB: "value for driver option B"
404:
description: "secret not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
type: "string"
description: "ID of the secret"
tags: ["Secret"]
delete:
summary: "Delete a secret"
operationId: "SecretDelete"
produces:
- "application/json"
responses:
204:
description: "no error"
404:
description: "secret not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
type: "string"
description: "ID of the secret"
tags: ["Secret"]
/secrets/{id}/update:
post:
summary: "Update a Secret"
operationId: "SecretUpdate"
responses:
200:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such secret"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "The ID or name of the secret"
type: "string"
required: true
- name: "body"
in: "body"
schema:
$ref: "#/definitions/SecretSpec"
description: |
The spec of the secret to update. Currently, only the Labels field
can be updated. All other fields must remain unchanged from the
[SecretInspect endpoint](#operation/SecretInspect) response values.
- name: "version"
in: "query"
description: |
The version number of the secret object being updated. This is
required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
tags: ["Secret"]
/configs:
get:
summary: "List configs"
operationId: "ConfigList"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/Config"
example:
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
Version:
Index: 11
CreatedAt: "2016-11-05T01:20:17.327670065Z"
UpdatedAt: "2016-11-05T01:20:17.327670065Z"
Spec:
Name: "server.conf"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to
process on the configs list.
Available filters:
- `id=<config id>`
- `label=<key> or label=<key>=value`
- `name=<config name>`
- `names=<config name>`
tags: ["Config"]
/configs/create:
post:
summary: "Create a config"
operationId: "ConfigCreate"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "no error"
schema:
$ref: "#/definitions/IDResponse"
409:
description: "name conflicts with an existing object"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
schema:
allOf:
- $ref: "#/definitions/ConfigSpec"
- type: "object"
example:
Name: "server.conf"
Labels:
foo: "bar"
Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
tags: ["Config"]
/configs/{id}:
get:
summary: "Inspect a config"
operationId: "ConfigInspect"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Config"
examples:
application/json:
ID: "ktnbjxoalbkvbvedmg1urrz8h"
Version:
Index: 11
CreatedAt: "2016-11-05T01:20:17.327670065Z"
UpdatedAt: "2016-11-05T01:20:17.327670065Z"
Spec:
Name: "app-dev.crt"
404:
description: "config not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
type: "string"
description: "ID of the config"
tags: ["Config"]
delete:
summary: "Delete a config"
operationId: "ConfigDelete"
produces:
- "application/json"
responses:
204:
description: "no error"
404:
description: "config not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
type: "string"
description: "ID of the config"
tags: ["Config"]
/configs/{id}/update:
post:
summary: "Update a Config"
operationId: "ConfigUpdate"
responses:
200:
description: "no error"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such config"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
503:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
description: "The ID or name of the config"
type: "string"
required: true
- name: "body"
in: "body"
schema:
$ref: "#/definitions/ConfigSpec"
description: |
The spec of the config to update. Currently, only the Labels field
can be updated. All other fields must remain unchanged from the
[ConfigInspect endpoint](#operation/ConfigInspect) response values.
- name: "version"
in: "query"
description: |
The version number of the config object being updated. This is
required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
tags: ["Config"]
/distribution/{name}/json:
get:
summary: "Get image information from the registry"
description: |
Return image digest and platform information by contacting the registry.
operationId: "DistributionInspect"
produces:
- "application/json"
responses:
200:
description: "descriptor and platform information"
schema:
$ref: "#/definitions/DistributionInspect"
401:
description: "Failed authentication or no image found"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such image: someimage (tag: latest)"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
in: "path"
description: "Image name or id"
type: "string"
required: true
tags: ["Distribution"]
/session:
post:
summary: "Initialize interactive session"
description: |
Start a new interactive session with a server. Session allows server to
call back to the client for advanced capabilities.
### Hijacking
This endpoint hijacks the HTTP connection to HTTP2 transport that allows
the client to expose gRPC services on that connection.
For example, the client sends this request to upgrade the connection:
```
POST /session HTTP/1.1
Upgrade: h2c
Connection: Upgrade
```
The Docker daemon responds with a `101 UPGRADED` response followed by
the raw stream:
```
HTTP/1.1 101 UPGRADED
Connection: Upgrade
Upgrade: h2c
```
operationId: "Session"
produces:
- "application/vnd.docker.raw-stream"
responses:
101:
description: "no error, hijacking successful"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Session"] | unknown | github | https://github.com/moby/moby | api/docs/v1.49.yaml |
/*
MIT License http://www.opensource.org/licenses/mit-license.php
*/
"use strict";
const RuntimeGlobals = require("../RuntimeGlobals");
const RuntimeModule = require("../RuntimeModule");
/** @typedef {import("../Compilation")} Compilation */
/**
 * Runtime module that exposes the compilation's full hash at runtime
 * via `__webpack_require__.h` (RuntimeGlobals.getFullHash).
 */
class GetFullHashRuntimeModule extends RuntimeModule {
	constructor() {
		super("getFullHash");

		// This module embeds the compilation hash, so it must be
		// regenerated whenever the full hash changes.
		/** @type {boolean} */
		this.fullHash = true;
	}

	/**
	 * @returns {string | null} runtime code
	 */
	generate() {
		const compilation = /** @type {Compilation} */ (this.compilation);
		// "XXXX" is the placeholder used before the real hash is known.
		const hashLiteral = JSON.stringify(compilation.hash || "XXXX");
		const fn = compilation.runtimeTemplate.returningFunction(hashLiteral);
		return `${RuntimeGlobals.getFullHash} = ${fn}`;
	}
}

module.exports = GetFullHashRuntimeModule;
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
name: 'Setup Breeze'
description: 'Sets up Python and Breeze'
inputs:
python-version:
description: 'Python version to use'
default: "3.10"
uv-version:
description: 'uv version to use'
default: "0.10.2" # Keep this comment to allow automatic replacement of uv version
outputs:
host-python-version:
description: Python version used in host
value: ${{ steps.host-python-version.outputs.host-python-version }}
runs:
using: "composite"
steps:
- name: "Setup python"
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: ${{ inputs.python-version }}
- name: "Install uv"
shell: bash
run: curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh
env:
UV_VERSION: ${{ inputs.uv-version }}
# NOTE! Installing Breeze without using cache is FASTER than when using cache - uv is so fast and has
# such low overhead that just running upload cache/restore cache is slower than installing it from scratch
- name: "Install Breeze"
shell: bash
run: ./scripts/ci/install_breeze.sh
env:
PYTHON_VERSION: "${{ inputs.python-version }}"
- name: "Free space"
shell: bash
run: breeze ci free-space
env:
AIRFLOW_ROOT_PATH: "${{ github.workspace }}"
- name: "Get Python version"
shell: bash
run: >
echo "host-python-version=$(python -c 'import platform; print(platform.python_version())')"
>> ${GITHUB_OUTPUT}
id: host-python-version
- name: "Disable cheatsheet"
shell: bash
run: breeze setup config --no-cheatsheet --no-asciiart
env:
AIRFLOW_ROOT_PATH: "${{ github.workspace }}" | unknown | github | https://github.com/apache/airflow | .github/actions/breeze/action.yml |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pending.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PendingWidget(object):
    """Builds the "pending transactions" widget.

    The widget contains a tab widget with two tabs -- Loans and Debts --
    each holding a read-only five-column table, plus a Refresh button
    wired to the widget's ``refresh`` slot.

    NOTE: originally generated by pyuic5 from ``pending.ui``; keep the
    object names and widget tree identical to the generated output.
    """

    def setupUi(self, PendingWidget):
        PendingWidget.setObjectName("PendingWidget")
        PendingWidget.resize(542, 297)

        self.verticalLayout = QtWidgets.QVBoxLayout(PendingWidget)
        self.verticalLayout.setObjectName("verticalLayout")

        self.tab_widget = QtWidgets.QTabWidget(PendingWidget)
        self.tab_widget.setObjectName("tab_widget")

        # --- Loans tab -------------------------------------------------
        self.loans_tab = QtWidgets.QWidget()
        self.loans_tab.setObjectName("loans_tab")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.loans_tab)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.loans_table = QtWidgets.QTableWidget(self.loans_tab)
        self.loans_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.loans_table.setObjectName("loans_table")
        self.loans_table.setColumnCount(5)
        self.loans_table.setRowCount(0)
        for column in range(5):
            self.loans_table.setHorizontalHeaderItem(
                column, QtWidgets.QTableWidgetItem())
        self.verticalLayout_2.addWidget(self.loans_table)
        self.tab_widget.addTab(self.loans_tab, "")

        # --- Debts tab -------------------------------------------------
        self.debts_tab = QtWidgets.QWidget()
        self.debts_tab.setObjectName("debts_tab")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.debts_tab)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.debts_table = QtWidgets.QTableWidget(self.debts_tab)
        self.debts_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.debts_table.setObjectName("debts_table")
        self.debts_table.setColumnCount(5)
        self.debts_table.setRowCount(0)
        for column in range(5):
            self.debts_table.setHorizontalHeaderItem(
                column, QtWidgets.QTableWidgetItem())
        self.verticalLayout_3.addWidget(self.debts_table)
        self.tab_widget.addTab(self.debts_tab, "")

        self.verticalLayout.addWidget(self.tab_widget)

        # --- Bottom row: spacer pushes the Refresh button to the right -
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(
            40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.refresh_button = QtWidgets.QPushButton(PendingWidget)
        self.refresh_button.setObjectName("refresh_button")
        self.horizontalLayout.addWidget(self.refresh_button)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(PendingWidget)
        self.tab_widget.setCurrentIndex(0)
        self.refresh_button.clicked.connect(PendingWidget.refresh)
        QtCore.QMetaObject.connectSlotsByName(PendingWidget)

    def retranslateUi(self, PendingWidget):
        _translate = QtCore.QCoreApplication.translate
        PendingWidget.setWindowTitle(_translate("PendingWidget", "Form"))
        # Column 0 deliberately receives no caption (as in the .ui file);
        # columns 1-4 are captioned, with only the counterparty differing
        # between the two tables.
        captioned = (
            (self.loans_table, "Borrower"),
            (self.debts_table, "Loaner"),
        )
        for table, counterparty in captioned:
            headers = ("UOMe ID", counterparty, "Amount", "Description")
            for column, caption in enumerate(headers, start=1):
                item = table.horizontalHeaderItem(column)
                item.setText(_translate("PendingWidget", caption))
        self.tab_widget.setTabText(
            self.tab_widget.indexOf(self.loans_tab),
            _translate("PendingWidget", "Loans"))
        self.tab_widget.setTabText(
            self.tab_widget.indexOf(self.debts_tab),
            _translate("PendingWidget", "Debts"))
        self.refresh_button.setText(_translate("PendingWidget", "Refresh"))
# -*- coding: utf-8 -*-
"""
flask.testsuite.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr
from werkzeug.http import parse_cache_control_header, parse_options_header
from flask._compat import StringIO, text_type
def has_encoding(name):
    """Return True if this Python build knows the codec called *name*."""
    import codecs
    try:
        codecs.lookup(name)
    except LookupError:
        return False
    return True
class JSONTestCase(FlaskTestCase):
    """Tests for Flask's JSON handling: request parsing, :func:`flask.jsonify`,
    template escaping and encoder/decoder customization."""

    def test_json_bad_requests(self):
        """A malformed JSON request body must produce a 400 response."""
        app = flask.Flask(__name__)

        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.jsonify(foo=text_type(flask.request.get_json()))
        c = app.test_client()
        rv = c.post('/json', data='malformed', content_type='application/json')
        self.assert_equal(rv.status_code, 400)

    def test_json_body_encoding(self):
        """The charset in the request Content-Type header is honored."""
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            return flask.request.get_json()
        c = app.test_client()
        resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
                     content_type='application/json; charset=iso-8859-15')
        self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8'))

    def test_jsonify(self):
        """jsonify accepts keyword arguments as well as a single dict."""
        d = dict(a=23, b=42, c=[1, 2, 3])
        app = flask.Flask(__name__)

        @app.route('/kw')
        def return_kwargs():
            return flask.jsonify(**d)

        @app.route('/dict')
        def return_dict():
            return flask.jsonify(d)
        c = app.test_client()
        for url in '/kw', '/dict':
            rv = c.get(url)
            self.assert_equal(rv.mimetype, 'application/json')
            self.assert_equal(flask.json.loads(rv.data), d)

    def test_json_as_unicode(self):
        """JSON_AS_ASCII toggles \\uXXXX escaping of non-ASCII characters."""
        app = flask.Flask(__name__)

        app.config['JSON_AS_ASCII'] = True
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            self.assert_equal(rv, '"\\u2603"')

        app.config['JSON_AS_ASCII'] = False
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            self.assert_equal(rv, u'"\u2603"')

    def test_json_attr(self):
        """Values from a parsed JSON body are usable inside a view."""
        app = flask.Flask(__name__)

        @app.route('/add', methods=['POST'])
        def add():
            json = flask.request.get_json()
            return text_type(json['a'] + json['b'])
        c = app.test_client()
        rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
                    content_type='application/json')
        self.assert_equal(rv.data, b'3')

    def test_template_escaping(self):
        """htmlsafe_dumps / |tojson escape characters dangerous in HTML."""
        app = flask.Flask(__name__)
        render = flask.render_template_string
        with app.test_request_context():
            rv = flask.json.htmlsafe_dumps('</script>')
            self.assert_equal(rv, u'"\\u003c/script\\u003e"')
            self.assert_equal(type(rv), text_type)
            rv = render('{{ "</script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c/script\\u003e"')
            rv = render('{{ "<\0/script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"')
            rv = render('{{ "<!--<script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"')
            rv = render('{{ "&"|tojson }}')
            self.assert_equal(rv, '"\\u0026"')

    def test_json_customization(self):
        """Applications may install custom JSON encoder/decoder classes."""
        class X(object):
            def __init__(self, val):
                self.val = val

        class MyEncoder(flask.json.JSONEncoder):
            def default(self, o):
                if isinstance(o, X):
                    return '<%d>' % o.val
                return flask.json.JSONEncoder.default(self, o)

        class MyDecoder(flask.json.JSONDecoder):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('object_hook', self.object_hook)
                flask.json.JSONDecoder.__init__(self, *args, **kwargs)

            def object_hook(self, obj):
                if len(obj) == 1 and '_foo' in obj:
                    return X(obj['_foo'])
                return obj
        app = flask.Flask(__name__)
        app.testing = True
        app.json_encoder = MyEncoder
        app.json_decoder = MyDecoder

        @app.route('/', methods=['POST'])
        def index():
            return flask.json.dumps(flask.request.get_json()['x'])
        c = app.test_client()
        rv = c.post('/', data=flask.json.dumps({
            'x': {'_foo': 42}
        }), content_type='application/json')
        # Use the FlaskTestCase alias for consistency with the rest of the
        # suite (was: self.assertEqual).
        self.assert_equal(rv.data, b'"<42>"')

    def test_modified_url_encoding(self):
        """A custom request class may override the URL charset."""
        class ModifiedRequest(flask.Request):
            url_charset = 'euc-kr'
        app = flask.Flask(__name__)
        app.testing = True
        app.request_class = ModifiedRequest
        app.url_map.charset = 'euc-kr'

        @app.route('/')
        def index():
            return flask.request.args['foo']
        rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
        self.assert_equal(rv.status_code, 200)
        self.assert_equal(rv.data, u'정상처리'.encode('utf-8'))

    # Skip the test entirely on interpreters that lack the euc-kr codec.
    if not has_encoding('euc-kr'):
        test_modified_url_encoding = None

    def test_json_key_sorting(self):
        """With JSON_SORT_KEYS enabled (the default) object keys are
        serialized in sorted order."""
        app = flask.Flask(__name__)
        app.testing = True
        self.assert_equal(app.config['JSON_SORT_KEYS'], True)
        d = dict.fromkeys(range(20), 'foo')

        @app.route('/')
        def index():
            return flask.jsonify(values=d)
        c = app.test_client()
        rv = c.get('/')
        lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
        self.assert_equal(lines, [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo"',
            '}',
            '}'
        ])
class SendfileTestCase(FlaskTestCase):
    """Tests for :func:`flask.send_file` and the static file machinery.

    NOTE(review): several tests assert exact warning counts under
    ``catch_warnings``; those counts depend on the precise sequence of
    ``send_file`` calls inside each context, so keep the call order intact.
    """

    def test_send_file_regular(self):
        """send_file on a filename streams the file with the right mimetype."""
        app = flask.Flask(__name__)
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            self.assert_true(rv.direct_passthrough)
            self.assert_equal(rv.mimetype, 'text/html')
            with app.open_resource('static/index.html') as f:
                # direct_passthrough must be disabled to read .data.
                rv.direct_passthrough = False
                self.assert_equal(rv.data, f.read())
            rv.close()

    def test_send_file_xsendfile(self):
        """With use_x_sendfile the response carries an X-Sendfile header
        holding the absolute file path instead of the file body."""
        app = flask.Flask(__name__)
        app.use_x_sendfile = True
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            self.assert_true(rv.direct_passthrough)
            self.assert_in('x-sendfile', rv.headers)
            self.assert_equal(rv.headers['x-sendfile'],
                os.path.join(app.root_path, 'static/index.html'))
            self.assert_equal(rv.mimetype, 'text/html')
            rv.close()

    def test_send_file_object(self):
        """send_file also accepts open file objects and StringIO streams;
        passing objects (not names) triggers deprecation-style warnings."""
        app = flask.Flask(__name__)
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                with app.open_resource('static/index.html') as f:
                    self.assert_equal(rv.data, f.read())
                self.assert_equal(rv.mimetype, 'text/html')
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        app.use_x_sendfile = True
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                self.assert_equal(rv.mimetype, 'text/html')
                self.assert_in('x-sendfile', rv.headers)
                self.assert_equal(rv.headers['x-sendfile'],
                    os.path.join(app.root_path, 'static/index.html'))
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        app.use_x_sendfile = False
        with app.test_request_context():
            with catch_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                self.assert_equal(rv.data, b'Test')
                # No filename available, so the generic type is used.
                self.assert_equal(rv.mimetype, 'application/octet-stream')
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)
            with catch_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f, mimetype='text/plain')
                rv.direct_passthrough = False
                self.assert_equal(rv.data, b'Test')
                self.assert_equal(rv.mimetype, 'text/plain')
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)

        # x-sendfile cannot be used for in-memory objects; no header expected.
        app.use_x_sendfile = True
        with catch_warnings() as captured:
            with app.test_request_context():
                f = StringIO('Test')
                rv = flask.send_file(f)
                self.assert_not_in('x-sendfile', rv.headers)
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)

    def test_attachment(self):
        """as_attachment sets a Content-Disposition: attachment header,
        with the filename taken from the file or attachment_filename."""
        app = flask.Flask(__name__)
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f, as_attachment=True)
                value, options = parse_options_header(rv.headers['Content-Disposition'])
                self.assert_equal(value, 'attachment')
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        with app.test_request_context():
            self.assert_equal(options['filename'], 'index.html')
            rv = flask.send_file('static/index.html', as_attachment=True)
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            self.assert_equal(value, 'attachment')
            self.assert_equal(options['filename'], 'index.html')
            rv.close()

        with app.test_request_context():
            rv = flask.send_file(StringIO('Test'), as_attachment=True,
                                 attachment_filename='index.txt',
                                 add_etags=False)
            self.assert_equal(rv.mimetype, 'text/plain')
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            self.assert_equal(value, 'attachment')
            self.assert_equal(options['filename'], 'index.txt')
            rv.close()

    def test_static_file(self):
        """Cache-Control max-age comes from the default (12h), from
        SEND_FILE_MAX_AGE_DEFAULT, or from get_send_file_max_age()."""
        app = flask.Flask(__name__)
        # default cache timeout is 12 hours
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 12 * 60 * 60)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 12 * 60 * 60)
            rv.close()
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 3600)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 3600)
            rv.close()

        # A subclass may override the max-age per filename.
        class StaticFileApp(flask.Flask):
            def get_send_file_max_age(self, filename):
                return 10
        app = StaticFileApp(__name__)
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 10)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 10)
            rv.close()
class LoggingTestCase(FlaskTestCase):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
self.assert_true(app.logger is logger1)
self.assert_equal(logger1.name, __name__)
app.logger_name = __name__ + '/test_logger_cache'
self.assert_true(app.logger is not logger1)
def test_debug_log(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
with catch_stderr() as err:
c.get('/')
out = err.getvalue()
self.assert_in('WARNING in helpers [', out)
self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out)
self.assert_in('the standard library is dead', out)
self.assert_in('this is a debug statement', out)
with catch_stderr() as err:
try:
c.get('/exc')
except ZeroDivisionError:
pass
else:
self.assert_true(False, 'debug log ate the exception')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
self.assert_equal(app.logger.level, 10)
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
err = out.getvalue()
self.assert_in('Exception on / [GET]', err)
self.assert_in('Traceback (most recent call last):', err)
self.assert_in('1 // 0', err)
self.assert_in('ZeroDivisionError:', err)
def test_processor_exceptions(self):
app = flask.Flask(__name__)
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_equal(rv.data, b'Hello Server Error')
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index', _anchor='x y'),
'/#x%20y')
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index',
_external=True,
_scheme='https'),
'https://localhost/')
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
self.assert_equal(flask.url_for('myview', _method='GET'),
'/myview/')
self.assert_equal(flask.url_for('myview', id=42, _method='GET'),
'/myview/42')
self.assert_equal(flask.url_for('myview', _method='POST'),
'/myview/create')
class NoImportsTestCase(FlaskTestCase):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self):
try:
flask.Flask('importerror')
except NotImplementedError:
self.fail('Flask(import_name) is importing import_name.')
class StreamingTestCase(FlaskTestCase):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(generate())
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
self.assertEqual(called, [42])
def suite():
suite = unittest.TestSuite()
if flask.json_available:
suite.addTest(unittest.makeSuite(JSONTestCase))
suite.addTest(unittest.makeSuite(SendfileTestCase))
suite.addTest(unittest.makeSuite(LoggingTestCase))
suite.addTest(unittest.makeSuite(NoImportsTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
return suite | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import boto
import boto.s3.key
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def s3_upload_dir(bucket, path, prefix="", connection_data=None):
if isinstance(bucket, basestring):
with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
bucket = conn.lookup(bucket)
for root, dirs, files in os.walk(path):
for fil in files:
with contextlib.closing(boto.s3.key.Key(bucket)) as key:
source = root + os.sep + fil
target = re.sub("^" + re.escape(path) + "?/", prefix, source)
if os.sep != '/':
target = re.sub(re.escape(os.sep), '/', target)
key.key = target
LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
key.set_contents_from_filename(source) | unknown | codeparrot/codeparrot-clean | ||
"""
Copyright 2017 Oliver Smith
This file is part of pmbootstrap.
pmbootstrap is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pmbootstrap is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pmbootstrap. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import logging
import pmb.config
def replace_variables(apkbuild):
"""
Replace a hardcoded list of variables inside the APKBUILD.
"""
ret = apkbuild
# _flavor: ${_device} (lineageos kernel packages)
ret["_flavor"] = ret["_flavor"].replace("${_device}",
ret["_device"])
# pkgname: $_flavor
ret["pkgname"] = ret["pkgname"].replace("${_flavor}", ret["_flavor"])
# subpackages: $pkgname
replaced = []
for subpackage in ret["subpackages"]:
replaced.append(subpackage.replace("$pkgname", ret["pkgname"]))
ret["subpackages"] = replaced
# makedepends: $makedepends_host, $makedepends_build, $_llvmver
replaced = []
for makedepend in ret["makedepends"]:
if makedepend.startswith("$"):
key = makedepend[1:]
if key in ret:
replaced += ret[key]
else:
raise RuntimeError("Could not resolve variable " +
makedepend + " in APKBUILD of " +
apkbuild["pkgname"])
else:
# replace in the middle of the string
for var in ["_llvmver"]:
makedepend = makedepend.replace("$" + var, ret[var])
replaced += [makedepend]
# Python: ${pkgname#py-}
if ret["pkgname"].startswith("py-"):
replacement = ret["pkgname"][3:]
for var in ["depends", "makedepends", "subpackages"]:
for i in range(len(ret[var])):
ret[var][i] = ret[var][i].replace(
"${pkgname#py-}", replacement)
ret["makedepends"] = replaced
return ret
def cut_off_function_names(apkbuild):
"""
For subpackages: only keep the subpackage name, without the internal
function name, that tells how to build the subpackage.
"""
sub = apkbuild["subpackages"]
for i in range(len(sub)):
sub[i] = sub[i].split(":", 1)[0]
apkbuild["subpackages"] = sub
return apkbuild
def apkbuild(args, path):
"""
Parse relevant information out of the APKBUILD file. This is not meant
to be perfect and catch every edge case (for that, a full shell parser
would be necessary!). Instead, it should just work with the use-cases
covered by pmbootstrap and not take too long.
:param path: Full path to the APKBUILD
:returns: Relevant variables from the APKBUILD. Arrays get returned as
arrays.
"""
# Try to get a cached result first (we assume, that the aports don't change
# in one pmbootstrap call)
if path in args.cache["apkbuild"]:
return args.cache["apkbuild"][path]
# Read the file and check line endings
with open(path, encoding="utf-8") as handle:
lines = handle.readlines()
if handle.newlines != '\n':
raise RuntimeError("Wrong line endings in APKBUILD: " + path)
# Parse all attributes from the config
ret = {}
for i in range(len(lines)):
for attribute, options in pmb.config.apkbuild_attributes.items():
if not lines[i].startswith(attribute + "="):
continue
# Extend the line value until we reach the ending quote sign
line_value = lines[i][len(attribute + "="):-1]
end_char = None
if line_value.startswith("\""):
end_char = "\""
value = ""
first_line = i
while i < len(lines) - 1:
value += line_value.replace("\"", "").strip()
if not end_char:
break
elif line_value.endswith(end_char):
# This check is needed to allow line break directly after opening quote
if i != first_line or line_value.count(end_char) > 1:
break
value += " "
i += 1
line_value = lines[i][:-1]
# Split up arrays, delete empty strings inside the list
if options["array"]:
if value:
value = list(filter(None, value.split(" ")))
else:
value = []
ret[attribute] = value
# Add missing keys
for attribute, options in pmb.config.apkbuild_attributes.items():
if attribute not in ret:
if options["array"]:
ret[attribute] = []
else:
ret[attribute] = ""
# Properly format values
ret = replace_variables(ret)
ret = cut_off_function_names(ret)
# Sanity check: pkgname
suffix = "/" + ret["pkgname"] + "/APKBUILD"
if not os.path.realpath(path).endswith(suffix):
logging.info("Folder: '" + os.path.dirname(path) + "'")
logging.info("Pkgname: '" + ret["pkgname"] + "'")
raise RuntimeError("The pkgname must be equal to the name of"
" the folder, that contains the APKBUILD!")
# Sanity check: arch
if not len(ret["arch"]):
raise RuntimeError("Arch must not be empty: " + path)
# Fill cache
args.cache["apkbuild"][path] = ret
return ret | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_dvs_portgroup_find
short_description: Find portgroup(s) in a VMware environment
description:
- Find portgroup(s) based on different criteria such as distributed vSwitch, VLAN id or a string in the name.
version_added: 2.9
author:
- David Martinez (@dx0xm)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.7
- PyVmomi
options:
dvswitch:
description:
- Name of a distributed vSwitch to look for.
type: str
vlanid:
description:
- VLAN id can be any number between 1 and 4094.
- This search criteria will looks into VLAN ranges to find possible matches.
required: false
type: int
name:
description:
- string to check inside the name of the portgroup.
- Basic containment check using python C(in) operation.
type: str
show_uplink:
description:
- Show or hide uplink portgroups.
- Only relevant when C(vlanid) is supplied.
type: bool
default: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Get all portgroups in dvswitch vDS
vmware_dvs_portgroup_find:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
dvswitch: 'vDS'
validate_certs: no
delegate_to: localhost
- name: Confirm if vlan 15 is present
vmware_dvs_portgroup_find:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
vlanid: '15'
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
dvs_portgroups:
description: basic details of portgroups found
returned: on success
type: list
sample: [
{
"dvswitch": "vDS",
"name": "N-51",
"pvlan": true,
"trunk": true,
"vlan_id": "0"
}
]
'''
try:
from pyVmomi import vim
except ImportError as e:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_dvs_by_name
class DVSPortgroupFindManager(PyVmomi):
def __init__(self, module):
super(DVSPortgroupFindManager, self).__init__(module)
self.dvs_name = self.params['dvswitch']
self.vlan = self.params['vlanid']
self.cmp_vlans = True if self.vlan else False
self.pgs = self.find_portgroups_by_name(self.content, self.module.params['name'])
if self.dvs_name:
self.pgs = self.find_portgroups_by_dvs(self.pgs, self.dvs_name)
def find_portgroups_by_name(self, content, name=None):
vimtype = [vim.dvs.DistributedVirtualPortgroup]
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
if not name:
obj = container.view
else:
obj = []
for c in container.view:
if name in c.name:
obj.append(c)
return obj
def find_portgroups_by_dvs(self, pgl, dvs):
obj = []
for c in pgl:
if dvs in c.config.distributedVirtualSwitch.name:
obj.append(c)
return obj
def vlan_match(self, pgup, userup, vlanlst):
res = False
if pgup and userup:
return True
for ln in vlanlst:
if '-' in ln:
arr = ln.split('-')
if arr[0] < self.vlan and self.vlan < arr[1]:
res = True
elif ln == str(self.vlan):
res = True
return res
def get_dvs_portgroup(self):
pgroups = self.pgs
pglist = []
for pg in pgroups:
trunk = False
pvlan = False
vlanInfo = pg.config.defaultPortConfig.vlan
cl1 = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec
cl2 = vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec
vlan_id_list = []
if isinstance(vlanInfo, cl1):
trunk = True
for item in vlanInfo.vlanId:
if item.start == item.end:
vlan_id_list.append(str(item.start))
else:
vlan_id_list.append(str(item.start) + '-' + str(item.end))
elif isinstance(vlanInfo, cl2):
pvlan = True
vlan_id_list.append(str(vlanInfo.pvlanId))
else:
vlan_id_list.append(str(vlanInfo.vlanId))
if self.cmp_vlans:
if self.vlan_match(pg.config.uplink, self.module.params['show_uplink'], vlan_id_list):
pglist.append(dict(
name=pg.name,
trunk=trunk,
pvlan=pvlan,
vlan_id=','.join(vlan_id_list),
dvswitch=pg.config.distributedVirtualSwitch.name))
else:
pglist.append(dict(
name=pg.name,
trunk=trunk,
pvlan=pvlan,
vlan_id=','.join(vlan_id_list),
dvswitch=pg.config.distributedVirtualSwitch.name))
return pglist
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dvswitch=dict(type='str', required=False),
vlanid=dict(type='int', required=False),
name=dict(type='str', required=False),
show_uplink=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['show_uplink', 'True', 'vlanid']
]
)
dvs_pg_mgr = DVSPortgroupFindManager(module)
module.exit_json(changed=False,
dvs_portgroups=dvs_pg_mgr.get_dvs_portgroup())
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
//===- bolt/Profile/DataAggregator.h - Perf data aggregator -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions reads profile data written by perf record,
// aggregates it and then writes it back to an output file.
//
//===----------------------------------------------------------------------===//
#ifndef BOLT_PROFILE_DATA_AGGREGATOR_H
#define BOLT_PROFILE_DATA_AGGREGATOR_H
#include "bolt/Profile/DataReader.h"
#include "bolt/Profile/YAMLProfileWriter.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Program.h"
#include <limits>
#include <unordered_map>
namespace llvm {
namespace bolt {
class BinaryFunction;
class BinaryContext;
class BoltAddressTranslation;
/// DataAggregator inherits all parsing logic from DataReader as well as
/// its data structures used to represent aggregated profile data in memory.
///
/// The aggregator works by dispatching two separate perf-script jobs that
/// read perf samples and perf task annotations. Later, we read the output
/// files to extract information about which PID was used for this binary.
/// With the PID, we filter the samples and extract all LBR entries.
///
/// To aggregate LBR entries, we rely on a BinaryFunction map to locate the
/// original function where the event happened. Then, we convert a raw address
/// to an offset relative to the start of this function and aggregate branch
/// information for each function.
///
/// This must be coordinated with RewriteInstance so we have BinaryFunctions in
/// State::Disassembled. After this state, BinaryFunction will drop the
/// instruction map with original addresses we rely on to validate the traces
/// found in the LBR.
///
/// The last step is to write the aggregated data to disk in the output file
/// specified by the user.
class DataAggregator : public DataReader {
public:
  /// Construct an aggregator over the perf data file \p Filename and invoke
  /// the inherited start() (see DataReader for what start() initiates).
  explicit DataAggregator(StringRef Filename) : DataReader(Filename) {
    start();
  }

  ~DataAggregator();

  StringRef getReaderName() const override { return "perf data aggregator"; }

  /// Profile coming straight from perf is treated as a trusted source.
  bool isTrustedSource() const override { return true; }

  Error preprocessProfile(BinaryContext &BC) override;

  /// Nothing to do before the CFG is built for perf-based profiles.
  Error readProfilePreCFG(BinaryContext &BC) override {
    return Error::success();
  }

  Error readProfile(BinaryContext &BC) override;

  /// Check whether the aggregated profile may contain samples for \p BF.
  bool mayHaveProfileData(const BinaryFunction &BF) override;

  /// Set Bolt Address Translation Table when processing samples collected in
  /// bolted binaries
  void setBAT(BoltAddressTranslation *B) override { BAT = B; }

  /// Check whether \p FileName is a perf.data file
  static bool checkPerfDataMagic(StringRef FileName);
private:
  /// A single Last Branch Record entry: a taken branch from \p From to \p To
  /// together with its misprediction flag.
  struct LBREntry {
    uint64_t From;
    uint64_t To;
    bool Mispred;
  };
  friend raw_ostream &operator<<(raw_ostream &OS, const LBREntry &);

  /// Grants unit tests access to private members.
  friend struct PerfSpeEventsTestHelper;

  /// One perf branch sample: the LBR stack captured at the sample point.
  struct PerfBranchSample {
    SmallVector<LBREntry, 32> LBR;
  };

  /// One non-LBR (basic) sample: the triggering event name and the sampled
  /// program counter.
  struct PerfBasicSample {
    StringRef EventName;
    uint64_t PC;
  };

  /// One memory-event sample: instruction address \p PC and the associated
  /// data address \p Addr (see parseMemSample).
  struct PerfMemSample {
    uint64_t PC;
    uint64_t Addr;
  };
  /// Container for the unit of branch data, matching pre-aggregated trace type.
  /// Backwards compatible with branch and fall-through types:
  /// - if \p To is < 0, the trace only contains branch data (BR_ONLY),
  /// - if \p Branch is < 0, the trace only contains fall-through data
  ///   (FT_ONLY, FT_EXTERNAL_ORIGIN, or FT_EXTERNAL_RETURN).
  struct Trace {
    // Sentinel values encoded in Branch/To to mark the trace kinds described
    // above. BR_ONLY and FT_ONLY intentionally share the same bit pattern:
    // which field holds the sentinel disambiguates the kind.
    static constexpr const uint64_t EXTERNAL = 0ULL;
    static constexpr const uint64_t BR_ONLY =
        std::numeric_limits<uint64_t>::max();
    static constexpr const uint64_t FT_ONLY =
        std::numeric_limits<uint64_t>::max();
    static constexpr const uint64_t FT_EXTERNAL_ORIGIN =
        std::numeric_limits<uint64_t>::max() - 1;
    static constexpr const uint64_t FT_EXTERNAL_RETURN =
        std::numeric_limits<uint64_t>::max() - 2;

    uint64_t Branch;
    uint64_t From;
    uint64_t To;

    /// Tuple view of all fields, shared by comparison and hashing.
    auto tie() const { return std::tie(Branch, From, To); }
    bool operator==(const Trace &Other) const { return tie() == Other.tie(); }
    bool operator<(const Trace &Other) const { return tie() < Other.tie(); }
  };
  friend raw_ostream &operator<<(raw_ostream &OS, const Trace &);

  /// Hash functor so Trace can be used as an unordered_map key.
  struct TraceHash {
    size_t operator()(const Trace &L) const { return hash_combine(L.tie()); }
  };

  /// Aggregated execution counts attached to a trace or branch.
  struct TakenBranchInfo {
    uint64_t TakenCount{0};
    uint64_t MispredCount{0};
  };
  /// Intermediate storage for profile data. We save the results of parsing
  /// and use them later for processing and assigning profile.
  std::unordered_map<Trace, TakenBranchInfo, TraceHash> TraceMap;
  // NOTE(review): appears to be a flattened/ordered view of TraceMap —
  // confirm where it is populated (processBranchEvents).
  std::vector<std::pair<Trace, TakenBranchInfo>> Traces;
  /// Pre-populated addresses of returns, coming from pre-aggregated data or
  /// disassembly. Used to disambiguate call-continuation fall-throughs.
  std::unordered_map<uint64_t, bool> Returns;
  /// Aggregated non-LBR sample counts keyed by sample address.
  std::unordered_map<uint64_t, uint64_t> BasicSamples;
  std::vector<PerfMemSample> MemSamples;
template <typename T> void clear(T &Container) {
T TempContainer;
TempContainer.swap(Container);
}
  /// Perf utility full path name
  std::string PerfPath;

  /// Perf process spawning bookkeeping
  struct PerfProcessInfo {
    // NOTE(review): presumably flipped once this perf job's output has been
    // consumed — confirm at the use sites.
    bool IsFinished{false};
    sys::ProcessInfo PI;
    /// Temporary files capturing the child process's stdout and stderr.
    SmallVector<char, 256> StdoutPath;
    SmallVector<char, 256> StderrPath;
  };

  /// Process info for spawned processes, one per perf-script invocation.
  PerfProcessInfo MainEventsPPI;
  PerfProcessInfo MemEventsPPI;
  PerfProcessInfo MMapEventsPPI;
  PerfProcessInfo TaskEventsPPI;

  /// Kernel VM starts at fixed base address
  /// https://www.kernel.org/doc/Documentation/x86/x86_64/mm.txt
  static constexpr uint64_t KernelBaseAddr = 0xffff800000000000;

  /// Current list of created temporary files
  std::vector<std::string> TempFiles;

  /// Name of the binary with matching build-id from perf.data if different
  /// from the file name in BC.
  std::string BuildIDBinaryName;
  /// Memory map info for a single file as recorded in perf.data
  /// When a binary has multiple text segments, the Size is computed as the
  /// difference of the last address of these segments from the BaseAddress.
  /// The base addresses of all text segments must be the same.
  struct MMapInfo {
    uint64_t BaseAddress{0}; /// Base address of the mapped binary.
    uint64_t MMapAddress{0}; /// Address of the executable segment.
    uint64_t Size{0};        /// Size of the mapping.
    uint64_t Offset{0};      /// File offset of the mapped segment.
    int32_t PID{-1};         /// Process ID.
    bool Forked{false};      /// Was the process forked?
    uint64_t Time{0ULL};     /// Time in micro seconds.
  };

  /// Per-PID map info for the binary
  std::unordered_map<uint64_t, MMapInfo> BinaryMMapInfo;

  /// Fork event info
  struct ForkInfo {
    int32_t ParentPID;
    int32_t ChildPID;
    uint64_t Time{0ULL}; /// Event time (same units as MMapInfo::Time).
  };
  /// References to core BOLT data structures
  BinaryContext *BC{nullptr};
  BoltAddressTranslation *BAT{nullptr};

  /// Update function execution profile with a recorded trace.
  /// A trace is a region of code executed between two LBR entries supplied in
  /// execution order.
  ///
  /// Return a vector of offsets corresponding to a trace in a function
  /// if the trace is valid, std::nullopt otherwise.
  std::optional<SmallVector<std::pair<uint64_t, uint64_t>, 16>>
  getFallthroughsInTrace(BinaryFunction &BF, const Trace &Trace, uint64_t Count,
                         bool IsReturn) const;

  /// Record external entry into the function \p BF.
  ///
  /// Return true if the entry is valid, false otherwise.
  bool recordEntry(BinaryFunction &BF, uint64_t To, bool Mispred,
                   uint64_t Count = 1) const;

  /// Record exit from the function \p BF via a call or return.
  ///
  /// Return true if the exit point is valid, false otherwise.
  bool recordExit(BinaryFunction &BF, uint64_t From, bool Mispred,
                  uint64_t Count = 1) const;

  /// Branch stacks aggregation statistics
  uint64_t NumTraces{0};
  uint64_t NumInvalidTraces{0};
  uint64_t NumLongRangeTraces{0};
  uint64_t NumTotalSamples{0};

  /// Look into system PATH for Linux Perf and set up the aggregator to use it
  void findPerfExecutable();

  /// Launch a perf subprocess with given args and save output for later
  /// parsing.
  void launchPerfProcess(StringRef Name, PerfProcessInfo &PPI, StringRef Args);

  /// Delete all temporary files created to hold the output generated by spawned
  /// subprocesses during the aggregation job
  void deleteTempFiles();
  // Semantic pass helpers

  /// Look up which function contains an address by using our map of
  /// disassembled BinaryFunctions
  BinaryFunction *getBinaryFunctionContainingAddress(uint64_t Address) const;

  /// Perform BAT translation for a given \p Func and return the parent
  /// BinaryFunction or nullptr.
  BinaryFunction *getBATParentFunction(const BinaryFunction &Func) const;

  /// Retrieve the location name to be used for samples recorded in \p Func.
  static StringRef getLocationName(const BinaryFunction &Func, bool BAT);

  /// Semantic actions - parser hooks to interpret parsed perf samples
  /// Register a sample (non-LBR mode), i.e. a new hit at \p Address
  bool doBasicSample(BinaryFunction &Func, const uint64_t Address,
                     uint64_t Count);

  /// Register an intraprocedural branch in \p Func from offset \p From to
  /// offset \p To.
  bool doIntraBranch(BinaryFunction &Func, uint64_t From, uint64_t To,
                     uint64_t Count, uint64_t Mispreds);

  /// Register an interprocedural branch from \p FromFunc to \p ToFunc with
  /// offsets \p From and \p To, respectively.
  bool doInterBranch(BinaryFunction *FromFunc, BinaryFunction *ToFunc,
                     uint64_t From, uint64_t To, uint64_t Count,
                     uint64_t Mispreds);

  /// Checks if \p Addr corresponds to a return instruction.
  bool checkReturn(uint64_t Addr);

  /// Register a branch from \p From to \p To observed \p Count times with
  /// \p Mispreds mispredictions.
  bool doBranch(uint64_t From, uint64_t To, uint64_t Count, uint64_t Mispreds);

  /// Register a trace between two LBR entries supplied in execution order.
  bool doTrace(const Trace &Trace, uint64_t Count, bool IsReturn);
  /// Parser helpers

  /// Return false if we exhausted our parser buffer and finished parsing
  /// everything
  bool hasData() const { return !ParsingBuf.empty(); }

  /// Print heat map based on LBR samples.
  std::error_code printLBRHeatMap();

  /// Parse a single perf sample containing a PID associated with a sequence of
  /// LBR entries. If the PID does not correspond to the binary we are looking
  /// for, return std::errc::no_such_process. If other parsing errors occur,
  /// return the error. Otherwise, return the parsed sample.
  ErrorOr<PerfBranchSample> parseBranchSample();

  /// Parse a single perf sample containing a PID associated with an event name
  /// and a PC
  ErrorOr<PerfBasicSample> parseBasicSample();

  /// Parse a single perf sample containing a PID associated with an IP and
  /// address.
  ErrorOr<PerfMemSample> parseMemSample();

  /// Parse pre-aggregated LBR samples created by an external tool
  std::error_code parseAggregatedLBREntry();

  /// Parse either buildid:offset or just offset, representing a location in the
  /// binary. Used exclusively for pre-aggregated LBR samples.
  ErrorOr<Location> parseLocationOrOffset();

  /// Check if a field separator is the next char to parse and, if yes, consume
  /// it and return true
  bool checkAndConsumeFS();

  /// Consume the entire line
  void consumeRestOfLine();

  /// True if the next token in the parsing buffer is a new line, but don't
  /// consume it (peek only).
  bool checkNewLine();

  /// Callback invoked with a perf process exit code and its stderr contents.
  using PerfProcessErrorCallbackTy = std::function<void(int, StringRef)>;

  /// Prepare to parse data from a given perf script invocation.
  /// Returns an invocation exit code.
  int prepareToParse(StringRef Name, PerfProcessInfo &Process,
                     PerfProcessErrorCallbackTy Callback);

  /// Parse a single LBR entry as output by perf script -Fbrstack
  ErrorOr<LBREntry> parseLBREntry();

  /// Parse LBR sample.
  void parseLBRSample(const PerfBranchSample &Sample, bool NeedsSkylakeFix);

  /// Parse and pre-aggregate branch events.
  std::error_code parseBranchEvents();

  /// Process all branch events.
  void processBranchEvents();

  /// Parse the full output generated by perf script to report non-LBR samples.
  std::error_code parseBasicEvents();

  /// Process non-LBR events.
  void processBasicEvents();

  /// Parse the full output generated by perf script to report memory events.
  std::error_code parseMemEvents();

  /// Process parsed memory events profile.
  void processMemEvents();

  /// Parse a single line of a PERF_RECORD_MMAP2 event looking for a mapping
  /// between the binary name and its memory layout in a process with a given
  /// PID.
  /// On success return a <FileName, MMapInfo> pair.
  ErrorOr<std::pair<StringRef, MMapInfo>> parseMMapEvent();

  /// Parse PERF_RECORD_FORK event.
  std::optional<ForkInfo> parseForkEvent();

  /// Parse 'PERF_RECORD_COMM exec'. Don't consume the string.
  std::optional<int32_t> parseCommExecEvent();

  /// Parse the full output generated by `perf script --show-mmap-events`
  /// to generate mapping between binary files and their memory mappings for
  /// all PIDs.
  std::error_code parseMMapEvents();

  /// Parse output of `perf script --show-task-events`, and add forked
  /// processes to the set of tracked PIDs.
  std::error_code parseTaskEvents();

  /// Parse a single pair of binary full path and associated build-id
  std::optional<std::pair<StringRef, StringRef>> parseNameBuildIDPair();
void parsePerfData(BinaryContext &BC);
/// Coordinate reading and parsing of pre-aggregated file
///
/// The regular perf2bolt aggregation job is to read perf output directly.
/// However, if the data is coming from a database instead of perf, one could
/// write a query to produce a pre-aggregated file. This function deals with
/// this case.
///
/// The pre-aggregated file contains aggregated LBR data, but without binary
/// knowledge. BOLT will parse it and, using information from the disassembled
/// binary, augment it with fall-through edge frequency information. After
/// this step is finished, this data can be either written to disk to be
/// consumed by BOLT later, or can be used by BOLT immediately if kept in
/// memory.
///
/// File format syntax:
/// E <event>
/// S <start> <count>
/// [TR] <start> <end> <ft_end> <count>
/// B <start> <end> <count> <mispred_count>
/// [Ffr] <start> <end> <count>
///
/// where <start>, <end>, <ft_end> have the format [<id>:]<offset>
///
/// E - name of the sampling event used for subsequent entries
/// S - indicates an aggregated basic sample at <start>
/// B - indicates an aggregated branch from <start> to <end>
/// F - an aggregated fall-through from <start> to <end>
/// f - an aggregated fall-through with external origin - used to disambiguate
/// between a return hitting a basic block head and a regular internal
/// jump to the block
/// r - an aggregated fall-through originating at an external return, no
/// checks are performed for a fallthrough start
/// T - an aggregated trace: branch from <start> to <end> with a fall-through
/// to <ft_end>
/// R - an aggregated trace originating at a return
///
/// <id> - build id of the object containing the address. We can skip it for
/// the main binary and use "X" for an unknown object. This will save some
/// space and facilitate human parsing.
///
/// <offset> - hex offset from the object base load address (0 for the
/// main executable unless it's PIE) to the address.
///
/// <count> - total aggregated count.
///
/// <mispred_count> - the number of times the branch was mispredicted.
///
/// Example:
/// Basic samples profile:
/// E cycles
/// S 41be50 3
/// E br_inst_retired.near_taken
/// S 41be60 6
///
/// Trace profile combining branches and fall-throughs:
/// T 4b196f 4b19e0 4b19ef 2
///
/// Legacy branch profile with separate branches and fall-throughs:
/// F 41be50 41be50 3
/// F 41be90 41be90 4
/// B 4b1942 39b57f0 3 0
/// B 4b196f 4b19e0 2 0
void parsePreAggregated();
/// Parse the full output of pre-aggregated LBR samples generated by
/// an external tool.
std::error_code parsePreAggregatedLBRSamples();
/// If \p Address falls into the binary address space based on memory
/// mapping info \p MMI, then adjust it for further processing by subtracting
/// the base load address. External addresses, i.e. addresses that do not
/// correspond to the binary allocated address space, are adjusted to avoid
/// conflicts.
void adjustAddress(uint64_t &Address, const MMapInfo &MMI) const {
  if (Address >= MMI.MMapAddress && Address < MMI.MMapAddress + MMI.Size) {
    // In-range: rebase to an offset from the load address.
    // NOTE(review): the range test uses MMapAddress while the rebase uses
    // BaseAddress -- presumably these differ by the mapping's file offset;
    // confirm against MMapInfo's invariants.
    Address -= MMI.BaseAddress;
  } else if (Address < MMI.Size) {
    // Make sure the address is not treated as belonging to the binary.
    Address = (-1ULL);
  }
}
/// Adjust addresses in \p LBR entry.
/// Both endpoints are rebased independently via adjustAddress().
void adjustLBR(LBREntry &LBR, const MMapInfo &MMI) const {
  adjustAddress(LBR.From, MMI);
  adjustAddress(LBR.To, MMI);
}
/// Ignore kernel/user transition LBR if requested
bool ignoreKernelInterrupt(LBREntry &LBR) const;
/// Populate functions in \p BC with profile.
void processProfile(BinaryContext &BC);
/// Start an aggregation job asynchronously.
void start();
/// Returns true if this aggregation job is using a translation table to
/// remap samples collected on binaries already processed by BOLT.
/// (Simply reports whether a BAT object was loaded.)
bool usesBAT() const { return BAT; }
/// Force all subprocesses to stop and cancel aggregation
void abort();
/// Dump data structures into a file readable by llvm-bolt
std::error_code writeAggregatedFile(StringRef OutputFilename) const;
/// Dump translated data structures into YAML
std::error_code writeBATYAML(BinaryContext &BC,
StringRef OutputFilename) const;
/// Filter out binaries based on PID
void filterBinaryMMapInfo();
/// If we have a build-id available for the input file, use it to assist
/// matching profile to a binary.
///
/// If the binary name changed after profile collection, use build-id
/// to get the proper name in perf data when build-ids are available.
/// If \p FileBuildID has no match, then issue an error and exit.
void processFileBuildID(StringRef FileBuildID);
/// Infer missing fall-throughs for branch-only traces (LBR top-of-stack
/// entries).
void imputeFallThroughs();
/// Debugging dump methods
void dump() const;
void dump(const PerfBranchSample &Sample) const;
void dump(const PerfMemSample &Sample) const;
/// Profile diagnostics print methods
void printLongRangeTracesDiagnostic() const;
void printBranchSamplesDiagnostics() const;
void printBasicSamplesDiagnostics(uint64_t OutOfRangeSamples) const;
void printBranchStacksDiagnostics(uint64_t IgnoredSamples) const;
/// Get instruction at \p Addr either from containing binary function or
/// disassemble in-place, and invoke \p Callback on resulting MCInst.
/// Returns the result of the callback or nullopt (when \p Addr is not
/// covered by any function, or no instruction exists at that offset).
template <typename T>
std::optional<T>
testInstructionAt(const uint64_t Addr,
                  std::function<T(const MCInst &)> Callback) const {
  BinaryFunction *Func = getBinaryFunctionContainingAddress(Addr);
  if (!Func)
    return std::nullopt;
  const uint64_t Offset = Addr - Func->getAddress();
  // Prefer the instruction already attached to the function; fall back to
  // on-demand disassembly when the function has no instructions yet.
  if (Func->hasInstructions()) {
    if (auto *MI = Func->getInstructionAtOffset(Offset))
      return Callback(*MI);
  } else {
    if (auto MI = Func->disassembleInstructionAtOffset(Offset))
      return Callback(*MI);
  }
  return std::nullopt;
}
/// Apply \p Callback to the instruction at \p Addr, and memoize the result
/// in a \p Map. A cached entry short-circuits the (potentially expensive)
/// lookup/disassembly in testInstructionAt(). Note that a nullopt result is
/// NOT cached, so unresolvable addresses are re-tested on each call.
template <typename T>
std::optional<T> testAndSet(const uint64_t Addr,
                            std::function<T(const MCInst &)> Callback,
                            std::unordered_map<uint64_t, T> &Map) {
  auto It = Map.find(Addr);
  if (It != Map.end())
    return It->second;
  if (std::optional<T> Res = testInstructionAt<T>(Addr, Callback)) {
    Map.emplace(Addr, *Res);
    return *Res;
  }
  return std::nullopt;
}
public:
/// If perf.data was collected without build ids, the buildid-list may contain
/// incomplete entries. Return true if the buffer containing
/// "perf buildid-list" output has only valid entries and is non- empty.
/// Return false otherwise.
bool hasAllBuildIDs();
/// Parse the output generated by "perf buildid-list" to extract build-ids
/// and return a file name matching a given \p FileBuildID.
std::optional<StringRef> getFileNameForBuildID(StringRef FileBuildID);
/// Get a constant reference to the parsed binary mmap entries,
/// keyed by PID.
const std::unordered_map<uint64_t, MMapInfo> &getBinaryMMapInfo() {
  return BinaryMMapInfo;
}
friend class YAMLProfileWriter;
};
/// Print an LBR entry as "<from> -> <to>/<M|P>" (M = mispredicted,
/// P = predicted).
inline raw_ostream &operator<<(raw_ostream &OS,
                               const DataAggregator::LBREntry &L) {
  const char Prediction = L.Mispred ? 'M' : 'P';
  OS << formatv("{0:x} -> {1:x}/{2}", L.From, L.To, Prediction);
  return OS;
}
inline raw_ostream &operator<<(raw_ostream &OS,
                               const DataAggregator::Trace &T) {
  // Print the trace origin: nothing for a pure fall-through, a synthetic
  // "X:..." marker for external origins, or the branch source address.
  switch (T.Branch) {
  case DataAggregator::Trace::FT_ONLY:
    break;
  case DataAggregator::Trace::FT_EXTERNAL_ORIGIN:
    OS << "X:0 -> ";
    break;
  case DataAggregator::Trace::FT_EXTERNAL_RETURN:
    OS << "X:R -> ";
    break;
  default: // Branch holds a real address (last case, so no break needed).
    OS << Twine::utohexstr(T.Branch) << " -> ";
  }
  OS << Twine::utohexstr(T.From);
  // BR_ONLY traces carry no fall-through end point.
  if (T.To != DataAggregator::Trace::BR_ONLY)
    OS << " ... " << Twine::utohexstr(T.To);
  return OS;
}
} // namespace bolt
} // namespace llvm
#endif | c | github | https://github.com/llvm/llvm-project | bolt/include/bolt/Profile/DataAggregator.h |
"""
[Amun - low interaction honeypot]
Copyright (C) [2014] [Jan Goebel]
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>
"""
try:
import psyco ; psyco.full()
from psyco.classes import *
except ImportError:
pass
class IPRange:
    """A CIDR network (e.g. "10.0.0.0/8") supporting membership tests."""

    def __init__(self, net):
        """Parse ``net`` ("a.b.c.d/prefix") into a base address and netmask."""
        self.net = net
        (self.ip, self.pattern) = net.split("/")
        self.ip = self.dottedQuadToNum(self.ip)
        if self.pattern == "" or self.pattern == "0":
            # Historic behaviour: an empty or zero prefix means exact match.
            self.pattern = ~0
        else:
            # ~((1 << host_bits) - 1) keeps the top `prefix` bits set.
            # (The previous int("1" * host_bits, 2) form crashed on a /32
            # network because int("", 2) raises ValueError.)
            self.pattern = ~((1 << (32 - int(self.pattern))) - 1)

    def contains(self, tip):
        """Return True if dotted-quad string ``tip`` lies in this network."""
        return self.ip & self.pattern == self.dottedQuadToNum(tip) & self.pattern

    def dottedQuadToNum(self, ip):
        """Convert a dotted-quad string to its 32-bit integer value."""
        addr = 0
        for byte in map(int, ip.split('.')):
            addr = 256 * addr + byte
        return addr
import urllib2
import urllib
import json
import subprocess
def genAllCodes():
    """Return ticker symbols of every company in Yahoo industries 110-136.

    NOTE(review): relies on the (retired) Yahoo YQL web service and is
    Python-2-only code (print statement, urllib2).
    """
    base_url = "http://query.yahooapis.com/v1/public/yql?format=json&env=store%3A%2F%2F\
datatables.org%2Falltableswithkeys&q="
    codes = []
    for industry in range(110, 137):
        print "Industry: {}".format(industry)
        query = "select * from yahoo.finance.industry where id=\"{}\"".format(industry)
        parsed = toJson(base_url, query)
        # Skip placeholder industries whose name is empty.
        if not (parsed["query"]["results"]["industry"]["name"] == ""):
            companies = parsed["query"]["results"]["industry"]["company"]
            for company in companies:
                codes.append(company["symbol"])
    return codes
def toJson(baseURL, args):
    """GET ``baseURL`` + url-quoted ``args`` and return the parsed JSON body."""
    args = urllib.quote(args, '')
    req = urllib2.Request(baseURL + args, None)
    opener = urllib2.build_opener()
    f = opener.open(req)
    data = f.read()
    return json.loads(data);
def genStats(code, metric):
    """Fetch Yahoo key statistics for ticker ``code``.

    ``metric`` is currently unused; the whole "stats" record is returned.
    """
    stockStats_base = "http://query.yahooapis.com/v1/public/yql?format=json&env=store%3A%2F%2F\
datatables.org%2Falltableswithkeys&q="
    stock = "select * from yahoo.finance.keystats where symbol=\'" + code + "\'"
    parsed = toJson(stockStats_base, stock)
    # The original contained a half-written "if parsed[...][]" line here --
    # a syntax error -- which has been removed.
    return parsed["query"]["results"]["stats"]
def genLoc(code):
    """Geocode ticker ``code``'s address; return (lng, lat) or None.

    The address comes from the external ./getaddr.sh helper script.
    """
    city = subprocess.check_output(["./getaddr.sh", code])
    google_geocode_base = "http://maps.googleapis.com/maps/api/geocode/json?sensor=false&address="
    parsed = toJson(google_geocode_base, city);
    if not (parsed["status"] == "ZERO_RESULTS"):
        return (parsed["results"][0]["geometry"]["location"]["lng"],
                parsed["results"][0]["geometry"]["location"]["lat"])
    # Implicitly returns None when geocoding finds nothing.
def parse_interface_companies(metric):
    """Dispatch on ``metric`` for every known company.

    TODO(review): the original body was syntactically invalid (C-style
    ``||``, an incomplete comparison ``metric != :``, ``genStats()`` called
    without its required arguments, and calls to an undefined ``genQuote``).
    The control flow below is a best-effort, syntactically valid
    reconstruction and still needs to be finished by the author.
    """
    codes = genAllCodes()
    for code in codes:
        if genLoc(code) is None:
            if metric != "LastTradePriceOnly":
                data = genStats(code, metric)  # was genStats() -- missing args
                if metric == "current price":
                    return data
                if metric == "total cash":
                    return data
                if metric == "total debt":
                    return data
        else:
            # genQuote is not defined anywhere in this module.
            raise NotImplementedError("genQuote support is unfinished")
    return metric
# Ad-hoc, network-dependent smoke checks; the quote/stats calls reference an
# undefined genQuote() and are left disabled.
#print(genQuote("LMT"))
#print(genStats("LMT"))
print(genLoc("LMT") == None)
#print(genQuote("A7Z.DE"))
#print(genStats("A7Z.DE"))
print(genLoc("A7Z.DE") == None)
#print(genQuote("JA9.SI"))
#print(genStats("JA9.SI"))
print(genLoc("JA9.SI") == None)
#print(genQuote("adfadfadf"))
#print(genStats("adfadfadf"))
print(genLoc("adfadfadf") == None)
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os_test
import (
"bytes"
"internal/poll"
"io"
"net"
. "os"
"strconv"
"syscall"
"testing"
)
// TestSendFile verifies that copying a file into unix and tcp sockets takes
// the sendfile(2) fast path for several payload sizes, including
// page-boundary edge cases (Getpagesize()+1, 32769).
func TestSendFile(t *testing.T) {
	sizes := []int{
		1,
		42,
		1025,
		syscall.Getpagesize() + 1,
		32769,
	}
	t.Run("sendfile-to-unix", func(t *testing.T) {
		for _, size := range sizes {
			t.Run(strconv.Itoa(size), func(t *testing.T) {
				testSendFile(t, "unix", int64(size))
			})
		}
	})
	t.Run("sendfile-to-tcp", func(t *testing.T) {
		for _, size := range sizes {
			t.Run(strconv.Itoa(size), func(t *testing.T) {
				testSendFile(t, "tcp", int64(size))
			})
		}
	})
}
// testSendFile streams a temporary file of the given size into a socket of
// the given protocol via io.Copy and verifies that (a) poll.SendFile was
// invoked with the expected source/destination descriptors and (b) the
// bytes received match the bytes sent.
func testSendFile(t *testing.T, proto string, size int64) {
	dst, src, recv, data, hook := newSendFileTest(t, proto, size)

	// Now call WriteTo (through io.Copy), which will hopefully call poll.SendFile
	n, err := io.Copy(dst, src)
	if err != nil {
		t.Fatalf("io.Copy error: %v", err)
	}

	// We should have called poll.SendFile with the right file descriptor
	// arguments. (Comment previously referred to poll.Splice, a copy/paste
	// leftover from the splice test.)
	if n > 0 && !hook.called {
		t.Fatal("expected poll.SendFile to be called")
	}
	if hook.called && hook.srcfd != int(src.Fd()) {
		t.Fatalf("wrong source file descriptor: got %d, want %d", hook.srcfd, src.Fd())
	}
	sc, ok := dst.(syscall.Conn)
	if !ok {
		t.Fatalf("destination is not a syscall.Conn")
	}
	rc, err := sc.SyscallConn()
	if err != nil {
		t.Fatalf("destination SyscallConn error: %v", err)
	}
	if err = rc.Control(func(fd uintptr) {
		if hook.called && hook.dstfd != int(fd) {
			t.Fatalf("wrong destination file descriptor: got %d, want %d", hook.dstfd, int(fd))
		}
	}); err != nil {
		t.Fatalf("destination Conn Control error: %v", err)
	}

	// Verify the data size and content.
	dataSize := len(data)
	dstData := make([]byte, dataSize)
	m, err := io.ReadFull(recv, dstData)
	if err != nil {
		t.Fatalf("server Conn Read error: %v", err)
	}
	if n != int64(dataSize) {
		t.Fatalf("data length mismatch for io.Copy, got %d, want %d", n, dataSize)
	}
	if m != dataSize {
		t.Fatalf("data length mismatch for net.Conn.Read, got %d, want %d", m, dataSize)
	}
	if !bytes.Equal(dstData, data) {
		t.Errorf("data mismatch, got %s, want %s", dstData, data)
	}
}
// newSendFileTest initializes a new test for sendfile.
//
// It creates source file and destination sockets, and populates the source file
// with random data of the specified size. It also hooks package os' call
// to poll.Sendfile and returns the hook so it can be inspected.
// Returned values: client conn (copy destination), source file, server conn
// (to read the received bytes), the random payload, and the hook.
func newSendFileTest(t *testing.T, proto string, size int64) (net.Conn, *File, net.Conn, []byte, *sendFileHook) {
	t.Helper()

	hook := hookSendFile(t)

	client, server := createSocketPair(t, proto)
	tempFile, data := createTempFile(t, "writeto-sendfile-to-socket", size)

	return client, tempFile, server, data, hook
}
// hookSendFile replaces poll.TestHookDidSendFile with a recorder for the
// duration of the test and returns the struct the recorder fills in.
func hookSendFile(t *testing.T) *sendFileHook {
	h := new(sendFileHook)
	orig := poll.TestHookDidSendFile
	// Restore the original hook once the test (and its subtests) finish.
	t.Cleanup(func() {
		poll.TestHookDidSendFile = orig
	})
	poll.TestHookDidSendFile = func(dstFD *poll.FD, src uintptr, written int64, err error, handled bool) {
		h.called = true
		h.dstfd = dstFD.Sysfd
		h.srcfd = int(src)
		h.written = written
		h.err = err
		h.handled = handled
	}
	return h
}
// sendFileHook records the arguments of the most recent poll.SendFile call
// observed through poll.TestHookDidSendFile.
type sendFileHook struct {
	called  bool  // poll.SendFile was invoked
	dstfd   int   // destination socket descriptor
	srcfd   int   // source file descriptor
	written int64 // bytes reported written
	handled bool  // whether sendfile handled the transfer
	err     error // error returned by sendfile
}
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Factory functions for symmetric cryptography."""
import os
from tlslite.utils import python_aes
from tlslite.utils import python_aesgcm
from tlslite.utils import python_rc4
from tlslite.utils import cryptomath
tripleDESPresent = False
if cryptomath.m2cryptoLoaded:
from tlslite.utils import openssl_aes
from tlslite.utils import openssl_rc4
from tlslite.utils import openssl_tripledes
tripleDESPresent = True
if cryptomath.pycryptoLoaded:
from tlslite.utils import pycrypto_aes
from tlslite.utils import pycrypto_aesgcm
from tlslite.utils import pycrypto_rc4
from tlslite.utils import pycrypto_tripledes
tripleDESPresent = True
# **************************************************************************
# Factory Functions for AES
# **************************************************************************
def createAES(key, IV, implList=None):
    """Create a new AES object.

    @type key: str
    @param key: A 16, 24, or 32 byte string.

    @type IV: str
    @param IV: A 16 byte string

    @rtype: L{tlslite.utils.AES}
    @return: An AES object.

    Raises NotImplementedError when no requested implementation is available.
    """
    if implList is None:  # was "== None"; identity test is the correct idiom
        implList = ["openssl", "pycrypto", "python"]

    # First available implementation in preference order wins.
    for impl in implList:
        if impl == "openssl" and cryptomath.m2cryptoLoaded:
            return openssl_aes.new(key, 2, IV)
        elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_aes.new(key, 2, IV)
        elif impl == "python":
            return python_aes.new(key, 2, IV)
    raise NotImplementedError()
def createAESGCM(key, implList=None):
    """Create a new AESGCM object.

    @type key: bytearray
    @param key: A 16 or 32 byte byte array.

    @rtype: L{tlslite.utils.AESGCM}
    @return: An AESGCM object.

    Raises NotImplementedError when no requested implementation is available.
    """
    if implList is None:  # was "== None"; identity test is the correct idiom
        implList = ["pycrypto", "python"]

    for impl in implList:
        if impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_aesgcm.new(key)
        if impl == "python":
            return python_aesgcm.new(key)
    raise NotImplementedError()
def createRC4(key, IV, implList=None):
    """Create a new RC4 object.

    @type key: str
    @param key: A 16 to 32 byte string.

    @type IV: object
    @param IV: Ignored, whatever it is.

    @rtype: L{tlslite.utils.RC4}
    @return: An RC4 object.

    Raises AssertionError when IV is non-empty (RC4 is a stream cipher and
    takes no IV) and NotImplementedError when no implementation is available.
    """
    if implList is None:  # was "== None"; identity test is the correct idiom
        implList = ["openssl", "pycrypto", "python"]

    if len(IV) != 0:
        raise AssertionError()
    for impl in implList:
        if impl == "openssl" and cryptomath.m2cryptoLoaded:
            return openssl_rc4.new(key)
        elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_rc4.new(key)
        elif impl == "python":
            return python_rc4.new(key)
    raise NotImplementedError()
#Create a new TripleDES instance
def createTripleDES(key, IV, implList=None):
    """Create a new 3DES object.

    @type key: str
    @param key: A 24 byte string.

    @type IV: str
    @param IV: An 8 byte string

    @rtype: L{tlslite.utils.TripleDES}
    @return: A 3DES object.

    Raises NotImplementedError when no requested implementation is available
    (note: there is no pure-Python 3DES fallback).
    """
    if implList is None:  # was "== None"; identity test is the correct idiom
        implList = ["openssl", "pycrypto"]

    for impl in implList:
        if impl == "openssl" and cryptomath.m2cryptoLoaded:
            return openssl_tripledes.new(key, 2, IV)
        elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
            return pycrypto_tripledes.new(key, 2, IV)
    raise NotImplementedError()
from django.db import migrations, models
class Migration(migrations.Migration):
    # Raises auth.User.first_name's max_length to 150 characters.

    dependencies = [
        ("auth", "0011_update_proxy_permissions"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="first_name",
            field=models.CharField(
                blank=True, max_length=150, verbose_name="first name"
            ),
        ),
    ]
import os
import random
import fnmatch
import re
import glob
import zipfile
import json
from thlib.environment import env_mode, env_server, env_inst
if env_mode.py3:
import urllib as urllib2
else:
import urllib2
import thlib.global_functions as gf
# import thlib.tactic_classes as tc
def get_version(major=0, minor=0, build=0, revision=0, string=False, sort_sum=False):
    """Return a four-part version in one of three shapes.

    By default a dict with ``major``/``minor``/``build``/``revision`` keys;
    with ``string=True`` an underscore-joined string; with ``sort_sum=True``
    a single comparable integer (decimal weights 1000/100/10/1).
    """
    parts = {
        'major': major,
        'minor': minor,
        'build': build,
        'revision': revision,
    }
    if string:
        return '{major}_{minor}_{build}_{revision}'.format(**parts)
    if sort_sum:
        weights = (1000, 100, 10, 1)
        values = (major, minor, build, revision)
        return sum(weight * int(value) for weight, value in zip(weights, values))
    return parts
def read_json_from_path(file_path):
    """Load JSON from ``file_path``; fall back to a zeroed version dict.

    Returns the parsed document when the file exists, otherwise
    ``get_version()`` (an all-zero version dict).
    """
    if os.path.isfile(file_path):
        # "with" guarantees the handle is closed even if json.load raises;
        # the previous open()/close() pair leaked the handle on error.
        with open(file_path) as json_file:
            return json.load(json_file)
    return get_version()
def save_json_to_path(file_path, data):
    """Dump ``data`` as indented JSON to ``file_path``.

    Side effect: always ensures an "updates" directory exists under the
    application root, even when ``file_path`` points elsewhere -- callers
    appear to rely on this (see save_current_version / create_updates_list).
    """
    updates_dir = '{0}/updates'.format(env_mode.get_current_path())
    if not os.path.isdir(updates_dir):
        os.makedirs(updates_dir)

    with open(file_path, mode='w+') as json_file:
        json.dump(data, json_file, indent=4)
def get_current_version():
    """Return the locally installed version dict (thlib/version.json)."""
    file_path = '{0}/thlib/version.json'.format(env_mode.get_current_path())
    return read_json_from_path(file_path)
def check_need_update():
    """Return True when the server advertises a different version than ours.

    Returns False when the server version cannot be fetched, and None
    (falsy) when the versions match.
    """
    server_ver = check_for_last_version()
    if not server_ver:
        return False
    current_ver = get_current_version()
    if get_version(**server_ver) != get_version(**current_ver):
        return True
def save_current_version(data):
    """Persist ``data`` as the locally installed version (thlib/version.json)."""
    file_path = '{0}/thlib/version.json'.format(env_mode.get_current_path())
    save_json_to_path(file_path, data)
def get_info_from_updates_folder(files_list=False):
    """List update descriptors found in the app's "updates" directory.

    With ``files_list=True`` return just the ``*.json`` file names;
    otherwise parse every descriptor except the "versions.json" index.
    """
    updates_dir = '{0}/updates'.format(env_mode.get_current_path())
    # glob.glob1 is an undocumented private helper; fnmatch.filter over
    # os.listdir is the supported equivalent. A missing directory yields an
    # empty list (matching glob1's behaviour), and hidden files are skipped
    # (glob1 never matched leading-dot names with a '*' pattern).
    try:
        entries = os.listdir(updates_dir)
    except OSError:
        entries = []
    json_files = [name for name in fnmatch.filter(entries, '*.json')
                  if not name.startswith('.')]
    if files_list:
        return json_files
    updates_list = []
    for jf in json_files:
        if jf != 'versions.json':
            updates_list.append(read_json_from_path('{0}/{1}'.format(updates_dir, jf)))
    return updates_list
def create_updates_list():
    """Write updates/versions.json: an index of all local descriptor files."""
    file_path = '{0}/updates/versions.json'.format(env_mode.get_current_path())
    save_json_to_path(file_path, get_info_from_updates_folder(files_list=True))
def download_from_url(url):
    """Fetch ``url`` on a worker thread, honouring the configured proxy.

    Returns False when the request fails.

    NOTE(review): the success path falls off the end and returns None --
    the "return result_thread.result" line is commented out below, so
    callers such as check_for_last_version() can never receive the response
    object. Confirm whether this is dead code or a regression.
    """
    proxy = env_server.get_proxy()
    if proxy['enabled']:
        server = proxy['server'].replace('http://', '')
        proxy_dict = {
            'http': 'http://{login}:{pass}@{0}'.format(server, **proxy)
        }
        proxy_handler = urllib2.ProxyHandler(proxy_dict)
        auth = urllib2.HTTPBasicAuthHandler()
        opener = urllib2.build_opener(proxy_handler, auth, urllib2.HTTPHandler)
        urllib2.install_opener(opener)

    def url_open_agent(url=url, timeout=1):
        # Defaults bind the current url so the worker needs no arguments.
        return urllib2.urlopen(url=url, timeout=timeout)

    query_worker = gf.get_thread_worker(
        url_open_agent,
        error_func=gf.error_handle
    )
    query_worker.try_start()
    thread_pool = query_worker.get_thread_pool()
    thread_pool.waitForDone()

    if query_worker.is_failed():
        return False
    # else:
    #     return result_thread.result
def check_for_last_version():
    """Fetch the server-side version descriptor, or None on failure.

    The random query-string suffix defeats intermediate HTTP caches.
    NOTE(review): download_from_url() currently never returns the response
    object, so ``last_ver`` is always falsy here in practice.
    """
    last_ver = download_from_url('http://tactic-handler.tk/th/version.json?{0}'.format(random.randint(0, 99999)))
    if last_ver:
        update_str = json.loads(last_ver.read())
        return update_str
def get_updates_from_server():
    """Download the server's versions.json index and every file it lists
    into the local "updates" directory."""
    updates_list = download_from_url('http://tactic-handler.tk/th/versions.json?{0}'.format(random.randint(0, 99999)))
    if updates_list:
        versions_list = json.loads(updates_list.read())
        path_to_save = '{0}/updates'.format(env_mode.get_current_path())
        if not os.path.exists(path_to_save):
            os.makedirs(path_to_save)
        for vl in versions_list:
            update_file = download_from_url('http://tactic-handler.tk/th/{0}'.format(vl))
            with open('{0}/{1}'.format(path_to_save, vl), 'wb') as output:
                output.write(update_file.read())
                output.close()
def get_update_archive_from_server(archive_name):
    """Download ``archive_name`` into the local updates dir; return its path.

    Implicitly returns None when the download fails.
    """
    archive_path = '{0}/updates/{1}'.format(env_mode.get_current_path(), archive_name)
    update_archive_file = download_from_url('http://tactic-handler.tk/th/{0}'.format(archive_name))
    if update_archive_file:
        with open(archive_path, 'wb') as output:
            output.write(update_archive_file.read())
            output.close()
        return archive_path
def delete_files_from_list(files_list):
    """Stub: intended to remove files after an update; not implemented yet."""
    pass
    # print(files_list)
def create_app_update_list():
    """Walk the application tree and return files eligible for an update.

    Paths matching ``ignore_list`` are excluded and only files whose names
    match one of the ``include_list`` globs are kept.

    NOTE(review): the ignore entries are joined into a regex without
    re.escape(), so "." matches any character -- apparently harmless for
    the current entries, but worth confirming if new ones are added.
    """
    ignore_list = [
        '.idea',
        '!not_in_project!',
        'settings',
        'screenshots',
        'updates',
        'asd.txt',
        'asd2.txt',
        'asd4.txt',
        'backup',
        'design',
        'deprecated',
    ]
    include_list = [
        '*.py',
        '*.pyw',
        '*.ui',
        '*.json',
        '*.png',
        '*.psd',
        '*.py',
        '*.qrc',
        '*.ico',
        '*.tga',
        '*.txt',
        '*.tif',
        '*.rgb',
        '*.j2k',
        '*.jpg',
        '*.zip',
        '*.ttf',
        '*VERSION*',
    ]
    # Translate the globs into one alternation regex; same for the ignores.
    include_list = r'|'.join([fnmatch.translate(x) for x in include_list])
    ignore_list = '|'.join(ignore_list)
    files_list = []
    for root, dirs, files in os.walk(env_mode.get_current_path()):
        if not re.search(ignore_list, root):
            files = [os.path.join(root, f) for f in files]
            files = [f for f in files if not re.search(ignore_list, f)]
            files = [f for f in files if re.search(include_list, f)]
            for fl in files:
                files_list.append(fl)
    return files_list
def create_update_archive(archive_path):
    """Zip every distributable application file into ``archive_path``,
    storing member names relative to the application root."""
    root = env_mode.get_current_path()
    with zipfile.ZipFile(archive_path, 'w', compression=zipfile.ZIP_DEFLATED) as archive:
        for file_path in create_app_update_list():
            archive.write(file_path, arcname=file_path.replace(root, ''))
        archive.close()
def update_from_archive(archive_path):
    """Extract ``archive_path`` over the current application directory,
    normalising Windows-style backslash separators in member names."""
    with zipfile.ZipFile(archive_path, "r") as zp:
        members = []
        for member in zp.infolist():
            # Archives built on Windows may carry '\\' path separators.
            member.filename = member.filename.replace('\\', '/')
            members.append(member)
        zp.extractall(env_mode.get_current_path(), members)
        # The explicit zp.close() the original kept here was redundant:
        # the "with" block already closes the archive on exit.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pkg_resources
def register_ew_resources(manager):
    """Register this tool's bundled JS and CSS directories with the
    EasyWidgets resource ``manager``."""
    manager.register_directory(
        'tracker_js', pkg_resources.resource_filename('forgetracker', 'widgets/resources/js'))
    manager.register_directory(
        'tracker_css', pkg_resources.resource_filename('forgetracker', 'widgets/resources/css'))
import { test } from '../../test';
export default test({
	// The rendered component must leave document.title empty.
	test({ assert, window }) {
		assert.equal(window.document.title, '');
	}
});
import logging
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.functional import cached_property
class SessionStore(SessionBase):
    """
    Implements database session store.
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)

    @classmethod
    def get_model_class(cls):
        # Avoids a circular import and allows importing SessionStore when
        # django.contrib.sessions is not in INSTALLED_APPS.
        from django.contrib.sessions.models import Session
        return Session

    @cached_property
    def model(self):
        # Cached so the lazy import above happens at most once per store.
        return self.get_model_class()

    def load(self):
        """Read this session's row; start empty if it is missing, expired,
        or its payload fails decoding (SuspiciousOperation)."""
        try:
            s = self.model.objects.get(
                session_key=self.session_key,
                expire_date__gt=timezone.now()
            )
            return self.decode(s.session_data)
        except (self.model.DoesNotExist, SuspiciousOperation) as e:
            if isinstance(e, SuspiciousOperation):
                # Tampered/undecodable payloads go to the security logger.
                logger = logging.getLogger('django.security.%s' %
                                           e.__class__.__name__)
                logger.warning(force_text(e))
            self._session_key = None
            return {}

    def exists(self, session_key):
        """Return True if a row with this key exists (expired or not)."""
        return self.model.objects.filter(session_key=session_key).exists()

    def create(self):
        """Generate a fresh, guaranteed-unique session key and persist it."""
        while True:
            self._session_key = self._get_new_session_key()
            try:
                # Save immediately to ensure we have a unique entry in the
                # database.
                self.save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            return

    def create_model_instance(self, data):
        """
        Return a new instance of the session model object, which represents the
        current session state. Intended to be used for saving the session data
        to the database.
        """
        return self.model(
            session_key=self._get_or_create_session_key(),
            session_data=self.encode(data),
            expire_date=self.get_expiry_date(),
        )

    def save(self, must_create=False):
        """
        Saves the current session data to the database. If 'must_create' is
        True, a database error will be raised if the saving operation doesn't
        create a *new* entry (as opposed to possibly updating an existing
        entry).
        """
        if self.session_key is None:
            return self.create()
        data = self._get_session(no_load=must_create)
        obj = self.create_model_instance(data)
        using = router.db_for_write(self.model, instance=obj)
        try:
            with transaction.atomic(using=using):
                obj.save(force_insert=must_create, using=using)
        except IntegrityError:
            if must_create:
                # Duplicate key on insert: signal create() to retry.
                raise CreateError
            raise

    def delete(self, session_key=None):
        """Delete the given session row (default: the current session)."""
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        try:
            self.model.objects.get(session_key=session_key).delete()
        except self.model.DoesNotExist:
            pass

    @classmethod
    def clear_expired(cls):
        """Purge all rows whose expire_date has passed."""
        cls.get_model_class().objects.filter(expire_date__lt=timezone.now()).delete()
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from sickbeard import helpers
from sickbeard import logger
import requests
meta_session = requests.Session()
def getShowImage(url, imgNum=None):
    """Download a show image, optionally substituting fanart number ``imgNum``.

    Returns the raw image bytes, or None when ``url`` is None or the
    download fails.
    """
    if url is None:  # was "== None"; identity test is the correct idiom
        return None

    # If a fanart number was provided, rewrite the URL to point at it.
    if imgNum is not None:
        tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
    else:
        tempURL = url

    logger.log(u"Fetching image from " + tempURL, logger.DEBUG)

    # (Removed the dead "image_data = None  # @UnusedVariable" assignment.)
    image_data = helpers.getURL(tempURL, session=meta_session)
    if image_data is None:
        logger.log(u"There was an error trying to retrieve the image, aborting", logger.WARNING)
        return None

    return image_data
from numpy import zeros, asarray, eye, poly1d, hstack, r_
from scipy import linalg
__all__ = ["pade"]
def pade(an, m, n=None):
    """
    Return Pade approximation to a polynomial as the ratio of two polynomials.

    Parameters
    ----------
    an : (N,) array_like
        Taylor series coefficients.
    m : int
        The order of the returned approximating polynomial `q`.
    n : int, optional
        The order of the returned approximating polynomial `p`. By default,
        the order is ``len(an)-m``.

    Returns
    -------
    p, q : Polynomial class
        The Pade approximation of the polynomial defined by `an` is
        ``p(x)/q(x)``.

    Examples
    --------
    >>> from scipy.interpolate import pade
    >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
    >>> p, q = pade(e_exp, 2)

    >>> e_exp.reverse()
    >>> e_poly = np.poly1d(e_exp)

    Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``

    >>> e_poly(1)
    2.7166666666666668

    >>> p(1)/q(1)
    2.7179487179487181

    """
    an = asarray(an)
    if n is None:
        n = len(an) - 1 - m
        # Only the derived n can signal that m was too large for the series;
        # keeping this check nested makes the explicit-n check below live
        # (previously both "n < 0" tests sat at the same level, leaving the
        # second one dead).
        if n < 0:
            raise ValueError("Order of q <m> must be smaller than len(an)-1.")
    if n < 0:
        raise ValueError("Order of p <n> must be greater than 0.")
    N = m + n
    if N > len(an)-1:
        raise ValueError("Order of q+p <m+n> must be smaller than len(an).")
    an = an[:N+1]
    # Build the linear system [A | B] pq = an, where the left block carries
    # the numerator coefficients and the right block the (negated, shifted)
    # denominator coefficients.
    Akj = eye(N+1, n+1, dtype=an.dtype)
    Bkj = zeros((N+1, m), dtype=an.dtype)
    for row in range(1, m+1):
        Bkj[row,:row] = -(an[:row])[::-1]
    for row in range(m+1, N+1):
        Bkj[row,:] = -(an[row-m:row])[::-1]
    C = hstack((Akj, Bkj))
    pq = linalg.solve(C, an)
    p = pq[:n+1]
    # q is normalized so its constant term is 1.
    q = r_[1.0, pq[n+1:]]
    return poly1d(p[::-1]), poly1d(q[::-1])
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of box sampler."""
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import sampling_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class BoxSampler(tf.keras.layers.Layer):
"""Creates a BoxSampler to sample positive and negative boxes."""
def __init__(self,
num_samples: int = 512,
foreground_fraction: float = 0.25,
**kwargs):
"""Initializes a box sampler.
Args:
num_samples: An `int` of the number of sampled boxes per image.
foreground_fraction: A `float` in [0, 1], what percentage of boxes should
be sampled from the positive examples.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'num_samples': num_samples,
'foreground_fraction': foreground_fraction,
}
super(BoxSampler, self).__init__(**kwargs)
def call(self, positive_matches: tf.Tensor, negative_matches: tf.Tensor,
ignored_matches: tf.Tensor):
"""Samples and selects positive and negative instances.
Args:
positive_matches: A `bool` tensor of shape of [batch, N] where N is the
number of instances. For each element, `True` means the instance
corresponds to a positive example.
negative_matches: A `bool` tensor of shape of [batch, N] where N is the
number of instances. For each element, `True` means the instance
corresponds to a negative example.
ignored_matches: A `bool` tensor of shape of [batch, N] where N is the
number of instances. For each element, `True` means the instance should
be ignored.
Returns:
A `tf.tensor` of shape of [batch_size, K], storing the indices of the
sampled examples, where K is `num_samples`.
"""
sample_candidates = tf.logical_and(
tf.logical_or(positive_matches, negative_matches),
tf.logical_not(ignored_matches))
sampler = sampling_ops.BalancedPositiveNegativeSampler(
positive_fraction=self._config_dict['foreground_fraction'],
is_static=True)
batch_size = sample_candidates.shape[0]
sampled_indicators = []
for i in range(batch_size):
sampled_indicator = sampler.subsample(
sample_candidates[i],
self._config_dict['num_samples'],
positive_matches[i])
sampled_indicators.append(sampled_indicator)
sampled_indicators = tf.stack(sampled_indicators)
_, selected_indices = tf.nn.top_k(
tf.cast(sampled_indicators, dtype=tf.int32),
k=self._config_dict['num_samples'],
sorted=True)
return selected_indices
def get_config(self):
    """Returns the constructor arguments of this sampler.

    Note: this returns the live internal dict, not a copy.
    """
    return self._config_dict
@classmethod
def from_config(cls, config):
    """Recreates a sampler from a `get_config()`-style dict."""
    return cls(**config)
import unittest, random, time, sys, string
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_browse as h2b
print("Not sure if we'll need quotes around these time formats")


def a0(epochTime):
    """asctime-style rendering, e.g. 'Thu Jan  1 00:00:00 1970'."""
    return time.asctime(epochTime)


def a1(epochTime):
    """Locale date-and-time (%c)."""
    return time.strftime("%c", epochTime)


def a2(epochTime):
    """Locale date only (%x)."""
    return time.strftime("%x", epochTime)


def a3(epochTime):
    """Locale time only (%X)."""
    return time.strftime("%X", epochTime)


def a4(epochTime):
    """Locale time followed by date."""
    return time.strftime("%X %x", epochTime)


def a5(epochTime):
    """Locale date followed by time."""
    return time.strftime("%x %X", epochTime)


# All formatter variants; getRandomDate() picks one at random per value.
timeFormatFuncList = [a0, a1, a2, a3, a4, a5]
def getRandomDate():
    """Return a random epoch time rendered through a randomly picked formatter."""
    # Random moment between 1970 and ~2033, as a UTC struct_time.
    moment = time.gmtime(random.randint(0, 2000000000))
    # randint (not random.choice) keeps the RNG consumption identical
    # to the original implementation.
    idx = random.randint(0, len(timeFormatFuncList) - 1)
    return timeFormatFuncList[idx](moment)
def rand_rowData(colCount=6):
    """Build, print and return one CSV row of colCount random date strings.

    One random cell is blanked (an NA) and one random cell is upper-cased
    to exercise the parser's case handling.
    """
    cells = [getRandomDate() for _ in range(colCount)]
    # thru a random NA in, on every row?
    cells[random.randint(0, colCount - 1)] = ''
    # do a random upper case of one
    upCol = random.randint(0, colCount - 1)
    cells[upCol] = cells[upCol].upper()
    row = ",".join(map(str, cells))
    print(row)
    return row
def rand_header(colCount=6):
    """Return a random CSV header line of colCount comma-separated names.

    Each name is 0-6 characters drawn from ASCII letters, digits and space
    (uppercase letters appear twice in the pool, weighting them up).
    """
    # string.printable string.punctuation string.whitespace
    # Fix: string.letters is locale-dependent in Python 2 and removed in
    # Python 3; string.ascii_letters is the stable equivalent.
    choiceStr = string.ascii_uppercase + string.ascii_letters + string.digits + " "
    h = []
    for c in range(colCount):
        h.append(''.join(random.choice(choiceStr) for x in range(random.randint(0, 6))))
    return ",".join(h)
def write_syn_dataset(csvPathname, rowCount, colCount, headerData=None):
    """Write a synthetic CSV of random time strings to csvPathname.

    Args:
        csvPathname: destination file path (overwritten).
        rowCount: number of data rows to generate.
        colCount: number of columns per row.
        headerData: optional header line written first, verbatim.
    """
    # 'with' guarantees the handle is closed even if row generation raises
    # (the original open()/close() pair leaked the file on error).
    with open(csvPathname, "w+") as dsf:
        if headerData is not None:
            dsf.write(headerData + "\n")
        # re-randomize every row
        for i in range(rowCount):
            dsf.write(rand_rowData(colCount) + "\n")
class Basic(unittest.TestCase):
    """h2o parse/summary/csv-download round-trip test over random time data.

    NOTE(review): the test name ends in NOPASS and the body references
    missingValuesListA / missingValuesListB, which are never assigned
    anywhere -- running it raises NameError. Presumably they were meant to
    be derived from columnInfoFromInspect's missingValuesDict results;
    confirm before re-enabling.
    """

    def tearDown(self):
        # Fail if h2o logged errors to its sandbox during the test.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        pass
        # Single-node cloud with a 2 GB Java heap.
        h2o.init(1,java_heap_GB=2,use_flatfile=True)
        #h2b.browseTheCloud()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud(h2o.nodes)

    def test_parse_time_rand_fvec_NOPASS(self):
        # Generate a small synthetic CSV of randomly formatted time strings.
        SYNDATASETS_DIR = h2o.make_syn_dir()
        csvFilename = "syn_time.csv"
        csvPathname = SYNDATASETS_DIR + '/' + csvFilename
        colCount = 6
        rowCount = 10
        headerData = rand_header(colCount)
        write_syn_dataset(csvPathname, rowCount, colCount, headerData)
        for trial in range (1):
            rowData = rand_rowData()
            # make sure all key names are unique, when we re-put and re-parse (h2o caching issues)
            src_key = csvFilename + "_" + str(trial)
            hex_key = csvFilename + "_" + str(trial) + ".hex"
            # Parse A: the freshly generated dataset.
            start = time.time()
            parseResultA = h2i.import_parse(path=csvPathname, schema='put', src_key=src_key, hex_key=hex_key)
            print "\nA trial #", trial, "parse end on ", csvFilename, 'took', time.time() - start, 'seconds'
            inspect = h2o_cmd.runInspect(key=hex_key)
            numRowsA = inspect['numRows']
            numColsA = inspect['numCols']
            summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=100,
                numCols=numColsA, numRows=numRowsA, noPrint=True)
            print summaryResult
            h2o_cmd.infoFromSummary(summaryResult)
            (missingValuesDictA, constantValuesDictA, enumSizeDictA, colTypeDictA, colNameDictA) = \
                h2o_cmd.columnInfoFromInspect(hex_key, exceptionOnMissingValues=False)
            if constantValuesDictA or enumSizeDictA:
                raise Exception("Should be empty? constantValuesDictA %s enumSizeDictA %s" % (constantValuesDictA, enumSizeDictA))
            # NOTE(review): missingValuesListA is undefined -- NameError here.
            print "missingValuesListA", missingValuesListA
            # self.assertEqual(missingValuesListA, [], "missingValuesList should be empty")
            self.assertEqual(numColsA, colCount)
            self.assertEqual(numRowsA, rowCount)
            # do a little testing of saving the key as a csv
            csvDownloadPathname = SYNDATASETS_DIR + "/csvDownload.csv"
            h2o.nodes[0].csv_download(src_key=hex_key, csvPathname=csvDownloadPathname)
            # remove the original parsed key. source was already removed by h2o
            h2o.nodes[0].remove_key(hex_key)
            # interesting. what happens when we do csv download with time data?
            # Parse B: re-parse h2o's own csv download and compare to A.
            start = time.time()
            parseResultB = h2i.import_parse(path=csvDownloadPathname, schema='put', src_key=src_key, hex_key=hex_key)
            print "B trial #", trial, "parse end on ", csvFilename, 'took', time.time() - start, 'seconds'
            inspect = h2o_cmd.runInspect(key=hex_key)
            numRowsB = inspect['numRows']
            numColsB = inspect['numCols']
            # NOTE(review): missingValuesListB is also undefined.
            print "missingValuesListB", missingValuesListB
            summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=100,
                numCols=numColsB, numRows=numRowsB, noPrint=True)
            (missingValuesDictB, constantValuesDictB, enumSizeDictB, colTypeDictB, colNameDictB) = \
                h2o_cmd.columnInfoFromInspect(hex_key, exceptionOnMissingValues=False)
            if constantValuesDictB or enumSizeDictB:
                raise Exception("Should be empty? constantValuesDictB %s enumSizeDictB %s" % (constantValuesDictB, enumSizeDictB))
            self.assertEqual(missingValuesListA, missingValuesListB,
                "missingValuesList mismatches after re-parse of downloadCsv result")
            self.assertEqual(numColsA, numColsB,
                "numCols mismatches after re-parse of downloadCsv result")
            # H2O adds a header to the csv created. It puts quotes around the col numbers if no header
            # but in this dataset we have a header too, so the row counts should be equal
            # if not, maybe the parse of our dataset didn't detect a row
            self.assertEqual(numRowsA, numRowsB,
                "numRowsA: %s numRowsB: %s mismatch after re-parse of downloadCsv result" % (numRowsA, numRowsB) )
            # FIX! should do some comparison of values?
            # maybe can use exec to checksum the columns and compare column list.
            # or compare to expected values? (what are the expected values for the number for time inside h2o?)
            # FIX! should compare the results of the two parses. The infoFromInspect result?
            ### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
            h2o.check_sandbox_for_errors()
if __name__ == '__main__':
    # Run through h2o's unittest entry point.
    h2o.unit_main()
"""
Mostly deprecated clustering and data structural analysis routines.
"""
from typing import Tuple
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse as spmat
from bioflow.annotation_network.knowledge_access_analysis import ref_param_set, \
get_go_interface_instance
from bioflow.configs.main_configs import NewOutputs
from bioflow.utils.dataviz import kde_compute, render_2d_matrix
from bioflow.utils.linalg_routines import cluster_nodes, normalize_laplacian, \
average_off_diag_in_sub_matrix, average_interset_linkage
def local_indexed_select(tri_array, array_column, selection_span):
    """Keep the columns of tri_array whose value in row `array_column` lies
    strictly inside selection_span.

    :param tri_array: 2D array the columns are selected from
    :param array_column: index of the row the span test is applied to
    :param selection_span: (low, high) open interval that values must fall in
    :return: the selected columns; a 1x3 zero array when nothing matches
        (note the fallback's orientation differs from a non-empty result)
    """
    tested_row = tri_array[array_column, :]
    keep = np.logical_and(selection_span[0] < tested_row,
                          tested_row < selection_span[1])
    if not keep.any():
        return np.array([[0.0, 0.0, 0.0]])
    return tri_array[:, keep]
# REFACTOR: Legacy code containing static analysis and clustering logic
def deprectated_show_correlations(
        background_curr_deg_conf,
        mean_correlations,
        eigenvalues,
        selector,
        true_sample_tri_corr_array,
        test_mean_correlation,
        eigenvalue,
        re_samples,
        go_interface_instance=None,
        sparse=False,
        param_set=ref_param_set,
        save_path: NewOutputs = None):
    # TODO: there is a lot of repetition depending on which values are the biggest,
    # test-setted or real setted. In all, we should be able to reduce it to two functions:
    # scatterplot and histogram with two sets that should go into the dataviz module
    """
    A general function that performs demonstration of an example of random samples of the
    same size as our sample
    and of our sample and conducts the statistical tests on whether any of nodes or
    functional groups in our sample are non-random

    :param background_curr_deg_conf: [[current, informativity, confusion_potential], ...] -
        characteristics of the random samples
    :param mean_correlations: [[cluster size, average internode connection], ...] -
        characteristics of clustering random samples with the same parameters
    :param eigenvalues: eigenvalues associated to the interconnection matrix of random samples
    :param selector: range on which we would like to visually zoom and plot a histogram
    :param true_sample_tri_corr_array: [[current, informativity, confusion_potential], ...] -
        characteristics of the true sample. If none, nothing happens
    :param test_mean_correlation: [[cluster size, average internode connection], ...] -
        characteristics of clustering the true sample
    :param eigenvalue: eigenvalues associated to the interconnection matrix of the true sample
    :param re_samples: how many random samples we analyzed for the default model
    :param go_interface_instance:
    :param sparse:
    :param param_set:
    :return: (current_info_rel, cluster_props) KDE evaluations of the true
        sample against the random-sample background (None when no true sample)

    NOTE(review): save_path defaults to None but is dereferenced
    unconditionally at the end (save_path.knowledge_network_scatterplot) --
    calling without save_path raises AttributeError; confirm intent.
    """
    if go_interface_instance is None:
        go_interface_instance = get_go_interface_instance(param_set)
    # Translate the confusion-potential zoom span into informativity units.
    inf_sel = (go_interface_instance.calculate_informativity(selector[0]),
               go_interface_instance.calculate_informativity(selector[1]))
    fig = plt.figure()
    fig.set_size_inches(30, 20)
    # trivect: [0, :] - current; [1, :] - informativity; [2, :] - confusion potential
    # 3x3 grid of panels; background (random samples) in blue, true sample in red.
    plt.subplot(331)
    plt.title('current through nodes')
    bins = np.linspace(background_curr_deg_conf[0, :].min(),
                       background_curr_deg_conf[0, :].max(),
                       100)
    if true_sample_tri_corr_array is not None:
        # Widen the binning so both distributions share the same bins.
        bins = np.linspace(min(background_curr_deg_conf[0, :].min(),
                               true_sample_tri_corr_array[0, :].min()),
                           max(background_curr_deg_conf[0, :].max(),
                               true_sample_tri_corr_array[0, :].max()),
                           100)
    plt.hist(background_curr_deg_conf[0, :],
             bins=bins, histtype='step', log=True, color='b')
    if true_sample_tri_corr_array is not None:
        plt.hist(true_sample_tri_corr_array[0, :],
                 bins=bins, histtype='step', log=True, color='r')
    plt.subplot(332)
    plt.title('test current vs pure informativity')
    plt.scatter(background_curr_deg_conf[1, :],
                background_curr_deg_conf[0, :], color='b', alpha=0.1)
    if true_sample_tri_corr_array is not None:
        plt.scatter(
            true_sample_tri_corr_array[1, :],
            true_sample_tri_corr_array[0, :],
            color='r', alpha=0.5)
    # Shade the zoomed informativity span.
    plt.axvspan(inf_sel[0], inf_sel[1], facecolor='0.5', alpha=0.3)
    plt.subplot(333)
    plt.title('test current v.s. confusion potential')
    plt.scatter(background_curr_deg_conf[2, :], background_curr_deg_conf[0, :])
    if true_sample_tri_corr_array is not None:
        plt.scatter(
            true_sample_tri_corr_array[2, :],
            true_sample_tri_corr_array[0, :],
            color='r', alpha=0.5)
    plt.axvspan(selector[0], selector[1], facecolor='0.5', alpha=0.3)
    plt.subplot(334)
    plt.title('Gaussian KDE current_info')
    estimator_function = kde_compute(background_curr_deg_conf[(1, 0), :], 50, re_samples)
    current_info_rel = None
    if true_sample_tri_corr_array is not None:
        # Used to be the way to compute the p-values
        current_info_rel = estimator_function(true_sample_tri_corr_array[(1, 0), :])
    plt.subplot(335)
    plt.title('GO_term pure informativity distribution')
    # REFACTOR: this needs to be moved elsewhere - this is a structural analysis
    bins = np.linspace(
        background_curr_deg_conf[1, :].min(),
        background_curr_deg_conf[1, :].max(),
        100)
    if true_sample_tri_corr_array is not None:
        bins = np.linspace(min(background_curr_deg_conf[1, :].min(),
                               true_sample_tri_corr_array[1, :].min()),
                           max(background_curr_deg_conf[1, :].max(),
                               true_sample_tri_corr_array[1, :].max()),
                           100)
    plt.hist(background_curr_deg_conf[1, :],
             bins=bins, histtype='step', log=True, color='b')
    if true_sample_tri_corr_array is not None:
        plt.hist(true_sample_tri_corr_array[1, :],
                 bins=bins, histtype='step', log=True, color='r')
    plt.subplot(336)
    plt.title('Density of current in the highlighted area')
    # Restrict to the columns whose confusion potential falls in `selector`.
    bins = np.linspace(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].min(),
                       local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].max(),
                       100)
    if true_sample_tri_corr_array is not None:
        bins = np.linspace(
            min(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].min(),
                local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :].min()),
            max(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].max(),
                local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :].max()),
            100)
    plt.hist(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :],
             bins=bins, histtype='step', log=True, color='b')
    if true_sample_tri_corr_array is not None:
        plt.hist(local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :],
                 bins=bins, histtype='step', log=True, color='r')
    cluster_props = None
    plt.subplot(337)
    plt.title('Clustering correlation')
    # REFACTOR: that's the custering logic to be extracted elsewhere
    if not sparse:
        # plt.scatter(mean_correlations[0, :], mean_correlations[1, :], color = 'b')
        estimator_function = kde_compute(mean_correlations[(0, 1), :], 50, re_samples)
        cluster_props = None
        if test_mean_correlation is not None:
            plt.scatter(test_mean_correlation[0, :],
                        test_mean_correlation[1, :],
                        color='k', alpha=0.8)
            cluster_props = estimator_function(test_mean_correlation[(0, 1), :])
    plt.subplot(338)
    plt.title('Eigvals_hist')
    # REFACTOR: this needs to be moved elsewhere - this is a structural analysis
    if not sparse:
        bins = np.linspace(eigenvalues.min(), eigenvalues.max(), 100)
        if true_sample_tri_corr_array is not None:
            bins = np.linspace(min(eigenvalues.min(), eigenvalue.min()),
                               max(eigenvalues.max(), eigenvalue.max()),
                               100)
        plt.hist(eigenvalues, bins=bins, histtype='step', color='b')
        if eigenvalue is not None:
            # The * 3 repetition only inflates the bar height for visibility.
            plt.hist(eigenvalue.tolist() * 3, bins=bins, histtype='step', color='r')
    plt.subplot(339)
    plt.title('confusion potential')
    bins = np.linspace(background_curr_deg_conf[2, :].min(),
                       background_curr_deg_conf[2, :].max(),
                       100)
    if true_sample_tri_corr_array is not None:
        bins = np.linspace(min(background_curr_deg_conf[2, :].min(),
                               true_sample_tri_corr_array[2, :].min()),
                           max(background_curr_deg_conf[2, :].max(),
                               true_sample_tri_corr_array[2, :].max()),
                           100)
    plt.hist(background_curr_deg_conf[2, :],
             bins=bins, histtype='step', log=True, color='b')
    if true_sample_tri_corr_array is not None:
        plt.hist(true_sample_tri_corr_array[2, :],
                 bins=bins, histtype='step', log=True, color='r')
    # # plt.show()
    plt.savefig(save_path.knowledge_network_scatterplot)
    # pull the groups corresponding to non-random associations.
    return current_info_rel, cluster_props
def deprecated_perform_clustering(inter_node_tension: spmat.csc_matrix,
                                  cluster_number: int,
                                  show: str = 'undefined clustering') -> Tuple[np.array, np.float64,
                                                                               np.array, np.array]:
    """
    Performs a clustering on the voltages of the nodes,

    :param inter_node_tension: mapping of (node, node) pairs to tension values.
        NOTE(review): annotated as csc_matrix but used via .keys()/.items(),
        i.e. it must actually be a dict keyed by node pairs -- confirm.
    :param cluster_number: number of clusters to split the nodes into
    :param show: name under which the reordered matrix is rendered;
        NOTE(review): the default string is truthy, so rendering always runs
        unless the caller passes an empty value.
    :return: (per-group stats array, inter-set linkage, [size, mean-corr]
        array, eigenvalues of the normalized Laplacian)
    """
    # Collect every node mentioned in any pair and index it both ways.
    index_group = list(set([item
                            for key in inter_node_tension.keys()
                            for item in key]))
    local_index = dict((UP, i) for i, UP in enumerate(index_group))
    rev_idx = dict((i, UP) for i, UP in enumerate(index_group))
    # Build a Laplacian-like matrix: -1/tension off-diagonal, +1/tension
    # accumulated on the diagonal.
    relations_matrix = spmat.lil_matrix((len(index_group), len(index_group)))
    for (UP1, UP2), tension in inter_node_tension.items():
        # TODO: change the metric used to cluster the nodes.
        relations_matrix[local_index[UP1], local_index[UP2]] = -1.0 / tension
        relations_matrix[local_index[UP2], local_index[UP1]] = -1.0 / tension
        relations_matrix[local_index[UP2], local_index[UP2]] += 1.0 / tension
        relations_matrix[local_index[UP1], local_index[UP1]] += 1.0 / tension
    # underlying method is spectral clustering: do we really lie in a good zone for that?
    # NOPE - we need a dynamic clusters number
    # TODO: change clustering method to a different one
    groups = cluster_nodes(relations_matrix, cluster_number)
    relations_matrix = normalize_laplacian(relations_matrix)
    # eigsh requires k < matrix dimension, hence the size-dependent branches.
    if relations_matrix.shape[0] < 5:
        eigenvals, _ = spmat.linalg.eigsh(relations_matrix, k=2)
    elif relations_matrix.shape[0] < 10:
        eigenvals, _ = spmat.linalg.eigsh(relations_matrix, k=4)
    else:
        eigenvals, _ = spmat.linalg.eigsh(relations_matrix)
    # Flip sign and set unit diagonal to read the matrix as a correlation.
    relations_matrix = - relations_matrix
    relations_matrix.setdiag(1)
    group_sets = []
    group_2_mean_off_diag = []
    for i in range(0, cluster_number):
        group_selector = groups == i
        group_indexes = group_selector.nonzero()[0].tolist()
        # Per group: (member node ids, size, mean off-diagonal correlation).
        group_2_mean_off_diag.append(
            (tuple(rev_idx[idx] for idx in group_indexes),
             len(group_indexes),
             average_off_diag_in_sub_matrix(relations_matrix, group_indexes)))
        group_sets.append(group_indexes)
    remainder = average_interset_linkage(relations_matrix, group_sets)
    # Reorder rows/columns so members of the same cluster are contiguous.
    clustidx = np.array([item for itemset in group_sets for item in itemset])
    relations_matrix = relations_matrix[:, clustidx]
    relations_matrix = relations_matrix[clustidx, :]
    mean_corr_array = np.array([[items, mean_corr]
                                for _, items, mean_corr in group_2_mean_off_diag])
    if show:
        render_2d_matrix(relations_matrix.toarray(), name=show, destination='')
    return np.array(group_2_mean_off_diag), \
        remainder, \
        mean_corr_array, \
        eigenvals
"""
This module contains a custom dialog class used to personalize the appearance of a
L{FlatMenu} on the fly, allowing also the user of your application to do the same.
"""
import wx
from UserDict import UserDict
from artmanager import ArtManager
from fmresources import *
from labelbook import LabelBook
_ = wx.GetTranslation
# ---------------------------------------------------------------------------- #
# Class OrderedDict
# ---------------------------------------------------------------------------- #
class OrderedDict(UserDict):
    """
    An ordered dictionary implementation.

    Keeps key insertion order in the parallel `_keys` list (this is legacy
    Python 2 code predating `collections.OrderedDict` / ordered dicts).
    """

    def __init__(self, dict = None):
        # NOTE(review): the parameter name shadows the builtin `dict`;
        # kept as-is for API compatibility.
        self._keys = []
        UserDict.__init__(self, dict)

    def __delitem__(self, key):
        UserDict.__delitem__(self, key)
        self._keys.remove(key)

    def __setitem__(self, key, item):
        UserDict.__setitem__(self, key, item)
        # Only record the key on first insertion, preserving original order.
        if key not in self._keys: self._keys.append(key)

    def clear(self):
        UserDict.clear(self)
        self._keys = []

    def copy(self):
        # Copy the mapping, then duplicate the key order explicitly.
        dict = UserDict.copy(self)
        dict._keys = self._keys[:]
        return dict

    def items(self):
        # Pairs in insertion order (values() below follows _keys).
        return zip(self._keys, self.values())

    def keys(self):
        return self._keys

    def popitem(self):
        # Pops the most recently added key (LIFO), like dict.popitem.
        try:
            key = self._keys[-1]
        except IndexError:
            raise KeyError('dictionary is empty')
        val = self[key]
        del self[key]
        return (key, val)

    def setdefault(self, key, failobj = None):
        UserDict.setdefault(self, key, failobj)
        if key not in self._keys: self._keys.append(key)

    def update(self, dict):
        UserDict.update(self, dict)
        for key in dict.keys():
            if key not in self._keys: self._keys.append(key)

    def values(self):
        # Values in insertion order.
        return map(self.get, self._keys)
# ---------------------------------------------------------------------------- #
# Class FMTitlePanel
# ---------------------------------------------------------------------------- #
class FMTitlePanel(wx.Panel):
    """
    Helper class to draw gradient shadings on the dialog.
    """

    def __init__(self, parent, title):
        """
        Default class constructor.

        :param `parent`: the L{FMTitlePanel} parent;
        :param `title`: the string to use as a dialog title.
        """
        wx.Panel.__init__(self, parent)
        self._title = title
        # Set the panel size: measure the text height with a scratch DC.
        dc = wx.MemoryDC()
        dc.SelectObject(wx.EmptyBitmap(1, 1))
        dc.SetFont(wx.SystemSettings_GetFont( wx.SYS_DEFAULT_GUI_FONT ))
        ww, hh = dc.GetTextExtent("Tp")
        dc.SelectObject(wx.NullBitmap)
        # Set minimum panel size
        if ww < 250:
            ww = 250
        self.SetSize(wx.Size(ww, hh + 10))
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)

    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{FMTitlePanel}.

        :param `event`: a `wx.EraseEvent` event to be processed.

        :note: This method is intentionally empty to reduce flicker.
        """
        pass

    def OnPaint(self, event):
        """
        Handles the ``wx.EVT_PAINT`` event for L{FMTitlePanel}.

        :param `event`: a `wx.PaintEvent` event to be processed.
        """
        dc = wx.BufferedPaintDC(self)
        # Draw the background: vertical gradient from 3D-face to a lighter tint.
        colour1 = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)
        colour2 = ArtManager.Get().LightColour(colour1, 70)
        ArtManager.Get().PaintStraightGradientBox(dc, self.GetClientRect(), colour1, colour2, False)
        # Draw the text
        font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetWeight(wx.BOLD)
        dc.SetFont(font)
        dc.SetTextForeground(wx.BLACK)
        dc.DrawText(self._title, 5, 5)
# ---------------------------------------------------------------------------- #
# Class FMCustomizeDlg
# ---------------------------------------------------------------------------- #
class FMCustomizeDlg(wx.Dialog):
    """
    Class used to customize the appearance of L{FlatMenu} and L{FlatMenuBar}.

    Legacy Python 2 code (uses `has_key`, `xrange`); the parent is expected
    to be the L{FlatMenuBar} being customized.
    """

    def __init__(self, parent=None):
        """
        Default class constructor.

        :param `parent`: the L{FMCustomizeDlg} parent window.
        """
        self._book = None
        if not parent:
            # Two-step creation path: bail out before building any content.
            wx.Dialog.__init__(self)
            return
        wx.Dialog.__init__(self, parent, wx.ID_ANY, _("Customize"), wx.DefaultPosition,
                           wx.DefaultSize, wx.DEFAULT_DIALOG_STYLE)
        # Ordered maps of menu label -> menu, split by current visibility.
        self._visibleMenus = OrderedDict()
        self._hiddenMenus = OrderedDict()
        self.CreateDialog()
        self.ConnectEvents()
        self.GetSizer().Fit(self)
        self.GetSizer().SetSizeHints(self)
        self.GetSizer().Layout()
        self.Centre()

    def CreateDialog(self):
        """ Actually creates the dialog. """
        sz = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sz)
        # Create the main book and add some pages into it
        style = INB_NO_RESIZE | INB_LEFT | INB_DRAW_SHADOW | INB_BORDER
        self._book = LabelBook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, style)
        sz.Add(self._book, 1, wx.EXPAND)
        # Match the book colours to the current menu theme.
        self._book.SetColour(INB_TAB_AREA_BACKGROUND_COLOUR, ArtManager.Get().GetMenuFaceColour())
        colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)
        self._book.SetColour(INB_ACTIVE_TAB_COLOUR, colour)
        self.created = False
        self.Initialise()
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        # add a separator between the book & the buttons area
        hsizer.Add(wx.Button(self, wx.ID_OK, _("&Close")), 0, wx.EXPAND | wx.ALIGN_RIGHT)
        sz.Add(wx.StaticLine(self), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 3)
        sz.Add(hsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 2)

    def Initialise(self):
        """ Initialzes the L{LabelBook} pages. """
        self._book.DeleteAllPages()
        self._book.AddPage(self.CreateMenusPage(), _("Menus"), True)
        self._book.AddPage(self.CreateOptionsPage(), _("Options"), False)

    def CloseDialog(self):
        """ Closes the dialog. """
        self.EndModal(wx.ID_OK)

    def ConnectEvents(self):
        """ Does nothing at the moment. """
        pass

    def CreateMenusPage(self):
        """ Creates the L{LabelBook} pages with L{FlatMenu} information. """
        menus = wx.Panel(self._book, wx.ID_ANY, wx.DefaultPosition, wx.Size(300, 300))
        sz = wx.BoxSizer(wx.VERTICAL)
        menus.SetSizer(sz)
        choices = []
        mb = self.GetParent()
        if not self.created:
            self.order = []
        # Add all the menu items that are currently visible to the list
        for i in xrange(len(mb._items)):
            dummy, lableOnly = ArtManager.Get().GetAccelIndex(mb._items[i].GetTitle())
            choices.append(lableOnly)
            # Add the menu to the visible menus map
            self._visibleMenus.update({lableOnly: mb._items[i].GetMenu()})
            if not self.created:
                self.order.append(lableOnly)
        # Add all hidden menus to the menu bar
        for key in self._hiddenMenus.keys():
            choices.append(key)
        if self.created:
            # Rebuild both maps in the originally recorded menu order.
            visible = OrderedDict()
            hidden = OrderedDict()
            for items in self.order:
                if items in self._visibleMenus:
                    visible[items] = self._visibleMenus[items]
                elif items in self._hiddenMenus:
                    hidden[items] = self._hiddenMenus[items]
            self._visibleMenus = visible
            self._hiddenMenus = hidden
        self._menuListId = wx.NewId()
        self._checkListMenus = wx.CheckListBox(menus, self._menuListId, pos=wx.DefaultPosition, size=wx.Size(250, 250),
                                               choices=self.order, style=wx.BORDER_SIMPLE)
        self._checkListMenus.Bind(wx.EVT_CHECKLISTBOX, self.OnMenuChecked)
        # check all visible items
        for indx, item in enumerate(self.order):
            if item in self._visibleMenus:
                self._checkListMenus.Check(indx)
        # Add title panel
        title = FMTitlePanel(menus, _("Select Menu To Add/Remove:"))
        sz.Add(title, 0, wx.EXPAND | wx.ALL, 2)
        sz.Add(self._checkListMenus, 1, wx.EXPAND | wx.TOP | wx.RIGHT | wx.LEFT, 2)
        self.created = True
        return menus

    def CreateShortcutsPage(self):
        """ Creates the L{LabelBook} shorcuts page. """
        # NOTE(review): currently an empty placeholder panel.
        shorcuts = wx.Panel(self._book, wx.ID_ANY, wx.DefaultPosition, wx.Size(300, 300))
        return shorcuts

    def CreateOptionsPage(self):
        """ Creates the L{LabelBook} option page which holds the L{FlatMenu} styles. """
        options = wx.Panel(self._book, wx.ID_ANY, wx.DefaultPosition, wx.Size(300, 300))
        # Create some options here
        vsizer = wx.BoxSizer(wx.VERTICAL)
        options.SetSizer(vsizer)
        #-----------------------------------------------------------
        # options page layout
        # - Menu Style: Default or 2007 (radio group)
        #
        # - Default Style Settings: (static box)
        # + Draw vertical gradient (check box)
        # + Draw border (check box)
        # + Drop toolbar shadow (check box)
        #
        # - Colour Scheme (static box)
        # + Menu bar background colour (combo button)
        #-----------------------------------------------------------
        self._menuStyleID = wx.NewId()
        choices = [_("Default Style"), _("Metallic")]
        self._menuStyle = wx.RadioBox(options, self._menuStyleID, _("Menu bar style"),
                                      wx.DefaultPosition, wx.DefaultSize, choices)
        # update the selection
        theme = ArtManager.Get().GetMenuTheme()
        if theme == Style2007:
            self._menuStyle.SetSelection(1)
        else:
            self._menuStyle.SetSelection(0)
        # connect event to the control
        self._menuStyle.Bind(wx.EVT_RADIOBOX, self.OnChangeStyle)
        vsizer.Add(self._menuStyle, 0, wx.EXPAND | wx.ALL, 5)
        self._sbStyle = wx.StaticBoxSizer(wx.StaticBox(options, -1, _("Default style settings")), wx.VERTICAL)
        self._drawVertGradID = wx.NewId()
        self._verticalGradient = wx.CheckBox(options, self._drawVertGradID, _("Draw vertical gradient"))
        self._verticalGradient.Bind(wx.EVT_CHECKBOX, self.OnChangeStyle)
        self._sbStyle.Add(self._verticalGradient, 0, wx.EXPAND | wx.ALL, 3)
        self._verticalGradient.SetValue(ArtManager.Get().GetMBVerticalGradient())
        self._drawBorderID = wx.NewId()
        self._drawBorder = wx.CheckBox(options, self._drawBorderID, _("Draw border around menu bar"))
        self._drawBorder.Bind(wx.EVT_CHECKBOX, self.OnChangeStyle)
        self._sbStyle.Add(self._drawBorder, 0, wx.EXPAND | wx.ALL, 3)
        self._drawBorder.SetValue(ArtManager.Get().GetMenuBarBorder())
        self._shadowUnderTBID = wx.NewId()
        self._shadowUnderTB = wx.CheckBox(options, self._shadowUnderTBID, _("Toolbar float over menu bar"))
        self._shadowUnderTB.Bind(wx.EVT_CHECKBOX, self.OnChangeStyle)
        self._sbStyle.Add(self._shadowUnderTB, 0, wx.EXPAND | wx.ALL, 3)
        self._shadowUnderTB.SetValue(ArtManager.Get().GetRaiseToolbar())
        vsizer.Add(self._sbStyle, 0, wx.EXPAND | wx.ALL, 5)
        # Misc
        sb = wx.StaticBoxSizer(wx.StaticBox(options, -1, _("Colour Scheme")), wx.VERTICAL)
        self._colourID = wx.NewId()
        colourChoices = ArtManager.Get().GetColourSchemes()
        colourChoices.sort()
        self._colour = wx.ComboBox(options, self._colourID, ArtManager.Get().GetMenuBarColourScheme(), choices=colourChoices,
                                   style=wx.CB_DROPDOWN | wx.CB_READONLY)
        sb.Add(self._colour, 0, wx.EXPAND)
        vsizer.Add(sb, 0, wx.EXPAND | wx.ALL, 5)
        self._colour.Bind(wx.EVT_COMBOBOX, self.OnChangeStyle)
        # update the dialog by sending all possible events to us
        event = wx.CommandEvent(wx.wxEVT_COMMAND_RADIOBOX_SELECTED, self._menuStyleID)
        event.SetEventObject(self)
        event.SetInt(self._menuStyle.GetSelection())
        self._menuStyle.ProcessEvent(event)
        event.SetEventType(wx.wxEVT_COMMAND_CHECKBOX_CLICKED)
        event.SetId(self._drawVertGradID)
        event.SetInt(ArtManager.Get().GetMBVerticalGradient())
        self._verticalGradient.ProcessEvent(event)
        event.SetEventType(wx.wxEVT_COMMAND_CHECKBOX_CLICKED)
        event.SetId(self._shadowUnderTBID)
        event.SetInt(ArtManager.Get().GetRaiseToolbar())
        self._shadowUnderTB.ProcessEvent(event)
        event.SetEventType(wx.wxEVT_COMMAND_CHECKBOX_CLICKED)
        event.SetId(self._drawBorderID)
        event.SetInt(ArtManager.Get().GetMenuBarBorder())
        self._drawBorder.ProcessEvent(event)
        event.SetEventType(wx.wxEVT_COMMAND_COMBOBOX_SELECTED)
        event.SetId(self._colourID)
        self._colour.ProcessEvent(event)
        return options

    def OnMenuChecked(self, event):
        """
        Handles the ``wx.EVT_CHECKBOX`` event for L{FMCustomizeDlg}.

        :param `event`: a `wx.CommandEvent` event to be processed.

        :note: This method handles the L{FlatMenu} menus visibility.
        """
        # NOTE(review): `id` shadows the builtin; kept as-is.
        id = event.GetInt()
        checked = self._checkListMenus.IsChecked(id)
        menuName = self._checkListMenus.GetString(id)
        menu = None
        mb = self.GetParent()
        if checked:
            # remove the item from the hidden map
            if self._hiddenMenus.has_key(menuName):
                menu = self._hiddenMenus.pop(menuName)
            # add it to the visible map
            if menu:
                self._visibleMenus.update({menuName: menu})
                indx = self._checkListMenus.GetItems().index(menuName)
                # update the menubar
                mb.Insert(indx, menu, menu._menuBarFullTitle)
                mb.Refresh()
        else:
            # remove the item from the visible items
            if self._visibleMenus.has_key(menuName):
                menu = self._visibleMenus.pop(menuName)
            # add it to the hidden map
            if menu:
                self._hiddenMenus.update({menuName: menu})
                # update the menubar
                pos = mb.FindMenu(menuName)
                if pos != wx.NOT_FOUND:
                    mb.Remove(pos)
                    mb.Refresh()
        if self.created:
            # Re-sort both maps back into the recorded menu order.
            visible = OrderedDict()
            hidden = OrderedDict()
            for items in self.order:
                if items in self._visibleMenus:
                    visible[items] = self._visibleMenus[items]
                elif items in self._hiddenMenus:
                    hidden[items] = self._hiddenMenus[items]
            self._visibleMenus = visible
            self._hiddenMenus = hidden

    def OnChangeStyle(self, event):
        """
        Handles the ``wx.EVT_CHECKBOX`` event for L{FMCustomizeDlg}.

        :param `event`: a `wx.CommandEvent` event to be processed.

        :note: This method handles the L{FlatMenu} styles.
        """
        mb = self.GetParent()
        if event.GetId() == self._menuStyleID:
            if event.GetSelection() == 0:
                # Default style
                ArtManager.Get().SetMenuTheme(StyleXP)
                self._drawBorder.Enable()
                self._verticalGradient.Enable()
                mb.Refresh()
            else:
                # Metallic (2007) style: border/gradient are not applicable.
                ArtManager.Get().SetMenuTheme(Style2007)
                self._drawBorder.Enable(False)
                self._verticalGradient.Enable(False)
                mb.Refresh()
            return
        if event.GetId() == self._drawBorderID:
            ArtManager.Get().DrawMenuBarBorder(event.IsChecked())
            mb.Refresh()
            return
        if event.GetId() == self._drawVertGradID:
            ArtManager.Get().SetMBVerticalGradient(event.IsChecked())
            mb.Refresh()
            return
        if event.GetId() == self._colourID:
            selection = _("Default")
            sel = self._colour.GetSelection()
            if sel != wx.NOT_FOUND:
                # select new colour scheme
                selection = self._colour.GetStringSelection()
            ArtManager.Get().SetMenuBarColour(selection)
            mb.Refresh()
            return
        if event.GetId() == self._shadowUnderTBID:
            ArtManager.Get().SetRaiseToolbar(event.IsChecked())
            mb.Refresh()
            return
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs plain vanilla bonnie++."""
import logging
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
BENCHMARK_NAME = 'bonnie++'
BENCHMARK_CONFIG = """
bonnie++:
description: >
Runs Bonnie++. Running this benchmark inside
a container is currently not supported,
since Docker tries to run it as root, which
is not recommended.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
# Matches a bonnie++ latency value such as '200ms': numeric part, then unit.
LATENCY_REGEX = r'([0-9]*\.?[0-9]+)(\w+)'

# Bonnie++ CSV result fields in column order; see man bon_csv2txt for details.
# The CSV layout is purely positional, so the mapping is name -> column index.
_BONNIE_CSV_FIELDS = (
    'format_version',
    'bonnie_version',
    'name',
    'concurrency',
    'seed',
    'file_size',
    'chunk_size',
    'putc',
    'putc_cpu',
    'put_block',
    'put_block_cpu',
    'rewrite',
    'rewrite_cpu',
    'getc',
    'getc_cpu',
    'get_block',
    'get_block_cpu',
    'seeks',
    'seeks_cpu',
    'num_files',
    'max_size',
    'min_size',
    'num_dirs',
    'file_chunk_size',
    'seq_create',
    'seq_create_cpu',
    'seq_stat',
    'seq_stat_cpu',
    'seq_del',
    'seq_del_cpu',
    'ran_create',
    'ran_create_cpu',
    'ran_stat',
    'ran_stat_cpu',
    'ran_del',
    'ran_del_cpu',
    'putc_latency',
    'put_block_latency',
    'rewrite_latency',
    'getc_latency',
    'get_block_latency',
    'seeks_latency',
    'seq_create_latency',
    'seq_stat_latency',
    'seq_del_latency',
    'ran_create_latency',
    'ran_stat_latency',
    'ran_del_latency')

BONNIE_RESULTS_MAPPING = {
    name: index for index, name in enumerate(_BONNIE_CSV_FIELDS)}
def GetConfig(user_config):
  """Return the benchmark config, merging user overrides into the default."""
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
  """Install Bonnie++ on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  # Only the first VM in the group runs the benchmark.
  target_vm = benchmark_spec.vms[0]
  logging.info('Bonnie++ prepare on %s', target_vm)
  target_vm.Install('bonnieplusplus')
def IsValueValid(value):
  """Report whether a raw bonnie++ field value is usable.

  Bonnie++ emits an empty string for skipped tests and strings of '+'
  characters for results too fast to measure; both are invalid.

  Args:
    value: string. The value in raw result.

  Returns:
    A boolean indicates if the value is valid or not.
  """
  return value != '' and '+' not in value
def IsCpuField(field):
  """Report whether a result field holds a CPU-utilization percentage.

  Args:
    field: string. The name of the field.

  Returns:
    True when the field name contains the keyword 'cpu'.
  """
  return 'cpu' in field
def IsLatencyField(field):
  """Report whether a result field holds a latency measurement.

  Args:
    field: string. The name of the field.

  Returns:
    True when the field name contains the keyword 'latency'.
  """
  return 'latency' in field
def ParseLatencyResult(result):
  """Parse latency result into value and unit.

  Args:
    result: string. Latency value in string format, contains value and unit.
            eg. 200ms

  Returns:
    A tuple of value (float) and unit (string).
  """
  # regex_util.ExtractAllMatches raises if nothing matches, so an invalid
  # token never silently yields garbage here.
  match = regex_util.ExtractAllMatches(LATENCY_REGEX, result)[0]
  return float(match[0]), match[1]
def UpdateMetadata(metadata, key, value):
  """Add (key, value) to the metadata dict, skipping invalid values.

  Args:
    metadata: dict. A dictionary of sample metadata, updated in place.
    key: string. Key that will be added into metadata dictionary.
    value: Value that of the key.
  """
  # Guard clause: silently drop empty or '+'-style placeholder values.
  if not IsValueValid(value):
    return
  metadata[key] = value
def CreateSamples(results, start_index, end_index, metadata,
                  field_index_mapping):
  """Create samples with data in results from start_index to end_index.

  Args:
    results: A list of string representing bonnie++ results.
    start_index: integer. The start index in results list of the samples.
    end_index: integer. The end index (exclusive) in results list.
    metadata: dict. A dictionary of metadata added into samples.
    field_index_mapping: dict. A dictionary maps field index to field names.

  Returns:
    A list of sample.Sample instances.
  """
  samples = []
  for field_index in range(start_index, end_index):
    field_name = field_index_mapping[field_index]
    value = results[field_index]
    if not IsValueValid(value):
      continue
    if IsCpuField(field_name):
      # CPU fields report utilization percentages. The previous value
      # '%s' was a leaked printf placeholder, not a unit.
      unit = '%'
    elif IsLatencyField(field_name):
      value, unit = ParseLatencyResult(value)
    else:
      # Remaining throughput fields are reported in KiB per second.
      unit = 'K/sec'
    samples.append(sample.Sample(field_name, float(value), unit, metadata))
  return samples
def ParseCSVResults(results):
  """Parse csv format bonnie++ results.

  Sample Results:
    1.96,1.96,perfkit-7b22f510-0,1,1421800799,7423M,,,,72853,15,47358,5,,,
    156821,7,537.7,10,100,,,,,49223,58,+++++,+++,54405,53,2898,97,+++++,+++,
    59089,60,,512ms,670ms,,44660us,200ms,3747us,1759us,1643us,33518us,192us,
    839us

  Args:
    results: string. Bonnie++ results.

  Returns:
    A list of samples in the form of 3 or 4 tuples. The tuples contain
        the sample metric (string), value (float), and unit (string).
        If a 4th element is included, it is a dictionary of sample
        metadata.
  """
  # Invert name -> index into index -> name for positional lookups.
  # dict.items() (not the Python-2-only iteritems()) keeps this code
  # working on both Python 2 and Python 3.
  field_index_mapping = {index: field
                         for field, index in BONNIE_RESULTS_MAPPING.items()}
  results = results.split(',')
  assert len(results) == len(BONNIE_RESULTS_MAPPING)
  samples = []
  metadata = {}
  # Columns format_version..chunk_size and num_files..file_chunk_size are
  # run configuration, recorded as metadata rather than as samples.
  for field_index in range(BONNIE_RESULTS_MAPPING['format_version'],
                           BONNIE_RESULTS_MAPPING['chunk_size'] + 1):
    UpdateMetadata(metadata, field_index_mapping[field_index],
                   results[field_index])
  for field_index in range(BONNIE_RESULTS_MAPPING['num_files'],
                           BONNIE_RESULTS_MAPPING['file_chunk_size'] + 1):
    UpdateMetadata(metadata, field_index_mapping[field_index],
                   results[field_index])
  # The two measurement ranges (throughput/CPU and create/stat/delete plus
  # latencies) become individual samples sharing the metadata above.
  samples.extend(CreateSamples(results,
                               BONNIE_RESULTS_MAPPING['putc'],
                               BONNIE_RESULTS_MAPPING['num_files'],
                               metadata, field_index_mapping))
  samples.extend(CreateSamples(results,
                               BONNIE_RESULTS_MAPPING['seq_create'],
                               BONNIE_RESULTS_MAPPING['ran_del_latency'] + 1,
                               metadata, field_index_mapping))
  return samples
def Run(benchmark_spec):
  """Run Bonnie++ on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of samples in the form of 3 or 4 tuples. The tuples contain
        the sample metric (string), value (float), and unit (string).
        If a 4th element is included, it is a dictionary of sample
        metadata.
  """
  vms = benchmark_spec.vms
  vm = vms[0]
  logging.info('Bonnie++ running on %s', vm)
  # -q: quiet mode (CSV on stdout); -d: scratch directory; -s: file size in
  # MB, set to twice the VM's RAM so the page cache cannot absorb the working
  # set; -n 100: 100k small files for the creation tests; -f: skip the slow
  # per-character I/O tests.
  bonnie_command = ('/usr/sbin/bonnie++ -q -d %s -s %d -n 100 -f' %
                    (vm.GetScratchDir(),
                     2 * vm.total_memory_kb / 1024))
  logging.info('Bonnie++ Results:')
  out, _ = vm.RemoteCommand(bonnie_command, should_log=True)
  return ParseCSVResults(out.strip())
def Cleanup(benchmark_spec):
  """Cleanup Bonnie++ on the target vm (by uninstalling).

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  # Nothing to do: the installed package goes away with the VM teardown.
  pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import sys
import os
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
samba.ensure_external_module("subunit", "subunit/python")
import samba.getopt as options
from samba.auth import system_session
from ldb import SCOPE_BASE, LdbError
from ldb import ERR_NO_SUCH_OBJECT, ERR_NOT_ALLOWED_ON_NON_LEAF
from ldb import ERR_UNWILLING_TO_PERFORM
from samba.samdb import SamDB
from samba.tests import delete_force
from subunit.run import SubunitTestRunner
import unittest
# Command-line handling: the single positional argument is either an LDAP
# host/URL or a local database file (resolved to a URL further below).
parser = optparse.OptionParser("deletetest.py [options] <host|file>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
opts, args = parser.parse_args()
if len(args) < 1:
    parser.print_usage()
    sys.exit(1)
host = args[0]
# Loadparm context and credentials derived from the standard Samba options.
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
class BasicDeleteTests(samba.tests.TestCase):
    """Tests of LDAP object deletion against a live SamDB.

    Covers tombstoning of deleted objects, preservation of the documented
    attribute set, RDN mangling, tree-delete semantics, and the various
    deletion protections (system-critical objects, crossRefs, etc.).
    Uses the module-level `ldb` connection created at script start.
    """

    def GUID_string(self, guid):
        # Format a raw objectGUID attribute value as its canonical string.
        return self.ldb.schema_format_value("objectGUID", guid)

    def setUp(self):
        super(BasicDeleteTests, self).setUp()
        # Bind the module-level SamDB connection and cache the naming
        # contexts the tests operate under.
        self.ldb = ldb
        self.base_dn = ldb.domain_dn()
        self.configuration_dn = ldb.get_config_basedn().get_linearized()

    def search_guid(self, guid):
        # Look an object up by GUID with the show_deleted control so it can
        # still be found after it has been turned into a tombstone.
        print "SEARCH by GUID %s" % self.GUID_string(guid)
        res = ldb.search(base="<GUID=%s>" % self.GUID_string(guid),
                         scope=SCOPE_BASE, controls=["show_deleted:1"])
        self.assertEquals(len(res), 1)
        return res[0]

    def search_dn(self,dn):
        # Base-scope search for a single object by DN (deleted ones too).
        print "SEARCH by DN %s" % dn
        res = ldb.search(expression="(objectClass=*)",
                         base=dn,
                         scope=SCOPE_BASE,
                         controls=["show_deleted:1"])
        self.assertEquals(len(res), 1)
        return res[0]

    def del_attr_values(self, delObj):
        # A tombstone must be flagged isDeleted and must have lost the
        # attributes that are not preserved across deletion.
        print "Checking attributes for %s" % delObj["dn"]
        self.assertEquals(delObj["isDeleted"][0],"TRUE")
        self.assertTrue(not("objectCategory" in delObj))
        self.assertTrue(not("sAMAccountType" in delObj))

    def preserved_attributes_list(self, liveObj, delObj):
        # Every attribute of the live object that is on the documented
        # preserved list must still be present on the tombstone.
        print "Checking for preserved attributes list"
        preserved_list = ["nTSecurityDescriptor", "attributeID", "attributeSyntax", "dNReferenceUpdate", "dNSHostName",
                          "flatName", "governsID", "groupType", "instanceType", "lDAPDisplayName", "legacyExchangeDN",
                          "isDeleted", "isRecycled", "lastKnownParent", "msDS-LastKnownRDN", "mS-DS-CreatorSID",
                          "mSMQOwnerID", "nCName", "objectClass", "distinguishedName", "objectGUID", "objectSid",
                          "oMSyntax", "proxiedObjectName", "name", "replPropertyMetaData", "sAMAccountName",
                          "securityIdentifier", "sIDHistory", "subClassOf", "systemFlags", "trustPartner", "trustDirection",
                          "trustType", "trustAttributes", "userAccountControl", "uSNChanged", "uSNCreated", "whenCreated"]
        for a in liveObj:
            if a in preserved_list:
                self.assertTrue(a in delObj)

    def check_rdn(self, liveObj, delObj, rdnName):
        # On deletion the RDN and name are mangled to
        # '<old rdn>\nDEL:<guid>' to keep them unique in the
        # Deleted Objects container.
        print "Checking for correct rDN"
        rdn=liveObj[rdnName][0]
        rdn2=delObj[rdnName][0]
        name2=delObj[rdnName][0]
        guid=liveObj["objectGUID"][0]
        self.assertEquals(rdn2, rdn + "\nDEL:" + self.GUID_string(guid))
        self.assertEquals(name2, rdn + "\nDEL:" + self.GUID_string(guid))

    def delete_deleted(self, ldb, dn):
        # Deleting an already-deleted DN must fail with ERR_NO_SUCH_OBJECT.
        print "Testing the deletion of the already deleted dn %s" % dn
        try:
            ldb.delete(dn)
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NO_SUCH_OBJECT)

    def test_delete_protection(self):
        """Delete protection tests"""
        print self.base_dn
        # Clean up any leftovers from a previous run, then build a small
        # container tree to exercise the non-leaf delete rules.
        delete_force(self.ldb, "cn=entry1,cn=ldaptestcontainer," + self.base_dn)
        delete_force(self.ldb, "cn=entry2,cn=ldaptestcontainer," + self.base_dn)
        delete_force(self.ldb, "cn=ldaptestcontainer," + self.base_dn)
        ldb.add({
            "dn": "cn=ldaptestcontainer," + self.base_dn,
            "objectclass": "container"})
        ldb.add({
            "dn": "cn=entry1,cn=ldaptestcontainer," + self.base_dn,
            "objectclass": "container"})
        ldb.add({
            "dn": "cn=entry2,cn=ldaptestcontainer," + self.base_dn,
            "objectclass": "container"})
        # Deleting a non-leaf object without the tree_delete control fails.
        try:
            ldb.delete("cn=ldaptestcontainer," + self.base_dn)
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NOT_ALLOWED_ON_NON_LEAF)
        # With the control, the whole subtree is removed.
        ldb.delete("cn=ldaptestcontainer," + self.base_dn, ["tree_delete:1"])
        try:
            res = ldb.search("cn=ldaptestcontainer," + self.base_dn,
                             scope=SCOPE_BASE, attrs=[])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NO_SUCH_OBJECT)
        try:
            res = ldb.search("cn=entry1,cn=ldaptestcontainer," + self.base_dn,
                             scope=SCOPE_BASE, attrs=[])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NO_SUCH_OBJECT)
        try:
            res = ldb.search("cn=entry2,cn=ldaptestcontainer," + self.base_dn,
                             scope=SCOPE_BASE, attrs=[])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NO_SUCH_OBJECT)
        delete_force(self.ldb, "cn=entry1,cn=ldaptestcontainer," + self.base_dn)
        delete_force(self.ldb, "cn=entry2,cn=ldaptestcontainer," + self.base_dn)
        delete_force(self.ldb, "cn=ldaptestcontainer," + self.base_dn)
        # Performs some protected object delete testing
        res = ldb.search(base="", expression="", scope=SCOPE_BASE,
                         attrs=["dsServiceName", "dNSHostName"])
        self.assertEquals(len(res), 1)
        # Delete failing since DC's nTDSDSA object is protected
        try:
            ldb.delete(res[0]["dsServiceName"][0])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
        res = ldb.search(self.base_dn, attrs=["rIDSetReferences"],
                         expression="(&(objectClass=computer)(dNSHostName=" + res[0]["dNSHostName"][0] + "))")
        self.assertEquals(len(res), 1)
        # Deletes failing since DC's rIDSet object is protected
        try:
            ldb.delete(res[0]["rIDSetReferences"][0])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
        try:
            ldb.delete(res[0]["rIDSetReferences"][0], ["tree_delete:1"])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
        # Deletes failing since three main crossRef objects are protected
        try:
            ldb.delete("cn=Enterprise Schema,cn=Partitions," + self.configuration_dn)
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
        try:
            ldb.delete("cn=Enterprise Schema,cn=Partitions," + self.configuration_dn, ["tree_delete:1"])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
        try:
            ldb.delete("cn=Enterprise Configuration,cn=Partitions," + self.configuration_dn)
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NOT_ALLOWED_ON_NON_LEAF)
        try:
            ldb.delete("cn=Enterprise Configuration,cn=Partitions," + self.configuration_dn, ["tree_delete:1"])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NOT_ALLOWED_ON_NON_LEAF)
        res = ldb.search("cn=Partitions," + self.configuration_dn, attrs=[],
                         expression="(nCName=%s)" % self.base_dn)
        self.assertEquals(len(res), 1)
        try:
            ldb.delete(res[0].dn)
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NOT_ALLOWED_ON_NON_LEAF)
        try:
            ldb.delete(res[0].dn, ["tree_delete:1"])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_NOT_ALLOWED_ON_NON_LEAF)
        # Delete failing since "SYSTEM_FLAG_DISALLOW_DELETE"
        try:
            ldb.delete("CN=Users," + self.base_dn)
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)
        # Tree-delete failing since "isCriticalSystemObject"
        try:
            ldb.delete("CN=Computers," + self.base_dn, ["tree_delete:1"])
            self.fail()
        except LdbError, (num, _):
            self.assertEquals(num, ERR_UNWILLING_TO_PERFORM)

    def test_all(self):
        """Basic delete tests"""
        print self.base_dn
        # DNs of the objects created below: two users, a group, and a site
        # subtree (site / NTDS settings / servers container / server).
        usr1="cn=testuser,cn=users," + self.base_dn
        usr2="cn=testuser2,cn=users," + self.base_dn
        grp1="cn=testdelgroup1,cn=users," + self.base_dn
        sit1="cn=testsite1,cn=sites," + self.configuration_dn
        ss1="cn=NTDS Site Settings,cn=testsite1,cn=sites," + self.configuration_dn
        srv1="cn=Servers,cn=testsite1,cn=sites," + self.configuration_dn
        srv2="cn=TESTSRV,cn=Servers,cn=testsite1,cn=sites," + self.configuration_dn
        delete_force(self.ldb, usr1)
        delete_force(self.ldb, usr2)
        delete_force(self.ldb, grp1)
        delete_force(self.ldb, ss1)
        delete_force(self.ldb, srv2)
        delete_force(self.ldb, srv1)
        delete_force(self.ldb, sit1)
        ldb.add({
            "dn": usr1,
            "objectclass": "user",
            "description": "test user description",
            "samaccountname": "testuser"})
        ldb.add({
            "dn": usr2,
            "objectclass": "user",
            "description": "test user 2 description",
            "samaccountname": "testuser2"})
        ldb.add({
            "dn": grp1,
            "objectclass": "group",
            "description": "test group",
            "samaccountname": "testdelgroup1",
            "member": [ usr1, usr2 ],
            "isDeleted": "FALSE" })
        ldb.add({
            "dn": sit1,
            "objectclass": "site" })
        ldb.add({
            "dn": ss1,
            "objectclass": ["applicationSiteSettings", "nTDSSiteSettings"] })
        ldb.add({
            "dn": srv1,
            "objectclass": "serversContainer" })
        ldb.add({
            "dn": srv2,
            "objectClass": "server" })
        # Capture GUIDs while the objects are live so the tombstones can be
        # looked up again after deletion.
        objLive1 = self.search_dn(usr1)
        guid1=objLive1["objectGUID"][0]
        objLive2 = self.search_dn(usr2)
        guid2=objLive2["objectGUID"][0]
        objLive3 = self.search_dn(grp1)
        guid3=objLive3["objectGUID"][0]
        objLive4 = self.search_dn(sit1)
        guid4=objLive4["objectGUID"][0]
        objLive5 = self.search_dn(ss1)
        guid5=objLive5["objectGUID"][0]
        objLive6 = self.search_dn(srv1)
        guid6=objLive6["objectGUID"][0]
        objLive7 = self.search_dn(srv2)
        guid7=objLive7["objectGUID"][0]
        ldb.delete(usr1)
        ldb.delete(usr2)
        ldb.delete(grp1)
        ldb.delete(srv1, ["tree_delete:1"])
        ldb.delete(sit1, ["tree_delete:1"])
        objDeleted1 = self.search_guid(guid1)
        objDeleted2 = self.search_guid(guid2)
        objDeleted3 = self.search_guid(guid3)
        objDeleted4 = self.search_guid(guid4)
        objDeleted5 = self.search_guid(guid5)
        objDeleted6 = self.search_guid(guid6)
        objDeleted7 = self.search_guid(guid7)
        self.del_attr_values(objDeleted1)
        self.del_attr_values(objDeleted2)
        self.del_attr_values(objDeleted3)
        self.del_attr_values(objDeleted4)
        self.del_attr_values(objDeleted5)
        self.del_attr_values(objDeleted6)
        self.del_attr_values(objDeleted7)
        self.preserved_attributes_list(objLive1, objDeleted1)
        self.preserved_attributes_list(objLive2, objDeleted2)
        self.preserved_attributes_list(objLive3, objDeleted3)
        self.preserved_attributes_list(objLive4, objDeleted4)
        self.preserved_attributes_list(objLive5, objDeleted5)
        self.preserved_attributes_list(objLive6, objDeleted6)
        self.preserved_attributes_list(objLive7, objDeleted7)
        self.check_rdn(objLive1, objDeleted1, "cn")
        self.check_rdn(objLive2, objDeleted2, "cn")
        self.check_rdn(objLive3, objDeleted3, "cn")
        self.check_rdn(objLive4, objDeleted4, "cn")
        self.check_rdn(objLive5, objDeleted5, "cn")
        self.check_rdn(objLive6, objDeleted6, "cn")
        self.check_rdn(objLive7, objDeleted7, "cn")
        self.delete_deleted(ldb, usr1)
        self.delete_deleted(ldb, usr2)
        self.delete_deleted(ldb, grp1)
        self.delete_deleted(ldb, sit1)
        self.delete_deleted(ldb, ss1)
        self.delete_deleted(ldb, srv1)
        self.delete_deleted(ldb, srv2)
        # Only some object classes are relocated to the Deleted Objects
        # container; site-related objects remain in place when tombstoned.
        self.assertTrue("CN=Deleted Objects" in str(objDeleted1.dn))
        self.assertTrue("CN=Deleted Objects" in str(objDeleted2.dn))
        self.assertTrue("CN=Deleted Objects" in str(objDeleted3.dn))
        self.assertFalse("CN=Deleted Objects" in str(objDeleted4.dn))
        self.assertTrue("CN=Deleted Objects" in str(objDeleted5.dn))
        self.assertFalse("CN=Deleted Objects" in str(objDeleted6.dn))
        self.assertFalse("CN=Deleted Objects" in str(objDeleted7.dn))
# Turn a bare argument into a URL: a local file becomes a tdb:// path,
# anything else is treated as an LDAP server name.
if not "://" in host:
    if os.path.isfile(host):
        host = "tdb://%s" % host
    else:
        host = "ldap://%s" % host
# Module-level connection used by all tests in BasicDeleteTests.
ldb = SamDB(host, credentials=creds, session_info=system_session(lp), lp=lp)
runner = SubunitTestRunner()
rc = 0
if not runner.run(unittest.makeSuite(BasicDeleteTests)).wasSuccessful():
    rc = 1
sys.exit(rc)
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"testing"
"k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
)
// TestRoundTripTypes fuzzes the kubeadm API group's types and verifies each
// generated object survives a serialization/conversion round trip intact.
func TestRoundTripTypes(t *testing.T) {
	roundtrip.RoundTripTestForAPIGroup(t, scheme.AddToScheme, Funcs)
}
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"strings"
"testing"
"github.com/hashicorp/cli"
)
func testOperatorSealCommand(tb testing.TB) (*cli.MockUi, *OperatorSealCommand) {
tb.Helper()
ui := cli.NewMockUi()
return ui, &OperatorSealCommand{
BaseCommand: &BaseCommand{
UI: ui,
},
}
}
// TestOperatorSealCommand_Run covers argument validation, a successful seal
// against a live test server, and error reporting when the API is unreachable.
func TestOperatorSealCommand_Run(t *testing.T) {
	t.Parallel()

	// Table of invalid invocations: expected output substring and exit code.
	cases := []struct {
		name string
		args []string
		out  string
		code int
	}{
		{
			"args",
			[]string{"foo"},
			"Too many arguments",
			1,
		},
	}

	t.Run("validations", func(t *testing.T) {
		t.Parallel()

		for _, tc := range cases {
			tc := tc // capture range variable for the parallel subtest

			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				client, closer := testVaultServer(t)
				defer closer()

				ui, cmd := testOperatorSealCommand(t)
				cmd.client = client

				code := cmd.Run(tc.args)
				if code != tc.code {
					t.Errorf("expected %d to be %d", code, tc.code)
				}

				// The message may land on stdout or stderr; check both.
				combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
				if !strings.Contains(combined, tc.out) {
					t.Errorf("expected %q to contain %q", combined, tc.out)
				}
			})
		}
	})

	t.Run("integration", func(t *testing.T) {
		t.Parallel()

		client, closer := testVaultServer(t)
		defer closer()

		ui, cmd := testOperatorSealCommand(t)
		cmd.client = client

		code := cmd.Run([]string{})
		if exp := 0; code != exp {
			t.Errorf("expected %d to be %d", code, exp)
		}

		expected := "Success! Vault is sealed."
		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
		if !strings.Contains(combined, expected) {
			t.Errorf("expected %q to contain %q", combined, expected)
		}

		// Verify the server-side state actually changed, not just the output.
		sealStatus, err := client.Sys().SealStatus()
		if err != nil {
			t.Fatal(err)
		}
		if !sealStatus.Sealed {
			t.Errorf("expected to be sealed")
		}
	})

	t.Run("communication_failure", func(t *testing.T) {
		t.Parallel()

		// testVaultServerBad yields a client whose requests always fail.
		client, closer := testVaultServerBad(t)
		defer closer()

		ui, cmd := testOperatorSealCommand(t)
		cmd.client = client

		code := cmd.Run([]string{})
		if exp := 2; code != exp {
			t.Errorf("expected %d to be %d", code, exp)
		}

		expected := "Error sealing: "
		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
		if !strings.Contains(combined, expected) {
			t.Errorf("expected %q to contain %q", combined, expected)
		}
	})

	t.Run("no_tabs", func(t *testing.T) {
		t.Parallel()

		_, cmd := testOperatorSealCommand(t)
		assertNoTabs(t, cmd)
	})
}
from functools import partial
import math
import actions
from actions import _get_as_str
import call_definitions
from call_definitions import xpcom_constructor as xpcom_const, python_wrap
from entity_values import entity
import instanceactions
from jstypes import JSWrapper
from validator.compat import FX47_DEFINITION
from validator.constants import MDN_DOC
# A list of identifiers and member values that may not be used.
# A list of identifiers and member values that may not be used.
BANNED_IDENTIFIERS = {
    u'newThread':
        'Creating threads from JavaScript is a common cause '
        'of crashes and is unsupported in recent versions of the platform',
    u'processNextEvent':
        'Spinning the event loop with processNextEvent is a common cause of '
        'deadlocks, crashes, and other errors due to unintended reentrancy. '
        'Please use asynchronous callbacks instead wherever possible',
}

# Boilerplate appended to several signing_help messages below.
CUSTOMIZATION_API_HELP = (
    'We are currently working to provide libraries and APIs to allow '
    'extensions to modify these settings in ways that we can guarantee are '
    'in-policy. In the interim, we recommend that you avoid changing these '
    'settings altogether, if at all possible.')

# Message templates attached to preference branches in BANNED_PREF_BRANCHES.
CUSTOMIZATION_PREF_MESSAGE = {
    'description': (
        'Extensions must not alter user preferences such as the current home '
        'page, new tab page, or search engine, without explicit user consent, '
        'in which a user takes a non-default action. Such changes must also '
        'be reverted when the extension is disabled or uninstalled.',
        'In nearly all cases, new values for these preferences should be '
        'set in the default preference branch, rather than the user branch.'),
    'signing_help':
        'Add-ons which directly change these preferences must undergo at '
        'manual code review for at least one submission. ' +
        CUSTOMIZATION_API_HELP,
    'signing_severity': 'high',
}

NETWORK_PREF_MESSAGE = {
    'description':
        'Changing network preferences may be dangerous, and often leads to '
        'performance costs.',
    'signing_help':
        'Changes to these preferences are strongly discouraged. If at all '
        'possible, you should remove any reference to them from '
        'your extension. Extensions which do modify these preferences '
        'must undergo light manual code review for at least one submission.',
    'signing_severity': 'low',
}

SEARCH_PREF_MESSAGE = {
    'description':
        'Search engine preferences may not be changed by add-ons directly. '
        'All such changes must be made only via the browser search service, '
        'and only after an explicit opt-in from the user. All such changes '
        'must be reverted when the extension is disabled or uninstalled.',
    'signing_help': (
        'You should remove all references to these preferences from your '
        'code, and interact with search settings only via the '
        '`Services.search` interface. Extensions which interact with these '
        'preferences directly are not acceptable within the Firefox add-on '
        'ecosystem.',
        'Note, however, that extensions which change search settings even via '
        'the search service must undergo manual code review for at least '
        'one submission. ' + CUSTOMIZATION_API_HELP),
    'signing_severity': 'high',
}

SECURITY_PREF_MESSAGE = {
    'description':
        'Changing this preference may have severe security implications, and '
        'is forbidden under most circumstances.',
    'editors_only': True,
    'signing_help': ('Extensions which alter these settings are allowed '
                     'within the Firefox add-on ecosystem by exception '
                     'only, and under extremely limited circumstances.',
                     'Please remove any reference to these preference names '
                     'from your add-on.'),
    'signing_severity': 'high',
}

MARIONETTE_MESSAGE = {
    'warning': 'Marionette should not be accessed by extensions',
    'description': 'References to the Marionette service are not acceptable '
                   'in extensions. Please remove them.',
}
def fuel_error(traverse_node, err):
    """Emit a Firefox 47 compatibility error for any use of the FUEL library."""
    # `im_self` is the traverser instance bound to the method (Python 2
    # bound-method attribute).
    traverse_node.im_self.warning(
        err_id=('js', 'traverser', 'dangerous_global'),
        warning='The FUEL library is no longer supported.',
        description='The FUEL library is no longer supported. Please use the '
                    'Add-ons SDK instead. See %s for more information.'
                    % MDN_DOC % 'Add-ons/SDK',
        for_appversions=FX47_DEFINITION,
        tier=5,
        compatibility_type='error')
# (prefix, message) pairs: writing to any preference whose name starts with
# the prefix triggers the paired warning (None selects a generic message).
BANNED_PREF_BRANCHES = (
    # Security and update preferences
    (u'app.update.', SECURITY_PREF_MESSAGE),
    (u'browser.addon-watch.', SECURITY_PREF_MESSAGE),
    (u'capability.policy.', None),
    (u'datareporting.', SECURITY_PREF_MESSAGE),

    (u'extensions.blocklist.', SECURITY_PREF_MESSAGE),
    (u'extensions.checkCompatibility', None),
    (u'extensions.getAddons.', SECURITY_PREF_MESSAGE),
    (u'extensions.update.', SECURITY_PREF_MESSAGE),
    (u'xpinstall.signatures.required', SECURITY_PREF_MESSAGE),

    # Let's see if we can get away with this...
    # Changing any preference in this branch should result in a
    # warning. However, this substring may turn out to be too
    # generic, and lead to spurious warnings, in which case we'll
    # have to single out sub-branches.
    (u'security.', SECURITY_PREF_MESSAGE),

    # Search, homepage, and customization preferences
    (u'browser.newtab.url', CUSTOMIZATION_PREF_MESSAGE),
    (u'browser.newtabpage.enabled', CUSTOMIZATION_PREF_MESSAGE),
    (u'browser.search.defaultenginename', SEARCH_PREF_MESSAGE),
    (u'browser.search.searchEnginesURL', SEARCH_PREF_MESSAGE),
    (u'browser.startup.homepage', CUSTOMIZATION_PREF_MESSAGE),
    (u'extensions.getMoreThemesURL', None),
    (u'keyword.URL', SEARCH_PREF_MESSAGE),
    (u'keyword.enabled', SEARCH_PREF_MESSAGE),

    # Network
    (u'network.proxy.autoconfig_url', {
        'description':
            'As many add-ons have reason to change the proxy autoconfig URL, '
            'and only one at a time may do so without conflict, extensions '
            'must make proxy changes using other mechanisms. Installing a '
            'proxy filter is the recommended alternative: '
            'https://developer.mozilla.org/en-US/docs/Mozilla/Tech/XPCOM/'
            'Reference/Interface/nsIProtocolProxyService#registerFilter()',
        'signing_help':
            'Dynamic proxy configuration should be implemented via proxy '
            'filters, as described above. This preference should not be '
            'set, except directly by end users.',
        'signing_severity': 'low'}),
    (u'network.proxy.', NETWORK_PREF_MESSAGE),
    (u'network.http.', NETWORK_PREF_MESSAGE),
    (u'network.websocket.', NETWORK_PREF_MESSAGE),

    # Other
    (u'browser.preferences.instantApply', None),

    (u'extensions.alwaysUnpack', None),
    (u'extensions.bootstrappedAddons', None),
    (u'extensions.dss.', None),
    (u'extensions.installCache', None),
    (u'extensions.lastAppVersion', None),
    (u'extensions.pendingOperations', None),

    (u'general.useragent.', None),

    (u'nglayout.debug.disable_xul_cache', None),

    # Marionette
    (u'marionette.', MARIONETTE_MESSAGE),
)

# Regular expressions matched against full preference names, same effect as
# the prefixes above.
BANNED_PREF_REGEXPS = [
    r'extensions\..*\.update\.(url|enabled|interval)',
]
def is_shared_scope(traverser, right=None, node_right=None):
    """Returns true if the traverser `t` is traversing code loaded into
    a shared scope, such as a browser window. Particularly used for
    detecting when global overwrite warnings should be issued."""
    # FIXME(Kris): This is not a great heuristic.
    if traverser.is_jsm:
        return False
    return traverser.err.get_resource('em:bootstrap') != 'true'
# See https://github.com/mattbasta/amo-validator/wiki/JS-Predefined-Entities
# for details on entity properties.

CONTENT_DOCUMENT = None


# Entity descriptor for nsICategoryManager: persistent category entries are
# forbidden (or require cleanup) in bootstrapped add-ons.
CATEGORY_MANAGER = {
    u'addCategoryEntry':
        {'dangerous':
            lambda a, t, e:
                e.get_resource('em:bootstrap') and
                ('Bootstrapped add-ons may not create persistent category '
                 'entries.' if len(a) > 3 and t(a[3]).is_literal() else
                 'Authors of bootstrapped add-ons must take care to clean up '
                 'any added category entries at shutdown.')}}


# Shared descriptor for interfaces belonging to the pre-Gecko-2 extension
# manager, which no longer exists.
OBSOLETE_EXTENSION_MANAGER = {
    'value': {},
    'dangerous': 'This interface is part of the obsolete extension manager '
                 'interface, which is not available in any remotely modern '
                 'version of Firefox. It should not be referenced in any '
                 'code.'}


ADDON_INSTALL_METHOD = {
    'value': {},
    'dangerous': {
        'description': (
            'Add-ons may install other add-ons only by user consent. Any '
            'such installations must be carefully reviewed to ensure '
            'their safety.'),
        'editors_only': True,
        'signing_help': (
            "Rather than directly install other add-ons, you should offer "
            "users the opportunity to install them via the normal web install "
            "process, using an install link or button connected to the "
            "`InstallTrigger` API: "
            "https://developer.mozilla.org/en-US/docs/Web/API/InstallTrigger",
            "Updates to existing add-ons should be provided via the "
            "manifest's `updateURL` mechanism."),
        'signing_severity': 'high'},
}


# Default strings used by search_warning() below.
SEARCH_MESSAGE = 'Potentially dangerous use of the search service'
SEARCH_DESCRIPTION = (
    'Changes to the default and currently-selected search engine settings '
    'may only take place after users have explicitly opted-in, by taking '
    'a non-default action. Any such changes must be reverted when the add-on '
    'making them is disabled or uninstalled.')
def search_warning(severity='medium', editors_only=False,
                   message=SEARCH_MESSAGE,
                   description=SEARCH_DESCRIPTION):
    """Build the standard warning descriptor for code that tampers with the
    browser search service, parameterized by severity and audience."""
    signing_help = ('Add-ons which directly change search settings must '
                    'undergo manual code review for at least one '
                    'submission. ' + CUSTOMIZATION_API_HELP)
    return {
        'err_id': ('testcases_javascript_actions',
                   'search_service',
                   'changes'),
        'signing_help': signing_help,
        'signing_severity': severity,
        'editors_only': editors_only,
        'warning': message,
        'description': description,
    }
# Descriptor mixed into registry-key entities whose methods write to the
# Windows registry (see registry_key() below).
REGISTRY_WRITE = {'dangerous': {
    'err_id': ('testcases_javascript_actions',
               'windows_registry',
               'write'),
    'warning': 'Writes to the registry may be dangerous',
    'description': 'Writing to the registry can have many system-level '
                   'consequences and requires careful review.',
    'signing_help': (
        'Please store any settings relevant to your add-on within the '
        'current Firefox profile, ideally using the preferences service.'
        'If you are intentionally changing system settings, consider '
        'searching for a Firefox API which has a similar effect. If no such '
        'API exists, we strongly discourage making any changes which affect '
        'the system outside of the browser.'),
    'signing_severity': 'medium',
    'editors_only': True}}
def registry_key(write=False):
    """Represents a function which returns a registry key object.

    The returned entity spec makes the method's return value behave as an
    `nsIWindowsRegKey` instance, so methods called on child keys (e.g.
    writes) are also checked.  When `write` is True, the registry-write
    warning is attached to the method itself.
    """
    # Fix: previously built an 'nsIWindowMediator' instance, which gave
    # child registry keys the wrong member set; a registry-key method must
    # return a registry key.
    res = {'return': lambda wrapper, arguments, traverser: (
        build_quick_xpcom('createInstance', 'nsIWindowsRegKey',
                          traverser, wrapper=True))}
    if write:
        res.update(REGISTRY_WRITE)
    return res
# Method entities shared by the nsIX509Cert/2/3 interface family (all three
# entries in INTERFACES below point at this same dict).
NSIX509CERT_METHODS = {
    'getUsagesArray': entity('nsIX509Cert.getUsagesArray'),
    'requestUsagesArrayAsync': entity('nsIX509Cert.requestUsagesArrayAsync'),
    'getUsagesString': entity('nsIX509Cert.getUsagesString'),
}
# Maps XPCOM interface names to entity metadata consumed by the JS
# traverser.  Each entry's 'value' dict describes members; 'dangerous'
# entries (strings, dicts, or callables taking (args, traverser-fn, err))
# trigger warnings, and 'return' entries install call handlers.
INTERFACES = {
    u'nsISupports': {'value': {}},
    u'mozIStorageBaseStatement': {
        'value': {
            u'execute': {
                'dangerous': instanceactions.SYNCHRONOUS_SQL_DESCRIPTION},
            u'executeStep': {
                'dangerous': instanceactions.SYNCHRONOUS_SQL_DESCRIPTION}}},
    u'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,
    u'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,
    u'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,
    u'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,
    u'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,
    u'nsICategoryManager': {'value': CATEGORY_MANAGER},
    u'nsIAccessibleRetrieval': {
        'dangerous':
            'Using the nsIAccessibleRetrieval interface causes significant '
            'performance degradation in Gecko. It should only be used in '
            'accessibility-related add-ons.',
        'value': {}},
    u'nsIBrowserSearchService': {
        'value': {
            u'currentEngine': {'readonly': search_warning(severity='high')},
            u'defaultEngine': {'readonly': search_warning(severity='high')},
            u'addEngine': {'dangerous': search_warning()},
            u'addEngineWithDetails': {'dangerous': search_warning()},
            u'removeEngine': {'dangerous': search_warning()},
            u'moveEngine': {'dangerous': search_warning()}}},
    u'nsIComponentRegistrar': {
        'value': {
            u'autoRegister': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Bootstrapped add-ons may not register chrome '
                        'manifest files.'},
            u'registerFactory': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Authors of bootstrapped add-ons must take care to '
                        'clean up any component registrations at shutdown.'}}},
    u'nsIDNSService': {'value': {u'resolve': entity('nsIDNSService.resolve')}},
    u'nsIJSON': {
        'value': {
            u'encode': {'return': call_definitions.nsIJSON_deprec},
            u'decode': {'return': call_definitions.nsIJSON_deprec}}},
    u'nsINavBookmarksService': {'value': {
        'getURIForKeyword': entity('nsINavBookmarksService.getURIForKeyword'),
    }},
    # NOTE(review): this 'dangerous' callback returns a nested lambda, which
    # is truthy for every caller rather than only for Jetpack add-ons; the
    # inner lambda looks like the intended check -- confirm against upstream.
    u'nsIObserverService': {
        'value': {
            u'addObserver': entity('nsIObserverService.addObserver')},
        'dangerous': lambda a, t, e:
            lambda t, e: (
                e.metadata.get('is_jetpack') and
                'The observer service should not be used directly in SDK '
                "add-ons. Please use the 'sdk/system/events' module "
                'instead.')},
    u'nsIPrefBranch': {
        'value': dict(
            tuple((method, {'return': instanceactions.set_preference})
                  for method in (u'setBoolPref',
                                 u'setCharPref',
                                 u'setComplexValue',
                                 u'setIntPref',
                                 u'clearUserPref',
                                 u'deleteBranch',
                                 u'resetBranch')) +
            tuple((method, {'return': instanceactions.get_preference})
                  for method in (u'getBoolPref',
                                 u'getCharPref',
                                 u'getChildList',
                                 u'getComplexValue',
                                 u'getFloatPref',
                                 u'getIntPref',
                                 u'getPrefType',
                                 u'prefHasUserValue')))},
    u'nsIResProtocolHandler': {
        'value': {
            u'setSubstitution': {
                # Only dangerous when a non-null substitution is installed
                # from a bootstrapped add-on.
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        a and len(a) > 1 and t(a[1]).get_literal_value() and
                        'Authors of bootstrapped add-ons must take care '
                        'to clean up any added resource substitutions '
                        'at shutdown.'}}},
    u'nsISound': {'value': {'play': entity('nsISound.play')}},
    u'nsIStringBundleService': {
        'value': {
            u'createStringBundle': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Authors of bootstrapped add-ons must take care '
                        'to flush the string bundle cache at shutdown.'},
            u'createExtensibleBundle': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Authors of bootstrapped add-ons must take care '
                        'to flush the string bundle cache at shutdown.'}}},
    u'nsIStyleSheetService': {
        'value': {
            u'loadAndRegisterSheet': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Authors of bootstrapped add-ons must take care to '
                        'unregister registered stylesheets at shutdown.'}}},
    u'nsITransferable': {
        'value': {u'init': entity('nsITransferable.init')}},
    u'nsIWindowMediator': {
        'value': {
            'registerNotification': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Authors of bootstrapped add-ons must take care '
                        'to remove any added observers at shutdown.'}}},
    u'nsIWindowWatcher': {
        'value': {
            u'addListener': {
                'dangerous':
                    lambda a, t, e:
                        e.get_resource('em:bootstrap') and
                        'Authors of bootstrapped add-ons must take care '
                        'to remove any added observers at shutdown.'},
            u'openWindow': entity('nsIWindowWatcher.openWindow')}},
    u'nsIProtocolProxyService': {'value': {
        u'registerFilter': {'dangerous': {
            'err_id': ('testcases_javascript_actions',
                       'predefinedentities', 'proxy_filter'),
            'description': (
                'Proxy filters can be used to direct arbitrary network '
                'traffic through remote servers, and may potentially '
                'be abused.',
                'Additionally, to prevent conflicts, the `applyFilter` '
                'method should always return its third argument in cases '
                'when it is not supplying a specific proxy.'),
            'editors_only': True,
            'signing_help': 'Due to the potential for unintended effects, '
                            'any add-on which uses this API must undergo '
                            'manual code review for at least one submission.',
            'signing_severity': 'low'}}}},
    u'nsIWebBrowserPersist': {
        'value': {
            u'saveChannel': {'return': call_definitions.webbrowserpersist},
            u'saveURI': {
                'return': call_definitions.webbrowserpersist_saveuri},
            u'savePrivacyAwareURI': {
                'return': call_definitions.webbrowserpersist}}},
    # Registry methods: writes carry REGISTRY_WRITE; child-key accessors
    # return further registry-key objects via registry_key().
    'nsIWindowsRegKey': {'value': {u'create': REGISTRY_WRITE,
                                   u'createChild': registry_key(write=True),
                                   u'openChild': registry_key(),
                                   u'writeBinaryValue': REGISTRY_WRITE,
                                   u'writeInt64Value': REGISTRY_WRITE,
                                   u'writeIntValue': REGISTRY_WRITE,
                                   u'writeStringValue': REGISTRY_WRITE,
                                   }},
    'nsIPK11TokenDB': {'value': {
        'listTokens': entity('nsIPK11TokenDB.listTokens'),
        'findTokenByName': entity('nsIPK11TokenDB.findTokenByName')
    }},
    'nsIPKCS11ModuleDB': {'value': {
        'listModules': entity('nsIPKCS11ModuleDB.listModules')}},
    'nsIPKCS11Module': {'value': {
        'listSlots': entity('nsIPKCS11Module.listSlots')}},
    'nsIIOService': {'value': {
        'newChannel': entity('nsIIOService.newChannel'),
        'newChannelFromURI': entity('nsIIOService.newChannelFromURI'),
        'newChannelFromURIWithProxyFlags': entity(
            'nsIIOService.newChannelFromURIWithProxyFlags'),
    }},
    'nsIX509Cert': {'value': NSIX509CERT_METHODS},
    'nsIX509Cert2': {'value': NSIX509CERT_METHODS},
    'nsIX509Cert3': {'value': NSIX509CERT_METHODS},
    'nsIX509CertDB': {'value': {
        'findCertByNickname': entity('nsIX509CertDB.findCertByNickname'),
        'findEmailEncryptionCert': entity(
            'nsIX509CertDB.findEmailEncryptionCert'),
        'findEmailSigningCert': entity('nsIX509CertDB.findEmailSigningCert'),
        'addCert': entity('nsIX509CertDB.addCert'),
    }},
    'nsISupportsArray': entity('nsISupportsArray'),
    u'mozIAsyncFavicons': {'value': {
        u'setAndFetchFaviconForPage': entity(
            'mozIAsyncFavicons.setAndFetchFaviconForPage'),
        u'replaceFaviconDataFromDataURL': entity(
            'mozIAsyncFavicons.replaceFaviconDataFromDataURL'),
    }},
    u'nsIFormHistory2': entity('nsIFormHistory2'),
}
# Entities for interface names as seen via `Components.interfaces`.
# A loop further below additionally installs an 'xpcom_map' entry here for
# every interface defined in INTERFACES.
INTERFACE_ENTITIES = {
    u'nsIXMLHttpRequest': {
        'xpcom_map':
            lambda: GLOBAL_ENTITIES['XMLHttpRequest']},
    u'nsIProcess': {'dangerous': {
        'warning': 'The use of nsIProcess is potentially '
                   'dangerous and requires careful review '
                   'by an administrative reviewer.',
        'editors_only': True,
        'signing_help':
            'Consider alternatives to directly launching '
            'executables, such as loading a URL with an '
            'appropriate external protocol handler, making '
            'network requests to a local service, or using '
            'the (as a last resort) `nsIFile.launch()` method '
            'to open a file with the appropriate application.',
        'signing_severity': 'high',
    }},
    u'nsIDOMGeoGeolocation': {
        'dangerous':
            'Use of the geolocation API by add-ons requires '
            'prompting users for consent.'},
    u'nsIWindowsRegKey': {'dangerous': {
        'signing_help':
            'The information stored in many standard registry '
            'keys is available via built-in Firefox APIs, '
            'such as `Services.sysinfo`, `Services.dirsvc`, '
            'and the environment service '
            '(http://mzl.la/1OGgCF3). We strongly discourage '
            'extensions from reading registry information '
            'which is not available via other Firefox APIs.',
        'signing_severity': 'low',
        'editors_only': True,
        'description': (
            'Access to the registry is potentially '
            'dangerous, and should be reviewed with special '
            'care.')}},
}
# Shared warning descriptor for the certificate/trust database interfaces
# and contract IDs (applied to several entries just below).
DANGEROUS_CERT_DB = {
    'err_id': ('javascript', 'predefinedentities', 'cert_db'),
    'description': 'Access to the X509 certificate '
                   'database is potentially dangerous '
                   'and requires careful review by an '
                   'administrative reviewer.',
    'editors_only': True,
    'signing_help': 'Please avoid interacting with the certificate and trust '
                    'databases if at all possible. Any add-ons which interact '
                    'with these databases must undergo manual code review '
                    'prior to signing.',
    'signing_severity': 'high',
}
# Flag every certificate-database interface, and the matching contract IDs,
# with the shared cert-DB warning descriptor.
for cert_interface in ('nsIX509CertDB', 'nsIX509CertDB2', 'nsIX509CertList',
                       'nsICertOverrideService'):
    INTERFACE_ENTITIES[cert_interface] = {'dangerous': DANGEROUS_CERT_DB}

CONTRACT_ENTITIES = dict.fromkeys(
    ('@mozilla.org/security/x509certdb;1',
     '@mozilla.org/security/x509certlist;1',
     '@mozilla.org/security/certoverride;1'),
    DANGEROUS_CERT_DB)
# Give every known interface an 'xpcom_map' entry resolving back to its
# INTERFACES definition.  The default argument binds the interface name at
# each iteration, avoiding the late-binding closure pitfall.
for interface in INTERFACES:
    entry = INTERFACE_ENTITIES.setdefault(interface, {})
    entry['xpcom_map'] = lambda interface=interface: INTERFACES[interface]
def build_quick_xpcom(method, interface, traverser, wrapper=False):
    """A shortcut to quickly build XPCOM objects on the fly.

    `method` is the constructor name ('createInstance' or 'getService'),
    and `interface` the interface to build.  `interface` may also be a
    list/tuple; the first element is constructed and the members of each
    remaining interface are merged in, as if `QueryInterface` had been
    called for each.  Returns the constructed object's value, or the
    JSWrapper itself when `wrapper` is True.
    """
    extra = ()
    if isinstance(interface, (list, tuple)):
        interface, extra = interface[0], interface[1:]

    def interface_obj(iface):
        # Build a global entity whose xpcom_map resolves the named
        # interface, falling back to the empty nsISupports definition.
        return traverser._build_global(
            name=method,
            entity={'xpcom_map':
                    lambda: INTERFACES.get(iface, INTERFACES['nsISupports'])})

    constructor = xpcom_const(method, pretraversed=True)
    obj = constructor(None, [interface_obj(interface)], traverser)

    for iface in extra:
        # `xpcom_constructor` really needs to be cleaned up so we can avoid
        # this duplication.
        iface = interface_obj(iface)
        iface = traverser._build_global('QueryInterface',
                                        iface.value['xpcom_map']())
        # Copy before merging so shared interface definitions aren't mutated.
        obj.value = obj.value.copy()

        value = obj.value['value'].copy()
        value.update(iface.value['value'])

        obj.value.update(iface.value)
        obj.value['value'] = value

    if isinstance(obj, JSWrapper) and not wrapper:
        obj = obj.value
    return obj
# %-template for warnings on template-engine methods that bypass HTML
# escaping; filled in with the fully-qualified method name below.
UNSAFE_TEMPLATE_METHOD = (
    'The use of `%s` can lead to unsafe '
    'remote code execution, and therefore must be done with '
    'great care, and only with sanitized data.')
# Maps properties of the `Services` object (Services.jsm) to the XPCOM
# interface name(s) each service implements.  The loop below replaces each
# value with a lazy getService() entity built via build_quick_xpcom.
SERVICES = {
    'appinfo': ('nsIXULAppInfo', 'nsIXULRuntime'),
    'appShell': 'nsIAppShellService',
    'blocklist': 'nsIBlocklistService',
    'cache': 'nsICacheService',
    'cache2': 'nsICacheStorageService',
    'clipboard': 'nsIClipboard',
    'console': 'nsIConsoleService',
    'contentPrefs': 'nsIContentPrefService',
    'cookies': ('nsICookieManager', 'nsICookieManager2', 'nsICookieService'),
    'dirsvc': ('nsIDirectoryService', 'nsIProperties'),
    'DOMRequest': 'nsIDOMRequestService',
    'domStorageManager': 'nsIDOMStorageManager',
    'downloads': 'nsIDownloadManager',
    'droppedLinkHandler': 'nsIDroppedLinkHandler',
    'eTLD': 'nsIEffectiveTLDService',
    'focus': 'nsIFocusManager',
    'io': ('nsIIOService', 'nsIIOService2'),
    'locale': 'nsILocaleService',
    'logins': 'nsILoginManager',
    'obs': 'nsIObserverService',
    'perms': 'nsIPermissionManager',
    'prefs': ('nsIPrefBranch2', 'nsIPrefService', 'nsIPrefBranch'),
    'prompt': 'nsIPromptService',
    'scriptloader': 'mozIJSSubScriptLoader',
    'scriptSecurityManager': 'nsIScriptSecurityManager',
    'search': 'nsIBrowserSearchService',
    'startup': 'nsIAppStartup',
    'storage': 'mozIStorageService',
    'strings': 'nsIStringBundleService',
    'sysinfo': 'nsIPropertyBag2',
    'telemetry': 'nsITelemetry',
    'tm': 'nsIThreadManager',
    'uriFixup': 'nsIURIFixup',
    'urlFormatter': 'nsIURLFormatter',
    'vc': 'nsIVersionComparator',
    'wm': 'nsIWindowMediator',
    'ww': 'nsIWindowWatcher',
}
# Replace each interface-name value with a lazy getService() constructor.
# Iterate over a snapshot of the keys so the dict can be mutated in place.
for service_name in list(SERVICES):
    interfaces = SERVICES[service_name]
    SERVICES[service_name] = {
        'value': partial(build_quick_xpcom, 'getService', interfaces)}
# Warning descriptor shared by the `eval` and `Function` globals, and
# reused by Components.utils.evalInSandbox below.
DANGEROUS_EVAL = {
    'err_id': ('javascript', 'dangerous_global', 'eval'),
    'description': ('Evaluation of strings as code can lead to security '
                    'vulnerabilities and performance issues, even in the '
                    'most innocuous of circumstances. Please avoid using '
                    '`eval` and the `Function` constructor when at all '
                    'possible.',
                    'Alternatives are available for most use cases. See '
                    'https://developer.mozilla.org/en-US/Add-ons/'
                    'Overlay_Extensions/XUL_School/'
                    'Appendix_C:_Avoid_using_eval_in_Add-ons '
                    'for more information.'),
    'signing_help':
        'Please try to avoid evaluating strings as code wherever possible. '
        'Read over the linked document for suggested alternatives. '
        'If you are referencing the `Function` constructor without calling '
        'it, and cannot avoid continuing to do so, consider alternatives '
        'such as calling `Object.getPrototypeOf` on an existing function '
        'object.',
    'signing_severity': 'high'}
# Signing-help text shared by cloneInto/exportFunction entries below.
FUNCTION_EXPORT_HELP = (
    'Given the potential security risks of exposing APIs to unprivileged '
    'code, extensions which use these APIs must undergo manual review for at '
    'least one submission. If you are not using these APIs to interact with '
    'content code, please consider alternatives, such as built-in '
    'message passing functionality.')
# GLOBAL_ENTITIES is also representative of the `window` object.
# It maps global identifier names to entity metadata (lazy values, warning
# descriptors, and call-return handlers) used while traversing add-on JS.
GLOBAL_ENTITIES = {
    u'window': {'value': lambda t: {'value': GLOBAL_ENTITIES}},
    u'null': {'literal': lambda t: JSWrapper(None, traverser=t)},

    # Shorthand aliases for the Components sub-objects.
    u'Cc': {'readonly': False,
            'value':
                lambda t: GLOBAL_ENTITIES['Components']['value']['classes']},
    u'Ci': {'readonly': False,
            'value':
                lambda t: GLOBAL_ENTITIES['Components']['value']['interfaces']},
    u'Cu': {'readonly': False,
            'value':
                lambda t: GLOBAL_ENTITIES['Components']['value']['utils']},

    # From Services.jsm.
    u'Services': {'value': SERVICES},

    # From Preferences.jsm.
    # TODO: Support calls that return instances of this object which
    # operate on non-root branches.
    u'Preferences': {'value': {
        u'get': {'return': instanceactions.get_preference},
        u'reset': {'return': instanceactions.set_preference},
        u'resetBranch': {'return': instanceactions.set_preference},
        u'set': {'return': instanceactions.set_preference}}},

    u'AddonManager': {
        'readonly': False,
        'value': {
            u'autoUpdateDefault': {'readonly': SECURITY_PREF_MESSAGE},
            u'checkUpdateSecurity': {'readonly': SECURITY_PREF_MESSAGE},
            u'checkUpdateSecurityDefault': {'readonly': SECURITY_PREF_MESSAGE},
            u'updateEnabled': {'readonly': SECURITY_PREF_MESSAGE},
            u'getInstallForFile': ADDON_INSTALL_METHOD,
            u'getInstallForURL': ADDON_INSTALL_METHOD,
            u'installAddonsFromWebpage': ADDON_INSTALL_METHOD}},

    u'ctypes': {'dangerous': {
        'description': (
            'Insufficiently meticulous use of ctypes can lead to serious, '
            'and often exploitable, errors. The use of bundled binary code, '
            'or access to system libraries, may allow for add-ons to '
            'perform unsafe operations. All ctypes use must be carefully '
            'reviewed by a qualified reviewer.'),
        'editors_only': True,
        'signing_help': ('Please try to avoid interacting with or bundling '
                         'native binaries whenever possible. If you are '
                         'bundling binaries for performance reasons, please '
                         'consider alternatives such as Emscripten '
                         '(http://mzl.la/1KrSUh2), JavaScript typed arrays '
                         '(http://mzl.la/1Iw02sr), and Worker threads '
                         '(http://mzl.la/1OGfAcc).',
                         'Any code which makes use of the `ctypes` API '
                         'must undergo manual code review for at least one '
                         'submission.'),
        'signing_severity': 'high'}},

    u'document': {
        'value': {
            u'title': {'overwriteable': True,
                       'readonly': False},
            u'defaultView': {'value': lambda t: {'value': GLOBAL_ENTITIES}},
            # Dangerous unless the overlay URI is a chrome:/resource: literal.
            u'loadOverlay': {
                'dangerous':
                    lambda a, t, e:
                        not a or not _get_as_str(t(a[0])).lower()
                        .startswith(('chrome:', 'resource:'))},
            u'write': entity('document.write'),
            u'writeln': entity('document.write')}},

    # The nefarious timeout brothers!
    u'setTimeout': {'dangerous': actions._call_settimeout},
    u'setInterval': {'dangerous': actions._call_settimeout},

    u'require': {'dangerous': actions._call_require},

    u'encodeURI': {'readonly': True},
    u'decodeURI': {'readonly': True},
    u'encodeURIComponent': {'readonly': True},
    u'decodeURIComponent': {'readonly': True},
    u'escape': {'readonly': True},
    u'unescape': {'readonly': True},
    u'isFinite': {'readonly': True},
    u'isNaN': {'readonly': True},
    u'parseFloat': {'readonly': True},
    u'parseInt': {'readonly': True},

    u'eval': {'dangerous': DANGEROUS_EVAL},
    u'Function': {'dangerous': DANGEROUS_EVAL},

    u'Object': {
        'value': {
            u'prototype': {'readonly': is_shared_scope},
            u'constructor':  # Just an experiment for now
                {'value': lambda t: GLOBAL_ENTITIES['Function']}}},
    u'String': {
        'value': {u'prototype': {'readonly': is_shared_scope}},
        'return': call_definitions.string_global},
    u'Array': {
        'value': {u'prototype': {'readonly': is_shared_scope}},
        'return': call_definitions.array_global},
    u'Number': {
        'value': {
            u'prototype': {'readonly': is_shared_scope},
            u'POSITIVE_INFINITY': {
                'value': lambda t: JSWrapper(float('inf'), traverser=t)},
            u'NEGATIVE_INFINITY': {
                'value': lambda t: JSWrapper(float('-inf'), traverser=t)}},
        'return': call_definitions.number_global},
    u'Boolean': {
        'value': {u'prototype': {'readonly': is_shared_scope}},
        'return': call_definitions.boolean_global},
    u'RegExp': {'value': {u'prototype': {'readonly': is_shared_scope}}},
    u'Date': {'value': {u'prototype': {'readonly': is_shared_scope}}},
    u'File': {'value': {u'prototype': {'readonly': is_shared_scope}}},

    u'Math': {
        'value': {
            u'PI': {'value': lambda t: JSWrapper(math.pi, traverser=t)},
            u'E': {'value': lambda t: JSWrapper(math.e, traverser=t)},
            u'LN2': {'value': lambda t: JSWrapper(math.log(2), traverser=t)},
            u'LN10': {'value': lambda t: JSWrapper(math.log(10), traverser=t)},
            u'LOG2E': {'value': lambda t: JSWrapper(math.log(math.e, 2),
                                                    traverser=t)},
            u'LOG10E': {'value': lambda t: JSWrapper(math.log10(math.e),
                                                     traverser=t)},
            u'SQRT2': {'value': lambda t: JSWrapper(math.sqrt(2),
                                                    traverser=t)},
            # NOTE(review): under Python 2, 1/2 is integer division (0), so
            # this would wrap sqrt(0) -- confirm intended interpreter/value.
            u'SQRT1_2': {'value': lambda t: JSWrapper(math.sqrt(1/2),
                                                      traverser=t)},
            u'abs': {'return': python_wrap(abs, [('num', 0)])},
            u'acos': {'return': python_wrap(math.acos, [('num', 0)])},
            u'asin': {'return': python_wrap(math.asin, [('num', 0)])},
            u'atan': {'return': python_wrap(math.atan, [('num', 0)])},
            u'atan2': {'return': python_wrap(math.atan2, [('num', 0),
                                                          ('num', 1)])},
            u'ceil': {'return': python_wrap(math.ceil, [('num', 0)])},
            u'cos': {'return': python_wrap(math.cos, [('num', 0)])},
            u'exp': {'return': python_wrap(math.exp, [('num', 0)])},
            u'floor': {'return': python_wrap(math.floor, [('num', 0)])},
            u'log': {'return': call_definitions.math_log},
            u'max': {'return': python_wrap(max, [('num', 0)], nargs=True)},
            u'min': {'return': python_wrap(min, [('num', 0)], nargs=True)},
            # NOTE(review): the second tuple element appears to be a fallback
            # value (cf. atan2 above), not an argument index -- confirm
            # python_wrap's contract before changing these.
            u'pow': {'return': python_wrap(math.pow, [('num', 0),
                                                      ('num', 0)])},
            u'random':  # Random always returns 0.5 in our fantasy land.
                {'return': call_definitions.math_random},
            u'round': {'return': call_definitions.math_round},
            u'sin': {'return': python_wrap(math.sin, [('num', 0)])},
            u'sqrt': {'return': python_wrap(math.sqrt, [('num', 1)])},
            u'tan': {'return': python_wrap(math.tan, [('num', 0)])},
        }},

    u'netscape': {
        'value': {
            u'security': {
                'value': {
                    u'PrivilegeManager': {
                        'value': {
                            u'enablePrivilege': {
                                'dangerous': {
                                    'signing_help':
                                        'Any references to this API must '
                                        'be removed from your extension. '
                                        'Add-ons using this API will not '
                                        'be accepted for signing.',
                                    'signing_severity': 'high',
                                    'description': (
                                        'enablePrivilege is extremely '
                                        'dangerous, and nearly always '
                                        'unnecessary. It should not be '
                                        'used under any circumstances.'),
                                }}}}}}}},
    u'navigator': {
        'value': {u'wifi': {'dangerous': True},
                  u'geolocation': {'dangerous': True}}},

    u'Components': {
        'dangerous_on_read':
            lambda t, e: bool(e.metadata.get('is_jetpack')),
        'value': {
            u'classes': {
                'xpcom_wildcard': True,
                'value': {
                    u'createInstance': {
                        'return': xpcom_const('createInstance')},
                    u'getService': {
                        'return': xpcom_const('getService')}}},
            'utils': {
                'value': {
                    u'evalInSandbox': {
                        'dangerous': {
                            # NOTE(review): other entries use the boolean
                            # True here; the string 'true' is truthy but
                            # looks accidental -- confirm.
                            'editors_only': 'true',
                            'signing_help':
                                DANGEROUS_EVAL['signing_help'],
                            'signing_severity': 'low'}},
                    u'cloneInto': {
                        'dangerous': {
                            'editors_only': True,
                            'signing_help': FUNCTION_EXPORT_HELP,
                            'signing_severity': 'low',
                            'description': (
                                'Can be used to expose privileged '
                                'functionality to unprivileged scopes. '
                                'Care should be taken to ensure that '
                                'this is done safely.')}},
                    u'exportFunction': {
                        'dangerous': {
                            'editors_only': True,
                            'signing_help': FUNCTION_EXPORT_HELP,
                            'signing_severity': 'low',
                            'description': (
                                'Can be used to expose privileged '
                                'functionality to unprivileged scopes. '
                                'Care should be taken to ensure that '
                                'this is done safely.')}},
                    u'import': {
                        'dangerous':
                            lambda a, t, e:
                                a and 'ctypes.jsm' in _get_as_str(t(a[0]))},
                    u'waiveXrays': {'return': call_definitions.js_unwrap}}},
            u'interfaces': {'value': INTERFACE_ENTITIES}}},
    u'extensions': {'dangerous': True},
    u'xpcnativewrappers': {'dangerous': True},

    u'XMLHttpRequest': {
        'value': {
            u'open': {
                # Flag synchronous XHR: dangerous when the third argument is
                # present with a falsy literal value.
                'dangerous':
                    lambda a, t, e:
                        a and len(a) >= 3 and
                        not t(a[2]).get_literal_value() and
                        'Synchronous HTTP requests can cause serious UI '
                        'performance problems, especially to users with '
                        'slow network connections.'}}},

    # Global properties are inherently read-only, though this formalizes it.
    u'Infinity': {
        'value':
            lambda t:
                GLOBAL_ENTITIES[u'Number']['value'][u'POSITIVE_INFINITY']},
    u'NaN': {'readonly': True},
    u'undefined': {'readonly': True},

    u'innerHeight': {'readonly': False},
    u'innerWidth': {'readonly': False},
    u'width': {'readonly': False},
    u'height': {'readonly': False},

    # Content-window accessors (only meaningful in a 'content' context).
    u'content': {
        'context': 'content',
        'value': {
            u'document': {'value': lambda t: GLOBAL_ENTITIES[u'document']}}},
    u'contentWindow': {
        'context': 'content',
        'value': lambda t: {'value': GLOBAL_ENTITIES}},
    u'_content': {'value': lambda t: GLOBAL_ENTITIES[u'content']},
    u'gBrowser': {
        'value': {
            u'contentDocument': {
                'context': 'content',
                'value': lambda t: CONTENT_DOCUMENT},
            u'contentWindow': {
                'value': lambda t: {'value': GLOBAL_ENTITIES}},
            u'selectedTab': {'readonly': False}}},
    u'opener': {'value': lambda t: {'value': GLOBAL_ENTITIES}},

    u'XPCNativeWrapper': {
        'value': {
            u'unwrap': {'return': call_definitions.js_unwrap}},
        'return': call_definitions.js_wrap},

    # Preference creation in pref defaults files
    u'pref': {'dangerous': actions._call_create_pref},
    u'user_pref': {'dangerous': actions._call_create_pref},

    u'unsafeWindow': {'dangerous': 'The use of unsafeWindow is insecure and '
                                   'should be avoided whenever possible. '
                                   'Consider using a different API if it is '
                                   'available in order to achieve similar '
                                   'functionality.'},

    u'XPCOMUtils': {
        'value': {u'categoryManager': {'value': CATEGORY_MANAGER}}},

    u'MarionetteComponent': {'dangerous_on_read': MARIONETTE_MESSAGE},
    u'MarionetteServer': {'dangerous_on_read': MARIONETTE_MESSAGE},

    'Application': {'dangerous_on_read': fuel_error},

    'NewTabURL': {'value': {'override': entity('NewTabURL.override')}},

    'Proxy': {
        'value': {
            u'createFunction': {'return': call_definitions.Proxy_deprec},
            u'create': {'return': call_definitions.Proxy_deprec}}},

    # Common third-party libraries
    'Handlebars': {
        'value': {
            'SafeString': {
                'dangerous':
                    UNSAFE_TEMPLATE_METHOD % 'Handlebars.SafeString'}}},
    # Angular
    '$sce': {
        'value': {
            'trustAs': {'dangerous':
                        UNSAFE_TEMPLATE_METHOD % '$sce.trustAs'},
            'trustAsHTML': {'dangerous':
                            UNSAFE_TEMPLATE_METHOD % '$sce.trustAsHTML'}}},
}
# Alias for the content document entity; referenced lazily by the
# gBrowser.contentDocument lambda defined above.
CONTENT_DOCUMENT = GLOBAL_ENTITIES[u'content']['value'][u'document']
#! /usr/bin/env python
# CMSIS-DAP Interface Firmware
# Copyright (c) 2009-2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
import argparse
import subprocess
import settings
# uVision project definitions for the bootloader firmware.  Each entry names
# a chip family ('target', matched against the --target filter), the
# .uvproj file ('path', relative to the repository root), and the build
# targets within that project.
BOOTLOADER_PROJECTS = [
    {
        'target': 'k20dx128',
        'path': r'bootloader\mdk\k20dx128\k20dx128_bootloader.uvproj',
        'targets': [
            'k20dx128_bootloader'
        ]
    },
    {
        'target': 'lpc11u35',
        'path': r'bootloader\mdk\lpc11u35\lpc11u35_bootloader.uvproj',
        'targets': [
            'lpc11u35_bootloader'
        ]
    }
]
# uVision project definitions for the CMSIS-DAP interface firmware; same
# entry structure as BOOTLOADER_PROJECTS.
INTERFACE_PROJECTS = [
    {
        'target': 'k20dx128',
        'path': r'interface\mdk\k20dx128\k20dx128_interface.uvproj',
        'targets': [
            'k20dx128_kl25z_if',
            'k20dx128_kl25z_openSDA_bootloader_if',
            'k20dx128_kl25z_mbed_bootloader_if',
            'k20dx128_twr_kl25z_if',
            'k20dx128_twr_kl25z_openSDA_bootloader_if',
            'k20dx128_twr_kl25z_mbed_bootloader_if',
            'k20dx128_k20dx_if',
            'k20dx128_k20dx_openSDA_bootloader_if',
            'k20dx128_k20dx_mbed_bootloader_if',
            'k20dx128_kl05z_if',
            'k20dx128_kl05z_openSDA_bootloader_if',
            'k20dx128_kl05z_mbed_bootloader_if',
            'k20dx128_kl46z_if',
            'k20dx128_kl46z_openSDA_bootloader_if',
            'k20dx128_kl46z_mbed_bootloader_if',
            'k20dx128_k64f_if',
            'k20dx128_k64f_openSDA_bootloader_if',
            'k20dx128_k64f_mbed_bootloader_if',
            'k20dx128_kl02z_if',
            'k20dx128_kl02z_openSDA_bootloader_if',
            'k20dx128_kl02z_mbed_bootloader_if',
            'k20dx128_kl26z_if',
            'k20dx128_kl26z_openSDA_bootloader_if',
            'k20dx128_kl26z_mbed_bootloader_if',
        ],
    },
    {
        'target': 'lpc11u35',
        'path': r'interface\mdk\lpc11u35\lpc11u35_interface.uvproj',
        'targets': [
            'lpc11u35_lpc812_if',
            'lpc11u35_lpc812_mbed_bootloader',
            'lpc11u35_lpc1768_if',
            'lpc11u35_lpc1768_mbed_bootloader',
            'lpc11u35_ublox_lpc1768_if',
            'lpc11u35_lpc1114_if',
            'lpc11u35_lpc1114_mbed_bootloader',
            #'lpc11u35_lpc810_if' # fails build
        ],
    },
    {
        'target': 'lpc4322',
        'path': r'interface\mdk\lpc4322\lpc4322_interface.uvproj',
        'targets': [
            'lpc4322_lpc1549_if',
            'lpc4322_lpc1549_dbg_sram',
            'lpc4322_lpc11U68_if',
            'lpc4322_lpc11U68_dbg_sram',
            'lpc4322_lpc4337_if',
        ],
    }
]
# uVision project definitions for the flash-algorithm blobs; same entry
# structure as BOOTLOADER_PROJECTS.
FLASH_ALGO_PROJECTS = [
    {
        'target': 'MKXXX',
        'path': r'interface\flash_algo_mdk\MKXXX\MKXX.uvproj',
        'targets': [
            'MK20DX128_Pflash',
            'MK64FN1M0_Pflash',
            'MKL02Z32_Pflash',
            'MKL05Z32_Pflash',
            'MKL25Z128_Pflash',
            'MKL26Z128_Pflash',
            'MKL46Z256_Pflash',
            'MK22F51212_Pflash'
        ]
    },
    {
        'target': 'LPC_IAP',
        'path': r'interface\flash_algo_mdk\LPC_IAP\LPC_IAP.uvproj',
        'targets': [
            'LPC1700_IAP_512',
            'LPC1700_IAP_256',
            'LPC1700_IAP_128',
            'LPC1700_IAP_64',
            'LPC1700_IAP_32',
            'LPC1700_IAP_512_MBED_60MHz',
            #'LPC11xx_IAP_32',
            'LPC8xx_IAP_4',
            'LPC1549_IAP_256',
            'LPC11U68_IAP_256',
            'LPC4337_IAP_1024'
        ]
    },
    {
        'target': 'LPC_SPIFI',
        'path': r'interface\flash_algo_mdk\LPC_SPIFI\LPC_SPIFI.uvproj',
        'targets': [
            'LPC1800_SPIFI_8M_4K',
            'LPC1800_SPIFI_4M_4K',
            'LPC1800_SPIFI_4M_64K',
        ]
    }
]
class BuildError(Exception):
    """Raised when building a uVision project target fails."""
##
# @brief Class to build uVision projects via the UV4 command line.
class UV4Project(object):
    # Status codes returned by the UV4 build invocation.
    ## No warnings or errors.
    SUCCESS = 0
    ## Warnings only.
    WARNINGS = 1
    ## Fatal errors and possibly warnings.
    ERRORS = 2
    ## The requested target does not exist.
    INVALID_TARGET = 3
    ## The project file does not exist.
    INVALID_PROJECT = 15

    ##
    # @brief Constructor.
    # @param self
    # @param project Path to the project file.
    def __init__(self, project):
        self.project = project

    ##
    # @brief Build a target of the project.
    #
    # @param self
    # @param target Name of the desired target to build. If not specified, or set to None, the
    #   currently selected target (in the GUI) will be built.
    # @param logFile Path to a file that the build log will be written to. The path is relative
    #   to the project file's directory. The log file will be created if it doesn't exist, and
    #   it will be overwritten if it does already exist.
    #
    # @return The integer status code from the uVision build.
    def build(self, target=None, logFile=None):
        # Assemble the UV4 command line: -j0 suppresses the GUI, -b builds.
        command = [settings.UV4, '-j0', '-b', self.project]
        if target:
            command.extend(['-t', target])
        if logFile:
            command.extend(['-o', logFile])

        # Run UV4 command and hand back its exit status.
        return subprocess.call(command)
##
# @brief Command line interface to the UV4 builder.
class Builder(object):
    ##
    # @brief Constructor; locates the repository root relative to this file.
    def __init__(self):
        here = os.path.abspath(__file__)
        self.rootPath = os.path.dirname(os.path.dirname(here))

    ##
    # @brief Parse the command line options into a namespace.
    def _read_options(self):
        # Build arg parser.
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument("-b", "--bootloader", action="store_true",
                            help="Build bootloader projects.")
        parser.add_argument("-i", "--interface", action="store_true",
                            help="Build interface projects.")
        parser.add_argument("-f", "--flash", action="store_true",
                            help="Build flash algo projects.")
        parser.add_argument("-t", "--target", metavar="TARGET", default=None,
                            help="Specify the target to build.")
        parser.add_argument("-l", "--log", metavar="PATH",
                            help="Specify the log file.")
        return parser.parse_args()

    ##
    # @brief Build every selected project group.
    # @return 0 on success, 1 if any target failed to build.
    def run(self):
        # Read command line arguments.
        self.args = self._read_options()

        # Build all projects if no type was specified.
        if not (self.args.bootloader or self.args.interface or
                self.args.flash):
            self.args.bootloader = self.args.interface = self.args.flash = True

        groups = ((self.args.bootloader, BOOTLOADER_PROJECTS),
                  (self.args.interface, INTERFACE_PROJECTS),
                  (self.args.flash, FLASH_ALGO_PROJECTS))
        try:
            for enabled, projects in groups:
                if enabled:
                    self._build_project_list(projects)
        except BuildError:
            return 1
        return 0

    ##
    # @brief Build all targets of each project in @a projects, honoring the
    #   --target prefix filter.  Raises BuildError on the first failure.
    def _build_project_list(self, projects):
        prefix = self.args.target.lower() if self.args.target else None
        for targetDict in projects:
            # Skip this project if a filter was given and the chip family
            # doesn't match it.
            if prefix is not None and \
                    not targetDict['target'].lower().startswith(prefix):
                continue

            # Construct project path and name.
            projectPath = os.path.join(self.rootPath, targetDict['path'])
            projectName = os.path.basename(projectPath)

            # Create the project file object.
            project = UV4Project(projectPath)

            # Build all targets listed for this project.
            for targetName in targetDict['targets']:
                print("Building target %s of %s..." % (targetName, projectName))
                status = project.build(targetName, self.args.log)
                print("Status = %d" % status)
                if status not in (UV4Project.SUCCESS, UV4Project.WARNINGS):
                    print("* Error building target %s of %s"
                          % (targetName, projectName))
                    raise BuildError
if __name__ == "__main__":
    # Use sys.exit (sys is imported above) rather than the bare exit()
    # helper, which is only installed by the optional site module.
    sys.exit(Builder().run())
import os
import unittest
from mako import compat
from mako import exceptions
from mako import lookup
from mako import runtime
from mako.template import Template
from mako.util import FastEncodingBuffer
from test import assert_raises_message
from test import eq_
from test import template_base
from test.util import result_lines
# Shared lookup rooted at the test template directory; used by most of the
# tests below (some tests shadow it with a local lookup of their own).
tl = lookup.TemplateLookup(directories=[template_base])
class LookupTest(unittest.TestCase):
    """Tests for TemplateLookup URI resolution and collection management."""

    def test_basic(self):
        """A plain template name resolves relative to the lookup root."""
        t = tl.get_template("index.html")
        assert result_lines(t.render()) == ["this is index"]

    def test_subdir(self):
        """Templates in subdirectories render and get stable module ids."""
        t = tl.get_template("/subdir/index.html")
        assert result_lines(t.render()) == [
            "this is sub index",
            "this is include 2",
        ]

        assert (
            tl.get_template("/subdir/index.html").module_id
            == "_subdir_index_html"
        )

    def test_updir(self):
        """'..' segments inside a URI are normalized away."""
        t = tl.get_template("/subdir/foo/../bar/../index.html")
        assert result_lines(t.render()) == [
            "this is sub index",
            "this is include 2",
        ]

    def test_directory_lookup(self):
        """test that hitting an existent directory still raises
        LookupError."""

        self.assertRaises(
            exceptions.TopLevelLookupException, tl.get_template, "/subdir"
        )

    def test_no_lookup(self):
        """Rendering an <%include> with no associated lookup fails clearly."""
        t = Template("hi <%include file='foo.html'/>")
        try:
            t.render()
            assert False
        except exceptions.TemplateLookupException:
            eq_(
                str(compat.exception_as()),
                "Template 'memory:%s' has no TemplateLookup associated"
                % hex(id(t)),
            )

    def test_uri_adjust(self):
        """filename_to_uri strips the lookup directory prefix."""
        tl = lookup.TemplateLookup(directories=["/foo/bar"])
        assert (
            tl.filename_to_uri("/foo/bar/etc/lala/index.html")
            == "/etc/lala/index.html"
        )

        tl = lookup.TemplateLookup(directories=["./foo/bar"])
        assert (
            tl.filename_to_uri("./foo/bar/etc/index.html") == "/etc/index.html"
        )

    def test_uri_cache(self):
        """test that the _uri_cache dictionary is available"""
        tl._uri_cache[("foo", "bar")] = "/some/path"
        assert tl._uri_cache[("foo", "bar")] == "/some/path"

    def test_check_not_found(self):
        """A template whose source file vanished is evicted on re-lookup."""
        tl = lookup.TemplateLookup()
        tl.put_string("foo", "this is a template")
        f = tl.get_template("foo")
        assert f.uri in tl._collection
        f.filename = "nonexistent"
        self.assertRaises(
            exceptions.TemplateLookupException, tl.get_template, "foo"
        )
        assert f.uri not in tl._collection

    def test_dont_accept_relative_outside_of_root(self):
        """test the mechanics of an include where
        the include goes outside of the path"""
        tl = lookup.TemplateLookup(
            directories=[os.path.join(template_base, "subdir")]
        )
        index = tl.get_template("index.html")

        ctx = runtime.Context(FastEncodingBuffer())
        ctx._with_template = index

        assert_raises_message(
            exceptions.TemplateLookupException,
            'Template uri "../index.html" is invalid - it '
            "cannot be relative outside of the root path",
            runtime._lookup_template,
            ctx,
            "../index.html",
            index.uri,
        )

        assert_raises_message(
            exceptions.TemplateLookupException,
            'Template uri "../othersubdir/foo.html" is invalid - it '
            "cannot be relative outside of the root path",
            runtime._lookup_template,
            ctx,
            "../othersubdir/foo.html",
            index.uri,
        )

        # this is OK since the .. cancels out
        runtime._lookup_template(ctx, "foo/../index.html", index.uri)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.audit.CommonAuditContext;
import org.apache.hadoop.ipc.CallerContext;
/**
* A utility to help run {@link Tool}s.
*
* <p><code>ToolRunner</code> can be used to run classes implementing
* <code>Tool</code> interface. It works in conjunction with
* {@link GenericOptionsParser} to parse the
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options">
* generic hadoop command line arguments</a> and modifies the
* <code>Configuration</code> of the <code>Tool</code>. The
* application-specific options are passed along without being modified.
* </p>
*
* @see Tool
* @see GenericOptionsParser
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ToolRunner {
/**
* Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
* parsing with the given generic arguments. Uses the given
* <code>Configuration</code>, or builds one if null.
*
* Sets the <code>Tool</code>'s configuration with the possibly modified
* version of the <code>conf</code>.
*
* @param conf <code>Configuration</code> for the <code>Tool</code>.
* @param tool <code>Tool</code> to run.
* @param args command-line arguments to the tool.
* @return exit code of the {@link Tool#run(String[])} method.
* @throws Exception Exception.
*/
public static int run(Configuration conf, Tool tool, String[] args)
throws Exception{
if (CallerContext.getCurrent() == null) {
CallerContext ctx = new CallerContext.Builder("CLI").build();
CallerContext.setCurrent(ctx);
}
// Note the entry point in the audit context; this
// may be used in audit events set to cloud store logs
// or elsewhere.
CommonAuditContext.noteEntryPoint(tool);
if(conf == null) {
conf = new Configuration();
}
GenericOptionsParser parser = new GenericOptionsParser(conf, args);
//set the configuration back, so that Tool can configure itself
tool.setConf(conf);
//get the args w/o generic hadoop args
String[] toolArgs = parser.getRemainingArgs();
return tool.run(toolArgs);
}
/**
* Runs the <code>Tool</code> with its <code>Configuration</code>.
*
* Equivalent to <code>run(tool.getConf(), tool, args)</code>.
*
* @param tool <code>Tool</code> to run.
* @param args command-line arguments to the tool.
* @return exit code of the {@link Tool#run(String[])} method.
* @throws Exception exception.
*/
public static int run(Tool tool, String[] args)
throws Exception{
return run(tool.getConf(), tool, args);
}
/**
* Prints generic command-line argurments and usage information.
*
* @param out stream to write usage information to.
*/
public static void printGenericCommandUsage(PrintStream out) {
GenericOptionsParser.printGenericCommandUsage(out);
}
/**
* Print out a prompt to the user, and return true if the user
* responds with "y" or "yes". (case insensitive).
*
* @param prompt prompt.
* @throws IOException raised on errors performing I/O.
* @return if the user
* responds with "y" or "yes". (case insensitive) true,
* not false.
*/
public static boolean confirmPrompt(String prompt) throws IOException {
while (true) {
System.err.print(prompt + " (Y or N) ");
StringBuilder responseBuilder = new StringBuilder();
while (true) {
int c = System.in.read();
if (c == -1 || c == '\r' || c == '\n') {
break;
}
responseBuilder.append((char)c);
}
String response = responseBuilder.toString();
if (response.equalsIgnoreCase("y") ||
response.equalsIgnoreCase("yes")) {
return true;
} else if (response.equalsIgnoreCase("n") ||
response.equalsIgnoreCase("no")) {
return false;
}
System.err.println("Invalid input: " + response);
// else ask them again
}
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(mberlin): Remove this file when SplitClone supports merge-sorting
# primary key columns based on the MySQL collation.
"""This test covers the vtworker LegacySplitClone command.
The vtworker LegacySplitClone should only be used when it is necessary to
reshard a table that has textual primary key columns (e.g. VARCHAR).
This is the case for the "timestamps" table in this end-to-end test.
The reason why only LegacySplitClone supports this use case is because the new
resharding clone code (as of https://github.com/vitessio/vitess/pull/1796)
requires to sort rows by their primary key. Whereas LegacySplitClone does a
simple copy and always assumes that the tables on the destination are empty,
the SplitClone command can diff the source and destination tables. In case of
a horizontal resharding this requires merge-sorting multiple destination shards.
Since we currently do not support sorting VARCHAR primary key columns in
SplitClone (due to missing support for MySQL collations), you'll have to resort
to LegacySplitClone only for this use case.
Note that this file was copied from the original resharding.py file.
We start with shards -80 and 80-. We then split 80- into 80-c0 and c0-.
This test is the main resharding test. It not only tests the regular resharding
workflow for an horizontal split, but also a lot of error cases and side
effects, like:
- migrating the traffic one cell at a time.
- migrating rdonly traffic back and forth.
- making sure we can't migrate the master until replica and rdonly are migrated.
- has a background thread to insert data during migration.
- tests a destination shard master failover while replication is running.
- tests a filtered replication source replacement while filtered replication
is running.
- tests 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete'.
- makes sure the key range rules are properly enforced on masters.
"""
import struct
import logging
import unittest
import base_sharding
import environment
import tablet
import utils
from vtproto import topodata_pb2
from vtdb import keyrange_constants
# The sharding key is a uint64 column packed big-endian.
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack

# Tablets for the two source shards and the two destination (split) shards.
# initial shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
# range c0 - ''
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()

# Flat list used by module setup/teardown to manage every MySQL instance.
all_tablets = [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
               shard_1_master, shard_1_slave1, shard_1_slave2,
               shard_1_ny_rdonly, shard_1_rdonly1,
               shard_2_master, shard_2_replica1, shard_2_replica2,
               shard_3_master, shard_3_replica, shard_3_rdonly1]
def setUpModule():
  """Start the topo server, vtctld and one MySQL instance per tablet."""
  try:
    environment.topo_server().setup()
    setup_procs = [t.init_mysql() for t in all_tablets]
    utils.Vtctld().start()
    utils.wait_procs(setup_procs)
  except:
    # Tear down any partially-started services before re-raising so a
    # failed setup does not leak processes.
    tearDownModule()
    raise
def tearDownModule():
  """Stop all services and remove per-tablet state, unless teardown is skipped."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  # Shut down all MySQL instances in parallel; errors are tolerated here.
  teardown_procs = [t.teardown_mysql() for t in all_tablets]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
class TestResharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
  def _create_schema(self):
    """Apply the test schema (tables, a view and the textual-PK table)."""
    # The sharding column type in SQL must match the keyspace id type.
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      t = 'varbinary(64)'
    else:
      t = 'bigint(20) unsigned'
    # Note that the primary key columns are not defined first on purpose to test
    # that a reordered column list is correctly used everywhere in vtworker.
    create_table_template = '''create table %s(
msg varchar(64),
custom_ksid_col ''' + t + ''' not null,
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
    create_view_template = (
        'create view %s'
        '(id, msg, custom_ksid_col) as select id, msg, custom_ksid_col '
        'from %s')
    # "timestamps" has a VARCHAR primary key: the case LegacySplitClone exists
    # for, since SplitClone cannot merge-sort textual primary keys.
    create_timestamp_table = '''create table timestamps(
name varchar(64),
time_milli bigint(20) unsigned not null,
custom_ksid_col ''' + t + ''' not null,
primary key (name)
) Engine=InnoDB'''
    # "unrelated" is excluded from the clone via --exclude_tables.
    create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding2'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_view_template % ('view1', 'resharding1'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_timestamp_table,
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_unrelated_table,
                     'test_keyspace'],
                    auto_log=True)
def _insert_startup_values(self):
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_1_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _check_startup_values(self):
# check first value is in the right shard
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica1, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica2, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_replica, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_rdonly1, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# check second value is in the right shard too
self._check_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica1, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_replica, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_rdonly1, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_1_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
# _check_lots returns how many of the values we have, in percents.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_2_replica2, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_3_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_3_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
  def test_resharding(self):
    """End-to-end horizontal resharding of shard 80- into 80-c0 and c0-.

    Walks the full workflow: keyspace setup, LegacySplitClone copy,
    filtered replication, SplitDiff verification, served-type migration
    (rdonly, replica, master, one cell at a time and back and forth),
    SourceShard record manipulation, and teardown of the source shard.
    """
    utils.run_vtctl(['CreateKeyspace',
                     '--sharding_column_name', 'bad_column',
                     '--sharding_column_type', 'bytes',
                     'test_keyspace'])
    utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
                     'custom_ksid_col', 'uint64'], expect_fail=True)
    utils.run_vtctl(['SetKeyspaceShardingInfo', '-force',
                     'test_keyspace', 'custom_ksid_col', keyspace_id_type])

    shard_0_master.init_tablet('replica', 'test_keyspace', '-80')
    shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
    shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
    shard_1_master.init_tablet('replica', 'test_keyspace', '80-')
    shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
    shard_1_slave2.init_tablet('replica', 'test_keyspace', '80-')
    shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
    shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')

    # we set full_mycnf_args to True as a test in the KIT_BYTES case
    full_mycnf_args = keyspace_id_type == keyrange_constants.KIT_BYTES

    # create databases so vttablet can start behaving somewhat normally
    for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
              shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
              shard_1_rdonly1]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args,
                       binlog_use_v3_resharding_mode=False)

    # wait for the tablets (replication is not setup, they won't be healthy)
    for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
              shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
              shard_1_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')

    # reparent to make the tablets work
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
                     shard_1_master.tablet_alias], auto_log=True)

    # check the shards
    shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
    self.assertIn('-80', shards, 'unexpected shards: %s' % str(shards))
    self.assertIn('80-', shards, 'unexpected shards: %s' % str(shards))
    self.assertEqual(len(shards), 2, 'unexpected shards: %s' % str(shards))

    # create the tables
    self._create_schema()
    self._insert_startup_values()

    # run a health check on source replicas so they respond to discovery
    # (for binlog players) and on the source rdonlys (for workers)
    for t in [shard_0_replica, shard_1_slave1]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])

    # create the split shards
    shard_2_master.init_tablet('replica', 'test_keyspace', '80-c0')
    shard_2_replica1.init_tablet('replica', 'test_keyspace', '80-c0')
    shard_2_replica2.init_tablet('replica', 'test_keyspace', '80-c0')
    shard_3_master.init_tablet('replica', 'test_keyspace', 'c0-')
    shard_3_replica.init_tablet('replica', 'test_keyspace', 'c0-')
    shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')

    # start vttablet on the split shards (no db created,
    # so they're all not serving)
    shard_2_master.start_vttablet(wait_for_state=None,
                                  binlog_use_v3_resharding_mode=False)
    shard_3_master.start_vttablet(wait_for_state=None,
                                  binlog_use_v3_resharding_mode=False)
    for t in [shard_2_replica1, shard_2_replica2,
              shard_3_replica, shard_3_rdonly1]:
      t.start_vttablet(wait_for_state=None,
                       binlog_use_v3_resharding_mode=False)
    for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
              shard_3_master, shard_3_replica, shard_3_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')

    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-c0',
                     shard_2_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/c0-',
                     shard_3_master.tablet_alias], auto_log=True)

    # check the shards
    shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
    for s in ['-80', '80-', '80-c0', 'c0-']:
      self.assertIn(s, shards, 'unexpected shards: %s' % str(shards))
    self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards))

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                    auto_log=True)
    utils.check_srv_keyspace(
        'test_nj', 'test_keyspace',
        'Partitions(master): -80 80-\n'
        'Partitions(rdonly): -80 80-\n'
        'Partitions(replica): -80 80-\n',
        keyspace_id_type=keyspace_id_type,
        sharding_column_name='custom_ksid_col')

    # disable shard_1_slave2, so we're sure filtered replication will go
    # from shard_1_slave1
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare'])
    shard_1_slave2.wait_for_vttablet_state('NOT_SERVING')

    # we need to create the schema, and the worker will do data copying
    for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
      utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
                       shard_1_rdonly1.tablet_alias, keyspace_shard],
                      auto_log=True)

    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    utils.run_vtworker(['--cell', 'test_nj',
                        '--command_display_interval', '10ms',
                        '--use_v3_resharding_mode=false',
                        'LegacySplitClone',
                        '--exclude_tables', 'unrelated',
                        '--min_healthy_rdonly_tablets', '1',
                        '--max_tps', '9999',
                        'test_keyspace/80-'],
                       auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
                     'rdonly'], auto_log=True)

    # TODO(alainjobart): experiment with the dontStartBinlogPlayer option

    # check the startup values are in the right place
    self._check_startup_values()

    # check the schema too
    utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
                     'test_keyspace'], auto_log=True)

    # check the binlog players are running and exporting vars
    self.check_destination_master(shard_2_master, ['test_keyspace/80-'])
    self.check_destination_master(shard_3_master, ['test_keyspace/80-'])

    # check that binlog server exported the stats vars
    self.check_binlog_server_vars(shard_1_slave1, horizontal=True)

    # Check that the throttler was enabled.
    self.check_throttler_service(shard_2_master.rpc_endpoint(),
                                 ['BinlogPlayer/0'], 9999)
    self.check_throttler_service(shard_3_master.rpc_endpoint(),
                                 ['BinlogPlayer/0'], 9999)

    # testing filtered replication: insert a bunch of data on shard 1,
    # check we get most of it after a few seconds, wait for binlog server
    # timeout, check we get all of it.
    logging.debug('Inserting lots of data on source shard')
    self._insert_lots(1000)
    logging.debug('Checking 80 percent of data is sent quickly')
    v = self._check_lots_timeout(1000, 80, 5)
    if v != 100:
      # small optimization: only do this check if we don't have all the data
      # already anyway.
      logging.debug('Checking all data goes through eventually')
      self._check_lots_timeout(1000, 100, 20)
    logging.debug('Checking no data was sent the wrong way')
    self._check_lots_not_present(1000)
    self.check_binlog_player_vars(shard_2_master, ['test_keyspace/80-'],
                                  seconds_behind_master_max=30)
    self.check_binlog_player_vars(shard_3_master, ['test_keyspace/80-'],
                                  seconds_behind_master_max=30)
    self.check_binlog_server_vars(shard_1_slave1, horizontal=True,
                                  min_statements=1000, min_transactions=1000)

    # use vtworker to compare the data (after health-checking the destination
    # rdonly tablets so discovery works)
    utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias])
    logging.debug('Running vtworker SplitDiff')
    utils.run_vtworker(['-cell', 'test_nj',
                        '--use_v3_resharding_mode=false',
                        'SplitDiff',
                        '--exclude_tables', 'unrelated',
                        '--min_healthy_rdonly_tablets', '1',
                        'test_keyspace/c0-'],
                       auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)

    utils.pause('Good time to test vtworker for diffs')

    # get status for destination master tablets, make sure we have it all
    self.check_running_binlog_player(shard_2_master, 4000, 2000)
    self.check_running_binlog_player(shard_3_master, 4000, 2000)

    # tests a failover switching serving to a different replica
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
    shard_1_slave2.wait_for_vttablet_state('SERVING')
    shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias])

    # test data goes through again
    logging.debug('Inserting lots of data on source shard')
    self._insert_lots(1000, base=1000)
    logging.debug('Checking 80 percent of data was sent quickly')
    self._check_lots_timeout(1000, 80, 5, base=1000)
    self.check_binlog_server_vars(shard_1_slave2, horizontal=True,
                                  min_statements=800, min_transactions=800)

    # check we can't migrate the master just yet
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
                    expect_fail=True)

    # check query service is off on master 2 and master 3, as filtered
    # replication is enabled. Even health check that is enabled on
    # master 3 should not interfere (we run it to be sure).
    utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias],
                    auto_log=True)
    for master in [shard_2_master, shard_3_master]:
      utils.check_tablet_query_service(self, master, False, False)
      stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                            '-count', '1',
                                            master.tablet_alias])
      logging.debug('Got health: %s', str(stream_health))
      self.assertIn('realtime_stats', stream_health)
      self.assertNotIn('serving', stream_health)

    # check the destination master 3 is healthy, even though its query
    # service is not running (if not healthy this would exception out)
    shard_3_master.get_healthz()

    # now serve rdonly from the split shards, in test_nj only
    utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
                     'test_keyspace/80-', 'rdonly'], auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_srv_keyspace('test_ny', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
    utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
    utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)

    # now serve rdonly from the split shards, everywhere
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_srv_keyspace('test_ny', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
    utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
    utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)

    # then serve replica from the split shards
    destination_shards = ['test_keyspace/80-c0', 'test_keyspace/c0-']

    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-c0 c0-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_1_slave2, False, True)

    # move replica back and forth
    utils.run_vtctl(
        ['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
        auto_log=True)
    # After a backwards migration, queryservice should be enabled on
    # source and disabled on destinations
    utils.check_tablet_query_service(self, shard_1_slave2, True, False)
    # Destination tablets would have query service disabled for other
    # reasons than the migration, so check the shard record instead of
    # the tablets directly.
    utils.check_shard_query_services(self, destination_shards,
                                     topodata_pb2.REPLICA, False)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')

    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
                    auto_log=True)
    # After a forwards migration, queryservice should be disabled on
    # source and enabled on destinations
    utils.check_tablet_query_service(self, shard_1_slave2, False, True)
    # Destination tablets would have query service disabled for other
    # reasons than the migration, so check the shard record instead of
    # the tablets directly
    utils.check_shard_query_services(self, destination_shards,
                                     topodata_pb2.REPLICA, True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-c0 c0-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')

    # use vtworker to compare the data again
    logging.debug('Running vtworker SplitDiff')
    utils.run_vtworker(['-cell', 'test_nj',
                        '--use_v3_resharding_mode=false',
                        'SplitDiff',
                        '--exclude_tables', 'unrelated',
                        '--min_healthy_rdonly_tablets', '1',
                        'test_keyspace/c0-'],
                       auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)

    # mock with the SourceShard records to test 'vtctl SourceShardDelete'
    # and 'vtctl SourceShardAdd'
    utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '0'],
                    auto_log=True)
    utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
                     'test_keyspace/c0-', '0', 'test_keyspace/80-'],
                    auto_log=True)

    # then serve master from the split shards, make sure the source master's
    # query service is now turned off
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-c0 c0-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-c0 c0-\n',
                             keyspace_id_type=keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_1_master, False, True)

    # check the binlog players are gone now
    self.check_no_binlog_player(shard_2_master)
    self.check_no_binlog_player(shard_3_master)

    # delete the original tablets in the original shard
    tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
                         shard_1_ny_rdonly, shard_1_rdonly1])
    for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
              shard_1_rdonly1]:
      utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
    utils.run_vtctl(['DeleteTablet', '-allow_master',
                     shard_1_master.tablet_alias], auto_log=True)

    # rebuild the serving graph, all mentions of the old shards should be gone
    utils.run_vtctl(
        ['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)

    # test RemoveShardCell
    utils.run_vtctl(
        ['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
        expect_fail=True)
    utils.run_vtctl(
        ['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
    utils.run_vtctl(
        ['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
    shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
    self.assertTrue('cells' not in shard or not shard['cells'])

    # delete the original shard
    utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)

    # kill everything
    tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
                         shard_2_master, shard_2_replica1, shard_2_replica2,
                         shard_3_master, shard_3_replica, shard_3_rdonly1])
if __name__ == '__main__':
  # utils.main() parses the standard test flags and then runs unittest.main().
  utils.main()
-- the sieve of Eratosthenes programmed with coroutines
-- typical usage: lua -e N=1000 sieve.lua | column
-- generate all the numbers from 2 to n
-- produce every integer in [2, n], one value per coroutine resume;
-- the wrapped coroutine returns nil once the range is exhausted
function gen (n)
  local produce = function ()
    local i = 2
    while i <= n do
      coroutine.yield(i)
      i = i + 1
    end
  end
  return coroutine.wrap(produce)
end
-- filter the numbers generated by `g', removing multiples of `p'
-- filter the numbers generated by `g', removing multiples of `p'
function filter (p, g)
  return coroutine.wrap(function ()
    while 1 do
      local n = g()
      if n == nil then return end
      -- the `%` operator replaces math.mod, which was deprecated in Lua 5.1
      -- and is absent unless the interpreter is built with LUA_COMPAT_MOD
      if n % p ~= 0 then coroutine.yield(n) end
    end
  end)
end
-- driver: repeatedly take the next surviving number (always prime) and
-- stack another filter over the stream to drop its multiples
N=N or 1000 -- from command line (e.g. lua -e N=1000 sieve.lua)
x = gen(N) -- generate primes up to N
while 1 do
  local n = x() -- pick a number until done
  if n == nil then break end
  print(n) -- must be a prime number
  x = filter(n, x) -- now remove its multiples
end
from Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import NumberActionMap
from Components.config import config, getConfigListEntry, ConfigNothing, NoSave, configfile
from Components.Sources.StaticText import StaticText
from Screens.MessageBox import MessageBox
from Screens.InputBox import PinInput
from Tools.BoundFunction import boundFunction
class ProtectedScreen:
	"""Mixin that gates a screen behind the parental-control service PIN.

	Subclasses may override isProtected() to decide when the prompt is
	required; when it is, a PinInput dialog is queued to open before the
	screen first executes.
	"""
	def __init__(self):
		# Only prompt when protection applies and a service PIN is configured.
		if self.isProtected() and config.ParentalControl.servicepin[0].value:
			self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the correct pin code"), windowTitle=_("Enter pin code")))
	def isProtected(self):
		# Default policy: protected while either service or setup protection is enabled.
		return (config.ParentalControl.servicepinactive.value or config.ParentalControl.setuppinactive.value)
	def pinEntered(self, result):
		# result is None when the dialog was aborted, falsy when the PIN was wrong.
		if result is None:
			self.closeProtectedScreen()
		elif not result:
			self.session.openWithCallback(self.closeProtectedScreen, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3)
	def closeProtectedScreen(self, result=None):
		self.close(None)
class ParentalControlSetup(Screen, ConfigListScreen, ProtectedScreen):
	"""Configuration screen for the parental-control feature.

	Lets the user set/change the PIN, toggle service protection and choose
	which system screens are PIN-protected.  The option list is rebuilt by
	createSetup() after every value change so dependent options appear and
	disappear dynamically.
	"""
	def __init__(self, session):
		Screen.__init__(self, session)
		ProtectedScreen.__init__(self)
		# for the skin: first try ParentalControlSetup, then Setup, this allows individual skinning
		self.skinName = ["ParentalControlSetup", "Setup" ]
		self.setup_title = _("Parental control setup")
		self.setTitle(self.setup_title)
		self.onChangedEntry = [ ]
		self.list = []
		ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
		self.createSetup(initial=True)
		self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
		{
			"cancel": self.keyCancel,
			"save": self.keySave,
			"menu": self.closeRecursive,
		}, -2)
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("Save"))
		self.recursive = False
		self.onLayoutFinish.append(self.layoutFinished)
	def layoutFinished(self):
		self.setTitle(self.setup_title)
	def isProtected(self):
		# Override of ProtectedScreen.isProtected: require the PIN to open this
		# setup screen in the configurations where it is not already covered by
		# a protected parent menu.
		return (not config.ParentalControl.setuppinactive.value and config.ParentalControl.servicepinactive.value) or\
			(not config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.configuration.value) or\
			(not config.ParentalControl.config_sections.configuration.value and config.ParentalControl.setuppinactive.value and not config.ParentalControl.config_sections.main_menu.value)
	def createSetup(self, initial=False):
		"""(Re)build the config list; called on every left/right value change."""
		self.reloadLists = None
		self.list = []
		if config.ParentalControl.servicepin[0].value or config.ParentalControl.servicepinactive.value or config.ParentalControl.setuppinactive.value or not initial:
			if config.ParentalControl.servicepin[0].value:
				pin_entry_text = _("Change PIN") + _(": 0000 - default (disabled)")
			else:
				pin_entry_text = _("Set PIN")
			self.changePin = getConfigListEntry(pin_entry_text, NoSave(ConfigNothing()))
			self.list.append(self.changePin)
			self.list.append(getConfigListEntry(_("Protect services"), config.ParentalControl.servicepinactive))
			if config.ParentalControl.servicepinactive.value:
				self.list.append(getConfigListEntry(_("Remember service PIN"), config.ParentalControl.storeservicepin))
				if config.ParentalControl.storeservicepin.value != "never":
					# NOTE(review): "parentel" looks like a typo, but it is a
					# translation key — fixing it would orphan existing .po entries.
					self.list.append(getConfigListEntry(_("Hide parentel locked services"), config.ParentalControl.hideBlacklist))
				self.list.append(getConfigListEntry(_("Protect on epg age"), config.ParentalControl.age))
				self.reloadLists = getConfigListEntry(_("Reload blacklists"), NoSave(ConfigNothing()))
				self.list.append(self.reloadLists)
			self.list.append(getConfigListEntry(_("Protect Screens"), config.ParentalControl.setuppinactive))
			if config.ParentalControl.setuppinactive.value:
				self.list.append(getConfigListEntry(_("Protect main menu"), config.ParentalControl.config_sections.main_menu))
				self.list.append(getConfigListEntry(_("Protect timer menu"), config.ParentalControl.config_sections.timer_menu))
				self.list.append(getConfigListEntry(_("Protect plugin browser"), config.ParentalControl.config_sections.plugin_browser))
				self.list.append(getConfigListEntry(_("Protect configuration"), config.ParentalControl.config_sections.configuration))
				self.list.append(getConfigListEntry(_("Protect standby menu"), config.ParentalControl.config_sections.standby_menu))
				self.list.append(getConfigListEntry(_("Protect software update screen"), config.ParentalControl.config_sections.software_update))
				self.list.append(getConfigListEntry(_("Protect manufacturer reset screen"), config.ParentalControl.config_sections.manufacturer_reset))
				self.list.append(getConfigListEntry(_("Protect movie list"), config.ParentalControl.config_sections.movie_list))
				self.list.append(getConfigListEntry(_("Protect context menus"), config.ParentalControl.config_sections.context_menus))
				if config.usage.menu_sort_mode.value == "user":
					self.list.append(getConfigListEntry(_("Protect menu sort"), config.ParentalControl.config_sections.menu_sort))
		else:
			self.changePin = getConfigListEntry(_("Enable parental protection"), NoSave(ConfigNothing()))
			self.list.append(self.changePin)
		self["config"].list = self.list
		self["config"].setList(self.list)
	def keyOK(self):
		# The "change PIN" and "reload blacklists" pseudo-entries act on OK;
		# everything else behaves like a normal right-arrow value change.
		if self["config"].l.getCurrentSelection() == self.changePin:
			if config.ParentalControl.servicepin[0].value:
				self.session.openWithCallback(self.oldPinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the old PIN code"), windowTitle=_("Enter pin code"))
			else:
				self.oldPinEntered(True)
		elif self["config"].l.getCurrentSelection() == self.reloadLists:
			from Components.ParentalControl import parentalControl
			parentalControl.open()
			self.session.open(MessageBox, _("Lists reloaded!"), MessageBox.TYPE_INFO, timeout=3)
		else:
			ConfigListScreen.keyRight(self)
			self.createSetup()
	def keyLeft(self):
		ConfigListScreen.keyLeft(self)
		self.createSetup()
	def keyRight(self):
		ConfigListScreen.keyRight(self)
		self.createSetup()
	def cancelCB(self, value):
		self.keySave()
	def keyCancel(self):
		if self["config"].isChanged():
			self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
		else:
			self.close()
	def cancelConfirm(self, answer):
		if answer:
			for x in self["config"].list:
				x[1].cancel()
			self.close()
	def keySave(self):
		if self["config"].isChanged():
			for x in self["config"].list:
				x[1].save()
			configfile.save()
			from Components.ParentalControl import parentalControl
			parentalControl.hideBlacklist()
		self.close(self.recursive)
	def closeRecursive(self):
		self.recursive = True
		self.keySave()
	def keyNumberGlobal(self, number):
		pass
	# for summary:
	def changedEntry(self):
		for x in self.onChangedEntry:
			x()
	def getCurrentEntry(self):
		return self["config"].getCurrent()[0]
	def getCurrentValue(self):
		return str(self["config"].getCurrent()[1].getText())
	def createSummary(self):
		from Screens.Setup import SetupSummary
		return SetupSummary
	def oldPinEntered(self, answer):
		# answer: True = old PIN correct (or none set), False = wrong, None = aborted.
		if answer:
			self.session.openWithCallback(self.newPinEntered, PinInput, title=_("Please enter the new PIN code"), windowTitle=_("Enter pin code"))
		elif answer == False:
			self.session.open(MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3)
	def newPinEntered(self, answer):
		if answer is not None:
			self.session.openWithCallback(boundFunction(self.confirmNewPinEntered, answer), PinInput, title=_("Please re-enter the new PIN code"), windowTitle=_("Enter pin code"))
	def confirmNewPinEntered(self, answer1, answer2):
		# Store the new PIN only when both entries match; 0000 disables protection.
		if answer2 is not None:
			if answer1 == answer2:
				warning_text = ""
				if not answer2:
					warning_text = _("You PIN code is 0000. This is the default PIN code and it disable parental control!\n")
				self.session.open(MessageBox, warning_text + _("The PIN code has been changed successfully."), MessageBox.TYPE_INFO, timeout=3)
				config.ParentalControl.servicepin[0].value = answer1
				config.ParentalControl.servicepin[0].save()
				self.createSetup()
			else:
				self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR, timeout=3)
from win32com.shell import shell, shellcon
import win32api
import os
def testSHFileOperation(file_cnt):
    """Batch-move file_cnt temp files via FO_MOVE, verify the moves took
    effect on disk, then FO_DELETE each of the moved copies."""
    temp_dir = os.environ['temp']
    sources = []
    targets = []
    for _ in range(file_cnt):
        src = win32api.GetTempFileName(temp_dir, 'sfo')[0]
        sources.append(src)
        targets.append(os.path.join(temp_dir, 'copy of ' + os.path.split(src)[1]))
    # source/target lists are passed as single NUL-joined strings
    shell.SHFileOperation((0, shellcon.FO_MOVE, '\0'.join(sources), '\0'.join(targets),
                           shellcon.FOF_MULTIDESTFILES | shellcon.FOF_NOCONFIRMATION))
    for fname in sources:
        assert not os.path.isfile(fname)
    for fname in targets:
        assert os.path.isfile(fname)
        shell.SHFileOperation((0, shellcon.FO_DELETE, fname, None,
                               shellcon.FOF_NOCONFIRMATION | shellcon.FOF_NOERRORUI))
def testSHNAMEMAPPINGS(file_cnt):
    ## attempts to move a set of files to names that already exist, and generated filenames should be returned
    ## as a sequence of 2-tuples created from SHNAMEMAPPINGS handle
    temp_dir=os.environ['temp']
    orig_fnames=[win32api.GetTempFileName(temp_dir,'sfo')[0] for x in range(file_cnt)]
    new_fnames=[win32api.GetTempFileName(temp_dir,'sfo')[0] for x in range(file_cnt)]
    pFrom='\0'.join(orig_fnames)
    pTo='\0'.join(new_fnames)
    # FOF_RENAMEONCOLLISION makes the shell invent fresh names for the clashes;
    # FOF_WANTMAPPINGHANDLE asks for the resulting old->new mapping back.
    rc, banyaborted, NameMappings=shell.SHFileOperation((0, shellcon.FO_MOVE, pFrom, pTo,
        shellcon.FOF_MULTIDESTFILES|shellcon.FOF_NOCONFIRMATION|shellcon.FOF_RENAMEONCOLLISION|shellcon.FOF_WANTMAPPINGHANDLE))
    for old_fname, new_fname in NameMappings:
        print 'Old:',old_fname, 'New:', new_fname
    # every collision should have produced one mapping entry
    assert len(NameMappings)==file_cnt
# Smoke tests: batch move/delete, the single-file case, and rename-on-collision mappings.
testSHFileOperation(10)
testSHFileOperation(1)
testSHNAMEMAPPINGS(5)
"""SCons.Platform.win32
Platform-specific initialization for Win32 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/win32.py 4720 2010/03/24 03:14:11 jars"
import os
import os.path
import string
import sys
import tempfile
from SCons.Platform.posix import exitvalmap
from SCons.Platform import TempFileMunge
import SCons.Util
try:
import msvcrt
import win32api
import win32con
msvcrt.get_osfhandle
win32api.SetHandleInformation
win32con.HANDLE_FLAG_INHERIT
except ImportError:
parallel_msg = \
"you do not seem to have the pywin32 extensions installed;\n" + \
"\tparallel (-j) builds may not work reliably with open Python files."
except AttributeError:
parallel_msg = \
"your pywin32 extensions do not support file handle operations;\n" + \
"\tparallel (-j) builds may not work reliably with open Python files."
else:
parallel_msg = None
import __builtin__
_builtin_file = __builtin__.file
_builtin_open = __builtin__.open
def _scons_file(*args, **kw):
fp = apply(_builtin_file, args, kw)
win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()),
win32con.HANDLE_FLAG_INHERIT,
0)
return fp
def _scons_open(*args, **kw):
fp = apply(_builtin_open, args, kw)
win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()),
win32con.HANDLE_FLAG_INHERIT,
0)
return fp
__builtin__.file = _scons_file
__builtin__.open = _scons_open
# The upshot of all this is that, if you are using Python 1.5.2,
# you had better have cmd or command.com in your PATH when you run
# scons.
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    """Spawn *cmd* through the shell *sh*, capturing the child's stdout and
    stderr into the given file-like objects by way of temporary files.

    Returns the child's exit status (127 when no shell is available)."""
    # There is no direct way to do that in python. What we do
    # here should work for most cases:
    # In case stdout (stderr) is not redirected to a file,
    # we redirect it into a temporary file tmpFileStdout
    # (tmpFileStderr) and copy the contents of this file
    # to stdout (stderr) given in the argument
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())
        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if (string.find( arg, ">", 0, 1 ) != -1 or
                string.find( arg, "1>", 0, 2 ) != -1):
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if string.find( arg, "2>", 0, 2 ) != -1:
                stderrRedirected = 1
        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))
        # actually do the spawn
        try:
            args = [sh, '/C', escape(string.join(args)) ]
            ret = os.spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # catch any error
            try:
                ret = exitvalmap[e[0]]
            except KeyError:
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
            if stderr is not None:
                stderr.write("scons: %s: %s\n" % (cmd, e[1]))
        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout is not None and stdoutRedirected == 0:
            try:
                stdout.write(open( tmpFileStdout, "r" ).read())
                os.remove( tmpFileStdout )
            except (IOError, OSError):
                pass
        if stderr is not None and stderrRedirected == 0:
            try:
                stderr.write(open( tmpFileStderr, "r" ).read())
                os.remove( tmpFileStderr )
            except (IOError, OSError):
                pass
        return ret
def exec_spawn(l, env):
    """Spawn the argument list *l* with environment *env* and wait for it,
    mapping OSError codes to shell-style exit statuses (127 for unknown)."""
    try:
        result = os.spawnve(os.P_WAIT, l[0], l, env)
    except OSError, e:
        try:
            result = exitvalmap[e[0]]
            sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
        except KeyError:
            result = 127
            # Build a short command description for the message, truncating
            # overly long third arguments.
            if len(l) > 2:
                if len(l[2]) < 1000:
                    command = string.join(l[0:3])
                else:
                    command = l[0]
            else:
                command = l[0]
            sys.stderr.write("scons: unknown OSError exception code %d - '%s': %s\n" % (e[0], command, e[1]))
    return result
def spawn(sh, escape, cmd, args, env):
    """Spawn *args* through the command interpreter *sh*; return exit status."""
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    return exec_spawn([sh, '/C', escape(string.join(args))], env)
# Windows does not allow special characters in file names anyway, so no
# need for a complex escape function, we will just quote the arg, except
# that "cmd /c" requires that if an argument ends with a backslash it
# needs to be escaped so as not to interfere with closing double quote
# that we add.
def escape(x):
    """Quote a command-line argument for "cmd /C".

    A trailing backslash would escape the closing double quote we append,
    so it is doubled first.  Uses a slice (x[-1:]) so that an empty
    argument no longer raises IndexError and simply becomes "".
    """
    if x[-1:] == '\\':
        x = x + '\\'
    return '"' + x + '"'
# Get the windows system directory name
# Cached result of get_system_root(); computed once per process.
_system_root = None
def get_system_root():
    """Return the Windows system root directory, preferring the registry
    (NT key first, then 9x) over the SystemRoot environment variable."""
    global _system_root
    if _system_root is not None:
        return _system_root
    # A resonable default if we can't read the registry
    val = os.environ.get('SystemRoot', "C:\\WINDOWS")
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                          'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            except KeyboardInterrupt:
                raise
            except:
                pass
    _system_root = val
    return val
# Get the location of the program files directory
def get_program_files_dir():
    """Return the Windows "Program Files" directory, from the registry when
    readable, otherwise derived from the system root's drive."""
    # Now see if we can look in the registry...
    val = ''
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows Program Files directory
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'ProgramFilesDir')
        except SCons.Util.RegError:
            val = ''
            pass
    if val == '':
        # A reasonable default if we can't read the registry
        # (Actually, it's pretty reasonable even if we can :-)
        val = os.path.join(os.path.dirname(get_system_root()),"Program Files")
    return val
# Determine which windows CPU were running on.
class ArchDefinition:
    """
    A class for defining architecture-specific settings and logic.

    ``arch`` is the canonical architecture name, ``synonyms`` the list of
    alternate spellings that should map to it.
    """
    def __init__(self, arch, synonyms=None):
        # A mutable default argument ([]) would be shared by every instance
        # constructed without an explicit synonyms list; normalize None to a
        # fresh list instead.  Passing a list explicitly behaves as before.
        self.arch = arch
        self.synonyms = synonyms or []
# The architectures this platform module knows about, with the alternate
# spellings that should resolve to each canonical name.
SupportedArchitectureList = [
    ArchDefinition(
        'x86',
        ['i386', 'i486', 'i586', 'i686'],
    ),
    ArchDefinition(
        'x86_64',
        ['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
    ),
    ArchDefinition(
        'ia64',
        ['IA64'],
    ),
]
# Index every canonical name and every synonym to its ArchDefinition for
# O(1) lookup in get_architecture().
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
    SupportedArchitectureMap[a.arch] = a
    for s in a.synonyms:
        SupportedArchitectureMap[s] = a
def get_architecture(arch=None):
    """Returns the definition for the specified architecture string.

    If no string is specified, the system default is returned (as defined
    by the PROCESSOR_ARCHITEW6432 or PROCESSOR_ARCHITECTURE environment
    variables).  Unknown names yield an empty ArchDefinition.
    """
    if arch is None:
        arch = (os.environ.get('PROCESSOR_ARCHITEW6432')
                or os.environ.get('PROCESSOR_ARCHITECTURE'))
    return SupportedArchitectureMap.get(arch, ArchDefinition('', ['']))
def generate(env):
    """Populate the construction environment *env* with win32-specific
    variables: shell location, spawn/escape hooks, file-name affixes and a
    minimal ENV imported from the external environment."""
    # Attempt to find cmd.exe (for WinNT/2k/XP) or
    # command.com for Win9x
    cmd_interp = ''
    # First see if we can look in the registry...
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            cmd_interp = os.path.join(val, 'System32\\cmd.exe')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                          'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
                cmd_interp = os.path.join(val, 'command.com')
            except KeyboardInterrupt:
                raise
            except:
                pass
    # For the special case of not having access to the registry, we
    # use a temporary path and pathext to attempt to find the command
    # interpreter. If we fail, we try to find the interpreter through
    # the env's PATH. The problem with that is that it might not
    # contain an ENV and a PATH.
    if not cmd_interp:
        systemroot = get_system_root()
        tmp_path = systemroot + os.pathsep + \
                   os.path.join(systemroot,'System32')
        tmp_pathext = '.com;.exe;.bat;.cmd'
        if os.environ.has_key('PATHEXT'):
            tmp_pathext = os.environ['PATHEXT']
        cmd_interp = SCons.Util.WhereIs('cmd', tmp_path, tmp_pathext)
        if not cmd_interp:
            cmd_interp = SCons.Util.WhereIs('command', tmp_path, tmp_pathext)
    if not cmd_interp:
        cmd_interp = env.Detect('cmd')
        if not cmd_interp:
            cmd_interp = env.Detect('command')
    if not env.has_key('ENV'):
        env['ENV'] = {}
    # Import things from the external environment to the construction
    # environment's ENV. This is a potential slippery slope, because we
    # *don't* want to make builds dependent on the user's environment by
    # default. We're doing this for SystemRoot, though, because it's
    # needed for anything that uses sockets, and seldom changes, and
    # for SystemDrive because it's related.
    #
    # Weigh the impact carefully before adding other variables to this list.
    import_env = [ 'SystemDrive', 'SystemRoot', 'TEMP', 'TMP' ]
    for var in import_env:
        v = os.environ.get(var)
        if v:
            env['ENV'][var] = v
    if not env['ENV'].has_key('COMSPEC'):
        v = os.environ.get("COMSPEC")
        if v:
            env['ENV']['COMSPEC'] = v
    env.AppendENVPath('PATH', get_system_root() + '\System32')
    env['ENV']['PATHEXT'] = '.COM;.EXE;.BAT;.CMD'
    # Platform-conventional file-name affixes and tool hooks.
    env['OBJPREFIX']      = ''
    env['OBJSUFFIX']      = '.obj'
    env['SHOBJPREFIX']    = '$OBJPREFIX'
    env['SHOBJSUFFIX']    = '$OBJSUFFIX'
    env['PROGPREFIX']     = ''
    env['PROGSUFFIX']     = '.exe'
    env['LIBPREFIX']      = ''
    env['LIBSUFFIX']      = '.lib'
    env['SHLIBPREFIX']    = ''
    env['SHLIBSUFFIX']    = '.dll'
    env['LIBPREFIXES']    = [ '$LIBPREFIX' ]
    env['LIBSUFFIXES']    = [ '$LIBSUFFIX' ]
    env['PSPAWN']         = piped_spawn
    env['SPAWN']          = spawn
    env['SHELL']          = cmd_interp
    env['TEMPFILE']       = TempFileMunge
    env['TEMPFILEPREFIX'] = '@'
    env['MAXLINELENGTH']  = 2048
    env['ESCAPE']         = escape
    env['HOST_OS']        = 'win32'
    env['HOST_ARCH']      = get_architecture().arch
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
from typing import TYPE_CHECKING, Any
from langchain_core.document_loaders import Blob, BlobLoader
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
FileSystemBlobLoader,
YoutubeAudioLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "BlobLoader": "langchain_community.document_loaders",
    "Blob": "langchain_community.document_loaders",
    "FileSystemBlobLoader": "langchain_community.document_loaders",
    "YoutubeAudioLoader": "langchain_community.document_loaders",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): unresolved names are delegated to
    the importer built above, which handles the deprecated lookups.
    """
    return _import_attribute(name)


__all__ = [
    "Blob",
    "BlobLoader",
    "FileSystemBlobLoader",
    "YoutubeAudioLoader",
]
{
"headers": [
{
"source": "/fonts/(.*).woff2",
"headers": [
{
"key": "Cache-Control",
"value": "public, max-age=31536000, immutable"
}
]
}
]
} | json | github | https://github.com/facebook/react | compiler/apps/playground/vercel.json |
# -*- coding: utf-8 -*-
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def ok(n):
    """Expected ``import_data`` result tuple for a fully successful import.

    :param int n: number of records which should have been imported
    """
    no_failed, no_warn, no_skip = 0, 0, 0
    return n, no_failed, no_warn, no_skip
def error(row, message, record=None, **kwargs):
    """Expected ``import_data`` result tuple for a failed import.

    The record ``record`` (merged with ``kwargs``) at 1-based line ``row``
    failed with the error message ``message``.

    :param str message: importer error message
    :param dict record: partially-imported record values, if any
    """
    merged = {}
    if record:
        merged.update(record)
    merged.update(kwargs)
    return -1, merged, "Line %d : %s" % (row, message), ''
def values(seq, field='value'):
    """Collect ``item[field]`` for every mapping in ``seq``, in order."""
    collected = []
    for item in seq:
        collected.append(item[field])
    return collected
class ImporterCase(common.TransactionCase):
    """Base TransactionCase bound to one target model (``model_name``),
    with helpers to import rows, read values back and manage external ids."""
    model_name = False

    def __init__(self, *args, **kwargs):
        super(ImporterCase, self).__init__(*args, **kwargs)
        self.model = None
    def setUp(self):
        super(ImporterCase, self).setUp()
        self.model = self.registry(self.model_name)
    def import_(self, fields, rows, context=None):
        # Returns the raw import_data() result tuple (compare with ok()/error()).
        return self.model.import_data(
            self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
    def read(self, fields=('value',), domain=(), context=None):
        return self.model.read(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            fields=fields, context=context)
    def browse(self, domain=(), context=None):
        return self.model.browse(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            context=context)
    def xid(self, record):
        """Return the external id of ``record``, creating a '__test__.' one
        from its display name if none exists yet."""
        ModelData = self.registry('ir.model.data')
        ids = ModelData.search(
            self.cr, openerp.SUPERUSER_ID,
            [('model', '=', record._table_name), ('res_id', '=', record.id)])
        if ids:
            d = ModelData.read(
                self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
            if d['module']:
                return '%s.%s' % (d['module'], d['name'])
            return d['name']
        name = dict(record.name_get())[record.id]
        # fix dotted name_get results, otherwise xid lookups blow up
        name = name.replace('.', '-')
        ModelData.create(self.cr, openerp.SUPERUSER_ID, {
            'name': name,
            'model': record._table_name,
            'res_id': record.id,
            'module': '__test__'
        })
        return '__test__.' + name
class test_ids_stuff(ImporterCase):
    """Imports keyed by database id (``.id``) and external id (``id``)."""
    model_name = 'export.integer'

    def test_create_with_id(self):
        # a *new* record can not be created with a pre-set database id
        self.assertEqual(
            self.import_(['.id', 'value'], [['42', '36']]),
            error(1, u"Unknown database identifier '42'"))
    def test_create_with_xid(self):
        self.assertEqual(
            self.import_(['id', 'value'], [['somexmlid', '42']]),
            ok(1))
        self.assertEqual(
            'somexmlid',
            self.xid(self.browse()[0]))
    def test_update_with_id(self):
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            36,
            self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
        self.assertEqual(
            self.import_(['.id', 'value'], [[str(id), '42']]),
            ok(1))
        self.assertEqual(
            [42], # updated value to imported
            values(self.read()))
    def test_update_with_xid(self):
        self.import_(['id', 'value'], [['somexmlid', '36']])
        self.assertEqual([36], values(self.read()))
        self.import_(['id', 'value'], [['somexmlid', '1234567']])
        self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
    """Import conversions into a boolean field: recognized false spellings
    versus everything else (which coerces to True)."""
    model_name = 'export.boolean'

    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], []),
            ok(0))
    def test_exported(self):
        self.assertEqual(
            self.import_(['value'], [
                ['False'],
                ['True'],
            ]),
            ok(2))
        records = self.read()
        self.assertEqual([
            False,
            True,
        ], values(records))
    def test_falses(self):
        # only these spellings are treated as false
        self.assertEqual(
            self.import_(['value'], [
                [u'0'],
                [u'no'],
                [u'false'],
                [u'FALSE'],
                [u''],
            ]),
            ok(5))
        self.assertEqual([
                False,
                False,
                False,
                False,
                False,
            ],
            values(self.read()))
    def test_trues(self):
        # any unrecognized token coerces to True, even "off"/"nil"-style values
        self.assertEqual(
            self.import_(['value'], [
                ['off'],
                ['None'],
                ['nil'],
                ['()'],
                ['f'],
                ['#f'],
                # Problem: OpenOffice (and probably excel) output localized booleans
                ['VRAI'],
                [u'OFF'],
            ]),
            ok(8))
        self.assertEqual(
            [True] * 8,
            values(self.read()))
class test_integer_field(ImporterCase):
    """Import conversions into an integer field, including range limits."""
    model_name = 'export.integer'

    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            ok(0))
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual(
            [False],
            values(self.read()))
    def test_zero(self):
        self.assertEqual(
            self.import_(['value'], [['0']]),
            ok(1))
        self.assertEqual(
            self.import_(['value'], [['-0']]),
            ok(1))
        self.assertEqual([False, False], values(self.read()))
    def test_positives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['1'],
                ['42'],
                [str(2**31-1)],
                ['12345678']
            ]),
            ok(4))
        self.assertEqual([
            1, 42, 2**31-1, 12345678
        ], values(self.read()))
    def test_negatives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['-1'],
                ['-42'],
                [str(-(2**31 - 1))],
                [str(-(2**31))],
                ['-12345678']
            ]),
            ok(5))
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678
        ], values(self.read()))
    @mute_logger('openerp.sql_db')
    def test_out_of_range(self):
        # values outside the 32-bit range are rejected by PostgreSQL
        self.assertEqual(
            self.import_(['value'], [[str(2**31)]]),
            error(1, "integer out of range\n"))
        # auto-rollbacks if error is in process_liness, but not during
        # ir.model.data write. Can differentiate because former ends lines
        # error lines with "!"
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value'], [[str(-2**32)]]),
            error(1, "integer out of range\n"))
    def test_nonsense(self):
        self.assertEqual(
            self.import_(['value'], [['zorglub']]),
            error(1, u"'zorglub' does not seem to be an integer for field 'unknown'"))
class test_float_field(ImporterCase):
    """Import conversions into a float field (no range limit, decimals)."""
    model_name = 'export.float'

    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            ok(0))
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual(
            [False],
            values(self.read()))
    def test_zero(self):
        self.assertEqual(
            self.import_(['value'], [['0']]),
            ok(1))
        self.assertEqual(
            self.import_(['value'], [['-0']]),
            ok(1))
        self.assertEqual([False, False], values(self.read()))
    def test_positives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['1'],
                ['42'],
                [str(2**31-1)],
                ['12345678'],
                [str(2**33)],
                ['0.000001'],
            ]),
            ok(6))
        self.assertEqual([
            1, 42, 2**31-1, 12345678, 2.0**33, .000001
        ], values(self.read()))
    def test_negatives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['-1'],
                ['-42'],
                [str(-2**31 + 1)],
                [str(-2**31)],
                ['-12345678'],
                [str(-2**33)],
                ['-0.000001'],
            ]),
            ok(7))
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
        ], values(self.read()))
    def test_nonsense(self):
        self.assertEqual(
            self.import_(['value'], [['foobar']]),
            error(1, u"'foobar' does not seem to be a number for field 'unknown'"))
class test_string_field(ImporterCase):
    """Import into a size-bounded char field: values are truncated, not
    rejected (apparently to 16 characters, per the expected reads)."""
    model_name = 'export.string.bounded'

    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual([False], values(self.read()))
    def test_imported(self):
        self.assertEqual(
            self.import_(['value'], [
                [u'foobar'],
                [u'foobarbaz'],
                [u'Með suð í eyrum við spilum endalaust'],
                [u"People 'get' types. They use them all the time. Telling "
                 u"someone he can't pound a nail with a banana doesn't much "
                 u"surprise him."]
            ]),
            ok(4))
        self.assertEqual([
            u"foobar",
            u"foobarbaz",
            u"Með suð í eyrum ",
            u"People 'get' typ",
        ], values(self.read()))
class test_unbound_string_field(ImporterCase):
    """Import into an unbounded char field: values are stored verbatim."""
    model_name = 'export.string'

    def test_imported(self):
        self.assertEqual(
            self.import_(['value'], [
                [u'í dag viðrar vel til loftárása'],
                # ackbar.jpg
                [u"If they ask you about fun, you tell them – fun is a filthy"
                 u" parasite"]
            ]),
            ok(2))
        self.assertEqual([
            u"í dag viðrar vel til loftárása",
            u"If they ask you about fun, you tell them – fun is a filthy parasite"
        ], values(self.read()))
class test_text(ImporterCase):
    """Import into a text field: long multi-line content is kept intact."""
    model_name = 'export.text'

    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual([False], values(self.read()))
    def test_imported(self):
        s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
             u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
             u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
             u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
             u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
             u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
        self.assertEqual(
            self.import_(['value'], [[s]]),
            ok(1))
        self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
    """Import into a static selection field: labels, raw values and
    translated labels must all resolve to the stored value."""
    model_name = 'export.selection'
    # French translations installed for the selection labels in
    # test_imported_translated below.
    translations_fr = [
        ("Qux", "toto"),
        ("Bar", "titi"),
        ("Foo", "tete"),
    ]

    def test_imported(self):
        self.assertEqual(
            self.import_(['value'], [
                ['Qux'],
                ['Bar'],
                ['Foo'],
                ['2'],
            ]),
            ok(4))
        self.assertEqual([3, 2, 1, 2], values(self.read()))
    def test_imported_translated(self):
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        # translated labels resolve when importing in the fr_FR context
        self.assertEqual(
            self.import_(['value'], [
                ['toto'],
                ['tete'],
                ['titi'],
            ], context={'lang': 'fr_FR'}),
            ok(3))
        self.assertEqual([3, 1, 2], values(self.read()))
        # the untranslated source label still works in that context
        self.assertEqual(
            self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'}),
            ok(1))
    def test_invalid(self):
        self.assertEqual(
            self.import_(['value'], [['Baz']]),
            error(1, u"Value 'Baz' not found in selection field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value'], [[42]]),
            error(1, u"Value '42' not found in selection field 'unknown'"))
class test_selection_function(ImporterCase):
    """ Import tests for a selection field whose options are produced by a
    function rather than a static list.
    """
    model_name = 'export.selection.function'
    # French translations for the function-produced labels; installed by
    # test_translated below.
    translations_fr = [
        ("Corge", "toto"),
        ("Grault", "titi"),
        ("Wheee", "tete"),
        ("Moog", "tutu"),
    ]
    def test_imported(self):
        """ import uses fields_get, so translates import label (may or may not
        be good news) *and* serializes the selection function to reverse it:
        import does not actually know that the selection field uses a function
        """
        # NOTE: conflict between a value and a label => ?
        self.assertEqual(
            self.import_(['value'], [
                ['3'],
                ["Grault"],
            ]),
            ok(2))
        self.assertEqual(
            ['3', '1'],
            values(self.read()))
    def test_translated(self):
        """ Expects output of selection function returns translated labels
        """
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        # Translated labels resolve under the French context.
        self.assertEqual(
            self.import_(['value'], [
                ['toto'],
                ['tete'],
            ], context={'lang': 'fr_FR'}),
            ok(2))
        # Source labels remain accepted as well.
        self.assertEqual(
            self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'}),
            ok(1))
class test_m2o(ImporterCase):
    """ Import tests for many2one fields: linking by display name, by
    external id (value/id) and by database id (value/.id), plus the
    corresponding failure modes.
    """
    model_name = 'export.many2one'
    def test_by_name(self):
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # get its name
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        self.assertEqual(
            self.import_(['value'], [
                # import by name_get
                [name1],
                [name1],
                [name2],
            ]),
            ok(3))
        # correct ids assigned to corresponding records
        self.assertEqual([
            (integer_id1, name1),
            (integer_id1, name1),
            (integer_id2, name2),],
            values(self.read()))
    def test_by_xid(self):
        # Link by external id via the value/id column.
        ExportInteger = self.registry('export.integer')
        integer_id = ExportInteger.create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        xid = self.xid(ExportInteger.browse(
            self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
        self.assertEqual(
            self.import_(['value/id'], [[xid]]),
            ok(1))
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_id(self):
        # Link by raw database id via the value/.id column.
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        self.assertEqual(
            self.import_(['value/.id'], [[integer_id]]),
            ok(1))
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_names(self):
        # When several records share a display name, import links to the
        # first match.
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        # names should be the same
        self.assertEqual(name1, name2)
        self.assertEqual(
            self.import_(['value'], [[name2]]),
            ok(1))
        self.assertEqual([
            (integer_id1, name1)
        ], values(self.read()))
    def test_fail_by_implicit_id(self):
        """ Can't implicitly import records by id
        """
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            self.import_(['value'], [
                # import by id, without specifying it
                [integer_id1],
                [integer_id2],
                [integer_id1],
            ]),
            error(1, u"No matching record found for name '%s' in field 'unknown'" % integer_id1))
    def test_sub_field(self):
        """ Does not implicitly create the record, does not warn that you can't
        import m2o subfields (at all)...
        """
        self.assertEqual(
            self.import_(['value/value'], [['42']]),
            error(1, u"Can not create Many-To-One records indirectly, import the field separately"))
    def test_fail_noids(self):
        # Each linking style fails with its own specific error message.
        self.assertEqual(
            self.import_(['value'], [['nameisnoexist:3']]),
            error(1, u"No matching record found for name 'nameisnoexist:3' in field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value/id'], [['noxidhere']]),
            error(1, u"No matching record found for external id 'noxidhere' in field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value/.id'], [[66]]),
            error(1, u"No matching record found for database id '66' in field 'unknown'"))
class test_m2m(ImporterCase):
    """ Import tests for many2many fields: comma-separated lists of
    database ids, external ids or display names, and replacement of the
    relation when re-importing an existing record.
    """
    model_name = 'export.many2many'
    # apparently, one and only thing which works is a
    # csv_internal_sep-separated list of ids, xids, or names (depending if
    # m2m/.id, m2m/id or m2m[/anythingelse]
    def test_ids(self):
        id1 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        id5 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
        # Comma-separated database ids in the value/.id column.
        self.assertEqual(
            self.import_(['value/.id'], [
                ['%d,%d' % (id1, id2)],
                ['%d,%d,%d' % (id1, id3, id4)],
                ['%d,%d,%d' % (id1, id2, id3)],
                ['%d' % id5]
            ]),
            ok(4))
        ids = lambda records: [record.id for record in records]
        b = self.browse()
        self.assertEqual(ids(b[0].value), [id1, id2])
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(ids(b[2].value), [id1, id2, id3])
        self.assertEqual(values(b[2].value), [3, 44, 84])
    def test_noids(self):
        # Nonexistent database ids are rejected.
        self.assertEqual(
            self.import_(['value/.id'], [['42']]),
            error(1, u"No matching record found for database id '42' in field 'unknown'"))
    def test_xids(self):
        # Comma-separated external ids in the value/id column.
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        self.assertEqual(
            self.import_(['value/id'], [
                ['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
                ['%s' % self.xid(records[3])],
                ['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
            ]),
            ok(3))
        b = self.browse()
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(values(b[2].value), [44, 84])
    def test_noxids(self):
        # Nonexistent external ids are rejected.
        self.assertEqual(
            self.import_(['value/id'], [['noxidforthat']]),
            error(1, u"No matching record found for external id 'noxidforthat' in field 'unknown'"))
    def test_names(self):
        # Comma-separated display names in the plain value column.
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        name = lambda record: dict(record.name_get())[record.id]
        self.assertEqual(
            self.import_(['value'], [
                ['%s,%s' % (name(records[1]), name(records[2]))],
                ['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
                ['%s,%s' % (name(records[0]), name(records[3]))],
            ]),
            ok(3))
        b = self.browse()
        self.assertEqual(values(b[1].value), [3, 44, 84])
        self.assertEqual(values(b[2].value), [3, 9])
    def test_nonames(self):
        # Unknown display names are rejected.
        self.assertEqual(
            self.import_(['value'], [['wherethem2mhavenonames']]),
            error(1, u"No matching record found for name 'wherethem2mhavenonames' in field 'unknown'"))
    def test_import_to_existing(self):
        # Re-importing the same external id replaces (not extends) the m2m
        # relation of the existing record.
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        xid = 'myxid'
        self.assertEqual(
            self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]]),
            ok(1))
        self.assertEqual(
            self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]]),
            ok(1))
        b = self.browse()
        self.assertEqual(len(b), 1)
        # TODO: replacement of existing m2m values is correct?
        self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
    """ Import tests for one2many fields: subfield import, linking to
    existing child records by database id, and continuation rows (rows with
    an empty 'const' cell extend the previous record's o2m lines).

    Fixes relative to the original version:
    * ``except ValueError, e`` (Python 2-only syntax) replaced with
      ``except ValueError as e`` (valid since Python 2.6, forward-compatible).
    * ``test_link_inline`` now fails explicitly when no exception is raised
      instead of silently passing.
    """
    model_name = 'export.one2many'
    def test_name_get(self):
        # o2m fields cannot be linked by display name.
        s = u'Java is a DSL for taking large XML files and converting them to' \
            u' stack traces'
        self.assertEqual(
            self.import_(
                ['const', 'value'],
                [['5', s]]),
            error(1, u"No matching record found for name '%s' in field 'unknown'" % s))
    def test_single(self):
        # One parent row with a single o2m line imported via a subfield.
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63']
            ]),
            ok(1))
        (b,) = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.value), [63])
    def test_multicore(self):
        # Two rows with non-empty 'const' cells create two parent records.
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63'],
                ['6', '64'],
            ]),
            ok(2))
        b1, b2 = self.browse()
        self.assertEqual(b1.const, 5)
        self.assertEqual(values(b1.value), [63])
        self.assertEqual(b2.const, 6)
        self.assertEqual(values(b2.value), [64])
    def test_multisub(self):
        # Rows with an empty 'const' cell are continuation rows: they add
        # o2m lines to the preceding parent record.
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63'],
                ['', '64'],
                ['', '65'],
                ['', '66'],
            ]),
            ok(4))
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
    def test_multi_subfields(self):
        # Multiple subfields of the same o2m can be imported side by side.
        self.assertEqual(
            self.import_(['value/str', 'const', 'value/value'], [
                ['this', '5', '63'],
                ['is', '', '64'],
                ['the', '', '65'],
                ['rhythm', '', '66'],
            ]),
            ok(4))
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
        self.assertEqual(
            values(b.value, 'str'),
            'this is the rhythm'.split())
    def test_link_inline(self):
        # A csv-style "id1,id2" cell is *not* accepted for o2m/.id: the
        # whole cell is coerced to a single integer and fails.
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        try:
            self.import_(['const', 'value/.id'], [
                ['42', '%d,%d' % (id1, id2)]
            ])
        except ValueError as e:
            # should be Exception(Database ID doesn't exist: export.one2many.child : $id1,$id2)
            self.assertIs(type(e), ValueError)
            self.assertEqual(
                e.args[0],
                "invalid literal for int() with base 10: '%d,%d'" % (id1, id2))
        else:
            # The original test silently passed when nothing was raised;
            # make the expected failure explicit.
            self.fail("import_ should have raised a ValueError")
    def test_link(self):
        # One existing child id per row links the children to the parent.
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        self.assertEqual(
            self.import_(['const', 'value/.id'], [
                ['42', str(id1)],
                ['', str(id2)],
            ]),
            ok(2))
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
    def test_link_2(self):
        # Linking by database id can be combined with updating subfields of
        # the linked children in the same import.
        O2M_c = self.registry('export.one2many.child')
        id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        self.assertEqual(
            self.import_(['const', 'value/.id', 'value/value'], [
                ['42', str(id1), '1'],
                ['', str(id2), '2'],
            ]),
            ok(2))
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        self.assertEqual(values(b.value), [1, 2])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
    """ Import tests for a model with two one2many fields imported
    simultaneously: continuation rows may fill either or both children.
    """
    model_name = 'export.one2many.multiple'
    def test_multi_mixed(self):
        # Continuation rows feeding both o2m columns at once.
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], [
                ['5', '11', '21'],
                ['', '12', '22'],
                ['', '13', '23'],
                ['', '14', ''],
            ]),
            ok(4))
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi(self):
        # First child1 rows, then child2 rows, with one shared leading row.
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], [
                ['5', '11', '21'],
                ['', '12', ''],
                ['', '13', ''],
                ['', '14', ''],
                ['', '', '22'],
                ['', '', '23'],
            ]),
            ok(6))
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi_fullsplit(self):
        # Each o2m column entirely on its own rows.
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], [
                ['5', '11', ''],
                ['', '12', ''],
                ['', '13', ''],
                ['', '14', ''],
                ['', '', '21'],
                ['', '', '22'],
                ['', '', '23'],
            ]),
            ok(7))
        [b] = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
# function, related, reference: written to db as-is...
# => function uses @type for value coercion/conversion | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# (c) 2018, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import json
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.net_tools.nios.api import WapiInventory
from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
# Candidate configuration file locations, checked in order; the first one
# that exists on disk is used (see main()).
CONFIG_FILES = [
    '/etc/ansible/infoblox.yaml',
    '/etc/ansible/infoblox.yml'
]
def parse_args():
    """Parse the standard dynamic-inventory command line (--list / --host)."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--list',
        action='store_true',
        help='List host records from NIOS for use in Ansible',
    )
    cli.add_argument(
        '--host',
        help='List meta data about single host (not used)',
    )
    return cli.parse_args()
def main():
    """Build and print the Ansible JSON inventory from Infoblox NIOS.

    Reads the first existing config file from CONFIG_FILES, connects to the
    WAPI endpoint described by its ``provider`` section, fetches host
    records (optionally restricted by the ``filters`` section) and writes
    the dynamic-inventory JSON structure to stdout.  Exits non-zero when no
    config file is found or the WAPI connection setup fails.
    """
    args = parse_args()

    # Locate the configuration file; abort when none of the candidates exist.
    for config_file in CONFIG_FILES:
        if os.path.exists(config_file):
            break
    else:
        sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
        sys.exit(-1)

    try:
        loader = DataLoader()
        config = loader.load_from_file(config_file)
        provider = config.get('provider') or {}
        wapi = WapiInventory(provider)
    except Exception as exc:
        sys.stdout.write(to_text(exc))
        sys.exit(-1)

    if args.host:
        host_filter = {'name': args.host}
    else:
        host_filter = {}

    # BUG FIX: config.get('filters') returns None when the key is absent,
    # which previously crashed on the .get('view') call below; default to an
    # empty dict (same pattern as 'provider' above).
    config_filters = config.get('filters') or {}
    if config_filters.get('view') is not None:
        host_filter['view'] = config_filters['view']

    if config_filters.get('extattrs'):
        extattrs = normalize_extattrs(config_filters['extattrs'])
    else:
        extattrs = {}

    hostvars = {}
    inventory = {
        '_meta': {
            'hostvars': hostvars
        }
    }

    return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
    hosts = wapi.get_object('record:host',
                            host_filter,
                            extattrs=extattrs,
                            return_fields=return_fields)

    if hosts:
        for item in hosts:
            view = item['view']
            name = item['name']

            # Hosts are grouped by their NIOS DNS view.
            if view not in inventory:
                inventory[view] = {'hosts': []}
            inventory[view]['hosts'].append(name)

            hostvars[name] = {
                'view': view
            }

            # Extensible attributes: keys prefixed 'ansible_' become
            # top-level host vars, the rest are nested under 'extattrs'.
            if item.get('extattrs'):
                for key, value in iteritems(flatten_extattrs(item['extattrs'])):
                    if key.startswith('ansible_'):
                        hostvars[name][key] = value
                    else:
                        if 'extattrs' not in hostvars[name]:
                            hostvars[name]['extattrs'] = {}
                        hostvars[name]['extattrs'][key] = value

    sys.stdout.write(json.dumps(inventory, indent=4))
    sys.exit(0)
# Run only when executed directly (Ansible invokes this file as a script).
if __name__ == '__main__':
    main()
#
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See https://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
from urllib.parse import urljoin
# XInclude namespace in ElementTree's Clark notation ("{uri}localname"),
# plus the two fully-qualified tags recognized by this module.
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
# For security reasons, the inclusion depth is limited to this read-only value by default.
DEFAULT_MAX_INCLUSION_DEPTH = 6
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
    """Raised when an XInclude directive is malformed or cannot be resolved."""
    pass
class LimitedRecursiveIncludeError(FatalIncludeError):
    """Raised when the maximum allowed inclusion depth is exceeded."""
    pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding (UTF-8 by default for "text").
# @return The expanded resource. If the parse mode is "xml", this
# is an Element instance. If the parse mode is "text", this
# is a string. If the loader fails, it can return None
# or raise an OSError exception.
# @throws OSError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
if parse == "xml":
with open(href, 'rb') as file:
data = ElementTree.parse(file).getroot()
else:
if not encoding:
encoding = 'UTF-8'
with open(href, 'r', encoding=encoding) as file:
data = file.read()
return data
##
# Expand XInclude directives.
#
# @param elem Root Element or any ElementTree of a tree to be expanded
# @param loader Optional resource loader. If omitted, it defaults
#     to {@link default_loader}. If given, it should be a callable
#     that implements the same interface as <b>default_loader</b>.
# @param base_url The base URL of the original file, to resolve
#     relative include file references.
# @param max_depth The maximum number of recursive inclusions.
#     Limited to reduce the risk of malicious content explosion.
#     Pass None to disable the limitation.
# @throws LimitedRecursiveIncludeError If the {@link max_depth} was exceeded.
# @throws FatalIncludeError If the function fails to include a given
#     resource, or if the tree contains malformed XInclude elements.
# @throws OSError If the function fails to load a given resource.
# @throws ValueError If negative {@link max_depth} is passed.
# @returns None. Modifies tree pointed by {@link elem}

def include(elem, loader=None, base_url=None,
            max_depth=DEFAULT_MAX_INCLUSION_DEPTH):
    # Normalize the depth limit: None means unlimited (tracked internally
    # as -1); explicit negative values are an error.
    if max_depth is None:
        max_depth = -1
    elif max_depth < 0:
        raise ValueError("expected non-negative depth or None for 'max_depth', got %r" % max_depth)
    # Accept a whole ElementTree as well as a bare Element.
    root = elem.getroot() if hasattr(elem, 'getroot') else elem
    if loader is None:
        loader = default_loader
    _include(root, loader, base_url, max_depth, set())
def _include(elem, loader, base_url, max_depth, _parent_hrefs):
    # Recursive worker for include(): expands xi:include children of *elem*
    # in place.  *_parent_hrefs* holds the hrefs currently being expanded on
    # this branch (cycle detection); *max_depth* is the number of remaining
    # allowed nesting levels (-1 means unlimited).
    # look for xinclude elements
    i = 0
    while i < len(elem):
        e = elem[i]
        if e.tag == XINCLUDE_INCLUDE:
            # process xinclude directive
            href = e.get("href")
            # Resolve the reference relative to the including document.
            if base_url:
                href = urljoin(base_url, href)
            parse = e.get("parse", "xml")
            if parse == "xml":
                if href in _parent_hrefs:
                    raise FatalIncludeError("recursive include of %s" % href)
                if max_depth == 0:
                    raise LimitedRecursiveIncludeError(
                        "maximum xinclude depth reached when including file %s" % href)
                _parent_hrefs.add(href)
                node = loader(href, parse)
                if node is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                    )
                node = copy.copy(node) # FIXME: this makes little sense with recursive includes
                # Expand nested includes inside the loaded subtree, resolving
                # its relative hrefs against the included document's URL.
                _include(node, loader, href, max_depth - 1, _parent_hrefs)
                _parent_hrefs.remove(href)
                # Preserve the trailing text of the replaced xi:include tag.
                if e.tail:
                    node.tail = (node.tail or "") + e.tail
                elem[i] = node
            elif parse == "text":
                text = loader(href, parse, e.get("encoding"))
                if text is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                    )
                if e.tail:
                    text += e.tail
                # Splice the text into the tree: append to the previous
                # sibling's tail, or to the parent's text for a first child.
                if i:
                    node = elem[i-1]
                    node.tail = (node.tail or "") + text
                else:
                    elem.text = (elem.text or "") + text
                # Remove the xi:include element; do not advance i because the
                # following child has shifted into slot i.
                del elem[i]
                continue
            else:
                raise FatalIncludeError(
                    "unknown parse type in xi:include tag (%r)" % parse
                )
        elif e.tag == XINCLUDE_FALLBACK:
            # xi:fallback is only legal inside xi:include (not supported here).
            raise FatalIncludeError(
                "xi:fallback tag must be child of xi:include (%r)" % e.tag
            )
        else:
            # Ordinary element: recurse into its children.
            _include(e, loader, base_url, max_depth, _parent_hrefs)
        i += 1
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
# Column list and name of the index this migration adds to
# the virtual_interfaces table.
INDEX_COLUMNS = ['uuid']
INDEX_NAME = 'virtual_interfaces_uuid_idx'
def _get_table_index(migrate_engine):
    """Reflect virtual_interfaces and look for an index on INDEX_COLUMNS.

    Returns a (metadata, table, index) triple; index is None when no
    existing index covers exactly the INDEX_COLUMNS column list.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table('virtual_interfaces', meta, autoload=True)
    matching = None
    for candidate in table.indexes:
        if candidate.columns.keys() == INDEX_COLUMNS:
            matching = candidate
            break
    return meta, table, matching
def upgrade(migrate_engine):
    """Create the virtual_interfaces uuid index unless one already exists."""
    meta, table, existing = _get_table_index(migrate_engine)
    if existing:
        # Idempotency guard: re-running the migration is a no-op.
        LOG.info(_LI('Skipped adding %s because an equivalent index'
                     ' already exists.'), INDEX_NAME)
        return
    members = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
    Index(INDEX_NAME, *members).create(migrate_engine)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special math operations."""
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
import scipy.special as sps
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
flags.DEFINE_bool('vary_seed', False,
                  ('Whether to vary the PRNG seed unpredictably. '
                   'With --runs_per_test=N, produces N iid runs.'))
# Number of random sample points drawn per test case.
NUM_SAMPLES = int(1e3)
# XLA-jitted wrappers around the special math ops under test;
# jit_compile=True forces the XLA lowering of each op to be exercised.
@def_function.function(jit_compile=True)
def _igamma(a, x):
    return math_ops.igamma(a, x)
@def_function.function(jit_compile=True)
def _igammac(a, x):
    return math_ops.igammac(a, x)
@def_function.function(jit_compile=True)
def _polygamma(n, x):
    return math_ops.polygamma(n, x)
@def_function.function(jit_compile=True)
def _zeta(a, q):
    return math_ops.zeta(a, q)
# This is df/da / df/dx, where f = igamma.
def implicit_reparameterization_grad(a, x):
    # log of the (unnormalized) Gamma density at x: (a-1)*log(x) - lgamma(a) - x
    log_prob = math_ops.xlogy(a - 1., x) - math_ops.lgamma(a) - x
    prob = math_ops.exp(log_prob)
    return -gen_math_ops.igamma_grad_a(a, x) / prob
@def_function.function(jit_compile=True)
def _log1p(x):
    return math_ops.log1p(x)
class Log1pTest(xla_test.XLATestCase, parameterized.TestCase):
  """Compares the XLA lowering of log1p against np.log1p over magnitude bands."""
  def setUp(self):
    # Optionally reseed numpy from OS entropy so --runs_per_test=N gives
    # N independent runs.
    if flags.FLAGS.vary_seed:
      entropy = os.urandom(64)
      answer = int.from_bytes(entropy, 'big')
      np.random.seed(answer % (2**32 - 1))
    super(Log1pTest, self).setUp()
  def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
    # TPU numerics are looser than CPU/GPU; widen tolerances there only.
    if self.device not in ['TPU']:
      return rtol, atol
    if dtype == np.float32:
      return 4e-4, 0.
    return 1e-10, 0.
  def _test_range(self, low, high, dtype, rtol, atol, is_negative=False):
    # Samples |x| log-uniformly in [exp(low), exp(high)] and checks the
    # compiled log1p against np.log1p.
    # Test values near zero.
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.exp(np.random.uniform(
        low=low, high=high, size=[NUM_SAMPLES])).astype(dtype)
    if is_negative:
      x = -x
    expected_values = np.log1p(x)
    with self.session() as sess:
      with self.test_scope():
        actual = _log1p(x)
      actual = sess.run(actual)
    self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
  @parameterized.parameters((np.float32, 1e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testSmallX(self, dtype, rtol, atol):
    self._test_range(-40., -20., dtype, rtol, atol, is_negative=False)
    self._test_range(-40., -20., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testGreaterThanNegativeTwentyExponent(self, dtype, rtol, atol):
    self._test_range(-20., -10., dtype, rtol, atol, is_negative=False)
    self._test_range(-20., -10., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testGreaterThanNegativeTenExponent(self, dtype, rtol, atol):
    self._test_range(-10., -5., dtype, rtol, atol, is_negative=False)
    self._test_range(-10., -5., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testGreaterThanNegativeFiveExponent(self, dtype, rtol, atol):
    self._test_range(-5., -1., dtype, rtol, atol, is_negative=False)
    self._test_range(-5., -1., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 4e-7, 0.),
                            (np.float64, 3e-14, 0.))
  def testXGreaterThanOneTenth(self, dtype, rtol, atol):
    self._test_range(-1., 0., dtype, rtol, atol, is_negative=False)
    self._test_range(-1., 0., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 2e-15, 0.))
  def testXGreaterThanOne(self, dtype, rtol, atol):
    self._test_range(0., 3., dtype, rtol, atol, is_negative=False)
class ZetaTest(xla_test.XLATestCase, parameterized.TestCase):
  """Compares the XLA lowering of the Hurwitz zeta op against scipy.special.zeta."""
  def setUp(self):
    # Optionally reseed numpy from OS entropy so --runs_per_test=N gives
    # N independent runs.
    if flags.FLAGS.vary_seed:
      entropy = os.urandom(64)
      answer = int.from_bytes(entropy, 'big')
      np.random.seed(answer % (2**32 - 1))
    super(ZetaTest, self).setUp()
  def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
    # TPU numerics are looser than CPU/GPU; widen tolerances there only.
    if self.device not in ['TPU']:
      return rtol, atol
    if dtype == np.float32:
      return 2e-2, 1e-7
    return 2e-4, 1e-20
  def testBadValues(self):
    # Exercises the edge/pole behavior of zeta(x, q) for invalid inputs.
    q = np.random.uniform(low=0.3, high=20., size=[10])
    with self.session() as sess:
      with self.test_scope():
        y = _zeta(np.float64(1.), q)
      actual = sess.run(y)
    # When x == 1, this is the Harmonic series.
    self.assertTrue(np.all(np.isinf(actual)))
    with self.session() as sess:
      with self.test_scope():
        y = _zeta(np.float64(0.1), q)
      actual = sess.run(y)
    # When x < 1, this is undefined.
    self.assertTrue(np.all(np.isnan(actual)))
    with self.session() as sess:
      with self.test_scope():
        y = _zeta([1.1, 1.2, 2.1, 2.2, 3.1], [-2.0, -1.1, -1.0, -0.5, -0.1])
      actual = sess.run(y)
    # For q <= 0, x must be an integer.
    self.assertTrue(np.all(np.isnan(actual)))
    with self.session() as sess:
      with self.test_scope():
        y = _zeta([2.0, 4.0, 6.0], [0.0, -1.0, -2.0])
      actual = sess.run(y)
    # For integer q <= 0, zeta has poles with a defined limit of +inf where x is
    # an even integer.
    self.assertTrue(np.all(np.isinf(actual)))
    with self.session() as sess:
      with self.test_scope():
        y = _zeta([3.0, 5.0, 7.0], [0.0, -1.0, -2.0])
      actual = sess.run(y)
    # For non-positive integer q, zeta has poles with an undefined limit where x
    # is an odd integer.
    self.assertTrue(np.all(np.isnan(actual)))
    with self.session() as sess:
      with self.test_scope():
        y = _zeta([1.1, 2.2, 3.3], [-1.1, -1.0, 0.0])
      actual = sess.run(y)
    # For non-positive q, zeta is not defined if x is not an integer.
    self.assertTrue(np.all(np.isnan(actual)))
  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testLargeXSmallQ(self, dtype, rtol, atol):
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    if self.device not in ['XLA_GPU', 'XLA_CPU'] and dtype == np.float64:
      # TODO(b/165739664): Figure out why on TPU F64 Zeta sometimes returns
      # infs.
      self.skipTest(
          'Skipping test because some F64 operations are numerically '
          'unstable on TPU.')
    x = np.random.uniform(low=100., high=200., size=[NUM_SAMPLES]).astype(dtype)
    q = np.random.uniform(low=0.3, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected_values = sps.zeta(x, q)
    with self.session() as sess:
      with self.test_scope():
        y = _zeta(x, q)
      actual = sess.run(y)
    self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testSmallValues(self, dtype, rtol, atol):
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    # Test values near zero.
    x = np.random.uniform(low=1.1, high=10., size=[NUM_SAMPLES]).astype(dtype)
    q = np.random.uniform(
        low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected_values = sps.zeta(x, q)
    with self.session() as sess:
      with self.test_scope():
        actual = sess.run(_zeta(x, q))
    self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testMediumValues(self, dtype, rtol, atol):
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(low=1.1, high=100., size=[NUM_SAMPLES]).astype(dtype)
    q = np.random.uniform(low=1., high=1e1, size=[NUM_SAMPLES]).astype(dtype)
    expected_values = sps.zeta(x, q)
    with self.session() as sess:
      with self.test_scope():
        actual = sess.run(_zeta(x, q))
    self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
  @parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
  def testLargeValues(self, dtype, rtol, atol):
    x = np.random.uniform(
        low=100., high=int(1e3), size=[NUM_SAMPLES]).astype(dtype)
    q = np.random.uniform(
        low=1., high=int(1e1), size=[NUM_SAMPLES]).astype(dtype)
    expected_values = sps.zeta(x, q)
    with self.session() as sess:
      with self.test_scope():
        actual = sess.run(_zeta(x, q))
    self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
class PolygammaTest(xla_test.XLATestCase, parameterized.TestCase):
  """Compares the XLA polygamma implementation against scipy.special."""

  def setUp(self):
    # When requested, reseed numpy from OS entropy so successive runs
    # exercise different random draws.
    if flags.FLAGS.vary_seed:
      seed = int.from_bytes(os.urandom(64), 'big')
      np.random.seed(seed % (2**32 - 1))
    super(PolygammaTest, self).setUp()

  def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
    """Returns loosened tolerances when running on TPU."""
    if self.device not in ['TPU']:
      return rtol, atol
    return (2e-2, 1e-7) if dtype == np.float32 else (2e-4, 1e-20)

  def testBadValues(self):
    """Invalid orders must propagate as NaN."""
    x = np.random.uniform(low=0.3, high=20., size=[10])
    with self.session() as sess, self.test_scope():
      result = sess.run(_polygamma(np.float64(-1.), x))
    # Not defined for negative numbers.
    self.assertTrue(np.all(np.isnan(result)))

    with self.session() as sess, self.test_scope():
      result = sess.run(_polygamma(np.float64(0.1), x))
    # Not defined for non-integers.
    self.assertTrue(np.all(np.isnan(result)))

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testRecoverDigamma(self, dtype, rtol, atol):
    """polygamma of order zero must agree with digamma."""
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    if dtype == np.float64 and self.device not in ['XLA_GPU', 'XLA_CPU']:
      self.skipTest('Skipping test because some F64 operations are '
                    'numerically unstable on TPU.')
    x = np.random.uniform(low=0.1, high=50., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.digamma(x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_polygamma(dtype(0.), x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testSmallN(self, dtype, rtol, atol):
    """Small integer orders with arguments drawn near zero."""
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    n = np.random.randint(low=1, high=5, size=[NUM_SAMPLES]).astype(dtype)
    x = np.random.uniform(
        low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.polygamma(n, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_polygamma(n, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testMediumLargeN(self, dtype, rtol, atol):
    """Larger integer orders with moderate arguments."""
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    n = np.random.randint(low=5, high=10, size=[NUM_SAMPLES]).astype(dtype)
    x = np.random.uniform(low=1., high=1e1, size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.polygamma(n, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_polygamma(n, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)
class IgammaTest(xla_test.XLATestCase, parameterized.TestCase):
  """Compares the XLA igamma implementation and its gradients to scipy."""

  def setUp(self):
    # When requested, reseed numpy from OS entropy so successive runs
    # exercise different random draws.
    if flags.FLAGS.vary_seed:
      seed = int.from_bytes(os.urandom(64), 'big')
      np.random.seed(seed % (2**32 - 1))
    super(IgammaTest, self).setUp()

  def maybe_skip_test(self, dtype):
    """Skips float64 cases on devices (TPU) that lack the required F64 ops."""
    if dtype == np.float64 and self.device not in ['XLA_GPU', 'XLA_CPU']:
      self.skipTest(
          'Skipping test because some F64 operations not supported on TPU.')

  def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
    """Returns loosened tolerances when running on TPU."""
    if self.device not in ['TPU']:
      return rtol, atol
    return (2e-2, 1e-7) if dtype == np.float32 else (2e-4, 1e-20)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testLargeXSmallA(self, dtype, rtol, atol):
    """Large x paired with small a."""
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(low=100., high=200., size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(low=0.3, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammainc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igamma(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testSmallValues(self, dtype, rtol, atol):
    """Both arguments drawn near zero."""
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(
        low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(
        low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammainc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igamma(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testMediumValues(self, dtype, rtol, atol):
    """Both arguments in a moderate range."""
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammainc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igamma(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
  def testLargeValues(self, dtype, rtol, atol):
    """Both arguments large."""
    if self.device == 'TPU':
      # TODO(b/154908275): Remove this once fixed for large a, x.
      self.skipTest('Skipping test since numerically unstable on TPU.')
    x = np.random.uniform(
        low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(
        low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammainc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igamma(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  # We don't check small values because the numerical gradients become quite
  # large.
  @parameterized.parameters((np.float32, 0.09), (np.float64, 1e-7))
  def testGradMediumValues(self, dtype, tolerance):
    self.maybe_skip_test(dtype)
    with self.session(), self.test_scope():
      x = constant_op.constant(
          np.random.uniform(low=1., high=100.,
                            size=[NUM_SAMPLES]).astype(dtype))
      a = constant_op.constant(
          np.random.uniform(low=1., high=100.,
                            size=[NUM_SAMPLES]).astype(dtype))
      # Differentiate with respect to the first argument only.
      max_error = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(
              lambda b: _igamma(b, x), x=[a], delta=1e-3))
      self.assertLessEqual(max_error, tolerance)

  @parameterized.parameters((np.float32, 0.5), (np.float64, 1e-7))
  def testGradLargeValues(self, dtype, tolerance):
    self.maybe_skip_test(dtype)
    with self.session(), self.test_scope():
      x = constant_op.constant(
          np.random.uniform(low=100., high=int(1e4),
                            size=[NUM_SAMPLES]).astype(dtype))
      a = constant_op.constant(
          np.random.uniform(low=100., high=int(1e4),
                            size=[NUM_SAMPLES]).astype(dtype))
      # Differentiate with respect to the first argument only.
      max_error = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(
              lambda b: _igamma(b, x), x=[a], delta=1e-2))
      self.assertLessEqual(max_error, tolerance)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testRandomGammaGradSmallValues(self, dtype, rtol, atol):
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    with self.session() as sess, self.test_scope():
      x = constant_op.constant(
          np.random.uniform(
              low=np.finfo(dtype).tiny, high=1.,
              size=[NUM_SAMPLES]).astype(dtype))
      a = constant_op.constant(
          np.random.uniform(
              low=np.finfo(dtype).tiny, high=1.,
              size=[NUM_SAMPLES]).astype(dtype))
      sample_grad, reference_grad = sess.run(
          [gen_random_ops.random_gamma_grad(a, x),
           implicit_reparameterization_grad(a, x)])
      # The ratio computed in implicit_reparameterization_grad can easily
      # produce NaN/inf when numerator and denominator both zero out, so
      # compare only on the finite entries of the reference.
      finite = ~np.logical_or(
          np.isnan(reference_grad), np.isinf(reference_grad))
      self.assertAllClose(
          reference_grad[finite], sample_grad[finite], atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testRandomGammaGradMediumValues(self, dtype, rtol, atol):
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    with self.session() as sess, self.test_scope():
      x = constant_op.constant(
          np.random.uniform(low=1., high=10.,
                            size=[NUM_SAMPLES]).astype(dtype))
      a = constant_op.constant(
          np.random.uniform(low=1., high=10.,
                            size=[NUM_SAMPLES]).astype(dtype))
      sample_grad, reference_grad = sess.run(
          [gen_random_ops.random_gamma_grad(a, x),
           implicit_reparameterization_grad(a, x)])
      # The ratio computed in implicit_reparameterization_grad can easily
      # produce NaN/inf when numerator and denominator both zero out, so
      # compare only on the finite entries of the reference.
      finite = ~np.logical_or(
          np.isnan(reference_grad), np.isinf(reference_grad))
      self.assertAllClose(
          reference_grad[finite], sample_grad[finite], atol=atol, rtol=rtol)
class IgammacTest(xla_test.XLATestCase, parameterized.TestCase):
  """Compares the XLA igammac implementation against scipy.special."""

  def setUp(self):
    # When requested, reseed numpy from OS entropy so successive runs
    # exercise different random draws.
    if flags.FLAGS.vary_seed:
      seed = int.from_bytes(os.urandom(64), 'big')
      np.random.seed(seed % (2**32 - 1))
    super(IgammacTest, self).setUp()

  def maybe_skip_test(self, dtype):
    """Skips float64 cases on devices (TPU) that lack the required F64 ops."""
    if dtype == np.float64 and self.device not in ['XLA_GPU', 'XLA_CPU']:
      # TODO(b/154908275): Remove this once fixed for large a, x.
      self.skipTest(
          'Skipping test because some F64 operations not supported on TPU.')

  def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
    """Returns loosened tolerances when running on TPU."""
    if self.device not in ['TPU']:
      return rtol, atol
    return (2e-2, 1e-7) if dtype == np.float32 else (2e-4, 1e-20)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testLargeXSmallA(self, dtype, rtol, atol):
    """Large x paired with small a."""
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(low=100., high=200., size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(low=0.3, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammaincc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igammac(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testSmallValues(self, dtype, rtol, atol):
    """Both arguments drawn near zero."""
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(
        low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(
        low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammaincc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igammac(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 1e-2, 1e-11),
                            (np.float64, 1e-4, 1e-30))
  def testMediumValues(self, dtype, rtol, atol):
    """Both arguments in a moderate range."""
    self.maybe_skip_test(dtype)
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammaincc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igammac(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)

  @parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
  def testLargeValues(self, dtype, rtol, atol):
    """Both arguments large."""
    if self.device == 'TPU':
      self.skipTest('Skipping test since numerically unstable on TPU.')
    x = np.random.uniform(
        low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
    a = np.random.uniform(
        low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
    expected = sps.gammaincc(a, x)
    with self.session() as sess, self.test_scope():
      result = sess.run(_igammac(a, x))
    self.assertAllClose(expected, result, atol=atol, rtol=rtol)
if __name__ == '__main__':
  # Disable XLA's CPU fast-math mode so the relaxed float semantics it
  # enables do not perturb the accuracy comparisons against scipy above.
  os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'
  test.main()
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.timeseries-thresholds.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "",
"version": "v0",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-10": {
"kind": "Panel",
"spec": {
"id": 10,
"title": "Timeseries - Area",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "rgba(115, 191, 105, 0)"
},
{
"value": 60,
"color": "orange"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"graph": false,
"legend": false,
"tooltip": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "area"
}
}
},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "Angular thresholds only lines",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"min": 0,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"fillGradient": 0,
"hiddenSeries": false,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"thresholds": [
{
"$$hashKey": "object:14",
"colorMode": "critical",
"fill": false,
"line": true,
"op": "gt",
"value": 80,
"yaxis": "left"
},
{
"$$hashKey": "object:44",
"colorMode": "warning",
"fill": false,
"line": true,
"op": "gt",
"value": 50,
"yaxis": "left"
}
],
"timeRegions": [],
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:26",
"format": "short",
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"$$hashKey": "object:27",
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
},
"alertThreshold": true
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-12": {
"kind": "Panel",
"spec": {
"id": 12,
"title": "Timeseries - with gaps",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "red"
},
{
"value": 20,
"color": "orange"
},
{
"value": 40,
"color": "transparent"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"graph": false,
"legend": false,
"tooltip": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line+area"
}
}
},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "Angular thresholds less then ",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"min": 0,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"fillGradient": 0,
"hiddenSeries": false,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"thresholds": [
{
"$$hashKey": "object:109",
"colorMode": "critical",
"fill": true,
"line": true,
"op": "lt",
"value": 20,
"yaxis": "left"
},
{
"$$hashKey": "object:115",
"colorMode": "warning",
"fill": true,
"line": true,
"op": "lt",
"value": 60,
"yaxis": "left"
}
],
"timeRegions": [],
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:26",
"format": "short",
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"$$hashKey": "object:27",
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
},
"alertThreshold": true
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-14": {
"kind": "Panel",
"spec": {
"id": 14,
"title": "Angular thresholds less then ",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"min": 0,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "red"
},
{
"value": 20,
"color": "orange"
},
{
"value": 60,
"color": "transparent"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"graph": false,
"legend": false,
"tooltip": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line+area"
}
}
},
"overrides": []
}
}
}
}
},
"panel-15": {
"kind": "Panel",
"spec": {
"id": 15,
"title": "Time series custom colors",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 80,
"min": 40,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 30
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "rgba(184, 119, 217, 0.23)"
},
{
"value": 40,
"color": "transparent"
},
{
"value": 80,
"color": "rgba(50, 161, 230, 0.13)"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"graph": false,
"legend": false,
"tooltip": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 4,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "always",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line+area"
}
}
},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Timeseries - Line Thresholds",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "rgba(115, 191, 105, 0)"
},
{
"value": 60,
"color": "orange"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"graph": false,
"legend": false,
"tooltip": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
}
},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "Timeseries - Line + Area",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single"
}
},
"fieldConfig": {
"defaults": {
"unit": "short",
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "rgba(115, 191, 105, 0)"
},
{
"value": 60,
"color": "orange"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"graph": false,
"legend": false,
"tooltip": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line+area"
}
}
},
"overrides": []
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "Angular Line + Area ",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"min": 0,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"fillGradient": 0,
"hiddenSeries": false,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"thresholds": [
{
"$$hashKey": "object:14",
"colorMode": "critical",
"fill": true,
"line": true,
"op": "gt",
"value": 80,
"yaxis": "left"
},
{
"$$hashKey": "object:44",
"colorMode": "warning",
"fill": true,
"line": true,
"op": "gt",
"value": 50,
"yaxis": "left"
}
],
"timeRegions": [],
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:26",
"format": "short",
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"$$hashKey": "object:27",
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
},
"alertThreshold": true
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "Angular thresholds no lines",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 20,
"min": 0,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"fillGradient": 0,
"hiddenSeries": false,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"thresholds": [
{
"$$hashKey": "object:109",
"colorMode": "critical",
"fill": true,
"line": false,
"op": "gt",
"value": 80,
"yaxis": "left"
},
{
"$$hashKey": "object:115",
"colorMode": "warning",
"fill": true,
"line": false,
"op": "gt",
"value": 60,
"yaxis": "left"
}
],
"timeRegions": [],
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:26",
"format": "short",
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"$$hashKey": "object:27",
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
},
"alertThreshold": true
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "Angular bands with gap ",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 80,
"min": 40,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 30
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 0,
"fillGradient": 0,
"hiddenSeries": false,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 1,
"points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"thresholds": [
{
"$$hashKey": "object:14",
"colorMode": "critical",
"fill": true,
"line": true,
"op": "gt",
"value": 80,
"yaxis": "left"
},
{
"$$hashKey": "object:44",
"colorMode": "warning",
"fill": true,
"line": true,
"op": "lt",
"value": 40,
"yaxis": "left"
},
{
"$$hashKey": "object:40",
"colorMode": "critical",
"fill": true,
"line": true,
"op": "lt",
"value": 20,
"yaxis": "left"
}
],
"timeRegions": [],
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:26",
"format": "short",
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"$$hashKey": "object:27",
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
},
"alertThreshold": true
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "Angular custom colors",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"max": 80,
"min": 40,
"scenarioId": "random_walk"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 30
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "7.5.0-pre",
"spec": {
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 0,
"fillGradient": 0,
"hiddenSeries": false,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 1,
"points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"thresholds": [
{
"$$hashKey": "object:14",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(50, 161, 230, 0.13)",
"line": true,
"lineColor": "#B877D9",
"op": "gt",
"value": 80,
"yaxis": "left"
},
{
"$$hashKey": "object:44",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(184, 119, 217, 0.23)",
"line": true,
"lineColor": "rgba(93, 196, 31, 0.6)",
"op": "lt",
"value": 40,
"yaxis": "left"
}
],
"timeRegions": [],
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:26",
"format": "short",
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"$$hashKey": "object:27",
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
}
},
"alertThreshold": true
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 12,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 12,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 7,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 7,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-10"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 13,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 13,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-14"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 19,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 19,
"width": 12,
"height": 6,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 25,
"width": 12,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-7"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 25,
"width": 12,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-12"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 32,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 32,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-15"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Panel Tests - GraphNG Thresholds",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-timeseries/v0alpha1.timeseries-thresholds.v42.v2beta1.json |
package org.pytorch;
import com.facebook.soloader.nativeloader.NativeLoader;
public class PyTorchCodegenLoader {
public static void loadNativeLibs() {
try {
NativeLoader.loadLibrary("torch-code-gen");
} catch (Throwable t) {
// Loading the codegen lib is best-effort since it's only there for query based builds.
}
}
private PyTorchCodegenLoader() {}
} | java | github | https://github.com/pytorch/pytorch | android/pytorch_android/src/main/java/org/pytorch/PyTorchCodegenLoader.java |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session with grpc:// URLs.
This test focus on grpc:// debugging of distributed (gRPC) sessions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import subprocess
import sys
import time
import portpicker
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("b/120545219")
class DistributedSessionDebugTest(test_util.TensorFlowTestCase):
  """Test the debugging of distributed sessions."""

  # Fraction of GPU memory each spawned server process may claim, so several
  # processes can coexist on a single test GPU.
  PER_PROC_GPU_MEMORY_FRACTION = 0.1
  # Sleep interval between polls while waiting for debug tensors to arrive.
  POLLING_INTERVAL_SEC = 0.025

  @classmethod
  def setUpClass(cls):
    """Launch a one-worker TF server subprocess and an in-process debug server.

    Sets, on the class: server_target (grpc:// URL of the worker),
    server_procs (job name -> Popen), the debug server handles/URL, and a
    ConfigProto limiting per-process GPU memory.
    """
    gpu_memory_fraction_opt = (
        "--gpu_memory_fraction=%f" % cls.PER_PROC_GPU_MEMORY_FRACTION)

    worker_port = portpicker.pick_unused_port()
    # NOTE: the cluster defines only a single "worker" job with one task.
    cluster_spec = "worker|localhost:%d" % worker_port
    tf_logging.info("cluster_spec: %s", cluster_spec)

    server_bin = test.test_src_dir_path("python/debug/grpc_tensorflow_server")

    cls.server_target = "grpc://localhost:%d" % worker_port

    cls.server_procs = {}
    cls.server_procs["worker"] = subprocess.Popen(
        [
            server_bin,
            "--cluster_spec=%s" % cluster_spec,
            "--job_name=worker",
            "--task_id=0",
            gpu_memory_fraction_opt,
        ],
        stdout=sys.stdout,
        stderr=sys.stderr)

    # Start debug server in-process, on separate thread.
    (cls.debug_server_port, cls.debug_server_url, _, cls.debug_server_thread,
     cls.debug_server
    ) = grpc_debug_test_server.start_server_on_separate_thread(
        dump_to_filesystem=False)

    tf_logging.info("debug server url: %s", cls.debug_server_url)

    cls.session_config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(
            per_process_gpu_memory_fraction=cls.PER_PROC_GPU_MEMORY_FRACTION))

  @classmethod
  def tearDownClass(cls):
    """Terminate the worker subprocess and shut down the debug server."""
    for key in cls.server_procs:
      cls.server_procs[key].terminate()
    cls.debug_server.stop_server().wait()
    cls.debug_server_thread.join()

  def setUp(self):
    pass

  def tearDown(self):
    # Drop any tensors/metadata accumulated by the previous test case.
    self.debug_server.clear_data()

  def _pollingAssertDebugTensorValuesAllClose(self, expected_values,
                                              debug_tensor_name):
    """Poll debug_server till tensor appears and matches expected values."""
    while (debug_tensor_name not in self.debug_server.debug_tensor_values or
           len(self.debug_server.debug_tensor_values) < len(expected_values)):
      time.sleep(self.POLLING_INTERVAL_SEC)
    self.assertAllClose(
        expected_values,
        self.debug_server.debug_tensor_values[debug_tensor_name])

  def _createGraph(self):
    """Create graph for testing.

    Builds a = 10, b = 100, inc_a = a += 2, dec_b = b -= 5,
    p = inc_a * dec_b, q = -p, all pinned to the worker's CPU; the ops are
    stored on self for use by the test methods.

    Returns:
      Python Graph object.
    """
    with ops.Graph().as_default() as graph:
      with ops.device("/job:worker/task:0/cpu:0"):
        self.a = variables.VariableV1(10.0, name="a")
        self.b = variables.VariableV1(100.0, name="b")

        self.inc_a = state_ops.assign_add(self.a, 2.0, name="inc_a")
        self.dec_b = state_ops.assign_add(self.b, -5.0, name="dec_b")
        self.p = math_ops.multiply(self.inc_a, self.dec_b, name="p")
        self.q = math_ops.negative(self.p, name="q")
    return graph

  def testDistributedRunWithGatedGrpcCommunicatesWithDebugServerCorrectly(self):
    """Gated-gRPC watches can be toggled per run against a distributed sess."""
    graph = self._createGraph()
    with session.Session(
        config=self.session_config, graph=graph,
        target=self.server_target) as sess:
      sess.run(self.a.initializer)
      sess.run(self.b.initializer)

      # Always-on (ungated) watch on node "a".
      run_options = config_pb2.RunOptions()
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          node_name_regex_whitelist=r"a",
          debug_ops=["DebugIdentity"],
          debug_urls=[self.debug_server_url])
      # Test gated_grpc for an op located on the worker, i.e., on the same
      # host as where MasterSession is.
      # TODO(cais): gRPC gating of debug ops does not work on partition graphs
      # not located on MasterSession hosts (e.g., parameter servers) yet. Make
      # it work.
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          node_name_regex_whitelist=r"p",
          debug_ops=["DebugIdentity(gated_grpc=True)"],
          debug_urls=[self.debug_server_url])

      for i in xrange(4):
        # Toggle the gated watch: enabled on even i, disabled on odd i.
        if i % 2 == 0:
          self.debug_server.request_watch("p", 0, "DebugIdentity")
        else:
          self.debug_server.request_unwatch("p", 0, "DebugIdentity")

        # a and b are mutated by every run: a = 10 + 2*(i+1), b = 100 - 5*(i+1).
        expected_p = (10.0 + 2.0 * (i + 1)) * (100.0 - 5.0 * (i + 1))
        self.assertAllClose(-expected_p, sess.run(self.q, options=run_options))

        self.assertEqual(1, len(self.debug_server.core_metadata_json_strings))
        core_metadata = json.loads(
            self.debug_server.core_metadata_json_strings[0])
        self.assertEqual([], core_metadata["input_names"])
        self.assertEqual(["q:0"], core_metadata["output_names"])
        self.assertEqual(i, core_metadata["executor_step_index"])

        if i == 0:
          self.assertEqual(1, len(self.debug_server.partition_graph_defs))

        # Tensor "a" is from a PS. It may take longer to arrive due to the fact
        # that the stream connection between the PS and the debug server is
        # persistent and not torn down at the end of each Session.run()
        # NOTE(review): this cluster_spec defines only a "worker" job; the PS
        # wording above looks stale — confirm against the original multi-job
        # variant of this test.
        self._pollingAssertDebugTensorValuesAllClose([10.0 + 2.0 * i],
                                                     "a:0:DebugIdentity")

        # Due to the gRPC gating of the debug op for "p", the debug tensor
        # should be available only on even-indexed runs (when the watch was
        # requested above).
        if i % 2 == 0:
          self.assertAllClose(
              [expected_p],
              self.debug_server.debug_tensor_values["p:0:DebugIdentity"])
        else:
          self.assertNotIn("p:0:DebugIdentity",
                           self.debug_server.debug_tensor_values)

        # "b" was never watched, so it must never show up.
        self.assertNotIn("b:0:DebugIdentity",
                         self.debug_server.debug_tensor_values)
        self.debug_server.clear_data()

  def testDistributedRunWithGrpcDebugWrapperWorks(self):
    """GrpcDebugWrapperSession streams watched tensors from a remote sess."""
    graph = self._createGraph()
    with session.Session(
        config=self.session_config, graph=graph,
        target=self.server_target) as sess:
      sess.run(self.a.initializer)
      sess.run(self.b.initializer)

      def watch_fn(feeds, fetch_keys):
        # Watch only node "p", regardless of feeds/fetches.
        del feeds, fetch_keys
        return framework.WatchOptions(
            debug_ops=["DebugIdentity"],
            node_name_regex_whitelist=r"p")
      sess = grpc_wrapper.GrpcDebugWrapperSession(
          sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)

      for i in xrange(4):
        expected_p = (10.0 + 2.0 * (i + 1)) * (100.0 - 5.0 * (i + 1))
        self.assertAllClose(-expected_p, sess.run(self.q))

        if i == 0:
          self.assertEqual(1, len(self.debug_server.partition_graph_defs))

        self.assertAllClose(
            [expected_p],
            self.debug_server.debug_tensor_values["p:0:DebugIdentity"])
        self.assertNotIn("b:0:DebugIdentity",
                         self.debug_server.debug_tensor_values)

        self.debug_server.clear_data()
if __name__ == "__main__":
googletest.main() | unknown | codeparrot/codeparrot-clean | ||
version: 2.0.0
filters:
- "*":
approvers:
- 10gen/devprod-build
- "jstest_timeout*":
approvers:
- 10gen/devprod-correctness
- "verify_resmoke_coredump_test.sh":
approvers:
- 10gen/devprod-correctness | unknown | github | https://github.com/mongodb/mongo | buildscripts/bazel_testbuilds/OWNERS.yml |
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/i2c/i2c-mt65xx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek I2C controller
description:
This driver interfaces with the native I2C controller present in
various MediaTek SoCs.
allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#
maintainers:
- Qii Wang <qii.wang@mediatek.com>
properties:
compatible:
oneOf:
- const: mediatek,mt2712-i2c
- const: mediatek,mt6577-i2c
- const: mediatek,mt6589-i2c
- const: mediatek,mt7622-i2c
- const: mediatek,mt7981-i2c
- const: mediatek,mt7986-i2c
- const: mediatek,mt8168-i2c
- const: mediatek,mt8173-i2c
- const: mediatek,mt8183-i2c
- const: mediatek,mt8186-i2c
- const: mediatek,mt8188-i2c
- const: mediatek,mt8192-i2c
- items:
- enum:
- mediatek,mt7629-i2c
- mediatek,mt8516-i2c
- const: mediatek,mt2712-i2c
- items:
- enum:
- mediatek,mt2701-i2c
- mediatek,mt6797-i2c
- mediatek,mt7623-i2c
- const: mediatek,mt6577-i2c
- items:
- enum:
- mediatek,mt8365-i2c
- const: mediatek,mt8168-i2c
- items:
- enum:
- mediatek,mt6795-i2c
- const: mediatek,mt8173-i2c
- items:
- enum:
- mediatek,mt6878-i2c
- mediatek,mt6991-i2c
- mediatek,mt8196-i2c
- const: mediatek,mt8188-i2c
- items:
- enum:
- mediatek,mt6893-i2c
- mediatek,mt8195-i2c
- const: mediatek,mt8192-i2c
reg:
items:
- description: Physical base address
- description: DMA base address
interrupts:
maxItems: 1
clocks:
minItems: 2
items:
- description: Main clock for I2C bus
- description: Clock for I2C via DMA
- description: Bus arbitrator clock
- description: Clock for I2C from PMIC
clock-names:
minItems: 2
items:
- const: main
- const: dma
- const: arb
- const: pmic
clock-div:
$ref: /schemas/types.yaml#/definitions/uint32
description: Frequency divider of clock source in I2C module
clock-frequency:
default: 100000
description:
SCL frequency to use (in Hz). If omitted, 100kHz is used.
mediatek,have-pmic:
description: Platform controls I2C from PMIC side
type: boolean
mediatek,use-push-pull:
description: Use push-pull mode I/O config
type: boolean
vbus-supply:
description: Phandle to the regulator providing power to SCL/SDA
required:
- compatible
- reg
- clocks
- clock-names
- clock-div
- interrupts
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c0: i2c@1100d000 {
compatible = "mediatek,mt6577-i2c";
reg = <0x1100d000 0x70>, <0x11000300 0x80>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_LOW>;
clocks = <&i2c0_ck>, <&ap_dma_ck>;
clock-names = "main", "dma";
clock-div = <16>;
clock-frequency = <400000>;
mediatek,have-pmic;
#address-cells = <1>;
#size-cells = <0>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import sys
import unittest
from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeAuthPassword, NodeImage, \
NodeLocation, NodeSize, StorageVolume, VolumeSnapshot
from libcloud.compute.drivers.ecs import ECSDriver
from libcloud.compute.types import NodeState, StorageVolumeState
from libcloud.test import MockHttp, LibcloudTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import ECS_PARAMS
from libcloud.utils.py3 import httplib
class ECSDriverTestCase(LibcloudTestCase):
region = 'cn-qingdao'
zone = 'cn-qingdao-b'
image_id = 'ubuntu1404_64_20G_aliaegis_20150325.vhd'
    def setUp(self):
        """Wire ECSDriver to the mock HTTP layer and build shared fixtures."""
        # The mock handlers can reach back into this test case via .test
        # (presumably to validate request parameters — verify in ECSMockHttp).
        ECSMockHttp.test = self
        ECSDriver.connectionCls.conn_class = ECSMockHttp
        ECSMockHttp.use_param = 'Action'
        ECSMockHttp.type = None
        self.driver = ECSDriver(*ECS_PARAMS, region=self.region)
        # Fake libcloud objects reused by the individual test methods below.
        self.fake_size = NodeSize('ecs.t1.small', 'ecs t1 small',
                                  None, None, None, None,
                                  self.driver)
        self.fake_image = NodeImage(self.image_id, name='ubuntu 14.04 64bit',
                                    driver=self.driver)
        self.fake_node = Node(id='fake-node1', name='fake-node',
                              state=NodeState.RUNNING,
                              public_ips=None,
                              private_ips=None,
                              driver=self.driver)
        self.fake_volume = StorageVolume(id='fake-volume1', name='fake-volume',
                                         size=self.fake_size,
                                         driver=self.driver)
        self.fake_snapshot = VolumeSnapshot(id='fake-snapshot1',
                                            driver=self.driver)
        self.fake_location = NodeLocation(id=self.region, name=self.region,
                                          country=None, driver=self.driver)
        self.fake_instance_id = 'fake_instance_id'
        self.fake_security_group_id = 'fake_security_group_id'
    def test_list_nodes(self):
        """list_nodes parses the canned DescribeInstances response fully.

        Checks basic node fields, the flat ``extra`` dict, and the nested
        vpc_attributes / eip_address / operation_locks sub-dicts.
        """
        nodes = self.driver.list_nodes()
        self.assertIsNotNone(nodes)
        self.assertEqual(1, len(nodes))
        node = nodes[0]
        self.assertEqual('iZ28n7dkvovZ', node.name)
        self.assertEqual('i-28n7dkvov', node.id)
        self.assertEqual(NodeState.PENDING, node.state)
        self.assertEqual(1, len(node.public_ips))
        self.assertEqual('114.215.124.73', node.public_ips[0])
        self.assertEqual(1, len(node.private_ips))
        self.assertEqual('10.163.197.74', node.private_ips[0])
        # Values below mirror the fixture XML shipped with the test suite.
        expected_extra = {
            'image_id': 'ubuntu1404_64_20G_aliaegis_20150325.vhd',
            'description': '',
            'instance_type_family': 'ecs.t1',
            'zone_id': 'cn-qingdao-b',
            'internet_charge_type': 'PayByTraffic',
            'serial_number': 'ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3',
            'io_optimized': 'false',
            'device_available': 'true',
            'instance_network_type': 'classic',
            'hostname': 'iZ28n7dkvovZ',
            'instance_type': 'ecs.t1.small',
            'creation_time': '2015-12-27T07:35Z',
            'instance_charge_type': 'PostPaid',
            'expired_time': '2999-09-08T16:00Z'
        }
        self._validate_extras(expected_extra, node.extra)
        vpc = {
            'vpc_id': '',
            'vswitch_id': '',
            'private_ip_address': None,
            'nat_ip_address': ''
        }
        self._validate_extras(vpc, node.extra['vpc_attributes'])
        eip_address = {
            'allocation_id': '',
            'ip_address': '',
            'internet_charge_type': '',
            'bandwidth': None
        }
        self._validate_extras(eip_address, node.extra['eip_address'])
        self.assertIsNone(node.extra['operation_locks']['lock_reason'])
def test_list_nodes_with_ex_node_ids(self):
ECSMockHttp.type = 'list_nodes_ex_node_ids'
nodes = self.driver.list_nodes(ex_node_ids=['i-28n7dkvov',
'not-existed-id'])
self.assertIsNotNone(nodes)
def test_list_nodes_with_ex_filters(self):
ECSMockHttp.type = 'list_nodes_ex_filters'
nodes = self.driver.list_nodes(ex_filters={'ZoneId': self.zone})
self.assertIsNotNone(nodes)
def _validate_extras(self, expected, actual):
self.assertIsNotNone(actual)
for key, value in iter(expected.items()):
self.assertTrue(key in actual)
self.assertEqual(value, actual[key], ('extra %(key)s not equal, '
'expected: "%(expected)s", '
'actual: "%(actual)s"' %
{'key': key,
'expected': value,
'actual': actual[key]}))
    def test_create_node(self):
        """create_node forwards the full set of ex_* parameters correctly."""
        ECSMockHttp.type = 'create_node'
        name = 'test_create_node'
        # Exercises every optional creation parameter in one call; the mock
        # handler presumably validates the outgoing request — verify there.
        node = self.driver.create_node(name=name, image=self.fake_image,
                                       size=self.fake_size,
                                       ex_security_group_id='sg-28ou0f3xa',
                                       ex_description='description',
                                       ex_internet_charge_type='PayByTraffic',
                                       ex_internet_max_bandwidth_out=1,
                                       ex_internet_max_bandwidth_in=200,
                                       ex_hostname='hostname',
                                       auth=NodeAuthPassword('password'),
                                       ex_io_optimized=True,
                                       ex_system_disk={'category': 'cloud',
                                                       'disk_name': 'root',
                                                       'description': 'sys'},
                                       ex_vswitch_id='vswitch-id1',
                                       ex_private_ip_address='1.1.1.2',
                                       ex_client_token='client_token')
        self.assertIsNotNone(node)
    def test_create_node_with_data_disk(self):
        """create_node accepts an ex_data_disks specification."""
        ECSMockHttp.type = 'create_node_with_data'
        # Stored on self so the mock handler can read them back via
        # ECSMockHttp.test (set in setUp) — confirm in the handler code.
        self.name = 'test_create_node'
        self.data_disk = {
            'size': 5,
            'category': self.driver.disk_categories.CLOUD,
            'disk_name': 'data1',
            'description': 'description',
            'device': '/dev/xvdb',
            'delete_with_instance': True}
        node = self.driver.create_node(name=self.name, image=self.fake_image,
                                       size=self.fake_size,
                                       ex_security_group_id='sg-28ou0f3xa',
                                       ex_data_disks=self.data_disk)
        self.assertIsNotNone(node)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(2, len(sizes))
size = sizes[0]
self.assertEqual('ecs.t1.xsmall', size.id)
self.assertEqual('ecs.t1.xsmall', size.name)
self.assertEqual(0.5, size.ram)
self.assertEqual(1, size.extra['cpu_core_count'])
self.assertEqual('ecs.t1', size.extra['instance_type_family'])
size = sizes[1]
self.assertEqual('ecs.s2.small', size.id)
self.assertEqual('ecs.s2.small', size.name)
self.assertEqual(1.0, size.ram)
self.assertEqual(2, size.extra['cpu_core_count'])
self.assertEqual('ecs.s2', size.extra['instance_type_family'])
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertEqual(9, len(locations))
location = locations[0]
self.assertEqual('ap-southeast-1', location.id)
self.assertIsNone(location.country)
def test_create_node_without_sg_id_exception(self):
name = 'test_create_node_without_sg_id_exception'
self.assertRaises(AttributeError, self.driver.create_node,
name=name, image=self.fake_image,
size=self.fake_size)
def test_creat_node_paybytraffic_exception(self):
name = 'test_create_node_paybytraffic_exception'
self.assertRaises(AttributeError, self.driver.create_node,
name=name, image=self.fake_image,
size=self.fake_size,
ex_security_group_id='sg-id1',
ex_internet_charge_type='PayByTraffic')
def test_create_node_ex_system_disk_exception(self):
name = 'test_creat_node_ex_system_disk_exception'
self.assertRaises(AttributeError, self.driver.create_node,
name=name, image=self.fake_image,
size=self.fake_size,
ex_security_group_id='sg-id1',
ex_system_disk=None)
def test_create_node_ex_private_ip_address_exception(self):
name = 'test_create_node_ex_private_ip_address_exception'
self.assertRaises(AttributeError, self.driver.create_node,
name=name, image=self.fake_image,
size=self.fake_size,
ex_security_group_id='sg-id1',
ex_private_ip_address='1.1.1.2')
def test_reboot_node(self):
ECSMockHttp.type = 'reboot_node'
result = self.driver.reboot_node(self.fake_node)
self.assertTrue(result)
def test_reboot_node_with_ex_force_stop(self):
ECSMockHttp.type = 'reboot_node_force_stop'
result = self.driver.reboot_node(self.fake_node, ex_force_stop=True)
self.assertTrue(result)
def test_destroy_node(self):
ECSMockHttp.type = 'destroy_node'
result = self.driver.destroy_node(self.fake_node)
self.assertTrue(result)
def test_ex_start_node(self):
ECSMockHttp.type = 'start_node'
result = self.driver.ex_start_node(self.fake_node)
self.assertTrue(result)
def test_ex_stop_node(self):
ECSMockHttp.type = 'stop_node'
result = self.driver.ex_stop_node(self.fake_node)
self.assertTrue(result)
def test_stop_node_with_ex_force_stop(self):
ECSMockHttp.type = 'stop_node_force_stop'
result = self.driver.ex_stop_node(self.fake_node, ex_force_stop=True)
self.assertTrue(result)
def test_create_public_ip(self):
ECSMockHttp.type = 'create_public_ip'
result = self.driver.create_public_ip(self.fake_instance_id)
self.assertTrue(result)
    def test_list_volumes(self):
        """list_volumes parses both disks from the DescribeDisks fixture.

        The first is a detached data disk, the second an in-use system disk;
        all fields of the ``extra`` dict are verified for each.
        """
        volumes = self.driver.list_volumes()
        self.assertEqual(2, len(volumes))
        volume = volumes[0]
        self.assertEqual('d-28m5zbua0', volume.id)
        self.assertEqual('', volume.name)
        self.assertEqual(5, volume.size)
        self.assertEqual(StorageVolumeState.AVAILABLE, volume.state)
        expected_extras = {
            'region_id': 'cn-qingdao',
            'zone_id': 'cn-qingdao-b',
            'description': '',
            'type': 'data',
            'category': 'cloud',
            'image_id': '',
            'source_snapshot_id': '',
            'product_code': '',
            'portable': True,
            'instance_id': '',
            'device': '',
            'delete_with_instance': False,
            'enable_auto_snapshot': False,
            'creation_time': '2014-07-23T02:44:07Z',
            'attached_time': '2014-07-23T07:47:35Z',
            'detached_time': '2014-07-23T08:28:48Z',
            'disk_charge_type': 'PostPaid',
            'operation_locks': {'lock_reason': None}
        }
        self._validate_extras(expected_extras, volume.extra)
        volume = volumes[1]
        self.assertEqual('d-28zfrmo13', volume.id)
        self.assertEqual('ubuntu1404sys', volume.name)
        self.assertEqual(5, volume.size)
        self.assertEqual(StorageVolumeState.INUSE, volume.state)
        expected_extras = {
            'region_id': 'cn-qingdao',
            'zone_id': 'cn-qingdao-b',
            'description': 'Description',
            'type': 'system',
            'category': 'cloud',
            'image_id': 'ubuntu1404_64_20G_aliaegis_20150325.vhd',
            'source_snapshot_id': '',
            'product_code': '',
            'portable': False,
            'instance_id': 'i-28whl2nj2',
            'device': '/dev/xvda',
            'delete_with_instance': True,
            'enable_auto_snapshot': True,
            'creation_time': '2014-07-23T02:44:06Z',
            'attached_time': '2016-01-04T15:02:17Z',
            'detached_time': '',
            'disk_charge_type': 'PostPaid',
            'operation_locks': {'lock_reason': None}
        }
        self._validate_extras(expected_extras, volume.extra)
def test_list_volumes_with_ex_volume_ids(self):
ECSMockHttp.type = 'list_volumes_ex_volume_ids'
volumes = self.driver.list_volumes(ex_volume_ids=['i-28n7dkvov',
'not-existed-id'])
self.assertIsNotNone(volumes)
def test_list_volumes_with_ex_filters(self):
ECSMockHttp.type = 'list_volumes_ex_filters'
ex_filters = {'InstanceId': self.fake_node.id}
volumes = self.driver.list_volumes(ex_filters=ex_filters)
self.assertIsNotNone(volumes)
def test_list_volume_snapshots(self):
snapshots = self.driver.list_volume_snapshots(self.fake_volume)
self.assertEqual(1, len(snapshots))
def test_list_volume_snapshots_with_ex_snapshot_ids(self):
ECSMockHttp.type = 'list_volume_snapshots_ex_snapshot_ids'
ex_snapshot_ids = ['fake-snapshot1']
self.driver.list_volume_snapshots(self.fake_volume,
ex_snapshot_ids=ex_snapshot_ids)
def test_list_volume_snapshots_with_ex_filters(self):
ECSMockHttp.type = 'list_volume_snapshots_ex_filters'
ex_filters = {'InstanceId': self.fake_node.id}
self.driver.list_volume_snapshots(self.fake_volume,
ex_filters=ex_filters)
def test_create_volume(self):
ECSMockHttp.type = 'create_volume'
self.volume_size = 1
self.volume_name = 'fake-volume-name'
self.description = 'fake-description'
self.disk_category = 'system'
self.client_token = 'client_token'
volume = self.driver.create_volume(self.volume_size, self.volume_name,
snapshot=self.fake_snapshot,
ex_zone_id=self.zone,
ex_description=self.description,
ex_disk_category=self.disk_category,
ex_client_token=self.client_token)
self.assertIsNotNone(volume)
def test_create_volume_without_ex_zone_id_exception(self):
self.assertRaises(AttributeError,
self.driver.create_volume,
1, 'fake-volume-name')
def test_create_volume_snapshot(self):
ECSMockHttp.type = 'create_volume_snapshot'
self.snapshot_name = 'fake-snapshot1'
self.description = 'fake-description'
self.client_token = 'client-token'
snapshot = self.driver.create_volume_snapshot(
self.fake_volume, name=self.snapshot_name,
ex_description=self.description,
ex_client_token=self.client_token)
self.assertIsNotNone(snapshot)
def test_attach_volume(self):
self.device = '/dev/sdb'
self.delete_with_instance = True
attached = self.driver.attach_volume(
self.fake_node, self.fake_volume, device=self.device,
ex_delete_with_instance=self.delete_with_instance)
self.assertTrue(attached)
def test_detach_volume(self):
self.instance_id = 'fake-node1'
result = self.driver.detach_volume(self.fake_volume,
ex_instance_id=self.instance_id)
self.assertTrue(result)
def test_detach_volume_query_instance_id(self):
ECSMockHttp.type = 'detach_volume'
result = self.driver.detach_volume(self.fake_volume)
self.assertTrue(result)
def test_detach_volume_query_instance_id_exception(self):
self.assertRaises(AttributeError, self.driver.detach_volume,
self.fake_volume)
def test_destroy_volume(self):
ECSMockHttp.type = 'destroy_volume'
result = self.driver.destroy_volume(self.fake_volume)
self.assertTrue(result)
def test_destroy_volume_query_volumes_exception(self):
self.assertRaises(LibcloudError, self.driver.destroy_volume,
self.fake_volume)
def test_destroy_volume_state_exception(self):
ECSMockHttp.type = 'destroy_volume_state'
self.assertRaises(LibcloudError, self.driver.destroy_volume,
self.fake_volume)
def test_destroy_volume_snapshot(self):
result = self.driver.destroy_volume_snapshot(self.fake_snapshot)
self.assertTrue(result)
def test_destroy_volume_snapshot_exception(self):
self.assertRaises(AttributeError, self.driver.destroy_volume_snapshot,
self.fake_volume)
    def test_list_images(self):
        """list_images parses the single image in the DescribeImages fixture.

        Verifies basic fields, the ``extra`` dict and the nested
        disk_device_mappings sub-dict.
        """
        images = self.driver.list_images(self.fake_location)
        self.assertEqual(1, len(images))
        image = images[0]
        self.assertEqual('freebsd1001_64_20G_aliaegis_20150527.vhd', image.id)
        self.assertEqual('freebsd1001_64_20G_aliaegis_20150527.vhd',
                         image.name)
        expected_extra = {
            'image_version': '1.0.0',
            'os_type': 'linux',
            'platform': 'Freebsd',
            'architecture': 'x86_64',
            'description': 'freebsd1001_64_20G_aliaegis_20150527.vhd',
            'size': 20,
            'image_owner_alias': 'system',
            'os_name': 'FreeBSD 10.1 64位',
            'product_code': '',
            'is_subscribed': False,
            'progress': '100%',
            'creation_time': '2015-06-19T07:25:42Z',
            'usage': 'instance',
            'is_copied': False
        }
        self._validate_extras(expected_extra, image.extra)
        expected_dev_mappings = {
            'snapshot_id': '',
            'size': 20,
            'device': '/dev/xvda',
            'format': '',
            'import_oss_bucket': '',
            'import_oss_object': ''
        }
        self._validate_extras(expected_dev_mappings,
                              image.extra['disk_device_mappings'])
def test_list_images_with_ex_image_ids(self):
ECSMockHttp.type = 'list_images_ex_image_ids'
self.driver.list_images(location=self.fake_location,
ex_image_ids=[self.fake_image.id,
'not-existed'])
def test_list_images_with_ex_image_ids_type_exception(self):
self.assertRaises(AttributeError, self.driver.list_images,
location=self.fake_location,
ex_image_ids={'image_ids': 'id1,id2'})
def test_list_images_with_ex_filters(self):
ECSMockHttp.type = 'list_images_ex_filters'
ex_filters = {'Status': 'Available'}
self.driver.list_images(location=self.fake_location,
ex_filters=ex_filters)
def test_list_images_multiple_pages(self):
ECSMockHttp.type = 'list_images_pages'
images = self.driver.list_images()
self.assertEqual(2, len(images))
def test_create_image(self):
self.image_name = 'fake-image1'
self.description = 'description'
self.image_version = '1.0.0'
self.client_token = 'client_token'
image = self.driver.create_image(None, self.image_name,
self.description,
ex_snapshot_id=self.fake_snapshot.id,
ex_image_version=self.image_version,
ex_client_token=self.client_token)
self.assertIsNotNone(image)
def test_creaet_image_exception(self):
self.assertRaises(AttributeError, self.driver.create_image,
None, None)
def test_delete_image(self):
result = self.driver.delete_image(self.fake_image)
self.assertTrue(result)
def test_get_image(self):
ECSMockHttp.type = 'get_image'
image = self.driver.get_image(self.fake_image.id)
self.assertIsNotNone(image)
def test_get_image_not_found_exception(self):
ECSMockHttp.type = 'get_image_not_found'
self.assertRaises(LibcloudError, self.driver.get_image,
self.fake_image.id)
def test_copy_image(self):
self.image_name = 'copied-image1'
self.description = 'description'
self.dest_region = 'cn-hangzhou'
self.client_token = 'client-token'
image = self.driver.copy_image(
self.region, self.fake_image,
self.image_name,
description=self.description,
ex_destination_region_id=self.dest_region,
ex_client_token=self.client_token)
self.assertIsNotNone(image)
def test_copy_image_in_the_same_region(self):
ECSMockHttp.type = 'copy_image_same_region'
image = self.driver.copy_image(self.region, self.fake_image, None)
self.assertIsNotNone(image)
def test_ex_create_security_group(self):
self.sg_description = 'description'
self.client_token = 'client-token'
sg_id = self.driver.ex_create_security_group(
description=self.sg_description, client_token=self.client_token)
self.assertEqual('sg-F876FF7BA', sg_id)
    def test_ex_list_security_groups(self):
        """ex_list_security_groups should parse the single fixture group."""
        sgs = self.driver.ex_list_security_groups()
        self.assertEqual(1, len(sgs))
        sg = sgs[0]
        self.assertEqual('sg-28ou0f3xa', sg.id)
        self.assertEqual('sg-28ou0f3xa', sg.name)
        self.assertEqual('System created security group.', sg.description)
        self.assertEqual('', sg.vpc_id)
        self.assertEqual('2015-06-26T08:35:30Z', sg.creation_time)
def test_ex_join_security_group(self):
result = self.driver.ex_join_security_group(
self.fake_node, group_id=self.fake_security_group_id)
self.assertTrue(result)
def test_ex_leave_security_group(self):
result = self.driver.ex_leave_security_group(
self.fake_node, group_id=self.fake_security_group_id)
self.assertTrue(result)
def test_ex_delete_security_group_by_id(self):
result = self.driver.ex_delete_security_group_by_id(
group_id=self.fake_security_group_id)
self.assertTrue(result)
    def test_ex_modify_security_group_by_id(self):
        """ex_modify_security_group_by_id should send name and description."""
        # Read back by ECSMockHttp._ModifySecurityGroupAttribute.
        self.sg_name = 'name'
        self.sg_description = 'description'
        result = self.driver.ex_modify_security_group_by_id(
            group_id=self.fake_security_group_id,
            name=self.sg_name,
            description=self.sg_description)
        self.assertTrue(result)
    def test_ex_list_security_groups_with_ex_filters(self):
        """ex_filters should be propagated into the request parameters."""
        ECSMockHttp.type = 'list_sgs_filters'
        # Read back by _list_sgs_filters_DescribeSecurityGroups.
        self.vpc_id = 'vpc1'
        ex_filters = {'VpcId': self.vpc_id}
        sgs = self.driver.ex_list_security_groups(ex_filters=ex_filters)
        self.assertEqual(1, len(sgs))
    def test_ex_list_security_group_attributes(self):
        """Attributes of a group should be parsed from the fixture."""
        # Read back by ECSMockHttp._DescribeSecurityGroupAttribute.
        self.sga_nictype = 'internet'
        sgas = self.driver.ex_list_security_group_attributes(
            group_id=self.fake_security_group_id, nic_type=self.sga_nictype)
        self.assertEqual(1, len(sgas))
        sga = sgas[0]
        self.assertEqual('ALL', sga.ip_protocol)
        self.assertEqual('-1/-1', sga.port_range)
        self.assertEqual('Accept', sga.policy)
        self.assertEqual('internet', sga.nic_type)
    def test_ex_list_zones(self):
        """ex_list_zones should parse the zone and its resource lists."""
        zones = self.driver.ex_list_zones()
        self.assertEqual(1, len(zones))
        zone = zones[0]
        self.assertEqual('cn-qingdao-b', zone.id)
        self.assertEqual(self.driver, zone.driver)
        # Fixture uses a localized (Chinese) zone display name.
        self.assertEqual('青岛可用区B', zone.name)
        self.assertIsNotNone(zone.available_resource_types)
        self.assertEqual('IoOptimized', zone.available_resource_types[0])
        self.assertIsNotNone(zone.available_instance_types)
        self.assertEqual('ecs.m2.medium', zone.available_instance_types[0])
        self.assertIsNotNone(zone.available_disk_categories)
        self.assertEqual('cloud_ssd', zone.available_disk_categories[0])
class ECSMockHttp(MockHttp):
    """Mock HTTP connection used by the Aliyun ECS driver tests.

    Handler dispatch follows the ``MockHttp`` convention: a request for
    API action ``<Action>`` is routed to ``_<type>_<Action>`` when a test
    set ``ECSMockHttp.type``, or to ``_<Action>`` otherwise.  Handlers
    validate the query parameters built by the driver (often against
    attributes the test stored on itself, reachable here as
    ``self.test.<attr>``) and reply with a canned XML fixture.
    """
    fixtures = ComputeFileFixtures('ecs')

    # --- instance (node) listing / lifecycle -------------------------------
    def _DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _list_nodes_ex_node_ids_DescribeInstances(self, method, url, body,
                                                  headers):
        params = {'InstanceIds': '["i-28n7dkvov", "not-existed-id"]'}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeInstances(method, url, body, headers)

    def _list_nodes_ex_filters_DescribeInstances(self, method, url, body,
                                                 headers):
        params = {'ZoneId': self.test.zone}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeInstances(method, url, body, headers)

    def _DescribeInstanceTypes(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_instance_types.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DescribeRegions(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_regions.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_CreateInstance(self, method, url, body, headers):
        params = {'SecurityGroupId': 'sg-28ou0f3xa',
                  'Description': 'description',
                  'InternetChargeType': 'PayByTraffic',
                  'InternetMaxBandwidthOut': '1',
                  'InternetMaxBandwidthIn': '200',
                  'HostName': 'hostname',
                  'Password': 'password',
                  'IoOptimized': 'optimized',
                  'SystemDisk.Category': 'cloud',
                  'SystemDisk.DiskName': 'root',
                  'SystemDisk.Description': 'sys',
                  'VSwitchId': 'vswitch-id1',
                  'PrivateIpAddress': '1.1.1.2',
                  'ClientToken': 'client_token'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('create_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_StartInstance(self, method, url, body, headers):
        resp_body = self.fixtures.load('start_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_with_data_CreateInstance(self, method, url, body,
                                              headers):
        params = {'SecurityGroupId': 'sg-28ou0f3xa',
                  'DataDisk.1.Size': '5',
                  'DataDisk.1.Category': 'cloud',
                  'DataDisk.1.DiskName': 'data1',
                  'DataDisk.1.Description': 'description',
                  'DataDisk.1.Device': '/dev/xvdb',
                  'DataDisk.1.DeleteWithInstance': 'true'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_with_data_DescribeInstances(self, method, url, body,
                                                 headers):
        resp_body = self.fixtures.load('create_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_node_with_data_StartInstance(self, method, url, body,
                                             headers):
        resp_body = self.fixtures.load('start_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_RebootInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'false'})
        resp_body = self.fixtures.load('reboot_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('reboot_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_force_stop_RebootInstance(self, method, url, body,
                                               headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'true'})
        resp_body = self.fixtures.load('reboot_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _reboot_node_force_stop_DescribeInstances(self, method, url, body,
                                                  headers):
        resp_body = self.fixtures.load('reboot_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('destroy_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_node_DeleteInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id})
        resp_body = self.fixtures.load('delete_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _start_node_StartInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id})
        resp_body = self.fixtures.load('start_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _start_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('reboot_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_StopInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'false'})
        resp_body = self.fixtures.load('stop_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_DescribeInstances(self, method, url, body, headers):
        resp_body = self.fixtures.load('stop_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_force_stop_StopInstance(self, method, url, body, headers):
        node_id = self.test.fake_node.id
        self.assertUrlContainsQueryParams(url, {'InstanceId': node_id,
                                                'ForceStop': 'true'})
        resp_body = self.fixtures.load('stop_instance.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _stop_node_force_stop_DescribeInstances(self, method, url, body,
                                                headers):
        resp_body = self.fixtures.load('stop_node_describe_instances.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    # --- volumes and snapshots ---------------------------------------------
    def _DescribeDisks(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_disks.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _list_volumes_ex_volume_ids_DescribeDisks(self, method, url, body,
                                                  headers):
        region = self.test.region
        params = {'DiskIds': '["i-28n7dkvov", "not-existed-id"]',
                  'RegionId': region}
        self.assertUrlContainsQueryParams(url, params)
        # NOTE(review): this delegates to _DescribeInstances (the instances
        # fixture), not _DescribeDisks; looks like a copy-paste — confirm
        # against the corresponding test's expectations before changing.
        return self._DescribeInstances(method, url, body, headers)

    def _list_volumes_ex_filters_DescribeDisks(self, method, url, body,
                                               headers):
        params = {'InstanceId': self.test.fake_node.id}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeDisks(method, url, body, headers)

    def _DescribeSnapshots(self, method, url, body, headers):
        region = self.test.region
        volume_id = self.test.fake_volume.id
        params = {'RegionId': region,
                  'DiskId': volume_id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('describe_snapshots.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _list_volume_snapshots_ex_snapshot_ids_DescribeSnapshots(
            self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'SnapshotIds': '["fake-snapshot1"]'}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeSnapshots(method, url, body, headers)

    def _list_volume_snapshots_ex_filters_DescribeSnapshots(self, method, url, body,
                                                            headers):
        params = {'InstanceId': self.test.fake_node.id}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeSnapshots(method, url, body, headers)

    def _create_volume_CreateDisk(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'DiskName': self.test.volume_name,
                  'Size': str(self.test.volume_size),
                  'ZoneId': self.test.zone,
                  'SnapshotId': self.test.fake_snapshot.id,
                  'Description': self.test.description,
                  'DiskCategory': self.test.disk_category,
                  'ClientToken': self.test.client_token}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_disk.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_volume_DescribeDisks(self, method, url, body, headers):
        resp_body = self.fixtures.load('create_volume_describe_disks.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_volume_snapshot_CreateSnapshot(self, method, url, body,
                                               headers):
        params = {'DiskId': self.test.fake_volume.id,
                  'SnapshotName': self.test.snapshot_name,
                  'Description': self.test.description,
                  'ClientToken': self.test.client_token}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_snapshot.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_volume_snapshot_DescribeSnapshots(self, method, url, body,
                                                  headers):
        resp_body = self.fixtures.load('describe_snapshots.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _AttachDisk(self, method, url, body, headers):
        delete_with_instance = str(self.test.delete_with_instance).lower()
        params = {'InstanceId': self.test.fake_node.id,
                  'DiskId': self.test.fake_volume.id,
                  'Device': self.test.device,
                  'DeleteWithInstance': delete_with_instance}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('attach_disk.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DetachDisk(self, method, url, body, headers):
        params = {'DiskId': self.test.fake_volume.id,
                  'InstanceId': self.test.instance_id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('detach_disk.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _detach_volume_DescribeDisks(self, method, url, body, headers):
        params = {'DiskIds': '["' + self.test.fake_volume.id + '"]'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('detach_volume_describe_disks.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _detach_volume_DetachDisk(self, method, url, body, headers):
        params = {'DiskId': self.test.fake_volume.id,
                  'InstanceId': 'i-28whl2nj2'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('detach_disk.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_volume_DescribeDisks(self, method, url, body, headers):
        params = {'DiskIds': '["' + self.test.fake_volume.id + '"]'}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('destroy_volume_describe_disks.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_volume_DeleteDisk(self, method, url, body, headers):
        params = {'DiskId': self.test.fake_volume.id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('delete_disk.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _destroy_volume_state_DescribeDisks(self, method, url, body, headers):
        return self._detach_volume_DescribeDisks(method, url, body, headers)

    # NOTE(review): parameter is named 'header' (singular), unlike every
    # other handler's 'headers'; harmless but inconsistent.
    def _DeleteSnapshot(self, method, url, body, header):
        params = {'SnapshotId': self.test.fake_snapshot.id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('delete_snapshot.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    # --- images ------------------------------------------------------------
    def _DescribeImages(self, method, url, body, headers):
        params = {'RegionId': self.test.fake_location.id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('describe_images.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _list_images_pages_DescribeImages(self, method, url, body, headers):
        # Serve a different fixture for the second page of the listing.
        if 'PageNumber=2' in url:
            resp_body = self.fixtures.load('pages_describe_images_page2.xml')
        else:
            resp_body = self.fixtures.load('pages_describe_images.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _list_images_ex_image_ids_DescribeImages(self, method, url, body,
                                                 headers):
        params = {'ImageId': self.test.fake_image.id + ',not-existed'}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeImages(method, url, body, headers)

    def _list_images_ex_filters_DescribeImages(self, method, url, body,
                                               headers):
        params = {'Status': 'Available'}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeImages(method, url, body, headers)

    def _CreateImage(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'ImageName': self.test.image_name,
                  'Description': self.test.description,
                  'SnapshotId': self.test.fake_snapshot.id,
                  'ImageVersion': self.test.image_version,
                  'ClientToken': self.test.client_token}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_image.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DeleteImage(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'ImageId': self.test.fake_image.id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('delete_image.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _get_image_DescribeImages(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'ImageId': self.test.fake_image.id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('describe_images.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _get_image_not_found_DescribeImages(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'ImageId': self.test.fake_image.id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('get_image_describe_images.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _CopyImage(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'ImageId': self.test.fake_image.id,
                  'DestinationRegionId': self.test.dest_region,
                  'DestinationImageName': self.test.image_name,
                  'DestinationDescription': self.test.description,
                  'ClientToken': self.test.client_token}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('copy_image.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _copy_image_same_region_CopyImage(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'ImageId': self.test.fake_image.id,
                  'DestinationRegionId': self.test.region}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('copy_image.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _copy_image_same_region_DescribeImages(self, method, url, body,
                                               headers):
        return self._DescribeImages(method, url, body, headers)

    # --- security groups and zones -----------------------------------------
    def _DescribeSecurityGroups(self, method, url, body, headers):
        params = {'RegionId': self.test.region}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('describe_security_groups.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _JoinSecurityGroup(self, method, url, body, headers):
        params = {'InstanceId': self.test.fake_node.id,
                  'SecurityGroupId': self.test.fake_security_group_id}
        self.assertUrlContainsQueryParams(url, params)
        body = self.fixtures.load('join_security_group_by_id.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _LeaveSecurityGroup(self, method, url, body, headers):
        params = {'InstanceId': self.test.fake_node.id,
                  'SecurityGroupId': self.test.fake_security_group_id}
        self.assertUrlContainsQueryParams(url, params)
        body = self.fixtures.load('leave_security_group_by_id.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _list_sgs_filters_DescribeSecurityGroups(self, method, url, body,
                                                 headers):
        params = {'VpcId': self.test.vpc_id}
        self.assertUrlContainsQueryParams(url, params)
        return self._DescribeSecurityGroups(method, url, body, headers)

    def _CreateSecurityGroup(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'Description': self.test.sg_description,
                  'ClientToken': self.test.client_token}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('create_security_group.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DeleteSecurityGroup(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'SecurityGroupId': self.test.fake_security_group_id}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('delete_security_group_by_id.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _ModifySecurityGroupAttribute(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'SecurityGroupId': self.test.fake_security_group_id,
                  'SecurityGroupName': self.test.sg_name,
                  'Description': self.test.sg_description}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('modify_security_group_by_id.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DescribeSecurityGroupAttribute(self, method, url, body, headers):
        params = {'RegionId': self.test.region,
                  'SecurityGroupId': self.test.fake_security_group_id,
                  'NicType': self.test.sga_nictype}
        self.assertUrlContainsQueryParams(url, params)
        resp_body = self.fixtures.load('describe_security_group_attributes.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _DescribeZones(self, method, url, body, headers):
        resp_body = self.fixtures.load('describe_zones.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])

    def _create_public_ip_AllocatePublicIpAddress(self, method, url, body, headers):
        resp_body = self.fixtures.load('create_public_ip.xml')
        return (httplib.OK, resp_body, {}, httplib.responses[httplib.OK])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,sm7150-videocc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Video Clock & Reset Controller on SM7150
maintainers:
- Danila Tikhonov <danila@jiaxyga.com>
- David Wronek <david@mainlining.org>
- Jens Reidel <adrian@travitia.xyz>
description: |
Qualcomm video clock control module provides the clocks, resets and power
domains on SM7150.
See also: include/dt-bindings/clock/qcom,videocc-sm7150.h
properties:
compatible:
const: qcom,sm7150-videocc
clocks:
items:
- description: Board XO source
- description: Board Always On XO source
power-domains:
maxItems: 1
description:
CX power domain.
required:
- compatible
- clocks
- power-domains
allOf:
- $ref: qcom,gcc.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
videocc: clock-controller@ab00000 {
compatible = "qcom,sm7150-videocc";
reg = <0x0ab00000 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&rpmhcc RPMH_CXO_CLK_A>;
power-domains = <&rpmhpd RPMHPD_CX>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/qcom,sm7150-videocc.yaml |
"""Definitions for the `Line` class."""
import numexpr as ne
import numpy as np
from astropy import constants as c
from astropy import units as u
from mosfit.modules.seds.sed import SED
from mosfit.constants import SQRT_2_PI
# Important: Only define one ``Module`` class per file.
class Line(SED):
    """Line spectral energy distribution, modifies existing SED.

    Subtracts a Gaussian (in time) fraction of the input SED flux and
    re-emits it as a Gaussian (in wavelength) emission line.
    """

    # Speed of light in CGS units (cm / s).
    C_CONST = c.c.cgs.value

    def process(self, **kwargs):
        """Process module.

        Expects ``seds`` from an upstream SED module plus the line
        parameters (wavelength, width, time, duration, amplitude).
        Returns the modified ``seds`` (ergs / s / Angstrom).
        """
        kwargs = self.prepare_input(self.key('luminosities'), **kwargs)
        prt = self._printer
        self._rest_t_explosion = kwargs[self.key('resttexplosion')]
        self._times = kwargs[self.key('rest_times')]
        self._seds = kwargs.get(self.key('seds'))
        self._bands = kwargs['all_bands']
        self._band_indices = kwargs['all_band_indices']
        self._sample_wavelengths = kwargs['sample_wavelengths']
        self._frequencies = kwargs['all_frequencies']
        self._luminosities = kwargs[self.key('luminosities')]
        self._line_wavelength = kwargs[self.key('line_wavelength')]
        self._line_width = kwargs[self.key('line_width')]
        self._line_time = kwargs[self.key('line_time')]
        self._line_duration = kwargs[self.key('line_duration')]
        self._line_amplitude = kwargs[self.key('line_amplitude')]
        # Short aliases referenced by name inside the numexpr expression
        # below, so they must stay in the local frame (hence the noqa).
        lw = self._line_wavelength  # noqa: F841
        ls = self._line_width
        cc = self.C_CONST
        # Some temp vars for speed.
        zp1 = 1.0 + kwargs[self.key('redshift')]
        # c / (1 + z), expressed so that frequency -> rest wavelength in
        # Angstroms in the loop below.
        czp1A = cc / (zp1 * u.Angstrom.cgs.scale)
        # Time-dependent Gaussian envelope of the line strength.
        amps = self._line_amplitude * np.array([
            np.exp(-0.5 * (
                (x - self._rest_t_explosion - self._line_time) /
                self._line_duration) ** 2) for x in self._times])
        if self._seds is None:
            raise ValueError(prt.message('line_sed'))
        # Remove the line's share of flux from the continuum SEDs.
        seds = [x * (1.0 - amps[xi]) for xi, x in enumerate(self._seds)]
        # Normalized Gaussian line luminosity per observation.
        amps *= self._luminosities / (ls * SQRT_2_PI)
        amps_dict = {}
        evaled = False
        for li, lum in enumerate(self._luminosities):
            bi = self._band_indices[li]
            if lum == 0.0:
                continue
            # Cache key: band index, or the rest wavelength for
            # frequency-only observations (bi < 0).
            bind = czp1A / self._frequencies[li] if bi < 0 else bi
            if bind not in amps_dict:
                # Leave `rest_wavs` in Angstroms.
                if bi >= 0:
                    rest_wavs = self._sample_wavelengths[bi] / zp1
                else:
                    rest_wavs = np.array([bind])  # noqa: F841
                # First call compiles the expression; re_evaluate reuses the
                # compiled expression with the current local values of
                # `rest_wavs`, `lw` and `ls`.
                if not evaled:
                    amps_dict[bind] = ne.evaluate(
                        'exp(-0.5 * ((rest_wavs - lw) / ls) ** 2)')
                    evaled = True
                else:
                    amps_dict[bind] = ne.re_evaluate()
            seds[li] += amps[li] * amps_dict[bind]
        # seds[li][np.isnan(seds[li])] = 0.0
        # Units of `seds` is ergs / s / Angstrom.
        return {'sample_wavelengths': self._sample_wavelengths,
                self.key('seds'): seds}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import json
import textwrap
import intelmq.lib.harmonization
from intelmq import HARMONIZATION_CONF_FILE
# Emit the harmonization field table as a markdown document.
print("""
Harmonization field names
=========================
|Section|Name|Type|Description|
|:------|:---|:---|:----------|""")
with open(HARMONIZATION_CONF_FILE) as conf_file:
    field_defs = json.load(conf_file)['event']
for field_name, field_def in sorted(field_defs.items()):
    # Section label: title-cased dotted prefix, e.g. "source.ip" -> "Source".
    section = ' '.join(part.title() for part in field_name.split('.')[:-1])
    print(f"|{section}|{field_name}|{field_def['type']}"
          f"|{field_def['description']}|")
print("""
Harmonization types
-------------------
""")
# One markdown section per harmonization type class.
for attr_name in sorted(dir(intelmq.lib.harmonization)):
    if attr_name == 'GenericType' or attr_name.startswith('__'):
        continue
    attr = getattr(intelmq.lib.harmonization, attr_name)
    try:
        # Non-class attributes make issubclass raise TypeError; skip them.
        is_harmonization_type = issubclass(
            attr, intelmq.lib.harmonization.GenericType)
    except TypeError:
        continue
    if not is_harmonization_type:
        continue
    raw_doc = getattr(attr, '__doc__', '')
    doc = '' if raw_doc is None else textwrap.dedent(raw_doc)
    print(f"""### {attr_name}
{doc}
""")
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from time import time
from datetime import datetime
import struct
import os
import socket
import logging
log = logging.getLogger(__name__)
# Link protocol version in effect before version negotiation.
# NOTE(review): consulted as a module-level global by the cell pack()
# methods; presumably rebound after the VERSIONS handshake — confirm.
proto_version = 3
class CellError(Exception):
    """Raised when a cell cannot be parsed or contains invalid contents."""
class TorError(Exception):
    """Raised for generic Tor protocol-level failures."""
class FixedCell(object):
    """
    Base class for fixed-length (512-byte) cells.
    """
    cell_type = -1

    def __init__(self, circuit_id=None):
        # Fixed cells always report fixed=True; circuit id defaults to 0.
        self.fixed = True
        self.circuit_id = 0 if not circuit_id else circuit_id

    def unpack(self, data):
        """Store the raw payload; subclasses decode their own fields."""
        self.data = data

    def pack(self, data):
        """
        Pack the circuit id, cell type, and payload (padded to 509 bytes).

        The circuit-id field is 2 bytes before link protocol 4 and 4 bytes
        from protocol 4 onwards.
        """
        fmt = '>HB509s' if proto_version < 4 else '>IB509s'
        return struct.pack(fmt, self.circuit_id, self.cell_type, data)
class VariableCell(object):
    """
    Base class for variable-length cells (explicit 2-byte length header).
    """
    cell_type = -1

    def __init__(self, circuit_id=None):
        self.fixed = False
        self.circuit_id = 0 if not circuit_id else circuit_id

    def has_len(self):
        """Return True once the length header has been parsed."""
        return hasattr(self, 'length')

    def len(self, data=None):
        """
        With *data*, parse the 2-byte length header; without, return the
        previously-parsed length (or None if not parsed yet).
        """
        if data:
            (self.length,) = struct.unpack('>H', data[:2])
        elif self.has_len():
            return self.length

    def unpack(self, data):
        """Keep only the first ``length`` bytes of the payload."""
        self.data = data[:self.length]

    def pack(self, data):
        """
        Pack circuit id, cell type, payload length, then the payload.

        The circuit-id field widens from 2 to 4 bytes at link protocol 4.
        """
        fmt = '>HBH' if proto_version < 4 else '>IBH'
        header = struct.pack(fmt, self.circuit_id, self.cell_type, len(data))
        return header + data
class Relay(FixedCell):
    """
    Relay cell (command 3).

    ``self.data`` is either the raw packed/encrypted payload (a str), or —
    after :meth:`parse` / :meth:`init_relay` — a dict holding the decoded
    relay header fields.
    """
    cell_type = 3

    def get_str(self, include_digest=True):
        """
        Returns the packed data without sending so that it can be encrypted.

        When ``include_digest`` is False the 4-byte digest field is zeroed,
        as needed while the running digest is being computed.
        """
        if isinstance(self.data, str):
            return self.data
        if not self.data['data']:
            self.data['data'] = ''
        if include_digest:
            digest = self.data['digest']
        else:
            digest = '\x00' * 4
        # Relay header: command, 'recognized' (0), stream id, digest,
        # payload length, then the payload padded to 498 bytes.
        return struct.pack('>BHH4sH498s', self.data['command'], 0,
            self.data['stream_id'], digest, len(self.data['data']), self.data['data'])

    def parse(self):
        """
        Parse a received relay cell after decryption. This currently can't be implemented
        as a part of the unpack() function because the data must first be decrypted.

        Raises :class:`CellError` if the 'recognized' field is non-zero, the
        length exceeds the payload, or the command byte is unknown.
        """
        headers = struct.unpack('>BHH4sH', self.data[:11])
        self.data = self.data[11:]
        # A non-zero 'recognized' field (headers[1]) means the cell was not
        # fully decrypted, i.e. it is not addressed to this hop.
        if len(self.data) < headers[4] or headers[1]:
            raise CellError('Invalid relay packet (possibly not from this OR).')
        try:
            text = relay_commands[headers[0]]
        except IndexError:
            raise CellError('Invalid relay packet command.')
        self.data = {
            'command': headers[0],
            'command_text': text,
            'recognized': headers[1],
            'stream_id': headers[2],
            'digest': headers[3],
            'length': headers[4],
            'data': self.data[:headers[4]]
        }

    def pack(self, data):
        # The payload was already serialized via get_str(); ignore the
        # argument and pack self.data.
        return super(Relay, self).pack(self.data)

    def init_relay(self, data):
        """
        Set the relay cell data (dict of relay header fields).
        """
        self.data = data
class Padding(FixedCell):
    """Padding cell (command 0); carries no meaningful payload."""
    cell_type = 0
class Destroy(FixedCell):
    """
    Destroy cell (command 4): circuit teardown.

    The first payload byte is the DESTROY reason code; unpacking always
    raises :class:`TorError` carrying the human-readable reason.
    """
    cell_type = 4

    def unpack(self, data):
        """
        Decode the reason byte and raise TorError describing the teardown.
        """
        super(Destroy, self).unpack(data)
        reason = self.data[0]
        # Indexing bytes yields an int on Python 3 but a 1-char string on
        # Python 2 (same compat idiom as Netinfo/Certs in this module).
        if not isinstance(reason, int):
            reason = struct.unpack('>B', reason)[0]
        # Reason strings indexed by the DESTROY error code.  A comma was
        # previously missing after the "dirty or old" entry, which silently
        # merged two reasons and shifted every code >= 10.
        reasons = [
            'No reason given.', 'Tor protocol violation.', 'Internal error.',
            'A client sent a TRUNCATE command.',
            'Not currently operating; trying to save bandwidth.',
            'Out of memory, sockets, or circuit IDs.',
            'Unable to reach relay.',
            'Connected to relay, but its OR identity was not as expected.',
            'The OR connection that was carrying this circuit died.',
            'The circuit has expired for being dirty or old.',
            'Circuit construction took too long.',
            'The circuit was destroyed w/o client TRUNCATE.',
            'Request for unknown hidden service.'
        ]
        raise TorError('Circuit closed: %s' % reasons[reason])
class CreateFast(FixedCell):
    """
    CreateFast cell (command 5): carries 20 random bytes of key material.
    """
    cell_type = 5

    def __init__(self, circuit_id=None):
        super(CreateFast, self).__init__(circuit_id=circuit_id)
        # Fresh key material generated once per cell.
        self.key_material = os.urandom(20)

    def pack(self, data):
        payload = struct.pack('>20s', self.key_material)
        return super(CreateFast, self).pack(payload)
class CreatedFast(FixedCell):
    """
    CreatedFast cell (command 6): key material plus derivative key digest.
    """
    cell_type = 6

    def unpack(self, data):
        """
        Split the payload into the two 20-byte fields.
        """
        super(CreatedFast, self).unpack(data)
        fields = struct.unpack('>20s20s', self.data[:40])
        self.key_material, self.derivative_key = fields
class Versions(VariableCell):
    """
    Versions cell (command 7): link protocol version negotiation.
    """
    cell_type = 7

    def unpack(self, data):
        """
        Decode the peer's list of 16-bit version numbers.
        """
        super(Versions, self).unpack(data)
        count = len(self.data) // 2
        self.versions = struct.unpack('>%dH' % count, self.data)

    def pack(self, data):
        """
        Advertise the versions this implementation speaks (3 and 4).
        """
        payload = struct.pack('>HH', 3, 4)
        return super(Versions, self).pack(payload)
class Netinfo(FixedCell):
    """
    Netinfo cell (command 8): timestamp plus each side's addresses.
    """
    cell_type = 8

    def unpack(self, data):
        """
        Parse out netinfo: timestamp, our address as seen by the peer, and
        the list of the peer's OR addresses.
        """
        super(Netinfo, self).unpack(data)
        data = self.data
        # 4-byte timestamp; currently unused (shadows the imported time()).
        time = struct.unpack('>I', data[:4])[0]
        data = data[4:]
        # decode our IP address
        host_type, address, data = self.decode_ip(data)
        self.our_address = address
        self.router_addresses = []
        # iterate over OR addresses.
        num_addresses = data[0]
        # bytes indexing yields int on Python 3, a 1-char string on Python 2
        if not isinstance(num_addresses, int):
            num_addresses = struct.unpack('B', num_addresses)[0]
        data = data[1:]
        for _ in range(num_addresses):
            host_type, address, data = self.decode_ip(data)
            self.router_addresses.append(address)

    def decode_ip(self, data):
        """
        Decode IPv4 and IPv6 addresses.

        Returns ``(host_type, address, remaining_data)``; host type 4 is
        IPv4, 6 is IPv6, anything else raises :class:`CellError`.
        """
        host_type, size = struct.unpack('>BB', data[:2])
        data = data[2:]
        address = struct.unpack('>%ds' % size, data[:size])[0]
        data = data[size:]
        if host_type == 4:
            address = socket.inet_ntop(socket.AF_INET, address)
        elif host_type == 6:
            address = socket.inet_ntop(socket.AF_INET6, address)
        else:
            raise CellError('Do we allow hostnames in NETINFO?')
        return host_type, address, data

    def pack(self, data):
        """
        Pack our own netinfo.

        ``data`` is a dict with 'other' (the peer's IP) and 'me' (ours).
        """
        ips = data
        data = struct.pack('>I', int(time()))
        data += self.encode_ip(ips['other'])
        data += struct.pack('>B', 1)
        data += self.encode_ip(ips['me'])
        return super(Netinfo, self).pack(data)

    def encode_ip(self, ip):
        """
        Encode an IPv4 address (type 4, length 4, packed bytes).
        """
        return struct.pack('>BB', 4, 4) + socket.inet_aton(ip)
class RelayEarly(Relay):
    """Same wire format as Relay, sent with command 9 instead of 3."""
    cell_type = 9
class Create2(FixedCell):
    """
    Create2 cell (command 10): typed handshake payload.
    """
    cell_type = 10

    def pack(self, data):
        # Handshake type 0x2 followed by the handshake data length.
        header = struct.pack('>HH', 0x2, len(data))
        return super(Create2, self).pack(header + data)
class Created2(FixedCell):
    """
    Created2 cell (command 11): server handshake reply (Y and auth).
    """
    cell_type = 11

    def unpack(self, data):
        """Extract the 32-byte Y value and 32-byte auth field."""
        super(Created2, self).unpack(data)
        _length, self.Y, self.auth = struct.unpack('>H32s32s', data[:66])
class Certs(VariableCell):
    """
    Certs cell (command 129): the responder's certificates.
    """
    cell_type = 129

    def unpack(self, data):
        """
        Unpack a certs cell. Parses out all of the sent certs and does *very*
        basic validation: one cert per type, type <= 3, and the certificate's
        validity window must include the current time.

        Raises :class:`CellError` on duplicate/invalid types or expiry.
        """
        super(Certs, self).unpack(data)
        data = self.data
        num_certs = data[0]
        # bytes indexing yields int on Python 3, a 1-char string on Python 2
        if not isinstance(num_certs, int):
            num_certs = struct.unpack('>B', num_certs)[0]
        data = data[1:]
        self.certs = {}
        for _ in range(num_certs):
            # cert type (1 byte) then DER length (2 bytes)
            cert_info = struct.unpack('>BH', data[:3])
            data = data[3:]
            cert_type = cert_info[0]
            cert = struct.unpack('>%ds' % cert_info[1], data[:cert_info[1]])[0]
            data = data[cert_info[1]:]
            # we only want one of each certificate type
            if cert_type in self.certs or int(cert_type) > 3:
                raise CellError('Duplicate or invalid certificate received.')
            # load the certificate and check its validity window; the
            # certificate's not-before/not-after are naive UTC datetimes,
            # so compare against UTC (datetime.now() is local time).
            cert = x509.load_der_x509_certificate(cert, default_backend())
            now = datetime.utcnow()
            if cert.not_valid_before > now or cert.not_valid_after < now:
                log.error('got invalid certificate date.')
                raise CellError('Certificate expired.')
            self.certs[cert_type] = cert
            log.info('got cert type %d, hash: %s' % (cert_type, cert))
class AuthChallenge(VariableCell):
    """
    AUTH_CHALLENGE cell.
    """
    cell_type = 130

    def unpack(self, data):
        """
        Unpack the auth challenge (a 32-byte challenge plus a 2-byte
        method count).  Currently not doing anything with it: the values
        are unpacked only to check the payload is well-formed, then
        discarded.
        """
        super(AuthChallenge, self).unpack(data)
        # Result intentionally discarded; see docstring.
        struct.unpack('>32sH', self.data[:34])
def cell_type_to_name(cell_type):
    """
    Convert a numeric cell type to the name of its cell class, or the
    empty string for an unknown type.
    """
    cell_class = cell_types.get(cell_type)
    return cell_class.__name__ if cell_class is not None else ''
def relay_name_to_command(name):
    """
    Convert a relay command name to its numeric command, or -1 for an
    unknown name.
    """
    try:
        return relay_commands.index(name)
    except ValueError:
        return -1
# Map of cell-type numbers to the classes that parse/build them.
cell_types = {
    0: Padding,
    3: Relay,
    4: Destroy,
    5: CreateFast,
    6: CreatedFast,
    7: Versions,
    8: Netinfo,
    9: RelayEarly,
    10: Create2,
    11: Created2,
    # types >= 128 are variable-length cells.
    129: Certs,
    130: AuthChallenge
}
# List of relay commands, indexed by command number (index 0 unused).
relay_commands = [
    '', 'RELAY_BEGIN', 'RELAY_DATA', 'RELAY_END', 'RELAY_CONNECTED', 'RELAY_SENDME',
    'RELAY_EXTEND', 'RELAY_EXTENDED', 'RELAY_TRUNCATE', 'RELAY_TRUNCATED',
    'RELAY_DROP', 'RELAY_RESOLVE', 'RELAY_RESOLVED', 'RELAY_BEGIN_DIR',
    'RELAY_EXTEND2', 'RELAY_EXTENDED2'
]
# -*- coding: utf-8 -*-
u"""Test slugify."""
from __future__ import unicode_literals
import nikola.utils
def test_ascii():
    """A plain ASCII string slugifies to itself, as unicode."""
    result = nikola.utils.slugify(u'hello', lang='en')
    assert result == u'hello'
    assert isinstance(result, nikola.utils.unicode_str)
def test_ascii_dash():
    """Dashes in an ASCII string are preserved by slugify."""
    result = nikola.utils.slugify(u'hello-world', lang='en')
    assert result == u'hello-world'
    assert isinstance(result, nikola.utils.unicode_str)
def test_ascii_fancy():
    """Punctuation is stripped and spaces become dashes for ASCII input."""
    result = nikola.utils.slugify(u'The quick brown fox jumps over the lazy dog!-123.456', lang='en')
    assert result == u'the-quick-brown-fox-jumps-over-the-lazy-dog-123456'
    assert isinstance(result, nikola.utils.unicode_str)
def test_pl():
    """Polish diacritics are transliterated to plain ASCII."""
    result = nikola.utils.slugify(u'zażółćgęśląjaźń', lang='pl')
    assert result == u'zazolcgeslajazn'
    assert isinstance(result, nikola.utils.unicode_str)
def test_pl_dash():
    """Test a string with Polish diacritical characters and dashes."""
    o = nikola.utils.slugify(u'zażółć-gęślą-jaźń', lang='pl')
    assert o == u'zazolc-gesla-jazn'
    # Consistency with the sibling tests: the result must be unicode.
    assert isinstance(o, nikola.utils.unicode_str)
def test_pl_fancy():
    """Diacritics plus punctuation: transliterate, lowercase, dashify."""
    result = nikola.utils.slugify(u'Zażółć gęślą jaźń!-123.456', lang='pl')
    assert result == u'zazolc-gesla-jazn-123456'
    assert isinstance(result, nikola.utils.unicode_str)
def test_disarmed():
    """Test disarmed slugify: input passes through unchanged."""
    nikola.utils.USE_SLUGIFY = False
    try:
        o = nikola.utils.slugify(u'Zażółć gęślą jaźń!-123.456', lang='pl')
        assert o == u'Zażółć gęślą jaźń!-123.456'
        assert isinstance(o, nikola.utils.unicode_str)
    finally:
        # Restore the global flag even if an assertion fails, so a failure
        # here cannot leave slugify disarmed for later tests.
        nikola.utils.USE_SLUGIFY = True
def test_disarmed_weird():
    """Test disarmed slugify with banned characters: only they are replaced."""
    nikola.utils.USE_SLUGIFY = False
    try:
        o = nikola.utils.slugify(u'Zażółć gęślą jaźń!-123.456 "Hello World"?#H<e>l/l\\o:W\'o\rr*l\td|!\n', lang='pl')
        assert o == u'Zażółć gęślą jaźń!-123.456 -Hello World---H-e-l-l-o-W-o-r-l-d-!-'
        assert isinstance(o, nikola.utils.unicode_str)
    finally:
        # Restore the global flag even if an assertion fails, so a failure
        # here cannot leave slugify disarmed for later tests.
        nikola.utils.USE_SLUGIFY = True
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from xmlrpc.server import SimpleXMLRPCServer
from threading import Thread
import socket
import logging
from .utils import install_locale, parse_temperature_report
# Install gettext's "_" for the pronterface domain before any messages are built.
install_locale('pronterface')

# Default TCP port for the XML-RPC server; __init__ walks upward if it is busy.
RPC_PORT = 7978
class ProntRPC:
    """Expose basic printer control of a pronsole instance over XML-RPC."""

    server = None

    def __init__(self, pronsole, port = RPC_PORT):
        """Bind an XML-RPC server on localhost and serve it on a thread.

        Starts at *port* and walks upward until a free port is found.
        """
        self.pronsole = pronsole
        used_port = port
        while True:
            try:
                self.server = SimpleXMLRPCServer(("localhost", used_port),
                                                 allow_none = True,
                                                 logRequests = False)
                if used_port != port:
                    logging.warning(_("RPC server bound on non-default port %d") % used_port)
                break
            except socket.error as e:
                if e.errno == 98:  # 98 == EADDRINUSE: try the next port
                    used_port += 1
                    continue
                else:
                    raise
        self.server.register_function(self.get_status, 'status')
        self.server.register_function(self.set_extruder_temperature, 'settemp')
        self.server.register_function(self.set_bed_temperature, 'setbedtemp')
        self.server.register_function(self.load_file, 'load_file')
        self.server.register_function(self.startprint, 'startprint')
        self.server.register_function(self.pauseprint, 'pauseprint')
        self.server.register_function(self.resumeprint, 'resumeprint')
        self.server.register_function(self.sendhome, 'sendhome')
        self.server.register_function(self.connect, 'connect')
        self.server.register_function(self.disconnect, 'disconnect')
        self.server.register_function(self.send, 'send')
        self.thread = Thread(target = self.run_server)
        self.thread.start()

    def run_server(self):
        """Blocking serve loop; runs on the background thread."""
        self.server.serve_forever()

    def shutdown(self):
        """Stop the server and wait for its thread to exit."""
        self.server.shutdown()
        self.thread.join()

    def get_status(self):
        """Return a dict with filename, progress %, ETA, temperatures and layer."""
        if self.pronsole.p.printing:
            progress = 100 * float(self.pronsole.p.queueindex) / len(self.pronsole.p.mainqueue)
        elif self.pronsole.sdprinting:
            progress = self.pronsole.percentdone
        else:
            progress = None
        if self.pronsole.p.printing or self.pronsole.sdprinting:
            eta = self.pronsole.get_eta()
        else:
            eta = None
        if self.pronsole.tempreadings:
            temps = parse_temperature_report(self.pronsole.tempreadings)
        else:
            temps = None
        z = self.pronsole.curlayer
        return {"filename": self.pronsole.filename,
                "progress": progress,
                "eta": eta,
                "temps": temps,
                "z": z,
                }

    def set_extruder_temperature(self, targettemp):
        """Set the hotend target temperature (string or number)."""
        if self.pronsole.p.online:
            # %s instead of "+": XML-RPC clients may pass numbers, which
            # would crash string concatenation.
            self.pronsole.p.send_now("M104 S%s" % targettemp)

    def set_bed_temperature(self, targettemp):
        """Set the bed target temperature (string or number)."""
        if self.pronsole.p.online:
            self.pronsole.p.send_now("M140 S%s" % targettemp)

    def load_file(self, filename):
        """Load a G-code file into pronsole."""
        self.pronsole.do_load(filename)

    def startprint(self):
        self.pronsole.do_print("")

    def pauseprint(self):
        self.pronsole.do_pause("")

    def resumeprint(self):
        self.pronsole.do_resume("")

    def sendhome(self):
        self.pronsole.do_home("")

    def connect(self):
        self.pronsole.do_connect("")

    def disconnect(self):
        self.pronsole.do_disconnect("")

    def send(self, command):
        """Send a raw G-code command to the printer immediately."""
        self.pronsole.p.send_now(command)
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``CurriculumVitae.show_contact_number``.

    The ``models`` dict below is the auto-generated ORM freeze for this
    migration; do not edit it by hand.
    """

    def forwards(self, orm):
        # Adding field 'CurriculumVitae.show_contact_number'
        db.add_column('base_curriculumvitae', 'show_contact_number', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CurriculumVitae.show_contact_number'
        db.delete_column('base_curriculumvitae', 'show_contact_number')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'base.article': {
            'Meta': {'object_name': 'Article'},
            'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 12, 12, 2, 34, 676701)', 'blank': 'True'}),
            'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'base.category': {
            'Meta': {'object_name': 'Category'},
            'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.Article']", 'null': 'True', 'blank': 'True'}),
            'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'province': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['base.Province']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'user_submitted_job_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.UserSubmittedJobArticle']", 'null': 'True', 'blank': 'True'})
        },
        'base.certificate': {
            'Meta': {'object_name': 'Certificate'},
            'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
        },
        'base.curriculumvitae': {
            'Meta': {'object_name': 'CurriculumVitae'},
            'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'certificates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Certificate']", 'symmetrical': 'False', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'connection_requests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'connection_requests'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'date_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'highest_grade': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'highest_grade_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Language']", 'symmetrical': 'False', 'blank': 'True'}),
            'nr_of_faxes_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'preferred_skill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'profiles_preferred'", 'null': 'True', 'to': "orm['base.Skill']"}),
            'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Reference']", 'symmetrical': 'False', 'blank': 'True'}),
            'school': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'show_contact_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'skills': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'profiles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['base.Skill']"}),
            'surname': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'work_experiences': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.WorkExperience']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'base.language': {
            'Meta': {'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'read_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'base.province': {
            'Meta': {'object_name': 'Province'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'search_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
        },
        'base.reference': {
            'Meta': {'object_name': 'Reference'},
            'contact_no': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'fullname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'relationship': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'})
        },
        'base.skill': {
            'Meta': {'object_name': 'Skill'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'skill': ('django.db.models.fields.CharField', [], {'max_length': '45'})
        },
        'base.usersubmittedjobarticle': {
            'Meta': {'object_name': 'UserSubmittedJobArticle'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job_category': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'province': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'text': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_submitted_job_article_user'", 'to': "orm['auth.User']"})
        },
        'base.workexperience': {
            'Meta': {'object_name': 'WorkExperience'},
            'company': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'end_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '45'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['base']
# -*- coding: utf-8 -*-
"""
Strongly connected components.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__authors__ = "\n".join(['Eben Kenah',
'Aric Hagberg (hagberg@lanl.gov)'
'Christopher Ellison',
'Ben Edwards (bedwards@cs.unm.edu)'])
__all__ = ['number_strongly_connected_components',
'strongly_connected_components',
'strongly_connected_component_subgraphs',
'is_strongly_connected',
'strongly_connected_components_recursive',
'kosaraju_strongly_connected_components',
'condensation']
def strongly_connected_components(G):
    """Return nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
       An directed graph.

    Returns
    -------
    comp : list of lists
       A list of nodes for each component of G.
       The list is ordered from largest connected component to smallest.

    Raises
    ------
    NetworkXError: If G is undirected.

    See Also
    --------
    connected_components, weakly_connected_components

    Notes
    -----
    Uses Tarjan's algorithm with Nuutila's modifications.
    Nonrecursive version of algorithm.

    References
    ----------
    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
       SIAM Journal of Computing 1(2):146-160, (1972).

    .. [2] On finding the strongly connected components in a directed graph.
       E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994)..
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              Use connected_components() """)
    preorder={}
    lowlink={}
    scc_found={}
    scc_queue = []
    scc_list=[]
    i=0     # Preorder counter
    for source in G:
        if source not in scc_found:
            # Iterative DFS from source using an explicit stack (queue).
            queue=[source]
            while queue:
                v=queue[-1]
                if v not in preorder:
                    i=i+1
                    preorder[v]=i
                done=1
                v_nbrs=G[v]
                # Descend into the first not-yet-numbered neighbor, if any.
                for w in v_nbrs:
                    if w not in preorder:
                        queue.append(w)
                        done=0
                        break
                if done==1:
                    # All neighbors numbered: compute v's lowlink from them.
                    lowlink[v]=preorder[v]
                    for w in v_nbrs:
                        if w not in scc_found:
                            if preorder[w]>preorder[v]:
                                lowlink[v]=min([lowlink[v],lowlink[w]])
                            else:
                                lowlink[v]=min([lowlink[v],preorder[w]])
                    queue.pop()
                    if lowlink[v]==preorder[v]:
                        # v is the root of an SCC; collect members with a
                        # larger preorder number from scc_queue.
                        scc_found[v]=True
                        scc=[v]
                        while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
                            k=scc_queue.pop()
                            scc_found[k]=True
                            scc.append(k)
                        scc_list.append(scc)
                    else:
                        scc_queue.append(v)
    scc_list.sort(key=len,reverse=True)
    return scc_list
def kosaraju_strongly_connected_components(G,source=None):
    """Return nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
       An directed graph.

    Returns
    -------
    comp : list of lists
       A list of nodes for each component of G.
       The list is ordered from largest connected component to smallest.

    Raises
    ------
    NetworkXError: If G is undirected

    See Also
    --------
    connected_components

    Notes
    -----
    Uses Kosaraju's algorithm.
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              Use connected_components() """)
    components=[]
    # First pass: DFS postorder on the reversed graph.
    G=G.reverse(copy=False)
    post=list(nx.dfs_postorder_nodes(G,source=source))
    # Reverse back to the original orientation.
    G=G.reverse(copy=False)
    seen={}
    # Second pass: DFS on G in reverse postorder; each tree is one SCC.
    while post:
        r=post.pop()
        if r in seen:
            continue
        c=nx.dfs_preorder_nodes(G,r)
        new=[v for v in c if v not in seen]
        seen.update([(u,True) for u in new])
        components.append(new)
    components.sort(key=len,reverse=True)
    return components
def strongly_connected_components_recursive(G):
    """Return nodes in strongly connected components of graph.

    Recursive version of algorithm.

    Parameters
    ----------
    G : NetworkX Graph
       An directed graph.

    Returns
    -------
    comp : list of lists
       A list of nodes for each component of G.
       The list is ordered from largest connected component to smallest.

    Raises
    ------
    NetworkXError : If G is undirected

    See Also
    --------
    connected_components

    Notes
    -----
    Uses Tarjan's algorithm with Nuutila's modifications.

    References
    ----------
    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
       SIAM Journal of Computing 1(2):146-160, (1972).

    .. [2] On finding the strongly connected components in a directed graph.
       E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994)..
    """
    def visit(v,cnt):
        # NOTE(review): cnt is passed by value, so the increment below does
        # not propagate back to sibling recursive calls -- preorder numbers
        # can repeat across branches.  Compare with upstream networkx before
        # relying on this variant.
        root[v]=cnt
        visited[v]=cnt
        cnt+=1
        stack.append(v)
        for w in G[v]:
            if w not in visited: visit(w,cnt)
            if w not in component:
                root[v]=min(root[v],root[w])
        if root[v]==visited[v]:
            # v is the root of a component: everything above it on the
            # stack belongs to the same SCC.
            component[v]=root[v]
            tmpc=[v]    # hold nodes in this component
            while stack[-1]!=v:
                w=stack.pop()
                component[w]=root[v]
                tmpc.append(w)
            stack.remove(v)
            scc.append(tmpc)    # add to scc list
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              Use connected_components() """)
    scc=[]
    visited={}
    component={}
    root={}
    cnt=0
    stack=[]
    for source in G:
        if source not in visited:
            visit(source,cnt)
    scc.sort(key=len,reverse=True)
    return scc
def strongly_connected_component_subgraphs(G):
    """Return strongly connected components as subgraphs.

    Parameters
    ----------
    G : NetworkX Graph
       A graph.

    Returns
    -------
    glist : list
       A list of graphs, one for each strongly connected component of G,
       ordered from largest component to smallest.

    See Also
    --------
    connected_component_subgraphs

    Notes
    -----
    Graph, node, and edge attributes are copied to the subgraphs.
    """
    return [G.subgraph(component).copy()
            for component in strongly_connected_components(G)]
def number_strongly_connected_components(G):
    """Return the number of strongly connected components in *G*.

    Parameters
    ----------
    G : NetworkX graph
       A directed graph.

    Returns
    -------
    n : integer
       Number of strongly connected components

    See Also
    --------
    connected_components

    Notes
    -----
    For directed graphs only.
    """
    components = strongly_connected_components(G)
    return len(components)
def is_strongly_connected(G):
    """Test directed graph for strong connectivity.

    Parameters
    ----------
    G : NetworkX Graph
       A directed graph.

    Returns
    -------
    connected : bool
      True if the graph is strongly connected, False otherwise.

    See Also
    --------
    strongly_connected_components

    Notes
    -----
    For directed graphs only.
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              See is_connected() for connectivity test.""")
    if len(G)==0:
        raise nx.NetworkXPointlessConcept(
            """Connectivity is undefined for the null graph.""")
    # Strongly connected iff the largest SCC (first in the sorted result)
    # contains every node of G.
    return len(strongly_connected_components(G)[0])==len(G)
def condensation(G, scc=None):
    """Returns the condensation of G.

    The condensation of G is the graph with each of the strongly connected
    components contracted into a single node.

    Parameters
    ----------
    G : NetworkX DiGraph
       A directed graph.

    scc: list (optional, default=None)
       A list of strongly connected components.  If provided, the elements in
       `scc` must partition the nodes in `G`. If not provided, it will be
       calculated as scc=nx.strongly_connected_components(G).

    Returns
    -------
    C : NetworkX DiGraph
       The condensation of G. The node labels are integers corresponding
       to the index of the component in the list of strongly connected
       components.

    Raises
    ------
    NetworkXError: If G is not directed

    Notes
    -----
    After contracting all strongly connected components to a single node,
    the resulting graph is a directed acyclic graph.
    """
    if not G.is_directed():
        raise nx.NetworkXError("""Not allowed for undirected graph G.
              See is_connected() for connectivity test.""")
    if scc is None:
        scc = nx.strongly_connected_components(G)
    # mapping: original node -> index of its component in scc.
    mapping = {}
    C = nx.DiGraph()
    for i,component in enumerate(scc):
        for n in component:
            mapping[n] = i
    C.add_nodes_from(range(len(scc)))
    # One edge between components for every inter-component edge of G;
    # intra-component edges (and self-loops on components) are dropped.
    for u,v in G.edges():
        if mapping[u] != mapping[v]:
            C.add_edge(mapping[u],mapping[v])
    return C
import json
from coalib.bearlib.abstractions.Lint import Lint
from coalib.bears.LocalBear import LocalBear
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
class DockerfileLintBear(LocalBear, Lint):
    """Check Dockerfiles by running the external ``dockerfile_lint`` tool."""

    executable = 'dockerfile_lint'
    arguments = '--json -f'
    # Map dockerfile_lint severity levels onto coala result severities.
    severity_map = {
        "error": RESULT_SEVERITY.MAJOR,
        "warn": RESULT_SEVERITY.NORMAL,
        "info": RESULT_SEVERITY.INFO}

    def run(self, filename, file):
        '''
        Checks the given file with dockerfile_lint.
        '''
        return self.lint(filename)

    def _process_issues(self, output, filename):
        """Yield one Result per issue in dockerfile_lint's JSON output."""
        output = json.loads("".join(output))
        for severity in output:
            # The "summary" key holds counts, not issue entries.
            if severity == "summary":
                continue
            for issue in output[severity]["data"]:
                yield Result.from_values(
                    origin=self,
                    message=issue["message"],
                    file=filename,
                    severity=self.severity_map[issue["level"]],
                    line=issue["line"])
#!/usr/bin/env python
# Implement DNS inclusion proof checking, see [TBD].
#
# Unfortunately, getting at the SCTs in general is hard in Python, so this
# does not start with an SSL connection, but instead fetches a log entry by
# index and then verifies the proof over DNS.
# You will need to install DNSPython (http://www.dnspython.org/)
import base64
import dns.resolver
import hashlib
import json
import logging
import os
import sys
import urllib2
# Make the project's python/ tree importable relative to this script's location.
basepath = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(basepath, '../../../python'))

from ct.crypto import merkle, pem, verify
from ct.proto import client_pb2
class CTDNSLookup:
    """Fetch and verify Certificate Transparency log data served as DNS TXT
    records under a log's DNS zone."""

    def __init__(self, domain, verifier, resolver=None):
        # domain: DNS zone of the log (e.g. pilot.ct.googleapis.com)
        # verifier: LogVerifier used to check STH signatures
        # resolver: optional dns.resolver.Resolver; defaults to system resolver
        self.verifier = verifier
        self.domain = domain
        self.resolver = resolver
        if not self.resolver:
            self.resolver = dns.resolver.get_default_resolver()

    def Get(self, name):
        """Query *name* for TXT records and return the answer set."""
        logging.info('get %s', name)
        answers = self.resolver.query(name, 'TXT')
        assert answers.rdtype == dns.rdatatype.TXT
        return answers

    def GetOne(self, name):
        """Query one TXT record under the log's domain; return its joined text."""
        name += '.%s' % self.domain
        answers = self.Get(name)
        assert len(answers) == 1
        txt = answers[0]
        return ''.join(txt.strings)

    def GetSTH(self):
        """Fetch, parse and signature-verify the log's signed tree head."""
        sth_str = self.GetOne('sth')
        sth = client_pb2.SthResponse()
        # TXT payload is dot-separated:
        # tree_size.timestamp.b64(root_hash).b64(signature)
        parts = str(sth_str).split('.')
        sth.tree_size = int(parts[0])
        sth.timestamp = int(parts[1])
        sth.sha256_root_hash = base64.b64decode(parts[2])
        sth.tree_head_signature = base64.b64decode(parts[3])
        self.verifier.verify_sth(sth)
        return sth

    def GetEntry(self, level, index, size):
        """Fetch the audit-path data for (level, index) in a tree of *size*."""
        return self.GetOne('%d.%d.%d.tree' % (level, index, size))

    def GetIndexFromHash(self, hash):
        """Look up a leaf's index by its base32-encoded (unpadded) hash."""
        return self.GetOne('%s.hash' % base64.b32encode(hash).rstrip('='))
if __name__ == '__main__':
    # NOTE(review): this script targets Python 2 (urllib2, chr() on bytes,
    # list-returning map) -- confirm before running under Python 3.
    logging.basicConfig(level='INFO')
    index = sys.argv[1]
    # Public key of the pilot log, used to verify the STH signature.
    keypem = ('-----BEGIN PUBLIC KEY-----\n'
              'MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEfahLEimAoz2t01p\n'
              '3uMziiLOl/fHTDM0YDOhBRuiBARsV4UvxG2LdNgoIGLrtCzWE0J\n'
              '5APC2em4JlvR8EEEFMoA==\n'
              '-----END PUBLIC KEY-----\n')
    logurl = 'http://ct.googleapis.com/pilot';
    logdns = 'pilot.ct.googleapis.com'
    # Fetch the target log entry over HTTP by its index.
    response = urllib2.urlopen('%s/ct/v1/get-entries?start=%s&end=%s'
                               % (logurl, index, index))
    j = response.read()
    j = json.loads(j)
    leaf_input = j['entries'][0]['leaf_input']
    logging.info('leaf = %s', leaf_input)
    leaf = base64.b64decode(leaf_input)
    # Merkle leaf hash: SHA-256 over a 0x00 prefix byte plus the leaf.
    leaf_hash = hashlib.sha256(chr(0) + leaf).digest()
    keyinfo = verify.create_key_info_from_raw_key(pem.from_pem(keypem, 'PUBLIC KEY')[0])
    log_verifier = verify.LogVerifier(keyinfo)
    lookup = CTDNSLookup(logdns, log_verifier)
    sth = lookup.GetSTH()
    logging.info('sth = %s', sth)
    logging.info('hash = %s', base64.b64encode(leaf_hash))
    verifier = merkle.MerkleVerifier()
    index = int(index)
    audit_path = []
    prev = None
    apl = verifier.audit_path_length(index, sth.tree_size)
    for level in range(0, apl):
        # Each DNS answer carries up to 7 consecutive 32-byte path hashes;
        # only the first hash of each answer is appended to the path.
        h = lookup.GetEntry(level, index, sth.tree_size)
        logging.info('hash = %s', base64.b64encode(h))
        audit_path.append(h[:32])
        if prev:
            # Consecutive answers must overlap, shifted by one 32-byte hash
            # (except near the end of the path, where they shrink).
            if level < apl - 6:
                assert prev[32:] == h[:-32]
            else:
                assert prev[32:] == h
        else:
            assert len(h) == 32 * min(7, apl)
        prev = h
    logging.info('path = %s', map(base64.b64encode, audit_path))
    # Verify the assembled audit path against the signed tree head.
    assert verifier.verify_leaf_hash_inclusion(leaf_hash, index, audit_path,
                                               sth)
    # Cross-check: the hash->index DNS mapping must agree with our index.
    hash_info = lookup.GetIndexFromHash(leaf_hash)
    assert hash_info == str(index)
"""Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
from __future__ import unicode_literals
from __future__ import print_function
import unittest
from test import test_support as support
import io
import _pyio as pyio
import pickle
class MemorySeekTestMixin:
    """Read/seek/tell tests shared by StringIO and BytesIO test cases.

    Concrete subclasses provide ``buftype`` (converts str literals to the
    stream's data type), ``ioclass`` and ``EOF`` (the empty value of the
    stream's data type).
    """

    def testInit(self):
        # Constructing from an initial buffer must not raise.
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)

    def testRead(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEqual(buf[:1], bytesIo.read(1))
        self.assertEqual(buf[1:5], bytesIo.read(4))
        # Reading past the end returns whatever is left...
        self.assertEqual(buf[5:], bytesIo.read(900))
        # ...and then the empty value.
        self.assertEqual(self.EOF, bytesIo.read())

    def testReadNoArgs(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        # read() with no argument returns the whole remaining buffer.
        self.assertEqual(buf, bytesIo.read())
        self.assertEqual(self.EOF, bytesIo.read())

    def testSeek(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        bytesIo.read(5)
        bytesIo.seek(0)
        self.assertEqual(buf, bytesIo.read())
        bytesIo.seek(3)
        self.assertEqual(buf[3:], bytesIo.read())
        # seek() takes an integer position, not a float.
        self.assertRaises(TypeError, bytesIo.seek, 0.0)

    def testTell(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEqual(0, bytesIo.tell())
        bytesIo.seek(5)
        self.assertEqual(5, bytesIo.tell())
        # Seeking past the end is allowed and is reported by tell().
        bytesIo.seek(10000)
        self.assertEqual(10000, bytesIo.tell())
class MemoryTestMixin:
    def test_detach(self):
        # Memory streams have no underlying raw stream, so detach() raises.
        buf = self.ioclass()
        self.assertRaises(self.UnsupportedOperation, buf.detach)
    def write_ops(self, f, t):
        """Common write/seek/truncate exercises on stream *f*; *t* converts
        str literals to the stream's data type."""
        self.assertEqual(f.write(t("blah.")), 5)
        self.assertEqual(f.seek(0), 0)
        # Overwrite in place from the start.
        self.assertEqual(f.write(t("Hello.")), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(5), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(t(" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(t("h")), 1)
        # truncate() to a size beyond what is kept; position is unchanged.
        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 1)
def test_write(self):
buf = self.buftype("hello world\n")
memio = self.ioclass(buf)
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass()
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.write, None)
memio.close()
self.assertRaises(ValueError, memio.write, self.buftype(""))
def test_writelines(self):
buf = self.buftype("1234567890")
memio = self.ioclass()
self.assertEqual(memio.writelines([buf] * 100), None)
self.assertEqual(memio.getvalue(), buf * 100)
memio.writelines([])
self.assertEqual(memio.getvalue(), buf * 100)
memio = self.ioclass()
self.assertRaises(TypeError, memio.writelines, [buf] + [1])
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.writelines, None)
memio.close()
self.assertRaises(ValueError, memio.writelines, [])
def test_writelines_error(self):
memio = self.ioclass()
def error_gen():
yield self.buftype('spam')
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())
def test_truncate(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(ValueError, memio.truncate, -1)
memio.seek(6)
self.assertEqual(memio.truncate(), 6)
self.assertEqual(memio.getvalue(), buf[:6])
self.assertEqual(memio.truncate(4), 4)
self.assertEqual(memio.getvalue(), buf[:4])
# truncate() accepts long objects
self.assertEqual(memio.truncate(4L), 4)
self.assertEqual(memio.getvalue(), buf[:4])
self.assertEqual(memio.tell(), 6)
memio.seek(0, 2)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf[:4] + buf)
pos = memio.tell()
self.assertEqual(memio.truncate(None), pos)
self.assertEqual(memio.tell(), pos)
self.assertRaises(TypeError, memio.truncate, '0')
memio.close()
self.assertRaises(ValueError, memio.truncate, 0)
def test_init(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass(None)
self.assertEqual(memio.getvalue(), self.EOF)
memio.__init__(buf * 2)
self.assertEqual(memio.getvalue(), buf * 2)
memio.__init__(buf)
self.assertEqual(memio.getvalue(), buf)
def test_read(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.read(0), self.EOF)
self.assertEqual(memio.read(1), buf[:1])
# read() accepts long objects
self.assertEqual(memio.read(4L), buf[1:5])
self.assertEqual(memio.read(900), buf[5:])
self.assertEqual(memio.read(), self.EOF)
memio.seek(0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 10)
memio.seek(0)
self.assertEqual(memio.read(-1), buf)
memio.seek(0)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(100)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(0)
self.assertEqual(memio.read(None), buf)
self.assertRaises(TypeError, memio.read, '')
memio.close()
self.assertRaises(ValueError, memio.read)
def test_readline(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 2)
self.assertEqual(memio.readline(0), self.EOF)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(memio.readline(5), buf[:5])
# readline() accepts long objects
self.assertEqual(memio.readline(5L), buf[5:10])
self.assertEqual(memio.readline(5), buf[10:15])
memio.seek(0)
self.assertEqual(memio.readline(-1), buf)
memio.seek(0)
self.assertEqual(memio.readline(0), self.EOF)
buf = self.buftype("1234567890\n")
memio = self.ioclass((buf * 3)[:-1])
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf[:-1])
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(type(memio.readline()), type(buf))
self.assertEqual(memio.readline(), buf)
self.assertRaises(TypeError, memio.readline, '')
memio.close()
self.assertRaises(ValueError, memio.readline)
def test_readlines(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(memio.readlines(), [buf] * 10)
memio.seek(5)
self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
memio.seek(0)
# readlines() accepts long objects
self.assertEqual(memio.readlines(15L), [buf] * 2)
memio.seek(0)
self.assertEqual(memio.readlines(-1), [buf] * 10)
memio.seek(0)
self.assertEqual(memio.readlines(0), [buf] * 10)
memio.seek(0)
self.assertEqual(type(memio.readlines()[0]), type(buf))
memio.seek(0)
self.assertEqual(memio.readlines(None), [buf] * 10)
self.assertRaises(TypeError, memio.readlines, '')
memio.close()
self.assertRaises(ValueError, memio.readlines)
def test_iterator(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(iter(memio), memio)
self.assertTrue(hasattr(memio, '__iter__'))
self.assertTrue(hasattr(memio, 'next'))
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio.seek(0)
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio = self.ioclass(buf * 2)
memio.close()
self.assertRaises(ValueError, next, memio)
def test_getvalue(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio.read()
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(type(memio.getvalue()), type(buf))
memio = self.ioclass(buf * 1000)
self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
memio = self.ioclass(buf)
memio.close()
self.assertRaises(ValueError, memio.getvalue)
def test_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.read(5)
self.assertRaises(ValueError, memio.seek, -1)
self.assertRaises(ValueError, memio.seek, 1, -1)
self.assertRaises(ValueError, memio.seek, 1, 3)
self.assertEqual(memio.seek(0), 0)
self.assertEqual(memio.seek(0, 0), 0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.seek(3), 3)
# seek() accepts long objects
self.assertEqual(memio.seek(3L), 3)
self.assertEqual(memio.seek(0, 1), 3)
self.assertEqual(memio.read(), buf[3:])
self.assertEqual(memio.seek(len(buf)), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.seek(len(buf) + 1)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.seek(0, 2), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.close()
self.assertRaises(ValueError, memio.seek, 0)
def test_overseek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(len(buf) + 1), 11)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 11)
self.assertEqual(memio.getvalue(), buf)
memio.write(self.EOF)
self.assertEqual(memio.getvalue(), buf)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)
def test_tell(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.tell(), 0)
memio.seek(5)
self.assertEqual(memio.tell(), 5)
memio.seek(10000)
self.assertEqual(memio.tell(), 10000)
memio.close()
self.assertRaises(ValueError, memio.tell)
def test_flush(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.flush(), None)
def test_flags(self):
memio = self.ioclass()
self.assertEqual(memio.writable(), True)
self.assertEqual(memio.readable(), True)
self.assertEqual(memio.seekable(), True)
self.assertEqual(memio.isatty(), False)
self.assertEqual(memio.closed, False)
memio.close()
self.assertRaises(ValueError, memio.writable)
self.assertRaises(ValueError, memio.readable)
self.assertRaises(ValueError, memio.seekable)
self.assertRaises(ValueError, memio.isatty)
self.assertEqual(memio.closed, True)
def test_subclassing(self):
buf = self.buftype("1234567890")
def test1():
class MemIO(self.ioclass):
pass
m = MemIO(buf)
return m.getvalue()
def test2():
class MemIO(self.ioclass):
def __init__(me, a, b):
self.ioclass.__init__(me, a)
m = MemIO(buf, None)
return m.getvalue()
self.assertEqual(test1(), buf)
self.assertEqual(test2(), buf)
def test_instance_dict_leak(self):
# Test case for issue #6242.
# This will be caught by regrtest.py -R if this leak.
for _ in range(100):
memio = self.ioclass()
memio.foo = 1
def test_pickling(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.foo = 42
memio.seek(2)
class PickleTestMemIO(self.ioclass):
def __init__(me, initvalue, foo):
self.ioclass.__init__(me, initvalue)
me.foo = foo
# __getnewargs__ is undefined on purpose. This checks that PEP 307
# is used to provide pickling support.
# Pickle expects the class to be on the module level. Here we use a
# little hack to allow the PickleTestMemIO class to derive from
# self.ioclass without having to define all combinations explicitly on
# the module-level.
import __main__
PickleTestMemIO.__module__ = '__main__'
__main__.PickleTestMemIO = PickleTestMemIO
submemio = PickleTestMemIO(buf, 80)
submemio.seek(2)
# We only support pickle protocol 2 and onward since we use extended
# __reduce__ API of PEP 307 to provide pickling support.
for proto in range(2, pickle.HIGHEST_PROTOCOL):
for obj in (memio, submemio):
obj2 = pickle.loads(pickle.dumps(obj, protocol=proto))
self.assertEqual(obj.getvalue(), obj2.getvalue())
self.assertEqual(obj.__class__, obj2.__class__)
self.assertEqual(obj.foo, obj2.foo)
self.assertEqual(obj.tell(), obj2.tell())
obj.close()
self.assertRaises(ValueError, pickle.dumps, obj, proto)
del __main__.PickleTestMemIO
class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin, unittest.TestCase):
    """Tests for the pure-Python BytesIO implementation (_pyio.BytesIO)."""
    UnsupportedOperation = pyio.UnsupportedOperation
    # When Jython tries to use UnsupportedOperation as _pyio defines it, it runs
    # into a problem with multiple inheritance and the slots array: issue 1996.
    # Override the affected test version just so we can skip it visibly.
    @unittest.skipIf(support.is_jython, "FIXME: Jython issue 1996")
    def test_detach(self):
        pass
    @staticmethod
    def buftype(s):
        # Convert a str literal into the buffer type under test (bytes).
        return s.encode("ascii")
    ioclass = pyio.BytesIO
    EOF = b""
    def test_read1(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        # read1() requires an explicit size argument in this implementation.
        self.assertRaises(TypeError, memio.read1)
        self.assertEqual(memio.read(), buf)
    def test_readinto(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        b = bytearray(b"hello")
        self.assertEqual(memio.readinto(b), 5)
        self.assertEqual(b, b"12345")
        self.assertEqual(memio.readinto(b), 5)
        self.assertEqual(b, b"67890")
        self.assertEqual(memio.readinto(b), 0)
        self.assertEqual(b, b"67890")
        # A buffer longer than the remaining data is only partially filled.
        b = bytearray(b"hello world")
        memio.seek(0)
        self.assertEqual(memio.readinto(b), 10)
        self.assertEqual(b, b"1234567890d")
        b = bytearray(b"")
        memio.seek(0)
        self.assertEqual(memio.readinto(b), 0)
        self.assertEqual(b, b"")
        self.assertRaises(TypeError, memio.readinto, '')
        import array
        a = array.array(b'b', b"hello world")
        memio = self.ioclass(buf)
        memio.readinto(a)
        self.assertEqual(a.tostring(), b"1234567890d")
        memio.close()
        self.assertRaises(ValueError, memio.readinto, b)
        # readinto() past the end reads nothing.
        memio = self.ioclass(b"123")
        b = bytearray()
        memio.seek(42)
        memio.readinto(b)
        self.assertEqual(b, b"")
    def test_relative_seek(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        # BytesIO clamps relative seeks before position 0 instead of raising.
        self.assertEqual(memio.seek(-1, 1), 0)
        self.assertEqual(memio.seek(3, 1), 3)
        self.assertEqual(memio.seek(-4, 1), 0)
        self.assertEqual(memio.seek(-1, 2), 9)
        self.assertEqual(memio.seek(1, 1), 10)
        self.assertEqual(memio.seek(1, 2), 11)
        memio.seek(-3, 2)
        self.assertEqual(memio.read(), buf[-3:])
        memio.seek(0)
        memio.seek(1, 1)
        self.assertEqual(memio.read(), buf[1:])
    def test_unicode(self):
        # Text must be rejected everywhere by the binary stream.
        memio = self.ioclass()
        self.assertRaises(TypeError, self.ioclass, "1234567890")
        self.assertRaises(TypeError, memio.write, "1234567890")
        self.assertRaises(TypeError, memio.writelines, ["1234567890"])
    def test_bytes_array(self):
        buf = b"1234567890"
        import array
        a = array.array(b'b', buf)
        memio = self.ioclass(a)
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(a), 10)
        self.assertEqual(memio.getvalue(), buf)
    def test_issue5449(self):
        # The constructor accepts its argument by keyword, but nothing else.
        buf = self.buftype("1234567890")
        self.ioclass(initial_bytes=buf)
        self.assertRaises(TypeError, self.ioclass, buf, foo=None)
class TextIOTestMixin:
    """Text-mode (newline translation) tests shared by the StringIO flavours."""
    def test_newlines_property(self):
        memio = self.ioclass(newline=None)
        # The C StringIO decodes newlines in write() calls, but the Python
        # implementation only does when reading. This function forces them to
        # be decoded for testing.
        def force_decode():
            memio.seek(0)
            memio.read()
        self.assertEqual(memio.newlines, None)
        memio.write("a\n")
        force_decode()
        self.assertEqual(memio.newlines, "\n")
        memio.write("b\r\n")
        force_decode()
        self.assertEqual(memio.newlines, ("\n", "\r\n"))
        memio.write("c\rd")
        force_decode()
        self.assertEqual(memio.newlines, ("\r", "\n", "\r\n"))
    def test_relative_seek(self):
        # Unlike BytesIO, text streams reject non-zero relative seeks.
        memio = self.ioclass()
        self.assertRaises(IOError, memio.seek, -1, 1)
        self.assertRaises(IOError, memio.seek, 3, 1)
        self.assertRaises(IOError, memio.seek, -3, 1)
        self.assertRaises(IOError, memio.seek, -1, 2)
        self.assertRaises(IOError, memio.seek, 1, 1)
        self.assertRaises(IOError, memio.seek, 1, 2)
    def test_textio_properties(self):
        memio = self.ioclass()
        # These are just dummy values but we nevertheless check them for fear
        # of unexpected breakage.
        self.assertIsNone(memio.encoding)
        self.assertIsNone(memio.errors)
        self.assertFalse(memio.line_buffering)
    def test_newline_none(self):
        # newline=None: universal newlines; \r and \r\n translated to \n.
        memio = self.ioclass("a\nb\r\nc\rd", newline=None)
        self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"])
        memio.seek(0)
        self.assertEqual(memio.read(1), "a")
        self.assertEqual(memio.read(2), "\nb")
        self.assertEqual(memio.read(2), "\nc")
        self.assertEqual(memio.read(1), "\n")
        memio = self.ioclass(newline=None)
        # write() reports the number of characters given, pre-translation.
        self.assertEqual(2, memio.write("a\n"))
        self.assertEqual(3, memio.write("b\r\n"))
        self.assertEqual(3, memio.write("c\rd"))
        memio.seek(0)
        self.assertEqual(memio.read(), "a\nb\nc\nd")
        memio = self.ioclass("a\r\nb", newline=None)
        self.assertEqual(memio.read(3), "a\nb")
    def test_newline_empty(self):
        # newline="": no translation in either direction.
        memio = self.ioclass("a\nb\r\nc\rd", newline="")
        self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
        memio.seek(0)
        self.assertEqual(memio.read(4), "a\nb\r")
        self.assertEqual(memio.read(2), "\nc")
        self.assertEqual(memio.read(1), "\r")
        memio = self.ioclass(newline="")
        self.assertEqual(2, memio.write("a\n"))
        self.assertEqual(2, memio.write("b\r"))
        self.assertEqual(2, memio.write("\nc"))
        self.assertEqual(2, memio.write("\rd"))
        memio.seek(0)
        self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
    def test_newline_lf(self):
        # newline="\n" (the default): no translation.
        memio = self.ioclass("a\nb\r\nc\rd")
        self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
    def test_newline_cr(self):
        # newline="\r": every \n written becomes \r.
        memio = self.ioclass("a\nb\r\nc\rd", newline="\r")
        self.assertEqual(memio.read(), "a\rb\r\rc\rd")
        memio.seek(0)
        self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
    def test_newline_crlf(self):
        # newline="\r\n": every \n written becomes \r\n.
        memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n")
        self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
        memio.seek(0)
        self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
    def test_issue5265(self):
        # StringIO can duplicate newlines in universal newlines mode
        memio = self.ioclass("a\r\nb\r\n", newline=None)
        self.assertEqual(memio.read(5), "a\nb\n")
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
                     TextIOTestMixin, unittest.TestCase):
    """Tests for the pure-Python StringIO implementation (_pyio.StringIO)."""
    buftype = unicode
    ioclass = pyio.StringIO
    UnsupportedOperation = pyio.UnsupportedOperation
    EOF = ""
    # When Jython tries to use UnsupportedOperation as _pyio defines it, it runs
    # into a problem with multiple inheritance and the slots array: issue 1996.
    # Override the affected test version just so we can skip it visibly.
    @unittest.skipIf(support.is_jython, "FIXME: Jython issue 1996")
    def test_detach(self):
        pass
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
    """Test if pickle restores properly the internal state of StringIO.
    """
    buftype = unicode
    UnsupportedOperation = pyio.UnsupportedOperation
    EOF = ""
    class ioclass(pyio.StringIO):
        # Every "construction" actually round-trips a fresh StringIO through
        # pickle, so all inherited TextIOTestMixin tests exercise unpickled
        # objects.  __init__ is a no-op because __new__ already returns a
        # fully initialised (unpickled) instance.
        def __new__(cls, *args, **kwargs):
            return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
        def __init__(self, *args, **kwargs):
            pass
class CBytesIOTest(PyBytesIOTest):
    """Repeat the BytesIO tests against the C implementation (io.BytesIO)."""
    ioclass = io.BytesIO
    UnsupportedOperation = io.UnsupportedOperation
    test_bytes_array = unittest.skip(
        "array.array() does not have the new buffer API"
    )(PyBytesIOTest.test_bytes_array)
    # Re-instate test_detach skipped by Jython in PyBytesIOTest
    if support.is_jython: # FIXME: Jython issue 1996
        test_detach = MemoryTestMixin.test_detach
    def test_getstate(self):
        # __getstate__ returns (buffer, position, attribute-dict-or-None).
        memio = self.ioclass()
        state = memio.__getstate__()
        self.assertEqual(len(state), 3)
        bytearray(state[0]) # Check if state[0] supports the buffer interface.
        self.assertIsInstance(state[1], int)
        self.assertTrue(isinstance(state[2], dict) or state[2] is None)
        memio.close()
        self.assertRaises(ValueError, memio.__getstate__)
    def test_setstate(self):
        # This checks whether __setstate__ does proper input validation.
        memio = self.ioclass()
        memio.__setstate__((b"no error", 0, None))
        memio.__setstate__((bytearray(b"no error"), 0, None))
        memio.__setstate__((b"no error", 0, {'spam': 3}))
        self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None))
        self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None))
        self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None))
        self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0))
        self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0))
        self.assertRaises(TypeError, memio.__setstate__)
        self.assertRaises(TypeError, memio.__setstate__, 0)
        memio.close()
        self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
    check_sizeof = support.check_sizeof
    @support.cpython_only
    def test_sizeof(self):
        # Object size is the base structure plus the buffer (plus a NUL byte).
        basesize = support.calcobjsize(b'P2PP2P')
        check = self.check_sizeof
        self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
        check(io.BytesIO(), basesize )
        check(io.BytesIO(b'a'), basesize + 1 + 1 )
        check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
class CStringIOTest(PyStringIOTest):
    """Repeat the StringIO tests against the C implementation (io.StringIO)."""
    ioclass = io.StringIO
    UnsupportedOperation = io.UnsupportedOperation
    # XXX: For the Python version of io.StringIO, this is highly
    # dependent on the encoding used for the underlying buffer.
    # Re-instate test_detach skipped by Jython in PyStringIOTest
    if support.is_jython: # FIXME: Jython issue 1996
        test_detach = MemoryTestMixin.test_detach
    # This test checks that tell() results are consistent with the length of
    # text written, but this is not documented in the API: only that seek()
    # accept what tell() returns.
    @unittest.skipIf(support.is_jython, "Exact value of tell() is CPython specific")
    def test_widechar(self):
        buf = self.buftype("\U0002030a\U00020347")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(buf), len(buf))
        self.assertEqual(memio.tell(), len(buf))
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(buf), len(buf))
        self.assertEqual(memio.tell(), len(buf) * 2)
        self.assertEqual(memio.getvalue(), buf + buf)
    # This test checks that seek() accepts what tell() returns, without requiring
    # that tell() return a particular absolute value. Conceived for Jython, but
    # probably universal.
    def test_widechar_seek(self):
        buf = self.buftype("\U0002030aX\u00ca\U00020347\u05d1Y\u0628Z")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        # For each character in buf, read it back from memio and its tell value
        chars = list(buf)
        tells = list()
        for ch in chars :
            tells.append(memio.tell())
            self.assertEqual(memio.read(1), ch)
        # For each character in buf, seek to it and check it's there
        chpos = zip(chars, tells)
        chpos.reverse()
        for ch, pos in chpos:
            memio.seek(pos)
            self.assertEqual(memio.read(1), ch)
        # Check write after seek to end
        memio.seek(0, 2)
        self.assertEqual(memio.write(buf), len(buf))
        self.assertEqual(memio.getvalue(), buf + buf)
    def test_getstate(self):
        # __getstate__ returns (buffer, newline, position, attr-dict-or-None).
        memio = self.ioclass()
        state = memio.__getstate__()
        self.assertEqual(len(state), 4)
        self.assertIsInstance(state[0], unicode)
        self.assertIsInstance(state[1], str)
        self.assertIsInstance(state[2], int)
        self.assertTrue(isinstance(state[3], dict) or state[3] is None)
        memio.close()
        self.assertRaises(ValueError, memio.__getstate__)
    def test_setstate(self):
        # This checks whether __setstate__ does proper input validation.
        memio = self.ioclass()
        memio.__setstate__(("no error", "\n", 0, None))
        memio.__setstate__(("no error", "", 0, {'spam': 3}))
        self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None))
        self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None))
        self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None))
        # trunk is more tolerant than py3k on the type of the newline param
        #self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None))
        self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None))
        self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0))
        self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0))
        self.assertRaises(TypeError, memio.__setstate__)
        self.assertRaises(TypeError, memio.__setstate__, 0)
        memio.close()
        self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None))
class CStringIOPickleTest(PyStringIOPickleTest):
    """Test if pickle restores the internal state of the C io.StringIO."""
    UnsupportedOperation = io.UnsupportedOperation
    class ioclass(io.StringIO):
        # Round-trip through pickle at protocol 2: the C StringIO relies on
        # the extended __reduce__ API of PEP 307 for pickling support.
        def __new__(cls, *args, **kwargs):
            return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs),
                                             protocol=2))
        def __init__(self, *args, **kwargs):
            pass
def test_main():
    """Run every memory-IO test case through the regrtest driver."""
    support.run_unittest(
        PyBytesIOTest,
        PyStringIOTest,
        CBytesIOTest,
        CStringIOTest,
        PyStringIOPickleTest,
        CStringIOPickleTest,
    )
# Allow running this test file directly as a script.
if __name__ == '__main__':
    test_main()
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_MACHO_MAIN_COMMAND_H
#define LIEF_MACHO_MAIN_COMMAND_H
#include <ostream>
#include "LIEF/visibility.h"
#include "LIEF/MachO/LoadCommand.hpp"
namespace LIEF {
namespace MachO {
namespace details {
struct entry_point_command;
}
/// Class that represent the LC_MAIN command. This kind
/// of command can be used to determine the entrypoint of an executable
class LIEF_API MainCommand : public LoadCommand {
  public:
  MainCommand() = default;
  MainCommand(const details::entry_point_command& cmd);
  MainCommand(uint64_t entrypoint, uint64_t stacksize);

  MainCommand(const MainCommand& copy) = default;
  MainCommand& operator=(const MainCommand& copy) = default;

  ~MainCommand() override = default;

  std::unique_ptr<LoadCommand> clone() const override {
    return std::make_unique<MainCommand>(*this);
  }

  /// Offset of the *main* function relative to the ``__TEXT``
  /// segment
  uint64_t entrypoint() const {
    return entrypoint_;
  }

  /// Change the offset of the *main* function
  void entrypoint(uint64_t entrypoint) {
    entrypoint_ = entrypoint;
  }

  /// The initial stack size
  uint64_t stack_size() const {
    return stack_size_;
  }

  /// Change the initial stack size
  void stack_size(uint64_t stacksize) {
    stack_size_ = stacksize;
  }

  std::ostream& print(std::ostream& os) const override;

  void accept(Visitor& visitor) const override;

  static bool classof(const LoadCommand* cmd) {
    return cmd->command() == LoadCommand::TYPE::MAIN;
  }

  private:
  uint64_t entrypoint_ = 0;
  uint64_t stack_size_ = 0;
};
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/MachO/MainCommand.hpp |
/* Copyright (c) 2021, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef MANIFEST_INCLUDED
#define MANIFEST_INCLUDED
#include <fstream> /* std::ifstream */
#include <memory>
#include <string>
#include "scope_guard.h"
#include "my_rapidjson_size_t.h"
#include <rapidjson/document.h>
#include <rapidjson/schema.h>
namespace manifest {
/*
  JSON schema (rapidjson) describing a version 1.0 manifest file.

  NOTE(review): this variable is defined at namespace scope in a header, so
  it must be declared "inline" (C++17). Without it, every translation unit
  that includes this header emits its own definition of the symbol, which
  violates the One Definition Rule and produces duplicate-symbol link errors
  as soon as the header is included from more than one source file.
*/
inline std::string manifest_version_1_0 =
    "{"
    " \"title\": \"Manifest validator version 1.0\","
    " \"description\": \"Expected schema for version 1.0\","
    " \"type\": \"object\","
    " \"properties\": {"
    " \"read_local_manifest\": {"
    " \"description\": \"Flag to indicate that manifest information is in "
    "data directory\","
    " \"type\": \"boolean\""
    " },"
    " \"components\": {"
    " \"description\": \"The list of components to be loaded at "
    "bootstrap\","
    " \"type\": \"string\""
    " }"
    " }"
    "}";
/*
  Reads the optional "<executable>.my" manifest file placed next to the
  server binary (or in a given instance directory), parses it as JSON and
  validates it against the supplied schema.
*/
class Manifest_reader final {
 public:
  /*
    Constructor

    Reads manifest file if present.
    Expected format: JSON.

    @param [in] executable_path Executable location
    @param [in] instance_path   Location of specific instance
                                Must have separator character at the end
    @param [in] json_schema     Schema the manifest content is validated
                                against (defaults to version 1.0)
  */
  explicit Manifest_reader(const std::string executable_path,
                           const std::string instance_path,
                           std::string json_schema = manifest_version_1_0)
      : config_file_path_(),
        schema_(),
        data_(),
        file_present_(false),
        valid_(false),
        empty_(true),
        ro_(true) {
    std::string exe_path(executable_path);
    const std::size_t last_separator = exe_path.find_last_of("/\\");
    std::string executable = exe_path.substr(last_separator + 1);
    const std::string path = exe_path.erase(last_separator + 1);
#ifdef _WIN32
    /* Strip the ".exe" suffix before appending the manifest extension. */
    const std::size_t ext = executable.find(".exe");
    executable = executable.substr(0, ext);
#endif  // _WIN32
    /* The manifest file is named "<executable>.my". */
    executable.append(".my");
    if (instance_path.length() == 0)
      config_file_path_ = path + executable;
    else
      config_file_path_ = instance_path + executable;
    std::ifstream file_stream(config_file_path_,
                              std::ios::in | std::ios::ate | std::ios::binary);
    if (!file_stream.is_open()) return;
    file_present_ = true;
    {
      /* Check if files read-only or not */
      std::ofstream out_stream(config_file_path_, std::ios_base::app);
      ro_ = !out_stream.is_open();
      out_stream.close();
    }
    auto clean_up = create_scope_guard([&] { file_stream.close(); });
    /* Stream was opened with std::ios::ate, so tellg() is the file size. */
    auto file_length = file_stream.tellg();
    if (file_length > 0) {
      empty_ = false;
      file_stream.seekg(std::ios::beg);
      const std::unique_ptr<char[]> read_data(
          new (std::nothrow) char[file_length]);
      if (!read_data) return;
      if (file_stream.read(read_data.get(), file_length).fail() == true) return;
      const std::string data(read_data.get(), file_length);
      if (data_.Parse(data).HasParseError()) return;
      if (schema_.Parse(json_schema).HasParseError()) return;
      {
        /* The content is parsed a second time into a scratch document so
           that schema validation does not touch data_. */
        rapidjson::Document document;
        if (document.Parse(data).HasParseError()) return;
        const rapidjson::SchemaDocument sd(schema_);
        rapidjson::SchemaValidator validator(sd);
        if (!document.Accept(validator)) return;
      }
    }
    /* Only reached when the file is missing/empty or fully valid JSON. */
    valid_ = true;
  }
  ~Manifest_reader() = default;
  /* True when a manifest file exists at the computed location. */
  bool file_present() const { return file_present_; }
  /* True when there is no manifest file, or it exists but is zero-length. */
  bool empty() const { return !file_present_ || empty_; }
  /* True when the manifest file could not be opened for writing. */
  bool ro() const { return ro_; }
  /* Full path of the manifest file (whether or not it exists). */
  std::string manifest_file() const { return config_file_path_; }
  /*
    Value of the "read_local_manifest" flag; false when the element is
    absent or the manifest is invalid.
  */
  bool read_local_manifest() const {
    bool read_local_manifest = false;
    if (get_element<bool>("read_local_manifest", read_local_manifest) == false)
      return false;
    return read_local_manifest;
  }
  /*
    Fetch the "components" element.

    @param [out] components_string Value of the element

    @returns true when the element is present, false otherwise
  */
  bool components(std::string &components_string) const {
    return get_element<std::string>("components", components_string);
  }
 private:
  /**
    Get an element value from JSON document.

    Assumption: Type is compatible with Get() function and
                type of element is matching with template argument.

    @param [in]  element_name  Name of the element being searched
    @param [out] element_value Value of the element

    @returns status of search operation
      @retval true  Element found. Refer to element_value
      @retval false Element missing.
  */
  template <typename T>
  bool get_element(const std::string element_name, T &element_value) const {
    if (!valid_ || !data_.HasMember(element_name)) return false;
    element_value = data_[element_name].Get<T>();
    return true;
  }
 private:
  /** Configuration file path */
  std::string config_file_path_;
  /** Schema Document */
  rapidjson::Document schema_;
  /** Configuration data in JSON */
  rapidjson::Document data_;
  /** File status */
  bool file_present_;
  /** Validity of configuration data */
  bool valid_;
  /** content */
  bool empty_;
  /** RO flag */
  bool ro_;
};
} // namespace manifest
#endif // !MANIFEST_INCLUDED | c | github | https://github.com/mysql/mysql-server | include/manifest.h |
# General utilities for MAPI and MAPI objects.
# We used to use these old names from the 'types' module...
TupleType=tuple # legacy alias for types.TupleType
ListType=list # legacy alias for types.ListType
IntType=int # legacy alias for types.IntType
from pywintypes import TimeType
import pythoncom
import mapi, mapitags
# Lazily-built reverse map: property tag (and bare property id) -> PR_* name.
prTable = {}
def GetPropTagName(pt):
    """Return the PR_* symbolic name for a MAPI property tag.

    Falls back to a lookup by the bare property id (ignoring the type bits),
    then to a hex string, when no symbolic name is known.
    """
    if not prTable:
        for name, value in mapitags.__dict__.iteritems():
            if name[:3] == 'PR_':
                # Store both the full ID (including type) and just the ID.
                # This is so PR_FOO_A and PR_FOO_W are still differentiated,
                # but should we get a PT_FOO with PT_ERROR set, we fallback
                # to the ID.
                # String types should have 3 definitions in mapitags.py
                # PR_BODY = PROP_TAG( PT_TSTRING, 4096)
                # PR_BODY_W = PROP_TAG( PT_UNICODE, 4096)
                # PR_BODY_A = PROP_TAG( PT_STRING8, 4096)
                # The following change ensures a lookup using only the
                # property id returns the conditional default.
                # PT_TSTRING is a conditional assignment for either PT_UNICODE or
                # PT_STRING8 and should not be returned during a lookup.
                if mapitags.PROP_TYPE(value) == mapitags.PT_UNICODE or \
                   mapitags.PROP_TYPE(value) == mapitags.PT_STRING8:
                    if name[-2:] == '_A' or name[-2:] == '_W':
                        prTable[value] = name
                    else:
                        prTable[mapitags.PROP_ID(value)] = name
                else:
                    prTable[value] = name
                    prTable[mapitags.PROP_ID(value)] = name
    try:
        try:
            return prTable[pt]
        except KeyError:
            # Can't find it exactly - see if the raw ID exists.
            return prTable[mapitags.PROP_ID(pt)]
    except KeyError:
        # No symbolic name at all - format as hex.  hex() of a negative long
        # warns on older Pythons, so normalise the string representation by
        # hand instead (e.g. -0x8000000L -> 0x80000000).
        ret = hex(long(pt))
        if ret[0]=='-': ret = ret[1:]
        if ret[-1]=='L': ret = ret[:-1]
        return ret
# Lazily-built reverse map: MAPI error/warning code -> symbolic name.
mapiErrorTable = {}
def GetScodeString(hr):
    """Return a symbolic name for a MAPI error or warning SCODE.

    Falls back to pythoncom's generic SCODE formatting when the code is not
    one of the MAPI_E_*/MAPI_W_* constants.
    """
    if not mapiErrorTable:
        # Build the lookup table on first use.
        for sym, code in mapi.__dict__.iteritems():
            prefix = sym[:7]
            if prefix == 'MAPI_E_' or prefix == 'MAPI_W_':
                mapiErrorTable[code] = sym
    return mapiErrorTable.get(hr, pythoncom.GetScodeString(hr))
# Lazily-built reverse map: MAPI property type -> PT_* name.
ptTable = {}
def GetMapiTypeName(propType, rawType=True):
    """Given a mapi type flag, return a string description of the type"""
    if not ptTable:
        for name, value in mapitags.__dict__.iteritems():
            if name[:3] == 'PT_':
                # PT_TSTRING is a conditional assignment
                # for either PT_UNICODE or PT_STRING8 and
                # should not be returned during a lookup.
                if name in ['PT_TSTRING', 'PT_MV_TSTRING']:
                    continue
                ptTable[value] = name
    if rawType:
        # Strip the multi-valued flag so PT_MV_* tags resolve to the base
        # type's name.
        propType = propType & ~mapitags.MV_FLAG
    return ptTable.get(propType, str(hex(propType)))
def GetProperties(obj, propList):
    """Given a MAPI object and a list of properties, return a list of property values.

    Allows a single property to be passed, and the result is a single object.
    Each requested property can be an integer or a string.  If a string, it is
    automatically converted to an integer via the GetIDsFromNames function.

    If the property fetch fails, the result is None.
    """
    bRetList = 1
    if type(propList) not in [TupleType, ListType]:
        # A single property was passed - return a single value, not a list.
        bRetList = 0
        propList = (propList,)
    realPropList = []
    for prop in propList:
        if type(prop) != IntType:
            # Named property - resolve the name to a property tag.
            props = ((mapi.PS_PUBLIC_STRINGS, prop),)
            propIds = obj.GetIDsFromNames(props, 0)
            prop = mapitags.PROP_TAG(mapitags.PT_UNSPECIFIED, mapitags.PROP_ID(propIds[0]))
        realPropList.append(prop)
    hr, data = obj.GetProps(realPropList, 0)
    if hr != 0:
        # Total failure - the caller gets None rather than partial data.
        return None
    if bRetList:
        return [v[1] for v in data]
    else:
        return data[0][1]
def GetAllProperties(obj, make_tag_names=True):
    """Return [(name, value), ...] for every property on the MAPI object.

    When make_tag_names is true, each tag is translated to a readable name
    (the server-provided named-property string when available, otherwise the
    symbolic PR_* name); when false the raw integer tag is returned.
    """
    tags = obj.GetPropList(0)
    hr, data = obj.GetProps(tags)
    result = []
    for tag, val in data:
        name = tag
        if make_tag_names:
            hr, tags, array = obj.GetNamesFromIDs((tag,))
            candidate = array[0][1]
            # Prefer the "named property" name when the server provides one.
            if type(candidate) == type(u''):
                name = candidate
            else:
                name = GetPropTagName(tag)
        result.append((name, val))
    return result
# Map a native Python type to the MAPI property type (PT_*) used to store it.
_MapiTypeMap = {
    type(0.0): mapitags.PT_DOUBLE,
    type(0): mapitags.PT_I4,
    type(''.encode('ascii')): mapitags.PT_STRING8, # str in py2x, bytes in 3x
    type(u''): mapitags.PT_UNICODE, # unicode in py2x, str in 3x
    type(None): mapitags.PT_UNSPECIFIED,
    # In Python 2.2.2, bool isn't a distinct type (type(1==1) is type(0)).
}
def SetPropertyValue(obj, prop, val):
    """Set (or delete) a single property on a MAPI object.

    prop may be an integer property tag or a named-property string; for a
    string, the MAPI property type is derived from the Python type of val.
    A val of None deletes the property instead of setting it.
    """
    if type(prop)!=IntType:
        # Named property: resolve the name to an ID, creating it if needed.
        props = ( (mapi.PS_PUBLIC_STRINGS, prop), )
        propIds = obj.GetIDsFromNames(props, mapi.MAPI_CREATE)
        # NOTE: because bool compares equal to int in Python, the ints 1 and 0
        # also take this branch and are stored as PT_BOOLEAN.
        if val == (1==1) or val == (1==0):
            type_tag = mapitags.PT_BOOLEAN
        else:
            type_tag = _MapiTypeMap.get(type(val))
            if type_tag is None:
                raise ValueError("Don't know what to do with '%r' ('%s')" % (val, type(val)))
        # Rebuild the tag with the deduced type and the resolved ID.
        prop = mapitags.PROP_TAG( type_tag, mapitags.PROP_ID(propIds[0]))
    if val is None:
        # Delete the property
        obj.DeleteProps((prop,))
    else:
        obj.SetProps(((prop,val),))
def SetProperties(msg, propDict):
    """Set an object's properties from a Python dictionary.

    String keys are resolved to property IDs via GetIDsFromNames; integer
    keys are assumed to be native property tags.  Coded for maximum
    efficiency wrt server calls - at most 2 calls are made regardless of
    the dictionary contents (only 1 if every key is an integer).
    """
    # Pass 1: collect the string keys that need resolving to IDs.
    named = []
    for key, val in propDict.iteritems():
        if type(key) in [str, unicode]:
            named.append((mapi.PS_PUBLIC_STRINGS, key))
    # Resolve all names in one server round-trip.
    if named:
        newIds = msg.GetIDsFromNames(named, mapi.MAPI_CREATE)
    # Pass 2: build the (tag, value) pairs, consuming the resolved IDs in
    # the same dictionary order in which they were requested above.
    idx = 0
    pairs = []
    for key, val in propDict.iteritems():
        if type(key) in [str, unicode]:
            type_val = type(val)
            if type_val in [str, unicode]:
                tagType = mapitags.PT_UNICODE
            elif type_val == IntType:
                tagType = mapitags.PT_I4
            elif type_val == TimeType:
                tagType = mapitags.PT_SYSTIME
            else:
                raise ValueError("The type of object %s(%s) can not be written" % (repr(val),type_val))
            key = mapitags.PROP_TAG(tagType, mapitags.PROP_ID(newIds[idx]))
            idx = idx + 1
        pairs.append((key, val))
    # Second (or only) server call: write everything at once.
    msg.SetProps(pairs)
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
"""Postapocalyptic Zombiegame
1. Implement Fight Concept
2. Add Equipment (Armor, Weapons) + Health + Stamina
3. Add GameState and get rid of globals
4. First attempt at classes, don't judge ;)
5. sketch of a fight system:
http://inventwithpython.com/blog/2012/03/18/how-much-math-do-i-need-to-know
-to-program-not-that-much-actually/
"""
from random import randint
from sys import exit
class Colors(object):
    """ANSI terminal escape codes used to colorize console output."""
    HEADER = '\033[95m'   # bright magenta
    PLAYER = '\033[94m'   # bright blue - the player's lines
    FRIEND = '\033[92m'   # bright green - the friendly NPC's lines
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'     # bright red
    END = '\033[0m'       # reset to the terminal default
class Player(object):
    """A participant in the game, identified by name, with hit points."""

    def __init__(self, name):
        self.name = name
        # Every player starts at full health.
        self.health = 100
# Something funky is going on with the scope so we define these globally until
# this whole mess gets rewritten
cend = Colors().END
cyou = Colors().PLAYER
cfriend = Colors().FRIEND
class Enemy(object):
    """Base class for all hostile creatures; no behaviour yet."""
    pass
class Zombie(Enemy):
    """A walking corpse; placeholder until the fight system exists."""
    pass
class ZombieDog(Enemy):
    """A zombified dog; placeholder until the fight system exists."""
    pass
class HoodedFigure(Enemy):
    """A mysterious human enemy; placeholder until the fight system exists."""
    pass
class Scene(object):
    """Base class for game locations; subclasses implement enter()."""
    def __init__(self):
        # Same as above, can't figure out the scope problem, so this is a dirty
        # workaround
        # NOTE(review): npc starts as "" (falsy) so Hospital can detect the
        # first visit; Hospital.enter() later overwrites it with a real name.
        player2 = Player("")
        self.npc = player2.name
class Street(Scene):
    """An outdoor scene; clearly unfinished."""
    def enter(self):
        print "You feel uneasy."
        action = raw_input("> ")
        # NOTE(review): every path falls through and returns None, which the
        # engine then passes to Map.next_scene() - looks like a stub.
        if any(x in action for x in ("look", "explore")):
            pass
class Hospital(Scene):
    """ find some equipment here, maybe a first encounter """
    def enter(self):
        # The intro / NPC-naming sequence only plays on the first visit,
        # while self.npc is still the empty string.
        if not self.npc:
            print cfriend + "\"Wake up, man! We have to get out of here!\""\
                + cend
            print "Someone is shaking you angrily."
            print cfriend + "\"Finally, man, get up, we have to leave RIGHT NOW!\""\
                + cend
            print "As you come to, you realize you are in a hospital room."
            print "Your friend is grabbing you. You try to shake him off,"
            print "but your head is spinning. You can't even remember his name."
            friends_name = raw_input("What was his name again? ")
            # Remember the name for later visits (see Scene.__init__ hack).
            player2 = Player(friends_name)
            self.npc = player2.name
            print "Right,", self.npc + "..."
            print "You've been friends since preschool."
            print cyou + self.npc, ", what the hell is going on here?" + cend
        print cfriend + "\"Can you get up? We need to get down the corridor"
        print "unless you want to die here.\"" + cend
        action = raw_input("> ")
        # Map the free-form input onto the next scene name.
        if any(x in action for x in ("look", "explore")):
            print "Nothing here yet"
            return "death"
        elif "door" in action:
            return "chained_door"
        elif any(x in action for x in ("hallway", "corridor")):
            return "street"
        elif "help" in action:
            print "try the door"
            return "hospital"
        else:
            # Unrecognized input: stay in the hospital and re-prompt.
            return "hospital"
class ChainedDoor(Scene):
    """A trap scene: retreating is the only survivable choice."""
    def enter(self):
        print "As you approach the chained door you hear growling. A foul"
        print "stench fills the air. Someone sprayed graffiti on the door:"
        print "DON'T DEAD. OPEN INSIDE.\n What the hell does that mean?"
        action = raw_input("> ")
        if any(x in action for x in ("run", "flee", "back")):
            print ""
            return "hospital"
        else:
            # Anything other than fleeing is fatal.
            print "As you stand there, pondering this mystery, something grabs"
            print "you at your ankle. You feel a searing pain.\n"
            return "death"
class Death(Scene):
    """Terminal scene: prints a random death message and exits the process."""
    # One of these is picked at random on entry.
    quips = [
        "You died. Sorry 'bout that.",
        "You died. Why? I don't know.",
        "Daaamn, he bit your face off."
    ]
    def enter(self):
        # randint's upper bound is inclusive, hence len-1.
        print Death.quips[randint(0, len(self.quips)-1)]
        exit(1)
class Fight(Scene):
    """ init, fight, flee """
    # TODO: not implemented yet - registered in Map.scenes but entering it
    # would fail since there is no enter() override beyond Scene's.
    pass
class Engine(object):
    """ Starts the game by calling the opening scene and every scene after that """
    def __init__(self, scene_map):
        # scene_map: a Map instance resolving scene names to Scene objects.
        self.scene_map = scene_map
    def play(self):
        your_name = raw_input("What is your name? ")
        # NOTE(review): player1 is only used for the HP printout below;
        # it is never passed to the scenes.
        player1 = Player(your_name)
        print cyou + "Your current HP is: ", player1.health, cend
        current_scene = self.scene_map.opening_scene()
        # Loop forever; the game only ends when Death.enter() calls exit().
        while True:
            next_scene_name = current_scene.enter()
            current_scene = self.scene_map.next_scene(next_scene_name)
class Map(object):
    """Registry of scene names, resolving each name to its Scene instance."""

    # All known scenes, instantiated once at class-definition time.
    scenes = {
        "hospital": Hospital(),
        "chained_door": ChainedDoor(),
        "street": Street(),
        "death": Death(),
        "fight": Fight()
    }

    def __init__(self, start_scene):
        # Remember the name of the scene the game opens with.
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        # Look up the Scene registered under this name (None if unknown).
        return Map.scenes.get(scene_name)

    def opening_scene(self):
        # The very first scene, used when first initializing or starting over.
        return self.next_scene(self.start_scene)
# Build the scene map starting at the hospital and kick off the game loop.
zombie = Engine(Map("hospital"))
zombie.play()
# -*- coding: utf-8 -*-
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import unittest
import random
import subprocess
from pattern import text
from pattern import en
# Directory of this test file, used to locate the bundled corpora.
try:
    PATH = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # __file__ is undefined (e.g., running interactively); fall back to cwd.
    PATH = ""
#---------------------------------------------------------------------------------------------------
class TestInflection(unittest.TestCase):
    """Exercise pattern.en.inflect: articles, pluralization, singularization,
    verb lemmatization/conjugation and adjective grading.

    Several tests measure accuracy against the CELEX word-form corpus and
    assert a minimum score rather than exact output.
    """

    def setUp(self):
        pass

    def test_indefinite_article(self):
        # Assert "a" or "an".
        for article, word in (
          ("an", "hour"),
          ("an", "FBI"),
          ("a", "bear"),
          ("a", "one-liner"),
          ("a", "European"),
          ("a", "university"),
          ("a", "uterus"),
          ("an", "owl"),
          ("an", "yclept"),
          ("a", "year")):
            self.assertEqual(en.article(word, function=en.INDEFINITE), article)
        self.assertEqual(en.inflect.article("heir", function=en.DEFINITE), "the")
        self.assertEqual(en.inflect.referenced("ewe"), "a ewe")
        print("pattern.en.inflect.article()")

    def test_pluralize(self):
        # Assert "octopodes" for classical plural of "octopus".
        # Assert "octopuses" for modern plural.
        self.assertEqual("octopodes", en.inflect.pluralize("octopus", classical=True))
        self.assertEqual("octopuses", en.inflect.pluralize("octopus", classical=False))
        # Assert the accuracy of the pluralization algorithm.
        from pattern.db import Datasheet
        i, n = 0, 0
        for sg, pl in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-en-celex.csv")):
            if en.inflect.pluralize(sg) == pl:
                i +=1
            n += 1
        self.assertTrue(float(i) / n > 0.95)
        print("pattern.en.inflect.pluralize()")

    def test_singularize(self):
        # Assert the accuracy of the singularization algorithm.
        from pattern.db import Datasheet
        i, n = 0, 0
        for sg, pl in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-en-celex.csv")):
            if en.inflect.singularize(pl) == sg:
                i +=1
            n += 1
        self.assertTrue(float(i) / n > 0.95)
        print("pattern.en.inflect.singularize()")

    def test_find_lemma(self):
        # Assert the accuracy of the verb lemmatization algorithm.
        # Note: the accuracy is higher (95%) when measured on CELEX word forms
        # (probably because en.verbs has high percentage irregular verbs).
        i, n = 0, 0
        for v1, v2 in en.inflect.verbs.inflections.items():
            if en.inflect.verbs.find_lemma(v1) == v2:
                i += 1
            n += 1
        self.assertTrue(float(i) / n > 0.90)
        print("pattern.en.inflect.verbs.find_lemma()")

    def test_find_lexeme(self):
        # Assert the accuracy of the verb conjugation algorithm.
        i, n = 0, 0
        for v, lexeme1 in en.inflect.verbs.infinitives.items():
            lexeme2 = en.inflect.verbs.find_lexeme(v)
            for j in range(len(lexeme2)):
                # Count a slot correct when it matches, or when the known slot
                # is empty and the predicted form matches the fallback slot
                # (index 10 for participles, 0 otherwise - py2 and/or idiom).
                if lexeme1[j] == lexeme2[j] or \
                   lexeme1[j] == "" and \
                   lexeme1[j>5 and 10 or 0] == lexeme2[j]:
                    i += 1
                n += 1
        self.assertTrue(float(i) / n > 0.90)
        print("pattern.en.inflect.verbs.find_lexeme()")

    def test_conjugate(self):
        # Assert different tenses with different conjugations.
        for (v1, v2, tense) in (
          ("be", "be", en.INFINITIVE),
          ("be", "am", (en.PRESENT, 1, en.SINGULAR)),
          ("be", "are", (en.PRESENT, 2, en.SINGULAR)),
          ("be", "is", (en.PRESENT, 3, en.SINGULAR)),
          ("be", "are", (en.PRESENT, 0, en.PLURAL)),
          ("be", "being", (en.PRESENT + en.PARTICIPLE,)),
          ("be", "was", (en.PAST, 1, en.SINGULAR)),
          ("be", "were", (en.PAST, 2, en.SINGULAR)),
          ("be", "was", (en.PAST, 3, en.SINGULAR)),
          ("be", "were", (en.PAST, 0, en.PLURAL)),
          ("be", "were", (en.PAST, 0, None)),
          ("be", "been", (en.PAST + en.PARTICIPLE,)),
          ("be", "am", "1sg"),
          ("be", "are", "2sg"),
          ("be", "is", "3sg"),
          ("be", "are", "1pl"),
          ("be", "are", "2pl"),
          ("be", "are", "3pl"),
          ("be", "are", "pl"),
          ("be", "being", "part"),
          ("be", "was", "1sgp"),
          ("be", "were", "2sgp"),
          ("be", "was", "3sgp"),
          ("be", "were", "1ppl"),
          ("be", "were", "2ppl"),
          ("be", "were", "3ppl"),
          ("be", "were", "p"),
          ("be", "were", "ppl"),
          ("be", "been", "ppart"),
          ("be", "am not", "1sg-"),
          ("be", "aren't", "2sg-"),
          ("be", "isn't", "3sg-"),
          ("be", "aren't", "1pl-"),
          ("be", "aren't", "2pl-"),
          ("be", "aren't", "3pl-"),
          ("be", "aren't", "pl-"),
          ("be", "wasn't", "1sgp-"),
          ("be", "weren't", "2sgp-"),
          ("be", "wasn't", "3sgp-"),
          ("be", "weren't", "1ppl-"),
          ("be", "weren't", "2ppl-"),
          ("be", "weren't", "3ppl-"),
          ("be", "weren't", "ppl-"),
          ("had", "have", "inf"),
          ("had", "have", "1sg"),
          ("had", "have", "2sg"),
          ("had", "has", "3sg"),
          ("had", "have", "pl"),
          ("had", "having", "part"),
          ("has", "had", "1sgp"),
          ("has", "had", "2sgp"),
          ("has", "had", "3sgp"),
          ("has", "had", "ppl"),
          ("has", "had", "p"),
          ("has", "had", "ppart"),
          ("will", "will", "1sg"),
          ("will", "will", "2sg"),
          ("will", "will", "3sg"),
          ("will", "will", "1pl"),
          ("imaginerify", "imaginerifying", "part"),
          ("imaginerify", "imaginerified", "3sgp"),
          ("imaginerify", None, "1sg-")):
            self.assertEqual(en.inflect.conjugate(v1, tense), v2)
        print("pattern.en.inflect.conjugate()")

    def test_lemma(self):
        # Assert the infinitive of "weren't".
        v = en.inflect.lemma("weren't")
        self.assertEqual(v, "be")
        print("pattern.en.inflect.lemma()")

    def test_lexeme(self):
        # Assert all inflections of "be".
        v = en.inflect.lexeme("be")
        self.assertEqual(v, [
            "be", "am", "are", "is", "being",
            "was", "were", "been",
            "am not", "aren't", "isn't", "wasn't", "weren't"
        ])
        v = en.inflect.lexeme("imaginerify")
        self.assertEqual(v, [
            "imaginerify", "imaginerifies", "imaginerifying", "imaginerified"
        ])
        print("pattern.en.inflect.lexeme()")

    def test_tenses(self):
        # Assert tense recognition.
        self.assertTrue((en.inflect.PRESENT, 1, en.inflect.SINGULAR) in en.inflect.tenses("am"))
        self.assertTrue("1sg" in en.inflect.tenses("am"))
        self.assertTrue("1sg" in en.inflect.tenses("will"))
        self.assertTrue("2sg-" in en.inflect.tenses("won't"))
        self.assertTrue("part" in en.inflect.tenses("imaginarifying"))
        print("pattern.en.inflect.tenses()")

    def test_comparative(self):
        # Assert "nice" => "nicer".
        self.assertEqual(en.inflect.comparative("nice"), "nicer")
        print("pattern.en.inflect.comparative()")

    def test_superlative(self):
        # Assert "nice" => "nicest"
        self.assertEqual(en.inflect.superlative("nice"), "nicest")
        # Assert "important" => "most important"
        self.assertEqual(en.inflect.superlative("important"), "most important")
        print("pattern.en.inflect.superlative()")
#---------------------------------------------------------------------------------------------------
class TestQuantification(unittest.TestCase):
    """Exercise pattern.en number <-> word conversion and quantification."""

    def setUp(self):
        pass

    def test_extract_leading_zeros(self):
        # Assert "zero zero one" => ("one", 2).
        from pattern.text.en.inflect_quantify import zshift
        v = zshift("zero zero one")
        self.assertEqual(v, ("one", 2))
        v = zshift("0 0 one")
        self.assertEqual(v, ("one", 2))
        print("pattern.en.quantify._extract_leading_zeros()")

    def test_numerals(self):
        # Assert number to numerals.
        for x, s in (
          (1.5, "one point five"),
          (15, "fifteen"),
          (150, "one hundred and fifty"),
          (151, "one hundred and fifty-one"),
          (1510, "one thousand five hundred and ten"),
          (15101, "fifteen thousand one hundred and one"),
          (150101, "one hundred and fifty thousand one hundred and one"),
          (1500101, "one million, five hundred thousand one hundred and one")):
            self.assertEqual(en.numerals(x), s)
        print("pattern.en.numerals()")

    def test_number(self):
        # Assert numeric string = actual number (after rounding).
        for i in range(100):
            x = random.random()
            y = en.number(en.numerals(x, round=10))
            self.assertAlmostEqual(x, y, places=10)
        print("pattern.en.number()")

    def test_quantify(self):
        # Assert quantification algorithm.
        for a, s in (
          (2 * ["carrot"], "a pair of carrots"),
          (4 * ["carrot"], "several carrots"),
          (9 * ["carrot"], "a number of carrots"),
          (19 * ["carrot"], "a score of carrots"),
          (23 * ["carrot"], "dozens of carrots"),
          (201 * ["carrot"], "hundreds of carrots"),
          (1001 * ["carrot"], "thousands of carrots"),
          ({"carrot": 4, "parrot": 2}, "several carrots and a pair of parrots")):
            self.assertEqual(en.quantify(a), s)
        print("pattern.en.quantify()")

    def test_reflect(self):
        # Assert type descriptions for arbitrary Python objects.
        self.assertEqual(en.reflect(""), "a string")
        self.assertEqual(en.reflect(["","",""]), "several strings")
        self.assertEqual(en.reflect(en.reflect), "a function")
        print("pattern.en.reflect()")
#---------------------------------------------------------------------------------------------------
class TestSpelling(unittest.TestCase):
    """Exercise pattern.en.suggest() spelling correction."""

    def test_spelling(self):
        # Assert case-sensitivity + numbers.
        for a, b in (
          ( ".", "." ),
          ( "?", "?" ),
          ( "!", "!" ),
          ( "I", "I" ),
          ( "a", "a" ),
          ( "42", "42" ),
          ("3.14", "3.14"),
          ( "The", "The" ),
          ( "the", "the" )):
            self.assertEqual(en.suggest(a)[0][0], b)
        # Assert spelling suggestion accuracy.
        # Note: simply training on more text will not improve accuracy.
        i = j = 0.0
        from pattern.db import Datasheet
        for correct, wrong in Datasheet.load(os.path.join(PATH, "corpora", "spelling-birkbeck.csv")):
            for w in wrong.split(" "):
                if en.suggest(w)[0][0] == correct:
                    i += 1
                else:
                    j += 1
        self.assertTrue(i / (i+j) > 0.70)
        print("pattern.en.suggest()")
#---------------------------------------------------------------------------------------------------
class TestParser(unittest.TestCase):
    """Exercise the shallow parser: tokenizer, Brill tagger, chunker,
    relation/PNP detection, lemmatizer and the command-line interface."""

    def setUp(self):
        pass

    def test_tokenize(self):
        # Assert list with two sentences.
        # The tokenizer should at least handle common abbreviations and punctuation.
        v = en.tokenize("The cat is eating (e.g., a fish). Yum!")
        self.assertEqual(v, ["The cat is eating ( e.g. , a fish ) .", "Yum !"])
        print("pattern.en.tokenize()")

    def _test_morphological_rules(self, function=en.parser.morphology.apply):
        """ For each word in WordNet that is not in Brill's lexicon,
            test if the given tagger((word, "NN")) yields an improved (word, tag).
            Returns the relative scores for nouns, verbs, adjectives and adverbs.
        """
        scores = []
        for tag, lexicon in (
          ("NN", en.wordnet.NOUNS),
          ("VB", en.wordnet.VERBS),
          ("JJ", en.wordnet.ADJECTIVES),
          ("RB", en.wordnet.ADVERBS)):
            i, n = 0, 0
            for word in lexicon:
                word = word.form
                if word not in en.lexicon:
                    if function([word, "NN"])[1].startswith(tag):
                        i += 1
                    n += 1
            scores.append(float(i) / n)
        return scores

    def test_default_suffix_rules(self):
        # Assert part-of-speech tag for unknown tokens.
        for a, b in (
          (["eating", "NN"], ["eating", "VBG"]),
          (["tigers", "NN"], ["tigers", "NNS"]),
          (["really", "NN"], ["really", "RB"]),
          (["foolish", "NN"], ["foolish", "JJ"])):
            self.assertEqual(text._suffix_rules(a), b)
        # Test with words in WordNet that are not in Brill's lexicon.
        # Given are the scores for detection of nouns, verbs, adjectives and adverbs.
        # The baseline should increase (not decrease) when the algorithm is modified.
        v = self._test_morphological_rules(function=text._suffix_rules)
        self.assertTrue(v[0] > 0.91) # NN
        self.assertTrue(v[1] > 0.23) # VB
        self.assertTrue(v[2] > 0.38) # JJ
        self.assertTrue(v[3] > 0.60) # RB
        print("pattern.text._suffix_rules()")

    def test_apply_morphological_rules(self):
        # Assert part-of-speech tag for unknown tokens (Brill's lexical rules).
        v = self._test_morphological_rules(function=en.parser.morphology.apply)
        self.assertTrue(v[0] > 0.85) # NN
        self.assertTrue(v[1] > 0.19) # VB
        self.assertTrue(v[2] > 0.65) # JJ
        self.assertTrue(v[3] > 0.59) # RB
        print("pattern.en.parser.morphology.apply()")

    def test_apply_context_rules(self):
        # Assert part-of-speech tags based on word context.
        for a, b in ( # Rule:
          ([["", "JJ"], ["", "JJ"], ["", ","]], [["", "JJ"], ["", "NN"], ["", ","]]), # SURROUNDTAG
          ([["", "NNP"], ["", "RB"]], [["", "NNP"], ["", "NNP"]]),                    # PREVTAG
          ([["", "NN"], ["", "PRP$"]], [["", "VB"], ["", "PRP$"]]),                   # NEXTTAG
          ([["phone", ""], ["", "VBZ"]], [["phone", ""], ["", "NNS"]]),               # PREVWD
          ([["", "VB"], ["countries", ""]], [["", "JJ"], ["countries", ""]]),         # NEXTWD
          ([["close", "VB"], ["to", ""]], [["close", "RB"], ["to", ""]]),             # RBIGRAM
          ([["very", ""], ["much", "JJ"]], [["very", ""], ["much", "RB"]]),           # LBIGRAM
          ([["such", "JJ"], ["as", "DT"]], [["such", "JJ"], ["as", "IN"]]),           # WDNEXTWD
          ([["be", "VB"]], [["be", "VB"]])):                                          # CURWD
            self.assertEqual(en.parser.context.apply(a), b)
        print("pattern.en.parser.context.apply()")

    def test_find_tags(self):
        # Assert part-of-speech-tag annotation.
        v = en.parser.find_tags(["black", "cat"])
        self.assertEqual(v, [["black", "JJ"], ["cat", "NN"]])
        # Capitalization steers unknown words towards proper-noun tags.
        self.assertEqual(en.parser.find_tags(["felix"])[0][1], "NN")
        self.assertEqual(en.parser.find_tags(["Felix"])[0][1], "NNP")
        print("pattern.en.parser.find_tags()")

    def test_find_chunks(self):
        # Assert chunk tag annotation.
        v = en.parser.find_chunks([["black", "JJ"], ["cat", "NN"]])
        self.assertEqual(v, [["black", "JJ", "B-NP", "O"], ["cat", "NN", "I-NP", "O"]])
        # Assert the accuracy of the chunker.
        # For example, in "The very black cat must be really meowing really loud in the yard.":
        # - "The very black" (NP)
        # - "must be really meowing" (VP)
        # - "really loud" (ADJP)
        # - "in" (PP)
        # - "the yard" (NP)
        v = en.parser.find_chunks([
            ["","DT"], ["","RB"], ["","JJ"], ["","NN"],
            ["","MD"], ["","RB"], ["","VBZ"], ["","VBG"],
            ["","RB"], ["","JJ"],
            ["","IN"],
            ["","CD"], ["","NNS"]
        ])
        self.assertEqual(v, [
            ["", "DT", "B-NP", "O"], ["", "RB", "I-NP", "O"], ["", "JJ", "I-NP", "O"], ["", "NN", "I-NP", "O"],
            ["", "MD", "B-VP", "O"], ["", "RB", "I-VP", "O"], ["", "VBZ", "I-VP", "O"], ["", "VBG", "I-VP", "O"],
            ["", "RB", "B-ADJP", "O"], ["", "JJ", "I-ADJP", "O"],
            ["", "IN", "B-PP", "B-PNP"],
            ["", "CD", "B-NP", "I-PNP"], ["", "NNS", "I-NP", "I-PNP"]])
        # Assert commas inside chunks.
        # - "the big, black cat"
        v = en.parser.find_chunks([
            ["", "DT"], ["", "JJ"], ["", ","], ["", "JJ"], ["", "NN"]
        ])
        self.assertEqual(v, [
            ["", "DT", "B-NP", "O"],
            ["", "JJ", "I-NP", "O"],
            ["", ",", "I-NP", "O"],
            ["", "JJ", "I-NP", "O"],
            ["", "NN", "I-NP", "O"]
        ])
        # - "big, black and furry"
        v = en.parser.find_chunks([
            ["", "JJ"], ["", ","], ["", "JJ"], ["", "CC"], ["", "JJ"]
        ])
        self.assertEqual(v, [
            ["", "JJ", "B-ADJP", "O"],
            ["", ",", "I-ADJP", "O"],
            ["", "JJ", "I-ADJP", "O"],
            ["", "CC", "I-ADJP", "O"],
            ["", "JJ", "I-ADJP", "O"]
        ])
        # - big, and very black (= two chunks "big" and "very black")
        v = en.parser.find_chunks([
            ["", "JJ"], ["", ","], ["", "CC"], ["", "RB"], ["", "JJ"]
        ])
        self.assertEqual(v, [
            ["", "JJ", "B-ADJP", "O"],
            ["", ",", "O", "O"],
            ["", "CC", "O", "O"],
            ["", "RB", "B-ADJP", "O"],
            ["", "JJ", "I-ADJP", "O"]
        ])
        # Assert cases for which we have written special rules.
        # - "perhaps you" (ADVP + NP)
        v = en.parser.find_chunks([["","RB"], ["","PRP"]])
        self.assertEqual(v, [["","RB","B-ADVP", "O"], ["","PRP","B-NP", "O"]])
        # - "very nice cats" (NP)
        v = en.parser.find_chunks([["","RB"], ["","JJ"], ["","PRP"]])
        self.assertEqual(v, [["","RB","B-NP", "O"], ["","JJ","I-NP", "O"], ["","PRP","I-NP", "O"]])
        print("pattern.en.parser.find_chunks()")

    def test_find_labels(self):
        # Assert relation tag annotation (SBJ/OBJ).
        v = en.parser.find_labels([
            ["", "", "NP"], ["", "", "NP"],
            ["", "", "VP"], ["", "", "VP"],
            ["", "", "NP"]])
        self.assertEqual(v, [
            ["", "", "NP", "NP-SBJ-1"], ["", "", "NP", "NP-SBJ-1"],
            ["", "", "VP", "VP-1"], ["", "", "VP", "VP-1"],
            ["", "", "NP", "NP-OBJ-1"]])
        print("pattern.en.parser.find_labels()")

    def test_find_prepositions(self):
        # Assert preposition tag annotation (PP + NP).
        v = en.parser.find_prepositions([
            ["", "", "NP"],
            ["", "", "VP"],
            ["", "", "PP"],
            ["", "", "NP"],
            ["", "", "NP"],])
        self.assertEqual(v, [
            ["", "", "NP", "O"],
            ["", "", "VP", "O"],
            ["", "", "PP", "B-PNP"],
            ["", "", "NP", "I-PNP"],
            ["", "", "NP", "I-PNP"]])
        # Assert PNP's with consecutive PP's.
        v = en.parse("The cat was looking at me from up on the roof with interest.", prepositions=True)
        self.assertEqual(v,
            "The/DT/B-NP/O cat/NN/I-NP/O " \
            "was/VBD/B-VP/O looking/VBG/I-VP/O " \
            "at/IN/B-PP/B-PNP me/PRP/B-NP/I-PNP " \
            "from/IN/B-PP/B-PNP up/IN/I-PP/I-PNP on/IN/I-PP/I-PNP the/DT/B-NP/I-PNP roof/NN/I-NP/I-PNP " \
            "with/IN/B-PP/B-PNP interest/NN/B-NP/I-PNP " \
            "././O/O"
        )
        print("pattern.en.parser.find_prepositions()")

    def test_find_lemmata(self):
        # Assert lemmata for nouns and verbs.
        v = en.parser.find_lemmata([["cats", "NNS"], ["wearing", "VBG"], ["hats", "NNS"]])
        self.assertEqual(v, [
            ["cats", "NNS", "cat"],
            ["wearing", "VBG", "wear"],
            ["hats", "NNS", "hat"]])
        print("pattern.en.parser.find_lemmata()")

    def test_named_entity_recognition(self):
        # Assert named entities.
        v = en.parser.parse("Arnold Schwarzenegger is cool.", chunks=False)
        self.assertEqual(v,
            "Arnold/NNP-PERS Schwarzenegger/NNP-PERS is/VBZ cool/JJ ./."
        )
        print("pattern.en.parser.entities.apply()")

    def test_parse(self):
        # Assert parsed output with Penn Treebank II tags (slash-formatted).
        # 1) "the black cat" is a noun phrase, "on the mat" is a prepositional noun phrase.
        v = en.parser.parse("The black cat sat on the mat.")
        self.assertEqual(v,
            "The/DT/B-NP/O black/JJ/I-NP/O cat/NN/I-NP/O " + \
            "sat/VBD/B-VP/O " + \
            "on/IN/B-PP/B-PNP the/DT/B-NP/I-PNP mat/NN/I-NP/I-PNP ././O/O"
        )
        # 2) "the black cat" is the subject, "a fish" is the object.
        v = en.parser.parse("The black cat is eating a fish.", relations=True)
        self.assertEqual(v,
            "The/DT/B-NP/O/NP-SBJ-1 black/JJ/I-NP/O/NP-SBJ-1 cat/NN/I-NP/O/NP-SBJ-1 " + \
            "is/VBZ/B-VP/O/VP-1 eating/VBG/I-VP/O/VP-1 " + \
            "a/DT/B-NP/O/NP-OBJ-1 fish/NN/I-NP/O/NP-OBJ-1 ././O/O/O"
        )
        # 3) "chasing" and "mice" lemmata are "chase" and "mouse".
        v = en.parser.parse("The black cat is chasing mice.", lemmata=True)
        self.assertEqual(v,
            "The/DT/B-NP/O/the black/JJ/I-NP/O/black cat/NN/I-NP/O/cat " + \
            "is/VBZ/B-VP/O/be chasing/VBG/I-VP/O/chase " + \
            "mice/NNS/B-NP/O/mouse ././O/O/."
        )
        # 4) Assert unicode.
        self.assertTrue(isinstance(v, unicode))
        # 5) Assert unicode for faulty input (bytestring with unicode characters).
        self.assertTrue(isinstance(en.parse("ø ü"), unicode))
        self.assertTrue(isinstance(en.parse("ø ü", tokenize=True, tags=False, chunks=False), unicode))
        self.assertTrue(isinstance(en.parse("ø ü", tokenize=False, tags=False, chunks=False), unicode))
        self.assertTrue(isinstance(en.parse("o u", encoding="ascii"), unicode))
        # 6) Assert optional parameters (i.e., setting all to False).
        self.assertEqual(en.parse("ø ü.", tokenize=True, tags=False, chunks=False), u"ø ü .")
        self.assertEqual(en.parse("ø ü.", tokenize=False, tags=False, chunks=False), u"ø ü.")
        # 7) Assert the accuracy of the English tagger.
        # The expected floor differs depending on whether a statistical
        # language model is loaded (a[0]) or not (a[1]).
        i, n = 0, 0
        for corpus, a in (("tagged-en-wsj.txt", (0.968, 0.945)), ("tagged-en-oanc.txt", (0.929, 0.932))):
            for sentence in open(os.path.join(PATH, "corpora", corpus)).readlines():
                sentence = sentence.decode("utf-8").strip()
                s1 = [w.split("/") for w in sentence.split(" ")]
                s2 = [[w for w, pos in s1]]
                s2 = en.parse(s2, tokenize=False)
                s2 = [w.split("/") for w in s2.split(" ")]
                for j in range(len(s1)):
                    if s1[j][1] == s2[j][1].split("-")[0]:
                        i += 1
                    n += 1
            #print(corpus, float(i) / n)
            self.assertTrue(float(i) / n > (en.parser.model and a[0] or a[1]))
        print("pattern.en.parse()")

    def test_tagged_string(self):
        # Assert splitable TaggedString with language and tags properties.
        v = en.parser.parse("The black cat sat on the mat.", relations=True, lemmata=True)
        self.assertEqual(v.language, "en")
        self.assertEqual(v.tags,
            ["word", "part-of-speech", "chunk", "preposition", "relation", "lemma"])
        self.assertEqual(v.split(text.TOKENS)[0][0],
            ["The", "DT", "B-NP", "O", "NP-SBJ-1", "the"])
        print("pattern.en.parse().split()")

    def test_parsetree(self):
        # Assert parsetree(s) == Text.
        v = en.parsetree("The cat purs.")
        self.assertTrue(isinstance(v, en.Text))
        print("pattern.en.parsetree()")

    def test_split(self):
        # Assert split(parse(s)) == Text.
        v = en.split(en.parse("The cat purs."))
        self.assertTrue(isinstance(v, en.Text))
        print("pattern.en.split()")

    def test_tag(self):
        # Assert [("black", "JJ"), ("cats", "NNS")].
        v = en.tag("black cats")
        self.assertEqual(v, [("black", "JJ"), ("cats", "NNS")])
        v = en.tag("")
        self.assertEqual(v, [])
        print("pattern.en.tag()")

    def test_ngrams(self):
        # Assert n-grams with and without punctuation marks / sentence marks.
        s = "The cat is napping."
        v1 = en.ngrams(s, n=2)
        v2 = en.ngrams(s, n=3, punctuation=en.PUNCTUATION.strip("."))
        self.assertEqual(v1, [("The", "cat"), ("cat", "is"), ("is", "napping")])
        self.assertEqual(v2, [("The", "cat", "is"), ("cat", "is", "napping"), ("is", "napping", ".")])
        s = "The cat purrs. The dog barks."
        v1 = en.ngrams(s, n=2)
        v2 = en.ngrams(s, n=2, continuous=True)
        self.assertEqual(v1, [("The", "cat"), ("cat", "purrs"), ("The", "dog"), ("dog", "barks")])
        self.assertEqual(v2, [("The", "cat"), ("cat", "purrs"), ("purrs", "The"), ("The", "dog"), ("dog", "barks")])
        print("pattern.en.ngrams()")

    def test_command_line(self):
        # Assert parsed output from the command-line (example from the documentation).
        p = ["python", "-m", "pattern.en", "-s", "Nice cat.", "-OTCRL"]
        p = subprocess.Popen(p, stdout=subprocess.PIPE)
        p.wait()
        v = p.stdout.read()
        v = v.strip()
        self.assertEqual(v, "Nice/JJ/B-NP/O/O/nice cat/NN/I-NP/O/O/cat ././O/O/O/.")
        print("python -m pattern.en")
#---------------------------------------------------------------------------------------------------
class TestParseTree(unittest.TestCase):
    def setUp(self):
        """Build the shared parse tree every test in this class inspects."""
        # Parse sentences to test on.
        # Creating a Text creates Sentence, Chunk, PNP and Word.
        # Creating a Sentence tests Sentence.append() and Sentence.parse_token().
        self.text = "I'm eating pizza with a fork. What a tasty pizza!"
        self.text = en.Text(en.parse(self.text, relations=True, lemmata=True))
    def test_copy(self):
        # Assert deepcopy of Text, Sentence, Chunk, PNP and Word.
        # (Merely exercising copy(); later accesses would fail if it broke.)
        self.text = self.text.copy()
        print("pattern.en.Text.copy()")
    def test_xml(self):
        # Assert XML export and import (round-trip through Text.xml).
        self.text = en.Text.from_xml(self.text.xml)
        print("pattern.en.Text.xml")
        print("pattern.en.Text.from_xml()")
    def test_text(self):
        # Assert Text: two sentences with tokenized string representations.
        self.assertEqual(self.text.sentences[0].string, "I 'm eating pizza with a fork .")
        self.assertEqual(self.text.sentences[1].string, "What a tasty pizza !")
        print("pattern.en.Text")
    def test_sentence(self):
        # Assert Sentence: boundaries, string form, SBJ/VP/OBJ chunks, nouns.
        v = self.text[0]
        self.assertTrue(v.start == 0)
        self.assertTrue(v.stop == 8)
        self.assertTrue(v.string == "I 'm eating pizza with a fork .")
        self.assertTrue(v.subjects == [self.text[0].chunks[0]])
        self.assertTrue(v.verbs == [self.text[0].chunks[1]])
        self.assertTrue(v.objects == [self.text[0].chunks[2]])
        self.assertTrue(v.nouns == [self.text[0].words[3], self.text[0].words[6]])
        # Sentence.string must be unicode.
        self.assertTrue(isinstance(v.string, unicode) == True)
        self.assertTrue(isinstance(unicode(v), unicode) == True)
        self.assertTrue(isinstance(str(v), str) == True)
        print("pattern.en.Sentence")
    def test_sentence_constituents(self):
        # Assert in-order list of Chunk, PNP and Word.
        # With pnp=True the PP+NP chunks collapse into a single PNP item.
        v = self.text[0].constituents(pnp=True)
        self.assertEqual(v, [
            self.text[0].chunks[0],
            self.text[0].chunks[1],
            self.text[0].chunks[2],
            self.text[0].pnp[0],
            self.text[0].words[7],
        ])
        print("pattern.en.Sentence.constituents()")
    def test_slice(self):
        # Assert sentence slice ("with a" = words 4-5).
        v = self.text[0].slice(start=4, stop=6)
        self.assertTrue(v.parent == self.text[0])
        self.assertTrue(v.string == "with a")
        # Assert sentence slice tag integrity.
        self.assertTrue(v.words[0].type == "IN")
        # "a" loses its chunk: the chunk is not fully inside the slice.
        self.assertTrue(v.words[1].chunk == None)
        print("pattern.en.Slice")
    def test_chunk(self):
        # Assert chunk with multiple words ("a fork").
        v = self.text[0].chunks[4]
        self.assertTrue(v.start == 5)
        self.assertTrue(v.stop == 7)
        self.assertTrue(v.string == "a fork")
        self.assertTrue(v.lemmata == ["a", "fork"])
        self.assertTrue(v.words == [self.text[0].words[5], self.text[0].words[6]])
        self.assertTrue(v.head == self.text[0].words[6])
        self.assertTrue(v.type == "NP")
        self.assertTrue(v.role == None)
        self.assertTrue(v.pnp != None)
        # Assert chunk that is subject/object of the sentence ("pizza").
        v = self.text[0].chunks[2]
        self.assertTrue(v.role == "OBJ")
        self.assertTrue(v.relation == 1)
        self.assertTrue(v.related == [self.text[0].chunks[0], self.text[0].chunks[1]])
        self.assertTrue(v.subject == self.text[0].chunks[0])
        self.assertTrue(v.verb == self.text[0].chunks[1])
        self.assertTrue(v.object == None)
        # Assert chunk traversal.
        self.assertEqual(v.nearest("VP"), self.text[0].chunks[1])
        self.assertEqual(v.previous(), self.text[0].chunks[1])
        self.assertEqual(v.next(), self.text[0].chunks[3])
        print("pattern.en.Chunk")
    def test_chunk_conjunctions(self):
        # Assert list of conjunct/disjunct chunks ("black cat" AND "white cat").
        v = en.Sentence(en.parse("black cat and white cat"))
        self.assertEqual(v.chunk[0].conjunctions, [(v.chunk[1], en.AND)])
        print("pattern.en.Chunk.conjunctions()")
    def test_chunk_modifiers(self):
        # Assert list of nearby adjectives and adverbs with no role, for VP.
        v = en.Sentence(en.parse("Perhaps you should go."))
        self.assertEqual(v.chunk[2].modifiers, [v.chunk[0]]) # should <=> perhaps
        print("pattern.en.Chunk.modifiers")
def test_pnp(self):
    """PNP groups a preposition chunk with its object chunks."""
    # Assert PNP chunk ("with a fork").
    v = self.text[0].pnp[0]
    self.assertTrue(v.string == "with a fork")
    self.assertTrue(v.chunks == [self.text[0].chunks[3], self.text[0].chunks[4]])
    self.assertTrue(v.pp == self.text[0].chunks[3])
    print("pattern.en.PNP")
def test_word(self):
    """Word exposes index, string, lemma, POS type and chunk/PNP links."""
    # Assert word tags ("fork" => NN).
    v = self.text[0].words[6]
    self.assertTrue(v.index == 6)
    self.assertTrue(v.string == "fork")
    self.assertTrue(v.lemma == "fork")
    self.assertTrue(v.type == "NN")
    self.assertTrue(v.chunk == self.text[0].chunks[4])
    self.assertTrue(v.pnp != None)
    # Word.tags = [string, POS, chunk tag, PNP tag, role, lemma].
    for i, tags in enumerate([
      ["I", "PRP", "B-NP", "O", "NP-SBJ-1", "i"],
      ["'m", "VBP", "B-VP", "O", "VP-1", "be"],
      ["eating", "VBG", "I-VP", "O", "VP-1", "eat"],
      ["pizza", "NN", "B-NP", "O", "NP-OBJ-1", "pizza"],
      ["with", "IN", "B-PP", "B-PNP", "O", "with"],
      ["a", "DT", "B-NP", "I-PNP", "O", "a"],
      ["fork", "NN", "I-NP", "I-PNP", "O", "fork"],
      [".", ".", "O", "O", "O", "."]]):
        self.assertEqual(self.text[0].words[i].tags, tags)
    print("pattern.en.Word")
def test_word_custom_tags(self):
    """Extra slots in the token format become entries in Word.custom_tags."""
    # Assert word custom tags ("word/part-of-speech/.../some-custom-tag").
    s = en.Sentence("onion/NN/FOOD", token=[en.WORD, en.POS, "semantic_type"])
    v = s.words[0]
    self.assertEqual(v.semantic_type, "FOOD")
    self.assertEqual(v.custom_tags["semantic_type"], "FOOD")
    self.assertEqual(v.copy().custom_tags["semantic_type"], "FOOD")
    # Assert addition of new custom tags.
    v.custom_tags["taste"] = "pungent"
    self.assertEqual(s.token, [en.WORD, en.POS, "semantic_type", "taste"])
    print("pattern.en.Word.custom_tags")
def test_find(self):
    """find() returns the first item for which the predicate holds."""
    match = text.tree.find(lambda n: n > 10, [1, 2, 3, 11, 12])
    self.assertEqual(match, 11)
    print("pattern.text.tree.find()")
def test_zip(self):
    """zip() pads the shorter list with the default value."""
    zipped = text.tree.zip([1, 2, 3], [4, 5, 6, 7], default=0)
    self.assertEqual(zipped, [(1, 4), (2, 5), (3, 6), (0, 7)])
    print("pattern.text.tree.zip()")
def test_unzip(self):
    """unzip(i, pairs) extracts the i-th element of each tuple."""
    column = text.tree.unzip(1, [(1, 4), (2, 5), (3, 6)])
    self.assertEqual(column, [4, 5, 6])
    print("pattern.text.tree.unzip()")
def test_unique(self):
    """unique() returns a copy of the list with duplicates removed."""
    deduped = text.tree.unique([1, 1, 1])
    self.assertEqual(len(deduped), 1)
    self.assertEqual(deduped[0], 1)
    print("pattern.text.tree.unique()")
def test_map(self):
    """Map() applies the function lazily while keeping the source items."""
    mapped = text.tree.Map(lambda x: x + 1, [1, 2, 3])
    self.assertEqual(list(mapped), [2, 3, 4])
    self.assertEqual(mapped.items[0], 1)
    print("pattern.text.tree.Map()")
#---------------------------------------------------------------------------------------------------
class TestModality(unittest.TestCase):

    """Tests for grammatical mood and modality detection in pattern.en."""

    def setUp(self):
        pass

    def test_imperative(self):
        """imperative() is True for orders, commands and warnings."""
        # Assert True for sentences that are orders, commands, warnings.
        from pattern.text.en.modality import imperative
        for b, s in (
          (True, "Do your homework!"),
          (True, "Do not listen to me."),
          (True, "Turn that off, will you."),
          (True, "Let's help him."),
          (True, "Help me!"),
          (True, "You will help me."),
          (False, "Do it if you think it is necessary."),
          (False, "I hope you will help me."),
          (False, "I can help you."),
          (False, "I can help you if you let me.")):
            self.assertEqual(imperative(en.Sentence(en.parse(s))), b)
        print("pattern.en.modality.imperative()")

    def test_conditional(self):
        """conditional() detects possible or imaginary situations."""
        # Assert True for sentences that contain possible or imaginary situations.
        from pattern.text.en.modality import conditional
        for b, s in (
          (True, "We ought to help him."),
          (True, "We could help him."),
          (True, "I will help you."),
          (True, "I hope you will help me."),
          (True, "I can help you if you let me."),
          (False, "You will help me."),
          (False, "I can help you.")):
            self.assertEqual(conditional(en.Sentence(en.parse(s))), b)
        # Assert predictive mood.
        s = "I will help you."
        v = conditional(en.Sentence(en.parse(s)), predictive=False)
        self.assertEqual(v, False)
        # Assert speculative mood.
        s = "I will help you if you pay me."
        v = conditional(en.Sentence(en.parse(s)), predictive=False)
        self.assertEqual(v, True)
        print("pattern.en.modality.conditional()")

    def test_subjunctive(self):
        """subjunctive() detects wishes, judgments and opinions."""
        # Assert True for sentences that contain wishes, judgments or opinions.
        from pattern.text.en.modality import subjunctive
        for b, s in (
          (True, "I wouldn't do that if I were you."),
          (True, "I wish I knew."),
          (True, "I propose that you be on time."),
          (True, "It is a bad idea to be late."),
          (False, "I will be late.")):
            self.assertEqual(subjunctive(en.Sentence(en.parse(s))), b)
        print("pattern.en.modality.subjunctive()")

    def test_negated(self):
        """negated() is True for sentences containing not / n't / never."""
        # Assert True for sentences that contain "not", "n't" or "never".
        for b, s in (
          (True, "Not true?"),
          (True, "Never true."),
          (True, "Isn't true."),):
            self.assertEqual(en.negated(en.Sentence(en.parse(s))), b)
        print("pattern.en.negated()")

    def test_mood(self):
        """mood() returns one of IMPERATIVE/CONDITIONAL/SUBJUNCTIVE/INDICATIVE."""
        # Assert imperative mood.
        v = en.mood(en.Sentence(en.parse("Do your homework!")))
        self.assertEqual(v, en.IMPERATIVE)
        # Assert conditional mood.
        v = en.mood(en.Sentence(en.parse("We ought to help him.")))
        self.assertEqual(v, en.CONDITIONAL)
        # Assert subjunctive mood.
        v = en.mood(en.Sentence(en.parse("I wouldn't do that if I were you.")))
        self.assertEqual(v, en.SUBJUNCTIVE)
        # Assert indicative mood.
        v = en.mood(en.Sentence(en.parse("The weather is nice today.")))
        self.assertEqual(v, en.INDICATIVE)
        print("pattern.en.mood()")

    def test_modality(self):
        """modality() returns a -1.0 => +1.0 degree-of-certainty score."""
        # Assert -1.0 => +1.0 representing the degree of certainty.
        v = en.modality(en.Sentence(en.parse("I wish it would stop raining.")))
        self.assertTrue(v < 0)
        v = en.modality(en.Sentence(en.parse("It will surely stop raining soon.")))
        self.assertTrue(v > 0)
        # Assert the accuracy of the modality algorithm.
        # Given are the scores for the CoNLL-2010 Shared Task 1 Wikipedia uncertainty data:
        # http://www.inf.u-szeged.hu/rgai/conll2010st/tasks.html#task1
        # The baseline should increase (not decrease) when the algorithm is modified.
        from pattern.db import Datasheet
        from pattern.metrics import test
        sentences = []
        for certain, sentence in Datasheet.load(os.path.join(PATH, "corpora", "uncertainty-conll2010.csv")):
            sentence = en.parse(sentence, chunks=False, light=True)
            sentence = en.Sentence(sentence)
            sentences.append((sentence, int(certain) > 0))
        A, P, R, F = test(lambda sentence: en.modality(sentence) > 0.5, sentences)
        #print(A, P, R, F)
        self.assertTrue(A > 0.69)
        self.assertTrue(P > 0.72)
        self.assertTrue(R > 0.64)
        self.assertTrue(F > 0.68)
        print("pattern.en.modality()")
#---------------------------------------------------------------------------------------------------
class TestSentiment(unittest.TestCase):

    """Tests for polarity / subjectivity scoring in pattern.en."""

    def setUp(self):
        pass

    def test_sentiment_avg(self):
        """avg() returns the arithmetic mean."""
        # Assert 2.5.
        from pattern.text import avg
        v = avg([1, 2, 3, 4])
        self.assertEqual(v, 2.5)
        print("pattern.text.avg")

    def test_sentiment(self):
        """sentiment() polarity is signed; accuracy holds on Pang & Lee data."""
        # Assert < 0 for negative adjectives and > 0 for positive adjectives.
        self.assertTrue(en.sentiment("wonderful")[0] > 0)
        self.assertTrue(en.sentiment("horrible")[0] < 0)
        self.assertTrue(en.sentiment(en.wordnet.synsets("horrible", pos="JJ")[0])[0] < 0)
        self.assertTrue(en.sentiment(en.Text(en.parse("A bad book. Really horrible.")))[0] < 0)
        # Assert that :) and :( are recognized.
        self.assertTrue(en.sentiment(":)")[0] > 0)
        self.assertTrue(en.sentiment(":(")[0] < 0)
        # Assert the accuracy of the sentiment analysis (for the positive class).
        # Given are the scores for Pang & Lee's polarity dataset v2.0:
        # http://www.cs.cornell.edu/people/pabo/movie-review-data/
        # The baseline should increase (not decrease) when the algorithm is modified.
        from pattern.db import Datasheet
        from pattern.metrics import test
        reviews = []
        for score, review in Datasheet.load(os.path.join(PATH, "corpora", "polarity-en-pang&lee1.csv")):
            reviews.append((review, int(score) > 0))
        A, P, R, F = test(lambda review: en.positive(review), reviews)
        #print(A, P, R, F)
        self.assertTrue(A > 0.754)
        self.assertTrue(P > 0.773)
        self.assertTrue(R > 0.719)
        self.assertTrue(F > 0.745)
        # Assert the accuracy of the sentiment analysis on short text (for the positive class).
        # Given are the scores for Pang & Lee's sentence polarity dataset v1.0:
        # http://www.cs.cornell.edu/people/pabo/movie-review-data/
        reviews = []
        for score, review in Datasheet.load(os.path.join(PATH, "corpora", "polarity-en-pang&lee2.csv")):
            reviews.append((review, int(score) > 0))
        A, P, R, F = test(lambda review: en.positive(review), reviews)
        #print(A, P, R, F)
        self.assertTrue(A > 0.654)
        self.assertTrue(P > 0.660)
        self.assertTrue(R > 0.636)
        self.assertTrue(F > 0.648)
        print("pattern.en.sentiment()")

    def test_sentiment_twitter(self):
        """Accuracy on the Sanders Twitter corpus, when it is installed."""
        sanders = os.path.join(PATH, "corpora", "polarity-en-sanders.csv")
        if os.path.exists(sanders):
            # Assert the accuracy of the sentiment analysis on tweets.
            # Given are the scores for Sanders Twitter Sentiment Corpus:
            # http://www.sananalytics.com/lab/twitter-sentiment/
            # Positive + neutral is taken as polarity >= 0.0,
            # Negative is taken as polarity < 0.0.
            # Since there are a lot of neutral cases,
            # and the algorithm predicts 0.0 by default (i.e., majority class) the results are good.
            # Distinguishing negative from neutral from positive is a much harder task
            from pattern.db import Datasheet
            from pattern.metrics import test
            reviews = []
            for i, id, date, tweet, polarity, topic in Datasheet.load(sanders):
                if polarity != "irrelevant":
                    reviews.append((tweet, polarity in ("positive", "neutral")))
            A, P, R, F = test(lambda review: en.positive(review, threshold=0.0), reviews)
            #print(A, P, R, F)
            self.assertTrue(A > 0.824)
            self.assertTrue(P > 0.879)
            self.assertTrue(R > 0.911)
            self.assertTrue(F > 0.895)

    def test_sentiment_assessment(self):
        """sentiment() exposes per-word scores through .assessments."""
        # Assert that en.sentiment() has a fine-grained "assessments" property.
        v = en.sentiment("A warm and pleasant day.").assessments
        self.assertTrue(v[1][0][0] == "pleasant")
        self.assertTrue(v[1][1] > 0)
        print("pattern.en.sentiment().assessments")

    def test_polarity(self):
        """polarity() is a shortcut for sentiment()[0]."""
        s = "A great day!"
        self.assertTrue(en.polarity(s) == en.sentiment(s)[0])
        print("pattern.en.polarity()")

    def test_subjectivity(self):
        """subjectivity() is a shortcut for sentiment()[1]."""
        s = "A great day!"
        self.assertTrue(en.subjectivity(s) == en.sentiment(s)[1])
        print("pattern.en.subjectivity()")

    def test_positive(self):
        """positive() is True when polarity >= threshold (default 0.1)."""
        s = "A great day!"
        self.assertTrue(en.positive(s))
        # Fixed: this previously printed "pattern.en.subjectivity()" (copy-paste).
        print("pattern.en.positive()")

    def test_sentiwordnet(self):
        """SentiWordNet entries are signed like the bundled lexicon."""
        # Assert < 0 for negative words and > 0 for positive words.
        try:
            from pattern.text.en.wordnet import SentiWordNet
            lexicon = SentiWordNet()
            lexicon.load()
        except ImportError as e:
            # SentiWordNet data file is not installed in default location, stop test.
            print(e)
            return
        self.assertTrue(lexicon["wonderful"][0] > 0)
        self.assertTrue(lexicon["horrible"][0] < 0)
        print("pattern.en.sentiment.SentiWordNet")
#---------------------------------------------------------------------------------------------------
class TestWordNet(unittest.TestCase):

    """Tests for the WordNet wrapper in pattern.en.wordnet."""

    def setUp(self):
        pass

    def test_normalize(self):
        """normalize() strips simple diacritics (WordNet stores none)."""
        # Assert normalization of simple diacritics (WordNet does not store diacritics).
        self.assertEqual(en.wordnet.normalize(u"cliché"), "cliche")
        self.assertEqual(en.wordnet.normalize(u"façade"), "facade")
        print("pattern.en.wordnet.normalize()")

    def test_version(self):
        print("WordNet " + en.wordnet.VERSION)

    def test_synsets(self):
        """synsets() accepts both constants and Penn Treebank POS tags."""
        # Assert synsets by part-of-speech.
        for word, pos in (
          ("cat", en.wordnet.NOUN),
          ("purr", en.wordnet.VERB),
          ("nice", en.wordnet.ADJECTIVE),
          ("nicely", en.wordnet.ADVERB),
          ("cat", "nn"),
          ("cat", "NNS")):
            self.assertTrue(en.wordnet.synsets(word, pos) != [])
        # Assert TypeError when part-of-speech is not NOUN, VERB, ADJECTIVE or ADVERB.
        self.assertRaises(TypeError, en.wordnet.synsets, "cat", "unknown_pos")
        print("pattern.en.wordnet.synsets()")

    def test_synset(self):
        """Synset exposes id, pos, synonyms, gloss and semantic relations."""
        v = en.wordnet.synsets("puma")[0]
        # Assert Synset(id).
        self.assertEqual(v, en.wordnet.Synset(v.id))
        self.assertEqual(v.pos, en.wordnet.NOUN)
        self.assertAlmostEqual(v.ic, 0.0, places=1)
        self.assertTrue("cougar" in v.synonyms) # ["cougar", "puma", "catamount", ...]
        self.assertTrue("feline" in v.gloss)    # "large American feline resembling a lion"
        # Assert WordNet relations.
        s = en.wordnet.synsets
        v = s("tree")[0]
        self.assertTrue(v.hypernym in v.hypernyms())
        self.assertTrue(s("woody plant")[0] in v.hypernyms())
        self.assertTrue(s("entity")[0] in v.hypernyms(recursive=True))
        self.assertTrue(s("beech")[0] in v.hyponyms())
        self.assertTrue(s("red beech")[0] in v.hyponyms(recursive=True))
        self.assertTrue(s("trunk")[0] in v.meronyms())
        self.assertTrue(s("forest")[0] in v.holonyms())
        # Assert Lin-similarity.
        self.assertTrue(
            v.similarity(s("flower")[0]) >
            v.similarity(s("teapot")[0]))
        print("pattern.en.wordnet.Synset")

    # NOTE: renamed from "test_ancenstor" (typo); unittest discovers test
    # methods by prefix, so no callers reference the old name.
    def test_ancestor(self):
        """ancestor() returns the least common subsumer of two synsets."""
        # Assert least-common-subsumer algorithm.
        v1 = en.wordnet.synsets("cat")[0]
        v2 = en.wordnet.synsets("dog")[0]
        self.assertTrue(en.wordnet.ancestor(v1, v2) == en.wordnet.synsets("carnivore")[0])
        print("pattern.en.wordnet.ancestor()")

    def test_map32(self):
        """map32() maps sense ids from WordNet 3.0 to 2.1."""
        # Assert sense mapping from WN 3.0 to 2.1.
        self.assertEqual(en.wordnet.map32(18850, "JJ"), (19556, "JJ"))
        self.assertEqual(en.wordnet.map32(1382437, "VB"), (1370230, "VB"))
        print("pattern.en.wordnet.map32")

    def test_sentiwordnet(self):
        """Synset.weight yields SentiWordNet (polarity, subjectivity) pairs."""
        # Assert SentiWordNet is loaded correctly.
        if en.wordnet.sentiwordnet is None:
            return
        try:
            en.wordnet.sentiwordnet.load()
        except ImportError:
            # SentiWordNet data files are not installed; skip silently.
            return
        v = en.wordnet.synsets("anguish")[0]
        self.assertEqual(v.weight, (-0.625, 0.625))
        v = en.wordnet.synsets("enzymology")[0]
        self.assertEqual(v.weight, (0.125, 0.125))
        print("pattern.en.wordnet.sentiwordnet")
#---------------------------------------------------------------------------------------------------
class TestWordlists(unittest.TestCase):

    def setUp(self):
        pass

    def test_wordlist(self):
        """Wordlist loads lazily and supports dict conversion and addition."""
        # Lazy loading.
        stopwords = en.wordlist.STOPWORDS
        self.assertTrue("the" in stopwords)
        # Conversion to dict.
        as_dict = dict.fromkeys(en.wordlist.STOPWORDS, True)
        self.assertTrue("the" in as_dict)
        # Adding two wordlists yields a new combined Wordlist.
        combined = en.wordlist.STOPWORDS + en.wordlist.ACADEMIC
        self.assertTrue("the" in combined)
        self.assertTrue("dr." in combined)
        print("pattern.en.wordlist.Wordlist")
#---------------------------------------------------------------------------------------------------
def suite():
    """Collect every test case in this module into a single test suite."""
    loader = unittest.TestLoader()
    tests = unittest.TestSuite()
    for case in (TestInflection, TestQuantification, TestSpelling,
                 TestParser, TestParseTree, TestModality, TestSentiment,
                 TestWordNet, TestWordlists):
        tests.addTest(loader.loadTestsFromTestCase(case))
    return tests
# Run the full test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=1).run(suite())
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { Button, Heading, useDisclosure } from "@chakra-ui/react";
import { useTranslation } from "react-i18next";
import { FiPlusCircle } from "react-icons/fi";
import { Dialog, Toaster } from "src/components/ui";
import { useAddVariable } from "src/queries/useAddVariable";
import VariableForm, { type VariableBody } from "./VariableForm";
type Props = {
  // Disables the button, e.g. while the user lacks write permission.
  readonly disabled: boolean;
};

/**
 * Button that opens a dialog with a form for creating a new Airflow variable.
 * Submission is delegated to the `useAddVariable` mutation; the dialog closes
 * automatically on a confirmed success.
 */
const AddVariableButton = ({ disabled }: Props) => {
  const { t: translate } = useTranslation("admin");
  const { onClose, onOpen, open } = useDisclosure();
  const { addVariable, error, isPending, setError } = useAddVariable({
    onSuccessConfirm: onClose,
  });

  // Blank slate shown when the form first opens.
  const initialVariableValue: VariableBody = {
    description: "",
    key: "",
    team_name: "",
    value: "",
  };

  const handleClose = () => {
    // Clear any stale mutation error so it does not reappear on reopen.
    setError(undefined);
    onClose();
  };

  return (
    <>
      <Toaster />
      <Button colorPalette="brand" disabled={disabled} onClick={onOpen}>
        <FiPlusCircle /> {translate("variables.add")}
      </Button>
      <Dialog.Root onOpenChange={handleClose} open={open} size="xl">
        <Dialog.Content backdrop>
          <Dialog.Header>
            <Heading size="xl">{translate("variables.add")}</Heading>
          </Dialog.Header>
          <Dialog.CloseTrigger />
          <Dialog.Body>
            <VariableForm
              error={error}
              initialVariable={initialVariableValue}
              isPending={isPending}
              manageMutate={addVariable}
              setError={setError}
            />
          </Dialog.Body>
        </Dialog.Content>
      </Dialog.Root>
    </>
  );
};

export default AddVariableButton;
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches the firefox and does necessary preparation like
installing the extension"""
from subprocess import Popen
from subprocess import PIPE
import logging
import shutil
import tempfile
import time
import platform
import os
from extensionconnection import ExtensionConnection
from firefox_profile import FirefoxProfile
import utils
MAX_START_ATTEMPTS = 20
class FirefoxLauncher(object):
    """Launches the firefox browser and waits until the WebDriver
    extension inside it accepts connections."""

    def __init__(self):
        self.extension_connection = ExtensionConnection()
        self._start_cmd = utils.get_firefox_start_cmd()
        self.process = None

    def launch_browser(self, profile):
        """Launches the browser for the given profile name.

        It is assumed the profile already exists.

        Raises RuntimeError when the extension is still unreachable after
        MAX_START_ATTEMPTS launch attempts.
        """
        self.profile = profile
        # If an older browser instance is still serving the extension port,
        # ask it to quit before starting a fresh one.
        while self.extension_connection.is_connectable():
            logging.info("Browser already running, kill it")
            self.extension_connection.connect_and_quit()
            time.sleep(1)
        self._start_from_profile_path(profile.path)
        attempts = 0
        while not self.extension_connection.is_connectable():
            attempts += 1
            if attempts > MAX_START_ATTEMPTS:
                # Fixed typo in the error message (was "Unablet").
                raise RuntimeError("Unable to start firefox")
            self._start_from_profile_path(profile.path)
            time.sleep(1)

    def _lock_file_exists(self):
        # Firefox keeps a ".parentlock" file in the profile dir while running.
        return os.path.exists(os.path.join(self.profile.path, ".parentlock"))

    def kill(self):
        """Kill the browser.

        This is useful when the browser is stuck.
        """
        try:
            if self.process:
                os.kill(self.process.pid, 9)
        except AttributeError:
            # kill may not be available under windows environment
            pass

    def _start_from_profile_path(self, path):
        # Firefox reads the profile location from this environment variable.
        os.environ["XRE_PROFILE_PATH"] = path
        self.process = Popen([self._start_cmd, "-no-remote", "--verbose"])

    def _wait_until_connectable(self):
        """Blocks until the extension is connectable in the firefox."""
        while not self.extension_connection.is_connectable():
            logging.debug("Waiting for browser to launch...")
            if self.process.returncode:
                # Browser has exited
                return False
            time.sleep(1)
        return True
# pylint: skip-file
# flake8: noqa
class Edit(OpenShiftCLI):
    ''' Class to wrap the oc command line tools
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 kind,
                 namespace,
                 resource_name=None,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 separator='.',
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.kind = kind
        self.name = resource_name
        self.separator = separator

    def get(self):
        '''return a secret by name '''
        return self._get(self.kind, self.name)

    def update(self, file_name, content, edits, force=False, content_type='yaml'):
        '''run update

        When file_name is given, the file is loaded, patched in place with
        content/edits and replaced through oc; otherwise the edits are
        applied directly to the live resource content.
        '''
        if file_name:
            # Initialize so an unexpected content_type cannot leave "data"
            # undefined (previously raised NameError).
            data = None
            # Use context managers so the file handles are closed promptly
            # (they were previously leaked).
            if content_type == 'yaml':
                with open(file_name) as contents:
                    # NOTE(review): yaml.load (not safe_load) is kept for
                    # backward compatibility; unsafe on untrusted input.
                    data = yaml.load(contents)
            elif content_type == 'json':
                with open(file_name) as contents:
                    data = json.loads(contents.read())

            yed = Yedit(filename=file_name, content=data, separator=self.separator)

            # Keep this for compatibility
            if content is not None:
                changes = []
                for key, value in content.items():
                    changes.append(yed.put(key, value))

                if any([not change[0] for change in changes]):
                    return {'returncode': 0, 'updated': False}

            elif edits is not None:
                results = Yedit.process_edits(edits, yed)

                if not results['changed']:
                    return results

            yed.write()
            atexit.register(Utils.cleanup, [file_name])

            return self._replace(file_name, force=force)

        return self._replace_content(self.kind, self.name, content, edits, force=force, sep=self.separator)

    @staticmethod
    def run_ansible(params, check_mode):
        '''run the ansible idempotent code

        Returns an ansible-style result dict; honors check_mode by
        reporting the would-be change without performing it.
        '''
        ocedit = Edit(params['kind'],
                      params['namespace'],
                      params['name'],
                      kubeconfig=params['kubeconfig'],
                      separator=params['separator'],
                      verbose=params['debug'])

        api_rval = ocedit.get()

        ########
        # Create
        ########
        if not Utils.exists(api_rval['results'], params['name']):
            return {"failed": True, 'msg': api_rval}

        ########
        # Update
        ########
        if check_mode:
            return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}

        api_rval = ocedit.update(params['file_name'],
                                 params['content'],
                                 params['edits'],
                                 params['force'],
                                 params['file_format'])

        if api_rval['returncode'] != 0:
            return {"failed": True, 'msg': api_rval}

        if 'updated' in api_rval and not api_rval['updated']:
            return {"changed": False, 'results': api_rval, 'state': 'present'}

        # return the created object
        api_rval = ocedit.get()

        if api_rval['returncode'] != 0:
            return {"failed": True, 'msg': api_rval}

        return {"changed": True, 'results': api_rval, 'state': 'present'}
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Using a Condition to control sequencing between workers.
"""
#end_pymotw_header
import multiprocessing
import time
def stage_1(cond):
    """perform first stage of work, then notify stage_2 to continue"""
    name = multiprocessing.current_process().name
    print 'Starting', name
    with cond:
        # Notify while holding the condition's lock so waiters only wake
        # after stage 1 has announced completion.
        print '%s done and ready for stage 2' % name
        cond.notify_all()
def stage_2(cond):
    """wait for the condition telling us stage_1 is done"""
    name = multiprocessing.current_process().name
    print 'Starting', name
    with cond:
        # Blocks (releasing the lock) until stage_1 calls notify_all().
        cond.wait()
        print '%s running' % name
if __name__ == '__main__':
    # Start the stage-2 workers first so they are already waiting on the
    # condition before stage 1 fires its notification.
    condition = multiprocessing.Condition()
    stage1 = multiprocessing.Process(name='s1', target=stage_1,
                                     args=(condition,))
    stage2_workers = [
        multiprocessing.Process(name='stage_2[%d]' % i,
                                target=stage_2,
                                args=(condition,))
        for i in range(1, 3)
    ]

    for worker in stage2_workers:
        worker.start()
        time.sleep(1)

    stage1.start()
    stage1.join()
    for worker in stage2_workers:
        worker.join()
from unittest.mock import call, Mock, patch
from alexa_client.refreshtoken import http_server, handlers
@patch.object(handlers.AmazonAlexaServiceLoginHandler, '__init__',
              return_value=None)
@patch.object(http_server.AmazonLoginHttpServer, 'server_bind', Mock)
def test_http_server_passes_args(mock__init__):
    """The server stores its OAuth kwargs and hands requests to the handler."""
    server = http_server.AmazonLoginHttpServer(
        server_address=('localhost', 9000),
        RequestHandlerClass=handlers.AmazonAlexaServiceLoginHandler,
        client_id='client-id-here',
        client_secret='client-secret-here',
        device_type_id='device-type-id-here',
        callback_url='http://localhost:9000/callback/',
    )

    fake_request = Mock()
    address = ('localhost', '9000')
    server.finish_request(request=fake_request, client_address=address)

    # The handler class is instantiated exactly once, with the raw request.
    assert mock__init__.call_count == 1
    assert mock__init__.call_args == call(fake_request, address, server)
    # Constructor kwargs are kept as attributes for the handler to read.
    assert server.client_id == 'client-id-here'
    assert server.client_secret == 'client-secret-here'
    assert server.device_type_id == 'device-type-id-here'
    assert server.callback_url == 'http://localhost:9000/callback/'
"""
articles module (imdb package).
This module provides functions and data to handle in a smart way
articles (in various languages) at the beginning of movie titles.
Copyright 2009 Davide Alberani <da@erlug.linux.it>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the count of times they are used in one way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
"l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
'\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')
# Lists of articles separated by language. If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# it at imdbpy-devel@lists.sourceforge.net; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
'English': ('the', 'a', 'an'),
'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
'uno'),
'Spanish': ('la', 'le', 'el', 'les', 'un', 'los', 'una', 'uno', 'unos',
'unas'),
'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
'Turkish': (), # Some languages doesn't have articles.
}
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# imdbpy-devel@lists.sourceforge.net .
_LANG_COUNTRIES = {
'English': ('USA', 'UK', 'Canada', 'Ireland', 'Australia'),
'Italian': ('Italy',),
'Spanish': ('Spain', 'Mexico'),
'Portuguese': ('Portugal', 'Brazil'),
'Turkish': ('Turkey',),
#'German': ('Germany', 'East Germany', 'West Germany'),
#'French': ('France'),
}
# Maps countries to their main language.
COUNTRY_LANG = {}
for lang in _LANG_COUNTRIES:
for country in _LANG_COUNTRIES[lang]:
COUNTRY_LANG[country] = lang
def toUnicode(articles):
    """Convert a list of articles utf-8 encoded to unicode strings."""
    return tuple(art.decode('utf_8') for art in articles)
def toDicts(articles):
    """Given a list of utf-8 encoded articles, build two dictionary (one
    utf-8 encoded and another one with unicode keys) for faster matches."""
    uArticles = toUnicode(articles)
    utf8Dict = dict(zip(articles, articles))
    unicodeDict = dict(zip(uArticles, uArticles))
    return utf8Dict, unicodeDict
def addTrailingSpace(articles):
    """From the given list of utf-8 encoded articles, return two
    lists (one utf-8 encoded and another one in unicode) where a space
    is added at the end - if the last char is not ' or -."""
    utf8List = []
    unicodeList = []
    for art in articles:
        # Articles ending in an apostrophe or hyphen attach directly to
        # the following word, so no separating space is appended.
        if art[-1] not in ("'", '-'):
            art += ' '
        utf8List.append(art)
        unicodeList.append(art.decode('utf_8'))
    return utf8List, unicodeList
# Caches.
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
    """Return dictionaries of articles specific for the given language, or the
    default one if the language is not known."""
    # Memoized: each language's dictionaries are built at most once.
    if lang not in _ART_CACHE:
        _ART_CACHE[lang] = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
    return _ART_CACHE[lang]
def spArticlesForLang(lang):
    """Return lists of articles (plus optional spaces) specific for the
    given language, or the default one if the language is not known."""
    # Memoized: each language's lists are built at most once.
    if lang not in _SP_ART_CACHE:
        _SP_ART_CACHE[lang] = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
    return _SP_ART_CACHE[lang]
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Base class for all hypervisors
The syntax for the _CHECK variables and the contents of the PARAMETERS
dict is the same, see the docstring for L{BaseHypervisor.PARAMETERS}.
@var _FILE_CHECK: stub for file checks, without the required flag
@var _DIR_CHECK: stub for directory checks, without the required flag
@var REQ_FILE_CHECK: mandatory file parameter
@var OPT_FILE_CHECK: optional file parameter
@var REQ_DIR_CHECK: mandatory directory parametr
@var OPT_DIR_CHECK: optional directory parameter
@var NO_CHECK: parameter without any checks at all
@var REQUIRED_CHECK: parameter required to exist (and non-false), but
without other checks; beware that this can't be used for boolean
parameters, where you should use NO_CHECK or a custom checker
"""
import os
import re
import logging
from ganeti import errors
from ganeti import utils
from ganeti import constants
def _IsCpuMaskWellFormed(cpu_mask):
  """Verifies if the given single CPU mask is valid

  The single CPU mask should be in the form "a,b,c,d", where each
  letter is a positive number or range.

  @rtype: bool

  """
  try:
    cpu_list = utils.ParseCpuMask(cpu_mask)
  # Replaced Python-2-only "except E, _" syntax; the bound name was unused.
  except errors.ParseError:
    # A malformed mask is reported as invalid, not propagated.
    return False
  return isinstance(cpu_list, list) and len(cpu_list) > 0
def _IsMultiCpuMaskWellFormed(cpu_mask):
"""Verifies if the given multiple CPU mask is valid
A valid multiple CPU mask is in the form "a:b:c:d", where each
letter is a single CPU mask.
"""
try:
utils.ParseMultiCpuMask(cpu_mask)
except errors.ParseError, _:
return False
return True
# Read the BaseHypervisor.PARAMETERS docstring for the syntax of the
# _CHECK values

# must be a file
_FILE_CHECK = (utils.IsNormAbsPath, "must be an absolute normalized path",
               os.path.isfile, "not found or not a file")

# must be a directory
_DIR_CHECK = (utils.IsNormAbsPath, "must be an absolute normalized path",
              os.path.isdir, "not found or not a directory")

# CPU mask must be well-formed
# TODO: implement node level check for the CPU mask
_CPU_MASK_CHECK = (_IsCpuMaskWellFormed,
                   "CPU mask definition is not well-formed",
                   None, None)

# Multiple CPU mask must be well-formed
_MULTI_CPU_MASK_CHECK = (_IsMultiCpuMaskWellFormed,
                         "Multiple CPU mask definition is not well-formed",
                         None, None)

# Check for validity of port number
# BUGFIX: the upper bound used to be "x < 65535", which wrongly rejected
# 65535 -- the highest valid TCP/UDP port number (valid range is 1-65535).
_NET_PORT_CHECK = (lambda x: 0 < x < 65536, "invalid port number",
                   None, None)

# Check that an integer is non negative
_NONNEGATIVE_INT_CHECK = (lambda x: x >= 0, "cannot be negative", None, None)

# nice wrappers for users: each prepends the "required" flag to the
# corresponding four-element _CHECK tuple
REQ_FILE_CHECK = (True, ) + _FILE_CHECK
OPT_FILE_CHECK = (False, ) + _FILE_CHECK
REQ_DIR_CHECK = (True, ) + _DIR_CHECK
OPT_DIR_CHECK = (False, ) + _DIR_CHECK
REQ_NET_PORT_CHECK = (True, ) + _NET_PORT_CHECK
OPT_NET_PORT_CHECK = (False, ) + _NET_PORT_CHECK
REQ_CPU_MASK_CHECK = (True, ) + _CPU_MASK_CHECK
OPT_CPU_MASK_CHECK = (False, ) + _CPU_MASK_CHECK
REQ_MULTI_CPU_MASK_CHECK = (True, ) + _MULTI_CPU_MASK_CHECK
OPT_MULTI_CPU_MASK_CHECK = (False, ) + _MULTI_CPU_MASK_CHECK
REQ_NONNEGATIVE_INT_CHECK = (True, ) + _NONNEGATIVE_INT_CHECK
OPT_NONNEGATIVE_INT_CHECK = (False, ) + _NONNEGATIVE_INT_CHECK

# no checks at all
NO_CHECK = (False, None, None, None, None)

# required, but no other checks
REQUIRED_CHECK = (True, None, None, None, None)

# migration type
MIGRATION_MODE_CHECK = (True, lambda x: x in constants.HT_MIGRATION_MODES,
                        "invalid migration mode", None, None)
def ParamInSet(required, my_set):
    """Builds parameter checker for set membership.

    @type required: boolean
    @param required: whether this is a required parameter
    @type my_set: tuple, list or set
    @param my_set: allowed values set
    @return: a five-tuple in the PARAMETERS check format, with no
        remote-node validation (the last two elements are None)

    """
    def _check(value):
        # simple containment test against the allowed values
        return value in my_set
    message = ("The value must be one of: %s" % utils.CommaJoin(my_set))
    return (required, _check, message, None, None)
class BaseHypervisor(object):
"""Abstract virtualisation technology interface
The goal is that all aspects of the virtualisation technology are
abstracted away from the rest of code.
@cvar PARAMETERS: a dict of parameter name: check type; the check type is
a five-tuple containing:
- the required flag (boolean)
- a function to check for syntax, that will be used in
L{CheckParameterSyntax}, in the master daemon process
- an error message for the above function
- a function to check for parameter validity on the remote node,
in the L{ValidateParameters} function
- an error message for the above function
@type CAN_MIGRATE: boolean
@cvar CAN_MIGRATE: whether this hypervisor can do migration (either
live or non-live)
"""
PARAMETERS = {}
ANCILLARY_FILES = []
ANCILLARY_FILES_OPT = []
CAN_MIGRATE = False
def StartInstance(self, instance, block_devices, startup_paused):
"""Start an instance."""
raise NotImplementedError
def StopInstance(self, instance, force=False, retry=False, name=None):
"""Stop an instance
@type instance: L{objects.Instance}
@param instance: instance to stop
@type force: boolean
@param force: whether to do a "hard" stop (destroy)
@type retry: boolean
@param retry: whether this is just a retry call
@type name: string or None
@param name: if this parameter is passed, the the instance object
should not be used (will be passed as None), and the shutdown
must be done by name only
"""
raise NotImplementedError
def CleanupInstance(self, instance_name):
"""Cleanup after a stopped instance
This is an optional method, used by hypervisors that need to cleanup after
an instance has been stopped.
@type instance_name: string
@param instance_name: instance name to cleanup after
"""
pass
def RebootInstance(self, instance):
"""Reboot an instance."""
raise NotImplementedError
def ListInstances(self):
"""Get the list of running instances."""
raise NotImplementedError
def GetInstanceInfo(self, instance_name):
"""Get instance properties.
@type instance_name: string
@param instance_name: the instance name
@return: tuple (name, id, memory, vcpus, state, times)
"""
raise NotImplementedError
def GetAllInstancesInfo(self):
"""Get properties of all instances.
@return: list of tuples (name, id, memory, vcpus, stat, times)
"""
raise NotImplementedError
def GetNodeInfo(self):
"""Return information about the node.
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
"""
raise NotImplementedError
@classmethod
def GetInstanceConsole(cls, instance, hvparams, beparams):
"""Return information for connecting to the console of an instance.
"""
raise NotImplementedError
@classmethod
def GetAncillaryFiles(cls):
"""Return a list of ancillary files to be copied to all nodes as ancillary
configuration files.
@rtype: (list of absolute paths, list of absolute paths)
@return: (all files, optional files)
"""
# By default we return a member variable, so that if an hypervisor has just
# a static list of files it doesn't have to override this function.
assert set(cls.ANCILLARY_FILES).issuperset(cls.ANCILLARY_FILES_OPT), \
"Optional ancillary files must be a subset of ancillary files"
return (cls.ANCILLARY_FILES, cls.ANCILLARY_FILES_OPT)
def Verify(self):
"""Verify the hypervisor.
@return: Problem description if something is wrong, C{None} otherwise
"""
raise NotImplementedError
def MigrationInfo(self, instance): # pylint: disable=R0201,W0613
"""Get instance information to perform a migration.
By default assume no information is needed.
@type instance: L{objects.Instance}
@param instance: instance to be migrated
@rtype: string/data (opaque)
@return: instance migration information - serialized form
"""
return ""
def AcceptInstance(self, instance, info, target):
"""Prepare to accept an instance.
By default assume no preparation is needed.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type info: string/data (opaque)
@param info: migration information, from the source node
@type target: string
@param target: target host (usually ip), on this node
"""
pass
def BalloonInstanceMemory(self, instance, mem):
"""Balloon an instance memory to a certain value.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type mem: int
@param mem: actual memory size to use for instance runtime
"""
raise NotImplementedError
def FinalizeMigrationDst(self, instance, info, success):
"""Finalize the instance migration on the target node.
Should finalize or revert any preparation done to accept the instance.
Since by default we do no preparation, we also don't have anything to do
@type instance: L{objects.Instance}
@param instance: instance whose migration is being finalized
@type info: string/data (opaque)
@param info: migration information, from the source node
@type success: boolean
@param success: whether the migration was a success or a failure
"""
pass
def MigrateInstance(self, instance, target, live):
"""Migrate an instance.
@type instance: L{objects.Instance}
@param instance: the instance to be migrated
@type target: string
@param target: hostname (usually ip) of the target node
@type live: boolean
@param live: whether to do a live or non-live migration
"""
raise NotImplementedError
def FinalizeMigrationSource(self, instance, success, live):
"""Finalize the instance migration on the source node.
@type instance: L{objects.Instance}
@param instance: the instance that was migrated
@type success: bool
@param success: whether the migration succeeded or not
@type live: bool
@param live: whether the user requested a live migration or not
"""
pass
def GetMigrationStatus(self, instance):
"""Get the migration status
@type instance: L{objects.Instance}
@param instance: the instance that is being migrated
@rtype: L{objects.MigrationStatus}
@return: the status of the current migration (one of
L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
progress info that can be retrieved from the hypervisor
"""
raise NotImplementedError
def _InstanceStartupMemory(self, instance):
"""Get the correct startup memory for an instance
This function calculates how much memory an instance should be started
with, making sure it's a value between the minimum and the maximum memory,
but also trying to use no more than the current free memory on the node.
@type instance: L{objects.Instance}
@param instance: the instance that is being started
@rtype: integer
@return: memory the instance should be started with
"""
free_memory = self.GetNodeInfo()["memory_free"]
max_start_mem = min(instance.beparams[constants.BE_MAXMEM], free_memory)
start_mem = max(instance.beparams[constants.BE_MINMEM], max_start_mem)
return start_mem
@classmethod
def CheckParameterSyntax(cls, hvparams):
"""Check the given parameters for validity.
This should check the passed set of parameters for
validity. Classes should extend, not replace, this function.
@type hvparams: dict
@param hvparams: dictionary with parameter names/value
@raise errors.HypervisorError: when a parameter is not valid
"""
for key in hvparams:
if key not in cls.PARAMETERS:
raise errors.HypervisorError("Parameter '%s' is not supported" % key)
# cheap tests that run on the master, should not access the world
for name, (required, check_fn, errstr, _, _) in cls.PARAMETERS.items():
if name not in hvparams:
raise errors.HypervisorError("Parameter '%s' is missing" % name)
value = hvparams[name]
if not required and not value:
continue
if not value:
raise errors.HypervisorError("Parameter '%s' is required but"
" is currently not defined" % (name, ))
if check_fn is not None and not check_fn(value):
raise errors.HypervisorError("Parameter '%s' fails syntax"
" check: %s (current value: '%s')" %
(name, errstr, value))
@classmethod
def ValidateParameters(cls, hvparams):
"""Check the given parameters for validity.
This should check the passed set of parameters for
validity. Classes should extend, not replace, this function.
@type hvparams: dict
@param hvparams: dictionary with parameter names/value
@raise errors.HypervisorError: when a parameter is not valid
"""
for name, (required, _, _, check_fn, errstr) in cls.PARAMETERS.items():
value = hvparams[name]
if not required and not value:
continue
if check_fn is not None and not check_fn(value):
raise errors.HypervisorError("Parameter '%s' fails"
" validation: %s (current value: '%s')" %
(name, errstr, value))
@classmethod
def PowercycleNode(cls):
"""Hard powercycle a node using hypervisor specific methods.
This method should hard powercycle the node, using whatever
methods the hypervisor provides. Note that this means that all
instances running on the node must be stopped too.
"""
raise NotImplementedError
@staticmethod
def GetLinuxNodeInfo():
"""For linux systems, return actual OS information.
This is an abstraction for all non-hypervisor-based classes, where
the node actually sees all the memory and CPUs via the /proc
interface and standard commands. The other case if for example
xen, where you only see the hardware resources via xen-specific
tools.
@return: a dict with the following keys (values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- memory_dom0: the memory used by the node itself, if available
"""
try:
data = utils.ReadFile("/proc/meminfo").splitlines()
except EnvironmentError, err:
raise errors.HypervisorError("Failed to list node info: %s" % (err,))
result = {}
sum_free = 0
try:
for line in data:
splitfields = line.split(":", 1)
if len(splitfields) > 1:
key = splitfields[0].strip()
val = splitfields[1].strip()
if key == "MemTotal":
result["memory_total"] = int(val.split()[0]) / 1024
elif key in ("MemFree", "Buffers", "Cached"):
sum_free += int(val.split()[0]) / 1024
elif key == "Active":
result["memory_dom0"] = int(val.split()[0]) / 1024
except (ValueError, TypeError), err:
raise errors.HypervisorError("Failed to compute memory usage: %s" %
(err,))
result["memory_free"] = sum_free
cpu_total = 0
try:
fh = open("/proc/cpuinfo")
try:
cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
fh.read()))
finally:
fh.close()
except EnvironmentError, err:
raise errors.HypervisorError("Failed to list node info: %s" % (err,))
result["cpu_total"] = cpu_total
# FIXME: export correct data here
result["cpu_nodes"] = 1
result["cpu_sockets"] = 1
return result
@classmethod
def LinuxPowercycle(cls):
"""Linux-specific powercycle method.
"""
try:
fd = os.open("/proc/sysrq-trigger", os.O_WRONLY)
try:
os.write(fd, "b")
finally:
fd.close()
except OSError:
logging.exception("Can't open the sysrq-trigger file")
result = utils.RunCmd(["reboot", "-n", "-f"])
if not result:
logging.error("Can't run shutdown: %s", result.output)
@staticmethod
def _FormatVerifyResults(msgs):
"""Formats the verification results, given a list of errors.
@param msgs: list of errors, possibly empty
@return: overall problem description if something is wrong,
C{None} otherwise
"""
if msgs:
return "; ".join(msgs)
else:
return None | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2008 Fred Gansevles <fred@betterbe.com>
#
# This file is part of rdiff-backup.
#
# rdiff-backup is free software; you can redistribute it and/or modify
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# rdiff-backup is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rdiff-backup; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from __future__ import generators
import C, metadata, re, rorpiter, rpath, log
# win32security/pywintypes are only available on Windows with pywin32
# installed; on other platforms fall back to no-op flag values so this
# module can still be imported.
try:
    from win32security import *
    import pywintypes
except ImportError:
    GROUP_SECURITY_INFORMATION = 0
    OWNER_SECURITY_INFORMATION = 0
    DACL_SECURITY_INFORMATION = 0
    # pywintypes being None is used below (ACL.load_from_rp) as the signal
    # that Windows ACL support is unavailable
    pywintypes = None
def encode(str_):
    """Return str_ encoded as UTF-8 if it is a unicode object.

    Plain (byte) strings are passed through unchanged.

    """
    if type(str_) is not unicode:
        return str_
    return str_.encode('utf-8')
class ACL:
flags = (GROUP_SECURITY_INFORMATION|
OWNER_SECURITY_INFORMATION|
DACL_SECURITY_INFORMATION)
def __init__(self, index=()):
self.__acl = ""
self.index = index
def get_indexpath(self): return self.index and '/'.join(self.index) or '.'
def load_from_rp(self, rp, skip_inherit_only = True):
self.index = rp.index
# Sometimes, we are asked to load from an rpath when ACL's
# are not supported. Ignore the request in this case.
if not pywintypes:
return
try:
sd = rp.conn.win32security. \
GetNamedSecurityInfo(rp.path, SE_FILE_OBJECT, ACL.flags)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to read ACL from %s: %s"
% (repr(rp.path), exc), 4)
return
if skip_inherit_only:
# skip the inherit_only aces
acl = sd.GetSecurityDescriptorDacl()
if acl:
n = acl.GetAceCount()
# traverse the ACL in reverse, so the indices stay correct
while n:
n -= 1
ace_flags = acl.GetAce(n)[0][1]
if ace_flags & INHERIT_ONLY_ACE:
acl.DeleteAce(n)
sd.SetSecurityDescriptorDacl(1, acl, 0)
if ACL.flags & SACL_SECURITY_INFORMATION:
acl = sd.GetSecurityDescriptorSacl()
if acl:
n = acl.GetAceCount()
# traverse the ACL in reverse, so the indices stay correct
while n:
n -= 1
ace_flags = acl.GetAce(n)[0][1]
if ace_flags & INHERIT_ONLY_ACE:
acl.DeleteAce(n)
sd.SetSecurityDescriptorSacl(1, acl, 0)
if not sd.GetSecurityDescriptorDacl():
sd.SetSecurityDescriptorDacl(0, None, 0)
if (ACL.flags & SACL_SECURITY_INFORMATION) and not \
sd.GetSecurityDescriptorSacl():
sd.SetSecurityDescriptorSacl(0, None, 0)
try:
self.__acl = \
rp.conn.win32security. \
ConvertSecurityDescriptorToStringSecurityDescriptor(sd,
SDDL_REVISION_1, ACL.flags)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to convert ACL from %s to string: %s"
% (repr(rp.path), exc), 4)
self.__acl = ''
def clear_rp(self, rp):
# not sure how to interpret this
# I'll just clear all acl-s from rp.path
try:
sd = rp.conn.win32security. \
GetNamedSecurityInfo(rp.path, SE_FILE_OBJECT, ACL.flags)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to read ACL from %s for clearing: %s"
% (repr(rp.path), exc), 4)
return
acl = sd.GetSecurityDescriptorDacl()
if acl:
n = acl.GetAceCount()
# traverse the ACL in reverse, so the indices stay correct
while n:
n -= 1
acl.DeleteAce(n)
sd.SetSecurityDescriptorDacl(0, acl, 0)
if ACL.flags & SACL_SECURITY_INFORMATION:
acl = sd.GetSecurityDescriptorSacl()
if acl:
n = acl.GetAceCount()
# traverse the ACL in reverse, so the indices stay correct
while n:
n -= 1
acl.DeleteAce(n)
sd.SetSecurityDescriptorSacl(0, acl, 0)
try:
rp.conn.win32security. \
SetNamedSecurityInfo(rp.path, SE_FILE_OBJECT, ACL.flags,
sd.GetSecurityDescriptorOwner(),sd.GetSecurityDescriptorGroup(),
sd.GetSecurityDescriptorDacl(),
(ACL.flags & SACL_SECURITY_INFORMATION) and
sd.GetSecurityDescriptorSacl() or None)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to set ACL on %s after clearing: %s"
% (repr(rp.path), exc), 4)
def write_to_rp(self, rp):
if not self.__acl:
return
try:
sd = rp.conn.win32security. \
ConvertStringSecurityDescriptorToSecurityDescriptor(
self.__acl, SDDL_REVISION_1)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to convert string %s to ACL: %s"
% (repr(self.__acl), exc), 4)
# Enable next block of code for dirs after we have a mechanism in
# backup.py (and similar) to do a first pass to see if a directory
# has SE_DACL_PROTECTED. In that case, we will need to
# 1) dest_rorp.write_win_acl(source_rorp.get_win_acl())
# --> And clear existing dest_rorp one while doing so
# 2) Check if backup user has Admin privs to write dest_rorp
# --> May need to use Win32 AccessCheck() API
# 3) If not, add Admin write privs to dest_rorp and add dir
# to dir_perms_list-equivalent
# 4) THEN, allow the pre_process() function to finish and the
# files be copied over. Those files which wish to
# will now inherit the correct ACE objects.
# 5) If dir was on dir_perms_list-equivalent, drop the write
# write permission we added.
# 6) When copy_attribs is called in end_process, make sure
# that the write_win_acl() call isn't made this time
# The reason we will need to do this is because otherwise, the files
# which are created during step 4 will reference the ACE entries
# which we clear during step 6. We need to clear them *before* the
# children files/subdirs are created and generate the appropriate
# DACL so the inheritance magic can happen during step 4.
(flags, revision) = sd.GetSecurityDescriptorControl()
if (not rp.isdir() and flags & SE_DACL_PROTECTED):
self.clear_rp(rp)
try:
rp.conn.win32security. \
SetNamedSecurityInfo(rp.path, SE_FILE_OBJECT, ACL.flags,
sd.GetSecurityDescriptorOwner(),sd.GetSecurityDescriptorGroup(),
sd.GetSecurityDescriptorDacl(),
(ACL.flags & SACL_SECURITY_INFORMATION) and
sd.GetSecurityDescriptorSacl() or None)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to set ACL on %s: %s"
% (repr(rp.path), exc), 4)
def __str__(self):
return '# file: %s\n%s\n' % \
(C.acl_quote(encode(self.get_indexpath())),
unicode(self.__acl))
def from_string(self, acl_str):
lines = acl_str.splitlines()
if len(lines) != 2 or not lines[0][:8] == "# file: ":
raise metadata.ParsingError("Bad record beginning: " + lines[0][:8])
filename = lines[0][8:]
if filename == '.': self.index = ()
else: self.index = tuple(unicode(C.acl_unquote(filename)).split('/'))
self.__acl = lines[1]
def Record2WACL(record):
    """Deserialize a flat-file record into a new ACL object."""
    parsed = ACL()
    parsed.from_string(record)
    return parsed
def WACL2Record(wacl):
    """Serialize an ACL object into its flat-file (unicode) record form."""
    return unicode(wacl)
class WACLExtractor(metadata.FlatExtractor):
    """Iterate ExtendedAttributes objects from the WACL information file"""
    # each record starts with a "# file: <name>" header line
    record_boundary_regexp = re.compile('(?:\\n|^)(# file: (.*?))\\n')
    record_to_object = staticmethod(Record2WACL)

    def filename_to_index(self, filename):
        """Convert possibly quoted filename to index tuple"""
        if filename == '.':
            return ()
        return tuple(C.acl_unquote(filename).split('/'))
class WinAccessControlListFile(metadata.FlatFile):
    """Store/retrieve ACLs from extended_attributes file"""
    # metadata file name prefix for the Windows ACL records
    _prefix = "win_access_control_lists"
    # classes used to convert between records and ACL objects
    _extractor = WACLExtractor
    _object_to_record = staticmethod(WACL2Record)
def join_wacl_iter(rorp_iter, wacl_iter):
    """Update a rorp iter by adding the information from acl_iter"""
    collated = rorpiter.CollateIterators(rorp_iter, wacl_iter)
    for rorp, wacl in collated:
        assert rorp, "Missing rorp for index %s" % (wacl.index,)
        if not wacl:
            # no ACL record for this file: attach an empty one
            wacl = ACL(rorp.index)
        rorp.set_win_acl(unicode(wacl))
        yield rorp
def rpath_acl_win_get(rpath):
    """Read the Windows ACL of rpath and return its unicode string form."""
    loaded = ACL()
    loaded.load_from_rp(rpath)
    return unicode(loaded)
# install as the generic hook used by the rpath module
rpath.win_acl_get = rpath_acl_win_get
def rpath_get_blank_win_acl(index):
    """Return the unicode string form of an empty ACL for index."""
    return unicode(ACL(index))
# install as the generic hook used by the rpath module
rpath.get_blank_win_acl = rpath_get_blank_win_acl
def rpath_set_win_acl(rp, acl_str):
    """Parse acl_str and apply the resulting ACL to the file at rp."""
    parsed = ACL()
    parsed.from_string(acl_str)
    parsed.write_to_rp(rp)
# install as the generic hook used by the rpath module
rpath.write_win_acl = rpath_set_win_acl
def init_acls():
    """Enable the Windows privileges needed for ACL backup/restore.

    A process that tries to read or write a SACL needs to have and enable
    the SE_SECURITY_NAME privilege.  And in order to backup/restore, the
    SE_BACKUP_NAME and SE_RESTORE_NAME privileges are needed.  If the
    security privilege could actually be enabled, SACL handling is turned
    on by adding SACL_SECURITY_INFORMATION to ACL.flags.
    """
    import win32api
    try:
        hnd = OpenProcessToken(win32api.GetCurrentProcess(),
                               TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY)
    except win32api.error, exc:
        log.Log("Warning: unable to open Windows process token: %s"
                % exc, 5)
        return
    try:
        try:
            lpv = lambda priv: LookupPrivilegeValue(None, priv)
            # enable the SE_*_NAME privileges
            SecurityName = lpv(SE_SECURITY_NAME)
            AdjustTokenPrivileges(hnd, False, [
                (SecurityName, SE_PRIVILEGE_ENABLED),
                (lpv(SE_BACKUP_NAME), SE_PRIVILEGE_ENABLED),
                (lpv(SE_RESTORE_NAME), SE_PRIVILEGE_ENABLED)
            ])
        except win32api.error, exc:
            log.Log("Warning: unable to enable SE_*_NAME privileges: %s"
                    % exc, 5)
            return
        # AdjustTokenPrivileges does not report per-privilege failures, so
        # query the token to see if the security privilege actually stuck
        for name, enabled in GetTokenInformation(hnd, TokenPrivileges):
            if name == SecurityName and enabled:
                # now we *may* access the SACL (sigh)
                ACL.flags |= SACL_SECURITY_INFORMATION
                break
    finally:
        win32api.CloseHandle(hnd)
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"context"
"errors"
"reflect"
"sync"
"testing"
"time"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
storagelisters "k8s.io/client-go/listers/storage/v1"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/util"
)
// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now', therefore these tests
// can't reliably simulate periodic sync of volumes/claims - it would be
// either very timing-sensitive or slow to wait for real periodic sync.
func TestControllerSync(t *testing.T) {
tests := []controllerTest{
// [Unit test set 5] - controller tests.
// We test the controller as if
// it was connected to real API server, i.e. we call add/update/delete
// Claim/Volume methods. Also, all changes to volumes and claims are
// sent to add/update/delete Claim/Volume as real controller would do.
{
// addClaim gets a new claim. Check it's bound to a volume.
name: "5-2 - complete bind",
initialVolumes: newVolumeArray("volume5-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
expectedVolumes: newVolumeArray("volume5-2", "1Gi", "uid5-2", "claim5-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController),
initialClaims: noclaims, /* added in testAddClaim5_2 */
expectedClaims: newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", v1.ClaimBound, nil, volume.AnnBoundByController, volume.AnnBindCompleted),
expectedEvents: noevents,
errors: noerrors,
// Custom test function that generates an add event
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
claim := newClaim("claim5-2", "uid5-2", "1Gi", "", v1.ClaimPending, nil)
reactor.AddClaimEvent(claim)
return nil
},
},
{
name: "5-2-2 - complete bind when PV and PVC both exist",
initialVolumes: newVolumeArray("volume5-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
expectedVolumes: newVolumeArray("volume5-2", "1Gi", "uid5-2", "claim5-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController),
initialClaims: newClaimArray("claim5-2", "uid5-2", "1Gi", "", v1.ClaimPending, nil),
expectedClaims: newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", v1.ClaimBound, nil, volume.AnnBoundByController, volume.AnnBindCompleted),
expectedEvents: noevents,
errors: noerrors,
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
return nil
},
},
{
name: "5-2-3 - complete bind when PV and PVC both exist and PV has AnnPreResizeCapacity annotation",
initialVolumes: volumesWithAnnotation(util.AnnPreResizeCapacity, "1Gi", newVolumeArray("volume5-2", "2Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController)),
expectedVolumes: volumesWithAnnotation(util.AnnPreResizeCapacity, "1Gi", newVolumeArray("volume5-2", "2Gi", "uid5-2", "claim5-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController)),
initialClaims: withExpectedCapacity("2Gi", newClaimArray("claim5-2", "uid5-2", "2Gi", "", v1.ClaimPending, nil)),
expectedClaims: withExpectedCapacity("1Gi", newClaimArray("claim5-2", "uid5-2", "2Gi", "volume5-2", v1.ClaimBound, nil, volume.AnnBoundByController, volume.AnnBindCompleted)),
expectedEvents: noevents,
errors: noerrors,
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
return nil
},
},
{
// deleteClaim with a bound claim makes bound volume released.
name: "5-3 - delete claim",
initialVolumes: newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController),
expectedVolumes: newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController),
initialClaims: newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", v1.ClaimBound, nil, volume.AnnBoundByController, volume.AnnBindCompleted),
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
// Custom test function that generates a delete event
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.DeleteClaimEvent(claim)
return nil
},
},
{
// deleteVolume with a bound volume. Check the claim is Lost.
name: "5-4 - delete volume",
initialVolumes: newVolumeArray("volume5-4", "1Gi", "uid5-4", "claim5-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
expectedVolumes: novolumes,
initialClaims: newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimBound, nil, volume.AnnBoundByController, volume.AnnBindCompleted),
expectedClaims: newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimLost, nil, volume.AnnBoundByController, volume.AnnBindCompleted),
expectedEvents: []string{"Warning ClaimLost"},
errors: noerrors,
// Custom test function that generates a delete event
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
obj := ctrl.volumes.store.List()[0]
volume := obj.(*v1.PersistentVolume)
reactor.DeleteVolumeEvent(volume)
return nil
},
},
{
// deleteClaim with a bound claim makes bound volume released with external deleter.
// delete the corresponding volume from apiserver, and report latency metric
name: "5-5 - delete claim and delete volume report metric",
initialVolumes: volumesWithAnnotation(volume.AnnDynamicallyProvisioned, "gcr.io/vendor-csi",
newVolumeArray("volume5-5", "10Gi", "uid5-5", "claim5-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classExternal, volume.AnnBoundByController)),
expectedVolumes: novolumes,
initialClaims: claimWithAnnotation(volume.AnnStorageProvisioner, "gcr.io/vendor-csi",
newClaimArray("claim5-5", "uid5-5", "1Gi", "volume5-5", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted)),
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
// Custom test function that generates a delete claim event which should have been caught by
// "deleteClaim" to remove the claim from controller's cache, after that, a volume deleted
// event will be generated to trigger "deleteVolume" call for metric reporting
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
test.initialVolumes[0].Annotations[volume.AnnDynamicallyProvisioned] = "gcr.io/vendor-csi"
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.DeleteClaimEvent(claim)
err := wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return len(ctrl.claims.ListKeys()) == 0, nil
})
if err != nil {
return err
}
// claim has been removed from controller's cache, generate a volume deleted event
volume := ctrl.volumes.store.List()[0].(*v1.PersistentVolume)
reactor.DeleteVolumeEvent(volume)
return nil
},
},
{
// deleteClaim with a bound claim makes bound volume released with external deleter pending
// there should be an entry in operation timestamps cache in controller
name: "5-6 - delete claim and waiting for external volume deletion",
initialVolumes: volumesWithAnnotation(volume.AnnDynamicallyProvisioned, "gcr.io/vendor-csi", []*v1.PersistentVolume{newExternalProvisionedVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classExternal, "fake.driver.csi", nil, volume.AnnBoundByController)}),
expectedVolumes: volumesWithAnnotation(volume.AnnDynamicallyProvisioned, "gcr.io/vendor-csi", []*v1.PersistentVolume{newExternalProvisionedVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, classExternal, "fake.driver.csi", nil, volume.AnnBoundByController)}),
initialClaims: claimWithAnnotation(volume.AnnStorageProvisioner, "gcr.io/vendor-csi",
newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted)),
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
// Custom test function that generates a delete claim event which should have been caught by
// "deleteClaim" to remove the claim from controller's cache and mark bound volume to be released
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
// should have been provisioned by external provisioner
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.DeleteClaimEvent(claim)
// wait until claim is cleared from cache, i.e., deleteClaim is called
err := wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return len(ctrl.claims.ListKeys()) == 0, nil
})
if err != nil {
return err
}
// wait for volume delete operation to appear once volumeWorker() runs
return wait.PollImmediate(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
// make sure the operation timestamp cache is NOT empty
if ctrl.operationTimestamps.Has("volume5-6") {
return true, nil
}
t.Logf("missing volume5-6 from timestamp cache, will retry")
return false, nil
})
},
},
{
// deleteVolume event issued before deleteClaim, no metric should have been reported
// and no delete operation start timestamp should be inserted into controller.operationTimestamps cache
name: "5-7 - delete volume event makes claim lost, delete claim event will not report metric",
initialVolumes: newVolumeArray("volume5-7", "10Gi", "uid5-7", "claim5-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classExternal, volume.AnnBoundByController, volume.AnnDynamicallyProvisioned),
expectedVolumes: novolumes,
initialClaims: claimWithAnnotation(volume.AnnStorageProvisioner, "gcr.io/vendor-csi",
newClaimArray("claim5-7", "uid5-7", "1Gi", "volume5-7", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted)),
expectedClaims: noclaims,
expectedEvents: []string{"Warning ClaimLost"},
errors: noerrors,
// Custom test function that generates a delete claim event which should have been caught by
// "deleteClaim" to remove the claim from controller's cache and mark bound volume to be released
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
volume := ctrl.volumes.store.List()[0].(*v1.PersistentVolume)
reactor.DeleteVolumeEvent(volume)
err := wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return len(ctrl.volumes.store.ListKeys()) == 0, nil
})
if err != nil {
return err
}
// Wait for the PVC to get fully processed. This avoids races between PV controller and DeleteClaimEvent
// below.
err = wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
return claim.Status.Phase == v1.ClaimLost, nil
})
if err != nil {
return err
}
// trying to remove the claim as well
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.DeleteClaimEvent(claim)
// wait until claim is cleared from cache, i.e., deleteClaim is called
err = wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return len(ctrl.claims.ListKeys()) == 0, nil
})
if err != nil {
return err
}
// make sure operation timestamp cache is empty
if ctrl.operationTimestamps.Has("volume5-7") {
return errors.New("failed checking timestamp cache")
}
return nil
},
},
{
// delete a claim waiting for being bound cleans up provision(volume ref == "") entry from timestamp cache
name: "5-8 - delete claim cleans up operation timestamp cache for provision",
initialVolumes: novolumes,
expectedVolumes: novolumes,
initialClaims: claimWithAnnotation(volume.AnnStorageProvisioner, "gcr.io/vendor-csi",
newClaimArray("claim5-8", "uid5-8", "1Gi", "", v1.ClaimPending, &classExternal)),
expectedClaims: noclaims,
expectedEvents: []string{"Normal ExternalProvisioning"},
errors: noerrors,
// Custom test function that generates a delete claim event which should have been caught by
// "deleteClaim" to remove the claim from controller's cache and mark bound volume to be released
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
// wait until the provision timestamp has been inserted
err := wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return ctrl.operationTimestamps.Has("default/claim5-8"), nil
})
if err != nil {
return err
}
// delete the claim
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.DeleteClaimEvent(claim)
// wait until claim is cleared from cache, i.e., deleteClaim is called
err = wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return len(ctrl.claims.ListKeys()) == 0, nil
})
if err != nil {
return err
}
// make sure operation timestamp cache is empty
if ctrl.operationTimestamps.Has("default/claim5-8") {
return errors.New("failed checking timestamp cache")
}
return nil
},
},
{
// Test that the finalizer gets removed if CSI migration is disabled. The in-tree finalizer is added
// back on the PV since migration is disabled.
name: "5-9 - volume has its external PV deletion protection finalizer removed as CSI migration is disabled",
initialVolumes: volumesWithFinalizers(
volumesWithAnnotation(volume.AnnMigratedTo, "pd.csi.storage.gke.io",
newVolumeArray("volume-5-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty, volume.AnnDynamicallyProvisioned)),
[]string{volume.PVDeletionProtectionFinalizer},
),
expectedVolumes: volumesWithFinalizers(newVolumeArray("volume-5-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty, volume.AnnDynamicallyProvisioned), []string{volume.PVDeletionInTreeProtectionFinalizer}),
initialClaims: noclaims,
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
test: func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
return nil
},
},
}
logger, ctx := ktesting.NewTestContext(t)
doit := func(test controllerTest) {
// Initialize the controller
client := &fake.Clientset{}
fakeVolumeWatch := watch.NewFakeWithOptions(watch.FakeOptions{Logger: &logger})
client.PrependWatchReactor("persistentvolumes", core.DefaultWatchReactor(fakeVolumeWatch, nil))
fakeClaimWatch := watch.NewFakeWithOptions(watch.FakeOptions{Logger: &logger})
client.PrependWatchReactor("persistentvolumeclaims", core.DefaultWatchReactor(fakeClaimWatch, nil))
client.PrependWatchReactor("storageclasses", core.DefaultWatchReactor(watch.NewFakeWithOptions(watch.FakeOptions{Logger: &logger}), nil))
client.PrependWatchReactor("nodes", core.DefaultWatchReactor(watch.NewFakeWithOptions(watch.FakeOptions{Logger: &logger}), nil))
client.PrependWatchReactor("pods", core.DefaultWatchReactor(watch.NewFakeWithOptions(watch.FakeOptions{Logger: &logger}), nil))
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
ctrl, err := newTestController(ctx, client, informers, true)
if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
}
// Inject storage classes into controller via a custom lister for test [5-5]
storageClasses := []*storagev1.StorageClass{
makeStorageClass(classExternal, &modeImmediate),
}
storageClasses[0].Provisioner = "gcr.io/vendor-csi"
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
for _, class := range storageClasses {
indexer.Add(class)
}
ctrl.classLister = storagelisters.NewStorageClassLister(indexer)
reactor := newVolumeReactor(ctx, client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors)
for _, claim := range test.initialClaims {
claim = claim.DeepCopy()
reactor.AddClaim(claim)
go func(claim *v1.PersistentVolumeClaim) {
fakeClaimWatch.Add(claim)
}(claim)
}
for _, volume := range test.initialVolumes {
volume = volume.DeepCopy()
reactor.AddVolume(volume)
go func(volume *v1.PersistentVolume) {
fakeVolumeWatch.Add(volume)
}(volume)
}
// Start the controller
var wg sync.WaitGroup
defer wg.Wait()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
informers.Start(ctx.Done())
informers.WaitForCacheSync(ctx.Done())
wg.Go(func() {
ctrl.Run(ctx)
})
// Wait for the controller to pass initial sync and fill its caches.
err = wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return len(ctrl.claims.ListKeys()) >= len(test.initialClaims) &&
len(ctrl.volumes.store.ListKeys()) >= len(test.initialVolumes), nil
})
if err != nil {
t.Errorf("Test %q controller sync failed: %v", test.name, err)
}
logger.V(4).Info("controller synced, starting test")
// Call the tested function
err = test.test(ctrl, reactor.VolumeReactor, test)
if err != nil {
t.Errorf("Test %q initial test call failed: %v", test.name, err)
}
// Simulate a periodic resync, just in case some events arrived in a
// wrong order.
ctrl.resync(ctx)
err = reactor.waitTest(test)
if err != nil {
t.Errorf("Failed to run test %s: %v", test.name, err)
}
evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
doit(test)
})
}
}
// storeVersion stores a test volume with the given resource version into the
// cache c via storeObjectUpdate and verifies both the returned bool and the
// version that ends up in the cache.
//
// When expectedReturn is true the cache must contain exactly `version`
// afterwards; when false, the stored object must keep some other (newer)
// version.
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
	pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
	pv.ResourceVersion = version
	logger, _ := ktesting.NewTestContext(t)
	ret, err := storeObjectUpdate(logger, c, pv, "volume")
	if err != nil {
		t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
	}
	if expectedReturn != ret {
		t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret)
	}
	// find the stored version
	pvObj, found, err := c.GetByKey("pvName")
	if err != nil {
		// Fatal: without a cached object the checks below would dereference nil.
		t.Fatalf("expected volume 'pvName' in the cache, got error instead: %v", err)
	}
	if !found {
		// Fatal for the same reason as above.
		t.Fatalf("expected volume 'pvName' in the cache but it was not found")
	}
	pv, ok := pvObj.(*v1.PersistentVolume)
	if !ok {
		// Fatal: pv is nil here; continuing would panic with a nil pointer
		// dereference and hide the real failure.
		t.Fatalf("expected volume in the cache, got different object instead: %#v", pvObj)
	}
	if ret {
		if pv.ResourceVersion != version {
			t.Errorf("expected volume with version %s in the cache, got %s instead", version, pv.ResourceVersion)
		}
	} else {
		if pv.ResourceVersion == version {
			t.Errorf("expected volume with version other than %s in the cache, got %s instead", version, pv.ResourceVersion)
		}
	}
}
// TestControllerCache tests func storeObjectUpdate()
func TestControllerCache(t *testing.T) {
	// Cache under test
	c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)

	// Each step stores one resource version and states whether the store is
	// expected to accept it.
	steps := []struct {
		prefix    string
		version   string
		expectSet bool
	}{
		// Store new PV
		{"Step1", "1", true},
		// Store the same PV
		{"Step2", "1", true},
		// Store newer PV
		{"Step3", "2", true},
		// Store older PV - simulating old "PV updated" event or periodic sync
		// with old data
		{"Step4", "1", false},
		// Store newer PV - test integer parsing ("2" > "10" as string,
		// while 2 < 10 as integers)
		{"Step5", "10", true},
	}
	for _, s := range steps {
		storeVersion(t, s.prefix, c, s.version, s.expectSet)
	}
}
func TestControllerCacheParsingError(t *testing.T) {
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// There must be something in the cache to compare with
storeVersion(t, "Step1", c, "1", true)
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = "xxx"
logger, _ := ktesting.NewTestContext(t)
_, err := storeObjectUpdate(logger, c, pv, "volume")
if err == nil {
t.Errorf("Expected parsing error, got nil instead")
}
}
// makeStorageClass returns a non-default StorageClass with the given name and
// volume binding mode, backed by the no-op in-tree provisioner.
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
	sc := storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: scName},
		Provisioner:       "kubernetes.io/no-provisioner",
		VolumeBindingMode: mode,
	}
	return &sc
}
// makeDefaultStorageClass returns a StorageClass annotated as the cluster
// default, with the given name and volume binding mode.
func makeDefaultStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
	sc := makeStorageClass(scName, mode)
	sc.ObjectMeta.Annotations = map[string]string{
		util.IsDefaultStorageClassAnnotation: "true",
	}
	return sc
}
// TestAnnealMigrationAnnotations verifies updateMigrationAnnotations: when CSI
// migration is enabled for a plugin, PVs/PVCs dynamically provisioned by it
// receive the volume.AnnMigratedTo annotation pointing at the CSI driver;
// when migration is disabled, a previously added AnnMigratedTo annotation is
// removed again (rollback). Plugins without migration support are untouched.
func TestAnnealMigrationAnnotations(t *testing.T) {
	// The gce-pd plugin is used to test a migrated plugin (as the feature is
	// locked as of 1.25), and rbd is used as a non-migrated plugin (still alpha
	// as of 1.25). As plugins are migrated, rbd should be changed to a non-
	// migrated plugin. If there are no other non-migrated plugins, then those
	// test cases are moot and they can be removed (keeping only the test cases
	// with gce-pd).
	const testPlugin = "non-migrated-plugin"
	const migratedPlugin = "kubernetes.io/gce-pd"
	const migratedDriver = "pd.csi.storage.gke.io"
	const nonmigratedPlugin = "kubernetes.io/rbd"
	const nonmigratedDriver = "rbd.csi.ceph.com"
	tests := []struct {
		name                 string
		volumeAnnotations    map[string]string
		expVolumeAnnotations map[string]string
		claimAnnotations     map[string]string
		expClaimAnnotations  map[string]string
		testMigration        bool
	}{
		{
			name:                 "migration on",
			volumeAnnotations:    map[string]string{volume.AnnDynamicallyProvisioned: migratedPlugin},
			expVolumeAnnotations: map[string]string{volume.AnnDynamicallyProvisioned: migratedPlugin, volume.AnnMigratedTo: migratedDriver},
			claimAnnotations:     map[string]string{volume.AnnStorageProvisioner: migratedPlugin},
			expClaimAnnotations:  map[string]string{volume.AnnStorageProvisioner: migratedPlugin, volume.AnnMigratedTo: migratedDriver},
		},
		{
			name:                 "migration on with Beta storage provisioner annontation",
			volumeAnnotations:    map[string]string{volume.AnnDynamicallyProvisioned: migratedPlugin},
			expVolumeAnnotations: map[string]string{volume.AnnDynamicallyProvisioned: migratedPlugin, volume.AnnMigratedTo: migratedDriver},
			claimAnnotations:     map[string]string{volume.AnnBetaStorageProvisioner: migratedPlugin},
			expClaimAnnotations:  map[string]string{volume.AnnBetaStorageProvisioner: migratedPlugin, volume.AnnMigratedTo: migratedDriver},
		},
		{
			name:                 "migration off",
			volumeAnnotations:    map[string]string{volume.AnnDynamicallyProvisioned: nonmigratedPlugin},
			expVolumeAnnotations: map[string]string{volume.AnnDynamicallyProvisioned: nonmigratedPlugin},
			claimAnnotations:     map[string]string{volume.AnnStorageProvisioner: nonmigratedPlugin},
			expClaimAnnotations:  map[string]string{volume.AnnStorageProvisioner: nonmigratedPlugin},
		},
		{
			name:                 "migration off removes migrated to (rollback)",
			volumeAnnotations:    map[string]string{volume.AnnDynamicallyProvisioned: nonmigratedPlugin, volume.AnnMigratedTo: nonmigratedDriver},
			expVolumeAnnotations: map[string]string{volume.AnnDynamicallyProvisioned: nonmigratedPlugin},
			claimAnnotations:     map[string]string{volume.AnnStorageProvisioner: nonmigratedPlugin, volume.AnnMigratedTo: nonmigratedDriver},
			expClaimAnnotations:  map[string]string{volume.AnnStorageProvisioner: nonmigratedPlugin},
		},
		{
			name:                 "migration off removes migrated to (rollback) with Beta storage provisioner annontation",
			volumeAnnotations:    map[string]string{volume.AnnDynamicallyProvisioned: nonmigratedPlugin, volume.AnnMigratedTo: nonmigratedDriver},
			expVolumeAnnotations: map[string]string{volume.AnnDynamicallyProvisioned: nonmigratedPlugin},
			claimAnnotations:     map[string]string{volume.AnnBetaStorageProvisioner: nonmigratedPlugin, volume.AnnMigratedTo: nonmigratedDriver},
			expClaimAnnotations:  map[string]string{volume.AnnBetaStorageProvisioner: nonmigratedPlugin},
		},
		{
			name:                 "migration on, other plugin not affected",
			volumeAnnotations:    map[string]string{volume.AnnDynamicallyProvisioned: testPlugin},
			expVolumeAnnotations: map[string]string{volume.AnnDynamicallyProvisioned: testPlugin},
			claimAnnotations:     map[string]string{volume.AnnStorageProvisioner: testPlugin},
			expClaimAnnotations:  map[string]string{volume.AnnStorageProvisioner: testPlugin},
		},
		{
			name:                 "not dynamically provisioned",
			volumeAnnotations:    map[string]string{},
			expVolumeAnnotations: map[string]string{},
			claimAnnotations:     map[string]string{},
			expClaimAnnotations:  map[string]string{},
			testMigration:        false,
		},
		{
			name:                 "nil annotations",
			volumeAnnotations:    nil,
			expVolumeAnnotations: nil,
			claimAnnotations:     nil,
			expClaimAnnotations:  nil,
			testMigration:        false,
		},
	}

	translator := csitrans.New()
	cmpm := csimigration.NewPluginManager(translator)
	logger, _ := ktesting.NewTestContext(t)
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if tc.volumeAnnotations != nil {
				ann := tc.volumeAnnotations
				updateMigrationAnnotations(logger, cmpm, translator, ann, false)
				if !reflect.DeepEqual(tc.expVolumeAnnotations, ann) {
					t.Errorf("got volume annotations: %v, but expected: %v", ann, tc.expVolumeAnnotations)
				}
			}
			if tc.claimAnnotations != nil {
				ann := tc.claimAnnotations
				updateMigrationAnnotations(logger, cmpm, translator, ann, true)
				// Fix: report the claim expectation (expClaimAnnotations),
				// not the volume one, in the claim branch.
				if !reflect.DeepEqual(tc.expClaimAnnotations, ann) {
					t.Errorf("got claim annotations: %v, but expected: %v", ann, tc.expClaimAnnotations)
				}
			}
		})
	}
}
// TestModifyDeletionFinalizers exercises modifyDeletionFinalizers across
// combinations of CSI-migration state, existing finalizers, and reclaim
// policies, verifying both the resulting finalizer list and whether a
// modification is reported.
func TestModifyDeletionFinalizers(t *testing.T) {
	// This set of tests ensures that protection finalizer is removed when CSI migration is disabled
	// and PV controller needs to remove finalizers added by the external-provisioner. The rbd
	// in-tree plugin is used as migration is disabled. When that plugin is migrated, a different
	// non-migrated one should be used. If all plugins are migrated this test can be removed. The
	// gce in-tree plugin is used for a migrated driver as it is feature-locked as of 1.25.
	const nonmigratedDriver = "rbd.csi.ceph.com"
	const migratedPlugin = "kubernetes.io/gce-pd"
	const migratedDriver = "pd.csi.storage.gke.io"
	const customFinalizer = "test.volume.kubernetes.io/finalizer"
	tests := []struct {
		name                string
		initialVolume       *v1.PersistentVolume
		volumeAnnotations   map[string]string
		expVolumeFinalizers []string
		expModified         bool
	}{
		{
			// Represents a CSI volume provisioned through external-provisioner, no CSI migration enabled.
			name:                "13-1 migration was never enabled, volume has the finalizer",
			initialVolume:       newExternalProvisionedVolume("volume-13-1", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, nonmigratedDriver, []string{volume.PVDeletionProtectionFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{volume.PVDeletionProtectionFinalizer},
			expModified:         false,
		},
		{
			// Represents a volume provisioned through external-provisioner but the external-provisioner has
			// yet to sync the volume to add the new finalizer
			name:                "13-2 migration was never enabled, volume does not have the finalizer",
			initialVolume:       newExternalProvisionedVolume("volume-13-2", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, nonmigratedDriver, nil, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: nil,
			expModified:         false,
		},
		{
			// Represents an in-tree volume that has the migrated-to annotation but the external-provisioner is
			// yet to sync the volume and add the pv deletion protection finalizer. The custom finalizer is some
			// pre-existing finalizer, for example the pv-protection finalizer. When csi-migration is disabled,
			// the migrated-to annotation will be removed shortly when updateVolumeMigrationAnnotationsAndFinalizers
			// is called followed by adding back the in-tree pv protection finalizer.
			name:                "13-3 migration was disabled, volume has existing custom finalizer, does not have in-tree pv deletion protection finalizer",
			initialVolume:       newVolumeWithFinalizers("volume-13-3", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{customFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{customFinalizer, volume.PVDeletionInTreeProtectionFinalizer},
			expModified:         true,
		},
		{
			name:                "13-4 migration was disabled, volume has no finalizers",
			initialVolume:       newVolumeWithFinalizers("volume-13-4", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, nil, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{volume.PVDeletionInTreeProtectionFinalizer},
			expModified:         true,
		},
		{
			// Represents roll back scenario where the external-provisioner has added the pv deletion protection
			// finalizer and later the csi migration was disabled. The pv deletion protection finalizer added through
			// external-provisioner will be removed and the in-tree pv deletion protection finalizer will be added.
			name:                "13-5 migration was disabled, volume has external PV deletion finalizer",
			initialVolume:       newVolumeWithFinalizers("volume-13-5", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{volume.PVDeletionProtectionFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{volume.PVDeletionInTreeProtectionFinalizer},
			expModified:         true,
		},
		{
			// Represents roll-back of csi-migration as 13-5, here there are multiple finalizers, only the pv deletion
			// protection finalizer added by external-provisioner will be removed and the in-tree pv deletion protection
			// finalizer will be added.
			name:                "13-6 migration was disabled, volume has multiple finalizers",
			initialVolume:       newVolumeWithFinalizers("volume-13-6", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{volume.PVDeletionProtectionFinalizer, customFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{customFinalizer, volume.PVDeletionInTreeProtectionFinalizer},
			expModified:         true,
		},
		{
			// csi migration is enabled, the pv controller should not delete the finalizer added by the
			// external-provisioner and the in-tree finalizer should be deleted.
			name:                "13-7 migration is enabled, volume has both the in-tree and external PV deletion protection finalizer",
			initialVolume:       newVolumeWithFinalizers("volume-13-7", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{volume.PVDeletionProtectionFinalizer, volume.PVDeletionInTreeProtectionFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			volumeAnnotations:   map[string]string{volume.AnnDynamicallyProvisioned: migratedPlugin, volume.AnnMigratedTo: migratedDriver},
			expVolumeFinalizers: []string{volume.PVDeletionProtectionFinalizer},
			expModified:         true,
		},
		{
			// csi-migration is not completely enabled as the specific plugin feature is not present. This is equivalent
			// of disabled csi-migration.
			name:                "13-8 migration is enabled but plugin migration feature is disabled, volume has the external PV deletion protection finalizer",
			initialVolume:       newVolumeWithFinalizers("volume-13-8", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{volume.PVDeletionProtectionFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{volume.PVDeletionInTreeProtectionFinalizer},
			expModified:         true,
		},
		{
			// same as 13-8 but multiple finalizers exists, only the pv deletion protection finalizer needs to be
			// removed and the in-tree pv deletion protection finalizer needs to be added.
			name:                "13-9 migration is enabled but plugin migration feature is disabled, volume has multiple finalizers including external PV deletion protection finalizer",
			initialVolume:       newVolumeWithFinalizers("volume-13-9", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{volume.PVDeletionProtectionFinalizer, customFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: []string{customFinalizer, volume.PVDeletionInTreeProtectionFinalizer},
			expModified:         true,
		},
		{
			// corner error case.
			name:                "13-10 missing annotations but finalizers exist",
			initialVolume:       newVolumeWithFinalizers("volume-13-10", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, []string{volume.PVDeletionProtectionFinalizer}),
			expVolumeFinalizers: []string{volume.PVDeletionProtectionFinalizer},
			expModified:         false,
		},
		{
			name:                "13-11 missing annotations and finalizers",
			initialVolume:       newVolumeWithFinalizers("volume-13-11", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classCopper, nil),
			expVolumeFinalizers: nil,
			expModified:         false,
		},
		{
			// When ReclaimPolicy is Retain ensure that in-tree pv deletion protection finalizer is not added.
			name:                "13-12 migration is disabled, volume has no finalizers, reclaimPolicy is Retain",
			initialVolume:       newVolumeWithFinalizers("volume-13-12", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classCopper, nil, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: nil,
			expModified:         false,
		},
		{
			// When ReclaimPolicy is Recycle ensure that in-tree pv deletion protection finalizer is not added.
			name:                "13-13 migration is disabled, volume has no finalizers, reclaimPolicy is Recycle",
			initialVolume:       newVolumeWithFinalizers("volume-13-13", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classCopper, nil, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: nil,
			expModified:         false,
		},
		{
			// When ReclaimPolicy is Retain ensure that in-tree pv deletion protection finalizer present is removed.
			name:                "13-14 migration is disabled, volume has in-tree pv deletion finalizers, reclaimPolicy is Retain",
			initialVolume:       newVolumeWithFinalizers("volume-13-14", "1Gi", "uid11-23", "claim11-23", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classCopper, []string{volume.PVDeletionInTreeProtectionFinalizer}, volume.AnnDynamicallyProvisioned, volume.AnnBoundByController),
			expVolumeFinalizers: nil,
			expModified:         true,
		},
		{
			// Statically provisioned volumes should not have the in-tree pv deletion protection finalizer
			name:                "13-15 migration is disabled, statically provisioned PV",
			initialVolume:       newVolumeWithFinalizers("volume-13-14", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classCopper, nil),
			expVolumeFinalizers: nil,
			expModified:         false,
		},
	}

	translator := csitrans.New()
	cmpm := csimigration.NewPluginManager(translator)
	logger, _ := ktesting.NewTestContext(t)
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if tc.volumeAnnotations != nil {
				tc.initialVolume.SetAnnotations(tc.volumeAnnotations)
			}
			modifiedFinalizers, modified := modifyDeletionFinalizers(logger, cmpm, tc.initialVolume)
			if modified != tc.expModified {
				t.Errorf("got modified: %v, but expected: %v", modified, tc.expModified)
			}
			if !reflect.DeepEqual(tc.expVolumeFinalizers, modifiedFinalizers) {
				// Fix: "finaliers" -> "finalizers" in the failure message.
				t.Errorf("got volume finalizers: %v, but expected: %v", modifiedFinalizers, tc.expVolumeFinalizers)
			}
		})
	}
}
// TestRetroactiveStorageClassAssignment verifies syncClaim's retroactive
// default-storage-class assignment: a pending claim with no storage class set
// gets the current default class assigned, while claims that are already
// bound, already have a class, or use the beta storage-class annotation are
// left unchanged. Each entry pairs its own set of storage classes with the
// controller test cases run against them.
func TestRetroactiveStorageClassAssignment(t *testing.T) {
	tests := []struct {
		storageClasses []*storagev1.StorageClass
		tests          []controllerTest
	}{
		// [Unit test set 15] - retroactive storage class assignment tests
		{
			// No storage classes at all: nothing to assign.
			storageClasses: []*storagev1.StorageClass{},
			tests: []controllerTest{
				{
					name:            "15-1 - pvc storage class is not assigned retroactively if there are no default storage classes",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-1", "uid15-1", "1Gi", "", v1.ClaimPending, nil),
					expectedClaims:  newClaimArray("claim15-1", "uid15-1", "1Gi", "", v1.ClaimPending, nil),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
		{
			storageClasses: []*storagev1.StorageClass{
				makeDefaultStorageClass(classGold, &modeImmediate),
				makeStorageClass(classSilver, &modeImmediate),
			},
			tests: []controllerTest{
				{
					// Bound claim referencing a missing volume goes Lost, class stays.
					name:            "15-3 - pvc storage class is not assigned retroactively if claim is already bound",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-3", "uid15-3", "1Gi", "test", v1.ClaimBound, &classCopper, volume.AnnBoundByController, volume.AnnBindCompleted),
					expectedClaims:  newClaimArray("claim15-3", "uid15-3", "1Gi", "test", v1.ClaimLost, &classCopper, volume.AnnBoundByController, volume.AnnBindCompleted),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
		{
			storageClasses: []*storagev1.StorageClass{
				makeDefaultStorageClass(classGold, &modeImmediate),
				makeStorageClass(classSilver, &modeImmediate),
			},
			tests: []controllerTest{
				{
					name:            "15-4 - pvc storage class is not assigned retroactively if claim is already bound but annotations are missing",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-4", "uid15-4", "1Gi", "test", v1.ClaimBound, &classCopper),
					expectedClaims:  newClaimArray("claim15-4", "uid15-4", "1Gi", "test", v1.ClaimPending, &classCopper),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
		{
			storageClasses: []*storagev1.StorageClass{
				makeDefaultStorageClass(classGold, &modeImmediate),
				makeStorageClass(classSilver, &modeImmediate),
			},
			tests: []controllerTest{
				{
					// The happy path: pending claim with nil class gets the default.
					name:            "15-5 - pvc storage class is assigned retroactively if there is a default",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-5", "uid15-5", "1Gi", "", v1.ClaimPending, nil),
					expectedClaims:  newClaimArray("claim15-5", "uid15-5", "1Gi", "", v1.ClaimPending, &classGold),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
		{
			storageClasses: []*storagev1.StorageClass{
				makeDefaultStorageClass(classGold, &modeImmediate),
				makeDefaultStorageClass(classSilver, &modeImmediate)},
			tests: []controllerTest{
				{
					name:            "15-2 - pvc storage class is assigned retroactively if there are multiple default storage classes",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-2", "uid15-2", "1Gi", "", v1.ClaimPending, nil),
					expectedClaims:  newClaimArray("claim15-2", "uid15-2", "1Gi", "", v1.ClaimPending, &classGold),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
		{
			storageClasses: []*storagev1.StorageClass{
				makeDefaultStorageClass(classGold, &modeImmediate),
				makeStorageClass(classCopper, &modeImmediate),
			},
			tests: []controllerTest{
				{
					name:            "15-6 - pvc storage class is not changed if claim is not bound but already has a storage class",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-6", "uid15-6", "1Gi", "", v1.ClaimPending, &classCopper),
					expectedClaims:  newClaimArray("claim15-6", "uid15-6", "1Gi", "", v1.ClaimPending, &classCopper),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
		{
			storageClasses: []*storagev1.StorageClass{
				makeDefaultStorageClass(classGold, &modeImmediate),
				makeStorageClass(classCopper, &modeImmediate),
			},
			tests: []controllerTest{
				{
					name:            "15-7 - pvc storage class is not changed if claim is not bound but already set annotation \"volume.beta.kubernetes.io/storage-class\"",
					initialVolumes:  novolumes,
					expectedVolumes: novolumes,
					initialClaims:   newClaimArray("claim15-7", "uid15-7", "1Gi", "", v1.ClaimPending, nil, v1.BetaStorageClassAnnotation),
					expectedClaims:  newClaimArray("claim15-7", "uid15-7", "1Gi", "", v1.ClaimPending, nil, v1.BetaStorageClassAnnotation),
					expectedEvents:  noevents,
					errors:          noerrors,
					test:            testSyncClaim,
				},
			},
		},
	}
	_, ctx := ktesting.NewTestContext(t)
	for _, test := range tests {
		runSyncTests(t, ctx, test.tests, test.storageClasses, nil)
	}
}
/// This file implements Knuth-Bendix completion and the normal form algorithm.
/// Failure modes of Knuth-Bendix completion and word reduction. Each case
/// corresponds to one of the resource budgets in `RewritingSystem.Limits`
/// being exhausted.
enum RewritingError: Error {
    case tooManyRounds
    case tooManyRules
    case tooManyNodes
    case ruleTooLong
    case tooManySteps
    case reducedWordTooLong
}
/// Global switch for completion trace output.
let debug = false

/// Emits `str` to standard output when tracing is enabled. The autoclosure
/// keeps message construction free when `debug` is off.
func log(_ str: @autoclosure () -> String) {
    guard debug else { return }
    print(str())
}
struct RewritingSystem: ~Copyable {
// Whether completion has run, and how it ended.
var state: State = .initial
enum State {
    case initial   // completion has not finished yet
    case complete  // completion terminated with a confluent system
    case failed    // completion aborted, e.g. a limit was exceeded
}

// Size of the generator alphabet; presumably the number of distinct symbols
// a Word may contain — confirm against the Trie implementation.
var alphabet: Int
// All rules ever added, including ones later reduced away; indices into this
// array are stable and used by `trie` and `reducedRules`.
var rules: [Rule] = []
// Prefix trie mapping rule left-hand sides to their index in `rules`.
var trie: Trie
// Limits for completion
struct Limits: Hashable {
    var maxRounds = 100
    var maxRules = 200
    var maxLength = 100
    var maxReductionLength = 100
    var maxReductionSteps = 1 << 24
}
var limits = Limits()
var checkedRulesUpTo = 0 // Completion progress
var reducedRules: [UInt32] = [] // Bitmap of reduced rules
// Overlap between rule `i` and rule `j`, starting at offset `from` in i's lhs.
typealias CriticalPair = (i: Int, from: Int, j: Int)
var criticalPairs: [CriticalPair] = [] // Temporary array for completion
var stats = Stats()
struct Stats {
    var numRounds = 0
    var numRulesRemaining = 0 // Number of rules that were not reduced away
    var numReductionSteps = 0
}
init(alphabet: Int) {
self.alphabet = alphabet
self.trie = Trie(alphabet: self.alphabet)
criticalPairs.reserveCapacity(128)
}
mutating func addRules(_ rules: [Rule], order: Order)
throws(RewritingError) {
for var rule in rules {
_ = try addRule(&rule, order: order)
}
}
func reduceOne(_ word: Word, excluding: Int? = nil) -> (Int, Int)? {
var from = 0
while from < word.count {
if let n = trie.lookup(word, from) {
if n != excluding { return (from, n) }
}
from += 1
}
return nil
}
func reduce(_ word: inout Word, numReductionSteps: inout Int)
throws(RewritingError) {
var count = 0
repeat {
guard let (from, n) = reduceOne(word) else { return }
let index = word.startIndex + from
word.replaceSubrange(index ..< index + rules[n].lhs.count,
with: rules[n].rhs)
numReductionSteps += (from + rules[n].lhs.count)
if numReductionSteps > limits.maxReductionSteps { throw .tooManySteps }
if count > limits.maxReductionLength { throw .tooManySteps }
// FIXME: Load bearing
if word.count > limits.maxLength { throw .reducedWordTooLong }
count += 1
} while true
}
mutating func addOrientedRule(_ rule: Rule) throws(RewritingError) {
let longestSide = max(rule.lhs.count, rule.rhs.count)
if longestSide > limits.maxLength { throw .ruleTooLong }
if stats.numRulesRemaining == limits.maxRules { throw .tooManyRules }
log("Adding rule \(rules.count) = \(rule)")
try trie.insert(rule.lhs, rules.count)
rules.append(rule)
stats.numRulesRemaining += 1
}
mutating func addRule(_ rule: inout Rule, order: Order)
throws(RewritingError) -> Bool {
var numReductionSteps = stats.numReductionSteps
try reduce(&rule.lhs, numReductionSteps: &numReductionSteps)
try reduce(&rule.rhs, numReductionSteps: &numReductionSteps)
stats.numReductionSteps = numReductionSteps
switch compare(rule.lhs, rule.rhs, order: order) {
case .equal:
return false
case .lessThan:
swap(&rule.lhs, &rule.rhs)
fallthrough
case .greaterThan:
try addOrientedRule(rule)
return true
}
}
mutating func resolveOverlap(i: Int, from: Int, j: Int, order: Order)
throws(RewritingError) -> Bool {
let lhs = rules[i]
let rhs = rules[j]
log("Critical pair: \(i) vs \(j) at \(from)")
log("\(printWord(rules[i].lhs))")
log("\(String(repeating: " ", count: from))\(printWord(rules[j].lhs))")
var rule = Rule(lhs: [], rhs: [])
let end = lhs.lhs.count
if from + rhs.lhs.count < end {
rule.lhs = lhs.rhs
rule.rhs.reserveCapacity(lhs.lhs.count - rhs.lhs.count + rhs.rhs.count)
rule.rhs.append(contentsOf: lhs.lhs[0 ..< from])
rule.rhs.append(contentsOf: rhs.rhs)
rule.rhs.append(contentsOf: lhs.lhs[(from + rhs.lhs.count)...])
} else {
rule.lhs.reserveCapacity(lhs.rhs.count + rhs.lhs.count - lhs.lhs.count + from)
rule.lhs.append(contentsOf: lhs.rhs)
rule.lhs.append(contentsOf: rhs.lhs[(lhs.lhs.count - from)...])
rule.rhs.reserveCapacity(from + rhs.rhs.count)
rule.rhs.append(contentsOf: lhs.lhs[..<from])
rule.rhs.append(contentsOf: rhs.rhs)
}
return try addRule(&rule, order: order)
}
mutating func processRule(_ i: Int) {
if isReduced(i) { return }
let lhs = rules[i]
var from = 0
while from < lhs.lhs.count {
trie.visitOverlaps(lhs.lhs, from) { j in
precondition(!isReduced(j))
if i < checkedRulesUpTo && j < checkedRulesUpTo { return }
if from == 0 {
if i == j { return }
if rules[j].lhs.count > lhs.lhs.count { return }
}
criticalPairs.append((i: i, from: from, j: j))
}
from += 1
}
}
mutating func completeOne(order: Order) throws(RewritingError) -> Bool {
precondition(state == .initial)
precondition(criticalPairs.isEmpty)
for i in rules.indices {
processRule(i)
}
checkedRulesUpTo = rules.count
stats.numRounds += 1
reduceLeft()
var confluent = true
do {
log("Resolving critical pairs...")
for (i, from, j) in criticalPairs {
if try resolveOverlap(i: i, from: from, j: j, order: order) {
confluent = false
}
}
criticalPairs.removeAll(keepingCapacity: true)
log("All critical pairs resolved")
try reduceRight()
} catch let e {
state = .failed
throw e
}
if confluent {
state = .complete
return true
}
if stats.numRounds > limits.maxRounds {
state = .failed
throw .tooManyRounds
}
return false
}
mutating func complete(order: Order) throws(RewritingError) {
while try !completeOne(order: order) {}
}
func isReduced(_ rule: Int) -> Bool {
let i = (rule >> 5)
let j = (rule & 31)
if i >= reducedRules.count { return false }
return (reducedRules[i] & (1 << j)) != 0
}
mutating func setReduced(_ rule: Int) {
let i = (rule >> 5)
let j = (rule & 31)
while i >= reducedRules.count { reducedRules.append(0) }
reducedRules[i] |= (1 << j)
}
mutating func reduceLeft() {
if rules.isEmpty { return }
log("Reducing left-hand sides...")
for (n, rule) in rules.enumerated() {
if !isReduced(n) && reduceOne(rule.lhs, excluding: n) != nil {
log("Reduced \(n) = \(rule)")
setReduced(n)
trie.remove(rule.lhs, n)
stats.numRulesRemaining -= 1
continue
}
}
precondition(stats.numRulesRemaining > 0)
}
mutating func reduceRight() throws(RewritingError) {
var numReductionSteps = stats.numReductionSteps
for n in rules.indices {
if !isReduced(n) {
var rhs = rules[n].rhs
try reduce(&rhs, numReductionSteps: &numReductionSteps)
rules[n].rhs = rhs
}
}
stats.numReductionSteps = numReductionSteps
}
/// Returns a complete presentation once the rewriting system is complete.
var presentation: Presentation {
var result: [Rule] = []
for (n, rule) in rules.enumerated() {
if !isReduced(n) {
result.append(rule)
}
}
return Presentation(alphabet: alphabet, rules: result)
}
} | swift | github | https://github.com/apple/swift | benchmark/multi-source/Monoids/RewritingSystem.swift |
#include <torch/library.h>
// Register a boxed fallback for the BackendSelect dispatch key under the
// wildcard namespace (`_`), i.e. for every operator.  makeFallthrough()
// makes the dispatcher skip this key and continue to the next one, so
// operators without an explicit BackendSelect kernel are unaffected.
TORCH_LIBRARY_IMPL(_, BackendSelect, m) {
  m.fallback(torch::CppFunction::makeFallthrough());
}
from django.db import models
from django.core.validators import MinValueValidator
class Company(models.Model):
    """A company/organization participating or interested in D-Dagen.

    Fields filled in by the company itself come first; the fields after
    the "for internal use only" marker are managed by the project team.
    """

    # --- filled in by company -----------------------------------------
    AREA_OF_BUSINESS_CHOICES = (
        ("finance", "Finance"),
        ("appdev", "App Development"),
        ("consulting", "Consulting"),
        ("entertainment", "Entertainment"),
        ("comm", "Communication"),
        ("it", "IT Services"),
        ("prodev", "Product Development"),
        ("info", "Information"),
        ("edu", "Education"),
        ("data", "Data & Search"),
        ("fash", "Fashion"),
        ("sec", "Security"),
        ("man", "Management"),
    )

    name = models.CharField(max_length=50)
    description = models.CharField(max_length=1100)
    logotype = models.FileField()
    # NOTE(review): the default "Pick one" is not one of the declared
    # choices, so full_clean() on an untouched instance fails choice
    # validation — confirm whether a valid default (or blank=True) is
    # intended.
    area_of_business = models.CharField(choices=AREA_OF_BUSINESS_CHOICES, default="Pick one", max_length=100)
    employees = models.IntegerField(validators=[MinValueValidator(1)])
    first_time_at_fair = models.BooleanField(default=True)
    billing_address = models.CharField(max_length=200)

    # --- for internal use only ----------------------------------------
    PRIORITY_CHOICES = (
        ("low", "LOW"),
        ("mid", "MID"),
        ("high", "HIGH"),
    )
    comment = models.CharField(max_length=500)
    # Bug fix: `default` must be the *stored value* of a choice ("low"),
    # not the whole (value, label) tuple PRIORITY_CHOICES[0] — a tuple
    # default fails choice validation and would be stringified on save.
    priority = models.CharField(choices=PRIORITY_CHOICES, default="low", max_length=10)

    def __str__(self):
        """Human-readable identifier shown in the admin and shell."""
        return self.name + ": " + self.area_of_business

    def calculateTotalPrice(self):
        """Sum the prices of the packages the company has selected for this
        instance of D-Dagen.  Assumes there is only one instance of each
        package.

        Returns:
            int: the total price (currently always 0 — see TODO).
        """
        total_price = 0
        # TODO: sum the related package prices once that relation exists.
        return total_price
class CompanyStatus(models.Model):
    """One status-change record for a company.

    A new instance should be created each time the company changes status
    in the application process, so the TIMESTAMP column preserves the
    full history over time.  Statuses are managed by the project team or
    automatically by the system.
    """

    STATUS_CHOICES = (
        ("notcontacted", "Not Contacted"),
        ("contacted", "Contacted - Waiting to sign up"),
        ("signedup", "Signed Up - Waiting for contract to be signed"),
        ("contractsigned", "Contract Signed"),
        ("dec", "Declined"),
    )

    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    # auto_now_add: set once when the row is created, never updated.
    TIMESTAMP = models.DateTimeField(auto_now_add=True)
    # Bug fix: `default` must be the stored value "notcontacted", not the
    # (value, label) tuple STATUS_CHOICES[0], which fails choice validation.
    STATUS = models.CharField(choices=STATUS_CHOICES, default="notcontacted", max_length=100)
import { expect, it } from 'vitest'
import { segment } from './segment'
// --- basic splitting ---------------------------------------------------
it('should result in a single segment when the separator is not present', () => {
  expect(segment('foo', ':')).toEqual(['foo'])
})
it('should split by the separator', () => {
  expect(segment('foo:bar:baz', ':')).toEqual(['foo', 'bar', 'baz'])
})
// --- separators inside bracket pairs are ignored -----------------------
it('should not split inside of parens', () => {
  expect(segment('a:(b:c):d', ':')).toEqual(['a', '(b:c)', 'd'])
})
it('should not split inside of brackets', () => {
  expect(segment('a:[b:c]:d', ':')).toEqual(['a', '[b:c]', 'd'])
})
it('should not split inside of curlies', () => {
  expect(segment('a:{b:c}:d', ':')).toEqual(['a', '{b:c}', 'd'])
})
// --- separators inside quoted strings are ignored ----------------------
it('should not split inside of double quotes', () => {
  expect(segment('a:"b:c":d', ':')).toEqual(['a', '"b:c"', 'd'])
})
it('should not split inside of single quotes', () => {
  expect(segment("a:'b:c':d", ':')).toEqual(['a', "'b:c'", 'd'])
})
// Unterminated quotes must not hang or throw: the rest of the input
// becomes a single trailing segment.
it('should not crash when double quotes are unbalanced', () => {
  expect(segment('a:"b:c:d', ':')).toEqual(['a', '"b:c:d'])
})
it('should not crash when single quotes are unbalanced', () => {
  expect(segment("a:'b:c:d", ':')).toEqual(['a', "'b:c:d"])
})
// --- backslash escapes -------------------------------------------------
it('should skip escaped double quotes', () => {
  expect(segment(String.raw`a:"b:c\":d":e`, ':')).toEqual(['a', String.raw`"b:c\":d"`, 'e'])
})
it('should skip escaped single quotes', () => {
  expect(segment(String.raw`a:'b:c\':d':e`, ':')).toEqual(['a', String.raw`'b:c\':d'`, 'e'])
})
// Edge case: the separator character is itself the escape character.
it('should split by the escape sequence which is escape as well', () => {
  expect(segment('a\\b\\c\\d', '\\')).toEqual(['a', 'b', 'c', 'd'])
  expect(segment('a\\(b\\c)\\d', '\\')).toEqual(['a', '(b\\c)', 'd'])
  expect(segment('a\\[b\\c]\\d', '\\')).toEqual(['a', '[b\\c]', 'd'])
  expect(segment('a\\{b\\c}\\d', '\\')).toEqual(['a', '{b\\c}', 'd'])
})
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file creates release artifacts (tar files, container images) that are
# ready to distribute to install or distribute to end users.
###############################################################################
# Most of the ::release:: namespace functions have been moved to
# github.com/kubernetes/release. Have a look in that repo and specifically in
# lib/releaselib.sh for ::release::-related functionality.
###############################################################################
# This is where the final release artifacts are created locally
readonly RELEASE_STAGE="${LOCAL_OUTPUT_ROOT}/release-stage"
readonly RELEASE_TARS="${LOCAL_OUTPUT_ROOT}/release-tars"
readonly RELEASE_IMAGES="${LOCAL_OUTPUT_ROOT}/release-images"
KUBE_BUILD_CONFORMANCE=${KUBE_BUILD_CONFORMANCE:-n}
KUBE_BUILD_PULL_LATEST_IMAGES=${KUBE_BUILD_PULL_LATEST_IMAGES:-y}
# ---------------------------------------------------------------------------
# Build final release artifacts
# Remove editor backup files (`*~`, `#*#`) and macOS metadata (`.DS*`)
# left behind anywhere under the release staging directory.
function kube::release::clean_cruft() {
  # One traversal with grouped -o conditions instead of three passes.
  find "${RELEASE_STAGE}" \( -name '*~' -o -name '#*#' -o -name '.DS*' \) -exec rm {} \;
}
# Build all release tarballs into ${RELEASE_TARS}.  Independent tarballs
# are built in parallel (&); kube::util::wait-for-jobs is used as a
# barrier between the dependent phases.
function kube::release::package_tarballs() {
  # Clean out any old releases
  rm -rf "${RELEASE_STAGE}" "${RELEASE_TARS}" "${RELEASE_IMAGES}"
  mkdir -p "${RELEASE_TARS}"
  # Phase 1: src, client and manifests tarballs have no mutual dependencies.
  kube::release::package_src_tarball &
  kube::release::package_client_tarballs &
  kube::release::package_kube_manifests_tarball &
  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
  # _node and _server tarballs depend on _src tarball
  kube::release::package_node_tarballs &
  kube::release::package_server_tarballs &
  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
  kube::release::package_final_tarball & # _final depends on some of the previous phases
  kube::release::package_test_tarballs & # _test doesn't depend on anything
  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
}
# Package the source code we built, for compliance/licensing/audit/yadda.
# Produces ${RELEASE_TARS}/kubernetes-src.tar.gz.
function kube::release::package_src_tarball() {
  local -r src_tarball="${RELEASE_TARS}/kubernetes-src.tar.gz"
  kube::log::status "Building tarball: src"
  if [[ "${KUBE_GIT_TREE_STATE-}" = 'clean' ]]; then
    # Clean checkout: let git produce an exact archive of HEAD.
    git archive -o "${src_tarball}" HEAD
  else
    # Dirty tree: tar the working directory instead, pruning build output
    # (_*), git metadata and local config.  --transform rewrites the
    # absolute source path into a top-level "kubernetes/" prefix.
    find "${KUBE_ROOT}" -mindepth 1 -maxdepth 1 \
      ! \( \
        \( -path "${KUBE_ROOT}"/_\* -o \
        -path "${KUBE_ROOT}"/.git\* -o \
        -path "${KUBE_ROOT}"/.config\* -o \
        -path "${KUBE_ROOT}"/.gsutil\* \
        \) -prune \
      \) -print0 \
    | "${TAR}" czf "${src_tarball}" --transform "s|${KUBE_ROOT#/*}|kubernetes|" --null -T -
  fi
}
# Package up all of the cross compiled clients. Over time this should grow into
# a full SDK
# One kubernetes-client-<os>-<arch>.tar.gz per platform, built in parallel
# subshells.  Honors KUBE_BUILD_PLATFORMS when set; otherwise packages
# every platform present under ${LOCAL_OUTPUT_BINPATH}.
function kube::release::package_client_tarballs() {
  # Find all of the built client binaries
  local long_platforms=("${LOCAL_OUTPUT_BINPATH}"/*/*)
  if [[ -n ${KUBE_BUILD_PLATFORMS-} ]]; then
    read -ra long_platforms <<< "${KUBE_BUILD_PLATFORMS}"
  fi
  for platform_long in "${long_platforms[@]}"; do
    local platform
    local platform_tag
    platform=${platform_long##"${LOCAL_OUTPUT_BINPATH}"/} # Strip LOCAL_OUTPUT_BINPATH
    platform_tag=${platform/\//-} # Replace a "/" for a "-"
    kube::log::status "Starting tarball: client $platform_tag"
    (
      local release_stage="${RELEASE_STAGE}/client/${platform_tag}/kubernetes"
      rm -rf "${release_stage}"
      mkdir -p "${release_stage}/client/bin"
      # Windows binaries carry a .exe suffix, so they have their own list.
      local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
      if [[ "${platform%/*}" = 'windows' ]]; then
        client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
      fi
      # This fancy expression will expand to prepend a path
      # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
      # client_bins array.
      cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
        "${release_stage}/client/bin/"
      kube::release::clean_cruft
      local package_name="${RELEASE_TARS}/kubernetes-client-${platform_tag}.tar.gz"
      kube::release::create_tarball "${package_name}" "${release_stage}/.."
    ) &
  done
  kube::log::status "Waiting on tarballs"
  kube::util::wait-for-jobs || { kube::log::error "client tarball creation failed"; exit 1; }
}
# Package up all of the node binaries
# One kubernetes-node-<os>-<arch>.tar.gz per KUBE_NODE_PLATFORMS entry,
# each also bundling the client binaries, LICENSES, the version stamp and
# the src tarball (hence the dependency on package_src_tarball).
function kube::release::package_node_tarballs() {
  local platform
  for platform in "${KUBE_NODE_PLATFORMS[@]}"; do
    local platform_tag
    local arch
    platform_tag=${platform/\//-} # Replace a "/" for a "-"
    arch=$(basename "${platform}")
    kube::log::status "Building tarball: node $platform_tag"
    local release_stage="${RELEASE_STAGE}/node/${platform_tag}/kubernetes"
    rm -rf "${release_stage}"
    mkdir -p "${release_stage}/node/bin"
    # Windows binaries carry a .exe suffix, so they have their own list.
    local node_bins=("${KUBE_NODE_BINARIES[@]}")
    if [[ "${platform%/*}" = 'windows' ]]; then
      node_bins=("${KUBE_NODE_BINARIES_WIN[@]}")
    fi
    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
    # node_bins array.
    cp "${node_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
      "${release_stage}/node/bin/"
    # TODO: Docker images here
    # kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
    # Include the client binaries here too as they are useful debugging tools.
    local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
    if [[ "${platform%/*}" = 'windows' ]]; then
      client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
    fi
    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
    # client_bins array.
    cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
      "${release_stage}/node/bin/"
    cp -R "${KUBE_ROOT}/LICENSES" "${release_stage}/"
    echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"
    cp "${RELEASE_TARS}/kubernetes-src.tar.gz" "${release_stage}/"
    kube::release::clean_cruft
    local package_name="${RELEASE_TARS}/kubernetes-node-${platform_tag}.tar.gz"
    kube::release::create_tarball "${package_name}" "${release_stage}/.."
  done
}
# Package up all of the server binaries in docker images
# Stages the server image binaries per platform and delegates image
# building to kube::release::create_docker_images_for_server.  The staging
# directories it creates are reused by package_server_tarballs.
function kube::release::build_server_images() {
  kube::util::ensure-docker-buildx
  # Clean out any old images
  rm -rf "${RELEASE_IMAGES}"
  local platform
  for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
    local platform_tag
    local arch
    platform_tag=${platform/\//-} # Replace a "/" for a "-"
    arch=$(basename "${platform}")
    kube::log::status "Building images: $platform_tag"
    local release_stage
    release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
    rm -rf "${release_stage}"
    mkdir -p "${release_stage}/server/bin"
    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
    # KUBE_SERVER_IMAGE_BINARIES array.
    cp "${KUBE_SERVER_IMAGE_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
      "${release_stage}/server/bin/"
    kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
  done
}
# Package up all of the server binaries
# Builds the server docker images first (which also stages server/bin),
# then produces one kubernetes-server-<os>-<arch>.tar.gz per platform,
# bundling client binaries, LICENSES, the version stamp and the src tarball.
function kube::release::package_server_tarballs() {
  kube::release::build_server_images
  local platform
  for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
    local platform_tag
    local arch
    platform_tag=${platform/\//-} # Replace a "/" for a "-"
    arch=$(basename "${platform}")
    kube::log::status "Building tarball: server $platform_tag"
    # NOTE: this directory was setup in kube::release::build_server_images
    local release_stage
    release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
    mkdir -p "${release_stage}/addons"
    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
    # KUBE_SERVER_BINARIES array.
    cp "${KUBE_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
      "${release_stage}/server/bin/"
    # Include the client binaries here too as they are useful debugging tools.
    local client_bins
    client_bins=("${KUBE_CLIENT_BINARIES[@]}")
    if [[ "${platform%/*}" = 'windows' ]]; then
      client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
    fi
    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
    # client_bins array.
    cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
      "${release_stage}/server/bin/"
    cp -R "${KUBE_ROOT}/LICENSES" "${release_stage}/"
    echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"
    cp "${RELEASE_TARS}/kubernetes-src.tar.gz" "${release_stage}/"
    kube::release::clean_cruft
    local package_name
    package_name="${RELEASE_TARS}/kubernetes-server-${platform_tag}.tar.gz"
    kube::release::create_tarball "${package_name}" "${release_stage}/.."
  done
}
# Print the MD5 digest of the file named by $1.
# Prefers the BSD `md5` tool (macOS) and falls back to GNU `md5sum`
# (Linux).  Uses `command -v` instead of the non-portable external
# `which` (ShellCheck SC2230).
function kube::release::md5() {
  if command -v md5 >/dev/null 2>&1; then
    md5 -q "$1"
  else
    md5sum "$1" | awk '{ print $1 }'
  fi
}
# Print the SHA-1 digest of the file named by $1.
# Prefers GNU `sha1sum` (Linux) and falls back to `shasum -a1` (macOS).
# Uses `command -v` instead of the non-portable external `which`
# (ShellCheck SC2230).
function kube::release::sha1() {
  if command -v sha1sum >/dev/null 2>&1; then
    sha1sum "$1" | awk '{ print $1 }'
  else
    shasum -a1 "$1" | awk '{ print $1 }'
  fi
}
# Build the conformance test image for one architecture and optionally
# save it as a tar archive.
#  $1 - arch
#  $2 - image registry
#  $3 - image version/tag
#  $4 - (optional) directory to `docker save` the image into
# The local image is always removed afterwards.
function kube::release::build_conformance_image() {
  local -r arch="$1"
  local -r registry="$2"
  local -r version="$3"
  local -r save_dir="${4-}"
  kube::log::status "Building conformance image for arch: ${arch}"
  ARCH="${arch}" REGISTRY="${registry}" VERSION="${version}" \
    make -C test/conformance/image/ build >/dev/null
  local conformance_tag
  conformance_tag="${registry}/conformance-${arch}:${version}"
  if [[ -n "${save_dir}" ]]; then
    "${DOCKER[@]}" save "${conformance_tag}" > "${save_dir}/conformance-${arch}.tar"
  fi
  kube::log::status "Deleting conformance image ${conformance_tag}"
  "${DOCKER[@]}" rmi "${conformance_tag}" &>/dev/null || true
}
# This builds all the release docker images (One docker image per binary)
# Args:
#  $1 - binary_dir, the directory to save the tared images to.
#  $2 - arch, architecture for which we are building docker images.
# Builds run in parallel background subshells; each image is saved as
# <binary>.tar next to the binary, hard-linked into ${RELEASE_IMAGES}/<arch>,
# and the local docker tags are removed afterwards.
function kube::release::create_docker_images_for_server() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    local binary_dir
    local arch
    local binaries
    local images_dir
    binary_dir="$1"
    arch="$2"
    binaries=$(kube::build::get_docker_wrapped_binaries)
    images_dir="${RELEASE_IMAGES}/${arch}"
    mkdir -p "${images_dir}"
    # registry.k8s.io is the constant tag in the docker archives, this is also the default for config scripts in GKE.
    # We can use KUBE_DOCKER_REGISTRY to include and extra registry in the docker archive.
    # If we use KUBE_DOCKER_REGISTRY="registry.k8s.io", then the extra tag (same) is ignored, see release_docker_image_tag below.
    local -r docker_registry="registry.k8s.io"
    # Docker tags cannot contain '+'
    local docker_tag="${KUBE_GIT_VERSION/+/_}"
    if [[ -z "${docker_tag}" ]]; then
      kube::log::error "git version information missing; cannot create Docker tag"
      return 1
    fi
    # provide `--pull` argument to `docker build` if `KUBE_BUILD_PULL_LATEST_IMAGES`
    # is set to y or Y; otherwise try to build the image without forcefully
    # pulling the latest base image.
    local docker_build_opts
    docker_build_opts=
    if [[ "${KUBE_BUILD_PULL_LATEST_IMAGES}" =~ [yY] ]]; then
      docker_build_opts='--pull'
    fi
    # Each entry from get_docker_wrapped_binaries is "<binary>,<base image>".
    for wrappable in $binaries; do
      local binary_name=${wrappable%%,*}
      local base_image=${wrappable##*,}
      local binary_file_path="${binary_dir}/${binary_name}"
      local docker_build_path="${binary_file_path}.dockerbuild"
      local docker_image_tag="${docker_registry}/${binary_name}-${arch}:${docker_tag}"
      local docker_file_path="${KUBE_ROOT}/build/server-image/Dockerfile"
      # If this binary has its own Dockerfile use that else use the generic Dockerfile.
      if [[ -f "${KUBE_ROOT}/build/server-image/${binary_name}/Dockerfile" ]]; then
        docker_file_path="${KUBE_ROOT}/build/server-image/${binary_name}/Dockerfile"
      fi
      kube::log::status "Starting docker build for image: ${binary_name}-${arch} with base ${base_image}"
      (
        rm -rf "${docker_build_path}"
        mkdir -p "${docker_build_path}"
        # Hard-link the binary into the build context (avoids a copy).
        ln "${binary_file_path}" "${docker_build_path}/${binary_name}"
        # Build output is only shown on failure.
        local build_log="${docker_build_path}/build.log"
        if ! DOCKER_CLI_EXPERIMENTAL=enabled "${DOCKER[@]}" buildx build \
          -f "${docker_file_path}" \
          --platform linux/"${arch}" \
          --load ${docker_build_opts:+"${docker_build_opts}"} \
          -t "${docker_image_tag}" \
          --build-arg BASEIMAGE="${base_image}" \
          --build-arg SETCAP_IMAGE="${KUBE_BUILD_SETCAP_IMAGE}" \
          --build-arg BINARY="${binary_name}" \
          "${docker_build_path}" >"${build_log}" 2>&1; then
          cat "${build_log}"
          exit 1
        fi
        rm "${build_log}"
        # If we are building an official/alpha/beta release we want to keep
        # docker images and tag them appropriately.
        local -r release_docker_image_tag="${KUBE_DOCKER_REGISTRY-$docker_registry}/${binary_name}-${arch}:${KUBE_DOCKER_IMAGE_TAG-$docker_tag}"
        if [[ "${release_docker_image_tag}" != "${docker_image_tag}" ]]; then
          kube::log::status "Tagging docker image ${docker_image_tag} as ${release_docker_image_tag}"
          "${DOCKER[@]}" rmi "${release_docker_image_tag}" 2>/dev/null || true
          "${DOCKER[@]}" tag "${docker_image_tag}" "${release_docker_image_tag}" 2>/dev/null
        fi
        "${DOCKER[@]}" save -o "${binary_file_path}.tar" "${docker_image_tag}" "${release_docker_image_tag}"
        echo "${docker_tag}" > "${binary_file_path}.docker_tag"
        rm -rf "${docker_build_path}"
        ln "${binary_file_path}.tar" "${images_dir}/"
        kube::log::status "Deleting docker image ${docker_image_tag}"
        "${DOCKER[@]}" rmi "${docker_image_tag}" &>/dev/null || true
      ) &
    done
    if [[ "${KUBE_BUILD_CONFORMANCE}" =~ [yY] ]]; then
      kube::release::build_conformance_image "${arch}" "${docker_registry}" \
        "${docker_tag}" "${images_dir}" &
    fi
    kube::util::wait-for-jobs || { kube::log::error "previous Docker build failed"; return 1; }
    kube::log::status "Docker builds done"
  )
}
# This will pack kube-system manifests files for distros such as COS.
# Copies the GCE control-plane manifests plus the GCI configure helpers
# and the merged addon manifests into kubernetes-manifests.tar.gz.
function kube::release::package_kube_manifests_tarball() {
  kube::log::status "Building tarball: manifests"
  local src_dir="${KUBE_ROOT}/cluster/gce/manifests"
  local release_stage="${RELEASE_STAGE}/manifests/kubernetes"
  rm -rf "${release_stage}"
  local dst_dir="${release_stage}/gci-trusty"
  mkdir -p "${dst_dir}"
  cp "${src_dir}/kube-proxy.manifest" "${dst_dir}/"
  cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
  cp "${src_dir}/etcd.manifest" "${dst_dir}"
  cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}"
  cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}"
  cp "${src_dir}/konnectivity-server.yaml" "${dst_dir}"
  cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}"
  cp "${src_dir}/cloud-controller-manager.manifest" "${dst_dir}"
  cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
  cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
  cp "${src_dir}/glbc.manifest" "${dst_dir}"
  find "${src_dir}" -name 'internal-*' -exec cp {} "${dst_dir}" \;
  cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
  cp "${KUBE_ROOT}/cluster/gce/gci/configure-kubeapiserver.sh" "${dst_dir}/configure-kubeapiserver.sh"
  # The GKE-internal helper only exists in internal builds.
  if [[ -e "${KUBE_ROOT}/cluster/gce/gci/gke-internal-configure-helper.sh" ]]; then
    cp "${KUBE_ROOT}/cluster/gce/gci/gke-internal-configure-helper.sh" "${dst_dir}/"
  fi
  # Merge GCE-specific addons with general purpose addons.
  # tar-pipe copies the yaml/json manifests while stripping the source
  # path prefix and excluding demo files.
  for d in cluster/addons cluster/gce/addons; do
    find "${KUBE_ROOT}/${d}" \( \( -name \*.yaml -o -name \*.yaml.in -o -name \*.json \) -a ! \( -name \*demo\* \) \) -print0 | "${TAR}" c --transform "s|${KUBE_ROOT#/*}/${d}||" --null -T - | "${TAR}" x -C "${dst_dir}"
  done
  kube::release::clean_cruft
  local package_name="${RELEASE_TARS}/kubernetes-manifests.tar.gz"
  kube::release::create_tarball "${package_name}" "${release_stage}/.."
}
# Builds tarballs for each test platform containing the appropriate binaries.
# Server-side test binaries are staged first (sequentially) for the subset
# of platforms in KUBE_TEST_SERVER_PLATFORMS; then each platform tarball
# is packaged in a parallel subshell.
function kube::release::package_test_platform_tarballs() {
  local platform
  rm -rf "${RELEASE_STAGE}/test"
  # KUBE_TEST_SERVER_PLATFORMS is a subset of KUBE_TEST_PLATFORMS,
  # so process it first.
  for platform in "${KUBE_TEST_SERVER_PLATFORMS[@]}"; do
    local platform_tag=${platform/\//-} # Replace a "/" for a "-"
    local release_stage="${RELEASE_STAGE}/test/${platform_tag}/kubernetes"
    mkdir -p "${release_stage}/test/bin"
    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
    # KUBE_TEST_SERVER_BINARIES array.
    cp "${KUBE_TEST_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
      "${release_stage}/test/bin/"
  done
  for platform in "${KUBE_TEST_PLATFORMS[@]}"; do
    (
      local platform_tag=${platform/\//-} # Replace a "/" for a "-"
      kube::log::status "Starting tarball: test $platform_tag"
      local release_stage="${RELEASE_STAGE}/test/${platform_tag}/kubernetes"
      mkdir -p "${release_stage}/test/bin"
      # Windows binaries carry a .exe suffix, so they have their own list.
      local test_bins=("${KUBE_TEST_BINARIES[@]}")
      if [[ "${platform%/*}" = 'windows' ]]; then
        test_bins=("${KUBE_TEST_BINARIES_WIN[@]}")
      fi
      # This fancy expression will expand to prepend a path
      # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
      # test_bins array.
      cp "${test_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
        "${release_stage}/test/bin/"
      local package_name="${RELEASE_TARS}/kubernetes-test-${platform_tag}.tar.gz"
      kube::release::create_tarball "${package_name}" "${release_stage}/.."
    ) &
  done
  kube::log::status "Waiting on test tarballs"
  kube::util::wait-for-jobs || { kube::log::error "test tarball creation failed"; exit 1; }
}
# This is the stuff you need to run tests from the binary distribution.
# Builds the per-platform test tarballs, then the platform-independent
# kubernetes-test-portable.tar.gz (test images + KUBE_TEST_PORTABLE files).
function kube::release::package_test_tarballs() {
  kube::release::package_test_platform_tarballs
  kube::log::status "Building tarball: test portable"
  local release_stage="${RELEASE_STAGE}/test/kubernetes"
  rm -rf "${release_stage}"
  mkdir -p "${release_stage}"
  # First add test image files and other portable sources so we can create
  # the portable test tarball.
  mkdir -p "${release_stage}/test/images"
  cp -fR "${KUBE_ROOT}/test/images" "${release_stage}/test/"
  # tar-pipe preserves the relative paths listed in KUBE_TEST_PORTABLE.
  "${TAR}" c "${KUBE_TEST_PORTABLE[@]}" | "${TAR}" x -C "${release_stage}"
  kube::release::clean_cruft
  local portable_tarball_name="${RELEASE_TARS}/kubernetes-test-portable.tar.gz"
  kube::release::create_tarball "${portable_tarball_name}" "${release_stage}/.."
}
# This is all the platform-independent stuff you need to run/install kubernetes.
# Arch-specific binaries will need to be downloaded separately (possibly by
# using the bundled cluster/get-kube-binaries.sh script).
# Included in this tarball:
#   - Cluster spin up/down scripts and configs for various cloud providers
#   - Tarballs for manifest configs that are ready to be uploaded
#   - Examples (which may or may not still work)
#   - The remnants of the docs/ directory
function kube::release::package_final_tarball() {
  kube::log::status "Building tarball: final"
  # This isn't a "full" tarball anymore, but the release lib still expects
  # artifacts under "full/kubernetes/"
  local release_stage="${RELEASE_STAGE}/full/kubernetes"
  rm -rf "${release_stage}"
  mkdir -p "${release_stage}"
  # Binaries are no longer shipped here; leave README breadcrumbs instead.
  mkdir -p "${release_stage}/client"
  cat <<EOF > "${release_stage}/client/README"
Client binaries are no longer included in the Kubernetes final tarball.
Run cluster/get-kube-binaries.sh to download client and server binaries.
EOF
  # We want everything in /cluster.
  cp -R "${KUBE_ROOT}/cluster" "${release_stage}/"
  mkdir -p "${release_stage}/server"
  cp "${RELEASE_TARS}/kubernetes-manifests.tar.gz" "${release_stage}/server/"
  cat <<EOF > "${release_stage}/server/README"
Server binary tarballs are no longer included in the Kubernetes final tarball.
Run cluster/get-kube-binaries.sh to download client and server binaries.
EOF
  # Include hack/lib as a dependency for the cluster/ scripts
  mkdir -p "${release_stage}/hack"
  cp -R "${KUBE_ROOT}/hack/lib" "${release_stage}/hack/"
  cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
  cp "${KUBE_ROOT}/README.md" "${release_stage}/"
  cp -R "${KUBE_ROOT}/LICENSES" "${release_stage}/"
  echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"
  kube::release::clean_cruft
  local package_name="${RELEASE_TARS}/kubernetes.tar.gz"
  kube::release::create_tarball "${package_name}" "${release_stage}/.."
}
# Create one release tarball.
#  $1 - output .tar.gz path
#  $2 - staging directory; ${2}/kubernetes is what gets packaged
# File ownership is normalized to root (0:0) so archives are reproducible
# regardless of who ran the build.
function kube::release::create_tarball() {
  kube::build::ensure_tar
  local -r tarfile="$1"
  local -r stagingdir="$2"
  "${TAR}" czf "${tarfile}" -C "${stagingdir}" kubernetes --owner=0 --group=0
}
name: Self-hosted runner scale set (AMD mi355 scheduled CI caller)

# Note: For every job in this workflow, the name of the runner scale set is finalized in the runner yaml i.e. huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml
# For example, 1gpu : amd-mi355-ci-1gpu
#              2gpu : amd-mi355-ci-2gpu

# Triggers:
#   - workflow_run: runs after "Self-hosted runner (AMD scheduled CI caller)"
#     finishes on main. NOTE(review): `types: [completed]` fires for ANY
#     conclusion (success, failure, cancellation) -- confirm the reusable
#     workflow checks the upstream result if gating on success is intended.
#   - push: pushing a branch matching run_amd_scheduled_ci_caller* lets a
#     maintainer trigger the full suite from a test branch.
on:
  workflow_run:
    workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
    branches: ["main"]
    types: [completed]
  push:
    branches:
      - run_amd_scheduled_ci_caller*

# All four jobs delegate to the same reusable workflow and differ only in the
# `job` they ask it to run (and, for DeepSpeed, the docker image). They share
# the hfc-amd-mi355 runner group, the "#amd-hf-ci" Slack channel, and report
# into hf-transformers-bot/transformers-ci-dummy (presumably a staging/report
# repo -- verify before relying on these results).
jobs:
  # Per-model test suite.
  model-ci:
    name: Model CI
    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
    with:
      job: run_models_gpu
      slack_report_channel: "#amd-hf-ci"
      runner_group: hfc-amd-mi355
      docker: huggingface/transformers-pytorch-amd-gpu
      ci_event: Scheduled CI (AMD) - mi355
      report_repo_id: hf-transformers-bot/transformers-ci-dummy
    secrets: inherit

  # Pipeline tests under PyTorch.
  torch-pipeline:
    name: Torch pipeline CI
    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
    with:
      job: run_pipelines_torch_gpu
      slack_report_channel: "#amd-hf-ci"
      runner_group: hfc-amd-mi355
      docker: huggingface/transformers-pytorch-amd-gpu
      ci_event: Scheduled CI (AMD) - mi355
      report_repo_id: hf-transformers-bot/transformers-ci-dummy
    secrets: inherit

  # Example-script tests.
  example-ci:
    name: Example CI
    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
    with:
      job: run_examples_gpu
      slack_report_channel: "#amd-hf-ci"
      runner_group: hfc-amd-mi355
      docker: huggingface/transformers-pytorch-amd-gpu
      ci_event: Scheduled CI (AMD) - mi355
      report_repo_id: hf-transformers-bot/transformers-ci-dummy
    secrets: inherit

  # DeepSpeed / torch CUDA-extension tests. Unlike the other jobs this one
  # uses a ROCm 7.0 preview image rather than transformers-pytorch-amd-gpu.
  deepspeed-ci:
    name: DeepSpeed CI
    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main
    with:
      job: run_torch_cuda_extensions_gpu
      slack_report_channel: "#amd-hf-ci"
      runner_group: hfc-amd-mi355
      docker: huggingface/testing-rocm7.0-preview
      ci_event: Scheduled CI (AMD) - mi355
      report_repo_id: hf-transformers-bot/transformers-ci-dummy
    secrets: inherit
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.