code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
"""
Rewrite ``/static/...``, ``/course/...`` and ``/jump_to_id/...`` URLs that
appear inside course content so they resolve against collectstatic output,
course-specific asset storage, or LMS redirect handlers.

NOTE: ported off Python 2 — the original used a ``ur"..."`` string literal
(a syntax error on Python 3) and the ``unicode`` builtin (removed in
Python 3).  Behavior is otherwise unchanged.
"""
import logging
import re

from django.contrib.staticfiles.storage import staticfiles_storage
from django.contrib.staticfiles import finders
from django.conf import settings

from static_replace.models import AssetBaseUrlConfig, AssetExcludedExtensionsConfig
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.contentstore.content import StaticContent
from opaque_keys.edx.locator import AssetLocator

log = logging.getLogger(__name__)

# XBlock-served static resources must never be rewritten as course assets.
XBLOCK_STATIC_RESOURCE_PREFIX = '/static/xblock'


def _url_replace_regex(prefix):
    """
    Match static urls in quotes that don't end in '?raw'.

    To anyone contemplating making this more complicated:
    http://xkcd.com/1171/

    ``prefix`` is interpolated into the pattern verbatim, so callers may pass
    a sub-pattern (e.g. an alternation) as well as a literal path prefix.
    """
    # FIX: was a Python 2 ``ur"""..."""`` literal; the ``ur`` prefix is a
    # syntax error on Python 3.  A plain raw string yields the same pattern.
    return r"""
        (?x)                      # flags=re.VERBOSE
        (?P<quote>\\?['"])        # the opening quotes
        (?P<prefix>{prefix})      # the prefix
        (?P<rest>.*?)             # everything else in the url
        (?P=quote)                # the first matching closing quote
        """.format(prefix=prefix)


def try_staticfiles_lookup(path):
    """
    Try to lookup a path in staticfiles_storage.  If it fails, return
    a dead link instead of raising an exception.
    """
    try:
        url = staticfiles_storage.url(path)
    except Exception as err:
        # Deliberately broad: any storage failure degrades to a dead link
        # rather than breaking page rendering.
        log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
            path, str(err)))
        # Just return the original path; don't kill everything.
        url = path
    return url


def replace_jump_to_id_urls(text, course_id, jump_to_id_base_url):
    """
    This will replace a link to another piece of courseware to a 'jump_to'
    URL that will redirect to the right place in the courseware.

    NOTE: This is similar to replace_course_urls in terms of functionality
    but it is intended to be used when we only have an 'id' that the
    course author provides.  This is much more helpful when using Studio
    authored courses since they don't need to know the path.  This is also
    durable with respect to item moves.

    text: The content over which to perform the substitutions
    course_id: The course_id in which this rewrite happens
    jump_to_id_base_url: An app-tier (e.g. LMS) absolute path to the base
        of the handler that will perform the redirect,
        e.g. /courses/<org>/<course>/<run>/jump_to_id.  NOTE the <id> will
        be appended to the end of this URL at re-write time.

    output: <text> after the link rewriting rules are applied
    """
    def replace_jump_to_id_url(match):
        quote = match.group('quote')
        rest = match.group('rest')
        return "".join([quote, jump_to_id_base_url + rest, quote])

    return re.sub(_url_replace_regex('/jump_to_id/'), replace_jump_to_id_url, text)


def replace_course_urls(text, course_key):
    """
    Replace /course/$stuff urls with /courses/$course_id/$stuff urls

    text: The text to replace
    course_key: The course key whose deprecated string form is spliced in

    returns: text with the links replaced
    """
    course_id = course_key.to_deprecated_string()

    def replace_course_url(match):
        quote = match.group('quote')
        rest = match.group('rest')
        return "".join([quote, '/courses/' + course_id + '/', rest, quote])

    return re.sub(_url_replace_regex('/course/'), replace_course_url, text)


def process_static_urls(text, replacement_function, data_dir=None):
    """
    Run an arbitrary replacement function on any urls matching the static
    file directory.

    XBlock resource URLs (under XBLOCK_STATIC_RESOURCE_PREFIX) are passed
    through untouched.
    """
    def wrap_part_extraction(match):
        """
        Unwraps a match group for the captures specified in _url_replace_regex
        and forward them on as function arguments
        """
        original = match.group(0)
        prefix = match.group('prefix')
        quote = match.group('quote')
        rest = match.group('rest')

        # Don't rewrite XBlock resource links.  Probably wasn't a good idea
        # that /static works for actual static assets and for magical course
        # asset URLs....
        full_url = prefix + rest

        # FIX: ``unicode`` does not exist on Python 3; ``str`` is the
        # equivalent coercion here.
        starts_with_static_url = full_url.startswith(str(settings.STATIC_URL))
        starts_with_prefix = full_url.startswith(XBLOCK_STATIC_RESOURCE_PREFIX)
        contains_prefix = XBLOCK_STATIC_RESOURCE_PREFIX in full_url
        if starts_with_prefix or (starts_with_static_url and contains_prefix):
            return original

        return replacement_function(original, prefix, quote, rest)

    return re.sub(
        _url_replace_regex(u'(?:{static_url}|/static/)(?!{data_dir})'.format(
            static_url=settings.STATIC_URL,
            data_dir=data_dir
        )),
        wrap_part_extraction,
        text
    )


def make_static_urls_absolute(request, html):
    """
    Converts relative URLs referencing static assets to absolute URLs
    """
    def replace(__, prefix, quote, rest):
        """
        Function to actually do a single relative -> absolute url replacement
        """
        processed = request.build_absolute_uri(prefix + rest)
        return quote + processed + quote

    return process_static_urls(
        html,
        replace
    )


def replace_static_urls(text, data_directory=None, course_id=None, static_asset_path=''):
    """
    Replace /static/$stuff urls either with their correct url as generated by
    collectstatic, (/static/$md5_hashed_stuff) or by the course-specific
    content static url /static/$course_data_dir/$stuff, or, if
    course_namespace is not None, by the correct url in the contentstore
    (/c4x/.. or /asset-loc:..)

    text: The source text to do the substitution in
    data_directory: The directory in which course data is stored
    course_id: The course identifier used to distinguish static content for
        this course in studio
    static_asset_path: Path for static assets, which overrides data_directory
        and course_namespace, if nonempty
    """
    def replace_static_url(original, prefix, quote, rest):
        """
        Replace a single matched url.
        """
        # Don't mess with things that end in '?raw'
        if rest.endswith('?raw'):
            return original

        # In debug mode, if we can find the url as is,
        if settings.DEBUG and finders.find(rest, True):
            return original
        # if we're running with a MongoBacked store course_namespace is not
        # None, then use studio style urls
        elif (not static_asset_path) and course_id:
            # first look in the static file pipeline and see if we are trying
            # to reference a piece of static content which is in the
            # edx-platform repo (e.g. JS associated with an xmodule)
            exists_in_staticfiles_storage = False
            try:
                exists_in_staticfiles_storage = staticfiles_storage.exists(rest)
            except Exception as err:
                log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
                    rest, str(err)))

            if exists_in_staticfiles_storage:
                url = staticfiles_storage.url(rest)
            else:
                # if not, then assume it's courseware specific content and
                # then look in the Mongo-backed database
                base_url = AssetBaseUrlConfig.get_base_url()
                excluded_exts = AssetExcludedExtensionsConfig.get_excluded_extensions()

                url = StaticContent.get_canonicalized_asset_path(course_id, rest, base_url, excluded_exts)

                # Normalize canonical asset locators into path form.
                if AssetLocator.CANONICAL_NAMESPACE in url:
                    url = url.replace('block@', 'block/', 1)
        # Otherwise, look the file up in staticfiles_storage, and append the
        # data directory if needed
        else:
            course_path = "/".join((static_asset_path or data_directory, rest))

            try:
                if staticfiles_storage.exists(rest):
                    url = staticfiles_storage.url(rest)
                else:
                    url = staticfiles_storage.url(course_path)
            # And if that fails, assume that it's course content, and add
            # manually data directory
            except Exception as err:
                log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
                    rest, str(err)))
                url = "".join([prefix, course_path])

        return "".join([quote, url, quote])

    return process_static_urls(text, replace_static_url, data_dir=static_asset_path or data_directory)
unknown
codeparrot/codeparrot-clean
from sympy.core import * def test_rational(): a = Rational(1, 5) assert a**Rational(1, 2) == a**Rational(1, 2) assert 2 * a**Rational(1, 2) == 2 * a**Rational(1, 2) assert a**Rational(3, 2) == a * a**Rational(1, 2) assert 2 * a**Rational(3, 2) == 2*a * a**Rational(1, 2) assert a**Rational(17, 3) == a**5 * a**Rational(2, 3) assert 2 * a**Rational(17, 3) == 2*a**5 * a**Rational(2, 3) def test_large_rational(): e = (Rational(123712**12-1,7)+Rational(1,7))**Rational(1,3) assert e == 234232585392159195136 * (Rational(1,7)**Rational(1,3)) def test_negative_real(): def feq(a,b): return abs(a - b) < 1E-10 assert feq(Basic.One() / Real(-0.5), -Integer(2)) def test_expand(): x = Symbol('x') assert (2**(-1-x)).expand() == Rational(1,2)*2**(-x) def test_issue153(): #test that is runs: a = Basic.sqrt(2*(1+Basic.sqrt(2))) def test_issue350(): #test if powers are simplified correctly a = Symbol('a') assert ((a**Rational(1,3))**Rational(2)) == a**Rational(2,3) assert ((a**Rational(3))**Rational(2,5)) != a**Rational(6,5) a = Symbol('a', real = True) assert (a**Rational(3))**Rational(2,5) == a**Rational(6,5) #assert Number(5)**Rational(2,3)==Number(25)**Rational(1,3) test_issue350()
unknown
codeparrot/codeparrot-clean
"""
A memoizing decorator backed by the Django cache, with optional per-object
in-process caching, argument filtering, and explicit invalidation.

NOTE: this module is Python 2 era code (``basestring``; ``sha1(repr(args))``
would need an encode step on Python 3).
"""
from datetime import datetime, timedelta
from django.db import models
import logging
from .constants import CACHE_DEFAULT, CACHE_INVALIDATED_HOLDER
from hashlib import sha1

#pylint: disable=C0103
# join args into a dotted cache-key string / split it back apart
encodekey = lambda *args: '.'.join([str(x) for x in args])
decodekey = lambda key: key.split('.')
# hash the repr of the call arguments into a fixed-length digest
encodeargs = lambda *args: sha1(repr(args)).hexdigest()


def MODEL_PK(self, func):
    """``extra_key`` helper: key the cache entry on the instance's pk."""
    return self.pk


def OBJECT_PROP(kw):
    """``extra_key`` helper factory: key on an arbitrary instance attribute."""
    def OBJECT_PROP_GETTER(self, func):
        return getattr(self, kw, None)
    return OBJECT_PROP_GETTER


class BaseCache(object):
    """
    Decorator class wrapping a function (or method, via the descriptor
    protocol) with Django-cache memoization.

    Class attributes act as per-subclass defaults:
      seconds        -- TTL (int or callable returning int)
      ignore_result  -- sentinel result value that is never cached
      use_internal   -- also keep a short-lived in-process copy
      num_args       -- only the first N positional args take part in the key
      filter_kwargs  -- only these kwargs take part in the key
      extra_key      -- extra key component (value or callable(instance, func))
    """
    # defaults
    seconds = CACHE_DEFAULT
    ignore_result = None
    use_internal = False
    num_args = None
    filter_kwargs = None
    disabled = False
    extra_key = None

    @property
    def log(self):
        """Lazily-created logger named after the wrapped function's path."""
        log = getattr(self, '_logger', None)
        if log is None:
            log = self._logger = logging.getLogger("%s.%s" % (self.module_path, self.func_path))
        return log

    def __init__(self, func):
        self.func = func
        self.__doc__ = func.__doc__

    def __repr__(self):
        return repr(self.func)

    def __str__(self):
        return str(self.func)

    def __get__(self, instance, cls):
        # Descriptor protocol: remember the owner so method calls can pass
        # ``instance`` through to the wrapped function.
        self._instance = instance
        self._cls = cls
        return self

    def get_cls(self):
        """Resolve the owning class (or module-path string for plain funcs)."""
        cls = getattr(self, '_cls', None)
        instance = getattr(self, '_instance', None)
        if cls is None:
            if instance is not None:
                cls = instance.__class__
            else:
                cls = getattr(self.func, '__module__', None) or '__console__'
        # Manager methods are keyed on the model, not the manager class.
        if isinstance(instance, models.Manager):
            cls = self._instance.model
        return cls

    def get_key_parts(self):
        """Return the list of dotted-key components identifying the function."""
        cls = self.get_cls()
        ret = []
        if isinstance(cls, basestring):
            ret = [cls, self.func.__name__]
        else:
            ret = [cls.__module__, cls.__name__, self.func.__name__]
        if self.extra_key != None:
            extra = self.extra_key
            if callable(self.extra_key):
                instance = getattr(self, '_instance', None)
                extra = self.extra_key(instance, self.func)
            ret.append(extra)
        return ret

    @property
    def module_path(self):
        """Dotted module path of the owner, cached on the instance."""
        path = getattr(self, "_module_path", None)
        if path is None:
            path = self.get_cls()
            if not isinstance(path, basestring):
                path = path.__module__
            self._module_path = path
        return path

    @property
    def func_path(self):
        """``Class.function`` path of the wrapped function, cached."""
        path = getattr(self, "_func_path", None)
        if path is None:
            cls = self.get_cls()
            if not isinstance(cls, basestring):
                cls = cls.__name__
            path = self._func_path = '%s.%s' % (cls, self.func.__name__)
        return path

    def get_args(self, args, kwargs):
        """Select the (args, sorted kwargs) that participate in the key."""
        key_args = None
        key_kwargs = None
        if self.num_args:
            key_args = args[:self.num_args]
        else:
            key_args = args[:]
        if self.filter_kwargs:
            key_kwargs = sorted([(x, y) for x, y in kwargs.items()
                                 if x in self.filter_kwargs])
        else:
            # sorted so equivalent calls always hash identically
            key_kwargs = sorted(kwargs.items())
        return key_args, key_kwargs

    def get_seconds(self):
        """TTL in seconds; ``seconds`` may be a callable."""
        secs = self.seconds or CACHE_DEFAULT
        if callable(secs):
            secs = secs()
        return secs

    def get_key(self, args, kwargs):
        """Build the full cache key (truncated to memcached-safe length)."""
        key = encodeargs(self.get_args(args, kwargs))
        key_parts = list(self.get_key_parts())
        cache_key = encodekey(*(key_parts + [key,]))[:220]
        self.log.debug("key: %s", key)
        return cache_key

    def __call__(self, *args, **kwargs):
        from django.core.cache import cache as _djcache
        if self.use_internal:
            # in-process fast path, valid until the stored expiry timestamp
            if getattr(self, '_int_cache', datetime.min) >= datetime.now():
                self.log.debug("Internal cache hit")
                return getattr(self, '_int_cache_data')
        cache_key = self.get_key(args, kwargs)
        value = _djcache.get(cache_key, None)
        if value not in (None, CACHE_INVALIDATED_HOLDER, self.ignore_result):
            self.log.debug("Cache HIT")
            return value
        self.log.debug("Cache MISS")
        return self.get_and_store(cache_key, args, kwargs)

    def get_and_store(self, cache_key, args, kwargs):
        """Invoke the wrapped function and cache its result under cache_key."""
        from django.core.cache import cache as _djcache
        instance = getattr(self, '_instance', None)
        if instance is None:
            data = self.func(*args, **kwargs)
        else:
            data = self.func(instance, *args, **kwargs)
        if isinstance(data, basestring):
            # don't cache payloads over 1 MiB
            if len(data) > 2**20:
                return data
        if data not in (None, CACHE_INVALIDATED_HOLDER, self.ignore_result):
            self.log.debug("Storing to cache")
            seconds = self.get_seconds()
            if self.use_internal:
                self._int_cache = datetime.now() + timedelta(seconds = seconds)
                self._int_cache_data = data
            _djcache.set(cache_key, data, seconds)
        return data

    def force_miss(self, *args, **kwargs):
        """Recompute the value and store it, bypassing any cached entry."""
        self.log.debug("Forced MISS")
        # BUG FIX: this previously built the key as
        # ``encodekey(key_parts + [key])`` -- passing ONE list argument, so the
        # key became ``str(<list>)`` -- whereas get_key()/__call__ use
        # ``encodekey(*(key_parts + [key]))``.  The refreshed value was thus
        # stored under a key no reader ever looked up.  Delegate to get_key()
        # so both paths agree.
        cache_key = self.get_key(args, kwargs)
        return self.get_and_store(cache_key, args, kwargs)

    def invalidate(self, *args, **kwargs):
        """Mark the entry invalid (short-TTL placeholder) and drop the
        in-process copy."""
        from django.core.cache import cache as _djcache
        self.log.debug("Invalidate")
        cache_key = self.get_key(args, kwargs)
        _djcache.set(cache_key, CACHE_INVALIDATED_HOLDER, 1)
        self._int_cache = datetime.min


# public decorator alias
cache = BaseCache
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from typing import Dict, Type from .base import MerchantCenterLinkServiceTransport from .grpc import MerchantCenterLinkServiceGrpcTransport # Compile a registry of transports. _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[MerchantCenterLinkServiceTransport]] _transport_registry["grpc"] = MerchantCenterLinkServiceGrpcTransport __all__ = ( "MerchantCenterLinkServiceTransport", "MerchantCenterLinkServiceGrpcTransport", )
unknown
codeparrot/codeparrot-clean
""" HyperParser =========== This module defines the HyperParser class, which provides advanced parsing abilities for the ParenMatch and other extensions. The HyperParser uses PyParser. PyParser is intended mostly to give information on the proper indentation of code. HyperParser gives some information on the structure of code, used by extensions to help the user. """ import string import keyword import PyParse class HyperParser: def __init__(self, editwin, index): """Initialize the HyperParser to analyze the surroundings of the given index. """ self.editwin = editwin self.text = text = editwin.text parser = PyParse.Parser(editwin.indentwidth, editwin.tabwidth) def index2line(index): return int(float(index)) lno = index2line(text.index(index)) if not editwin.context_use_ps1: for context in editwin.num_context_lines: startat = max(lno - context, 1) startatindex = `startat` + ".0" stopatindex = "%d.end" % lno # We add the newline because PyParse requires a newline at end. # We add a space so that index won't be at end of line, so that # its status will be the same as the char before it, if should. parser.set_str(text.get(startatindex, stopatindex)+' \n') bod = parser.find_good_parse_start( editwin._build_char_in_string_func(startatindex)) if bod is not None or startat == 1: break parser.set_lo(bod or 0) else: r = text.tag_prevrange("console", index) if r: startatindex = r[1] else: startatindex = "1.0" stopatindex = "%d.end" % lno # We add the newline because PyParse requires a newline at end. # We add a space so that index won't be at end of line, so that # its status will be the same as the char before it, if should. parser.set_str(text.get(startatindex, stopatindex)+' \n') parser.set_lo(0) # We want what the parser has, except for the last newline and space. self.rawtext = parser.str[:-2] # As far as I can see, parser.str preserves the statement we are in, # so that stopatindex can be used to synchronize the string with the # text box indices. 
self.stopatindex = stopatindex self.bracketing = parser.get_last_stmt_bracketing() # find which pairs of bracketing are openers. These always correspond # to a character of rawtext. self.isopener = [i>0 and self.bracketing[i][1] > self.bracketing[i-1][1] for i in range(len(self.bracketing))] self.set_index(index) def set_index(self, index): """Set the index to which the functions relate. Note that it must be in the same statement. """ indexinrawtext = \ len(self.rawtext) - len(self.text.get(index, self.stopatindex)) if indexinrawtext < 0: raise ValueError("The index given is before the analyzed statement") self.indexinrawtext = indexinrawtext # find the rightmost bracket to which index belongs self.indexbracket = 0 while self.indexbracket < len(self.bracketing)-1 and \ self.bracketing[self.indexbracket+1][0] < self.indexinrawtext: self.indexbracket += 1 if self.indexbracket < len(self.bracketing)-1 and \ self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and \ not self.isopener[self.indexbracket+1]: self.indexbracket += 1 def is_in_string(self): """Is the index given to the HyperParser is in a string?""" # The bracket to which we belong should be an opener. # If it's an opener, it has to have a character. return self.isopener[self.indexbracket] and \ self.rawtext[self.bracketing[self.indexbracket][0]] in ('"', "'") def is_in_code(self): """Is the index given to the HyperParser is in a normal code?""" return not self.isopener[self.indexbracket] or \ self.rawtext[self.bracketing[self.indexbracket][0]] not in \ ('#', '"', "'") def get_surrounding_brackets(self, openers='([{', mustclose=False): """If the index given to the HyperParser is surrounded by a bracket defined in openers (or at least has one before it), return the indices of the opening bracket and the closing bracket (or the end of line, whichever comes first). If it is not surrounded by brackets, or the end of line comes before the closing bracket and mustclose is True, returns None. 
""" bracketinglevel = self.bracketing[self.indexbracket][1] before = self.indexbracket while not self.isopener[before] or \ self.rawtext[self.bracketing[before][0]] not in openers or \ self.bracketing[before][1] > bracketinglevel: before -= 1 if before < 0: return None bracketinglevel = min(bracketinglevel, self.bracketing[before][1]) after = self.indexbracket + 1 while after < len(self.bracketing) and \ self.bracketing[after][1] >= bracketinglevel: after += 1 beforeindex = self.text.index("%s-%dc" % (self.stopatindex, len(self.rawtext)-self.bracketing[before][0])) if after >= len(self.bracketing) or \ self.bracketing[after][0] > len(self.rawtext): if mustclose: return None afterindex = self.stopatindex else: # We are after a real char, so it is a ')' and we give the index # before it. afterindex = self.text.index("%s-%dc" % (self.stopatindex, len(self.rawtext)-(self.bracketing[after][0]-1))) return beforeindex, afterindex # This string includes all chars that may be in a white space _whitespace_chars = " \t\n\\" # This string includes all chars that may be in an identifier _id_chars = string.ascii_letters + string.digits + "_" # This string includes all chars that may be the first char of an identifier _id_first_chars = string.ascii_letters + "_" # Given a string and pos, return the number of chars in the identifier # which ends at pos, or 0 if there is no such one. Saved words are not # identifiers. def _eat_identifier(self, str, limit, pos): i = pos while i > limit and str[i-1] in self._id_chars: i -= 1 if i < pos and (str[i] not in self._id_first_chars or \ keyword.iskeyword(str[i:pos])): i = pos return pos - i def get_expression(self): """Return a string with the Python expression which ends at the given index, which is empty if there is no real one. 
""" if not self.is_in_code(): raise ValueError("get_expression should only be called if index "\ "is inside a code.") rawtext = self.rawtext bracketing = self.bracketing brck_index = self.indexbracket brck_limit = bracketing[brck_index][0] pos = self.indexinrawtext last_identifier_pos = pos postdot_phase = True while 1: # Eat whitespaces, comments, and if postdot_phase is False - one dot while 1: if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars: # Eat a whitespace pos -= 1 elif not postdot_phase and \ pos > brck_limit and rawtext[pos-1] == '.': # Eat a dot pos -= 1 postdot_phase = True # The next line will fail if we are *inside* a comment, but we # shouldn't be. elif pos == brck_limit and brck_index > 0 and \ rawtext[bracketing[brck_index-1][0]] == '#': # Eat a comment brck_index -= 2 brck_limit = bracketing[brck_index][0] pos = bracketing[brck_index+1][0] else: # If we didn't eat anything, quit. break if not postdot_phase: # We didn't find a dot, so the expression end at the last # identifier pos. break ret = self._eat_identifier(rawtext, brck_limit, pos) if ret: # There is an identifier to eat pos = pos - ret last_identifier_pos = pos # Now, in order to continue the search, we must find a dot. postdot_phase = False # (the loop continues now) elif pos == brck_limit: # We are at a bracketing limit. If it is a closing bracket, # eat the bracket, otherwise, stop the search. level = bracketing[brck_index][1] while brck_index > 0 and bracketing[brck_index-1][1] > level: brck_index -= 1 if bracketing[brck_index][0] == brck_limit: # We were not at the end of a closing bracket break pos = bracketing[brck_index][0] brck_index -= 1 brck_limit = bracketing[brck_index][0] last_identifier_pos = pos if rawtext[pos] in "([": # [] and () may be used after an identifier, so we # continue. postdot_phase is True, so we don't allow a dot. pass else: # We can't continue after other types of brackets break else: # We've found an operator or something. 
break return rawtext[last_identifier_pos:self.indexinrawtext]
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: onyx_mlag_vip
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Configures MLAG VIP on Mellanox ONYX network devices
description:
  - This module provides declarative management of MLAG virtual IPs
    on Mellanox ONYX network devices.
notes:
  - Tested on ONYX 3.6.4000
options:
  ipaddress:
    description:
      - Virtual IP address of the MLAG. Required if I(state=present).
  group_name:
    description:
      - MLAG group name. Required if I(state=present).
  mac_address:
    description:
      - MLAG system MAC address. Required if I(state=present).
  state:
    description:
      - MLAG VIP state.
    choices: ['present', 'absent']
  delay:
    description:
      - Delay interval, in seconds, waiting for the changes on mlag VIP to take
        effect.
    default: 12
"""

EXAMPLES = """
- name: configure mlag-vip
  onyx_mlag_vip:
    ipaddress: 50.3.3.1/24
    group_name: ansible-test-group
    mac_address: 00:11:12:23:34:45
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device.
  returned: always
  type: list
  sample:
    - mlag-vip ansible_test_group ip 50.3.3.1 /24 force
    - no mlag shutdown
"""

import time

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import show_cmd


class OnyxMLagVipModule(BaseOnyxModule):
    # Declarative management of MLAG virtual IPs: compares the requested
    # (required) config against what the device reports and emits only the
    # commands needed to converge.

    def init_module(self):
        """ initialize module """
        element_spec = dict(
            ipaddress=dict(),
            group_name=dict(),
            mac_address=dict(),
            delay=dict(type='int', default=12),
            state=dict(choices=['present', 'absent'], default='present'),
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)

    def get_required_config(self):
        """Extract the requested MLAG-VIP parameters from module params."""
        module_params = self._module.params
        lag_params = {
            'ipaddress': module_params['ipaddress'],
            'group_name': module_params['group_name'],
            'mac_address': module_params['mac_address'],
            'delay': module_params['delay'],
            'state': module_params['state'],
        }
        self.validate_param_values(lag_params)
        self._required_config = lag_params

    def _show_mlag_cmd(self, cmd):
        # fail_on_error=False: MLAG may simply not be configured yet.
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def _show_mlag(self):
        cmd = "show mlag"
        return self._show_mlag_cmd(cmd)

    def _show_mlag_vip(self):
        cmd = "show mlag-vip"
        return self._show_mlag_cmd(cmd)

    def load_current_config(self):
        """Populate self._current_config from device 'show' output."""
        self._current_config = dict()
        mlag_config = self._show_mlag()
        mlag_vip_config = self._show_mlag_vip()
        if mlag_vip_config:
            mlag_vip = mlag_vip_config.get("MLAG-VIP", {})
            self._current_config['group_name'] = \
                mlag_vip.get("MLAG group name")
            self._current_config['ipaddress'] = \
                mlag_vip.get("MLAG VIP address")
        if mlag_config:
            # system MAC only appears in plain "show mlag" output
            self._current_config['mac_address'] = \
                mlag_config.get("System-mac")

    def generate_commands(self):
        """Dispatch to present/absent command generation."""
        state = self._required_config['state']
        if state == 'present':
            self._generate_mlag_vip_cmds()
        else:
            self._generate_no_mlag_vip_cmds()

    def _generate_mlag_vip_cmds(self):
        """Emit commands only for the attributes that differ from the device."""
        current_group = self._current_config.get('group_name')
        current_ip = self._current_config.get('ipaddress')
        current_mac = self._current_config.get('mac_address')
        # MACs are compared case-insensitively
        if current_mac:
            current_mac = current_mac.lower()
        req_group = self._required_config.get('group_name')
        req_ip = self._required_config.get('ipaddress')
        req_mac = self._required_config.get('mac_address')
        if req_mac:
            req_mac = req_mac.lower()
        if req_group != current_group or req_ip != current_ip:
            # NOTE(review): req_ip is assumed to be in CIDR "addr/mask" form
            # here; a missing '/' (or ipaddress=None with a differing group)
            # would raise -- the docs say ipaddress is required for
            # state=present, but that is not enforced in argument_spec.
            ipaddr, mask = req_ip.split('/')
            self._commands.append(
                'mlag-vip %s ip %s /%s force' % (req_group, ipaddr, mask))
        if req_mac != current_mac:
            self._commands.append(
                'mlag system-mac %s' % (req_mac))
        if self._commands:
            self._commands.append('no mlag shutdown')

    def _generate_no_mlag_vip_cmds(self):
        """Remove the MLAG VIP only if one is currently configured."""
        if self._current_config.get('group_name'):
            self._commands.append('no mlag-vip')

    def check_declarative_intent_params(self, result):
        """After a change, wait `delay` seconds then re-poll device state."""
        if not result['changed']:
            return
        delay_interval = self._required_config.get('delay')
        if delay_interval > 0:
            time.sleep(delay_interval)
            # NOTE(review): the second element is an empty string, which
            # issues a blank command to the device; unclear whether this is a
            # deliberate refresh/no-op or a typo for another show command --
            # confirm against device behavior before changing.
            for cmd in ("show mlag-vip", ""):
                show_cmd(self._module, cmd, json_fmt=False,
                         fail_on_error=False)


def main():
    """ main entry point for module execution """
    OnyxMLagVipModule.main()


if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
""" TODO: add table listing each forecast's peak and peak time... """ import datetime import pytz import numpy as np import pandas as pd from pandas.io.sql import read_sql import matplotlib.dates as mdates from pyiem.plot import figure_axes from pyiem.util import get_autoplot_context, get_dbconn from pyiem.exceptions import NoDataFound MDICT = {"primary": "Primary Field", "secondary": "Secondary Field"} def get_description(): """Return a dict describing how to call this plotter""" desc = dict() desc["data"] = True desc["cache"] = 3600 desc[ "description" ] = """This page presents a sphagetti plot of river stage and forecasts. The plot is roughly centered on the date of your choice with the plot showing any forecasts made three days prior to the date and for one day afterwards. Sorry that you have to know the station ID prior to using this page (will fix at some point). Presented timestamps are hopefully all in the local timezone of the reporting station. If you download the data, the timestamps are all in UTC. 
""" utc = datetime.datetime.utcnow() desc["arguments"] = [ dict( type="text", name="station", default="EKDI4", label="Enter 5 Char NWSLI Station Code (sorry):", ), dict( type="datetime", name="dt", default=utc.strftime("%Y/%m/%d %H%M"), label="Time to center plot at (UTC Time Zone):", min="2013/01/01 0000", ), dict( type="select", name="var", options=MDICT, label="Which Variable to Plot:", default="primary", ), ] return desc def get_context(fdict): """Do the common work""" pgconn = get_dbconn("hml") cursor = pgconn.cursor() ctx = get_autoplot_context(fdict, get_description()) ctx["station"] = ctx["station"].upper() station = ctx["station"] dt = ctx["dt"] # Attempt to get station information cursor.execute( "SELECT name, tzname from stations where id = %s and network ~* 'DCP'", (station,), ) ctx["name"] = "" ctx["tzname"] = "UTC" if cursor.rowcount > 0: row = cursor.fetchone() ctx["name"] = row[0] ctx["tzname"] = row[1] ctx["fdf"] = read_sql( f"""with fx as ( select id, issued, primaryname, primaryunits, secondaryname, secondaryunits from hml_forecast where station = %s and generationtime between %s and %s) SELECT f.id, f.issued at time zone 'UTC' as issued, d.valid at time zone 'UTC' as valid, d.primary_value, f.primaryname, f.primaryunits, d.secondary_value, f.secondaryname, f.secondaryunits from hml_forecast_data_{dt.year} d JOIN fx f on (d.hml_forecast_id = f.id) ORDER by f.id ASC, d.valid ASC """, pgconn, params=( station, dt - datetime.timedelta(days=3), dt + datetime.timedelta(days=1), ), index_col=None, ) if not ctx["fdf"].empty: ctx["fdf"]["valid"] = ctx["fdf"]["valid"].dt.tz_localize(pytz.UTC) ctx["fdf"]["issued"] = ctx["fdf"]["issued"].dt.tz_localize(pytz.UTC) ctx["primary"] = "%s[%s]" % ( ctx["fdf"].iloc[0]["primaryname"], ctx["fdf"].iloc[0]["primaryunits"], ) ctx["secondary"] = "%s[%s]" % ( ctx["fdf"].iloc[0]["secondaryname"], ctx["fdf"].iloc[0]["secondaryunits"], ) # get obs mints = ctx["fdf"]["valid"].min() maxts = ctx["fdf"]["valid"].max() else: 
mints = dt - datetime.timedelta(days=3) maxts = dt + datetime.timedelta(days=3) df = read_sql( "SELECT distinct valid at time zone 'UTC' as valid, " "h.label, value from hml_observed_data d " "JOIN hml_observed_keys h on (d.key = h.id) WHERE station = %s and " "valid between %s and %s ORDER by valid ASC", pgconn, params=(station, mints, maxts), index_col=None, ) if df.empty: raise NoDataFound("No Data Found.") df["valid"] = df["valid"].dt.tz_localize(pytz.UTC) ctx["odf"] = df.pivot("valid", "label", "value") if not ctx["fdf"].empty: ctx["fdf"].reset_index(inplace=True) ctx["df"] = pd.merge( ctx["fdf"], ctx["odf"], left_on="valid", right_on="valid", how="left", sort=False, ) ctx["title"] = "[%s] %s" % (ctx["station"], ctx["name"]) ctx["subtitle"] = "+/- 72 hours around %s" % ( ctx["dt"] .replace(tzinfo=pytz.UTC) .astimezone(pytz.timezone(ctx["tzname"])) .strftime("%d %b %Y %-I:%M %p %Z"), ) if "df" not in ctx or (ctx["df"].empty and not ctx["odf"].empty): ctx["primary"] = ctx["odf"].columns[0] ctx["secondary"] = ctx["odf"].columns[1] return ctx def highcharts(fdict): """generate highcharts""" ctx = get_context(fdict) if "df" not in ctx: raise NoDataFound("No Data Found.") df = ctx["df"] df["ticks"] = df["valid"].astype(np.int64) // 10 ** 6 lines = [] fxs = df["id"].unique() for fx in fxs: df2 = df[df["id"] == fx] issued = ( df2.iloc[0]["issued"] .tz_convert(pytz.timezone(ctx["tzname"])) .strftime("%-m/%-d %-I%p %Z") ) v = df2[["ticks", ctx["var"] + "_value"]].to_json(orient="values") lines.append( """{ name: '""" + issued + """', type: 'line', tooltip: {valueDecimal: 1}, data: """ + v + """ } """ ) ctx["odf"]["ticks"] = ctx["odf"].index.values.astype(np.int64) // 10 ** 6 if ctx["var"] in ctx: v = ctx["odf"][["ticks", ctx[ctx["var"]]]].to_json(orient="values") lines.append( """{ name: 'Obs', type: 'line', color: 'black', lineWidth: 3, tooltip: {valueDecimal: 1}, data: """ + v + """ } """ ) series = ",".join(lines) return ( """ $("#ap_container").highcharts({ time: { 
useUTC: false, timezone: '""" + ctx["tzname"] + """' }, title: {text: '""" + ctx["title"] + """'}, subtitle: {text: '""" + ctx["subtitle"] + """'}, chart: {zoomType: 'x'}, tooltip: { shared: true, crosshairs: true, xDateFormat: '%d %b %Y %I:%M %p' }, xAxis: { title: {text: '""" + ctx["tzname"] + """ Timezone'}, type: 'datetime'}, yAxis: {title: {text: '""" + ctx.get(ctx["var"], "primary") + """'}}, series: [""" + series + """] }); """ ) def plotter(fdict): """Go""" ctx = get_context(fdict) if "df" not in ctx or (ctx["df"].empty and ctx["odf"].empty): raise NoDataFound("No Data Found!") df = ctx["df"] title = "\n".join([ctx["title"], ctx["subtitle"]]) (fig, ax) = figure_axes(title=title) fxs = df["id"].unique() for fx in fxs: df2 = df[df["id"] == fx] issued = ( df2.iloc[0]["issued"] .tz_convert(pytz.timezone(ctx["tzname"])) .strftime("%-m/%-d %-I%p %Z") ) ax.plot( df2["valid"], df2[ctx["var"] + "_value"], zorder=2, label=issued ) if not ctx["odf"].empty: ax.plot( ctx["odf"].index.values, ctx["odf"][ctx[ctx["var"]]], lw=2, color="k", label="Obs", zorder=4, ) ax.set_ylabel(ctx[ctx["var"]]) ax.xaxis.set_major_locator( mdates.AutoDateLocator(tz=pytz.timezone(ctx["tzname"])) ) ax.xaxis.set_major_formatter( mdates.DateFormatter("%-d %b\n%Y", tz=pytz.timezone(ctx["tzname"])) ) pos = ax.get_position() ax.grid(True) ax.set_position([pos.x0, pos.y0, 0.74, 0.8]) ax.set_xlabel(f"Timestamps in {ctx['tzname']} Timezone") ax.legend(loc=(1.0, 0.0)) df["issued"] = df["issued"].apply(lambda x: x.strftime("%Y-%m-%d %H:%M")) df["valid"] = df["valid"].apply(lambda x: x.strftime("%Y-%m-%d %H:%M")) return fig, df if __name__ == "__main__": plotter(dict(station="MLGO1", dt="2021-06-19 1653"))
unknown
codeparrot/codeparrot-clean
"""Integration tests for ``evergreen_git`` change detection.

The fixture clones the current repository into a temporary git worktree,
replays the working-tree changes into it, and then exercises
``get_new_files`` / ``get_changed_files`` in the three environments they
must support: a local checkout, an Evergreen patch build, and an Evergreen
waterfall build.
"""
import os
import platform
import shutil
import sys
import tempfile
import unittest

from git import Repo
from mock import MagicMock

from buildscripts.bazel_rules_mongo.utils import evergreen_git

# Fixture file names: one pre-existing file that each test modifies, and one
# brand-new file each test creates.
changed_file_name = "changed_file.txt"
new_file_name = "new_file.txt"


def write_file(repo: Repo, file_name: str) -> None:
    # Append text so git sees the file as modified (or creates it if absent).
    with open(os.path.join(repo.working_tree_dir, file_name), "a+") as file:
        file.write("change\n")


@unittest.skipIf(
    sys.platform == "win32" or platform.machine().lower() in {"ppc64le", "s390x"},
    reason="This test breaks on windows and only needs to work on linux",
)
class TestChangedFiles(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Build an isolated worktree copy of the current repo so the tests can
        # commit/reset freely without disturbing the developer's checkout.
        cls.tmp_dir = tempfile.mkdtemp()
        root_repo = Repo()
        # commit of HEAD
        commit = root_repo.head.commit.hexsha
        files_to_copy = set()
        # copy the current repo into a temp dir to do testing on
        root_repo.git.execute(["git", "worktree", "add", cls.tmp_dir, commit])
        # get tracked files that have been changed that are tracked by git
        diff_output = root_repo.git.execute(
            ["git", "diff", "--name-only", "--diff-filter=d", commit]
        )
        files_to_copy.update(diff_output.split("\n"))
        # `git add . -n` dry-run lists all untracked changes in the current repo
        untracked_changes = root_repo.git.execute(["git", "add", ".", "-n"])
        for line in untracked_changes.split("\n"):
            if not line:
                continue
            # Each line looks like: add 'path/to/file'
            # [5:-1] strips the leading "add '" and the trailing quote.
            files_to_copy.add(line.strip()[5:-1])
        # copy all changed files from the current repo to the new worktree for testing.
        for file in files_to_copy:
            if not file:
                continue
            if not os.path.exists(file):
                raise RuntimeError(f"Changed file was found and does not exist: {file}")
            # A directory here means the file is an embedded git repo; this
            # happens when other evergreen modules are present — ignore them.
            if os.path.isdir(file):
                continue
            new_dest = os.path.join(cls.tmp_dir, file)
            os.makedirs(os.path.dirname(new_dest), exist_ok=True)
            shutil.copy(file, new_dest)
        cls.repo = Repo(cls.tmp_dir)
        # add a testing file to this original commit so we can treat it as a
        # preexisting file that is going to be modified
        write_file(cls.repo, changed_file_name)
        cls.repo.git.execute(["git", "add", "."])
        cls.repo.git.execute(["git", "commit", "-m", "Commit changed files"])
        # this new commit is our base revision to compare changes against
        cls.base_revision = cls.repo.head.commit.hexsha
        cls.original_dir = os.path.abspath(os.curdir)
        os.chdir(cls.tmp_dir)

    @classmethod
    def tearDownClass(cls):
        os.chdir(cls.original_dir)
        shutil.rmtree(cls.tmp_dir)

    def setUp(self):
        # change the file already committed to the repo
        write_file(self.repo, changed_file_name)
        # make a new file that has not been committed yet
        write_file(self.repo, new_file_name)
        # Minimal fake Evergreen expansions file; individual tests append to it.
        with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=False) as tmp:
            tmp.write("fake_expansion: true\n")
            self.expansions_file = tmp.name

    def tearDown(self):
        # reset to the original state between tests
        self.repo.git.execute(["git", "reset", "--hard", self.base_revision])
        os.unlink(self.expansions_file)

    def test_local_unchanged_files(self):
        # Local checkout path: patch the remote-branch lookup so the diff base
        # is our known base revision.
        evergreen_git.get_remote_branch_ref = MagicMock(return_value=self.base_revision)
        new_files = evergreen_git.get_new_files()
        self.assertEqual(
            new_files, [], msg="New files list was not empty when no new files were added to git."
        )
        changed_files = evergreen_git.get_changed_files()
        self.assertEqual(
            changed_files, [changed_file_name], msg="Changed file list was not as expected."
        )
        self.repo.git.execute(["git", "add", "."])
        # random file not tracked by git — must NOT show up in either list
        write_file(self.repo, "random_other_untracked_file.txt")
        new_files = evergreen_git.get_new_files()
        self.assertEqual(
            new_files,
            [new_file_name],
            msg="New file list did not contain the new file added to git.",
        )
        changed_files = evergreen_git.get_changed_files()
        self.assertEqual(
            changed_files,
            [changed_file_name, new_file_name],
            msg="Changed file list was not as expected.",
        )

    def test_evergreen_patch(self):
        # the files in evergreen patches live as uncommitted files added to the index
        with open(self.expansions_file, "a") as tmp:
            tmp.write("is_patch: true\n")
            tmp.write(f"revision: {self.base_revision}\n")
        self.repo.git.execute(["git", "add", "."])
        new_files = evergreen_git.get_new_files(expansions_file=self.expansions_file)
        self.assertEqual(
            new_files, [new_file_name], msg="New file list did not contain the new file."
        )
        changed_files = evergreen_git.get_changed_files(expansions_file=self.expansions_file)
        self.assertEqual(
            changed_files,
            [changed_file_name, new_file_name],
            msg="Changed file list was not as expected.",
        )

    def test_evergreen_waterfall(self):
        # Evergreen waterfall runs check against the last commit, so the
        # changes must be committed first.
        self.repo.git.execute(["git", "add", "."])
        self.repo.git.execute(["git", "commit", "-m", "Fake waterfall changes"])
        new_files = evergreen_git.get_new_files(expansions_file=self.expansions_file)
        self.assertEqual(
            new_files, [new_file_name], msg="New file list did not contain the new file."
        )
        changed_files = evergreen_git.get_changed_files(expansions_file=self.expansions_file)
        self.assertEqual(
            changed_files,
            [changed_file_name, new_file_name],
            msg="Changed file list was not as expected.",
        )

    def test_remote_picker(self):
        remote = evergreen_git.get_mongodb_remote(self.repo)
        self.assertIn("10gen/mongo", remote.url, msg="The wrong remote was found.")
python
github
https://github.com/mongodb/mongo
buildscripts/bazel_rules_mongo/tests/test_changed_files.py
# -*- coding: utf-8 -*-

from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp


class product(osv.osv):
    """Extend product.product availability so that a product with a bill of
    materials also counts the quantity that could be assembled from the
    available stock of its components."""
    _inherit = "product.product"

    def get_product_available(self, cr, uid, ids, context=None):
        """Finds whether product is available or not in a particular warehouse.

        On top of the standard availability, products with a BoM get the
        quantity buildable from component stock added in.  The BoM extension
        only runs when the caller asked for received ('in') moves in state
        'done' via the context.

        :param context: optional dict; may carry ``states`` / ``what`` move
            filters (see stock's get_product_available).
        :return: dict mapping product id -> available quantity
        """
        # Fix: callers may pass context=None (the default); the original code
        # crashed on context.get() in that case.
        context = context or {}
        bom_pool = self.pool.get("mrp.bom")
        res = super(product, self).get_product_available(cr, uid, ids, context=context)
        if 'done' not in context.get('states', []) or 'in' not in context.get('what', []):
            return res
        boms = bom_pool.browse(cr, uid, bom_pool.search(cr, uid, [('product_id', 'in', res.keys())]))
        for bom in boms:
            if not bom.bom_lines:
                continue
            quantities = []
            for line in bom.bom_lines:
                if not line.product_qty:
                    # A zero-quantity line would divide by zero below; treat
                    # the kit as non-buildable and stop scanning lines.
                    quantities.append(0)
                    break
                quantities.append(
                    (res[line.product_id.id] if line.product_id.id in res
                     else line.product_id.qty_available) / line.product_qty)
            # The buildable amount is limited by the scarcest component.
            res[bom.product_id.id] += min(quantities)
        return res


product()
unknown
codeparrot/codeparrot-clean
def smallest_Divisor(n):
    """Return the smallest divisor greater than 1 of ``n``.

    For a prime ``n`` (and for n == 1) the result is ``n`` itself.  Only odd
    candidates up to sqrt(n) are tried after ruling out 2, so the run time is
    O(sqrt(n)).

    :param n: positive integer to factor.
    :return: smallest non-trivial divisor of ``n``, or ``n`` if prime.
    """
    # Idiom cleanup of the original C-style body (semicolons, redundant
    # parentheses); the algorithm and results are unchanged.
    if n % 2 == 0:
        return 2
    i = 3
    while i * i <= n:
        if n % i == 0:
            return i
        i += 2
    return n
unknown
mbpp
import sys
import unittest
from contextlib import contextmanager

from django.test import LiveServerTestCase, tag
from django.utils.module_loading import import_string
from django.utils.text import capfirst


class SeleniumTestCaseBase(type(LiveServerTestCase)):
    # Metaclass for selenium tests: when several browsers are configured it
    # multiplies each concrete test class into one class per browser.
    # List of browsers to dynamically create test classes for.
    browsers = []
    # Sentinel value to differentiate browser-specific instances.
    browser = None

    def __new__(cls, name, bases, attrs):
        """
        Dynamically create new classes and add them to the test module when
        multiple browsers specs are provided (e.g. --selenium=firefox,chrome).
        """
        test_class = super().__new__(cls, name, bases, attrs)
        # If the test class is either browser-specific or a test base (it
        # defines no test_* methods of its own), return it unchanged.
        # NOTE: the `name` inside the generator expression is its own loop
        # variable (genexps have their own scope); the `name` parameter is
        # still intact for the "%s%s" format below.
        if test_class.browser or not any(name.startswith('test') and callable(value) for name, value in attrs.items()):
            return test_class
        elif test_class.browsers:
            # Reuse the created test class to make it browser-specific.
            # We can't rename it to include the browser name or create a
            # subclass like we do with the remaining browsers as it would
            # either duplicate tests or prevent pickling of its instances.
            first_browser = test_class.browsers[0]
            test_class.browser = first_browser
            # Create subclasses for each of the remaining browsers and expose
            # them through the test's module namespace.
            module = sys.modules[test_class.__module__]
            for browser in test_class.browsers[1:]:
                browser_test_class = cls.__new__(
                    cls,
                    "%s%s" % (capfirst(browser), name),
                    (test_class,),
                    {'browser': browser, '__module__': test_class.__module__}
                )
                setattr(module, browser_test_class.__name__, browser_test_class)
            return test_class
        # If no browsers were specified, skip this class (it'll still be discovered).
        return unittest.skip('No browsers specified.')(test_class)

    @classmethod
    def import_webdriver(cls, browser):
        # Resolve e.g. "firefox" to selenium.webdriver.firefox.webdriver.WebDriver.
        return import_string("selenium.webdriver.%s.webdriver.WebDriver" % browser)

    def create_webdriver(self):
        # Instantiate the WebDriver class for this instance's browser.
        return self.import_webdriver(self.browser)()


@tag('selenium')
class SeleniumTestCase(LiveServerTestCase, metaclass=SeleniumTestCaseBase):
    # Seconds the driver waits for elements to appear before failing a lookup.
    implicit_wait = 10

    @classmethod
    def setUpClass(cls):
        cls.selenium = cls.create_webdriver()
        cls.selenium.implicitly_wait(cls.implicit_wait)
        super().setUpClass()

    @classmethod
    def _tearDownClassInternal(cls):
        # quit() the WebDriver before attempting to terminate and join the
        # single-threaded LiveServerThread to avoid a dead lock if the browser
        # kept a connection alive.
        if hasattr(cls, 'selenium'):
            cls.selenium.quit()
        super()._tearDownClassInternal()

    @contextmanager
    def disable_implicit_wait(self):
        """Disable the default implicit wait."""
        self.selenium.implicitly_wait(0)
        try:
            yield
        finally:
            self.selenium.implicitly_wait(self.implicit_wait)
unknown
codeparrot/codeparrot-clean
#       Licensed to the Apache Software Foundation (ASF) under one
#       or more contributor license agreements.  See the NOTICE file
#       distributed with this work for additional information
#       regarding copyright ownership.  The ASF licenses this file
#       to you under the Apache License, Version 2.0 (the
#       "License"); you may not use this file except in compliance
#       with the License.  You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#       Unless required by applicable law or agreed to in writing,
#       software distributed under the License is distributed on an
#       "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#       KIND, either express or implied.  See the License for the
#       specific language governing permissions and limitations
#       under the License.

import tempfile
import json

from nose.tools import assert_equal
from pylons import tmpl_context as c

from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import setup_basic_test


class TestBulkExport(object):
    """Bulk-export behavior of the link tool."""

    def setUp(self):
        setup_basic_test()

    @td.with_link
    def test_bulk_export(self):
        """A link app's configured URL must round-trip through bulk_export."""
        # Simulate how the export task runs: no app/project bound to the
        # thread-local context.  The export code must not depend on them.
        c.app = c.project = None

        test_project = M.Project.query.get(shortname='test')
        link_app = test_project.app_instance('link')
        link_app.config.options['url'] = 'http://domain.net'
        export_file = tempfile.TemporaryFile()
        link_app.bulk_export(export_file)
        export_file.seek(0)
        exported = json.loads(export_file.read())
        assert_equal(exported['url'], 'http://domain.net')
unknown
codeparrot/codeparrot-clean
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // This file is MACHINE GENERATED! Do not edit. #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_MATH_OPS_H_ #define TENSORFLOW_C_EXPERIMENTAL_OPS_MATH_OPS_H_ #include "absl/status/status.h" #include "tensorflow/c/eager/abstract_context.h" #include "tensorflow/c/eager/abstract_tensor_handle.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { namespace ops { // Returns x * y element-wise. absl::Status Mul(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle* const y, AbstractTensorHandle** z, const char* name = nullptr, const char* raw_device_name = nullptr); // Returns the complex conjugate of a complex number. absl::Status Conj(AbstractContext* ctx, AbstractTensorHandle* const input, AbstractTensorHandle** output, const char* name = nullptr, const char* raw_device_name = nullptr); // Returns x + y element-wise. absl::Status AddV2(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle* const y, AbstractTensorHandle** z, const char* name = nullptr, const char* raw_device_name = nullptr); // Multiply the matrix "a" by the matrix "b". 
absl::Status MatMul(AbstractContext* ctx, AbstractTensorHandle* const a, AbstractTensorHandle* const b, AbstractTensorHandle** product, bool transpose_a = false, bool transpose_b = false, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes numerical negative value element-wise. absl::Status Neg(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle** y, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes the sum of elements across dimensions of a tensor. absl::Status Sum(AbstractContext* ctx, AbstractTensorHandle* const input, AbstractTensorHandle* const reduction_indices, AbstractTensorHandle** output, bool keep_dims = false, const char* name = nullptr, const char* raw_device_name = nullptr); // Returns x - y element-wise. absl::Status Sub(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle* const y, AbstractTensorHandle** z, const char* name = nullptr, const char* raw_device_name = nullptr); // Returns x / y element-wise. absl::Status Div(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle* const y, AbstractTensorHandle** z, const char* name = nullptr, const char* raw_device_name = nullptr); // Returns 0 if the denominator is zero. absl::Status DivNoNan(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle* const y, AbstractTensorHandle** z, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes exponential of x element-wise. \\(y = e^x\\). absl::Status Exp(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle** y, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes square root of x element-wise. absl::Status Sqrt(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle** y, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes the gradient for the sqrt of `x` wrt its input. 
absl::Status SqrtGrad(AbstractContext* ctx, AbstractTensorHandle* const y, AbstractTensorHandle* const dy, AbstractTensorHandle** z, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes natural logarithm of (1 + x) element-wise. absl::Status Log1p(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle** y, const char* name = nullptr, const char* raw_device_name = nullptr); } // namespace ops } // namespace tensorflow #endif // TENSORFLOW_C_EXPERIMENTAL_OPS_MATH_OPS_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/c/experimental/ops/math_ops.h
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from pyspark import SparkContext from pyspark.sql import SparkSession class MLlibTestCase(unittest.TestCase): def setUp(self): self.sc = SparkContext('local[4]', "MLlib tests") self.spark = SparkSession(self.sc) def tearDown(self): self.spark.stop()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import ctypes as ct import socket import weakref from ip4tc import Rule, Table, IPTCError from util import find_library, load_kernel from xtables import (XT_INV_PROTO, NFPROTO_IPV6, xt_align, xt_counters) __all__ = ["Table6", "Rule6"] load_kernel("ip6_tables") _IFNAMSIZ = 16 def is_table6_available(name): try: Table6(name) return True except IPTCError: pass return False class in6_addr(ct.Structure): """This class is a representation of the C struct in6_addr.""" _fields_ = [("s6_addr", ct.c_uint8 * 16)] # IPv6 address class ip6t_ip6(ct.Structure): """This class is a representation of the C struct ip6t_ip6.""" _fields_ = [("src", in6_addr), # Source and destination IP6 addr ("dst", in6_addr), # Mask for src and dest IP6 addr ("smsk", in6_addr), ("dmsk", in6_addr), ("iniface", ct.c_char * _IFNAMSIZ), ("outiface", ct.c_char * _IFNAMSIZ), ("iniface_mask", ct.c_char * _IFNAMSIZ), ("outiface_mask", ct.c_char * _IFNAMSIZ), ("proto", ct.c_uint16), # Upper protocol number ("tos", ct.c_uint8), # TOS, match iff flags & IP6T_F_TOS ("flags", ct.c_uint8), # Flags word ("invflags", ct.c_uint8)] # Inverse flags # flags IP6T_F_PROTO = 0x01 # Set if rule cares about upper protocols IP6T_F_TOS = 0x02 # Match the TOS IP6T_F_GOTO = 0x04 # Set if jump is a goto IP6T_F_MASK = 0x07 # All possible flag bits mask # invflags IP6T_INV_VIA_IN = 0x01 # Invert the sense of IN IFACE IP6T_INV_VIA_OUT = 0x02 # Invert the sense of OUT IFACE IP6T_INV_TOS = 0x04 # Invert the sense of TOS IP6T_INV_SRCIP = 0x08 # Invert the sense of SRC IP IP6T_INV_DSTIP = 0x10 # Invert the sense of DST OP IP6T_INV_FRAG = 0x20 # Invert the sense of FRAG IP6T_INV_PROTO = XT_INV_PROTO IP6T_INV_MASK = 0x7F # All possible flag bits mask def __init__(self): # default: full netmask self.smsk.s6_addr = self.dmsk.s6_addr = 0xff * 16 class ip6t_entry(ct.Structure): """This class is a representation of the C struct ip6t_entry.""" _fields_ = [("ipv6", ip6t_ip6), ("nfcache", ct.c_uint), # fields that we care 
about ("target_offset", ct.c_uint16), # size of ip6t_entry + matches ("next_offset", ct.c_uint16), # size of e + matches + target ("comefrom", ct.c_uint), # back pointer ("counters", xt_counters), # packet and byte counters ("elems", ct.c_ubyte * 0)] # the matches then the target _libiptc, _ = find_library("ip6tc", "iptc") # old iptables versions use iptc class ip6tc(object): """This class contains all libip6tc API calls.""" iptc_init = _libiptc.ip6tc_init iptc_init.restype = ct.POINTER(ct.c_int) iptc_init.argstype = [ct.c_char_p] iptc_free = _libiptc.ip6tc_free iptc_free.restype = None iptc_free.argstype = [ct.c_void_p] iptc_commit = _libiptc.ip6tc_commit iptc_commit.restype = ct.c_int iptc_commit.argstype = [ct.c_void_p] iptc_builtin = _libiptc.ip6tc_builtin iptc_builtin.restype = ct.c_int iptc_builtin.argstype = [ct.c_char_p, ct.c_void_p] iptc_first_chain = _libiptc.ip6tc_first_chain iptc_first_chain.restype = ct.c_char_p iptc_first_chain.argstype = [ct.c_void_p] iptc_next_chain = _libiptc.ip6tc_next_chain iptc_next_chain.restype = ct.c_char_p iptc_next_chain.argstype = [ct.c_void_p] iptc_is_chain = _libiptc.ip6tc_is_chain iptc_is_chain.restype = ct.c_int iptc_is_chain.argstype = [ct.c_char_p, ct.c_void_p] iptc_create_chain = _libiptc.ip6tc_create_chain iptc_create_chain.restype = ct.c_int iptc_create_chain.argstype = [ct.c_char_p, ct.c_void_p] iptc_delete_chain = _libiptc.ip6tc_delete_chain iptc_delete_chain.restype = ct.c_int iptc_delete_chain.argstype = [ct.c_char_p, ct.c_void_p] iptc_rename_chain = _libiptc.ip6tc_rename_chain iptc_rename_chain.restype = ct.c_int iptc_rename_chain.argstype = [ct.c_char_p, ct.c_char_p, ct.c_void_p] iptc_flush_entries = _libiptc.ip6tc_flush_entries iptc_flush_entries.restype = ct.c_int iptc_flush_entries.argstype = [ct.c_char_p, ct.c_void_p] iptc_zero_entries = _libiptc.ip6tc_zero_entries iptc_zero_entries.restype = ct.c_int iptc_zero_entries.argstype = [ct.c_char_p, ct.c_void_p] # Get the policy of a given built-in chain 
iptc_get_policy = _libiptc.ip6tc_get_policy iptc_get_policy.restype = ct.c_char_p iptc_get_policy.argstype = [ct.c_char_p, ct.POINTER(xt_counters), ct.c_void_p] # Set the policy of a chain iptc_set_policy = _libiptc.ip6tc_set_policy iptc_set_policy.restype = ct.c_int iptc_set_policy.argstype = [ct.c_char_p, ct.c_char_p, ct.POINTER(xt_counters), ct.c_void_p] # Get first rule in the given chain: NULL for empty chain. iptc_first_rule = _libiptc.ip6tc_first_rule iptc_first_rule.restype = ct.POINTER(ip6t_entry) iptc_first_rule.argstype = [ct.c_char_p, ct.c_void_p] # Returns NULL when rules run out. iptc_next_rule = _libiptc.ip6tc_next_rule iptc_next_rule.restype = ct.POINTER(ip6t_entry) iptc_next_rule.argstype = [ct.POINTER(ip6t_entry), ct.c_void_p] # Returns a pointer to the target name of this entry. iptc_get_target = _libiptc.ip6tc_get_target iptc_get_target.restype = ct.c_char_p iptc_get_target.argstype = [ct.POINTER(ip6t_entry), ct.c_void_p] # These functions return TRUE for OK or 0 and set errno. If errno == # 0, it means there was a version error (ie. upgrade libiptc). # Rule numbers start at 1 for the first rule. # Insert the entry `e' in chain `chain' into position `rulenum'. iptc_insert_entry = _libiptc.ip6tc_insert_entry iptc_insert_entry.restype = ct.c_int iptc_insert_entry.argstype = [ct.c_char_p, ct.POINTER(ip6t_entry), ct.c_int, ct.c_void_p] # Atomically replace rule `rulenum' in `chain' with `e'. iptc_replace_entry = _libiptc.ip6tc_replace_entry iptc_replace_entry.restype = ct.c_int iptc_replace_entry.argstype = [ct.c_char_p, ct.POINTER(ip6t_entry), ct.c_int, ct.c_void_p] # Append entry `e' to chain `chain'. Equivalent to insert with # rulenum = length of chain. 
iptc_append_entry = _libiptc.ip6tc_append_entry iptc_append_entry.restype = ct.c_int iptc_append_entry.argstype = [ct.c_char_p, ct.POINTER(ip6t_entry), ct.c_void_p] # Delete the first rule in `chain' which matches `e', subject to # matchmask (array of length == origfw) iptc_delete_entry = _libiptc.ip6tc_delete_entry iptc_delete_entry.restype = ct.c_int iptc_delete_entry.argstype = [ct.c_char_p, ct.POINTER(ip6t_entry), ct.POINTER(ct.c_ubyte), ct.c_void_p] # Delete the rule in position `rulenum' in `chain'. iptc_delete_num_entry = _libiptc.ip6tc_delete_num_entry iptc_delete_num_entry.restype = ct.c_int iptc_delete_num_entry.argstype = [ct.c_char_p, ct.c_uint, ct.c_void_p] # Check the packet `e' on chain `chain'. Returns the verdict, or # NULL and sets errno. #iptc_check_packet = _libiptc.ip6tc_check_packet #iptc_check_packet.restype = ct.c_char_p #iptc_check_packet.argstype = [ct.c_char_p, ct.POINTER(ipt), ct.c_void_p] # Get the number of references to this chain iptc_get_references = _libiptc.ip6tc_get_references iptc_get_references.restype = ct.c_int iptc_get_references.argstype = [ct.c_uint, ct.c_char_p, ct.c_void_p] # read packet and byte counters for a specific rule iptc_read_counter = _libiptc.ip6tc_read_counter iptc_read_counter.restype = ct.POINTER(xt_counters) iptc_read_counter.argstype = [ct.c_char_p, ct.c_uint, ct.c_void_p] # zero packet and byte counters for a specific rule iptc_zero_counter = _libiptc.ip6tc_zero_counter iptc_zero_counter.restype = ct.c_int iptc_zero_counter.argstype = [ct.c_char_p, ct.c_uint, ct.c_void_p] # set packet and byte counters for a specific rule iptc_set_counter = _libiptc.ip6tc_set_counter iptc_set_counter.restype = ct.c_int iptc_set_counter.argstype = [ct.c_char_p, ct.c_uint, ct.POINTER(xt_counters), ct.c_void_p] # Translates errno numbers into more human-readable form than strerror. 
iptc_strerror = _libiptc.ip6tc_strerror iptc_strerror.restype = ct.c_char_p iptc_strerror.argstype = [ct.c_int] class Rule6(Rule): """This is an IPv6 rule.""" def __init__(self, entry=None, chain=None): self.nfproto = NFPROTO_IPV6 self._matches = [] self._target = None self.chain = chain self.rule = entry def __eq__(self, rule): if self._target != rule._target: return False if len(self._matches) != len(rule._matches): return False if set(rule._matches) != set([x for x in rule._matches if x in self._matches]): return False if (self.src == rule.src and self.dst == rule.dst and self.protocol == rule.protocol and self.in_interface == rule.in_interface and self.out_interface == rule.out_interface): return True return False def save(self, name): return self._save(name, self.entry.ipv6) def _get_tables(self): return [Table6(t) for t in Table6.ALL if is_table6_available(t)] tables = property(_get_tables) """This is the list of tables for our protocol.""" def _count_bits(self, n): bits = 0 while n > 0: if n & 1: bits += 1 n = n >> 1 return bits def _create_mask(self, plen): mask = [0 for x in xrange(16)] i = 0 while plen > 0: if plen >= 8: mask[i] = 0xff else: mask[i] = 2 ** plen - 1 i += 1 plen -= 8 return "".join([chr(x) for x in mask]) def get_src(self): src = "" if self.entry.ipv6.invflags & ip6t_ip6.IP6T_INV_SRCIP: src = "".join([src, "!"]) try: addr = socket.inet_ntop(socket.AF_INET6, self.entry.ipv6.src.s6_addr) except socket.error: raise IPTCError("error in internal state: invalid address") src = "".join([src, addr, "/"]) # create prefix length from mask in smsk plen = 0 for x in self.entry.ipv6.smsk.s6_addr: if x == 0xff: plen += 8 else: plen += self._count_bits(x) break src = "".join([src, str(plen)]) return src def _get_address_netmask(self, a): slash = a.find("/") if slash == -1: addr = a netm = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" else: addr = a[:slash] netm = a[slash + 1:] return addr, netm def _addr2in6addr(self, addr): arr = ct.c_uint8 * 16 ina = 
in6_addr() try: ina.s6_addr = arr.from_buffer_copy( socket.inet_pton(socket.AF_INET6, addr)) except socket.error: raise ValueError("invalid address %s" % (addr)) return arr, ina def set_src(self, src): if src[0] == "!": self.entry.ipv6.invflags |= ip6t_ip6.IP6T_INV_SRCIP src = src[1:] else: self.entry.ipv6.invflags &= (~ip6t_ip6.IP6T_INV_SRCIP & ip6t_ip6.IP6T_INV_MASK) addr, netm = self._get_address_netmask(src) arr, self.entry.ipv6.src = self._addr2in6addr(addr) # if we got a numeric prefix length if netm.isdigit(): plen = int(netm) if plen < 0 or plen > 128: raise ValueError("invalid prefix length %d" % (plen)) self.entry.ipv6.smsk.s6_addr = arr.from_buffer_copy( self._create_mask(plen)) return # nope, we got an IPv6 address-style prefix neta = in6_addr() try: neta.s6_addr = arr.from_buffer_copy( socket.inet_pton(socket.AF_INET6, netm)) except socket.error: raise ValueError("invalid netmask %s" % (netm)) self.entry.ipv6.smsk = neta src = property(get_src, set_src) """This is the source network address with an optional prefix length in string form.""" def get_dst(self): dst = "" if self.entry.ipv6.invflags & ip6t_ip6.IP6T_INV_DSTIP: dst = "".join([dst, "!"]) try: addr = socket.inet_ntop(socket.AF_INET6, self.entry.ipv6.dst.s6_addr) except socket.error: raise IPTCError("error in internal state: invalid address") dst = "".join([dst, addr, "/"]) # create prefix length from mask in dmsk plen = 0 for x in self.entry.ipv6.dmsk.s6_addr: if x & 0xff == 0xff: plen += 8 else: plen += self._count_bits(x) break dst = "".join([dst, str(plen)]) return dst def set_dst(self, dst): if dst[0] == "!": self.entry.ipv6.invflags |= ip6t_ip6.IP6T_INV_DSTIP dst = dst[1:] else: self.entry.ipv6.invflags &= (~ip6t_ip6.IP6T_INV_DSTIP & ip6t_ip6.IP6T_INV_MASK) addr, netm = self._get_address_netmask(dst) arr, self.entry.ipv6.dst = self._addr2in6addr(addr) # if we got a numeric prefix length if netm.isdigit(): plen = int(netm) if plen < 0 or plen > 128: raise ValueError("invalid prefix length 
%d" % (plen)) self.entry.ipv6.dmsk.s6_addr = arr.from_buffer_copy( self._create_mask(plen)) return # nope, we got an IPv6 address-style prefix neta = in6_addr() try: neta.s6_addr = arr.from_buffer_copy( socket.inet_pton(socket.AF_INET6, netm)) except socket.error: raise ValueError("invalid netmask %s" % (netm)) self.entry.ipv6.dmsk = neta dst = property(get_dst, set_dst) """This is the destination network address with an optional network mask in string form.""" def get_in_interface(self): intf = "" if self.entry.ipv6.invflags & ip6t_ip6.IP6T_INV_VIA_IN: intf = "".join(["!", intf]) iface = bytearray(_IFNAMSIZ) iface[:len(self.entry.ipv6.iniface)] = self.entry.ipv6.iniface mask = bytearray(_IFNAMSIZ) mask[:len(self.entry.ipv6.iniface_mask)] = self.entry.ipv6.iniface_mask if mask[0] == 0: return None for i in xrange(_IFNAMSIZ): if mask[i] != 0: intf = "".join([intf, chr(iface[i])]) else: if iface[i - 1] != 0: intf = "".join([intf, "+"]) else: intf = intf[:-1] break return intf def set_in_interface(self, intf): if intf[0] == "!": self.entry.ipv6.invflags |= ip6t_ip6.IP6T_INV_VIA_IN intf = intf[1:] else: self.entry.ipv6.invflags &= (~ip6t_ip6.IP6T_INV_VIA_IN & ip6t_ip6.IP6T_INV_MASK) if len(intf) >= _IFNAMSIZ: raise ValueError("interface name %s too long" % (intf)) masklen = len(intf) + 1 if intf[len(intf) - 1] == "+": intf = intf[:-1] masklen -= 2 self.entry.ipv6.iniface = "".join([intf, '\x00' * (_IFNAMSIZ - len(intf))]) self.entry.ipv6.iniface_mask = "".join(['\x01' * masklen, '\x00' * (_IFNAMSIZ - masklen)]) in_interface = property(get_in_interface, set_in_interface) """This is the input network interface e.g. *eth0*. A wildcard match can be achieved via *+* e.g. 
*ppp+* matches any *ppp* interface.""" def get_out_interface(self): intf = "" if self.entry.ipv6.invflags & ip6t_ip6.IP6T_INV_VIA_OUT: intf = "".join(["!", intf]) iface = bytearray(_IFNAMSIZ) iface[:len(self.entry.ipv6.outiface)] = self.entry.ipv6.outiface mask = bytearray(_IFNAMSIZ) mask[:len(self.entry.ipv6.outiface_mask)] = \ self.entry.ipv6.outiface_mask if mask[0] == 0: return None for i in xrange(_IFNAMSIZ): if mask[i] != 0: intf = "".join([intf, chr(iface[i])]) else: if iface[i - 1] != 0: intf = "".join([intf, "+"]) else: intf = intf[:-1] break return intf def set_out_interface(self, intf): if intf[0] == "!": self.entry.ipv6.invflags |= ip6t_ip6.IP6T_INV_VIA_OUT intf = intf[1:] else: self.entry.ipv6.invflags &= (~ip6t_ip6.IP6T_INV_VIA_OUT & ip6t_ip6.IP6T_INV_MASK) if len(intf) >= _IFNAMSIZ: raise ValueError("interface name %s too long" % (intf)) masklen = len(intf) + 1 if intf[len(intf) - 1] == "+": intf = intf[:-1] masklen -= 2 self.entry.ipv6.outiface = "".join([intf, '\x00' * (_IFNAMSIZ - len(intf))]) self.entry.ipv6.outiface_mask = "".join(['\x01' * masklen, '\x00' * (_IFNAMSIZ - masklen)]) out_interface = property(get_out_interface, set_out_interface) """This is the output network interface e.g. *eth0*. A wildcard match can be achieved via *+* e.g. *ppp+* matches any *ppp* interface.""" def get_protocol(self): if self.entry.ipv6.invflags & ip6t_ip6.IP6T_INV_PROTO: proto = "!" 
else: proto = "" proto = "".join([proto, self.protocols[self.entry.ipv6.proto]]) return proto def set_protocol(self, proto): if proto[0] == "!": self.entry.ipv6.invflags |= ip6t_ip6.IP6T_INV_PROTO proto = proto[1:] else: self.entry.ipv6.invflags &= (~ip6t_ip6.IP6T_INV_PROTO & ip6t_ip6.IP6T_INV_MASK) for p in self.protocols.items(): if proto.lower() == p[1]: self.entry.ipv6.proto = p[0] return raise ValueError("invalid protocol %s" % (proto)) protocol = property(get_protocol, set_protocol) """This is the transport layer protocol.""" def get_ip(self): return self.entry.ipv6 def _entry_size(self): return xt_align(ct.sizeof(ip6t_entry)) def _entry_type(self): return ip6t_entry def _new_entry(self): return ip6t_entry() class Table6(Table): """The IPv6 version of Table. There are four fixed tables: * **Table.FILTER**, the filter table, * **Table.MANGLE**, the mangle table, * **Table.RAW**, the raw table and * **Table.SECURITY**, the security table. The four tables are cached, so if you create a new Table, and it has been instantiated before, then it will be reused. To get access to e.g. the filter table: >>> import iptc >>> table = iptc.Table6(iptc.Table6.FILTER) The interface provided by *Table* is rather low-level, in fact it maps to *libiptc* API calls one by one, and take low-level iptables structs as parameters. It is encouraged to, when possible, use Chain, Rule, Match and Target to achieve what is wanted instead, since they hide the low-level details from the user. 
""" FILTER = "filter" """This is the constant for the filter table.""" MANGLE = "mangle" """This is the constant for the mangle table.""" RAW = "raw" """This is the constant for the raw table.""" SECURITY = "security" """This is the constant for the security table.""" ALL = ["filter", "mangle", "raw", "security"] """This is the constant for all tables.""" _cache = dict() def __new__(cls, name, autocommit=None): obj = Table6._cache.get(name, None) if not obj: obj = object.__new__(cls) if autocommit is None: autocommit = True obj._init(name, autocommit) Table6._cache[name] = obj elif autocommit is not None: obj.autocommit = autocommit return obj def _init(self, name, autocommit): """ Here *name* is the name of the table to instantiate, if it has already been instantiated the existing cached object is returned. *Autocommit* specifies that any low-level iptables operation should be committed immediately, making changes visible in the kernel. """ self._iptc = ip6tc() # to keep references to functions self._handle = None self.name = name self.autocommit = autocommit self.refresh() def create_rule(self, entry=None, chain=None): return Rule6(entry, chain)
unknown
codeparrot/codeparrot-clean
import pexpect import sys import time import datetime import sqlite3 from SensorTag import * from SensorCalcs import * csvfile = r'testfile.csv' #set this empty to turn off logging: logfile = r'pexpect.log' def usage(): print 'blepiTemp.py Usage:' print ' blepiTemp.py' print '' print ' SensorTag addresses and labels are stored in SensorInfo.db' print ' See BlepiInit.py for sample code' def unix_time(dt): epoch = datetime.datetime.utcfromtimestamp(0) delta = dt - epoch return delta.total_seconds() def unix_time_millis(dt): return unix_time(dt) * 1000.0 def saveData(data): timestamp = datetime.datetime.now().strftime("%y-%m-%d-%H:%M:%S") f = open(csvfile,"a") f.write("\"" + timestamp + "\"") for dataPoint in data: f.write(",\"" + str(dataPoint) + "\"") f.write("\n") f.close() def saveDataToDB(temp,ambTemp,tagAddr,ipAddr): connection = sqlite3.connect('/home/pi/blepimesh/data/client.db') cursor = connection.cursor() var = unix_time_millis(datetime.datetime.now()) data = (var, 1, temp,ambTemp,tagAddr,ipAddr) cursor.execute('INSERT INTO log (tagDate,logDate,temp,ambTemp,tagAddr,ipAddr) VALUES (?,?,?,?,?,?)', data) #cursor.execute("INSERT INTO log(tagDate,logDate,temp,ambTemp,tagAddr,ipAddr) VALUES(1, 1, temp,ambTemp,tagAddr,ipAddr)") connection.commit() connection.close() def connect(tool): print "Connecting to Sensor Tag" tool.sendline('connect') index = tool.expect (['Connection successful', pexpect.TIMEOUT, pexpect.EOF],3) if index == 0: tool.sendline('char-write-cmd 0x29 01') tool.expect('\[LE\]>') else: tool = None def bleTempCollection(interval=1): print "Create Tools Variable" tools = [] SensorTags = [] stConn = sqlite3.connect('/home/pi/blepisensor/SensorInfo.db') stCursor = stConn.cursor() # Read Sensor Tag addresses from a local database for row in stCursor.execute("SELECT * FROM SensorTags"): print row[1] print row[2] lf = open(logfile, 'a') tool = pexpect.spawn('gatttool -b ' + row[1] + ' --interactive',logfile=lf) tool.expect('\[LE\]>') connect(tool) st = 
SensorTag(row[1],tool,row[2]) SensorTags.append(st) stConn.close() #iterate over each Sensor Tag object and retrieve temp data while True: for sensorTag in SensorTags: tool = sensorTag.control if tool is None: print "Tool is None" else: tool.sendline('char-read-hnd 0x25') index = tool.expect (['descriptor: .*', 'Disconnected', pexpect.EOF, pexpect.TIMEOUT],3) if index == 0: hexStr = tool.after ambient = ambientTemp(hexStr) irT = irTemp(hexStr) saveData(["ambientTemp", ambient, "IR Temp", irT]) saveDataToDB(irT,ambient,sensorTag.mac,0) elif index == 1: connect(tool) time.sleep(float(interval)) # will this crash if lf is not created ? lf.close() #TODO: cleanup usage and catch erroneous input in main #TODO: Create a DEbug() function and flag next time I need to debug def main(): if (len(sys.argv) < 2): bleTempCollection() elif (len(sys.argv) == 2): bleTemp(sys.argv[1]) else: bleTemp(sys.argv[1],sys.argv[2]) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
""" Testing signals before/after saving and deleting. """ from django.db import models class Author(models.Model): name = models.CharField(max_length=20) def __unicode__(self): return self.name class Book(models.Model): name = models.CharField(max_length=20) authors = models.ManyToManyField(Author) def __unicode__(self): return self.name def pre_save_test(signal, sender, instance, **kwargs): print 'pre_save signal,', instance if kwargs.get('raw'): print 'Is raw' def post_save_test(signal, sender, instance, **kwargs): print 'post_save signal,', instance if 'created' in kwargs: if kwargs['created']: print 'Is created' else: print 'Is updated' if kwargs.get('raw'): print 'Is raw' def pre_delete_test(signal, sender, instance, **kwargs): print 'pre_delete signal,', instance print 'instance.id is not None: %s' % (instance.id != None) def post_delete_test(signal, sender, instance, **kwargs): print 'post_delete signal,', instance print 'instance.id is not None: %s' % (instance.id != None) __test__ = {'API_TESTS':""" # Save up the number of connected signals so that we can check at the end # that all the signals we register get properly unregistered (#9989) >>> pre_signals = (len(models.signals.pre_save.receivers), ... len(models.signals.post_save.receivers), ... len(models.signals.pre_delete.receivers), ... 
len(models.signals.post_delete.receivers)) >>> models.signals.pre_save.connect(pre_save_test) >>> models.signals.post_save.connect(post_save_test) >>> models.signals.pre_delete.connect(pre_delete_test) >>> models.signals.post_delete.connect(post_delete_test) >>> a1 = Author(name='Neal Stephenson') >>> a1.save() pre_save signal, Neal Stephenson post_save signal, Neal Stephenson Is created >>> b1 = Book(name='Snow Crash') >>> b1.save() pre_save signal, Snow Crash post_save signal, Snow Crash Is created # Assigning to m2m shouldn't generate an m2m signal >>> b1.authors = [a1] # Removing an author from an m2m shouldn't generate an m2m signal >>> b1.authors = [] >>> models.signals.post_delete.disconnect(post_delete_test) >>> models.signals.pre_delete.disconnect(pre_delete_test) >>> models.signals.post_save.disconnect(post_save_test) >>> models.signals.pre_save.disconnect(pre_save_test) # Check that all our signals got disconnected properly. >>> post_signals = (len(models.signals.pre_save.receivers), ... len(models.signals.post_save.receivers), ... len(models.signals.pre_delete.receivers), ... len(models.signals.post_delete.receivers)) >>> pre_signals == post_signals True """}
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Cache\Events; class KeyWritten extends CacheEvent { /** * The value that was written. * * @var mixed */ public $value; /** * The number of seconds the key should be valid. * * @var int|null */ public $seconds; /** * Create a new event instance. * * @param string|null $storeName * @param string $key * @param mixed $value * @param int|null $seconds * @param array $tags */ public function __construct($storeName, $key, $value, $seconds = null, $tags = []) { parent::__construct($storeName, $key, $tags); $this->value = $value; $this->seconds = $seconds; } }
php
github
https://github.com/laravel/framework
src/Illuminate/Cache/Events/KeyWritten.php
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // This file defines the constructor for the eliminate passthrough iteration // arguments pass. #ifndef TENSORFLOW_CORE_TRANSFORMS_ELIMINATE_PASSTHROUGH_ITER_ARGS_PASS_H_ #define TENSORFLOW_CORE_TRANSFORMS_ELIMINATE_PASSTHROUGH_ITER_ARGS_PASS_H_ #include <memory> #include "mlir/Pass/Pass.h" // from @llvm-project namespace mlir { namespace tfg { // Creates a pass that eliminates passthrough iteration arguments from // region-based loop operations. std::unique_ptr<Pass> CreateEliminatePassthroughIterArgsPass(); } // namespace tfg } // namespace mlir #endif // TENSORFLOW_CORE_TRANSFORMS_ELIMINATE_PASSTHROUGH_ITER_ARGS_PASS_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/transforms/eliminate_passthrough_iter_args/pass.h
import json #JSON Encoder and Decoder library from datetime import datetime #TODO: Update/Remove default values of arguments ## JSONmodule # This class controls file IO and stores entries in a map class JSONmodule: ##The constructor #@param filename A file containg the entries in valud JSON # Loads the data from the json file into a dictionary def __init__(self, filename = "test"): self.updateMap(filename) ##Updates the map by reading values from the given file #@param filename The file containing the map in valid JSON format def updateMap (self, filename = "test"): r = open (filename, 'r') self.entryMap = json.load(r) r.close() ##Writes the dictionary to the file specified #@param filename The name of the output file def write (self, filename = "testout"): w = open (filename, 'w') json.dump(self.entryMap, w, indent=4) w.close() ##Extracts Month from Unix timestamp in the format "May 2015" #@param timestamp The timestamp to be converted #@retval month A human-readable month value as a string def timestampToMonth (self, timestamp = 0): return datetime.fromtimestamp(timestamp).strftime('%B %Y') ##Returns an array of entries for the given month #@param month The month for which entries are requested #@retval entries The entries for the specified month as an array def getEntriesByMonth (self, month = "May 2015"): return self.entryMap[month] ##Inserts a new entry in the dictionary and writes the updated map to the given file #@param newEntry A tuple containing the timestamp and the text of the new entry #@param filename The name of the file where the database will be written after the update def addEntry (self, newEntry, filename = "test"): month = self.timestampToMonth(newEntry[0]) print month if not month in self.entryMap: self.entryMap[month] = [] self.entryMap[month].append({"date": newEntry[0], "text":newEntry[1]}) ##Deletes the entry with the given timestamp from the map #@param toDelete A tuple containing the timestamp and the text of the entry to be deleted def 
removeEntry (self, toDelete): month = self.timestampToMonth(toDelete[0]) self.entryMap[month].remove(({"date": toDelete[0], "text":toDelete[1]}))
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp.osv import fields,osv from openerp import tools import openerp.addons.decimal_precision as dp class mrp_workorder(osv.osv): _name = "mrp.workorder" _description = "Work Order Report" _auto = False _columns = { 'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines 'date': fields.date('Date', readonly=True), 'product_id': fields.many2one('product.product', 'Product', readonly=True), 'product_tmpl_id': fields.many2one('product.template', 'Product Template', readonly=True), 'category_id': fields.many2one('product.category', 'Product Category', readonly=True), 'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), readonly=True), 'state': fields.selection([('draft','Draft'),('startworking', 'In Progress'),('pause','Pause'),('cancel','Cancelled'),('done','Finished')], 'Status', readonly=True), 'total_hours': fields.float('Total Hours', readonly=True), 'total_cycles': fields.float('Total Cycles', readonly=True), 'delay': fields.float('Delay', readonly=True), 'production_id': fields.many2one('mrp.production', 'Production', readonly=True), 'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', readonly=True), 'user_id': fields.many2one('res.users', 'Responsible'), 'routing_id': fields.many2one('mrp.routing', string='Routing', readonly=True), 'bom_id': fields.many2one('mrp.bom', 'Bill of Material', readonly=True), } def init(self, cr): tools.drop_view_if_exists(cr, 'mrp_workorder') cr.execute(""" create or replace view mrp_workorder as ( select date(wl.date_planned) as date, min(wl.id) as id, mp.product_id as product_id, p.product_tmpl_id, t.categ_id as category_id, sum(wl.hour) as total_hours, avg(wl.delay) as delay, (w.costs_hour*sum(wl.hour)) as total_cost, wl.production_id as production_id, wl.workcenter_id as workcenter_id, sum(wl.cycle) as total_cycles, count(*) 
as nbr, sum(mp.product_qty) as product_qty, wl.state as state, mp.user_id, mp.routing_id, mp.bom_id from mrp_production_workcenter_line wl left join mrp_workcenter w on (w.id = wl.workcenter_id) left join mrp_production mp on (mp.id = wl.production_id) left join product_product p on (mp.product_id=p.id) left join product_template t on (p.product_tmpl_id=t.id) group by w.costs_hour, mp.product_id, mp.name, mp.user_id, mp.routing_id, mp.bom_id, wl.state, wl.date_planned, wl.production_id, wl.workcenter_id, p.product_tmpl_id, t.categ_id )""")
unknown
codeparrot/codeparrot-clean
from decimal import Decimal from django.apps import apps from django.core import checks from django.db import models from django.test import TestCase, skipIfDBFeature from django.test.utils import isolate_apps from .models import Bar, FkToChar, Foo, PrimaryKeyCharModel class ForeignKeyTests(TestCase): def test_callable_default(self): """A lazy callable may be used for ForeignKey.default.""" a = Foo.objects.create(id=1, a='abc', d=Decimal('12.34')) b = Bar.objects.create(b='bcd') self.assertEqual(b.a, a) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_empty_string_fk(self): """ Empty strings foreign key values don't get converted to None (#19299). """ char_model_empty = PrimaryKeyCharModel.objects.create(string='') fk_model_empty = FkToChar.objects.create(out=char_model_empty) fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk) self.assertEqual(fk_model_empty.out, char_model_empty) @isolate_apps('model_fields') def test_warning_when_unique_true_on_fk(self): class Foo(models.Model): pass class FKUniqueTrue(models.Model): fk_field = models.ForeignKey(Foo, models.CASCADE, unique=True) model = FKUniqueTrue() expected_warnings = [ checks.Warning( 'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.', hint='ForeignKey(unique=True) is usually better served by a OneToOneField.', obj=FKUniqueTrue.fk_field.field, id='fields.W342', ) ] warnings = model.check() self.assertEqual(warnings, expected_warnings) def test_related_name_converted_to_text(self): rel_name = Bar._meta.get_field('a').remote_field.related_name self.assertIsInstance(rel_name, str) def test_abstract_model_pending_operations(self): """ Foreign key fields declared on abstract models should not add lazy relations to resolve relationship declared as string (#24215). 
""" pending_ops_before = list(apps._pending_operations.items()) class AbstractForeignKeyModel(models.Model): fk = models.ForeignKey('missing.FK', models.CASCADE) class Meta: abstract = True self.assertIs(AbstractForeignKeyModel._meta.apps, apps) self.assertEqual( pending_ops_before, list(apps._pending_operations.items()), 'Pending lookup added for a foreign key on an abstract model' ) @isolate_apps('model_fields', 'model_fields.tests') def test_abstract_model_app_relative_foreign_key(self): class AbstractReferent(models.Model): reference = models.ForeignKey('Referred', on_delete=models.CASCADE) class Meta: app_label = 'model_fields' abstract = True def assert_app_model_resolved(label): class Referred(models.Model): class Meta: app_label = label class ConcreteReferent(AbstractReferent): class Meta: app_label = label self.assertEqual(ConcreteReferent._meta.get_field('reference').related_model, Referred) assert_app_model_resolved('model_fields') assert_app_model_resolved('tests') @isolate_apps('model_fields') def test_to_python(self): class Foo(models.Model): pass class Bar(models.Model): fk = models.ForeignKey(Foo, models.CASCADE) self.assertEqual(Bar._meta.get_field('fk').to_python('1'), 1)
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Http\Middleware; use Closure; use Symfony\Component\HttpFoundation\Response; class CheckResponseForModifications { /** * Handle an incoming request. * * @param \Illuminate\Http\Request $request * @param \Closure $next * @return mixed */ public function handle($request, Closure $next) { $response = $next($request); if ($response instanceof Response) { $response->isNotModified($request); } return $response; } }
php
github
https://github.com/laravel/framework
src/Illuminate/Http/Middleware/CheckResponseForModifications.php
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .resource import Resource class LocalNetworkGateway(Resource): """A common class for general resource information. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: Resource tags. :type tags: dict :param local_network_address_space: Local network site address space. :type local_network_address_space: :class:`AddressSpace <azure.mgmt.network.v2016_09_01.models.AddressSpace>` :param gateway_ip_address: IP address of local network gateway. :type gateway_ip_address: str :param bgp_settings: Local network gateway's BGP speaker settings. :type bgp_settings: :class:`BgpSettings <azure.mgmt.network.v2016_09_01.models.BgpSettings>` :param resource_guid: The resource GUID property of the LocalNetworkGateway resource. :type resource_guid: str :ivar provisioning_state: The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :vartype provisioning_state: str :param etag: A unique read-only string that changes whenever the resource is updated. 
:type etag: str """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'local_network_address_space': {'required': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'}, 'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'}, 'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, } def __init__(self, local_network_address_space, id=None, location=None, tags=None, gateway_ip_address=None, bgp_settings=None, resource_guid=None, etag=None): super(LocalNetworkGateway, self).__init__(id=id, location=location, tags=tags) self.local_network_address_space = local_network_address_space self.gateway_ip_address = gateway_ip_address self.bgp_settings = bgp_settings self.resource_guid = resource_guid self.provisioning_state = None self.etag = etag
unknown
codeparrot/codeparrot-clean
//// [tests/cases/conformance/async/es5/functionDeclarations/asyncFunctionDeclaration11_es5.ts] //// //// [asyncFunctionDeclaration11_es5.ts] async function await(): Promise<void> { } //// [asyncFunctionDeclaration11_es5.js] "use strict"; function await() { return __awaiter(this, void 0, void 0, function* () { }); }
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/asyncFunctionDeclaration11_es5(target=es2015).js
//===--- TypeLayoutDumper.h - Tool to dump fixed type layouts ---*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // A tool to dump fixed-size type layouts in YAML format. // //===----------------------------------------------------------------------===// #ifndef SWIFT_IRGEN_TYPE_LAYOUT_DUMPER_H #define SWIFT_IRGEN_TYPE_LAYOUT_DUMPER_H #include "llvm/ADT/ArrayRef.h" namespace llvm { class raw_ostream; } // namespace llvm namespace swift { class ModuleDecl; namespace irgen { class IRGenModule; class TypeLayoutDumper { IRGenModule &IGM; public: explicit TypeLayoutDumper(IRGenModule &IGM) : IGM(IGM) {} void write(llvm::ArrayRef<ModuleDecl *> AllModules, llvm::raw_ostream &os) const; }; } // namespace irgen } // namespace swift #endif
c
github
https://github.com/apple/swift
lib/IRGen/TypeLayoutDumper.h
#!/usr/bin/python -tt # Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import re import sys RELEASE_NOTES_DIR = "releasenotes/notes/" IGNORE_FILES = ( 'releasenotes/notes/fix-get-deploy-info-port.yaml', # Newton 6.0.0 'releasenotes/notes/fix-mitaka-ipa-iscsi.yaml', # Newton 6.0.0 ) def main(): return_code = 0 for filename in os.listdir(RELEASE_NOTES_DIR): file_path = os.path.join(RELEASE_NOTES_DIR, filename) if not os.path.isfile(file_path): continue if not file_path.endswith('.yaml'): continue if file_path in IGNORE_FILES: continue if not re.search(r'.*-[0-9a-f]{16}\.yaml', file_path): return_code = 1 print("Error: Release notes file: {!r} was not created with " "'reno new'".format(file_path)) return return_code if '__main__' == __name__: sys.exit(main())
unknown
codeparrot/codeparrot-clean
import datetime import sys import socket from socket import error as SocketError, timeout as SocketTimeout import warnings from .packages import six try: # Python 3 from http.client import HTTPConnection as _HTTPConnection, HTTPException except ImportError: from httplib import HTTPConnection as _HTTPConnection, HTTPException class DummyConnection(object): "Used to detect a failed ConnectionCls import." pass try: # Compiled with SSL? HTTPSConnection = DummyConnection import ssl BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. ssl = None class BaseSSLError(BaseException): pass try: # Python 3: # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError except NameError: # Python 2: class ConnectionError(Exception): pass from .exceptions import ( NewConnectionError, ConnectTimeoutError, SubjectAltNameWarning, SystemTimeWarning, ) from .packages.ssl_match_hostname import match_hostname from .util.ssl_ import ( resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, assert_fingerprint, ) from .util import connection port_by_scheme = { 'http': 80, 'https': 443, } RECENT_DATE = datetime.date(2014, 1, 1) class HTTPConnection(_HTTPConnection, object): """ Based on httplib.HTTPConnection but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - ``source_address``: Set the source address for the current connection. .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x - ``socket_options``: Set specific options on the underlying socket. 
If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass:: HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port = port_by_scheme['http'] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] #: Whether this connection verifies the host's certificate. is_verified = False def __init__(self, *args, **kw): if six.PY3: # Python 3 kw.pop('strict', None) # Pre-set source_address in case we have an older Python like 2.6. self.source_address = kw.get('source_address') if sys.version_info < (2, 7): # Python 2.6 # _HTTPConnection on Python 2.6 will balk at this keyword arg, but # not newer versions. We can still use it when creating a # connection though, so we pop it *after* we have saved it as # self.source_address. kw.pop('source_address', None) #: The socket options provided by the user. If no options are #: provided, we use the default options. self.socket_options = kw.pop('socket_options', self.default_socket_options) # Superclass also sets self.source_address in Python 2.7+. _HTTPConnection.__init__(self, *args, **kw) def _new_conn(self): """ Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection( (self.host, self.port), self.timeout, **extra_kw) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. 
(connect timeout=%s)" % (self.host, self.timeout)) except SocketError as e: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn def _prepare_conn(self, conn): self.sock = conn # the _tunnel_host attribute was added in python 2.6.3 (via # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do # not have them. if getattr(self, '_tunnel_host', None): # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # Mark this connection as not reusable self.auto_open = 0 def connect(self): conn = self._new_conn() self._prepare_conn(conn) class HTTPSConnection(HTTPConnection): default_port = port_by_scheme['https'] def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw): HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) self.key_file = key_file self.cert_file = cert_file # Required property for Google AppEngine 1.9.0 which otherwise causes # HTTPS requests to go out as HTTP. (See Issue #356) self._protocol = 'https' def connect(self): conn = self._new_conn() self._prepare_conn(conn) self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file) class VerifiedHTTPSConnection(HTTPSConnection): """ Based on httplib.HTTPSConnection but wraps the socket with SSL certification. 
""" cert_reqs = None ca_certs = None ca_cert_dir = None ssl_version = None assert_fingerprint = None def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None): if (ca_certs or ca_cert_dir) and cert_reqs is None: cert_reqs = 'CERT_REQUIRED' self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def connect(self): # Add certificate verification conn = self._new_conn() resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) hostname = self.host if getattr(self, '_tunnel_host', None): # _tunnel_host was added in Python 2.6.3 # (See: http://hg.python.org/cpython/rev/0f57b30a152f) self.sock = conn # Calls self._set_hostport(), so self.host is # self._tunnel_host below. self._tunnel() # Mark this connection as not reusable self.auto_open = 0 # Override the host with the one we're requesting data from. hostname = self._tunnel_host is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn(( 'System time is way off (before {0}). 
This will probably ' 'lead to SSL verification errors').format(RECENT_DATE), SystemTimeWarning ) # Wrap socket using verification with the root certs in # trusted_root_certs self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file, cert_reqs=resolved_cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, server_hostname=hostname, ssl_version=resolved_ssl_version) if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint) elif resolved_cert_reqs != ssl.CERT_NONE \ and self.assert_hostname is not False: cert = self.sock.getpeercert() if not cert.get('subjectAltName', ()): warnings.warn(( 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' '`commonName` for now. This feature is being removed by major browsers and ' 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' 'for details.)'.format(hostname)), SubjectAltNameWarning ) match_hostname(cert, self.assert_hostname or hostname) self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or self.assert_fingerprint is not None) if ssl: # Make a copy for testing. UnverifiedHTTPSConnection = HTTPSConnection HTTPSConnection = VerifiedHTTPSConnection else: HTTPSConnection = DummyConnection
unknown
codeparrot/codeparrot-clean
// Macros and other things needed by ceval.c, and bytecodes.c /* Computed GOTOs, or the-optimization-commonly-but-improperly-known-as-"threaded code" using gcc's labels-as-values extension (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html). The traditional bytecode evaluation loop uses a "switch" statement, which decent compilers will optimize as a single indirect branch instruction combined with a lookup table of jump addresses. However, since the indirect jump instruction is shared by all opcodes, the CPU will have a hard time making the right prediction for where to jump next (actually, it will be always wrong except in the uncommon case of a sequence of several identical opcodes). "Threaded code" in contrast, uses an explicit jump table and an explicit indirect jump instruction at the end of each opcode. Since the jump instruction is at a different address for each opcode, the CPU will make a separate prediction for each of these instructions, which is equivalent to predicting the second opcode of each opcode pair. These predictions have a much better chance to turn out valid, especially in small bytecode loops. A mispredicted branch on a modern CPU flushes the whole pipeline and can cost several CPU cycles (depending on the pipeline depth), and potentially many more instructions (depending on the pipeline width). A correctly predicted branch, however, is nearly free. At the time of this writing, the "threaded code" version is up to 15-20% faster than the normal "switch" version, depending on the compiler and the CPU architecture. NOTE: care must be taken that the compiler doesn't try to "optimize" the indirect jumps by sharing them between all opcodes. Such optimizations can be disabled on gcc by using the -fno-gcse flag (or possibly -fno-crossjumping). */ /* Use macros rather than inline functions, to make it as clear as possible * to the C compiler that the tracing check is a simple test then branch. 
* We want to be sure that the compiler knows this before it generates * the CFG. */ #ifdef WITH_DTRACE #define OR_DTRACE_LINE | (PyDTrace_LINE_ENABLED() ? 255 : 0) #else #define OR_DTRACE_LINE #endif #ifdef HAVE_COMPUTED_GOTOS #ifndef USE_COMPUTED_GOTOS #define USE_COMPUTED_GOTOS 1 #endif #else #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS #error "Computed gotos are not supported on this compiler." #endif #undef USE_COMPUTED_GOTOS #define USE_COMPUTED_GOTOS 0 #endif #ifdef Py_STATS #define INSTRUCTION_STATS(op) \ do { \ PyStats *s = _PyStats_GET(); \ OPCODE_EXE_INC(op); \ if (s) s->opcode_stats[lastopcode].pair_count[op]++; \ lastopcode = op; \ } while (0) #else #define INSTRUCTION_STATS(op) ((void)0) #endif #ifdef Py_STATS # define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg, int lastopcode # define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg, lastopcode #else # define TAIL_CALL_PARAMS _PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate, _Py_CODEUNIT *next_instr, const void *instruction_funcptr_table, int oparg # define TAIL_CALL_ARGS frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, oparg #endif #if _Py_TAIL_CALL_INTERP # if defined(__clang__) || defined(__GNUC__) # if !_Py__has_attribute(preserve_none) || !_Py__has_attribute(musttail) # error "This compiler does not have support for efficient tail calling." # endif # elif defined(_MSC_VER) && (_MSC_VER < 1950) # error "You need at least VS 2026 / PlatformToolset v145 for tail calling." 
# endif # if defined(_MSC_VER) && !defined(__clang__) # define Py_MUSTTAIL [[msvc::musttail]] # define Py_PRESERVE_NONE_CC __preserve_none # else # define Py_MUSTTAIL __attribute__((musttail)) # define Py_PRESERVE_NONE_CC __attribute__((preserve_none)) # endif typedef PyObject *(Py_PRESERVE_NONE_CC *py_tail_call_funcptr)(TAIL_CALL_PARAMS); # define DISPATCH_TABLE_VAR instruction_funcptr_table # define DISPATCH_TABLE instruction_funcptr_handler_table # define TRACING_DISPATCH_TABLE instruction_funcptr_tracing_table # define TARGET(op) Py_NO_INLINE PyObject *Py_PRESERVE_NONE_CC _TAIL_CALL_##op(TAIL_CALL_PARAMS) # define DISPATCH_GOTO() \ do { \ Py_MUSTTAIL return (((py_tail_call_funcptr *)instruction_funcptr_table)[opcode])(TAIL_CALL_ARGS); \ } while (0) # define DISPATCH_GOTO_NON_TRACING() \ do { \ Py_MUSTTAIL return (((py_tail_call_funcptr *)DISPATCH_TABLE)[opcode])(TAIL_CALL_ARGS); \ } while (0) # define JUMP_TO_LABEL(name) \ do { \ Py_MUSTTAIL return (_TAIL_CALL_##name)(TAIL_CALL_ARGS); \ } while (0) # ifdef Py_STATS # define JUMP_TO_PREDICTED(name) \ do { \ Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg, lastopcode); \ } while (0) # else # define JUMP_TO_PREDICTED(name) \ do { \ Py_MUSTTAIL return (_TAIL_CALL_##name)(frame, stack_pointer, tstate, this_instr, instruction_funcptr_table, oparg); \ } while (0) # endif # define LABEL(name) TARGET(name) #elif USE_COMPUTED_GOTOS # define DISPATCH_TABLE_VAR opcode_targets # define DISPATCH_TABLE opcode_targets_table # define TRACING_DISPATCH_TABLE opcode_tracing_targets_table # define TARGET(op) TARGET_##op: # define DISPATCH_GOTO() goto *opcode_targets[opcode] # define DISPATCH_GOTO_NON_TRACING() goto *DISPATCH_TABLE[opcode]; # define JUMP_TO_LABEL(name) goto name; # define JUMP_TO_PREDICTED(name) goto PREDICTED_##name; # define LABEL(name) name: #else # define TARGET(op) case op: TARGET_##op: # define DISPATCH_GOTO() dispatch_code = opcode | 
tracing_mode ; goto dispatch_opcode # define DISPATCH_GOTO_NON_TRACING() dispatch_code = opcode; goto dispatch_opcode # define JUMP_TO_LABEL(name) goto name; # define JUMP_TO_PREDICTED(name) goto PREDICTED_##name; # define LABEL(name) name: #endif #if (_Py_TAIL_CALL_INTERP || USE_COMPUTED_GOTOS) && _Py_TIER2 # define IS_JIT_TRACING() (DISPATCH_TABLE_VAR == TRACING_DISPATCH_TABLE) # define ENTER_TRACING() \ DISPATCH_TABLE_VAR = TRACING_DISPATCH_TABLE; # define LEAVE_TRACING() \ DISPATCH_TABLE_VAR = DISPATCH_TABLE; #else # define IS_JIT_TRACING() (tracing_mode != 0) # define ENTER_TRACING() tracing_mode = 255 # define LEAVE_TRACING() tracing_mode = 0 #endif #if _Py_TIER2 #define STOP_TRACING() \ do { \ if (IS_JIT_TRACING()) { \ LEAVE_TRACING(); \ _PyJit_FinalizeTracing(tstate, 0); \ } \ } while (0); #else #define STOP_TRACING() ((void)(0)); #endif /* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */ #ifdef Py_DEBUG #define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \ lltrace_instruction(frame, stack_pointer, next_instr, opcode, oparg); } #else #define PRE_DISPATCH_GOTO() ((void)0) #endif #ifdef Py_DEBUG #define LLTRACE_RESUME_FRAME() \ do { \ _PyFrame_SetStackPointer(frame, stack_pointer); \ int lltrace = maybe_lltrace_resume_frame(frame, GLOBALS()); \ stack_pointer = _PyFrame_GetStackPointer(frame); \ frame->lltrace = lltrace; \ } while (0) #else #define LLTRACE_RESUME_FRAME() ((void)0) #endif #ifdef Py_GIL_DISABLED #define QSBR_QUIESCENT_STATE(tstate) _Py_qsbr_quiescent_state(((_PyThreadStateImpl *)tstate)->qsbr) #else #define QSBR_QUIESCENT_STATE(tstate) #endif /* Do interpreter dispatch accounting for tracing and instrumentation */ #define DISPATCH() \ { \ assert(frame->stackpointer == NULL); \ NEXTOPARG(); \ PRE_DISPATCH_GOTO(); \ DISPATCH_GOTO(); \ } #define DISPATCH_NON_TRACING() \ { \ assert(frame->stackpointer == NULL); \ NEXTOPARG(); \ PRE_DISPATCH_GOTO(); \ DISPATCH_GOTO_NON_TRACING(); \ } #define DISPATCH_SAME_OPARG() \ { \ opcode = 
next_instr->op.code; \ PRE_DISPATCH_GOTO(); \ DISPATCH_GOTO_NON_TRACING(); \ } #define DISPATCH_INLINED(NEW_FRAME) \ do { \ assert(tstate->interp->eval_frame == NULL); \ _PyFrame_SetStackPointer(frame, stack_pointer); \ assert((NEW_FRAME)->previous == frame); \ frame = tstate->current_frame = (NEW_FRAME); \ CALL_STAT_INC(inlined_py_calls); \ JUMP_TO_LABEL(start_frame); \ } while (0) /* Tuple access macros */ #ifndef Py_DEBUG #define GETITEM(v, i) PyTuple_GET_ITEM((v), (i)) #else static inline PyObject * GETITEM(PyObject *v, Py_ssize_t i) { assert(PyTuple_Check(v)); assert(i >= 0); assert(i < PyTuple_GET_SIZE(v)); return PyTuple_GET_ITEM(v, i); } #endif /* Code access macros */ /* The integer overflow is checked by an assertion below. */ #define INSTR_OFFSET() ((int)(next_instr - _PyFrame_GetBytecode(frame))) #define NEXTOPARG() do { \ _Py_CODEUNIT word = {.cache = FT_ATOMIC_LOAD_UINT16_RELAXED(*(uint16_t*)next_instr)}; \ opcode = word.op.code; \ oparg = word.op.arg; \ } while (0) /* JUMPBY makes the generator identify the instruction as a jump. SKIP_OVER is * for advancing to the next instruction, taking into account cache entries * and skipped instructions. */ #define JUMPBY(x) (next_instr += (x)) #define SKIP_OVER(x) (next_instr += (x)) #define STACK_LEVEL() ((int)(stack_pointer - _PyFrame_Stackbase(frame))) #define STACK_SIZE() (_PyFrame_GetCode(frame)->co_stacksize) #define WITHIN_STACK_BOUNDS() \ (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE())) #if defined(Py_DEBUG) && !defined(_Py_JIT) // This allows temporary stack "overflows", provided it's all in the cache at any point of time. 
#define WITHIN_STACK_BOUNDS_IGNORING_CACHE() \ (frame->owner == FRAME_OWNED_BY_INTERPRETER || (STACK_LEVEL() >= 0 && (STACK_LEVEL()) <= STACK_SIZE())) #else #define WITHIN_STACK_BOUNDS_IGNORING_CACHE WITHIN_STACK_BOUNDS #endif /* Data access macros */ #define FRAME_CO_CONSTS (_PyFrame_GetCode(frame)->co_consts) #define FRAME_CO_NAMES (_PyFrame_GetCode(frame)->co_names) /* Local variable macros */ #define LOCALS_ARRAY (frame->localsplus) #define GETLOCAL(i) (frame->localsplus[i]) #ifdef Py_STATS #define UPDATE_MISS_STATS(INSTNAME) \ do { \ STAT_INC(opcode, miss); \ STAT_INC((INSTNAME), miss); \ /* The counter is always the first cache entry: */ \ if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) { \ STAT_INC((INSTNAME), deopt); \ } \ } while (0) #else #define UPDATE_MISS_STATS(INSTNAME) ((void)0) #endif // Try to lock an object in the free threading build, if it's not already // locked. Use with a DEOPT_IF() to deopt if the object is already locked. // These are no-ops in the default GIL build. The general pattern is: // // DEOPT_IF(!LOCK_OBJECT(op)); // if (/* condition fails */) { // UNLOCK_OBJECT(op); // DEOPT_IF(true); // } // ... // UNLOCK_OBJECT(op); // // NOTE: The object must be unlocked on every exit code path and you should // avoid any potentially escaping calls (like PyStackRef_CLOSE) while the // object is locked. 
#ifdef Py_GIL_DISABLED # define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex) # define UNLOCK_OBJECT(op) PyMutex_Unlock(&(_PyObject_CAST(op))->ob_mutex) #else # define LOCK_OBJECT(op) (1) # define UNLOCK_OBJECT(op) ((void)0) #endif #define GLOBALS() frame->f_globals #define BUILTINS() frame->f_builtins #define LOCALS() frame->f_locals #define CONSTS() _PyFrame_GetCode(frame)->co_consts #define NAMES() _PyFrame_GetCode(frame)->co_names #define DTRACE_FUNCTION_ENTRY() \ if (PyDTrace_FUNCTION_ENTRY_ENABLED()) { \ dtrace_function_entry(frame); \ } /* This takes a uint16_t instead of a _Py_BackoffCounter, * because it is used directly on the cache entry in generated code, * which is always an integral type. */ // Force re-specialization when tracing a side exit to get good side exits. #define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \ backoff_counter_triggers(forge_backoff_counter((COUNTER))) #define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \ do { \ (COUNTER) = advance_backoff_counter((COUNTER)); \ } while (0); #define PAUSE_ADAPTIVE_COUNTER(COUNTER) \ do { \ (COUNTER) = pause_backoff_counter((COUNTER)); \ } while (0); #ifdef ENABLE_SPECIALIZATION /* Multiple threads may execute these concurrently if thread-local bytecode is * disabled and they all execute the main copy of the bytecode. Specialization * is disabled in that case so the value is unused, but the RMW cycle should be * free of data races. 
*/ #define RECORD_BRANCH_TAKEN(bitset, flag) \ FT_ATOMIC_STORE_UINT16_RELAXED( \ bitset, (FT_ATOMIC_LOAD_UINT16_RELAXED(bitset) << 1) | (flag)) #else #define RECORD_BRANCH_TAKEN(bitset, flag) #endif #define UNBOUNDLOCAL_ERROR_MSG \ "cannot access local variable '%s' where it is not associated with a value" #define UNBOUNDFREE_ERROR_MSG \ "cannot access free variable '%s' where it is not associated with a value" \ " in enclosing scope" #define NAME_ERROR_MSG "name '%.200s' is not defined" // If a trace function sets a new f_lineno and // *then* raises, we use the destination when searching // for an exception handler, displaying the traceback, and so on #define INSTRUMENTED_JUMP(src, dest, event) \ do { \ if (tstate->tracing) {\ next_instr = dest; \ } else { \ _PyFrame_SetStackPointer(frame, stack_pointer); \ next_instr = _Py_call_instrumentation_jump(this_instr, tstate, event, frame, src, dest); \ stack_pointer = _PyFrame_GetStackPointer(frame); \ if (next_instr == NULL) { \ next_instr = (dest)+1; \ JUMP_TO_LABEL(error); \ } \ } \ } while (0); static inline int _Py_EnterRecursivePy(PyThreadState *tstate) { return (tstate->py_recursion_remaining-- <= 0) && _Py_CheckRecursiveCallPy(tstate); } static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate) { tstate->py_recursion_remaining++; } /* Implementation of "macros" that modify the instruction pointer, * stack pointer, or frame pointer. * These need to treated differently by tier 1 and 2. * The Tier 1 version is here; Tier 2 is inlined in ceval.c. */ #define LOAD_IP(OFFSET) do { \ next_instr = frame->instr_ptr + (OFFSET); \ } while (0) /* There's no STORE_IP(), it's inlined by the code generator. */ #define LOAD_SP() \ stack_pointer = _PyFrame_GetStackPointer(frame) #define SAVE_SP() \ _PyFrame_SetStackPointer(frame, stack_pointer) /* Tier-switching macros. 
*/ #define TIER1_TO_TIER2(EXECUTOR) \ do { \ OPT_STAT_INC(traces_executed); \ next_instr = _Py_jit_entry((EXECUTOR), frame, stack_pointer, tstate); \ frame = tstate->current_frame; \ stack_pointer = _PyFrame_GetStackPointer(frame); \ int keep_tracing_bit = (uintptr_t)next_instr & 1; \ next_instr = (_Py_CODEUNIT *)(((uintptr_t)next_instr) & (~1)); \ if (next_instr == NULL) { \ /* gh-140104: The exception handler expects frame->instr_ptr to after this_instr, not this_instr! */ \ next_instr = frame->instr_ptr + 1; \ JUMP_TO_LABEL(error); \ } \ if (keep_tracing_bit) { \ assert(uop_buffer_length(&((_PyThreadStateImpl *)tstate)->jit_tracer_state->code_buffer)); \ ENTER_TRACING(); \ DISPATCH_NON_TRACING(); \ } \ DISPATCH(); \ } while (0) #define TIER2_TO_TIER2(EXECUTOR) \ do { \ OPT_STAT_INC(traces_executed); \ current_executor = (EXECUTOR); \ goto tier2_start; \ } while (0) #define GOTO_TIER_ONE_SETUP \ tstate->current_executor = NULL; \ OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \ _PyFrame_SetStackPointer(frame, stack_pointer); #define GOTO_TIER_ONE(TARGET) \ do \ { \ GOTO_TIER_ONE_SETUP \ return (_Py_CODEUNIT *)(TARGET); \ } while (0) #define GOTO_TIER_ONE_CONTINUE_TRACING(TARGET) \ do \ { \ GOTO_TIER_ONE_SETUP \ return (_Py_CODEUNIT *)(((uintptr_t)(TARGET))| 1); \ } while (0) #define CURRENT_OPARG() (next_uop[-1].oparg) #define CURRENT_OPERAND0_64() (next_uop[-1].operand0) #define CURRENT_OPERAND1_64() (next_uop[-1].operand1) #define CURRENT_OPERAND0_32() (next_uop[-1].operand0) #define CURRENT_OPERAND1_32() (next_uop[-1].operand1) #define CURRENT_OPERAND0_16() (next_uop[-1].operand0) #define CURRENT_OPERAND1_16() (next_uop[-1].operand1) #define CURRENT_TARGET() (next_uop[-1].target) #define JUMP_TO_JUMP_TARGET() goto jump_to_jump_target #define JUMP_TO_ERROR() goto jump_to_error_target /* Stackref macros */ /* How much scratch space to give stackref to PyObject* conversion. 
*/ #define MAX_STACKREF_SCRATCH 10 #define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \ /* +1 because vectorcall might use -1 to write self */ \ PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \ PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp); #define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \ /* +1 because we +1 previously */ \ _PyObjectArray_Free(NAME - 1, NAME##_temp); #define CONVERSION_FAILED(NAME) ((NAME) == NULL) #if defined(Py_DEBUG) && !defined(_Py_JIT) #define SET_CURRENT_CACHED_VALUES(N) current_cached_values = (N) #define CHECK_CURRENT_CACHED_VALUES(N) assert(current_cached_values == (N)) #else #define SET_CURRENT_CACHED_VALUES(N) ((void)0) #define CHECK_CURRENT_CACHED_VALUES(N) ((void)0) #endif #define IS_PEP523_HOOKED(tstate) (tstate->interp->eval_frame != NULL) static inline int check_periodics(PyThreadState *tstate) { _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY(); QSBR_QUIESCENT_STATE(tstate); if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) { return _Py_HandlePending(tstate); } return 0; } // Mark the generator as executing. Returns true if the state was changed, // false if it was already executing or finished. static inline bool gen_try_set_executing(PyGenObject *gen) { #ifdef Py_GIL_DISABLED if (!_PyObject_IsUniquelyReferenced((PyObject *)gen)) { int8_t frame_state = _Py_atomic_load_int8_relaxed(&gen->gi_frame_state); while (frame_state < FRAME_SUSPENDED_YIELD_FROM_LOCKED) { if (_Py_atomic_compare_exchange_int8(&gen->gi_frame_state, &frame_state, FRAME_EXECUTING)) { return true; } } // NB: We return false for FRAME_SUSPENDED_YIELD_FROM_LOCKED as well. // That case is rare enough that we can just handle it in the deopt. return false; } #endif // Use faster non-atomic modifications in the GIL-enabled build and when // the object is uniquely referenced in the free-threaded build. 
if (gen->gi_frame_state < FRAME_EXECUTING) { assert(gen->gi_frame_state != FRAME_SUSPENDED_YIELD_FROM_LOCKED); gen->gi_frame_state = FRAME_EXECUTING; return true; } return false; }
c
github
https://github.com/python/cpython
Python/ceval_macros.h
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for swift.common.utils""" from __future__ import print_function from test.unit import temptree, debug_logger, make_timestamp_iter, with_tempdir import ctypes import contextlib import errno import eventlet import eventlet.debug import eventlet.event import eventlet.patcher import functools import grp import logging import platform import os import mock import pwd import random import re import socket import string import sys import json import math import inspect import six from six import BytesIO, StringIO from six.moves.queue import Queue, Empty from six.moves import range from textwrap import dedent import tempfile import time import unittest import fcntl import shutil from getpass import getuser from shutil import rmtree from functools import partial from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp from netifaces import AF_INET6 from mock import MagicMock, patch from six.moves.configparser import NoSectionError, NoOptionError from uuid import uuid4 from swift.common.exceptions import Timeout, MessageTimeout, \ ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \ MimeInvalid from swift.common import utils from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6, \ set_swift_dir from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.header_key_dict import HeaderKeyDict from swift.common.storage_policy import 
POLICIES, reload_storage_policies from swift.common.swob import Request, Response from test.unit import FakeLogger, requires_o_tmpfile_support, \ quiet_eventlet_exceptions threading = eventlet.patcher.original('threading') class MockOs(object): def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None): if pass_funcs is None: pass_funcs = [] if called_funcs is None: called_funcs = [] if raise_funcs is None: raise_funcs = [] self.closed_fds = [] for func in pass_funcs: setattr(self, func, self.pass_func) self.called_funcs = {} for func in called_funcs: c_func = partial(self.called_func, func) setattr(self, func, c_func) for func in raise_funcs: r_func = partial(self.raise_func, func) setattr(self, func, r_func) def pass_func(self, *args, **kwargs): pass setgroups = chdir = setsid = setgid = setuid = umask = pass_func def called_func(self, name, *args, **kwargs): self.called_funcs[name] = args def raise_func(self, name, *args, **kwargs): self.called_funcs[name] = args raise OSError() def dup2(self, source, target): self.closed_fds.append(target) def geteuid(self): '''Pretend we are running as root.''' return 0 def __getattr__(self, name): # I only over-ride portions of the os module try: return object.__getattr__(self, name) except AttributeError: return getattr(os, name) class MockUdpSocket(object): def __init__(self, sendto_errno=None): self.sent = [] self.sendto_errno = sendto_errno def sendto(self, data, target): if self.sendto_errno: raise socket.error(self.sendto_errno, 'test errno %s' % self.sendto_errno) self.sent.append((data, target)) def close(self): pass class MockSys(object): def __init__(self): self.stdin = TemporaryFile('w') self.stdout = TemporaryFile('r') self.stderr = TemporaryFile('r') self.__stderr__ = self.stderr self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()] def reset_loggers(): if hasattr(utils.get_logger, 'handler4logger'): for logger, handler in utils.get_logger.handler4logger.items(): 
logger.removeHandler(handler) delattr(utils.get_logger, 'handler4logger') if hasattr(utils.get_logger, 'console_handler4logger'): for logger, h in utils.get_logger.console_handler4logger.items(): logger.removeHandler(h) delattr(utils.get_logger, 'console_handler4logger') # Reset the LogAdapter class thread local state. Use get_logger() here # to fetch a LogAdapter instance because the items from # get_logger.handler4logger above are the underlying logger instances, # not the LogAdapter. utils.get_logger(None).thread_locals = (None, None) def reset_logger_state(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): reset_loggers() try: return f(self, *args, **kwargs) finally: reset_loggers() return wrapper class TestTimestamp(unittest.TestCase): """Tests for swift.common.utils.Timestamp""" def test_invalid_input(self): self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1) def test_invalid_string_conversion(self): t = utils.Timestamp.now() self.assertRaises(TypeError, str, t) def test_offset_limit(self): t = 1417462430.78693 # can't have a offset above MAX_OFFSET self.assertRaises(ValueError, utils.Timestamp, t, offset=utils.MAX_OFFSET + 1) # exactly max offset is fine ts = utils.Timestamp(t, offset=utils.MAX_OFFSET) self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff') # but you can't offset it further self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1) # unless you start below it ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1) self.assertEqual(utils.Timestamp(ts.internal, offset=1), '1417462430.78693_ffffffffffffffff') def test_normal_format_no_offset(self): expected = '1402436408.91203' test_values = ( '1402436408.91203', '1402436408.91203_00000000', '1402436408.912030000', '1402436408.912030000_0000000000000', '000001402436408.912030000', '000001402436408.912030000_0000000000', 1402436408.91203, 1402436408.912029, 1402436408.9120300000000000, 1402436408.91202999999999999, utils.Timestamp(1402436408.91203), 
utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.912029), utils.Timestamp(1402436408.912029, offset=0), utils.Timestamp('1402436408.91203'), utils.Timestamp('1402436408.91203', offset=0), utils.Timestamp('1402436408.91203_00000000'), utils.Timestamp('1402436408.91203_00000000', offset=0), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(timestamp.normal, expected) # timestamp instance can also compare to string or float self.assertEqual(timestamp, expected) self.assertEqual(timestamp, float(expected)) self.assertEqual(timestamp, utils.normalize_timestamp(expected)) def test_isoformat(self): expected = '2014-06-10T22:47:32.054580' test_values = ( '1402440452.05458', '1402440452.054579', '1402440452.05458_00000000', '1402440452.054579_00000000', '1402440452.054580000', '1402440452.054579999', '1402440452.054580000_0000000000000', '1402440452.054579999_0000ff00', '000001402440452.054580000', '000001402440452.0545799', '000001402440452.054580000_0000000000', '000001402440452.054579999999_00000fffff', 1402440452.05458, 1402440452.054579, 1402440452.0545800000000000, 1402440452.054579999, utils.Timestamp(1402440452.05458), utils.Timestamp(1402440452.0545799), utils.Timestamp(1402440452.05458, offset=0), utils.Timestamp(1402440452.05457999999, offset=0), utils.Timestamp(1402440452.05458, offset=100), utils.Timestamp(1402440452.054579, offset=100), utils.Timestamp('1402440452.05458'), utils.Timestamp('1402440452.054579999'), utils.Timestamp('1402440452.05458', offset=0), utils.Timestamp('1402440452.054579', offset=0), utils.Timestamp('1402440452.05458', offset=300), utils.Timestamp('1402440452.05457999', offset=300), utils.Timestamp('1402440452.05458_00000000'), utils.Timestamp('1402440452.05457999_00000000'), utils.Timestamp('1402440452.05458_00000000', offset=0), utils.Timestamp('1402440452.05457999_00000aaa', offset=0), utils.Timestamp('1402440452.05458_00000000', offset=400), utils.Timestamp('1402440452.054579_0a', 
offset=400), ) for value in test_values: self.assertEqual(utils.Timestamp(value).isoformat, expected) expected = '1970-01-01T00:00:00.000000' test_values = ( '0', '0000000000.00000', '0000000000.00000_ffffffffffff', 0, 0.0, ) for value in test_values: self.assertEqual(utils.Timestamp(value).isoformat, expected) def test_not_equal(self): ts = '1402436408.91203_0000000000000001' test_values = ( utils.Timestamp('1402436408.91203_0000000000000002'), utils.Timestamp('1402436408.91203'), utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91204), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.91203, offset=2), ) for value in test_values: self.assertTrue(value != ts) self.assertIs(True, utils.Timestamp(ts) == ts) # sanity self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts)) self.assertIs(False, utils.Timestamp(ts) != ts) self.assertIs(False, utils.Timestamp(ts) is None) self.assertIs(True, utils.Timestamp(ts) is not None) def test_no_force_internal_no_offset(self): """Test that internal is the same as normal with no offset""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000') self.assertEqual(utils.Timestamp(1402437380.58186).internal, '1402437380.58186') self.assertEqual(utils.Timestamp(1402437380.581859).internal, '1402437380.58186') self.assertEqual(utils.Timestamp(0).internal, utils.normalize_timestamp(0)) def test_no_force_internal_with_offset(self): """Test that internal always includes the offset if significant""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual(utils.Timestamp(0, offset=1).internal, '0000000000.00000_0000000000000001') self.assertEqual( utils.Timestamp(1402437380.58186, offset=16).internal, '1402437380.58186_0000000000000010') self.assertEqual( utils.Timestamp(1402437380.581859, offset=240).internal, '1402437380.58186_00000000000000f0') self.assertEqual( 
utils.Timestamp('1402437380.581859_00000001', offset=240).internal, '1402437380.58186_00000000000000f1') def test_force_internal(self): """Test that internal always includes the offset if forced""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True): self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000_0000000000000000') self.assertEqual(utils.Timestamp(1402437380.58186).internal, '1402437380.58186_0000000000000000') self.assertEqual(utils.Timestamp(1402437380.581859).internal, '1402437380.58186_0000000000000000') self.assertEqual(utils.Timestamp(0, offset=1).internal, '0000000000.00000_0000000000000001') self.assertEqual( utils.Timestamp(1402437380.58186, offset=16).internal, '1402437380.58186_0000000000000010') self.assertEqual( utils.Timestamp(1402437380.581859, offset=16).internal, '1402437380.58186_0000000000000010') def test_internal_format_no_offset(self): expected = '1402436408.91203_0000000000000000' test_values = ( '1402436408.91203', '1402436408.91203_00000000', '1402436408.912030000', '1402436408.912030000_0000000000000', '000001402436408.912030000', '000001402436408.912030000_0000000000', 1402436408.91203, 1402436408.9120300000000000, 1402436408.912029, 1402436408.912029999999999999, utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.912029), utils.Timestamp(1402436408.91202999999999999, offset=0), utils.Timestamp('1402436408.91203'), utils.Timestamp('1402436408.91203', offset=0), utils.Timestamp('1402436408.912029'), utils.Timestamp('1402436408.912029', offset=0), utils.Timestamp('1402436408.912029999999999'), utils.Timestamp('1402436408.912029999999999', offset=0), ) for value in test_values: # timestamp instance is always equivalent self.assertEqual(utils.Timestamp(value), expected) if utils.FORCE_INTERNAL: # the FORCE_INTERNAL flag makes the internal format always # include the offset portion of the timestamp even when it's # not significant and would be bad during 
upgrades self.assertEqual(utils.Timestamp(value).internal, expected) else: # unless we FORCE_INTERNAL, when there's no offset the # internal format is equivalent to the normalized format self.assertEqual(utils.Timestamp(value).internal, '1402436408.91203') def test_internal_format_with_offset(self): expected = '1402436408.91203_00000000000000f0' test_values = ( '1402436408.91203_000000f0', '1402436408.912030000_0000000000f0', '1402436408.912029_000000f0', '1402436408.91202999999_0000000000f0', '000001402436408.912030000_000000000f0', '000001402436408.9120299999_000000000f0', utils.Timestamp(1402436408.91203, offset=240), utils.Timestamp(1402436408.912029, offset=240), utils.Timestamp('1402436408.91203', offset=240), utils.Timestamp('1402436408.91203_00000000', offset=240), utils.Timestamp('1402436408.91203_0000000f', offset=225), utils.Timestamp('1402436408.9120299999', offset=240), utils.Timestamp('1402436408.9120299999_00000000', offset=240), utils.Timestamp('1402436408.9120299999_00000010', offset=224), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(timestamp.internal, expected) # can compare with offset if the string is internalized self.assertEqual(timestamp, expected) # if comparison value only includes the normalized portion and the # timestamp includes an offset, it is considered greater normal = utils.Timestamp(expected).normal self.assertTrue(timestamp > normal, '%r is not bigger than %r given %r' % ( timestamp, normal, value)) self.assertTrue(timestamp > float(normal), '%r is not bigger than %f given %r' % ( timestamp, float(normal), value)) def test_short_format_with_offset(self): expected = '1402436408.91203_f0' timestamp = utils.Timestamp(1402436408.91203, 0xf0) self.assertEqual(expected, timestamp.short) expected = '1402436408.91203' timestamp = utils.Timestamp(1402436408.91203) self.assertEqual(expected, timestamp.short) def test_raw(self): expected = 140243640891203 timestamp = utils.Timestamp(1402436408.91203) 
self.assertEqual(expected, timestamp.raw) # 'raw' does not include offset timestamp = utils.Timestamp(1402436408.91203, 0xf0) self.assertEqual(expected, timestamp.raw) def test_delta(self): def _assertWithinBounds(expected, timestamp): tolerance = 0.00001 minimum = expected - tolerance maximum = expected + tolerance self.assertTrue(float(timestamp) > minimum) self.assertTrue(float(timestamp) < maximum) timestamp = utils.Timestamp(1402436408.91203, delta=100) _assertWithinBounds(1402436408.91303, timestamp) self.assertEqual(140243640891303, timestamp.raw) timestamp = utils.Timestamp(1402436408.91203, delta=-100) _assertWithinBounds(1402436408.91103, timestamp) self.assertEqual(140243640891103, timestamp.raw) timestamp = utils.Timestamp(1402436408.91203, delta=0) _assertWithinBounds(1402436408.91203, timestamp) self.assertEqual(140243640891203, timestamp.raw) # delta is independent of offset timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100) self.assertEqual(140243640891303, timestamp.raw) self.assertEqual(42, timestamp.offset) # cannot go negative self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203, delta=-140243640891203) def test_int(self): expected = 1402437965 test_values = ( '1402437965.91203', '1402437965.91203_00000000', '1402437965.912030000', '1402437965.912030000_0000000000000', '000001402437965.912030000', '000001402437965.912030000_0000000000', 1402437965.91203, 1402437965.9120300000000000, 1402437965.912029, 1402437965.912029999999999999, utils.Timestamp(1402437965.91203), utils.Timestamp(1402437965.91203, offset=0), utils.Timestamp(1402437965.91203, offset=500), utils.Timestamp(1402437965.912029), utils.Timestamp(1402437965.91202999999999999, offset=0), utils.Timestamp(1402437965.91202999999999999, offset=300), utils.Timestamp('1402437965.91203'), utils.Timestamp('1402437965.91203', offset=0), utils.Timestamp('1402437965.91203', offset=400), utils.Timestamp('1402437965.912029'), utils.Timestamp('1402437965.912029', 
offset=0), utils.Timestamp('1402437965.912029', offset=200), utils.Timestamp('1402437965.912029999999999'), utils.Timestamp('1402437965.912029999999999', offset=0), utils.Timestamp('1402437965.912029999999999', offset=100), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(int(timestamp), expected) self.assertTrue(timestamp > expected) def test_float(self): expected = 1402438115.91203 test_values = ( '1402438115.91203', '1402438115.91203_00000000', '1402438115.912030000', '1402438115.912030000_0000000000000', '000001402438115.912030000', '000001402438115.912030000_0000000000', 1402438115.91203, 1402438115.9120300000000000, 1402438115.912029, 1402438115.912029999999999999, utils.Timestamp(1402438115.91203), utils.Timestamp(1402438115.91203, offset=0), utils.Timestamp(1402438115.91203, offset=500), utils.Timestamp(1402438115.912029), utils.Timestamp(1402438115.91202999999999999, offset=0), utils.Timestamp(1402438115.91202999999999999, offset=300), utils.Timestamp('1402438115.91203'), utils.Timestamp('1402438115.91203', offset=0), utils.Timestamp('1402438115.91203', offset=400), utils.Timestamp('1402438115.912029'), utils.Timestamp('1402438115.912029', offset=0), utils.Timestamp('1402438115.912029', offset=200), utils.Timestamp('1402438115.912029999999999'), utils.Timestamp('1402438115.912029999999999', offset=0), utils.Timestamp('1402438115.912029999999999', offset=100), ) tolerance = 0.00001 minimum = expected - tolerance maximum = expected + tolerance for value in test_values: timestamp = utils.Timestamp(value) self.assertTrue(float(timestamp) > minimum, '%f is not bigger than %f given %r' % ( timestamp, minimum, value)) self.assertTrue(float(timestamp) < maximum, '%f is not smaller than %f given %r' % ( timestamp, maximum, value)) # direct comparison of timestamp works too self.assertTrue(timestamp > minimum, '%s is not bigger than %f given %r' % ( timestamp.normal, minimum, value)) self.assertTrue(timestamp < maximum, '%s is not 
smaller than %f given %r' % ( timestamp.normal, maximum, value)) # ... even against strings self.assertTrue(timestamp > '%f' % minimum, '%s is not bigger than %s given %r' % ( timestamp.normal, minimum, value)) self.assertTrue(timestamp < '%f' % maximum, '%s is not smaller than %s given %r' % ( timestamp.normal, maximum, value)) def test_false(self): self.assertFalse(utils.Timestamp(0)) self.assertFalse(utils.Timestamp(0, offset=0)) self.assertFalse(utils.Timestamp('0')) self.assertFalse(utils.Timestamp('0', offset=0)) self.assertFalse(utils.Timestamp(0.0)) self.assertFalse(utils.Timestamp(0.0, offset=0)) self.assertFalse(utils.Timestamp('0.0')) self.assertFalse(utils.Timestamp('0.0', offset=0)) self.assertFalse(utils.Timestamp(00000000.00000000)) self.assertFalse(utils.Timestamp(00000000.00000000, offset=0)) self.assertFalse(utils.Timestamp('00000000.00000000')) self.assertFalse(utils.Timestamp('00000000.00000000', offset=0)) def test_true(self): self.assertTrue(utils.Timestamp(1)) self.assertTrue(utils.Timestamp(1, offset=1)) self.assertTrue(utils.Timestamp(0, offset=1)) self.assertTrue(utils.Timestamp('1')) self.assertTrue(utils.Timestamp('1', offset=1)) self.assertTrue(utils.Timestamp('0', offset=1)) self.assertTrue(utils.Timestamp(1.1)) self.assertTrue(utils.Timestamp(1.1, offset=1)) self.assertTrue(utils.Timestamp(0.0, offset=1)) self.assertTrue(utils.Timestamp('1.1')) self.assertTrue(utils.Timestamp('1.1', offset=1)) self.assertTrue(utils.Timestamp('0.0', offset=1)) self.assertTrue(utils.Timestamp(11111111.11111111)) self.assertTrue(utils.Timestamp(11111111.11111111, offset=1)) self.assertTrue(utils.Timestamp(00000000.00000000, offset=1)) self.assertTrue(utils.Timestamp('11111111.11111111')) self.assertTrue(utils.Timestamp('11111111.11111111', offset=1)) self.assertTrue(utils.Timestamp('00000000.00000000', offset=1)) def test_greater_no_offset(self): now = time.time() older = now - 1 timestamp = utils.Timestamp(now) test_values = ( 0, '0', 0.0, '0.0', 
'0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff', older, '%f' % older, '%f_0000ffff' % older, ) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp > value, '%r is not greater than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp > other, '%r is not greater than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp > other.normal, '%r is not greater than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp > other.internal, '%r is not greater than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp > float(other), '%r is not greater than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp > int(other), '%r is not greater than %r given %r' % ( timestamp, int(other), value)) def test_greater_with_offset(self): now = time.time() older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff', older, '%f' % older, '%f_0000ffff' % older, now, '%f' % now, '%f_00000000' % now, ) for offset in range(1, 1000, 100): timestamp = utils.Timestamp(now, offset=offset) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp > value, '%r is not greater than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp > other, '%r is not greater than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp > other.normal, '%r is not greater than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp > other.internal, '%r is not greater than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp > float(other), '%r is not greater than %r given %r' % 
( timestamp, float(other), value)) self.assertTrue(timestamp > int(other), '%r is not greater than %r given %r' % ( timestamp, int(other), value)) def test_smaller_no_offset(self): now = time.time() newer = now + 1 timestamp = utils.Timestamp(now) test_values = ( 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff', newer, '%f' % newer, '%f_0000ffff' % newer, ) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp < value, '%r is not smaller than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp < other, '%r is not smaller than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp < other.normal, '%r is not smaller than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp < other.internal, '%r is not smaller than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp < float(other), '%r is not smaller than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp < int(other), '%r is not smaller than %r given %r' % ( timestamp, int(other), value)) def test_smaller_with_offset(self): now = time.time() newer = now + 1 test_values = ( 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff', newer, '%f' % newer, '%f_0000ffff' % newer, ) for offset in range(1, 1000, 100): timestamp = utils.Timestamp(now, offset=offset) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp < value, '%r is not smaller than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp < other, '%r is not smaller than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp < other.normal, '%r is not smaller than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp < other.internal, '%r is not smaller than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp < 
float(other), '%r is not smaller than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp < int(other), '%r is not smaller than %r given %r' % ( timestamp, int(other), value)) def test_cmp_with_none(self): self.assertGreater(utils.Timestamp(0), None) self.assertGreater(utils.Timestamp(1.0), None) self.assertGreater(utils.Timestamp(1.0, 42), None) def test_ordering(self): given = [ '1402444820.62590_000000000000000a', '1402444820.62589_0000000000000001', '1402444821.52589_0000000000000004', '1402444920.62589_0000000000000004', '1402444821.62589_000000000000000a', '1402444821.72589_000000000000000a', '1402444920.62589_0000000000000002', '1402444820.62589_0000000000000002', '1402444820.62589_000000000000000a', '1402444820.62590_0000000000000004', '1402444920.62589_000000000000000a', '1402444820.62590_0000000000000002', '1402444821.52589_0000000000000002', '1402444821.52589_0000000000000000', '1402444920.62589', '1402444821.62589_0000000000000004', '1402444821.72589_0000000000000001', '1402444820.62590', '1402444820.62590_0000000000000001', '1402444820.62589_0000000000000004', '1402444821.72589_0000000000000000', '1402444821.52589_000000000000000a', '1402444821.72589_0000000000000004', '1402444821.62589', '1402444821.52589_0000000000000001', '1402444821.62589_0000000000000001', '1402444821.62589_0000000000000002', '1402444821.72589_0000000000000002', '1402444820.62589', '1402444920.62589_0000000000000001'] expected = [ '1402444820.62589', '1402444820.62589_0000000000000001', '1402444820.62589_0000000000000002', '1402444820.62589_0000000000000004', '1402444820.62589_000000000000000a', '1402444820.62590', '1402444820.62590_0000000000000001', '1402444820.62590_0000000000000002', '1402444820.62590_0000000000000004', '1402444820.62590_000000000000000a', '1402444821.52589', '1402444821.52589_0000000000000001', '1402444821.52589_0000000000000002', '1402444821.52589_0000000000000004', '1402444821.52589_000000000000000a', '1402444821.62589', 
'1402444821.62589_0000000000000001', '1402444821.62589_0000000000000002', '1402444821.62589_0000000000000004', '1402444821.62589_000000000000000a', '1402444821.72589', '1402444821.72589_0000000000000001', '1402444821.72589_0000000000000002', '1402444821.72589_0000000000000004', '1402444821.72589_000000000000000a', '1402444920.62589', '1402444920.62589_0000000000000001', '1402444920.62589_0000000000000002', '1402444920.62589_0000000000000004', '1402444920.62589_000000000000000a', ] # less visual version """ now = time.time() given = [ utils.Timestamp(now + i, offset=offset).internal for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0) for offset in (0, 1, 2, 4, 10) ] expected = [t for t in given] random.shuffle(given) """ self.assertEqual(len(given), len(expected)) # sanity timestamps = [utils.Timestamp(t) for t in given] # our expected values don't include insignificant offsets with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual( [t.internal for t in sorted(timestamps)], expected) # string sorting works as well self.assertEqual( sorted([t.internal for t in timestamps]), expected) def test_hashable(self): ts_0 = utils.Timestamp('1402444821.72589') ts_0_also = utils.Timestamp('1402444821.72589') self.assertEqual(ts_0, ts_0_also) # sanity self.assertEqual(hash(ts_0), hash(ts_0_also)) d = {ts_0: 'whatever'} self.assertIn(ts_0, d) # sanity self.assertIn(ts_0_also, d) class TestTimestampEncoding(unittest.TestCase): def setUp(self): t0 = utils.Timestamp(0.0) t1 = utils.Timestamp(997.9996) t2 = utils.Timestamp(999) t3 = utils.Timestamp(1000, 24) t4 = utils.Timestamp(1001) t5 = utils.Timestamp(1002.00040) # encodings that are expected when explicit = False self.non_explicit_encodings = ( ('0000001000.00000_18', (t3, t3, t3)), ('0000001000.00000_18', (t3, t3, None)), ) # mappings that are expected when explicit = True self.explicit_encodings = ( ('0000001000.00000_18+0+0', (t3, t3, t3)), ('0000001000.00000_18+0', (t3, t3, None)), ) # mappings that are 
expected when explicit = True or False self.encodings = ( ('0000001000.00000_18+0+186a0', (t3, t3, t4)), ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)), ('0000001000.00000_18-186a0+0', (t3, t2, t2)), ('0000001000.00000_18+0-186a0', (t3, t3, t2)), ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)), ('0000001000.00000_18', (t3, None, None)), ('0000001000.00000_18+186a0', (t3, t4, None)), ('0000001000.00000_18-186a0', (t3, t2, None)), ('0000001000.00000_18', (t3, None, t1)), ('0000001000.00000_18-5f5e100', (t3, t0, None)), ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)), ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)), ) # decodings that are expected when explicit = False self.non_explicit_decodings = ( ('0000001000.00000_18', (t3, t3, t3)), ('0000001000.00000_18+186a0', (t3, t4, t4)), ('0000001000.00000_18-186a0', (t3, t2, t2)), ('0000001000.00000_18+186a0', (t3, t4, t4)), ('0000001000.00000_18-186a0', (t3, t2, t2)), ('0000001000.00000_18-5f5e100', (t3, t0, t0)), ) # decodings that are expected when explicit = True self.explicit_decodings = ( ('0000001000.00000_18+0+0', (t3, t3, t3)), ('0000001000.00000_18+0', (t3, t3, None)), ('0000001000.00000_18', (t3, None, None)), ('0000001000.00000_18+186a0', (t3, t4, None)), ('0000001000.00000_18-186a0', (t3, t2, None)), ('0000001000.00000_18-5f5e100', (t3, t0, None)), ) # decodings that are expected when explicit = True or False self.decodings = ( ('0000001000.00000_18+0+186a0', (t3, t3, t4)), ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)), ('0000001000.00000_18-186a0+0', (t3, t2, t2)), ('0000001000.00000_18+0-186a0', (t3, t3, t2)), ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)), ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)), ) def _assertEqual(self, expected, actual, test): self.assertEqual(expected, actual, 'Got %s but expected %s for parameters %s' % (actual, expected, test)) def test_encoding(self): for test in self.explicit_encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], 
test[1][2], True) self._assertEqual(test[0], actual, test[1]) for test in self.non_explicit_encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], False) self._assertEqual(test[0], actual, test[1]) for explicit in (True, False): for test in self.encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], explicit) self._assertEqual(test[0], actual, test[1]) def test_decoding(self): for test in self.explicit_decodings: actual = utils.decode_timestamps(test[0], True) self._assertEqual(test[1], actual, test[0]) for test in self.non_explicit_decodings: actual = utils.decode_timestamps(test[0], False) self._assertEqual(test[1], actual, test[0]) for explicit in (True, False): for test in self.decodings: actual = utils.decode_timestamps(test[0], explicit) self._assertEqual(test[1], actual, test[0]) class TestUtils(unittest.TestCase): """Tests for swift.common.utils """ def setUp(self): utils.HASH_PATH_SUFFIX = 'endcap' utils.HASH_PATH_PREFIX = 'startcap' def test_get_zero_indexed_base_string(self): self.assertEqual(utils.get_zero_indexed_base_string('something', 0), 'something') self.assertEqual(utils.get_zero_indexed_base_string('something', None), 'something') self.assertEqual(utils.get_zero_indexed_base_string('something', 1), 'something-1') self.assertRaises(ValueError, utils.get_zero_indexed_base_string, 'something', 'not_integer') @with_tempdir def test_lock_path(self, tmpdir): # 2 locks with limit=1 must fail success = False with utils.lock_path(tmpdir, 0.1): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1): success = True self.assertFalse(success) # 2 locks with limit=2 must succeed success = False with utils.lock_path(tmpdir, 0.1, limit=2): try: with utils.lock_path(tmpdir, 0.1, limit=2): success = True except LockTimeout as exc: self.fail('Unexpected exception %s' % exc) self.assertTrue(success) # 3 locks with limit=2 must fail success = False with utils.lock_path(tmpdir, 0.1, limit=2): with 
utils.lock_path(tmpdir, 0.1, limit=2): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1): success = True self.assertFalse(success) @with_tempdir def test_lock_path_invalid_limit(self, tmpdir): success = False with self.assertRaises(ValueError): with utils.lock_path(tmpdir, 0.1, limit=0): success = True self.assertFalse(success) with self.assertRaises(ValueError): with utils.lock_path(tmpdir, 0.1, limit=-1): success = True self.assertFalse(success) with self.assertRaises(TypeError): with utils.lock_path(tmpdir, 0.1, limit='1'): success = True self.assertFalse(success) with self.assertRaises(TypeError): with utils.lock_path(tmpdir, 0.1, limit=1.1): success = True self.assertFalse(success) @with_tempdir def test_lock_path_num_sleeps(self, tmpdir): num_short_calls = [0] exception_raised = [False] def my_sleep(to_sleep): if to_sleep == 0.01: num_short_calls[0] += 1 else: raise Exception('sleep time changed: %s' % to_sleep) try: with mock.patch('swift.common.utils.sleep', my_sleep): with utils.lock_path(tmpdir): with utils.lock_path(tmpdir): pass except Exception as e: exception_raised[0] = True self.assertTrue('sleep time changed' in str(e)) self.assertEqual(num_short_calls[0], 11) self.assertTrue(exception_raised[0]) @with_tempdir def test_lock_path_class(self, tmpdir): with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout): exc = None exc2 = None success = False try: with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout): success = True except ReplicationLockTimeout as err: exc = err except LockTimeout as err: exc2 = err self.assertTrue(exc is not None) self.assertTrue(exc2 is None) self.assertTrue(not success) exc = None exc2 = None success = False try: with utils.lock_path(tmpdir, 0.1): success = True except ReplicationLockTimeout as err: exc = err except LockTimeout as err: exc2 = err self.assertTrue(exc is None) self.assertTrue(exc2 is not None) self.assertTrue(not success) def test_normalize_timestamp(self): # Test 
swift.common.utils.normalize_timestamp self.assertEqual(utils.normalize_timestamp('1253327593.48174'), "1253327593.48174") self.assertEqual(utils.normalize_timestamp(1253327593.48174), "1253327593.48174") self.assertEqual(utils.normalize_timestamp('1253327593.48'), "1253327593.48000") self.assertEqual(utils.normalize_timestamp(1253327593.48), "1253327593.48000") self.assertEqual(utils.normalize_timestamp('253327593.48'), "0253327593.48000") self.assertEqual(utils.normalize_timestamp(253327593.48), "0253327593.48000") self.assertEqual(utils.normalize_timestamp('1253327593'), "1253327593.00000") self.assertEqual(utils.normalize_timestamp(1253327593), "1253327593.00000") self.assertRaises(ValueError, utils.normalize_timestamp, '') self.assertRaises(ValueError, utils.normalize_timestamp, 'abc') def test_normalize_delete_at_timestamp(self): self.assertEqual( utils.normalize_delete_at_timestamp(1253327593), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp(1253327593.67890), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp('1253327593'), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp('1253327593.67890'), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp(-1253327593), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp(-1253327593.67890), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp('-1253327593'), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp('-1253327593.67890'), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp(71253327593), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp(71253327593.67890), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp('71253327593'), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp('71253327593.67890'), '9999999999') self.assertRaises(ValueError, utils.normalize_timestamp, '') self.assertRaises(ValueError, utils.normalize_timestamp, 
                          'abc')

    def test_last_modified_date_to_timestamp(self):
        """ISO-8601 Last-Modified strings convert to float epoch seconds."""
        expectations = {
            '1970-01-01T00:00:00.000000': 0.0,
            '2014-02-28T23:22:36.698390': 1393629756.698390,
            '2011-03-19T04:03:00.604554': 1300507380.604554,
        }
        for last_modified, ts in expectations.items():
            real = utils.last_modified_date_to_timestamp(last_modified)
            self.assertEqual(real, ts, "failed for %s" % last_modified)

    def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
        """Conversion must not depend on the process's local timezone."""
        try:
            old_tz = os.environ.get('TZ')
            # Western Argentina Summer Time. Found in glibc manual; this
            # timezone always has a non-zero offset from UTC, so this test is
            # always meaningful.
            os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
            # NOTE(review): TZ is changed without calling time.tzset();
            # whether the C library sees the new zone mid-process is
            # platform-dependent -- confirm this test actually exercises
            # the non-UTC path everywhere it runs.

            self.assertEqual(utils.last_modified_date_to_timestamp(
                '1970-01-01T00:00:00.000000'),
                0.0)
        finally:
            # restore (or remove) the original TZ setting
            if old_tz is not None:
                os.environ['TZ'] = old_tz
            else:
                os.environ.pop('TZ')

    def test_backwards(self):
        # Test swift.common.utils.backward
        # The lines are designed so that the function would encounter
        # all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters blocksize = 25 lines = [b'123456789x12345678><123456789\n', # block larger than rest b'123456789x123>\n', # block ends just before \n character b'123423456789\n', b'123456789x\n', # block ends at the end of line b'<123456789x123456789x123\n', b'<6789x123\n', # block ends at the beginning of the line b'6789x1234\n', b'1234><234\n', # block ends typically in the middle of line b'123456789x123456789\n'] with TemporaryFile() as f: for line in lines: f.write(line) count = len(lines) - 1 for line in utils.backward(f, blocksize): self.assertEqual(line, lines[count].split(b'\n')[0]) count -= 1 # Empty file case with TemporaryFile('r') as f: self.assertEqual([], list(utils.backward(f))) def test_mkdirs(self): testdir_base = mkdtemp() testroot = os.path.join(testdir_base, 'mkdirs') try: self.assertTrue(not os.path.exists(testroot)) utils.mkdirs(testroot) self.assertTrue(os.path.exists(testroot)) utils.mkdirs(testroot) self.assertTrue(os.path.exists(testroot)) rmtree(testroot, ignore_errors=1) testdir = os.path.join(testroot, 'one/two/three') self.assertTrue(not os.path.exists(testdir)) utils.mkdirs(testdir) self.assertTrue(os.path.exists(testdir)) utils.mkdirs(testdir) self.assertTrue(os.path.exists(testdir)) rmtree(testroot, ignore_errors=1) open(testroot, 'wb').close() self.assertTrue(not os.path.exists(testdir)) self.assertRaises(OSError, utils.mkdirs, testdir) os.unlink(testroot) finally: rmtree(testdir_base) def test_split_path(self): # Test swift.common.utils.split_account_path self.assertRaises(ValueError, utils.split_path, '') self.assertRaises(ValueError, utils.split_path, '/') self.assertRaises(ValueError, utils.split_path, '//') self.assertEqual(utils.split_path('/a'), ['a']) self.assertRaises(ValueError, utils.split_path, '//a') self.assertEqual(utils.split_path('/a/'), ['a']) self.assertRaises(ValueError, utils.split_path, '/a/c') self.assertRaises(ValueError, utils.split_path, '//c') 
self.assertRaises(ValueError, utils.split_path, '/a/c/') self.assertRaises(ValueError, utils.split_path, '/a//') self.assertRaises(ValueError, utils.split_path, '/a', 2) self.assertRaises(ValueError, utils.split_path, '/a', 2, 3) self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True) self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c']) self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o']) self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3) self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True), ['a', 'c', 'o/r']) self.assertEqual(utils.split_path('/a/c', 2, 3, True), ['a', 'c', None]) self.assertRaises(ValueError, utils.split_path, '/a', 5, 4) self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c']) self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', '']) try: utils.split_path('o\nn e', 2) except ValueError as err: self.assertEqual(str(err), 'Invalid path: o%0An%20e') try: utils.split_path('o\nn e', 2, 3, True) except ValueError as err: self.assertEqual(str(err), 'Invalid path: o%0An%20e') def test_validate_device_partition(self): # Test swift.common.utils.validate_device_partition utils.validate_device_partition('foo', 'bar') self.assertRaises(ValueError, utils.validate_device_partition, '', '') self.assertRaises(ValueError, utils.validate_device_partition, '', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '') self.assertRaises(ValueError, utils.validate_device_partition, 'foo/bar', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', 'foo/bar') self.assertRaises(ValueError, utils.validate_device_partition, '.', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, '..', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '.') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '..') try: utils.validate_device_partition('o\nn e', 'foo') except ValueError as err: self.assertEqual(str(err), 'Invalid 
device: o%0An%20e') try: utils.validate_device_partition('foo', 'o\nn e') except ValueError as err: self.assertEqual(str(err), 'Invalid partition: o%0An%20e') def test_NullLogger(self): # Test swift.common.utils.NullLogger sio = StringIO() nl = utils.NullLogger() nl.write('test') self.assertEqual(sio.getvalue(), '') def test_LoggerFileObject(self): orig_stdout = sys.stdout orig_stderr = sys.stderr sio = StringIO() handler = logging.StreamHandler(sio) logger = logging.getLogger() logger.addHandler(handler) lfo_stdout = utils.LoggerFileObject(logger) lfo_stderr = utils.LoggerFileObject(logger) lfo_stderr = utils.LoggerFileObject(logger, 'STDERR') print('test1') self.assertEqual(sio.getvalue(), '') sys.stdout = lfo_stdout print('test2') self.assertEqual(sio.getvalue(), 'STDOUT: test2\n') sys.stderr = lfo_stderr print('test4', file=sys.stderr) self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') sys.stdout = orig_stdout print('test5') self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') print('test6', file=sys.stderr) self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\n') sys.stderr = orig_stderr print('test8') self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\n') lfo_stdout.writelines(['a', 'b', 'c']) self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\nSTDOUT: a#012b#012c\n') lfo_stdout.close() lfo_stderr.close() lfo_stdout.write('d') self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n') lfo_stdout.flush() self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n') for lfo in (lfo_stdout, lfo_stderr): got_exc = False try: for line in lfo: pass except Exception: got_exc = True self.assertTrue(got_exc) got_exc = False try: for line in lfo: pass except Exception: got_exc = True self.assertTrue(got_exc) self.assertRaises(IOError, 
lfo.read) self.assertRaises(IOError, lfo.read, 1024) self.assertRaises(IOError, lfo.readline) self.assertRaises(IOError, lfo.readline, 1024) lfo.tell() def test_LoggerFileObject_recursion(self): crashy_calls = [0] class CrashyLogger(logging.Handler): def emit(self, record): crashy_calls[0] += 1 try: # Pretend to be trying to send to syslog, but syslogd is # dead. We need the raise here to set sys.exc_info. raise socket.error(errno.ENOTCONN, "This is an ex-syslog") except socket.error: self.handleError(record) logger = logging.getLogger() logger.addHandler(CrashyLogger()) # Set up some real file descriptors for stdio. If you run # nosetests with "-s", you already have real files there, but # otherwise they're StringIO objects. # # In any case, since capture_stdio() closes sys.stdin and friends, # we'd want to set up some sacrificial files so as to not goof up # the testrunner. new_stdin = open(os.devnull, 'r+b') new_stdout = open(os.devnull, 'w+b') new_stderr = open(os.devnull, 'w+b') with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \ contextlib.closing(new_stderr): # logging.raiseExceptions is set to False in test/__init__.py, but # is True in Swift daemons, and the error doesn't manifest without # it. with mock.patch('sys.stdin', new_stdin), \ mock.patch('sys.stdout', new_stdout), \ mock.patch('sys.stderr', new_stderr), \ mock.patch.object(logging, 'raiseExceptions', True): # Note: since stdio is hooked up to /dev/null in here, using # pdb is basically impossible. Sorry about that. 
                utils.capture_stdio(logger)
                logger.info("I like ham")

        # NOTE(review): the second argument to assertTrue is the failure
        # *message*, not an expected value -- this only checks that
        # crashy_calls[0] is truthy, not that it equals 1.
        self.assertTrue(crashy_calls[0], 1)

    def test_parse_options(self):
        """parse_options: defaults, -v/--once flags, and extra positionals."""
        # Get a file that is definitely on disk
        with NamedTemporaryFile() as f:
            conf_file = f.name
            conf, options = utils.parse_options(test_args=[conf_file])
            self.assertEqual(conf, conf_file)
            # assert defaults
            self.assertEqual(options['verbose'], False)
            self.assertNotIn('once', options)
            # assert verbose as option
            conf, options = utils.parse_options(test_args=[conf_file, '-v'])
            self.assertEqual(options['verbose'], True)
            # check once option
            conf, options = utils.parse_options(test_args=[conf_file],
                                                once=True)
            self.assertEqual(options['once'], False)
            test_args = [conf_file, '--once']
            conf, options = utils.parse_options(test_args=test_args,
                                                once=True)
            self.assertEqual(options['once'], True)
            # check options as arg parsing
            test_args = [conf_file, 'once', 'plugin_name', 'verbose']
            conf, options = utils.parse_options(test_args=test_args,
                                                once=True)
            self.assertEqual(options['verbose'], True)
            self.assertEqual(options['once'], True)
            self.assertEqual(options['extra_args'], ['plugin_name'])

    def test_parse_options_errors(self):
        """parse_options exits via SystemExit on missing/unlocatable conf."""
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        stdo = StringIO()
        stde = StringIO()
        # utils.sys is the same module object as sys, so these
        # assignments redirect stdout/stderr for parse_options
        utils.sys.stdout = stdo
        utils.sys.stderr = stde
        self.assertRaises(SystemExit, utils.parse_options, once=True,
                          test_args=[])
        self.assertTrue('missing config' in stdo.getvalue())

        # verify conf file must exist, context manager will delete temp file
        with NamedTemporaryFile() as f:
            conf_file = f.name
        self.assertRaises(SystemExit, utils.parse_options, once=True,
                          test_args=[conf_file])
        self.assertTrue('unable to locate' in stdo.getvalue())

        # reset stdio
        utils.sys.stdout = orig_stdout
        utils.sys.stderr = orig_stderr

    def test_dump_recon_cache(self):
        testdir_base = mkdtemp()
        testcache_file = os.path.join(testdir_base, 'cache.recon')
        logger = utils.get_logger(None, 'server', log_route='server')
        try:
            submit_dict = {'key0': 99,
                           'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(submit_dict, file_dict) # Use a nested entry submit_dict = {'key0': 101, 'key1': {'key2': {'value1': 1, 'value2': 2}}} expect_dict = {'key0': 101, 'key1': {'key2': {'value1': 1, 'value2': 2}, 'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # cached entries are sticky submit_dict = {} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # nested dicts can be erased... submit_dict = {'key1': {'key2': {}}} expect_dict = {'key0': 101, 'key1': {'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # ... and erasure is idempotent utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # top level dicts can be erased... submit_dict = {'key1': {}} expect_dict = {'key0': 101} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # ... 
and erasure is idempotent utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) finally: rmtree(testdir_base) def test_dump_recon_cache_set_owner(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') logger = utils.get_logger(None, 'server', log_route='server') try: submit_dict = {'key1': {'value1': 1, 'value2': 2}} _ret = lambda: None _ret.pw_uid = 100 _mock_getpwnam = MagicMock(return_value=_ret) _mock_chown = mock.Mock() with patch('os.chown', _mock_chown), \ patch('pwd.getpwnam', _mock_getpwnam): utils.dump_recon_cache(submit_dict, testcache_file, logger, set_owner="swift") _mock_getpwnam.assert_called_once_with("swift") self.assertEqual(_mock_chown.call_args[0][1], 100) finally: rmtree(testdir_base) def test_dump_recon_cache_permission_denied(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') class MockLogger(object): def __init__(self): self._excs = [] def exception(self, message): _junk, exc, _junk = sys.exc_info() self._excs.append(exc) logger = MockLogger() try: submit_dict = {'key1': {'value1': 1, 'value2': 2}} with mock.patch( 'swift.common.utils.NamedTemporaryFile', side_effect=IOError(13, 'Permission Denied')): utils.dump_recon_cache(submit_dict, testcache_file, logger) self.assertIsInstance(logger._excs[0], IOError) finally: rmtree(testdir_base) def test_get_logger(self): sio = StringIO() logger = logging.getLogger('server') logger.addHandler(logging.StreamHandler(sio)) logger = utils.get_logger(None, 'server', log_route='server') logger.warning('test1') self.assertEqual(sio.getvalue(), 'test1\n') logger.debug('test2') self.assertEqual(sio.getvalue(), 'test1\n') logger = utils.get_logger({'log_level': 'DEBUG'}, 'server', log_route='server') logger.debug('test3') self.assertEqual(sio.getvalue(), 'test1\ntest3\n') # Doesn't really test that the log facility is 
truly being used all the # way to syslog; but exercises the code. logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server', log_route='server') logger.warning('test4') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n') # make sure debug doesn't log by default logger.debug('test5') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n') # make sure notice lvl logs by default logger.notice('test6') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\ntest6\n') def test_get_logger_sysloghandler_plumbing(self): orig_sysloghandler = utils.ThreadSafeSysLogHandler syslog_handler_args = [] def syslog_handler_catcher(*args, **kwargs): syslog_handler_args.append((args, kwargs)) return orig_sysloghandler(*args, **kwargs) syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0 syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3 with mock.patch.object(utils, 'ThreadSafeSysLogHandler', syslog_handler_catcher): utils.get_logger({ 'log_facility': 'LOG_LOCAL3', }, 'server', log_route='server') expected_args = [((), {'address': '/dev/log', 'facility': orig_sysloghandler.LOG_LOCAL3})] if not os.path.exists('/dev/log') or \ os.path.isfile('/dev/log') or \ os.path.isdir('/dev/log'): # Since socket on OSX is in /var/run/syslog, there will be # a fallback to UDP. expected_args.append( ((), {'facility': orig_sysloghandler.LOG_LOCAL3})) self.assertEqual(expected_args, syslog_handler_args) syslog_handler_args = [] utils.get_logger({ 'log_facility': 'LOG_LOCAL3', 'log_address': '/foo/bar', }, 'server', log_route='server') self.assertEqual([ ((), {'address': '/foo/bar', 'facility': orig_sysloghandler.LOG_LOCAL3}), # Second call is because /foo/bar didn't exist (and wasn't a # UNIX domain socket). 
((), {'facility': orig_sysloghandler.LOG_LOCAL3})], syslog_handler_args) # Using UDP with default port syslog_handler_args = [] utils.get_logger({ 'log_udp_host': 'syslog.funtimes.com', }, 'server', log_route='server') self.assertEqual([ ((), {'address': ('syslog.funtimes.com', logging.handlers.SYSLOG_UDP_PORT), 'facility': orig_sysloghandler.LOG_LOCAL0})], syslog_handler_args) # Using UDP with non-default port syslog_handler_args = [] utils.get_logger({ 'log_udp_host': 'syslog.funtimes.com', 'log_udp_port': '2123', }, 'server', log_route='server') self.assertEqual([ ((), {'address': ('syslog.funtimes.com', 2123), 'facility': orig_sysloghandler.LOG_LOCAL0})], syslog_handler_args) @reset_logger_state def test_clean_logger_exception(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v def log_exception(exc): try: raise exc except (Exception, Timeout): logger.exception('blah') try: # establish base case self.assertEqual(strip_value(sio), '') logger.info('test') self.assertEqual(strip_value(sio), 'test\n') self.assertEqual(strip_value(sio), '') logger.info('test') logger.info('test') self.assertEqual(strip_value(sio), 'test\ntest\n') self.assertEqual(strip_value(sio), '') # test OSError for en in (errno.EIO, errno.ENOSPC): log_exception(OSError(en, 'my %s error message' % en)) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertIn('my %s error message' % en, log_msg) # unfiltered log_exception(OSError()) self.assertTrue('Traceback' in strip_value(sio)) # test socket.error log_exception(socket.error(errno.ECONNREFUSED, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('errno.ECONNREFUSED message test', log_msg) self.assertIn('Connection refused', log_msg) log_exception(socket.error(errno.EHOSTUNREACH, 'my error 
message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('my error message', log_msg) self.assertIn('Host unreachable', log_msg) log_exception(socket.error(errno.ETIMEDOUT, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('my error message', log_msg) self.assertIn('Connection timeout', log_msg) # unfiltered log_exception(socket.error(0, 'my error message')) log_msg = strip_value(sio) self.assertIn('Traceback', log_msg) self.assertIn('my error message', log_msg) # test eventlet.Timeout connection_timeout = ConnectionTimeout(42, 'my error message') log_exception(connection_timeout) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertTrue('ConnectionTimeout' in log_msg) self.assertTrue('(42s)' in log_msg) self.assertNotIn('my error message', log_msg) connection_timeout.cancel() message_timeout = MessageTimeout(42, 'my error message') log_exception(message_timeout) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertTrue('MessageTimeout' in log_msg) self.assertTrue('(42s)' in log_msg) self.assertTrue('my error message' in log_msg) message_timeout.cancel() # test unhandled log_exception(Exception('my error message')) log_msg = strip_value(sio) self.assertTrue('Traceback' in log_msg) self.assertTrue('my error message' in log_msg) finally: logger.logger.removeHandler(handler) @reset_logger_state def test_swift_log_formatter_max_line_length(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) formatter = utils.SwiftLogFormatter(max_line_length=10) handler.setFormatter(formatter) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v try: logger.info('12345') self.assertEqual(strip_value(sio), '12345\n') logger.info('1234567890') self.assertEqual(strip_value(sio), '1234567890\n') logger.info('1234567890abcde') 
self.assertEqual(strip_value(sio), '12 ... de\n') formatter.max_line_length = 11 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123 ... cde\n') formatter.max_line_length = 0 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234567890abcde\n') formatter.max_line_length = 1 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1\n') formatter.max_line_length = 2 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12\n') formatter.max_line_length = 3 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123\n') formatter.max_line_length = 4 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234\n') formatter.max_line_length = 5 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12345\n') formatter.max_line_length = 6 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123456\n') formatter.max_line_length = 7 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1 ... 
e\n') formatter.max_line_length = -10 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234567890abcde\n') finally: logger.logger.removeHandler(handler) @reset_logger_state def test_swift_log_formatter(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) handler.setFormatter(utils.SwiftLogFormatter()) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v try: self.assertFalse(logger.txn_id) logger.error('my error message') log_msg = strip_value(sio) self.assertIn('my error message', log_msg) self.assertNotIn('txn', log_msg) logger.txn_id = '12345' logger.error('test') log_msg = strip_value(sio) self.assertIn('txn', log_msg) self.assertIn('12345', log_msg) # test txn in info message self.assertEqual(logger.txn_id, '12345') logger.info('test') log_msg = strip_value(sio) self.assertIn('txn', log_msg) self.assertIn('12345', log_msg) # test txn already in message self.assertEqual(logger.txn_id, '12345') logger.warning('test 12345 test') self.assertEqual(strip_value(sio), 'test 12345 test\n') # Test multi line collapsing logger.error('my\nerror\nmessage') log_msg = strip_value(sio) self.assertIn('my#012error#012message', log_msg) # test client_ip self.assertFalse(logger.client_ip) logger.error('my error message') log_msg = strip_value(sio) self.assertIn('my error message', log_msg) self.assertNotIn('client_ip', log_msg) logger.client_ip = '1.2.3.4' logger.error('test') log_msg = strip_value(sio) self.assertIn('client_ip', log_msg) self.assertIn('1.2.3.4', log_msg) # test no client_ip on info message self.assertEqual(logger.client_ip, '1.2.3.4') logger.info('test') log_msg = strip_value(sio) self.assertNotIn('client_ip', log_msg) self.assertNotIn('1.2.3.4', log_msg) # test client_ip (and txn) already in message self.assertEqual(logger.client_ip, '1.2.3.4') logger.warning('test 1.2.3.4 test 12345') self.assertEqual(strip_value(sio), 
'test 1.2.3.4 test 12345\n') finally: logger.logger.removeHandler(handler) def test_storage_directory(self): self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'), 'objects/1/DEF/ABCDEF') def test_is_valid_ip(self): self.assertTrue(is_valid_ip("127.0.0.1")) self.assertTrue(is_valid_ip("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "::1" self.assertTrue(is_valid_ip(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ip(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ip(not_ipv6)) def test_is_valid_ipv4(self): self.assertTrue(is_valid_ipv4("127.0.0.1")) self.assertTrue(is_valid_ipv4("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "::1" self.assertFalse(is_valid_ipv4(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ipv4(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ipv4(not_ipv6)) def test_is_valid_ipv6(self): self.assertFalse(is_valid_ipv6("127.0.0.1")) 
self.assertFalse(is_valid_ipv6("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "::1" self.assertTrue(is_valid_ipv6(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ipv6(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ipv6(not_ipv6)) def test_expand_ipv6(self): expanded_ipv6 = "fe80::204:61ff:fe9d:f156" upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6)) omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6)) less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6)) def test_whataremyips(self): myips = utils.whataremyips() self.assertTrue(len(myips) > 1) self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_to_all(self): for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000', '::0', '::0000', '::', # Wacky parse-error input produces all IPs 'I am a bear'): myips = utils.whataremyips(any_addr) self.assertTrue(len(myips) > 1) self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_ip_specific(self): self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4')) def test_whataremyips_error(self): def my_interfaces(): return ['eth0'] def my_ifaddress_error(interface): raise ValueError with patch('netifaces.interfaces', my_interfaces), \ patch('netifaces.ifaddresses', my_ifaddress_error): 
                self.assertEqual(utils.whataremyips(), [])

    def test_whataremyips_ipv6(self):
        # An IPv6 interface address carrying a '%<iface>' scope suffix
        # should be reported with the scope stripped.
        test_ipv6_address = '2001:6b0:dead:beef:2::32'
        test_interface = 'eth0'

        def my_ipv6_interfaces():
            # stand-in for netifaces.interfaces()
            return ['eth0']

        def my_ipv6_ifaddresses(interface):
            # stand-in for netifaces.ifaddresses(); note the scoped
            # 'addr' value: '<address>%<interface>'
            return {AF_INET6:
                    [{'netmask': 'ffff:ffff:ffff:ffff::',
                      'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
        with patch('netifaces.interfaces', my_ipv6_interfaces), \
                patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
            myips = utils.whataremyips()
            self.assertEqual(len(myips), 1)
            self.assertEqual(myips[0], test_ipv6_address)

    def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someones changes the results hash_path produces, they know it
        with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
            self.assertEqual(utils.hash_path('a'),
                             '1c84525acb02107ea475dcd3d09c2c58')
            self.assertEqual(utils.hash_path('a', 'c'),
                             '33379ecb053aa5c9e356c68997cbb59e')
            self.assertEqual(utils.hash_path('a', 'c', 'o'),
                             '06fbf0b514e5199dfc4e00f42eb5ea83')
            # raw_digest=False (the default) yields the hex string ...
            self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
                             '06fbf0b514e5199dfc4e00f42eb5ea83')
            # ... raw_digest=True yields the unencoded digest bytes
            self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
                             '\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
                             '\x00\xf4.\xb5\xea\x83')
            # an object without a container is rejected
            self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
            # changing the prefix must change every hash
            utils.HASH_PATH_PREFIX = 'abcdef'
            self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
                             '363f9b535bfb7d17a43a46a358afca0e')

    def test_validate_hash_conf(self):
        # no section causes InvalidHashPathConfigError
        self._test_validate_hash_conf([], [], True)
        # 'swift-hash' section is there but no options causes
        # InvalidHashPathConfigError
        self._test_validate_hash_conf(['swift-hash'], [], True)
        # if we have the section and either of prefix or suffix,
        # InvalidHashPathConfigError doesn't occur
        self._test_validate_hash_conf(
            ['swift-hash'], ['swift_hash_path_prefix'], False)
        self._test_validate_hash_conf(
            ['swift-hash'], ['swift_hash_path_suffix'], False)
        #
definitely, we have the section and both of them, # InvalidHashPathConfigError doesn't occur self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_suffix', 'swift_hash_path_prefix'], False) # But invalid section name should make an error even if valid # options are there self._test_validate_hash_conf( ['swift-hash-xxx'], ['swift_hash_path_suffix', 'swift_hash_path_prefix'], True) def _test_validate_hash_conf(self, sections, options, should_raise_error): class FakeConfigParser(object): def read(self, conf_path): return True def get(self, section, option): if section not in sections: raise NoSectionError('section error') elif option not in options: raise NoOptionError('option error', 'this option') else: return 'some_option_value' with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \ mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \ mock.patch('swift.common.utils.ConfigParser', FakeConfigParser): try: utils.validate_hash_conf() except utils.InvalidHashPathConfigError: if not should_raise_error: self.fail('validate_hash_conf should not raise an error') else: if should_raise_error: self.fail('validate_hash_conf should raise an error') def test_load_libc_function(self): self.assertTrue(callable( utils.load_libc_function('printf'))) self.assertTrue(callable( utils.load_libc_function('some_not_real_function'))) self.assertRaises(AttributeError, utils.load_libc_function, 'some_not_real_function', fail_if_missing=True) def test_readconf(self): conf = '''[section1] foo = bar [section2] log_name = yarr''' # setup a real file fd, temppath = tempfile.mkstemp() with os.fdopen(fd, 'wb') as f: f.write(conf) make_filename = lambda: temppath # setup a file stream make_fp = lambda: StringIO(conf) for conf_object_maker in (make_filename, make_fp): conffile = conf_object_maker() result = utils.readconf(conffile) expected = {'__file__': conffile, 'log_name': None, 'section1': {'foo': 'bar'}, 'section2': {'log_name': 'yarr'}} self.assertEqual(result, expected) 
conffile = conf_object_maker() result = utils.readconf(conffile, 'section1') expected = {'__file__': conffile, 'log_name': 'section1', 'foo': 'bar'} self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section2').get('log_name') expected = 'yarr' self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1', log_name='foo').get('log_name') expected = 'foo' self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1', defaults={'bar': 'baz'}) expected = {'__file__': conffile, 'log_name': 'section1', 'foo': 'bar', 'bar': 'baz'} self.assertEqual(result, expected) self.assertRaisesRegexp( ValueError, 'Unable to find section3 config section in.*', utils.readconf, temppath, 'section3') os.unlink(temppath) self.assertRaises(IOError, utils.readconf, temppath) def test_readconf_raw(self): conf = '''[section1] foo = bar [section2] log_name = %(yarr)s''' # setup a real file fd, temppath = tempfile.mkstemp() with os.fdopen(fd, 'wb') as f: f.write(conf) make_filename = lambda: temppath # setup a file stream make_fp = lambda: StringIO(conf) for conf_object_maker in (make_filename, make_fp): conffile = conf_object_maker() result = utils.readconf(conffile, raw=True) expected = {'__file__': conffile, 'log_name': None, 'section1': {'foo': 'bar'}, 'section2': {'log_name': '%(yarr)s'}} self.assertEqual(result, expected) os.unlink(temppath) self.assertRaises(IOError, utils.readconf, temppath) def test_readconf_dir(self): config_dir = { 'server.conf.d/01.conf': """ [DEFAULT] port = 8080 foo = bar [section1] name=section1 """, 'server.conf.d/section2.conf': """ [DEFAULT] port = 8081 bar = baz [section2] name=section2 """, 'other-server.conf.d/01.conf': """ [DEFAULT] port = 8082 [section3] name=section3 """ } # strip indent from test config contents config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items()) with 
temptree(*zip(*config_dir.items())) as path: conf_dir = os.path.join(path, 'server.conf.d') conf = utils.readconf(conf_dir) expected = { '__file__': os.path.join(path, 'server.conf.d'), 'log_name': None, 'section1': { 'port': '8081', 'foo': 'bar', 'bar': 'baz', 'name': 'section1', }, 'section2': { 'port': '8081', 'foo': 'bar', 'bar': 'baz', 'name': 'section2', }, } self.assertEqual(conf, expected) def test_readconf_dir_ignores_hidden_and_nondotconf_files(self): config_dir = { 'server.conf.d/01.conf': """ [section1] port = 8080 """, 'server.conf.d/.01.conf.swp': """ [section] port = 8081 """, 'server.conf.d/01.conf-bak': """ [section] port = 8082 """, } # strip indent from test config contents config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items()) with temptree(*zip(*config_dir.items())) as path: conf_dir = os.path.join(path, 'server.conf.d') conf = utils.readconf(conf_dir) expected = { '__file__': os.path.join(path, 'server.conf.d'), 'log_name': None, 'section1': { 'port': '8080', }, } self.assertEqual(conf, expected) def _check_drop_privileges(self, mock_os, required_func_calls, call_setsid=True): user = getuser() user_data = pwd.getpwnam(user) self.assertFalse(mock_os.called_funcs) # sanity check # over-ride os with mock with mock.patch('swift.common.utils.os', mock_os): # exercise the code utils.drop_privileges(user, call_setsid=call_setsid) for func in required_func_calls: self.assertIn(func, mock_os.called_funcs) self.assertEqual(user_data[5], mock_os.environ['HOME']) groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem} self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0])) self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0]) self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0]) self.assertEqual('/', mock_os.called_funcs['chdir'][0]) self.assertEqual(0o22, mock_os.called_funcs['umask'][0]) def test_drop_privileges(self): required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid', 'chdir', 
'umask') mock_os = MockOs(called_funcs=required_func_calls) self._check_drop_privileges(mock_os, required_func_calls) def test_drop_privileges_setsid_error(self): # OSError trying to get session leader required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid', 'chdir', 'umask') mock_os = MockOs(called_funcs=required_func_calls, raise_funcs=('setsid',)) self._check_drop_privileges(mock_os, required_func_calls) def test_drop_privileges_no_call_setsid(self): required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir', 'umask') # OSError if trying to get session leader, but it shouldn't be called bad_func_calls = ('setsid',) mock_os = MockOs(called_funcs=required_func_calls, raise_funcs=bad_func_calls) self._check_drop_privileges(mock_os, required_func_calls, call_setsid=False) for func in bad_func_calls: self.assertNotIn(func, mock_os.called_funcs) @reset_logger_state def test_capture_stdio(self): # stubs logger = utils.get_logger(None, 'dummy') # mock utils system modules _orig_sys = utils.sys _orig_os = utils.os try: utils.sys = MockSys() utils.os = MockOs() # basic test utils.capture_stdio(logger) self.assertTrue(utils.sys.excepthook is not None) self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds) self.assertTrue( isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertTrue( isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test same args, but exc when trying to close stdio utils.os = MockOs(raise_funcs=('dup2',)) utils.sys = MockSys() # test unable to close stdio utils.capture_stdio(logger) self.assertTrue(utils.sys.excepthook is not None) self.assertEqual(utils.os.closed_fds, []) self.assertTrue( isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertTrue( isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test some other args utils.os = MockOs() utils.sys = MockSys() logger = utils.get_logger(None, log_to_console=True) # test console log utils.capture_stdio(logger, capture_stdout=False, 
capture_stderr=False) self.assertTrue(utils.sys.excepthook is not None) # when logging to console, stderr remains open self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2]) reset_loggers() # stdio not captured self.assertFalse(isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertFalse(isinstance(utils.sys.stderr, utils.LoggerFileObject)) finally: utils.sys = _orig_sys utils.os = _orig_os @reset_logger_state def test_get_logger_console(self): logger = utils.get_logger(None) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertFalse(console_handlers) logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertTrue(console_handlers) # make sure you can't have two console handlers self.assertEqual(len(console_handlers), 1) old_handler = console_handlers[0] logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertEqual(len(console_handlers), 1) new_handler = console_handlers[0] self.assertNotEqual(new_handler, old_handler) def verify_under_pseudo_time( self, func, target_runtime_ms=1, *args, **kwargs): curr_time = [42.0] def my_time(): curr_time[0] += 0.001 return curr_time[0] def my_sleep(duration): curr_time[0] += 0.001 curr_time[0] += duration with patch('time.time', my_time), \ patch('time.sleep', my_sleep), \ patch('eventlet.sleep', my_sleep): start = time.time() func(*args, **kwargs) # make sure it's accurate to 10th of a second, converting the time # difference to milliseconds, 100 milliseconds is 1/10 of a second diff_from_target_ms = abs( target_runtime_ms - ((time.time() - start) * 1000)) self.assertTrue(diff_from_target_ms < 100, "Expected %d < 100" % diff_from_target_ms) def test_ratelimit_sleep(self): def testfunc(): running_time = 0 for i in range(100): running_time = 
utils.ratelimit_sleep(running_time, -5) self.verify_under_pseudo_time(testfunc, target_runtime_ms=1) def testfunc(): running_time = 0 for i in range(100): running_time = utils.ratelimit_sleep(running_time, 0) self.verify_under_pseudo_time(testfunc, target_runtime_ms=1) def testfunc(): running_time = 0 for i in range(50): running_time = utils.ratelimit_sleep(running_time, 200) self.verify_under_pseudo_time(testfunc, target_runtime_ms=250) def test_ratelimit_sleep_with_incr(self): def testfunc(): running_time = 0 vals = [5, 17, 0, 3, 11, 30, 40, 4, 13, 2, -1] * 2 # adds up to 248 total = 0 for i in vals: running_time = utils.ratelimit_sleep(running_time, 500, incr_by=i) total += i self.assertEqual(248, total) self.verify_under_pseudo_time(testfunc, target_runtime_ms=500) def test_ratelimit_sleep_with_sleep(self): def testfunc(): running_time = 0 sleeps = [0] * 7 + [.2] * 3 + [0] * 30 for i in sleeps: running_time = utils.ratelimit_sleep(running_time, 40, rate_buffer=1) time.sleep(i) self.verify_under_pseudo_time(testfunc, target_runtime_ms=900) def test_urlparse(self): parsed = utils.urlparse('http://127.0.0.1/') self.assertEqual(parsed.scheme, 'http') self.assertEqual(parsed.hostname, '127.0.0.1') self.assertEqual(parsed.path, '/') parsed = utils.urlparse('http://127.0.0.1:8080/') self.assertEqual(parsed.port, 8080) parsed = utils.urlparse('https://127.0.0.1/') self.assertEqual(parsed.scheme, 'https') parsed = utils.urlparse('http://[::1]/') self.assertEqual(parsed.hostname, '::1') parsed = utils.urlparse('http://[::1]:8080/') self.assertEqual(parsed.hostname, '::1') self.assertEqual(parsed.port, 8080) parsed = utils.urlparse('www.example.com') self.assertEqual(parsed.hostname, '') def test_search_tree(self): # file match & ext miss with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t: asdf = utils.search_tree(t, 'a*', '.conf') self.assertEqual(len(asdf), 1) self.assertEqual(asdf[0], os.path.join(t, 'asdf.conf')) # multi-file match & glob miss & sort with 
temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t: app_bins = utils.search_tree(t, 'app*', 'bin') self.assertEqual(len(app_bins), 2) self.assertEqual(app_bins[0], os.path.join(t, 'apple.bin')) self.assertEqual(app_bins[1], os.path.join(t, 'application.bin')) # test file in folder & ext miss & glob miss files = ( 'sub/file1.ini', 'sub/file2.conf', 'sub.bin', 'bus.ini', 'bus/file3.ini', ) with temptree(files) as t: sub_ini = utils.search_tree(t, 'sub*', '.ini') self.assertEqual(len(sub_ini), 1) self.assertEqual(sub_ini[0], os.path.join(t, 'sub/file1.ini')) # test multi-file in folder & sub-folder & ext miss & glob miss files = ( 'folder_file.txt', 'folder/1.txt', 'folder/sub/2.txt', 'folder2/3.txt', 'Folder3/4.txt' 'folder.rc', ) with temptree(files) as t: folder_texts = utils.search_tree(t, 'folder*', '.txt') self.assertEqual(len(folder_texts), 4) f1 = os.path.join(t, 'folder_file.txt') f2 = os.path.join(t, 'folder/1.txt') f3 = os.path.join(t, 'folder/sub/2.txt') f4 = os.path.join(t, 'folder2/3.txt') for f in [f1, f2, f3, f4]: self.assertTrue(f in folder_texts) def test_search_tree_with_directory_ext_match(self): files = ( 'object-server/object-server.conf-base', 'object-server/1.conf.d/base.conf', 'object-server/1.conf.d/1.conf', 'object-server/2.conf.d/base.conf', 'object-server/2.conf.d/2.conf', 'object-server/3.conf.d/base.conf', 'object-server/3.conf.d/3.conf', 'object-server/4.conf.d/base.conf', 'object-server/4.conf.d/4.conf', ) with temptree(files) as t: conf_dirs = utils.search_tree(t, 'object-server', '.conf', dir_ext='conf.d') self.assertEqual(len(conf_dirs), 4) for i in range(4): conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1)) self.assertTrue(conf_dir in conf_dirs) def test_search_tree_conf_dir_with_named_conf_match(self): files = ( 'proxy-server/proxy-server.conf.d/base.conf', 'proxy-server/proxy-server.conf.d/pipeline.conf', 'proxy-server/proxy-noauth.conf.d/base.conf', 'proxy-server/proxy-noauth.conf.d/pipeline.conf', ) 
        with temptree(files) as t:
            conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
                                          dir_ext='noauth.conf.d')
            # only the proxy-noauth conf.d directory may match
            self.assertEqual(len(conf_dirs), 1)
            conf_dir = conf_dirs[0]
            expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
            self.assertEqual(conf_dir, expected)

    def test_search_tree_conf_dir_pid_with_named_conf_match(self):
        files = (
            'proxy-server/proxy-server.pid.d',
            'proxy-server/proxy-noauth.pid.d',
        )
        with temptree(files) as t:
            # with multiple exts, only the named ('noauth') pid entry matches
            pid_files = utils.search_tree(t, 'proxy-server',
                                          exts=['noauth.pid', 'noauth.pid.d'])
            self.assertEqual(len(pid_files), 1)
            pid_file = pid_files[0]
            expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
            self.assertEqual(pid_file, expected)

    def test_write_file(self):
        with temptree([]) as t:
            file_name = os.path.join(t, 'test')
            utils.write_file(file_name, 'test')
            with open(file_name, 'r') as f:
                contents = f.read()
            self.assertEqual(contents, 'test')
            # and also subdirs
            file_name = os.path.join(t, 'subdir/test2')
            utils.write_file(file_name, 'test2')
            with open(file_name, 'r') as f:
                contents = f.read()
            self.assertEqual(contents, 'test2')
            # but can't over-write files
            file_name = os.path.join(t, 'subdir/test2/test3')
            self.assertRaises(IOError, utils.write_file, file_name, 'test3')

    def test_remove_file(self):
        with temptree([]) as t:
            file_name = os.path.join(t, 'blah.pid')
            # assert no raise
            self.assertEqual(os.path.exists(file_name), False)
            self.assertIsNone(utils.remove_file(file_name))
            with open(file_name, 'w') as f:
                f.write('1')
            self.assertTrue(os.path.exists(file_name))
            # removing an existing file returns None and deletes it
            self.assertIsNone(utils.remove_file(file_name))
            self.assertFalse(os.path.exists(file_name))

    def test_human_readable(self):
        # values below 1024 are rendered with no suffix
        self.assertEqual(utils.human_readable(0), '0')
        self.assertEqual(utils.human_readable(1), '1')
        self.assertEqual(utils.human_readable(10), '10')
        self.assertEqual(utils.human_readable(100), '100')
        self.assertEqual(utils.human_readable(999), '999')
        # binary (Ki/Mi/...) suffixes from 1024 up
        self.assertEqual(utils.human_readable(1024), '1Ki')
        self.assertEqual(utils.human_readable(1535),
'1Ki') self.assertEqual(utils.human_readable(1536), '2Ki') self.assertEqual(utils.human_readable(1047552), '1023Ki') self.assertEqual(utils.human_readable(1048063), '1023Ki') self.assertEqual(utils.human_readable(1048064), '1Mi') self.assertEqual(utils.human_readable(1048576), '1Mi') self.assertEqual(utils.human_readable(1073741824), '1Gi') self.assertEqual(utils.human_readable(1099511627776), '1Ti') self.assertEqual(utils.human_readable(1125899906842624), '1Pi') self.assertEqual(utils.human_readable(1152921504606846976), '1Ei') self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi') self.assertEqual(utils.human_readable(1208925819614629174706176), '1Yi') self.assertEqual(utils.human_readable(1237940039285380274899124224), '1024Yi') def test_validate_sync_to(self): fname = 'container-sync-realms.conf' fcontents = ''' [US] key = 9ff3b71c849749dbaec4ccdd3cbab62b cluster_dfw1 = http://dfw1.host/v1/ ''' with temptree([fname], [fcontents]) as tempdir: logger = FakeLogger() fpath = os.path.join(tempdir, fname) csr = ContainerSyncRealms(fpath, logger) for realms_conf in (None, csr): for goodurl, result in ( ('http://1.1.1.1/v1/a/c', (None, 'http://1.1.1.1/v1/a/c', None, None)), ('http://1.1.1.1:8080/a/c', (None, 'http://1.1.1.1:8080/a/c', None, None)), ('http://2.2.2.2/a/c', (None, 'http://2.2.2.2/a/c', None, None)), ('https://1.1.1.1/v1/a/c', (None, 'https://1.1.1.1/v1/a/c', None, None)), ('//US/DFW1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//us/DFW1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//us/dfw1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//', (None, None, None, None)), ('', (None, None, None, None))): if goodurl.startswith('//') and not realms_conf: self.assertEqual( utils.validate_sync_to( goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf), (None, None, None, None)) else: self.assertEqual( utils.validate_sync_to( goodurl, 
['1.1.1.1', '2.2.2.2'], realms_conf), result) for badurl, result in ( ('http://1.1.1.1', ('Path required in X-Container-Sync-To', None, None, None)), ('httpq://1.1.1.1/v1/a/c', ('Invalid scheme \'httpq\' in X-Container-Sync-To, ' 'must be "//", "http", or "https".', None, None, None)), ('http://1.1.1.1/v1/a/c?query', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query=param', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query=param#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.2/v1/a/c', ("Invalid host '1.1.1.2' in X-Container-Sync-To", None, None, None)), ('//us/invalid/a/c', ("No cluster endpoint for 'us' 'invalid'", None, None, None)), ('//invalid/dfw1/a/c', ("No realm key for 'invalid'", None, None, None)), ('//us/invalid1/a/', ("Invalid X-Container-Sync-To format " "'//us/invalid1/a/'", None, None, None)), ('//us/invalid1/a', ("Invalid X-Container-Sync-To format " "'//us/invalid1/a'", None, None, None)), ('//us/invalid1/', ("Invalid X-Container-Sync-To format " "'//us/invalid1/'", None, None, None)), ('//us/invalid1', ("Invalid X-Container-Sync-To format " "'//us/invalid1'", None, None, None)), ('//us/', ("Invalid X-Container-Sync-To format " "'//us/'", None, None, None)), ('//us', ("Invalid X-Container-Sync-To format " "'//us'", None, None, None))): if badurl.startswith('//') and not realms_conf: self.assertEqual( utils.validate_sync_to( badurl, ['1.1.1.1', '2.2.2.2'], realms_conf), (None, None, None, None)) else: self.assertEqual( utils.validate_sync_to( badurl, ['1.1.1.1', '2.2.2.2'], realms_conf), 
                        result)

    def test_TRUE_VALUES(self):
        # Every entry in utils.TRUE_VALUES must already be lowercase,
        # since config_true_value lowercases its input before comparing.
        for v in utils.TRUE_VALUES:
            self.assertEqual(v, v.lower())

    def test_config_true_value(self):
        # config_true_value is case-insensitive against TRUE_VALUES and
        # passes booleans through unchanged; patch TRUE_VALUES to prove
        # the lookup (restored in finally).
        orig_trues = utils.TRUE_VALUES
        try:
            utils.TRUE_VALUES = 'hello world'.split()
            for val in 'hello world HELLO WORLD'.split():
                self.assertTrue(utils.config_true_value(val) is True)
            self.assertTrue(utils.config_true_value(True) is True)
            self.assertTrue(utils.config_true_value('foo') is False)
            self.assertTrue(utils.config_true_value(False) is False)
        finally:
            utils.TRUE_VALUES = orig_trues

    def test_config_positive_int_value(self):
        # Table-driven: positive ints and their string forms are
        # accepted; zero, negatives, hex/float strings, and None raise
        # ValueError with a fixed message.
        expectations = {  # value : expected,
            '1': 1,
            1: 1,
            '2': 2,
            '1024': 1024,
            '0': ValueError,
            '-1': ValueError,
            '0x01': ValueError,
            'asdf': ValueError,
            None: ValueError,
            0: ValueError,
            -1: ValueError,
            '1.2': ValueError,  # string expresses float should be value error
        }
        for value, expected in expectations.items():
            try:
                rv = utils.config_positive_int_value(value)
            except Exception as e:
                # re-raise anything other than the exact expected class
                if e.__class__ is not expected:
                    raise
                else:
                    self.assertEqual(
                        'Config option must be an positive int number, '
                        'not "%s".'
% value, e.message) else: self.assertEqual(expected, rv) def test_config_auto_int_value(self): expectations = { # (value, default) : expected, ('1', 0): 1, (1, 0): 1, ('asdf', 0): ValueError, ('auto', 1): 1, ('AutO', 1): 1, ('Aut0', 1): ValueError, (None, 1): 1, } for (value, default), expected in expectations.items(): try: rv = utils.config_auto_int_value(value, default) except Exception as e: if e.__class__ is not expected: raise else: self.assertEqual(expected, rv) def test_streq_const_time(self): self.assertTrue(utils.streq_const_time('abc123', 'abc123')) self.assertFalse(utils.streq_const_time('a', 'aaaaa')) self.assertFalse(utils.streq_const_time('ABC123', 'abc123')) def test_quorum_size(self): expected_sizes = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3} got_sizes = dict([(n, utils.quorum_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) def test_majority_size(self): expected_sizes = {1: 1, 2: 2, 3: 2, 4: 3, 5: 3} got_sizes = dict([(n, utils.majority_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) def test_rsync_ip_ipv4_localhost(self): self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1') def test_rsync_ip_ipv6_random_ip(self): self.assertEqual( utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'), '[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]') def test_rsync_ip_ipv6_ipv4_compatible(self): self.assertEqual( utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]') def test_rsync_module_interpolation(self): fake_device = {'ip': '127.0.0.1', 'port': 11, 'replication_ip': '127.0.0.2', 'replication_port': 12, 'region': '1', 'zone': '2', 'device': 'sda1', 'meta': 'just_a_string'} self.assertEqual( utils.rsync_module_interpolation('{ip}', fake_device), '127.0.0.1') self.assertEqual( utils.rsync_module_interpolation('{port}', fake_device), '11') self.assertEqual( utils.rsync_module_interpolation('{replication_ip}', fake_device), '127.0.0.2') self.assertEqual( 
utils.rsync_module_interpolation('{replication_port}', fake_device), '12') self.assertEqual( utils.rsync_module_interpolation('{region}', fake_device), '1') self.assertEqual( utils.rsync_module_interpolation('{zone}', fake_device), '2') self.assertEqual( utils.rsync_module_interpolation('{device}', fake_device), 'sda1') self.assertEqual( utils.rsync_module_interpolation('{meta}', fake_device), 'just_a_string') self.assertEqual( utils.rsync_module_interpolation('{replication_ip}::object', fake_device), '127.0.0.2::object') self.assertEqual( utils.rsync_module_interpolation('{ip}::container{port}', fake_device), '127.0.0.1::container11') self.assertEqual( utils.rsync_module_interpolation( '{replication_ip}::object_{device}', fake_device), '127.0.0.2::object_sda1') self.assertEqual( utils.rsync_module_interpolation( '127.0.0.3::object_{replication_port}', fake_device), '127.0.0.3::object_12') self.assertRaises(ValueError, utils.rsync_module_interpolation, '{replication_ip}::object_{deivce}', fake_device) def test_fallocate_reserve(self): class StatVFS(object): f_frsize = 1024 f_bavail = 1 f_blocks = 100 def fstatvfs(fd): return StatVFS() orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE orig_fstatvfs = utils.os.fstatvfs try: fallocate = utils.FallocateWrapper(noop=True) utils.os.fstatvfs = fstatvfs # Make sure setting noop, which disables fallocate, also stops the # fallocate_reserve check. # Set the fallocate_reserve to 99% and request an object that is # about 50% the size. With fallocate_reserve off this will succeed. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('99%') self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0) # Setting noop to False after the constructor allows us to use # a noop fallocate syscall and still test fallocate_reserve. 
fallocate.noop = False # Want 1023 reserved, have 1024 * 1 free, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) # Want 1023 reserved, have 512 * 2 free, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) # Want 1024 reserved, have 1024 * 1 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1024 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 2048 reserved, have 1024 * 1 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 2048 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') StatVFS.f_frsize = 512 
StatVFS.f_bavail = 2 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so # fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(1)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so # succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1022') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0) # Want 1% reserved, have 100 bytes * 2/100 free, and file size is # 99, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 2 StatVFS.f_blocks = 100 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0) # Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49, # so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2%') StatVFS.f_frsize = 50 StatVFS.f_bavail = 2 StatVFS.f_blocks = 50 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0) # Want 100% reserved, have 100 * 100/100 free, and file size is 0, # so fails. 
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('100%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 100 StatVFS.f_blocks = 100 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 100.0 <= 100.0' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1% reserved, have 100 * 2/100 free, and file size is 101, # so fails. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 2 StatVFS.f_blocks = 100 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(101)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1.0' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # is 100, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('98%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 99 StatVFS.f_blocks = 100 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(100)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 98.0 <= 98.0' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size # is 999, so succeeds. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2%') StatVFS.f_frsize = 1000 StatVFS.f_bavail = 21 StatVFS.f_blocks = 1000 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0) # Want 2% resereved, have 1000 bytes * 21/1000 free, and file size # is 1000, so fails. 
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2%') StatVFS.f_frsize = 1000 StatVFS.f_bavail = 21 StatVFS.f_blocks = 1000 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(1000)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 2.0 <= 2.0' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) finally: utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE utils.os.fstatvfs = orig_fstatvfs def test_fallocate_func(self): class FallocateWrapper(object): def __init__(self): self.last_call = None def __call__(self, *args): self.last_call = list(args) self.last_call[-1] = self.last_call[-1].value return 0 with patch.object(utils, '_sys_fallocate', FallocateWrapper()): utils._sys_fallocate = FallocateWrapper() # Ensure fallocate calls _sys_fallocate even with 0 bytes utils._sys_fallocate.last_call = None utils.fallocate(1234, 0) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 0]) # Ensure fallocate calls _sys_fallocate even with negative bytes utils._sys_fallocate.last_call = None utils.fallocate(1234, -5678) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 0]) # Ensure fallocate calls _sys_fallocate properly with positive # bytes utils._sys_fallocate.last_call = None utils.fallocate(1234, 1) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 1]) utils._sys_fallocate.last_call = None utils.fallocate(1234, 10 * 1024 * 1024 * 1024) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 10 * 1024 * 1024 * 1024]) def test_generate_trans_id(self): fake_time = 1366428370.5163341 with patch.object(utils.time, 'time', return_value=fake_time): trans_id = utils.generate_trans_id('') self.assertEqual(len(trans_id), 34) self.assertEqual(trans_id[:2], 'tx') self.assertEqual(trans_id[23], '-') self.assertEqual(int(trans_id[24:], 16), int(fake_time)) with patch.object(utils.time, 'time', return_value=fake_time): trans_id = 
utils.generate_trans_id('-suffix') self.assertEqual(len(trans_id), 41) self.assertEqual(trans_id[:2], 'tx') self.assertEqual(trans_id[34:], '-suffix') self.assertEqual(trans_id[23], '-') self.assertEqual(int(trans_id[24:34], 16), int(fake_time)) def test_get_trans_id_time(self): ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e') self.assertIsNone(ts) ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06') self.assertEqual(ts, 1366428678) self.assertEqual( time.asctime(time.gmtime(ts)) + ' UTC', 'Sat Apr 20 03:31:18 2013 UTC') ts = utils.get_trans_id_time( 'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix') self.assertEqual(ts, 1366428678) self.assertEqual( time.asctime(time.gmtime(ts)) + ' UTC', 'Sat Apr 20 03:31:18 2013 UTC') ts = utils.get_trans_id_time('') self.assertIsNone(ts) ts = utils.get_trans_id_time('garbage') self.assertIsNone(ts) ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright') self.assertIsNone(ts) def test_config_fallocate_value(self): fallocate_value, is_percent = utils.config_fallocate_value('10%') self.assertEqual(fallocate_value, 10) self.assertTrue(is_percent) fallocate_value, is_percent = utils.config_fallocate_value('10') self.assertEqual(fallocate_value, 10) self.assertFalse(is_percent) try: fallocate_value, is_percent = utils.config_fallocate_value('ab%') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: ab% is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('ab') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: ab is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('1%%') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: 1%% is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('10.0') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: 10.0 is 
an invalid value for ' 'fallocate_reserve.') fallocate_value, is_percent = utils.config_fallocate_value('10.5%') self.assertEqual(fallocate_value, 10.5) self.assertTrue(is_percent) fallocate_value, is_percent = utils.config_fallocate_value('10.000%') self.assertEqual(fallocate_value, 10.000) self.assertTrue(is_percent) def test_tpool_reraise(self): with patch.object(utils.tpool, 'execute', lambda f: f()): self.assertTrue( utils.tpool_reraise(MagicMock(return_value='test1')), 'test1') self.assertRaises( Exception, utils.tpool_reraise, MagicMock(side_effect=Exception('test2'))) self.assertRaises( BaseException, utils.tpool_reraise, MagicMock(side_effect=BaseException('test3'))) def test_lock_file(self): flags = os.O_CREAT | os.O_RDWR with NamedTemporaryFile(delete=False) as nt: nt.write("test string") nt.flush() nt.close() with utils.lock_file(nt.name, unlink=False) as f: self.assertEqual(f.read(), "test string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) with utils.lock_file(nt.name, unlink=False, append=True) as f: f.seek(0) self.assertEqual(f.read(), "test string") f.seek(0) f.write("\nanother string") f.flush() f.seek(0) self.assertEqual(f.read(), "test string\nanother string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) with utils.lock_file(nt.name, timeout=3, unlink=False) as f: try: with utils.lock_file( nt.name, timeout=1, unlink=False) as f: self.assertTrue( False, "Expected LockTimeout exception") except LockTimeout: pass with utils.lock_file(nt.name, unlink=True) as f: self.assertEqual(f.read(), "test string\nanother string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises( IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) self.assertRaises(OSError, os.remove, nt.name) def 
test_lock_file_unlinked_after_open(self): os_open = os.open first_pass = [True] def deleting_open(filename, flags): # unlink the file after it's opened. once. fd = os_open(filename, flags) if first_pass[0]: os.unlink(filename) first_pass[0] = False return fd with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.open', deleting_open): with utils.lock_file(nt.name, unlink=True) as f: self.assertNotEqual(os.fstat(nt.fileno()).st_ino, os.fstat(f.fileno()).st_ino) first_pass = [True] def recreating_open(filename, flags): # unlink and recreate the file after it's opened fd = os_open(filename, flags) if first_pass[0]: os.unlink(filename) os.close(os_open(filename, os.O_CREAT | os.O_RDWR)) first_pass[0] = False return fd with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.open', recreating_open): with utils.lock_file(nt.name, unlink=True) as f: self.assertNotEqual(os.fstat(nt.fileno()).st_ino, os.fstat(f.fileno()).st_ino) def test_lock_file_held_on_unlink(self): os_unlink = os.unlink def flocking_unlink(filename): # make sure the lock is held when we unlink fd = os.open(filename, os.O_RDWR) self.assertRaises( IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) os.close(fd) os_unlink(filename) with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.unlink', flocking_unlink): with utils.lock_file(nt.name, unlink=True): pass def test_lock_file_no_unlink_if_fail(self): os_open = os.open with NamedTemporaryFile(delete=True) as nt: def lock_on_open(filename, flags): # lock the file on another fd after it's opened. 
                fd = os_open(filename, flags)
                # grab an exclusive flock on a second fd so the caller's
                # lock attempt will block and time out
                fd2 = os_open(filename, flags)
                fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return fd

            try:
                timedout = False
                with mock.patch('os.open', lock_on_open):
                    with utils.lock_file(nt.name, unlink=False,
                                         timeout=0.01):
                        pass
            except LockTimeout:
                timedout = True
            # the lock must have timed out, and with unlink=False the
            # file must survive the failed attempt
            self.assertTrue(timedout)
            self.assertTrue(os.path.exists(nt.name))

    def test_ismount_path_does_not_exist(self):
        # A nonexistent path is not a mount point.
        tmpdir = mkdtemp()
        try:
            self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
        finally:
            shutil.rmtree(tmpdir)

    def test_ismount_path_not_mount(self):
        # A plain directory is not a mount point.
        tmpdir = mkdtemp()
        try:
            self.assertFalse(utils.ismount(tmpdir))
        finally:
            shutil.rmtree(tmpdir)

    def test_ismount_path_error(self):
        # ismount swallows lstat errors (unlike ismount_raw below).

        def _mock_os_lstat(path):
            raise OSError(13, "foo")

        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                # Raises exception with _raw -- see next test.
                utils.ismount(tmpdir)
        finally:
            shutil.rmtree(tmpdir)

    def test_ismount_raw_path_error(self):
        # ismount_raw propagates the OSError from lstat.

        def _mock_os_lstat(path):
            raise OSError(13, "foo")

        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                self.assertRaises(OSError, utils.ismount_raw, tmpdir)
        finally:
            shutil.rmtree(tmpdir)

    def test_ismount_path_is_symlink(self):
        # A symlink is never reported as a mount point, even when its
        # target is one.
        tmpdir = mkdtemp()
        try:
            link = os.path.join(tmpdir, "tmp")
            os.symlink(tempfile.gettempdir(), link)
            self.assertFalse(utils.ismount(link))
        finally:
            shutil.rmtree(tmpdir)

    def test_ismount_path_is_root(self):
        # '/' is always a mount point.
        self.assertTrue(utils.ismount('/'))

    def test_ismount_parent_path_error(self):
        # lstat failing on the parent ('..') is also swallowed by
        # ismount (ismount_raw raises -- see next test).
        _os_lstat = os.lstat

        def _mock_os_lstat(path):
            if path.endswith(".."):
                raise OSError(13, "foo")
            else:
                return _os_lstat(path)

        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                # Raises exception with _raw -- see next test.
utils.ismount(tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_raw_parent_path_error(self): _os_lstat = os.lstat def _mock_os_lstat(path): if path.endswith(".."): raise OSError(13, "foo") else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertRaises(OSError, utils.ismount_raw, tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_successes_dev(self): _os_lstat = os.lstat class MockStat(object): def __init__(self, mode, dev, ino): self.st_mode = mode self.st_dev = dev self.st_ino = ino def _mock_os_lstat(path): if path.endswith(".."): parent = _os_lstat(path) return MockStat(parent.st_mode, parent.st_dev + 1, parent.st_ino) else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_successes_ino(self): _os_lstat = os.lstat class MockStat(object): def __init__(self, mode, dev, ino): self.st_mode = mode self.st_dev = dev self.st_ino = ino def _mock_os_lstat(path): if path.endswith(".."): return _os_lstat(path) else: parent_path = os.path.join(path, "..") child = _os_lstat(path) parent = _os_lstat(parent_path) return MockStat(child.st_mode, parent.st_ino, child.st_dev) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_successes_stubfile(self): tmpdir = mkdtemp() fname = os.path.join(tmpdir, ".ismount") try: with open(fname, "w") as stubfile: stubfile.write("") self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_parse_content_type(self): self.assertEqual(utils.parse_content_type('text/plain'), ('text/plain', [])) self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'), ('text/plain', [('charset', 'utf-8')])) self.assertEqual( utils.parse_content_type('text/plain;hello="world";charset=utf-8'), ('text/plain', [('hello', '"world"'), ('charset', 'utf-8')])) 
self.assertEqual( utils.parse_content_type('text/plain; hello="world"; a=b'), ('text/plain', [('hello', '"world"'), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x="\""; a=b'), ('text/plain', [('x', r'"\""'), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x; a=b'), ('text/plain', [('x', ''), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x="\""; a'), ('text/plain', [('x', r'"\""'), ('a', '')])) def test_override_bytes_from_content_type(self): listing_dict = { 'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv', 'content_type': 'text/plain; hello="world"; swift_bytes=15'} utils.override_bytes_from_content_type(listing_dict, logger=FakeLogger()) self.assertEqual(listing_dict['bytes'], 15) self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') listing_dict = { 'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv', 'content_type': 'text/plain; hello="world"; swift_bytes=hey'} utils.override_bytes_from_content_type(listing_dict, logger=FakeLogger()) self.assertEqual(listing_dict['bytes'], 1234) self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') def test_extract_swift_bytes(self): scenarios = { # maps input value -> expected returned tuple '': ('', None), 'text/plain': ('text/plain', None), 'text/plain; other=thing': ('text/plain;other=thing', None), 'text/plain; swift_bytes=123': ('text/plain', '123'), 'text/plain; other=thing;swift_bytes=123': ('text/plain;other=thing', '123'), 'text/plain; swift_bytes=123; other=thing': ('text/plain;other=thing', '123'), 'text/plain; swift_bytes=123; swift_bytes=456': ('text/plain', '456'), 'text/plain; swift_bytes=123; other=thing;swift_bytes=456': ('text/plain;other=thing', '456')} for test_value, expected in scenarios.items(): self.assertEqual(expected, utils.extract_swift_bytes(test_value)) def test_clean_content_type(self): subtests = { '': '', 'text/plain': 'text/plain', 'text/plain; someother=thing': 'text/plain; 
someother=thing', 'text/plain; swift_bytes=123': 'text/plain', 'text/plain; someother=thing; swift_bytes=123': 'text/plain; someother=thing', # Since Swift always tacks on the swift_bytes, clean_content_type() # only strips swift_bytes if it's last. The next item simply shows # that if for some other odd reason it's not last, # clean_content_type() will not remove it from the header. 'text/plain; swift_bytes=123; someother=thing': 'text/plain; swift_bytes=123; someother=thing'} for before, after in subtests.items(): self.assertEqual(utils.clean_content_type(before), after) def test_get_valid_utf8_str(self): def do_test(input_value, expected): actual = utils.get_valid_utf8_str(input_value) self.assertEqual(expected, actual) self.assertIsInstance(actual, six.binary_type) actual.decode('utf-8') do_test(b'abc', b'abc') do_test(u'abc', b'abc') do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81') do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81') # test some invalid UTF-8 do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd') # check surrogate pairs, too do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1'), do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1'), do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1'), do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1'), def test_quote(self): res = utils.quote('/v1/a/c3/subdirx/') assert res == '/v1/a/c3/subdirx/' res = utils.quote('/v1/a&b/c3/subdirx/') assert res == '/v1/a%26b/c3/subdirx/' res = utils.quote('/v1/a&b/c3/subdirx/', safe='&') assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F' unicode_sample = u'\uc77c\uc601' account = 'abc_' + unicode_sample valid_utf8_str = utils.get_valid_utf8_str(account) account = 'abc_' + unicode_sample.encode('utf-8')[::-1] invalid_utf8_str = utils.get_valid_utf8_str(account) self.assertEqual('abc_%EC%9D%BC%EC%98%81', utils.quote(valid_utf8_str)) self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD', utils.quote(invalid_utf8_str)) def test_get_hmac(self): self.assertEqual( 
utils.get_hmac('GET', '/path', 1, 'abc'), 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f') def test_get_policy_index(self): # Account has no information about a policy req = Request.blank( '/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) res = Response() self.assertIsNone(utils.get_policy_index(req.headers, res.headers)) # The policy of a container can be specified by the response header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) res = Response(headers={'X-Backend-Storage-Policy-Index': '1'}) self.assertEqual('1', utils.get_policy_index(req.headers, res.headers)) # The policy of an object to be created can be specified by the request # header req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Backend-Storage-Policy-Index': '2'}) res = Response() self.assertEqual('2', utils.get_policy_index(req.headers, res.headers)) def test_get_log_line(self): req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'}) res = Response() trans_time = 1.2 additional_info = 'some information' server_pid = 1234 exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \ '/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -' with mock.patch( 'time.gmtime', mock.MagicMock(side_effect=[time.gmtime(10001.0)])): with mock.patch( 'os.getpid', mock.MagicMock(return_value=server_pid)): self.assertEqual( exp_line, utils.get_log_line(req, res, trans_time, additional_info)) def test_cache_from_env(self): # should never get logging when swift.cache is found env = {'swift.cache': 42} logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env, False)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) logger = FakeLogger() with 
mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env, True)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) # check allow_none controls logging when swift.cache is not found err_msg = 'ERROR: swift.cache could not be found in env!' env = {} logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env)) self.assertTrue(err_msg in logger.get_lines_for_level('error')) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env, False)) self.assertTrue(err_msg in logger.get_lines_for_level('error')) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env, True)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) def test_fsync_dir(self): tempdir = None fd = None try: tempdir = mkdtemp() fd, temppath = tempfile.mkstemp(dir=tempdir) _mock_fsync = mock.Mock() _mock_close = mock.Mock() with patch('swift.common.utils.fsync', _mock_fsync): with patch('os.close', _mock_close): utils.fsync_dir(tempdir) self.assertTrue(_mock_fsync.called) self.assertTrue(_mock_close.called) self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int)) self.assertEqual(_mock_fsync.call_args[0][0], _mock_close.call_args[0][0]) # Not a directory - arg is file path self.assertRaises(OSError, utils.fsync_dir, temppath) logger = FakeLogger() def _mock_fsync(fd): raise OSError(errno.EBADF, os.strerror(errno.EBADF)) with patch('swift.common.utils.fsync', _mock_fsync): with mock.patch('swift.common.utils.logging', logger): utils.fsync_dir(tempdir) self.assertEqual(1, len(logger.get_lines_for_level('warning'))) finally: if fd is not None: os.close(fd) os.unlink(temppath) if tempdir: os.rmdir(tempdir) def test_renamer_with_fsync_dir(self): tempdir = None try: tempdir = mkdtemp() # Simulate part of object path already existing part_dir = os.path.join(tempdir, 
'objects/1234/') os.makedirs(part_dir) obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32) obj_path = os.path.join(obj_dir, '1425276031.12345.data') # Object dir had to be created _m_os_rename = mock.Mock() _m_fsync_dir = mock.Mock() with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.renamer("fake_path", obj_path) _m_os_rename.assert_called_once_with('fake_path', obj_path) # fsync_dir on parents of all newly create dirs self.assertEqual(_m_fsync_dir.call_count, 3) # Object dir existed _m_os_rename.reset_mock() _m_fsync_dir.reset_mock() with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.renamer("fake_path", obj_path) _m_os_rename.assert_called_once_with('fake_path', obj_path) # fsync_dir only on the leaf dir self.assertEqual(_m_fsync_dir.call_count, 1) finally: if tempdir: shutil.rmtree(tempdir) def test_renamer_when_fsync_is_false(self): _m_os_rename = mock.Mock() _m_fsync_dir = mock.Mock() _m_makedirs_count = mock.Mock(return_value=2) with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): with patch('swift.common.utils.makedirs_count', _m_makedirs_count): utils.renamer("fake_path", "/a/b/c.data", fsync=False) _m_makedirs_count.assert_called_once_with("/a/b") _m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data") self.assertFalse(_m_fsync_dir.called) def test_makedirs_count(self): tempdir = None fd = None try: tempdir = mkdtemp() os.makedirs(os.path.join(tempdir, 'a/b')) # 4 new dirs created dirpath = os.path.join(tempdir, 'a/b/1/2/3/4') ret = utils.makedirs_count(dirpath) self.assertEqual(ret, 4) # no new dirs created - dir already exists ret = utils.makedirs_count(dirpath) self.assertEqual(ret, 0) # path exists and is a file fd, temppath = tempfile.mkstemp(dir=dirpath) os.close(fd) self.assertRaises(OSError, utils.makedirs_count, temppath) finally: if tempdir: shutil.rmtree(tempdir) def 
test_modify_priority(self): pid = os.getpid() logger = debug_logger() called = {} def _fake_setpriority(*args): called['setpriority'] = args def _fake_syscall(*args): called['syscall'] = args # Test if current architecture supports changing of priority try: utils.NR_ioprio_set() except OSError as e: raise unittest.SkipTest(e) with patch('swift.common.utils._libc_setpriority', _fake_setpriority), \ patch('swift.common.utils._posix_syscall', _fake_syscall): called = {} # not set / default utils.modify_priority({}, logger) self.assertEqual(called, {}) called = {} # just nice utils.modify_priority({'nice_priority': '1'}, logger) self.assertEqual(called, {'setpriority': (0, pid, 1)}) called = {} # just ionice class uses default priority 0 utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger) architecture = os.uname()[4] arch_bits = platform.architecture()[0] if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)}) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)}) else: self.fail("Unexpected call: %r" % called) called = {} # just ionice priority is ignored utils.modify_priority({'ionice_priority': '4'}, logger) self.assertEqual(called, {}) called = {} # bad ionice class utils.modify_priority({'ionice_class': 'class_foo'}, logger) self.assertEqual(called, {}) called = {} # ionice class & priority utils.modify_priority({ 'ionice_class': 'IOPRIO_CLASS_BE', 'ionice_priority': '4', }, logger) if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, { 'syscall': (251, 1, pid, 2 << 13 | 4) }) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, { 'syscall': (30, 1, pid, 2 << 13 | 4) }) else: self.fail("Unexpected call: %r" % called) called = {} # all utils.modify_priority({ 'nice_priority': '-15', 'ionice_class': 'IOPRIO_CLASS_IDLE', 'ionice_priority': '6', }, logger) if architecture == 
'x86_64' and arch_bits == '64bit': self.assertEqual(called, { 'setpriority': (0, pid, -15), 'syscall': (251, 1, pid, 3 << 13 | 6), }) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, { 'setpriority': (0, pid, -15), 'syscall': (30, 1, pid, 3 << 13 | 6), }) else: self.fail("Unexpected call: %r" % called) def test__NR_ioprio_set(self): with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertEqual(251, utils.NR_ioprio_set()) with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \ patch('platform.architecture', return_value=('32bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertEqual(30, utils.NR_ioprio_set()) with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \ patch('platform.architecture', return_value=('32bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) with patch('os.uname', return_value=('', '', '', '', 'alpha')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) @requires_o_tmpfile_support def test_link_fd_to_path_linkat_success(self): tempdir = mkdtemp() fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY) data = "I'm whatever Gotham needs me to be" _m_fsync_dir = mock.Mock() try: os.write(fd, data) # fd is O_WRONLY self.assertRaises(OSError, os.read, fd, 1) file_path = os.path.join(tempdir, uuid4().hex) with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.link_fd_to_path(fd, file_path, 1) with open(file_path, 'r') as f: self.assertEqual(f.read(), data) self.assertEqual(_m_fsync_dir.call_count, 2) finally: os.close(fd) shutil.rmtree(tempdir) @requires_o_tmpfile_support def test_link_fd_to_path_target_exists(self): tempdir = mkdtemp() # Create and write to a file fd, path = 
tempfile.mkstemp(dir=tempdir) os.write(fd, "hello world") os.fsync(fd) os.close(fd) self.assertTrue(os.path.exists(path)) fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY) try: os.write(fd, "bye world") os.fsync(fd) utils.link_fd_to_path(fd, path, 0, fsync=False) # Original file now should have been over-written with open(path, 'r') as f: self.assertEqual(f.read(), "bye world") finally: os.close(fd) shutil.rmtree(tempdir) @requires_o_tmpfile_support def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self): _m_linkat = mock.Mock( side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES))) with mock.patch('swift.common.utils.linkat', _m_linkat): try: utils.link_fd_to_path(0, '/path', 1) except IOError as err: self.assertEqual(err.errno, errno.EACCES) else: self.fail("Expecting IOError exception") self.assertTrue(_m_linkat.called) @requires_o_tmpfile_support def test_linkat_race_dir_not_exists(self): tempdir = mkdtemp() target_dir = os.path.join(tempdir, uuid4().hex) target_path = os.path.join(target_dir, uuid4().hex) os.mkdir(target_dir) fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY) # Simulating directory deletion by other backend process os.rmdir(target_dir) self.assertFalse(os.path.exists(target_dir)) try: utils.link_fd_to_path(fd, target_path, 1) self.assertTrue(os.path.exists(target_dir)) self.assertTrue(os.path.exists(target_path)) finally: os.close(fd) shutil.rmtree(tempdir) def test_safe_json_loads(self): expectations = { None: None, '': None, 0: None, 1: None, '"asdf"': 'asdf', '[]': [], '{}': {}, "{'foo': 'bar'}": None, '{"foo": "bar"}': {'foo': 'bar'}, } failures = [] for value, expected in expectations.items(): try: result = utils.safe_json_loads(value) except Exception as e: # it's called safe, if it blows up the test blows up self.fail('%r caused safe method to throw %r!' 
% ( value, e)) try: self.assertEqual(expected, result) except AssertionError: failures.append('%r => %r (expected %r)' % ( value, result, expected)) if failures: self.fail('Invalid results from pure function:\n%s' % '\n'.join(failures)) def test_strict_b64decode(self): expectations = { None: ValueError, 0: ValueError, b'': b'', u'': b'', b'A': ValueError, b'AA': ValueError, b'AAA': ValueError, b'AAAA': b'\x00\x00\x00', u'AAAA': b'\x00\x00\x00', b'////': b'\xff\xff\xff', u'////': b'\xff\xff\xff', b'A===': ValueError, b'AA==': b'\x00', b'AAA=': b'\x00\x00', b' AAAA': ValueError, b'AAAA ': ValueError, b'AAAA============': b'\x00\x00\x00', b'AA&AA==': ValueError, b'====': b'', } failures = [] for value, expected in expectations.items(): try: result = utils.strict_b64decode(value) except Exception as e: if inspect.isclass(expected) and issubclass( expected, Exception): if not isinstance(e, expected): failures.append('%r raised %r (expected to raise %r)' % (value, e, expected)) else: failures.append('%r raised %r (expected to return %r)' % (value, e, expected)) else: if inspect.isclass(expected) and issubclass( expected, Exception): failures.append('%r => %r (expected to raise %r)' % (value, result, expected)) elif result != expected: failures.append('%r => %r (expected %r)' % ( value, result, expected)) if failures: self.fail('Invalid results from pure function:\n%s' % '\n'.join(failures)) def test_replace_partition_in_path(self): # Check for new part = part * 2 old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f' new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f' # Expected outcome self.assertEqual(utils.replace_partition_in_path(old, 11), new) # Make sure there is no change if the part power didn't change self.assertEqual(utils.replace_partition_in_path(old, 10), old) self.assertEqual(utils.replace_partition_in_path(new, 11), new) # Check for new part = part * 2 + 1 old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' new = 
'/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f' # Expected outcome self.assertEqual(utils.replace_partition_in_path(old, 11), new) # Make sure there is no change if the part power didn't change self.assertEqual(utils.replace_partition_in_path(old, 10), old) self.assertEqual(utils.replace_partition_in_path(new, 11), new) class ResellerConfReader(unittest.TestCase): def setUp(self): self.default_rules = {'operator_roles': ['admin', 'swiftoperator'], 'service_roles': [], 'require_group': ''} def test_defaults(self): conf = {} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_']) self.assertEqual(options['AUTH_'], self.default_rules) def test_same_as_default(self): conf = {'reseller_prefix': 'AUTH', 'operator_roles': 'admin, swiftoperator'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_']) self.assertEqual(options['AUTH_'], self.default_rules) def test_single_blank_reseller(self): conf = {'reseller_prefix': ''} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''], self.default_rules) def test_single_blank_reseller_with_conf(self): conf = {'reseller_prefix': '', "''operator_roles": 'role1, role2'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''].get('operator_roles'), ['role1', 'role2']) self.assertEqual(options[''].get('service_roles'), self.default_rules.get('service_roles')) self.assertEqual(options[''].get('require_group'), self.default_rules.get('require_group')) def test_multiple_same_resellers(self): conf = {'reseller_prefix': " '' , '' "} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) conf = {'reseller_prefix': '_, _'} prefixes, options = 
utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['_']) conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', 'PRE2_']) def test_several_resellers_with_conf(self): conf = {'reseller_prefix': 'PRE1, PRE2', 'PRE1_operator_roles': 'role1, role2', 'PRE1_service_roles': 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['PRE1_', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options['PRE1_'].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options['PRE1_'].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['PRE1_'].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_several_resellers_first_blank(self): conf = {'reseller_prefix': " '' , PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_several_resellers_with_blank_comma(self): conf = {'reseller_prefix': "AUTH , '', 
PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_']) self.assertEqual(set(['admin', 'swiftoperator']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual([], options['AUTH_'].get('service_roles')) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['AUTH_'].get('require_group')) self.assertEqual('', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_stray_comma(self): conf = {'reseller_prefix': "AUTH ,, PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', 'PRE2_']) self.assertEqual(set(['admin', 'swiftoperator']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual([], options['AUTH_'].get('service_roles')) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['AUTH_'].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_multiple_stray_commas_resellers(self): conf = {'reseller_prefix': ' , , ,'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''], self.default_rules) def 
test_unprefixed_options(self): conf = {'reseller_prefix': "AUTH , '', PRE2", "operator_roles": 'role1, role2', "service_roles": 'role3, role4', 'require_group': 'auth_blank_group', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options['AUTH_'].get('service_roles'))) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('auth_blank_group', options['AUTH_'].get('require_group')) self.assertEqual('auth_blank_group', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) class TestUnlinkOlder(unittest.TestCase): def setUp(self): self.tempdir = mkdtemp() self.mtime = {} self.ts = make_timestamp_iter() def tearDown(self): rmtree(self.tempdir, ignore_errors=True) def touch(self, fpath, mtime=None): self.mtime[fpath] = mtime or next(self.ts) open(fpath, 'w') @contextlib.contextmanager def high_resolution_getmtime(self): orig_getmtime = os.path.getmtime def mock_getmtime(fpath): mtime = self.mtime.get(fpath) if mtime is None: mtime = orig_getmtime(fpath) return mtime with mock.patch('os.path.getmtime', mock_getmtime): yield def test_unlink_older_than_path_not_exists(self): path = os.path.join(self.tempdir, 'does-not-exist') # just make sure it doesn't blow up utils.unlink_older_than(path, next(self.ts)) def test_unlink_older_than_file(self): path = os.path.join(self.tempdir, 'some-file') self.touch(path) with self.assertRaises(OSError) as ctx: 
utils.unlink_older_than(path, next(self.ts)) self.assertEqual(ctx.exception.errno, errno.ENOTDIR) def test_unlink_older_than_now(self): self.touch(os.path.join(self.tempdir, 'test')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, next(self.ts)) self.assertEqual([], os.listdir(self.tempdir)) def test_unlink_not_old_enough(self): start = next(self.ts) self.touch(os.path.join(self.tempdir, 'test')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, start) self.assertEqual(['test'], os.listdir(self.tempdir)) def test_unlink_mixed(self): self.touch(os.path.join(self.tempdir, 'first')) cutoff = next(self.ts) self.touch(os.path.join(self.tempdir, 'second')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, cutoff) self.assertEqual(['second'], os.listdir(self.tempdir)) def test_unlink_paths(self): paths = [] for item in ('first', 'second', 'third'): path = os.path.join(self.tempdir, item) self.touch(path) paths.append(path) # don't unlink everyone with self.high_resolution_getmtime(): utils.unlink_paths_older_than(paths[:2], next(self.ts)) self.assertEqual(['third'], os.listdir(self.tempdir)) def test_unlink_empty_paths(self): # just make sure it doesn't blow up utils.unlink_paths_older_than([], next(self.ts)) def test_unlink_not_exists_paths(self): path = os.path.join(self.tempdir, 'does-not-exist') # just make sure it doesn't blow up utils.unlink_paths_older_than([path], next(self.ts)) class TestSwiftInfo(unittest.TestCase): def tearDown(self): utils._swift_info = {} utils._swift_admin_info = {} def test_register_swift_info(self): utils.register_swift_info(foo='bar') utils.register_swift_info(lorem='ipsum') utils.register_swift_info('cap1', cap1_foo='cap1_bar') utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum') self.assertTrue('swift' in utils._swift_info) self.assertTrue('foo' in utils._swift_info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') 
self.assertTrue('lorem' in utils._swift_info['swift']) self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum') self.assertTrue('cap1' in utils._swift_info) self.assertTrue('cap1_foo' in utils._swift_info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') self.assertTrue('cap1_lorem' in utils._swift_info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum') self.assertRaises(ValueError, utils.register_swift_info, 'admin', foo='bar') self.assertRaises(ValueError, utils.register_swift_info, 'disallowed_sections', disallowed_sections=None) utils.register_swift_info('goodkey', foo='5.6') self.assertRaises(ValueError, utils.register_swift_info, 'bad.key', foo='5.6') data = {'bad.key': '5.6'} self.assertRaises(ValueError, utils.register_swift_info, 'goodkey', **data) def test_get_swift_info(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info() self.assertNotIn('admin', info) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertIn('cap1', info) self.assertIn('cap1_foo', info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') def test_get_swift_info_with_disallowed_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap3_foo': 'cap3_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3']) self.assertNotIn('admin', info) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(info['swift']['foo'], 'bar') self.assertNotIn('cap1', info) self.assertIn('cap2', info) self.assertIn('cap2_foo', info['cap2']) self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar') self.assertNotIn('cap3', info) def 
test_register_swift_admin_info(self): utils.register_swift_info(admin=True, admin_foo='admin_bar') utils.register_swift_info(admin=True, admin_lorem='admin_ipsum') utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar') utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum') self.assertIn('swift', utils._swift_admin_info) self.assertIn('admin_foo', utils._swift_admin_info['swift']) self.assertEqual( utils._swift_admin_info['swift']['admin_foo'], 'admin_bar') self.assertIn('admin_lorem', utils._swift_admin_info['swift']) self.assertEqual( utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum') self.assertIn('cap1', utils._swift_admin_info) self.assertIn('ac1_foo', utils._swift_admin_info['cap1']) self.assertEqual( utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar') self.assertIn('ac1_lorem', utils._swift_admin_info['cap1']) self.assertEqual( utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum') self.assertNotIn('swift', utils._swift_info) self.assertNotIn('cap1', utils._swift_info) def test_get_swift_admin_info(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info(admin=True) self.assertIn('admin', info) self.assertIn('admin_cap1', info['admin']) self.assertIn('ac1_foo', info['admin']['admin_cap1']) self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar') self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertIn('cap1', info) self.assertIn('cap1_foo', info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') def test_get_swift_admin_info_with_disallowed_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap3_foo': 'cap3_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = 
utils.get_swift_info( admin=True, disallowed_sections=['cap1', 'cap3']) self.assertIn('admin', info) self.assertIn('admin_cap1', info['admin']) self.assertIn('ac1_foo', info['admin']['admin_cap1']) self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar') self.assertIn('disallowed_sections', info['admin']) self.assertIn('cap1', info['admin']['disallowed_sections']) self.assertNotIn('cap2', info['admin']['disallowed_sections']) self.assertIn('cap3', info['admin']['disallowed_sections']) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(info['swift']['foo'], 'bar') self.assertNotIn('cap1', info) self.assertIn('cap2', info) self.assertIn('cap2_foo', info['cap2']) self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar') self.assertNotIn('cap3', info) def test_get_swift_admin_info_with_disallowed_sub_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar', 'cap1_moo': 'cap1_baa'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap2_foo': 'cap2_bar'}, 'cap4': {'a': {'b': {'c': 'c'}, 'b.c': 'b.c'}}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info( admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3', 'cap4.a.b.c']) self.assertNotIn('cap3', info) self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa') self.assertNotIn('cap1_foo', info['cap1']) self.assertNotIn('c', info['cap4']['a']['b']) self.assertEqual(info['cap4']['a']['b.c'], 'b.c') def test_get_swift_info_with_unmatched_disallowed_sections(self): cap1 = {'cap1_foo': 'cap1_bar', 'cap1_moo': 'cap1_baa'} utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': cap1} # expect no exceptions info = utils.get_swift_info( disallowed_sections=['cap2.cap1_foo', 'cap1.no_match', 'cap1.cap1_foo.no_match.no_match']) self.assertEqual(info['cap1'], cap1) class TestFileLikeIter(unittest.TestCase): def test_iter_file_iter(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] for chunk in 
utils.FileLikeIter(in_iter): chunks.append(chunk) self.assertEqual(chunks, in_iter) def test_next(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] iter_file = utils.FileLikeIter(in_iter) while True: try: chunk = next(iter_file) except StopIteration: break chunks.append(chunk) self.assertEqual(chunks, in_iter) def test_read(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] iter_file = utils.FileLikeIter(in_iter) self.assertEqual(iter_file.read(), b''.join(in_iter)) def test_read_with_size(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] iter_file = utils.FileLikeIter(in_iter) while True: chunk = iter_file.read(2) if not chunk: break self.assertTrue(len(chunk) <= 2) chunks.append(chunk) self.assertEqual(b''.join(chunks), b''.join(in_iter)) def test_read_with_size_zero(self): # makes little sense, but file supports it, so... self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'') def test_readline(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = [] iter_file = utils.FileLikeIter(in_iter) while True: line = iter_file.readline() if not line: break lines.append(line) self.assertEqual( lines, [v if v == b'trailing.' 
else v + b'\n' for v in b''.join(in_iter).split(b'\n')]) def test_readline2(self): self.assertEqual( utils.FileLikeIter([b'abc', b'def\n']).readline(4), b'abcd') def test_readline3(self): self.assertEqual( utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(), (b'a' * 1111) + b'bc\n') def test_readline_with_size(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = [] iter_file = utils.FileLikeIter(in_iter) while True: line = iter_file.readline(2) if not line: break lines.append(line) self.assertEqual( lines, [b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n', b'k\n', b'tr', b'ai', b'li', b'ng', b'.']) def test_readlines(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = utils.FileLikeIter(in_iter).readlines() self.assertEqual( lines, [v if v == b'trailing.' else v + b'\n' for v in b''.join(in_iter).split(b'\n')]) def test_readlines_with_size(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] iter_file = utils.FileLikeIter(in_iter) lists_of_lines = [] while True: lines = iter_file.readlines(2) if not lines: break lists_of_lines.append(lines) self.assertEqual( lists_of_lines, [[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'], [b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'], [b'.']]) def test_close(self): iter_file = utils.FileLikeIter([b'a', b'b', b'c']) self.assertEqual(next(iter_file), b'a') iter_file.close() self.assertTrue(iter_file.closed) self.assertRaises(ValueError, iter_file.next) self.assertRaises(ValueError, iter_file.read) self.assertRaises(ValueError, iter_file.readline) self.assertRaises(ValueError, iter_file.readlines) # Just make sure repeated close calls don't raise an Exception iter_file.close() self.assertTrue(iter_file.closed) class TestStatsdLogging(unittest.TestCase): def setUp(self): def fake_getaddrinfo(host, port, *args): # this is what a real getaddrinfo('localhost', port, # 
socket.AF_INET) returned once return [(socket.AF_INET, # address family socket.SOCK_STREAM, # socket type socket.IPPROTO_TCP, # socket protocol '', # canonical name, ('127.0.0.1', port)), # socket address (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('127.0.0.1', port))] self.real_getaddrinfo = utils.socket.getaddrinfo self.getaddrinfo_patcher = mock.patch.object( utils.socket, 'getaddrinfo', fake_getaddrinfo) self.mock_getaddrinfo = self.getaddrinfo_patcher.start() self.addCleanup(self.getaddrinfo_patcher.stop) def test_get_logger_statsd_client_not_specified(self): logger = utils.get_logger({}, 'some-name', log_route='some-route') # white-box construction validation self.assertIsNone(logger.logger.statsd_client) def test_get_logger_statsd_client_defaults(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}, 'some-name', log_route='some-route') # white-box construction validation self.assertTrue(isinstance(logger.logger.statsd_client, utils.StatsdClient)) self.assertEqual(logger.logger.statsd_client._host, 'some.host.com') self.assertEqual(logger.logger.statsd_client._port, 8125) self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.') self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1) logger.set_statsd_prefix('some-name.more-specific') self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.more-specific.') logger.set_statsd_prefix('') self.assertEqual(logger.logger.statsd_client._prefix, '') def test_get_logger_statsd_client_non_defaults(self): logger = utils.get_logger({ 'log_statsd_host': 'another.host.com', 'log_statsd_port': '9876', 'log_statsd_default_sample_rate': '0.75', 'log_statsd_sample_rate_factor': '0.81', 'log_statsd_metric_prefix': 'tomato.sauce', }, 'some-name', log_route='some-route') self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.some-name.') logger.set_statsd_prefix('some-name.more-specific') self.assertEqual(logger.logger.statsd_client._prefix, 
                         'tomato.sauce.some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.')
        self.assertEqual(logger.logger.statsd_client._host,
                         'another.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 9876)
        self.assertEqual(logger.logger.statsd_client._default_sample_rate,
                         0.75)
        self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
                         0.81)

    def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
        # When getaddrinfo() resolves a host to BOTH address families, the
        # statsd client must pick IPv4.
        def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
            if family == socket.AF_INET:
                return [(socket.AF_INET, 'blah', 'blah', 'blah',
                         ('127.0.0.1', int(port)))]
            elif family == socket.AF_INET6:
                # Implemented so an incorrectly ordered implementation (IPv6
                # then IPv4) would realistically fail.
                return [(socket.AF_INET6, 'blah', 'blah', 'blah',
                         ('::1', int(port), 0, 0))]

        with mock.patch.object(utils.socket, 'getaddrinfo',
                               new=stub_getaddrinfo_both_ipv4_and_ipv6):
            logger = utils.get_logger({
                'log_statsd_host': 'localhost',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target, ('localhost', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)

    def test_ipv4_instantiation_and_socket_creation(self):
        logger = utils.get_logger({
            'log_statsd_host': '127.0.0.1',
            'log_statsd_port': '9876',
        }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)

    def test_ipv6_instantiation_and_socket_creation(self):
        # We have to check the given hostname or IP for IPv4/IPv6 on logger
        # instantiation so we don't call getaddrinfo() too often and don't
        # have to call bind() on our socket to detect IPv4/IPv6 on every
        # send.
        #
        # This test uses the real getaddrinfo, so we patch over the mock to
        # put the real one back. If we just stop the mock, then
        # unittest.exit() blows up, but stacking real-fake-real works okay.
        with mock.patch.object(utils.socket, 'getaddrinfo',
                               self.real_getaddrinfo):
            logger = utils.get_logger({
                'log_statsd_host': '::1',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
        self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET6)

    def test_bad_hostname_instantiation(self):
        # An unresolvable host must not blow up logger creation; the client
        # falls back to IPv4 and keeps the name verbatim.
        with mock.patch.object(utils.socket, 'getaddrinfo',
                               side_effect=utils.socket.gaierror("whoops")):
            logger = utils.get_logger({
                'log_statsd_host': 'i-am-not-a-hostname-or-ip',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target,
                         ('i-am-not-a-hostname-or-ip', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)
        # Maybe the DNS server gets fixed in a bit and it starts working... or
        # maybe the DNS record hadn't propagated yet. In any case, failed
        # statsd sends will warn in the logs until the DNS failure or invalid
        # IP address in the configuration is fixed.

    def test_sending_ipv6(self):
        def fake_getaddrinfo(host, port, *args):
            # this is what a real getaddrinfo('::1', port,
            # socket.AF_INET6) returned once
            return [(socket.AF_INET6,
                     socket.SOCK_STREAM,
                     socket.IPPROTO_TCP,
                     '',
                     ('::1', port, 0, 0)),
                    (socket.AF_INET6,
                     socket.SOCK_DGRAM,
                     socket.IPPROTO_UDP,
                     '',
                     ('::1', port, 0, 0))]

        with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
            logger = utils.get_logger({
                'log_statsd_host': '::1',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        fl = FakeLogger()
        statsd_client.logger = fl
        mock_socket = MockUdpSocket()

        statsd_client._open_socket = lambda *_: mock_socket
        logger.increment('tunafish')
        self.assertEqual(fl.get_lines_for_level('warning'), [])
        self.assertEqual(mock_socket.sent,
                         [(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])

    def test_no_exception_when_cant_send_udp_packet(self):
        # A failing sendto() must only warn, never propagate to the caller.
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
        statsd_client = logger.logger.statsd_client
        fl = FakeLogger()
        statsd_client.logger = fl
        mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
        statsd_client._open_socket = lambda *_: mock_socket
        logger.increment('tunafish')
        expected = ["Error sending UDP message to ('some.host.com', 8125): "
                    "[Errno 1] test errno 1"]
        self.assertEqual(fl.get_lines_for_level('warning'), expected)

    def test_sample_rates(self):
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'})

        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertTrue(statsd_client.random is random.random)

        statsd_client._open_socket = lambda *_: mock_socket
        # random() just above the sample rate -> packet suppressed
        statsd_client.random = lambda: 0.50001

        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 0)

        # random() just below the sample rate -> packet sent, tagged |@0.5
        statsd_client.random = lambda: 0.49999
        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 1)

        payload = mock_socket.sent[0][0]
        self.assertTrue(payload.endswith(b"|@0.5"))

    def test_sample_rates_with_sample_rate_factor(self):
        # effective rate = default_sample_rate * sample_rate_factor
        logger = utils.get_logger({
            'log_statsd_host': 'some.host.com',
            'log_statsd_default_sample_rate': '0.82',
            'log_statsd_sample_rate_factor': '0.91',
        })
        effective_sample_rate = 0.82 * 0.91

        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertTrue(statsd_client.random is random.random)

        statsd_client._open_socket = lambda *_: mock_socket
        statsd_client.random = lambda: effective_sample_rate + 0.001

        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 0)

        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 1)

        payload = mock_socket.sent[0][0]
        suffix = "|@%s" % effective_sample_rate
        if six.PY3:
            suffix = suffix.encode('utf-8')
        self.assertTrue(payload.endswith(suffix), payload)

        # An explicit per-call sample_rate is still scaled by the factor.
        effective_sample_rate = 0.587 * 0.91
        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles', sample_rate=0.587)
        self.assertEqual(len(mock_socket.sent), 2)

        payload = mock_socket.sent[1][0]
        suffix = "|@%s" % effective_sample_rate
        if six.PY3:
            suffix = suffix.encode('utf-8')
        self.assertTrue(payload.endswith(suffix), payload)

    def test_timing_stats(self):
        class MockController(object):
            def __init__(self, status):
                self.status = status
                self.logger = self
                self.args = ()
                self.called = 'UNKNOWN'

            def timing_since(self, *args):
                self.called = 'timing'
                self.args = args

        @utils.timing_stats()
        def METHOD(controller):
            return Response(status=controller.status)

        # 2xx and "expected" 4xx statuses are recorded as <name>.timing ...
        mock_controller = MockController(200)
        METHOD(mock_controller)
        self.assertEqual(mock_controller.called, 'timing')
        self.assertEqual(len(mock_controller.args), 2)
        self.assertEqual(mock_controller.args[0], 'METHOD.timing')
        self.assertTrue(mock_controller.args[1] > 0)

        mock_controller = MockController(404)
        METHOD(mock_controller)
        self.assertEqual(len(mock_controller.args), 2)
        self.assertEqual(mock_controller.called, 'timing')
        self.assertEqual(mock_controller.args[0], 'METHOD.timing')
        self.assertTrue(mock_controller.args[1] > 0)

        mock_controller = MockController(412)
        METHOD(mock_controller)
        self.assertEqual(len(mock_controller.args), 2)
        self.assertEqual(mock_controller.called, 'timing')
        self.assertEqual(mock_controller.args[0], 'METHOD.timing')
        self.assertTrue(mock_controller.args[1] > 0)

        mock_controller = MockController(416)
        METHOD(mock_controller)
        self.assertEqual(len(mock_controller.args), 2)
        self.assertEqual(mock_controller.called, 'timing')
        self.assertEqual(mock_controller.args[0], 'METHOD.timing')
        self.assertTrue(mock_controller.args[1] > 0)

        # ... while a 401 lands in the .errors.timing bucket.
        mock_controller = MockController(401)
        METHOD(mock_controller)
        self.assertEqual(len(mock_controller.args), 2)
        self.assertEqual(mock_controller.called, 'timing')
        self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
        self.assertTrue(mock_controller.args[1] > 0)


class UnsafeXrange(object):
    """
    Like xrange(limit), but with extra context switching to screw things up.
    """
    def __init__(self, upper_bound):
        self.current = 0
        self.concurrent_calls = 0
        self.upper_bound = upper_bound
        self.concurrent_call = False

    def __iter__(self):
        return self

    def next(self):
        # Record whether another greenthread was mid-iteration; the
        # GreenthreadSafeIterator tests assert on concurrent_call.
        if self.concurrent_calls > 0:
            self.concurrent_call = True

        self.concurrent_calls += 1
        try:
            if self.current >= self.upper_bound:
                raise StopIteration
            else:
                val = self.current
                self.current += 1
                eventlet.sleep()  # yield control
                return val
        finally:
            self.concurrent_calls -= 1
    __next__ = next


class TestAffinityKeyFunction(unittest.TestCase):
    def setUp(self):
        self.nodes = [dict(id=0, region=1, zone=1),
                      dict(id=1, region=1, zone=2),
                      dict(id=2, region=2, zone=1),
                      dict(id=3, region=2, zone=2),
                      dict(id=4, region=3, zone=1),
                      dict(id=5, region=3, zone=2),
                      dict(id=6, region=4, zone=0),
                      dict(id=7, region=4, zone=1)]

    def test_single_region(self):
        keyfn = utils.affinity_key_function("r3=1")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)

    def test_bogus_value(self):
        self.assertRaises(ValueError,
                          utils.affinity_key_function, "r3")
        self.assertRaises(ValueError,
                          utils.affinity_key_function, "r3=elephant")

    def test_empty_value(self):
        # Empty's okay, it just means no preference
        keyfn = utils.affinity_key_function("")
        self.assertTrue(callable(keyfn))
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)

    def test_all_whitespace_value(self):
        # Empty's okay, it just means no preference
        keyfn = utils.affinity_key_function(" \n")
        self.assertTrue(callable(keyfn))
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)

    def test_with_zone_zero(self):
        keyfn = utils.affinity_key_function("r4z0=1")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)

    def test_multiple(self):
        keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([4, 0, 1, 6,
                          7, 2, 3, 5], ids)

    def test_more_specific_after_less_specific(self):
        keyfn = utils.affinity_key_function("r2=100, r2z2=50")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)


class TestAffinityLocalityPredicate(unittest.TestCase):
    def setUp(self):
        self.nodes = [dict(id=0, region=1, zone=1),
                      dict(id=1, region=1, zone=2),
                      dict(id=2, region=2, zone=1),
                      dict(id=3, region=2, zone=2),
                      dict(id=4, region=3, zone=1),
                      dict(id=5, region=3, zone=2),
                      dict(id=6, region=4, zone=0),
                      dict(id=7, region=4, zone=1)]

    def test_empty(self):
        # No locality spec means no predicate at all.
        pred = utils.affinity_locality_predicate('')
        self.assertTrue(pred is None)

    def test_region(self):
        pred = utils.affinity_locality_predicate('r1')
        self.assertTrue(callable(pred))
        ids = [n['id'] for n in self.nodes if pred(n)]
        self.assertEqual([0, 1], ids)

    def test_zone(self):
        pred = utils.affinity_locality_predicate('r1z1')
        self.assertTrue(callable(pred))
        ids = [n['id'] for n in self.nodes if pred(n)]
        self.assertEqual([0], ids)

    def test_multiple(self):
        pred = utils.affinity_locality_predicate('r1, r3, r4z0')
        self.assertTrue(callable(pred))
        ids = [n['id'] for n in self.nodes if pred(n)]
        self.assertEqual([0, 1, 4, 5, 6], ids)

    def test_invalid(self):
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'falafel')
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'r8zQ')
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'r2d2')
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'r1z1=1')


class TestRateLimitedIterator(unittest.TestCase):

    def run_under_pseudo_time(
            self, func, *args, **kwargs):
        # Drive func() on a fake clock: both time.time() and eventlet.sleep()
        # advance curr_time, so the test is fast and deterministic.
        curr_time = [42.0]

        def my_time():
            curr_time[0] += 0.001
            return curr_time[0]

        def my_sleep(duration):
            curr_time[0] += 0.001
            curr_time[0] += duration

        with patch('time.time', my_time), \
                patch('eventlet.sleep', my_sleep):
            return func(*args, **kwargs)

    def test_rate_limiting(self):

        def testfunc():
            limited_iterator = utils.RateLimitedIterator(range(9999), 100)
            got = []
            started_at = time.time()
            try:
                while time.time() - started_at < 0.1:
                    got.append(next(limited_iterator))
            except StopIteration:
                pass
            return got

        got = self.run_under_pseudo_time(testfunc)
        # it's 11, not 10, because ratelimiting doesn't apply to the very
        # first element.
        self.assertEqual(len(got), 11)

    def test_rate_limiting_sometimes(self):

        def testfunc():
            limited_iterator = utils.RateLimitedIterator(
                range(9999), 100,
                ratelimit_if=lambda item: item % 23 != 0)
            got = []
            started_at = time.time()
            try:
                while time.time() - started_at < 0.5:
                    got.append(next(limited_iterator))
            except StopIteration:
                pass
            return got

        got = self.run_under_pseudo_time(testfunc)
        # we'd get 51 without the ratelimit_if, but because 0, 23 and 46
        # weren't subject to ratelimiting, we get 54 instead
        self.assertEqual(len(got), 54)

    def test_limit_after(self):

        def testfunc():
            limited_iterator = utils.RateLimitedIterator(
                range(9999), 100, limit_after=5)
            got = []
            started_at = time.time()
            try:
                while time.time() - started_at < 0.1:
                    got.append(next(limited_iterator))
            except StopIteration:
                pass
            return got

        got = self.run_under_pseudo_time(testfunc)
        # it's 16, not 15, because ratelimiting doesn't apply to the very
        # first element.
        self.assertEqual(len(got), 16)


class TestGreenthreadSafeIterator(unittest.TestCase):

    def increment(self, iterable):
        plus_ones = []
        for n in iterable:
            plus_ones.append(n + 1)
        return plus_ones

    def test_setup_works(self):
        # it should work without concurrent access
        self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))

        iterable = UnsafeXrange(10)
        pile = eventlet.GreenPile(2)
        for _ in range(2):
            pile.spawn(self.increment, iterable)

        # drain the pile; we only care that UnsafeXrange saw interleaving
        sorted([resp for resp in pile])
        self.assertTrue(
            iterable.concurrent_call, 'test setup is insufficiently crazy')

    def test_access_is_serialized(self):
        pile = eventlet.GreenPile(2)
        unsafe_iterable = UnsafeXrange(10)
        iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
        for _ in range(2):
            pile.spawn(self.increment, iterable)
        response = sorted(sum([resp for resp in pile], []))
        self.assertEqual(list(range(1, 11)), response)
        self.assertTrue(
            not unsafe_iterable.concurrent_call, 'concurrent call occurred')


class TestStatsdLoggingDelegation(unittest.TestCase):
    # End-to-end statsd tests: a real UDP socket plus a reader thread that
    # forwards every received payload onto self.queue.

    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('localhost', 0))
        self.port = self.sock.getsockname()[1]
        self.queue = Queue()
        self.reader_thread = threading.Thread(target=self.statsd_reader)
        self.reader_thread.setDaemon(1)
        self.reader_thread.start()

    def tearDown(self):
        # The "no-op when disabled" test doesn't set up a real logger, so
        # create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), }, 'some-name') self.logger.increment('STOP') self.reader_thread.join(timeout=4) self.sock.close() del self.logger def statsd_reader(self): while True: try: payload = self.sock.recv(4096) if payload and b'STOP' in payload: return 42 self.queue.put(payload) except Exception as e: sys.stderr.write('statsd_reader thread: %r' % (e,)) break def _send_and_get(self, sender_fn, *args, **kwargs): """ Because the client library may not actually send a packet with sample_rate < 1, we keep trying until we get one through. """ got = None while not got: sender_fn(*args, **kwargs) try: got = self.queue.get(timeout=0.5) except Empty: pass return got def assertStat(self, expected, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) if six.PY3: got = got.decode('utf-8') return self.assertEqual(expected, got) def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) if six.PY3: got = got.decode('utf-8') return self.assertTrue(re.search(expected_regexp, got), [got, expected_regexp]) def test_methods_are_no_ops_when_not_enabled(self): logger = utils.get_logger({ # No "log_statsd_host" means "disabled" 'log_statsd_port': str(self.port), }, 'some-name') # Delegate methods are no-ops self.assertIsNone(logger.update_stats('foo', 88)) self.assertIsNone(logger.update_stats('foo', 88, 0.57)) self.assertIsNone(logger.update_stats('foo', 88, sample_rate=0.61)) self.assertIsNone(logger.increment('foo')) self.assertIsNone(logger.increment('foo', 0.57)) self.assertIsNone(logger.increment('foo', sample_rate=0.61)) self.assertIsNone(logger.decrement('foo')) self.assertIsNone(logger.decrement('foo', 0.57)) self.assertIsNone(logger.decrement('foo', sample_rate=0.61)) self.assertIsNone(logger.timing('foo', 88.048)) self.assertIsNone(logger.timing('foo', 88.57, 0.34)) 
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82)) self.assertIsNone(logger.timing_since('foo', 8938)) self.assertIsNone(logger.timing_since('foo', 8948, 0.57)) self.assertIsNone(logger.timing_since('foo', 849398, sample_rate=0.61)) # Now, the queue should be empty (no UDP packets sent) self.assertRaises(Empty, self.queue.get_nowait) def test_delegate_methods_with_no_default_sample_rate(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), }, 'some-name') self.assertStat('some-name.some.counter:1|c', self.logger.increment, 'some.counter') self.assertStat('some-name.some.counter:-1|c', self.logger.decrement, 'some.counter') self.assertStat('some-name.some.operation:4900.0|ms', self.logger.timing, 'some.operation', 4.9 * 1000) self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms', self.logger.timing_since, 'another.operation', time.time()) self.assertStat('some-name.another.counter:42|c', self.logger.update_stats, 'another.counter', 42) # Each call can override the sample_rate (also, bonus prefix test) self.logger.set_statsd_prefix('pfx') self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment, 'some.counter', sample_rate=0.972) self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement, 'some.counter', sample_rate=0.972) self.assertStat('pfx.some.operation:4900.0|ms|@0.972', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.972) self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.972) self.assertStat('pfx.another.counter:3|c|@0.972', self.logger.update_stats, 'another.counter', 3, sample_rate=0.972) # Can override sample_rate with non-keyword arg self.logger.set_statsd_prefix('') self.assertStat('some.counter:1|c|@0.939', self.logger.increment, 'some.counter', 0.939) self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement, 'some.counter', 0.939) 
self.assertStat('some.operation:4900.0|ms|@0.939', self.logger.timing, 'some.operation', 4.9 * 1000, 0.939) self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939', self.logger.timing_since, 'another.op', time.time(), 0.939) self.assertStat('another.counter:3|c|@0.939', self.logger.update_stats, 'another.counter', 3, 0.939) def test_delegate_methods_with_default_sample_rate(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), 'log_statsd_default_sample_rate': '0.93', }, 'pfx') self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment, 'some.counter') self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement, 'some.counter') self.assertStat('pfx.some.operation:4760.0|ms|@0.93', self.logger.timing, 'some.operation', 4.76 * 1000) self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93', self.logger.timing_since, 'another.op', time.time()) self.assertStat('pfx.another.counter:3|c|@0.93', self.logger.update_stats, 'another.counter', 3) # Each call can override the sample_rate self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment, 'some.counter', sample_rate=0.9912) self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement, 'some.counter', sample_rate=0.9912) self.assertStat('pfx.some.operation:4900.0|ms|@0.9912', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.9912) self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.9912) self.assertStat('pfx.another.counter:3|c|@0.9912', self.logger.update_stats, 'another.counter', 3, sample_rate=0.9912) # Can override sample_rate with non-keyword arg self.logger.set_statsd_prefix('') self.assertStat('some.counter:1|c|@0.987654', self.logger.increment, 'some.counter', 0.987654) self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement, 'some.counter', 0.987654) self.assertStat('some.operation:4900.0|ms|@0.987654', 
self.logger.timing, 'some.operation', 4.9 * 1000, 0.987654) self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654', self.logger.timing_since, 'another.op', time.time(), 0.987654) self.assertStat('another.counter:3|c|@0.987654', self.logger.update_stats, 'another.counter', 3, 0.987654) def test_delegate_methods_with_metric_prefix(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), 'log_statsd_metric_prefix': 'alpha.beta', }, 'pfx') self.assertStat('alpha.beta.pfx.some.counter:1|c', self.logger.increment, 'some.counter') self.assertStat('alpha.beta.pfx.some.counter:-1|c', self.logger.decrement, 'some.counter') self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms', self.logger.timing, 'some.operation', 4.76 * 1000) self.assertStatMatches( 'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms', self.logger.timing_since, 'another.op', time.time()) self.assertStat('alpha.beta.pfx.another.counter:3|c', self.logger.update_stats, 'another.counter', 3) self.logger.set_statsd_prefix('') self.assertStat('alpha.beta.some.counter:1|c|@0.9912', self.logger.increment, 'some.counter', sample_rate=0.9912) self.assertStat('alpha.beta.some.counter:-1|c|@0.9912', self.logger.decrement, 'some.counter', 0.9912) self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.9912) self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.9912) self.assertStat('alpha.beta.another.counter:3|c|@0.9912', self.logger.update_stats, 'another.counter', 3, sample_rate=0.9912) @reset_logger_state def test_thread_locals(self): logger = utils.get_logger(None) # test the setter logger.thread_locals = ('id', 'ip') self.assertEqual(logger.thread_locals, ('id', 'ip')) # reset logger.thread_locals = (None, None) self.assertEqual(logger.thread_locals, (None, None)) logger.txn_id = '1234' logger.client_ip = 
'1.2.3.4' self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4')) logger.txn_id = '5678' logger.client_ip = '5.6.7.8' self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8')) def test_no_fdatasync(self): called = [] class NoFdatasync(object): pass def fsync(fd): called.append(fd) with patch('swift.common.utils.os', NoFdatasync()): with patch('swift.common.utils.fsync', fsync): utils.fdatasync(12345) self.assertEqual(called, [12345]) def test_yes_fdatasync(self): called = [] class YesFdatasync(object): def fdatasync(self, fd): called.append(fd) with patch('swift.common.utils.os', YesFdatasync()): utils.fdatasync(12345) self.assertEqual(called, [12345]) def test_fsync_bad_fullsync(self): class FCNTL(object): F_FULLSYNC = 123 def fcntl(self, fd, op): raise IOError(18) with patch('swift.common.utils.fcntl', FCNTL()): self.assertRaises(OSError, lambda: utils.fsync(12345)) def test_fsync_f_fullsync(self): called = [] class FCNTL(object): F_FULLSYNC = 123 def fcntl(self, fd, op): called[:] = [fd, op] return 0 with patch('swift.common.utils.fcntl', FCNTL()): utils.fsync(12345) self.assertEqual(called, [12345, 123]) def test_fsync_no_fullsync(self): called = [] class FCNTL(object): pass def fsync(fd): called.append(fd) with patch('swift.common.utils.fcntl', FCNTL()): with patch('os.fsync', fsync): utils.fsync(12345) self.assertEqual(called, [12345]) class TestAuditLocationGenerator(unittest.TestCase): def test_drive_tree_access(self): orig_listdir = utils.listdir def _mock_utils_listdir(path): if 'bad_part' in path: raise OSError(errno.EACCES) elif 'bad_suffix' in path: raise OSError(errno.EACCES) elif 'bad_hash' in path: raise OSError(errno.EACCES) else: return orig_listdir(path) # Check Raise on Bad partition tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) obj_path = os.path.join(data, "bad_part") with open(obj_path, "w"): pass part1 = os.path.join(data, "partition1") os.makedirs(part1) part2 = os.path.join(data, "partition2") 
        os.makedirs(part2)
        with patch('swift.common.utils.listdir', _mock_utils_listdir):
            audit = lambda: list(utils.audit_location_generator(
                tmpdir, "data", mount_check=False))
            self.assertRaises(OSError, audit)
        rmtree(tmpdir)

        # Check Raise on Bad Suffix
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        part1 = os.path.join(data, "partition1")
        os.makedirs(part1)
        part2 = os.path.join(data, "partition2")
        os.makedirs(part2)
        obj_path = os.path.join(part1, "bad_suffix")
        with open(obj_path, 'w'):
            pass
        suffix = os.path.join(part2, "suffix")
        os.makedirs(suffix)
        with patch('swift.common.utils.listdir', _mock_utils_listdir):
            audit = lambda: list(utils.audit_location_generator(
                tmpdir, "data", mount_check=False))
            self.assertRaises(OSError, audit)
        rmtree(tmpdir)

        # Check Raise on Bad Hash
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        part1 = os.path.join(data, "partition1")
        os.makedirs(part1)
        suffix = os.path.join(part1, "suffix")
        os.makedirs(suffix)
        hash1 = os.path.join(suffix, "hash1")
        os.makedirs(hash1)
        obj_path = os.path.join(suffix, "bad_hash")
        with open(obj_path, 'w'):
            pass
        with patch('swift.common.utils.listdir', _mock_utils_listdir):
            audit = lambda: list(utils.audit_location_generator(
                tmpdir, "data", mount_check=False))
            self.assertRaises(OSError, audit)
        rmtree(tmpdir)

    def test_non_dir_drive(self):
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            # Create a file, that represents a non-dir drive
            open(os.path.join(tmpdir, 'asdf'), 'w')
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False, logger=logger
            )
            self.assertEqual(list(locations), [])
            self.assertEqual(1, len(logger.get_lines_for_level('warning')))
            # Test without the logger
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False
            )
            self.assertEqual(list(locations), [])

    def test_mount_check_drive(self):
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            # Create a file, that represents a non-dir drive
            open(os.path.join(tmpdir, 'asdf'), 'w')
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=True, logger=logger
            )
            self.assertEqual(list(locations), [])
            self.assertEqual(2, len(logger.get_lines_for_level('warning')))

            # Test without the logger
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=True
            )
            self.assertEqual(list(locations), [])

    def test_non_dir_contents(self):
        # Plain files at the partition/suffix/hash levels are skipped.
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            with open(os.path.join(data, "partition1"), "w"):
                pass
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            with open(os.path.join(partition, "suffix1"), "w"):
                pass
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            with open(os.path.join(suffix, "hash1"), "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False, logger=logger
            )
            self.assertEqual(list(locations), [])

    def test_find_objects(self):
        with temptree([]) as tmpdir:
            expected_objs = list()
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            # Create a file, that represents a non-dir drive
            open(os.path.join(tmpdir, 'asdf'), 'w')
            partition = os.path.join(data, "partition1")
            os.makedirs(partition)
            suffix = os.path.join(partition, "suffix")
            os.makedirs(suffix)
            hash_path = os.path.join(suffix, "hash")
            os.makedirs(hash_path)
            obj_path = os.path.join(hash_path, "obj1.db")
            with open(obj_path, "w"):
                pass
            expected_objs.append((obj_path, 'drive', 'partition1'))
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            hash_path = os.path.join(suffix, "hash2")
            os.makedirs(hash_path)
            obj_path = os.path.join(hash_path, "obj2.db")
            with open(obj_path, "w"):
                pass
            expected_objs.append((obj_path, 'drive', 'partition2'))
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False, logger=logger
            )
            got_objs = list(locations)
            self.assertEqual(len(got_objs), len(expected_objs))
            self.assertEqual(sorted(got_objs), sorted(expected_objs))
            self.assertEqual(1, len(logger.get_lines_for_level('warning')))

    def test_ignore_metadata(self):
        # Only files carrying the requested suffix (".dat") are yielded.
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            hash_path = os.path.join(suffix, "hash2")
            os.makedirs(hash_path)
            obj_path = os.path.join(hash_path, "obj1.dat")
            with open(obj_path, "w"):
                pass
            meta_path = os.path.join(hash_path, "obj1.meta")
            with open(meta_path, "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", ".dat", mount_check=False, logger=logger
            )
            self.assertEqual(list(locations),
                             [(obj_path, "drive", "partition2")])


class TestGreenAsyncPile(unittest.TestCase):

    def test_runs_everything(self):
        def run_test():
            tests_ran[0] += 1
            return tests_ran[0]
        tests_ran = [0]
        pile = utils.GreenAsyncPile(3)
        for x in range(3):
            pile.spawn(run_test)
        self.assertEqual(sorted(x for x in pile), [1, 2, 3])

    def test_is_asynchronous(self):
        def run_test(index):
            events[index].wait()
            return index

        pile = utils.GreenAsyncPile(3)
        for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
            events = [eventlet.event.Event(), eventlet.event.Event(),
                      eventlet.event.Event()]
            for x in range(3):
                pile.spawn(run_test, x)
            # results come back in completion order, not spawn order
            for x in order:
                events[x].send()
                self.assertEqual(next(pile), x)

    def test_next_when_empty(self):
        def run_test():
            pass
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test)
        self.assertIsNone(next(pile))
        self.assertRaises(StopIteration, lambda: next(pile))

    def test_waitall_timeout_timesout(self):
        def run_test(sleep_duration):
            eventlet.sleep(sleep_duration)
            completed[0] += 1
            return sleep_duration

        completed = [0]
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test, 0.1)
        pile.spawn(run_test, 1.0)
        # 0.5s timeout: only the 0.1s task finishes in time
        self.assertEqual(pile.waitall(0.5), [0.1])
        self.assertEqual(completed[0], 1)

    def test_waitall_timeout_completes(self):
        def run_test(sleep_duration):
            eventlet.sleep(sleep_duration)
            completed[0] += 1
            return sleep_duration

        completed = [0]
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test, 0.1)
        pile.spawn(run_test, 0.1)
        self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
        self.assertEqual(completed[0], 2)

    def test_waitfirst_only_returns_first(self):
        def run_test(name):
            eventlet.sleep(0)
            completed.append(name)
            return name

        completed = []
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test, 'first')
        pile.spawn(run_test, 'second')
        pile.spawn(run_test, 'third')
        self.assertEqual(pile.waitfirst(0.5), completed[0])
        # 3 still completed, but only the first was returned.
        self.assertEqual(3, len(completed))

    def test_wait_with_firstn(self):
        def run_test(name):
            eventlet.sleep(0)
            completed.append(name)
            return name

        for first_n in [None] + list(range(6)):
            completed = []
            pile = utils.GreenAsyncPile(10)
            for i in range(10):
                pile.spawn(run_test, i)
            actual = pile._wait(1, first_n)
            # first_n=None (or 0) means "wait for everything"
            expected_n = first_n if first_n else 10
            self.assertEqual(completed[:expected_n], actual)
            self.assertEqual(10, len(completed))

    def test_pending(self):
        pile = utils.GreenAsyncPile(3)
        self.assertEqual(0, pile._pending)
        for repeats in range(2):
            # repeat to verify that pending will go again up after going down
            for i in range(4):
                pile.spawn(lambda: i)
            self.assertEqual(4, pile._pending)
            for i in range(3, -1, -1):
                next(pile)
                self.assertEqual(i, pile._pending)
        # sanity check - the pile is empty
        self.assertRaises(StopIteration, pile.next)
        # pending remains 0
        self.assertEqual(0, pile._pending)


class TestLRUCache(unittest.TestCase):

    def test_maxsize(self):
        @utils.LRUCache(maxsize=10)
        def f(*args):
            return math.sqrt(*args)
        _orig_math_sqrt = math.sqrt
        # setup cache [0-10)
        for i in range(10):
            self.assertEqual(math.sqrt(i), f(i))
        self.assertEqual(f.size(), 10)
        # validate cache [0-10)
        # (math.sqrt is mocked away, so any hit must come from the cache)
        with patch('math.sqrt'):
            for i in range(10):
                self.assertEqual(_orig_math_sqrt(i), f(i))
        self.assertEqual(f.size(), 10)
        # update cache [10-20)
        for i in range(10, 20):
            self.assertEqual(math.sqrt(i), f(i))
        # cache size is fixed
        self.assertEqual(f.size(), 10)
        # validate cache [10-20)
        with patch('math.sqrt'):
            for i in range(10, 20):
                self.assertEqual(_orig_math_sqrt(i), f(i))
        # validate un-cached [0-10)
        with patch('math.sqrt', new=None):
            for i in range(10):
                self.assertRaises(TypeError, f, i)
        # cache unchanged
        self.assertEqual(f.size(), 10)
        with patch('math.sqrt'):
            for i in range(10, 20):
                self.assertEqual(_orig_math_sqrt(i), f(i))
        self.assertEqual(f.size(), 10)

    def test_maxtime(self):
        @utils.LRUCache(maxtime=30)
        def f(*args):
            return math.sqrt(*args)
        self.assertEqual(30, f.maxtime)
        _orig_math_sqrt = math.sqrt

        now = time.time()
        the_future = now + 31
        # setup cache [0-10)
        with patch('time.time', lambda: now):
            for i in range(10):
                self.assertEqual(math.sqrt(i), f(i))
            self.assertEqual(f.size(), 10)
            # validate cache [0-10)
            with patch('math.sqrt'):
                for i in range(10):
                    self.assertEqual(_orig_math_sqrt(i), f(i))
            self.assertEqual(f.size(), 10)
        # validate expired [0-10)
        with patch('math.sqrt', new=None):
            with patch('time.time', lambda: the_future):
                for i in range(10):
                    self.assertRaises(TypeError, f, i)
        # validate repopulates [0-10)
        with patch('time.time', lambda: the_future):
            for i in range(10):
                self.assertEqual(math.sqrt(i), f(i))
        # reuses cache space
        self.assertEqual(f.size(), 10)

    def test_set_maxtime(self):
        @utils.LRUCache(maxtime=30)
        def f(*args):
            return math.sqrt(*args)
        self.assertEqual(30, f.maxtime)
        self.assertEqual(2, f(4))
        self.assertEqual(1, f.size())
        # expire everything
        f.maxtime = -1
        # validate un-cached [0-10)
        with patch('math.sqrt', new=None):
            self.assertRaises(TypeError, f, 4)

    def test_set_maxsize(self):
        @utils.LRUCache(maxsize=10)
        def f(*args):
            return math.sqrt(*args)
        for i in range(12):
            f(i)
        self.assertEqual(f.size(), 10)
        f.maxsize = 4
        for i in range(12):
            f(i)
        self.assertEqual(f.size(), 4)


class TestSpliterator(unittest.TestCase):
    # Spliterator re-slices an iterable of strings into take(n)-sized pieces
    # without copying more than necessary.

    def test_string(self):
        input_chunks = ["coun", "ter-", "b", "ra", "nch-mater",
                        "nit", "y-fungusy", "-nummular"]
        si = utils.Spliterator(input_chunks)

        self.assertEqual(''.join(si.take(8)), "counter-")
        self.assertEqual(''.join(si.take(7)), "branch-")
        self.assertEqual(''.join(si.take(10)), "maternity-")
        self.assertEqual(''.join(si.take(8)), "fungusy-")
        self.assertEqual(''.join(si.take(8)), "nummular")

    def test_big_input_string(self):
        input_chunks = ["iridium"]
        si = utils.Spliterator(input_chunks)

        self.assertEqual(''.join(si.take(2)), "ir")
        self.assertEqual(''.join(si.take(1)), "i")
        self.assertEqual(''.join(si.take(2)), "di")
        self.assertEqual(''.join(si.take(1)), "u")
        self.assertEqual(''.join(si.take(1)), "m")

    def test_chunk_boundaries(self):
        input_chunks = ["soylent", "green", "is", "people"]
        si = utils.Spliterator(input_chunks)

        self.assertEqual(''.join(si.take(7)), "soylent")
        self.assertEqual(''.join(si.take(5)), "green")
        self.assertEqual(''.join(si.take(2)), "is")
        self.assertEqual(''.join(si.take(6)), "people")

    def test_no_empty_strings(self):
        input_chunks = ["soylent", "green", "is", "people"]
        si = utils.Spliterator(input_chunks)

        outputs = (list(si.take(7))  # starts and ends on chunk boundary
                   + list(si.take(2))  # spans two chunks
                   + list(si.take(3))  # begins but does not end chunk
                   + list(si.take(2))  # ends but does not begin chunk
                   + list(si.take(6)))  # whole chunk + EOF
        self.assertNotIn('', outputs)

    def test_running_out(self):
        input_chunks = ["not much"]
        si = utils.Spliterator(input_chunks)

        self.assertEqual(''.join(si.take(4)), "not ")
        self.assertEqual(''.join(si.take(99)), "much")  # short
        self.assertEqual(''.join(si.take(4)), "")
        self.assertEqual(''.join(si.take(4)), "")

    def test_overlap(self):
        # Starting a second take() while the first is unconsumed is an error.
        input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
        si = utils.Spliterator(input_chunks)

        t1 = si.take(20)  # longer than first chunk
        self.assertLess(len(next(t1)), 20)  # it's not exhausted

        t2 = si.take(20)
        self.assertRaises(ValueError, next, t2)

    def test_closing(self):
        # Closing an in-flight take() leaves the iterator usable at the
        # point where the closed slice stopped.
        input_chunks = ["abcd", "efg", "hij"]

        si = utils.Spliterator(input_chunks)
        it = si.take(3)  # shorter than first chunk
        self.assertEqual(next(it), 'abc')
        it.close()
        self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])

        si = utils.Spliterator(input_chunks)
        self.assertEqual(list(si.take(1)), ['a'])
        it = si.take(1)  # still shorter than first chunk
        self.assertEqual(next(it), 'b')
        it.close()
        self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])

        si = utils.Spliterator(input_chunks)
        it = si.take(6)  # longer than first chunk, shorter than first + second
        self.assertEqual(next(it), 'abcd')
        self.assertEqual(next(it), 'ef')
        it.close()
        self.assertEqual(list(si.take(20)), ['g', 'hij'])

        si = utils.Spliterator(input_chunks)
        self.assertEqual(list(si.take(2)), ['ab'])
        it = si.take(3)  # longer than rest of chunk
        self.assertEqual(next(it), 'cd')
        it.close()
        self.assertEqual(list(si.take(20)), ['efg', 'hij'])


class TestParseContentRange(unittest.TestCase):

    def test_good(self):
        start, end, total = utils.parse_content_range("bytes 100-200/300")
        self.assertEqual(start, 100)
        self.assertEqual(end, 200)
        self.assertEqual(total, 300)

    def test_bad(self):
        self.assertRaises(ValueError, utils.parse_content_range,
                          "100-300/500")
        self.assertRaises(ValueError, utils.parse_content_range,
                          "bytes 100-200/aardvark")
        self.assertRaises(ValueError, utils.parse_content_range,
                          "bytes bulbous-bouffant/4994801")


class TestParseContentDisposition(unittest.TestCase):

    def test_basic_content_type(self):
        name, attrs = utils.parse_content_disposition('text/plain')
        self.assertEqual(name, 'text/plain')
        self.assertEqual(attrs, {})

    def test_content_type_with_charset(self):
        name, attrs = utils.parse_content_disposition(
            'text/plain; charset=UTF8')
        self.assertEqual(name, 'text/plain')
        self.assertEqual(attrs, {'charset': 'UTF8'})

    def test_content_disposition(self):
        name, attrs = utils.parse_content_disposition(
            'form-data; name="somefile"; filename="test.html"')
        self.assertEqual(name, 'form-data')
        self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})

    def test_content_disposition_without_white_space(self):
        name, attrs = utils.parse_content_disposition(
            'form-data;name="somefile";filename="test.html"')
        self.assertEqual(name, 'form-data')
        self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})


class TestIterMultipartMimeDocuments(unittest.TestCase):

    def test_bad_start(self):
        it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
        exc = None
        try:
            next(it)
        except MimeInvalid as err:
            exc = err
        self.assertTrue('invalid starting boundary' in str(exc))
        self.assertTrue('--unique' in str(exc))

    def test_empty(self):
        it = utils.iter_multipart_mime_documents(StringIO('--unique'),
                                                 'unique')
        fp = next(it)
        self.assertEqual(fp.read(), '')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
        fp = next(it)
        self.assertEqual(fp.read(), 'abcdefg')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic2(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = next(it)
        self.assertEqual(fp.read(), 'abcdefg')
        fp = next(it)
        self.assertEqual(fp.read(), 'hijkl')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_tiny_reads(self):
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = next(it)
        self.assertEqual(fp.read(2), 'ab')
        self.assertEqual(fp.read(2), 'cd')
        self.assertEqual(fp.read(2), 'ef')
        self.assertEqual(fp.read(2), 'g')
        self.assertEqual(fp.read(2), '')
        fp = next(it)
        self.assertEqual(fp.read(), 'hijkl')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc =
err self.assertTrue(exc is not None) def test_big_reads(self): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), 'unique') fp = next(it) self.assertEqual(fp.read(65536), 'abcdefg') self.assertEqual(fp.read(), '') fp = next(it) self.assertEqual(fp.read(), 'hijkl') exc = None try: next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) def test_leading_crlfs(self): it = utils.iter_multipart_mime_documents( StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n' '--unique\r\nhijkl\r\n--unique--'), 'unique') fp = next(it) self.assertEqual(fp.read(65536), 'abcdefg') self.assertEqual(fp.read(), '') fp = next(it) self.assertEqual(fp.read(), 'hijkl') self.assertRaises(StopIteration, it.next) def test_broken_mid_stream(self): # We go ahead and accept whatever is sent instead of rejecting the # whole request, in case the partial form is still useful. it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabc'), 'unique') fp = next(it) self.assertEqual(fp.read(), 'abc') exc = None try: next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) def test_readline(self): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n' 'jkl\r\n\r\n--unique--'), 'unique') fp = next(it) self.assertEqual(fp.readline(), 'ab\r\n') self.assertEqual(fp.readline(), 'cd\ref\ng') self.assertEqual(fp.readline(), '') fp = next(it) self.assertEqual(fp.readline(), 'hi\r\n') self.assertEqual(fp.readline(), '\r\n') self.assertEqual(fp.readline(), 'jkl\r\n') exc = None try: next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) def test_readline_with_tiny_chunks(self): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n' '\r\njkl\r\n\r\n--unique--'), 'unique', read_chunk_size=2) fp = next(it) self.assertEqual(fp.readline(), 'ab\r\n') self.assertEqual(fp.readline(), 
'cd\ref\ng') self.assertEqual(fp.readline(), '') fp = next(it) self.assertEqual(fp.readline(), 'hi\r\n') self.assertEqual(fp.readline(), '\r\n') self.assertEqual(fp.readline(), 'jkl\r\n') exc = None try: next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) class TestParseMimeHeaders(unittest.TestCase): def test_parse_mime_headers(self): doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size" Foo: Bar NOT-title-cAsED: quux Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?= Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?= Latin-1: Resincronizaci\xf3n realizada con \xe9xito Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80 This is the body """) headers = utils.parse_mime_headers(doc_file) utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440' if six.PY2: utf8 = utf8.encode('utf-8') expected_headers = { 'Content-Disposition': 'form-data; name="file_size"', 'Foo': "Bar", 'Not-Title-Cased': "quux", # Encoded-word or non-ASCII values are treated just like any other # bytestring (at least for now) 'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=", 'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=", 'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito", 'Utf-8': utf8, } self.assertEqual(expected_headers, headers) self.assertEqual(b"This is the body\n", doc_file.read()) class FakeResponse(object): def __init__(self, status, headers, body): self.status = status self.headers = HeaderKeyDict(headers) self.body = StringIO(body) def getheader(self, header_name): return str(self.headers.get(header_name, '')) def getheaders(self): return self.headers.items() def read(self, length=None): return self.body.read(length) def readline(self, length=None): return self.body.readline(length) class TestDocumentItersToHTTPResponseBody(unittest.TestCase): def test_no_parts(self): body = utils.document_iters_to_http_response_body( iter([]), 'dontcare', multipart=False, 
logger=FakeLogger()) self.assertEqual(body, '') def test_single_part(self): body = "time flies like an arrow; fruit flies like a banana" doc_iters = [{'part_iter': iter(StringIO(body).read, '')}] resp_body = ''.join( utils.document_iters_to_http_response_body( iter(doc_iters), 'dontcare', multipart=False, logger=FakeLogger())) self.assertEqual(resp_body, body) def test_multiple_parts(self): part1 = "two peanuts were walking down a railroad track" part2 = "and one was a salted. ... peanut." doc_iters = [{ 'start_byte': 88, 'end_byte': 133, 'content_type': 'application/peanut', 'entity_length': 1024, 'part_iter': iter(StringIO(part1).read, ''), }, { 'start_byte': 500, 'end_byte': 532, 'content_type': 'application/salted', 'entity_length': 1024, 'part_iter': iter(StringIO(part2).read, ''), }] resp_body = ''.join( utils.document_iters_to_http_response_body( iter(doc_iters), 'boundaryboundary', multipart=True, logger=FakeLogger())) self.assertEqual(resp_body, ( "--boundaryboundary\r\n" + # This is a little too strict; we don't actually care that the # headers are in this order, but the test is much more legible # this way. 
"Content-Type: application/peanut\r\n" + "Content-Range: bytes 88-133/1024\r\n" + "\r\n" + part1 + "\r\n" + "--boundaryboundary\r\n" "Content-Type: application/salted\r\n" + "Content-Range: bytes 500-532/1024\r\n" + "\r\n" + part2 + "\r\n" + "--boundaryboundary--")) def test_closed_part_iterator(self): print('test') useful_iter_mock = mock.MagicMock() useful_iter_mock.__iter__.return_value = [''] body_iter = utils.document_iters_to_http_response_body( iter([{'part_iter': useful_iter_mock}]), 'dontcare', multipart=False, logger=FakeLogger()) body = '' for s in body_iter: body += s self.assertEqual(body, '') useful_iter_mock.close.assert_called_once_with() # Calling "close" on the mock will now raise an AttributeError del useful_iter_mock.close body_iter = utils.document_iters_to_http_response_body( iter([{'part_iter': useful_iter_mock}]), 'dontcare', multipart=False, logger=FakeLogger()) body = '' for s in body_iter: body += s class TestPairs(unittest.TestCase): def test_pairs(self): items = [10, 20, 30, 40, 50, 60] got_pairs = set(utils.pairs(items)) self.assertEqual(got_pairs, set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60), (20, 30), (20, 40), (20, 50), (20, 60), (30, 40), (30, 50), (30, 60), (40, 50), (40, 60), (50, 60)])) class TestSocketStringParser(unittest.TestCase): def test_socket_string_parser(self): default = 1337 addrs = [('1.2.3.4', '1.2.3.4', default), ('1.2.3.4:5000', '1.2.3.4', 5000), ('[dead:beef::1]', 'dead:beef::1', default), ('[dead:beef::1]:5000', 'dead:beef::1', 5000), ('example.com', 'example.com', default), ('example.com:5000', 'example.com', 5000), ('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000), ('1.2.3.4:10:20', None, None), ('dead:beef::1:5000', None, None)] for addr, expected_host, expected_port in addrs: if expected_host: host, port = utils.parse_socket_string(addr, default) self.assertEqual(expected_host, host) self.assertEqual(expected_port, int(port)) else: with self.assertRaises(ValueError): 
utils.parse_socket_string(addr, default) class TestHashForFileFunction(unittest.TestCase): def setUp(self): self.tempfilename = tempfile.mktemp() def tearDown(self): try: os.unlink(self.tempfilename) except OSError: pass def test_hash_for_file_smallish(self): stub_data = 'some data' with open(self.tempfilename, 'wb') as fd: fd.write(stub_data) with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertEqual(rv, mock_hasher.hexdigest.return_value) self.assertEqual([mock.call(stub_data)], mock_hasher.update.call_args_list) def test_hash_for_file_big(self): num_blocks = 10 block_size = utils.MD5_BLOCK_READ_BYTES truncate = 523 start_char = ord('a') expected_blocks = [chr(i) * block_size for i in range(start_char, start_char + num_blocks)] full_data = ''.join(expected_blocks) trimmed_data = full_data[:-truncate] # sanity self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate) with open(self.tempfilename, 'wb') as fd: fd.write(trimmed_data) with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertEqual(rv, mock_hasher.hexdigest.return_value) self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list)) found_blocks = [] for i, (expected_block, call) in enumerate(zip( expected_blocks, mock_hasher.update.call_args_list)): args, kwargs = call self.assertEqual(kwargs, {}) self.assertEqual(1, len(args)) block = args[0] if i < num_blocks - 1: self.assertEqual(block, expected_block) else: self.assertEqual(block, expected_block[:-truncate]) found_blocks.append(block) self.assertEqual(''.join(found_blocks), trimmed_data) def test_hash_for_file_empty(self): with open(self.tempfilename, 'wb'): pass with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value 
rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertEqual(rv, mock_hasher.hexdigest.return_value) self.assertEqual([], mock_hasher.update.call_args_list) def test_hash_for_file_brittle(self): data_to_expected_hash = { '': 'd41d8cd98f00b204e9800998ecf8427e', 'some data': '1e50210a0202497fb79bc38b6ade6c34', ('a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3', } # unlike some other places where the concrete implementation really # matters for backwards compatibility these brittle tests are probably # not needed or justified, if a future maintainer rips them out later # they're probably doing the right thing failures = [] for stub_data, expected_hash in data_to_expected_hash.items(): with open(self.tempfilename, 'wb') as fd: fd.write(stub_data) rv = utils.md5_hash_for_file(self.tempfilename) try: self.assertEqual(expected_hash, rv) except AssertionError: trim_cap = 80 if len(stub_data) > trim_cap: stub_data = '%s...<truncated>' % stub_data[:trim_cap] failures.append('hash for %r was %s instead of expected %s' % ( stub_data, rv, expected_hash)) if failures: self.fail('Some data did not compute expected hash:\n' + '\n'.join(failures)) class TestSetSwiftDir(unittest.TestCase): def setUp(self): self.swift_dir = tempfile.mkdtemp() self.swift_conf = os.path.join(self.swift_dir, 'swift.conf') self.policy_name = ''.join(random.sample(string.letters, 20)) with open(self.swift_conf, "wb") as sc: sc.write(''' [swift-hash] swift_hash_path_suffix = changeme [storage-policy:0] name = default default = yes [storage-policy:1] name = %s ''' % self.policy_name) def tearDown(self): shutil.rmtree(self.swift_dir, ignore_errors=True) def test_set_swift_dir(self): set_swift_dir(None) reload_storage_policies() self.assertIsNone(POLICIES.get_by_name(self.policy_name)) set_swift_dir(self.swift_dir) reload_storage_policies() self.assertIsNotNone(POLICIES.get_by_name(self.policy_name)) class TestPipeMutex(unittest.TestCase): def 
setUp(self): self.mutex = utils.PipeMutex() def tearDown(self): self.mutex.close() def test_nonblocking(self): evt_lock1 = eventlet.event.Event() evt_lock2 = eventlet.event.Event() evt_unlock = eventlet.event.Event() def get_the_lock(): self.mutex.acquire() evt_lock1.send('got the lock') evt_lock2.wait() self.mutex.release() evt_unlock.send('released the lock') eventlet.spawn(get_the_lock) evt_lock1.wait() # Now, the other greenthread has the lock. self.assertFalse(self.mutex.acquire(blocking=False)) evt_lock2.send('please release the lock') evt_unlock.wait() # The other greenthread has released the lock. self.assertTrue(self.mutex.acquire(blocking=False)) def test_recursive(self): self.assertTrue(self.mutex.acquire(blocking=False)) self.assertTrue(self.mutex.acquire(blocking=False)) def try_acquire_lock(): return self.mutex.acquire(blocking=False) self.assertFalse(eventlet.spawn(try_acquire_lock).wait()) self.mutex.release() self.assertFalse(eventlet.spawn(try_acquire_lock).wait()) self.mutex.release() self.assertTrue(eventlet.spawn(try_acquire_lock).wait()) def test_release_without_acquire(self): self.assertRaises(RuntimeError, self.mutex.release) def test_too_many_releases(self): self.mutex.acquire() self.mutex.release() self.assertRaises(RuntimeError, self.mutex.release) def test_wrong_releaser(self): self.mutex.acquire() with quiet_eventlet_exceptions(): self.assertRaises(RuntimeError, eventlet.spawn(self.mutex.release).wait) def test_blocking(self): evt = eventlet.event.Event() sequence = [] def coro1(): eventlet.sleep(0) # let coro2 go self.mutex.acquire() sequence.append('coro1 acquire') evt.send('go') self.mutex.release() sequence.append('coro1 release') def coro2(): evt.wait() # wait for coro1 to start us self.mutex.acquire() sequence.append('coro2 acquire') self.mutex.release() sequence.append('coro2 release') c1 = eventlet.spawn(coro1) c2 = eventlet.spawn(coro2) c1.wait() c2.wait() self.assertEqual(sequence, [ 'coro1 acquire', 'coro1 release', 'coro2 
acquire', 'coro2 release']) def test_blocking_tpool(self): # Note: this test's success isn't a guarantee that the mutex is # working. However, this test's failure means that the mutex is # definitely broken. sequence = [] def do_stuff(): n = 10 while n > 0: self.mutex.acquire() sequence.append("<") eventlet.sleep(0.0001) sequence.append(">") self.mutex.release() n -= 1 greenthread1 = eventlet.spawn(do_stuff) greenthread2 = eventlet.spawn(do_stuff) real_thread1 = eventlet.patcher.original('threading').Thread( target=do_stuff) real_thread1.start() real_thread2 = eventlet.patcher.original('threading').Thread( target=do_stuff) real_thread2.start() greenthread1.wait() greenthread2.wait() real_thread1.join() real_thread2.join() self.assertEqual(''.join(sequence), "<>" * 40) def test_blocking_preserves_ownership(self): pthread1_event = eventlet.patcher.original('threading').Event() pthread2_event1 = eventlet.patcher.original('threading').Event() pthread2_event2 = eventlet.patcher.original('threading').Event() thread_id = [] owner = [] def pthread1(): thread_id.append(id(eventlet.greenthread.getcurrent())) self.mutex.acquire() owner.append(self.mutex.owner) pthread2_event1.set() orig_os_write = utils.os.write def patched_os_write(*a, **kw): try: return orig_os_write(*a, **kw) finally: pthread1_event.wait() with mock.patch.object(utils.os, 'write', patched_os_write): self.mutex.release() pthread2_event2.set() def pthread2(): pthread2_event1.wait() # ensure pthread1 acquires lock first thread_id.append(id(eventlet.greenthread.getcurrent())) self.mutex.acquire() pthread1_event.set() pthread2_event2.wait() owner.append(self.mutex.owner) self.mutex.release() real_thread1 = eventlet.patcher.original('threading').Thread( target=pthread1) real_thread1.start() real_thread2 = eventlet.patcher.original('threading').Thread( target=pthread2) real_thread2.start() real_thread1.join() real_thread2.join() self.assertEqual(thread_id, owner) self.assertIsNone(self.mutex.owner) @classmethod 
def tearDownClass(cls): # PipeMutex turns this off when you instantiate one eventlet.debug.hub_prevent_multiple_readers(True) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import random from openerp import SUPERUSER_ID from openerp.osv import osv, orm, fields from openerp.addons.web.http import request class payment_transaction(orm.Model): _inherit = 'payment.transaction' _columns = { # link with the sale order 'sale_order_id': fields.many2one('sale.order', 'Sale Order'), } class sale_order(osv.Model): _inherit = "sale.order" def _cart_qty(self, cr, uid, ids, field_name, arg, context=None): res = dict() for order in self.browse(cr, uid, ids, context=context): res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or []))) return res _columns = { 'website_order_line': fields.one2many( 'sale.order.line', 'order_id', string='Order Lines displayed on Website', readonly=True, help='Order Lines to be displayed on the website. They should not be used for computation purpose.', ), 'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'), 'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null'), 'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null'), } def _get_errors(self, cr, uid, order, context=None): return [] def _get_website_data(self, cr, uid, order, context): return { 'partner': order.partner_id.id, 'order': order } def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs): for so in self.browse(cr, uid, ids, context=context): domain = [('order_id', '=', so.id), ('product_id', '=', product_id)] if line_id: domain += [('id', '=', line_id)] return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context) def _website_product_id_change(self, cr, uid, ids, order_id, product_id, line_id=None, context=None): so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context) values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [], pricelist=so.pricelist_id.id, product=product_id, 
partner_id=so.partner_id.id, context=context )['value'] if line_id: line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context) values['name'] = line.name else: product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) values['name'] = product.description_sale or product.name values['product_id'] = product_id values['order_id'] = order_id if values.get('tax_id') != None: values['tax_id'] = [(6, 0, values['tax_id'])] return values def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs): """ Add or set product quantity, add_qty can be negative """ sol = self.pool.get('sale.order.line') quantity = 0 for so in self.browse(cr, uid, ids, context=context): if line_id != False: line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs) if line_ids: line_id = line_ids[0] # Create line if no line with product_id can be located if not line_id: values = self._website_product_id_change(cr, uid, ids, so.id, product_id, context=context) line_id = sol.create(cr, SUPERUSER_ID, values, context=context) if add_qty: add_qty -= 1 # compute new quantity if set_qty: quantity = set_qty elif add_qty != None: quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0) # Remove zero of negative lines if quantity <= 0: sol.unlink(cr, SUPERUSER_ID, [line_id], context=context) else: # update line values = self._website_product_id_change(cr, uid, ids, so.id, product_id, line_id, context=context) values['product_uom_qty'] = quantity sol.write(cr, SUPERUSER_ID, [line_id], values, context=context) return {'line_id': line_id, 'quantity': quantity} def _cart_accessories(self, cr, uid, ids, context=None): for order in self.browse(cr, uid, ids, context=context): s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or [])) s -= set(l.product_id.id for l in order.order_line) 
product_ids = random.sample(s, min(len(s),3)) return self.pool['product.product'].browse(cr, uid, product_ids, context=context) class website(orm.Model): _inherit = 'website' _columns = { 'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist', type='many2one', relation='product.pricelist', string='Default Pricelist'), 'currency_id': fields.related('pricelist_id','currency_id', type='many2one', relation='res.currency', string='Default Currency'), } def sale_product_domain(self, cr, uid, ids, context=None): return [("sale_ok", "=", True)] def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None): sale_order_obj = self.pool['sale.order'] sale_order_id = request.session.get('sale_order_id') sale_order = None # create so if needed if not sale_order_id and (force_create or code): # TODO cache partner_id session partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id for w in self.browse(cr, uid, ids): values = { 'user_id': w.user_id.id, 'partner_id': partner.id, 'pricelist_id': partner.property_product_pricelist.id, 'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1], } sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context) values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value'] sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context) request.session['sale_order_id'] = sale_order_id if sale_order_id: # TODO cache partner_id session partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context) if not sale_order.exists(): request.session['sale_order_id'] = None return None # check for change of pricelist with a coupon if code and code != sale_order.pricelist_id.code: pricelist_ids = 
self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context) if pricelist_ids: pricelist_id = pricelist_ids[0] request.session['sale_order_code_pricelist_id'] = pricelist_id update_pricelist = True request.session['sale_order_code_pricelist_id'] = False pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id # check for change of partner_id ie after signup if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id: flag_pricelist = False if pricelist_id != sale_order.pricelist_id.id: flag_pricelist = True fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [sale_order_id], partner.id, context=context)['value'] if values.get('fiscal_position'): order_lines = map(int,sale_order.order_line) values.update(sale_order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [], values['fiscal_position'], [[6, 0, order_lines]], context=context)['value']) values['partner_id'] = partner.id sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context) if flag_pricelist or values.get('fiscal_position') != fiscal_position: update_pricelist = True # update the pricelist if update_pricelist: values = {'pricelist_id': pricelist_id} values.update(sale_order.onchange_pricelist_id(pricelist_id, None)['value']) sale_order.write(values) for line in sale_order.order_line: sale_order._cart_update(product_id=line.product_id.id, add_qty=0) # update browse record if (code and code != sale_order.pricelist_id.code) or sale_order.partner_id.id != partner.id: sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order.id, context=context) return sale_order def sale_get_transaction(self, cr, uid, ids, context=None): transaction_obj = self.pool.get('payment.transaction') tx_id = request.session.get('sale_transaction_id') if tx_id: tx_ids = transaction_obj.search(cr, uid, 
[('id', '=', tx_id), ('state', 'not in', ['cancel'])], context=context) if tx_ids: return transaction_obj.browse(cr, uid, tx_ids[0], context=context) else: request.session['sale_transaction_id'] = False return False def sale_reset(self, cr, uid, ids, context=None): request.session.update({ 'sale_order_id': False, 'sale_transaction_id': False, 'sale_order_code_pricelist_id': False, })
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2015 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_context import context as context_utils import six.moves as sm import testtools from poppy.transport.pecan import controllers from poppy.transport.pecan.controllers import v1 from poppy.transport.validators import helpers class BasePecanControllerUnitTest(testtools.TestCase): def setUp(self, controller): """Engages all patches for unit testing controllers. Patches the request, response, request context, and deserialization decorator to satisfy all controller dependencies for unit testing. 
:returns: None """ super(BasePecanControllerUnitTest, self).setUp() self.addCleanup( sm.reload_module, controllers ) self.addCleanup( sm.reload_module, v1 ) self.addCleanup( sm.reload_module, controller ) self.addCleanup( sm.reload_module, context_utils ) self.addCleanup( sm.reload_module, helpers ) self.driver = mock.MagicMock() self.response = mock.Mock() context = mock.Mock() context.tenant = '000000001' context.user = 'user_id' context_utils.get_current = context context_utils.get_current.return_value = context pecan_request_patcher = mock.patch('pecan.request') self.request = pecan_request_patcher.start() self.request.host_url = 'test_url' self.request.base_url = 'test_url' pecan_response_patcher = mock.patch('pecan.response') self.response = pecan_response_patcher.start() self.response.headers = {} deco_patcher = mock.patch('poppy.transport.validators.helpers') deco_patcher.start() # Reload to engage patches sm.reload_module(controller) sm.reload_module(v1) sm.reload_module(controllers) sm.reload_module(helpers) # self.addCleanup(deco_patcher.stop) self.addCleanup(deco_patcher.stop) self.addCleanup(pecan_response_patcher.stop) self.addCleanup(pecan_request_patcher.stop)
unknown
codeparrot/codeparrot-clean
import {useReducer, useRef} from 'react'; function Component(props) { const ref = useRef(props.value); const [state] = useReducer( (state, action) => state + action, 0, init => ref.current ); return <Stringify state={state} />; } export const FIXTURE_ENTRYPOINT = { fn: Component, params: [{value: 42}], };
javascript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.invalid-access-ref-in-reducer-init.js
'use strict'; const common = require('../common.js'); const bench = common.createBenchmark(main, { // Unicode confuses ab on os x. type: ['bytes', 'buffer'], len: [4, 1024, 102400], chunks: [1, 4], c: [50, 500], chunkedEnc: [1, 0], duration: 5, }); function main({ type, len, chunks, c, chunkedEnc, duration }) { const server = require('../fixtures/simple-http-server.js') .listen(0) .on('listening', () => { const path = `/${type}/${len}/${chunks}/normal/${chunkedEnc}`; bench.http({ path, connections: c, duration, port: server.address().port, }, () => { server.close(); }); }); }
javascript
github
https://github.com/nodejs/node
benchmark/http/simple.js
<?php namespace Illuminate\JsonSchema; use Closure; use Illuminate\JsonSchema\Types\Type; /** * @method static Types\ObjectType object(Closure|array<string, Types\Type> $properties = []) * @method static Types\IntegerType integer() * @method static Types\NumberType number() * @method static Types\StringType string() * @method static Types\BooleanType boolean() * @method static Types\ArrayType array() */ class JsonSchema { /** * Dynamically pass static methods to the schema instance. */ public static function __callStatic(string $name, mixed $arguments): Type { return (new JsonSchemaTypeFactory)->$name(...$arguments); } }
php
github
https://github.com/laravel/framework
src/Illuminate/JsonSchema/JsonSchema.php
"""Tests for the create_fake_certs management command. """ from django.test import TestCase from django.core.management.base import CommandError from nose.plugins.attrib import attr from opaque_keys.edx.locator import CourseLocator from student.tests.factories import UserFactory from certificates.management.commands import create_fake_cert from certificates.models import GeneratedCertificate @attr('shard_1') class CreateFakeCertTest(TestCase): """Tests for the create_fake_certs management command. """ USERNAME = "test" COURSE_KEY = CourseLocator(org='edX', course='DemoX', run='Demo_Course') def setUp(self): super(CreateFakeCertTest, self).setUp() self.user = UserFactory.create(username=self.USERNAME) def test_create_fake_cert(self): # No existing cert, so create it self._run_command( self.USERNAME, unicode(self.COURSE_KEY), cert_mode='verified', grade='0.89' ) cert = GeneratedCertificate.eligible_certificates.get(user=self.user, course_id=self.COURSE_KEY) self.assertEqual(cert.status, 'downloadable') self.assertEqual(cert.mode, 'verified') self.assertEqual(cert.grade, '0.89') self.assertEqual(cert.download_uuid, 'test') self.assertEqual(cert.download_url, 'http://www.example.com') # Cert already exists; modify it self._run_command( self.USERNAME, unicode(self.COURSE_KEY), cert_mode='honor' ) cert = GeneratedCertificate.eligible_certificates.get(user=self.user, course_id=self.COURSE_KEY) self.assertEqual(cert.mode, 'honor') def test_too_few_args(self): with self.assertRaisesRegexp(CommandError, 'Usage'): self._run_command(self.USERNAME) def _run_command(self, *args, **kwargs): """Run the management command to generate a fake cert. """ command = create_fake_cert.Command() return command.handle(*args, **kwargs)
unknown
codeparrot/codeparrot-clean
def __getfilesystemencoding(): ''' Note: there's a copy of this method in interpreterInfo.py ''' import sys try: ret = sys.getfilesystemencoding() if not ret: raise RuntimeError('Unable to get encoding.') return ret except: try: #Handle Jython from java.lang import System env = System.getProperty("os.name").lower() if env.find('win') != -1: return 'ISO-8859-1' #mbcs does not work on Jython, so, use a (hopefully) suitable replacement return 'utf-8' except: pass #Only available from 2.3 onwards. if sys.platform == 'win32': return 'mbcs' return 'utf-8' def getfilesystemencoding(): try: ret = __getfilesystemencoding() #Check if the encoding is actually there to be used! if hasattr('', 'encode'): ''.encode(ret) if hasattr('', 'decode'): ''.decode(ret) return ret except: return 'utf-8'
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2016 Adam.Dybbroe # Author(s): # Adam.Dybbroe <a000680@c20671.ad.smhi.se> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Reader for the OCA products """ """ How to gather the LRIT files and skip the header: for file in `ls /disk2/testdata/OCA/L-000-MSG3__-MPEF________-OCAE_____-0000??___-*-__`;do echo $file; dd if=$file bs=1c skip=103 >> tmp;done """ import os import pygrib import numpy as np import os.path from glob import glob import tempfile import pyresample as pr from trollsift import parser from mpop.imageo import geo_image from mpop.imageo import palettes CFG_DIR = os.environ.get('MPEF_OCA_CONFIG_DIR', './') AREA_DEF_FILE = os.path.join(CFG_DIR, "areas.def") if not os.path.exists(AREA_DEF_FILE): raise IOError('Config file %s does not exist!' 
% AREA_DEF_FILE) LRIT_PATTERN = "L-000-{platform_name:_<5s}_-MPEF________-OCAE_____-{segment:_<9s}-{nominal_time:%Y%m%d%H%M}-{compressed:_<2s}" from .utils import (SCENE_TYPE_LAYERS, OCA_FIELDS, FIELDNAMES, get_reff_legend, get_cot_legend, get_scenetype_legend, get_ctp_legend) palette_func = {'ll_ctp': get_ctp_legend, 'ul_ctp': get_ctp_legend, 'ul_cot': get_cot_legend, 'll_cot': get_cot_legend, 'reff': get_reff_legend, 'scenetype': get_scenetype_legend} class Grib(object): def __init__(self, fname): self._abspath = os.path.abspath(fname) @property def nmsgs(self): '''Number of GRIB messages in file. ''' prop = 'nmsgs' attr = '_{}'.format(prop) if not hasattr(self, attr): grbs = pygrib.open(self._abspath) nmsgs = grbs.messages grbs.close() setattr(self, attr, nmsgs) return getattr(self, attr) def get(self, gmessage, key='values'): ''' Returns the value for the 'key' for a given message number 'gmessage' or message field name 'gmessage'. ''' grbs = pygrib.open(self._abspath) if type(gmessage) == int: mnbr = gmessage elif type(gmessage) == str: msg_found = False msgnum = 1 while msgnum < self.nmsgs + 1: if grbs[msgnum]['parameterName'] == gmessage: msg_found = True break msgnum = msgnum + 1 if msg_found: mnbr = msgnum else: print("No Grib message found with parameter name = %s" % gmessage) return None if grbs[mnbr].valid_key(key): arr = grbs[mnbr][key] grbs.close() return arr else: grbs.close() return class OCAField(object): """One OCA data field with metadata""" def __init__(self, units=None, longname='', shortname=''): self.units = units self.data = None self.error = None self.longname = None self.shortname = None class OCAData(object): """The OCA scene data""" def __init__(self): self._lritfiles = None self._gribfilename = None self._store_grib = False self.scenetype = OCAField() self.cost = OCAField() self.ul_cot = OCAField() self.ll_cot = OCAField() self.ul_ctp = OCAField() self.ll_ctp = OCAField() self.reff = OCAField() self._projectables = [] for field in 
FIELDNAMES.keys(): self._projectables.append(field) self.timeslot = None self.area_def = pr.utils.load_area(AREA_DEF_FILE, 'met09globeFull') def readgrib(self): """Read the data""" oca = Grib(self._gribfilename) self.scenetype.data = oca.get('Pixel scene type')[::-1, ::-1] self.scenetype.longname = OCA_FIELDS[0]['Pixel scene type'] for field in FIELDNAMES.keys(): setattr(getattr(self, field), 'data', oca.get( FIELDNAMES[field][0])[::-1, ::-1]) param = [s for s in OCA_FIELDS if FIELDNAMES[field][0] in s][0] if 'units' in param: setattr(getattr(self, field), 'units', param['units']) if 'abbrev' in param: setattr(getattr(self, field), 'shortname', param['abbrev']) setattr(getattr(self, field), 'longname', param[FIELDNAMES[field][0]]) param_name = FIELDNAMES[field][1] if param_name: setattr( getattr(self, field), 'error', oca.get(param_name)[::-1, ::-1]) if not self._store_grib: os.remove(self._gribfilename) def read_from_lrit(self, filenames, gribfilename=None): """Read and concatenate the LRIT segments""" self._lritfiles = filenames if len(filenames) == 0: print("No files provided!") return if gribfilename: self._store_grib = True self._gribfilename = gribfilename else: self._store_grib = False self._gribfilename = tempfile.mktemp(suffix='.grb') p__ = parser.Parser(LRIT_PATTERN) bstr = {} nsegments = 0 for lritfile in self._lritfiles: if os.path.basename(lritfile).find('PRO') > 0: print("PRO file... %s: Skip it..." 
% lritfile) continue res = p__.parse(os.path.basename(lritfile)) segm = int(res['segment'].strip('_')) if not self.timeslot: self.timeslot = res['nominal_time'] print("Segment = %d" % segm) nsegments = nsegments + 1 with open(lritfile) as fpt: fpt.seek(103) bstr[segm] = fpt.read() fstr = bstr[1] for idx in range(2, nsegments + 1): fstr = fstr + bstr[idx] with open(self._gribfilename, 'wb') as fpt: fpt.write(fstr) self.readgrib() def project(self, areaid): """Project the data""" lons, lats = self.area_def.get_lonlats() lons = np.ma.masked_outside(lons, -180, 180) lats = np.ma.masked_outside(lats, -90, 90) swath_def = pr.geometry.SwathDefinition(lons, lats) out_area_def = pr.utils.load_area(AREA_DEF_FILE, areaid) for item in self._projectables: data = getattr(getattr(self, item), 'data') result = pr.kd_tree.resample_nearest(swath_def, data, out_area_def, radius_of_influence=20000, fill_value=None) setattr(getattr(self, item), 'data', result) self.area_def = out_area_def def make_image(self, fieldname): """Make an mpop GeoImage image of the oca parameter 'fieldname'""" palette = palette_func[fieldname]() data = getattr(getattr(self, fieldname), 'data') if fieldname in ['ul_ctp', 'll_ctp']: data = (22. - data / 5000.).astype('Int16') elif fieldname in ['reff']: data = (data * 1000000. + 0.5).astype('uint8') img = geo_image.GeoImage(data, self.area_def.area_id, self.timeslot, fill_value=(0), mode="P", palette=palette) return img
unknown
codeparrot/codeparrot-clean
// RUN: %check_clang_tidy %s altera-kernel-name-restriction %t -- -- -I%S/Inputs/kernel-name-restriction // RUN: %check_clang_tidy -check-suffix=UPPERCASE %s altera-kernel-name-restriction %t -- -- -I%S/Inputs/kernel-name-restriction/uppercase -DUPPERCASE #ifdef UPPERCASE // The warning should be triggered regardless of capitalization #include "KERNEL.cl" // CHECK-MESSAGES-UPPERCASE: :[[@LINE-1]]:1: warning: including 'KERNEL.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #include "vERILOG.cl" // CHECK-MESSAGES-UPPERCASE: :[[@LINE-1]]:1: warning: including 'vERILOG.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #include "VHDL.cl" // CHECK-MESSAGES-UPPERCASE: :[[@LINE-1]]:1: warning: including 'VHDL.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #else // These are the banned kernel filenames, and should trigger warnings #include "kernel.cl" // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: including 'kernel.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #include "Verilog.cl" // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: including 'Verilog.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #include "vhdl.CL" // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: including 'vhdl.CL' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] // The warning should be triggered if the 
names are within a directory #include "some/dir/kernel.cl" // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: including 'kernel.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #include "somedir/verilog.cl" // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: including 'verilog.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] #include "otherdir/vhdl.cl" // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: including 'vhdl.cl' may cause additional compilation errors due to the name of the kernel source file; consider renaming the included kernel source file [altera-kernel-name-restriction] // There are no FIX-ITs for the altera-kernel-name-restriction lint check // The following include directives shouldn't trigger the warning #include "otherthing.cl" #include "thing.h" // It doesn't make sense to have kernel.h, verilog.h, or vhdl.h as filenames // without the corresponding .cl files, but the Altera Programming Guide doesn't // explicitly forbid it. #include "kernel.h" #include "verilog.h" #include "vhdl.h" // The files can still have the forbidden names in them, so long as they're not // the entire file name, and are not the kernel source file name. #include "some_kernel.cl" #include "other_Verilog.cl" #include "vhdl_number_two.cl" // Naming a directory kernel.cl, verilog.cl, or vhdl.cl is not explicitly // forbidden in the Altera Programming Guide either. #include "some/kernel.cl/foo.h" #include "some/verilog.cl/foo.h" #include "some/vhdl.cl/foo.h" #endif
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/test/clang-tidy/checkers/altera/kernel-name-restriction.cpp
import { Client } from 'components' export async function Component(a) { const b = 1 return ( <Client // Should be 1 110000 0, which is "e0" in hex (counts as two params, // because of the encrypted bound args param) fn1={async (c) => { 'use cache' return a + b + c }} // Should be 1 100000 0, which is "c0" in hex (counts as one param, // because of the encrypted bound args param) fn2={async function () { 'use cache' return a + b }} // Should be 0 110000 0, which is "60" in hex (counts as two params, // because of the encrypted bound args param) fn3={async (c) => { 'use server' return a + b + c }} // Should be 0 100000 0, which is "40" in hex (counts as one param, // because of the encrypted bound args param) fn4={async function () { 'use server' return a + b }} /> ) }
javascript
github
https://github.com/vercel/next.js
crates/next-custom-transforms/tests/fixture/server-actions/server-graph/52/input.js
# Copyright 2013-2015 Free Software Foundation, Inc. # # This is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see # <http://www.gnu.org/licenses/>. import gcc import gccutils import sys want_raii_info = False logging = False show_cfg = False def log(msg, indent=0): global logging if logging: sys.stderr.write('%s%s\n' % (' ' * indent, msg)) sys.stderr.flush() def is_cleanup_type(return_type): if not isinstance(return_type, gcc.PointerType): return False if not isinstance(return_type.dereference, gcc.RecordType): return False if str(return_type.dereference.name) == 'cleanup': return True return False def is_constructor(decl): "Return True if the function DECL is a cleanup constructor; False otherwise" return is_cleanup_type(decl.type.type) and (not decl.name or str(decl.name) != 'make_final_cleanup') destructor_names = set(['do_cleanups', 'discard_cleanups']) def is_destructor(decl): return decl.name in destructor_names # This list is just much too long... we should probably have an # attribute instead. special_names = set(['do_final_cleanups', 'discard_final_cleanups', 'save_cleanups', 'save_final_cleanups', 'restore_cleanups', 'restore_final_cleanups', 'exceptions_state_mc_init', 'make_my_cleanup2', 'make_final_cleanup', 'all_cleanups', 'save_my_cleanups', 'quit_target']) def needs_special_treatment(decl): return decl.name in special_names # Sometimes we need a new placeholder object that isn't the same as # anything else. 
class Dummy(object): def __init__(self, location): self.location = location # A wrapper for a cleanup which has been assigned to a variable. # This holds the variable and the location. class Cleanup(object): def __init__(self, var, location): self.var = var self.location = location # A class representing a master cleanup. This holds a stack of # cleanup objects and supports a merging operation. class MasterCleanup(object): # Create a new MasterCleanup object. OTHER, if given, is a # MasterCleanup object to copy. def __init__(self, other = None): # 'cleanups' is a list of cleanups. Each element is either a # Dummy, for an anonymous cleanup, or a Cleanup, for a cleanup # which was assigned to a variable. if other is None: self.cleanups = [] self.aliases = {} else: self.cleanups = other.cleanups[:] self.aliases = dict(other.aliases) def compare_vars(self, definition, argument): if definition == argument: return True if argument in self.aliases: argument = self.aliases[argument] if definition in self.aliases: definition = self.aliases[definition] return definition == argument def note_assignment(self, lhs, rhs): log('noting assignment %s = %s' % (lhs, rhs), 4) self.aliases[lhs] = rhs # Merge with another MasterCleanup. # Returns True if this resulted in a change to our state. def merge(self, other): # We do explicit iteration like this so we can easily # update the list after the loop. counter = -1 found_named = False for counter in range(len(self.cleanups) - 1, -1, -1): var = self.cleanups[counter] log('merge checking %s' % var, 4) # Only interested in named cleanups. if isinstance(var, Dummy): log('=> merge dummy', 5) continue # Now see if VAR is found in OTHER. 
if other._find_var(var.var) >= 0: log ('=> merge found', 5) break log('=>merge not found', 5) found_named = True if found_named and counter < len(self.cleanups) - 1: log ('merging to %d' % counter, 4) if counter < 0: self.cleanups = [] else: self.cleanups = self.cleanups[0:counter] return True # If SELF is empty but OTHER has some cleanups, then consider # that a change as well. if len(self.cleanups) == 0 and len(other.cleanups) > 0: log('merging non-empty other', 4) self.cleanups = other.cleanups[:] return True return False # Push a new constructor onto our stack. LHS is the # left-hand-side of the GimpleCall statement. It may be None, # meaning that this constructor's value wasn't used. def push(self, location, lhs): if lhs is None: obj = Dummy(location) else: obj = Cleanup(lhs, location) log('pushing %s' % lhs, 4) idx = self._find_var(lhs) if idx >= 0: gcc.permerror(location, 'reassigning to known cleanup') gcc.inform(self.cleanups[idx].location, 'previous assignment is here') self.cleanups.append(obj) # A helper for merge and pop that finds BACK_TO in self.cleanups, # and returns the index, or -1 if not found. def _find_var(self, back_to): for i in range(len(self.cleanups) - 1, -1, -1): if isinstance(self.cleanups[i], Dummy): continue if self.compare_vars(self.cleanups[i].var, back_to): return i return -1 # Pop constructors until we find one matching BACK_TO. # This is invoked when we see a do_cleanups call. def pop(self, location, back_to): log('pop:', 4) i = self._find_var(back_to) if i >= 0: self.cleanups = self.cleanups[0:i] else: gcc.permerror(location, 'destructor call with unknown argument') # Check whether ARG is the current master cleanup. Return True if # all is well. def verify(self, location, arg): log('verify %s' % arg, 4) return (len(self.cleanups) > 0 and not isinstance(self.cleanups[0], Dummy) and self.compare_vars(self.cleanups[0].var, arg)) # Check whether SELF is empty. 
def isempty(self): log('isempty: len = %d' % len(self.cleanups), 4) return len(self.cleanups) == 0 # Emit informational warnings about the cleanup stack. def inform(self): for item in reversed(self.cleanups): gcc.inform(item.location, 'leaked cleanup') class CleanupChecker: def __init__(self, fun): self.fun = fun self.seen_edges = set() self.bad_returns = set() # This maps BB indices to a list of master cleanups for the # BB. self.master_cleanups = {} # Pick a reasonable location for the basic block BB. def guess_bb_location(self, bb): if isinstance(bb.gimple, list): for stmt in bb.gimple: if stmt.loc: return stmt.loc return self.fun.end # Compute the master cleanup list for BB. # Modifies MASTER_CLEANUP in place. def compute_master(self, bb, bb_from, master_cleanup): if not isinstance(bb.gimple, list): return curloc = self.fun.end for stmt in bb.gimple: if stmt.loc: curloc = stmt.loc if isinstance(stmt, gcc.GimpleCall) and stmt.fndecl: if is_constructor(stmt.fndecl): log('saw constructor %s in bb=%d' % (str(stmt.fndecl), bb.index), 2) self.cleanup_aware = True master_cleanup.push(curloc, stmt.lhs) elif is_destructor(stmt.fndecl): if str(stmt.fndecl.name) != 'do_cleanups': self.only_do_cleanups_seen = False log('saw destructor %s in bb=%d, bb_from=%d, argument=%s' % (str(stmt.fndecl.name), bb.index, bb_from, str(stmt.args[0])), 2) master_cleanup.pop(curloc, stmt.args[0]) elif needs_special_treatment(stmt.fndecl): pass # gcc.permerror(curloc, 'function needs special treatment') elif isinstance(stmt, gcc.GimpleAssign): if isinstance(stmt.lhs, gcc.VarDecl) and isinstance(stmt.rhs[0], gcc.VarDecl): master_cleanup.note_assignment(stmt.lhs, stmt.rhs[0]) elif isinstance(stmt, gcc.GimpleReturn): if self.is_constructor: if not master_cleanup.verify(curloc, stmt.retval): gcc.permerror(curloc, 'constructor does not return master cleanup') elif not self.is_special_constructor: if not master_cleanup.isempty(): if curloc not in self.bad_returns: gcc.permerror(curloc, 'cleanup 
stack is not empty at return') self.bad_returns.add(curloc) master_cleanup.inform() # Traverse a basic block, updating the master cleanup information # and propagating to other blocks. def traverse_bbs(self, edge, bb, bb_from, entry_master): log('traverse_bbs %d from %d' % (bb.index, bb_from), 1) # Propagate the entry MasterCleanup though this block. master_cleanup = MasterCleanup(entry_master) self.compute_master(bb, bb_from, master_cleanup) modified = False if bb.index in self.master_cleanups: # Merge the newly-computed MasterCleanup into the one we # have already computed. If this resulted in a # significant change, then we need to re-propagate. modified = self.master_cleanups[bb.index].merge(master_cleanup) else: self.master_cleanups[bb.index] = master_cleanup modified = True # EDGE is None for the entry BB. if edge is not None: # If merging cleanups caused a change, check to see if we # have a bad loop. if edge in self.seen_edges: # This error doesn't really help. # if modified: # gcc.permerror(self.guess_bb_location(bb), # 'invalid cleanup use in loop') return self.seen_edges.add(edge) if not modified: return # Now propagate to successor nodes. for edge in bb.succs: self.traverse_bbs(edge, edge.dest, bb.index, master_cleanup) def check_cleanups(self): if not self.fun.cfg or not self.fun.decl: return 'ignored' if is_destructor(self.fun.decl): return 'destructor' if needs_special_treatment(self.fun.decl): return 'special' self.is_constructor = is_constructor(self.fun.decl) self.is_special_constructor = not self.is_constructor and str(self.fun.decl.name).find('with_cleanup') > -1 # Yuck. if str(self.fun.decl.name) == 'gdb_xml_create_parser_and_cleanup_1': self.is_special_constructor = True if self.is_special_constructor: gcc.inform(self.fun.start, 'function %s is a special constructor' % (self.fun.decl.name)) # If we only see do_cleanups calls, and this function is not # itself a constructor, then we can convert it easily to RAII. 
self.only_do_cleanups_seen = not self.is_constructor # If we ever call a constructor, then we are "cleanup-aware". self.cleanup_aware = False entry_bb = self.fun.cfg.entry master_cleanup = MasterCleanup() self.traverse_bbs(None, entry_bb, -1, master_cleanup) if want_raii_info and self.only_do_cleanups_seen and self.cleanup_aware: gcc.inform(self.fun.decl.location, 'function %s could be converted to RAII' % (self.fun.decl.name)) if self.is_constructor: return 'constructor' return 'OK' class CheckerPass(gcc.GimplePass): def execute(self, fun): if fun.decl: log("Starting " + fun.decl.name) if show_cfg: dot = gccutils.cfg_to_dot(fun.cfg, fun.decl.name) gccutils.invoke_dot(dot, name=fun.decl.name) checker = CleanupChecker(fun) what = checker.check_cleanups() if fun.decl: log(fun.decl.name + ': ' + what, 2) ps = CheckerPass(name = 'check-cleanups') # We need the cfg, but we want a relatively high-level Gimple. ps.register_after('cfg')
unknown
codeparrot/codeparrot-clean
import json import os import re from threading import Thread import time from membase.api.rest_client import RestConnection from memcached.helper.data_helper import MemcachedClientHelper from remote.remote_util import RemoteMachineShellConnection, RemoteMachineHelper import testconstants import gzip import urllib from mc_bin_client import MemcachedError RETRIES = 10 # The histo dict is returned by add_timing_sample(). # The percentiles must be sorted, ascending, like [0.90, 0.99]. def histo_percentile(histo, percentiles): v_sum = 0 bins = histo.keys() bins.sort() for bin in bins: v_sum += histo[bin] v_sum = float(v_sum) v_cur = 0 # Running total. rv = [] for bin in bins: if not percentiles: return rv v_cur += histo[bin] while percentiles and (v_cur / v_sum) >= percentiles[0]: rv.append((percentiles[0], bin)) percentiles.pop(0) return rv class StatsCollector(object): _task = {} _verbosity = True _mb_stats = {"snapshots": []} # manually captured memcached stats _reb_stats = {} def __init__(self, verbosity): self._verbosity = verbosity self.is_leader = False self.active_mergers = 0 #this function starts collecting stats from all nodes with the given #frequency def start(self, nodes, bucket, pnames, name, frequency, client_id='', collect_server_stats = True): self._task = {"state": "running", "threads": []} self._task["name"] = name self._task["time"] = time.time() self._task["ops"] = [] self._task["totalops"] = [] self._task["ops-temp"] = [] self._task["latency"] = {} self._task["data_size_stats"] = [] rest = RestConnection(nodes[0]) info = rest.get_nodes_self() self.data_path = info.storage[0].get_data_path() self.client_id = str(client_id) if collect_server_stats: mbstats_thread = Thread(target=self.membase_stats, args=(nodes, bucket, 600, self._verbosity)) mbstats_thread.start() sysstats_thread = Thread(target=self.system_stats, args=(nodes, pnames, frequency, self._verbosity)) sysstats_thread.start() ns_server_stats_thread = Thread(target=self.ns_server_stats, 
args=([nodes[0]], bucket, 60, self._verbosity)) ns_server_stats_thread.start() rest = RestConnection(nodes[0]) bucket_size_thead = Thread(target=self.get_bucket_size, args=(bucket, rest, frequency)) bucket_size_thead.start() # data_size_thread = Thread(target=self.get_data_file_size, # args=(nodes, 60, bucket)) # data_size_thread.start() self._task["threads"] = [sysstats_thread, ns_server_stats_thread, bucket_size_thead, mbstats_thread] #data_size_thread ] # Getting build/machine stats from only one node in the cluster self.build_stats(nodes) self.machine_stats(nodes) def stop(self): self._task["state"] = "stopped" for t in self._task["threads"]: t.join() self._task["time"] = time.time() - self._task["time"] def sample(self, cur): pass def export(self, name, test_params): for latency in self._task["latency"].keys(): # save the last histogram snapshot histos = self._task["latency"].get(latency, []) if histos: key = latency + "-histogram" self._task["latency"][key] = histos[-1].copy() del self._task["latency"][key]["delta"] self._task["latency"][key]["client_id"] = self.client_id # calculate percentiles key = 'percentile-' + latency self._task["latency"][key] = [] for histo in histos: # for every sample histogram, produce a temp summary: # temp = [90 per, 95 per, 99 per, client_id, delta] temp = [] time = histo['time'] delta = histo['delta'] del histo['delta'], histo['time'] p = histo_percentile(histo, [0.80, 0.90, 0.95, 0.99, 0.999]) # p is list of tuples for val in p: temp.append(val[-1]) temp.append(self.client_id) temp.append(time) temp.append(delta) self._task["latency"][key].append(temp) test_params.update(self._reb_stats) obj = {"buildinfo": self._task.get("buildstats", {}), "machineinfo": self._task.get("machinestats", {}), "membasestats": self._task.get("membasestats", []), "systemstats": self._task.get("systemstats", []), "name": name, "totalops":self._task["totalops"], "ops":self._task["ops"], "time": self._task["time"], "info": test_params, 
"ns_server_data": self._task.get("ns_server_stats", []), "ns_server_data_system": self._task.get("ns_server_stats_system", []), "timings": self._task.get("timings", []), "dispatcher": self._task.get("dispatcher", []), "bucket-size":self._task.get("bucket_size", []), "data-size": self._task.get("data_size_stats", []), "latency-set-histogram":self._task["latency"].get("latency-set-histogram", []), "latency-set":self._task["latency"].get('percentile-latency-set', []), "latency-set-recent":self._task["latency"].get('percentile-latency-set-recent', []), "latency-get-histogram":self._task["latency"].get("latency-get-histogram", []), "latency-get":self._task["latency"].get('percentile-latency-get', []), "latency-get-recent":self._task["latency"].get('percentile-latency-get-recent', []), "latency-delete":self._task["latency"].get('percentile-latency-delete', []), "latency-delete-recent":self._task["latency"].get('percentile-latency-delete-recent', []), "latency-query-histogram":self._task["latency"].get("latency-query-histogram", []), "latency-query":self._task["latency"].get('percentile-latency-query', []), "latency-query-recent":self._task["latency"].get('percentile-latency-query-recent', []), "latency-obs-persist-server-histogram":self._task["latency"].get("latency-obs-persist-server-histogram", []), "latency-obs-persist-server":self._task["latency"].get('percentile-latency-obs-persist-server-server', []), "latency-obs-persist-server-recent":self._task["latency"].get('percentile-latency-obs-persist-server-recent', []), "latency-obs-persist-client-histogram":self._task["latency"].get("latency-obs-persist-client-histogram", []), "latency-obs-persist-client":self._task["latency"].get('percentile-latency-obs-persist-client', []), "latency-obs-persist-client-recent":self._task["latency"].get('percentile-latency-obs-persist-client-recent', []), "latency-obs-repl-client-histogram":self._task["latency"].get("latency-obs-repl-client-histogram", []), 
"latency-obs-repl-client":self._task["latency"].get('percentile-latency-obs-repl-client', []), "latency-obs-repl-client-recent":self._task["latency"].get('percentile-latency-obs-repl-client-recent', []), "latency-woq-obs-histogram":self._task["latency"].get("latency-woq-obs-histogram", []), "latency-woq-obs":self._task["latency"].get('percentile-latency-woq-obs', []), "latency-woq-obs-recent":self._task["latency"].get('percentile-latency-woq-obs-recent', []), "latency-woq-query-histogram":self._task["latency"].get("latency-woq-query-histogram", []), "latency-woq-query":self._task["latency"].get('percentile-latency-woq-query', []), "latency-woq-query-recent":self._task["latency"].get('percentile-latency-woq-query-recent', []), "latency-woq-histogram":self._task["latency"].get("latency-woq-histogram", []), "latency-woq":self._task["latency"].get('percentile-latency-woq', []), "latency-woq-recent":self._task["latency"].get('percentile-latency-woq-recent', []) } if self.client_id: patterns = ['reload$', 'load$', 'warmup$', 'index$'] phases = ['.reload', '.load', '.warmup', '.index'] name_picker = lambda (pattern, phase): re.search(pattern, self._task["name"]) try: phase = filter(name_picker, zip(patterns, phases))[0][1] except IndexError: phase = '.loop' filename = str(self.client_id) + phase file = gzip.open("{0}.json.gz".format(filename), 'wb') file.write("{0}".format(json.dumps(obj))) file.close() else: file = gzip.open("{0}.json.gz".format(name), 'wb') file.write("{0}".format(json.dumps(obj))) file.close() def get_bucket_size(self, bucket, rest, frequency): self._task["bucket_size"] = [] d = [] while not self._aborted(): print "Collecting bucket size stats" status, db_size = rest.get_database_disk_size(bucket) if status: d.append(db_size) else: print "Enable to read bucket stats" time.sleep(frequency) self._task["bucket_size"] = d print "finished bucket size stats" def get_data_file_size(self, nodes, frequency, bucket): shells = [] for node in nodes: try: 
shells.append(RemoteMachineShellConnection(node)) except: pass paths = [] if shells[0].is_membase_installed(): paths.append(self.data_path+'/{0}-data'.format(bucket)) else: bucket_path = self.data_path+'/{0}'.format(bucket) paths.append(bucket_path) view_path = bucket_path +'/set_view_{0}_design'.format(bucket) paths.append(view_path) d = {"snapshots": []} start_time = str(self._task["time"]) while not self._aborted(): time.sleep(frequency) current_time = time.time() i = 0 for shell in shells: node = nodes[i] unique_id = node.ip+'-'+start_time value = {} for path in paths: size = shell.get_data_file_size(path) value["file"] = path.split('/')[-1] value["size"] = size value["unique_id"] = unique_id value["time"] = current_time value["ip"] = node.ip d["snapshots"].append(value.copy()) i += 1 self._task["data_size_stats"] = d["snapshots"] print " finished data_size_stats" #ops stats #{'tot-sets': 899999, 'tot-gets': 1, 'tot-items': 899999, 'tot-creates': 899999} def ops_stats(self, ops_stat): ops_stat["time"] = time.time() self._task["ops-temp"].append(ops_stat) if len(self._task["ops-temp"]) >= 500 * (1 + self.active_mergers): # Prevent concurrent merge while self.active_mergers: time.sleep(0.1) # Semaphore: +1 active self.active_mergers += 1 # Merge merged = self._merge() self._task["ops"].append(merged) self._task["ops-temp"] = self._task["ops-temp"][500:] # Semaphore: -1 active self.active_mergers -= 1 #if self._task["ops"] has more than 1000 elements try to aggregate them ? 
def latency_stats(self, latency_cmd, latency_stat, cur_time=0): if self._task["latency"].get(latency_cmd) is None: self._task["latency"][latency_cmd] = [] temp_latency_stat = latency_stat.copy() if not cur_time: cur_time = time.time() temp_latency_stat['time'] = int(cur_time) temp_latency_stat['delta'] = cur_time - self._task['time'] self._task["latency"][latency_cmd].append(temp_latency_stat) def _merge(self): first = self._task["ops-temp"][0] merged = {"startTime":first["start-time"]} totalgets = 0 totalsets = 0 totalqueries = 0 delta = 0 for i in range(499): current = self._task["ops-temp"][i] next = self._task["ops-temp"][i+1] totalgets += current["tot-gets"] totalsets += current["tot-sets"] totalqueries += current["tot-queries"] delta += (next["start-time"] - current["start-time"]) merged["endTime"] = merged["startTime"] + delta merged["totalSets"] = totalsets merged["totalGets"] = totalgets merged["totalQueries"] = totalqueries qps = totalqueries / float(delta) merged["queriesPerSec"] = qps return merged def total_stats(self, ops_stat): ops_stat["time"] = time.time() self._task["totalops"].append(ops_stat) def build_stats(self,nodes): json_response = StatUtil.build_info(nodes[0]) self._task["buildstats"] = json_response def machine_stats(self,nodes): machine_stats = StatUtil.machine_info(nodes[0]) self._task["machinestats"] = machine_stats def reb_stats(self, start, dur): print "[reb_stats] recording reb start = {0}, reb duration = {1}"\ .format(start, dur) self._reb_stats["reb_start"] = start self._reb_stats["reb_dur"] = dur def _extract_proc_info(self, shell, pid): o, r = shell.execute_command("cat /proc/{0}/stat".format(pid)) fields = ('pid comm state ppid pgrp session tty_nr tpgid flags minflt ' 'cminflt majflt cmajflt utime stime cutime cstime priority ' 'nice num_threads itrealvalue starttime vsize rss rsslim ' 'startcode endcode startstack kstkesp kstkeip signal blocked ' 'sigignore sigcatch wchan nswap cnswap exit_signal ' 'processor rt_priority 
policy delayacct_blkio_ticks ' 'guest_time cguest_time ').split(' ') d = dict(zip(fields, o[0].split(' '))) return d def system_stats(self, nodes, pnames, frequency, verbosity=False): shells = [] for node in nodes: try: bucket = RestConnection(node).get_buckets()[0].name MemcachedClientHelper.direct_client(node, bucket) shells.append(RemoteMachineShellConnection(node)) except: pass d = {"snapshots": []} # "pname":"x","pid":"y","snapshots":[{"time":time,"value":value}] start_time = str(self._task["time"]) while not self._aborted(): time.sleep(frequency) current_time = time.time() i = 0 for shell in shells: node = nodes[i] unique_id = node.ip+'-'+start_time for pname in pnames: obj = RemoteMachineHelper(shell).is_process_running(pname) if obj and obj.pid: value = self._extract_proc_info(shell, obj.pid) value["name"] = pname value["id"] = obj.pid value["unique_id"] = unique_id value["time"] = current_time value["ip"] = node.ip d["snapshots"].append(value) i += 1 self._task["systemstats"] = d["snapshots"] print " finished system_stats" def couchdb_stats(nodes): pass def capture_mb_snapshot(self, node): """ Capture membase stats snapshot manually """ print "[capture_mb_snapshot] capturing memcache stats snapshot for {0}"\ .format(node.ip) stats = {} try: bucket = RestConnection(node).get_buckets()[0].name mc = MemcachedClientHelper.direct_client(node, bucket) stats = mc.stats() except Exception as e: print "[capture_mb_snapshot] Exception: {0}".format(str(e)) return False stats["time"] = time.time() stats["ip"] = node.ip self._mb_stats["snapshots"].append(stats) print stats print "[capture_mb_snapshot] memcache stats snapshot captured" return True def membase_stats(self,nodes, bucket, frequency, verbose=False): mcs = [] for node in nodes: try: bucket = RestConnection(node).get_buckets()[0].name mcs.append(MemcachedClientHelper.direct_client(node, bucket)) except: pass self._task["membasestats"] = [] self._task["timings"] = [] self._task["dispatcher"] = [] d = {} # 
"pname":"x","pid":"y","snapshots":[{"time":time,"value":value}] for mc in mcs: d[mc.host] = {"snapshots": [], "timings":[], "dispatcher":[]} while not self._aborted(): time_left = frequency timings = None # at minimum we want to check for aborted every minute while not self._aborted() and time_left > 0: time.sleep(min(time_left, 60)) time_left -= 60 for mc in mcs: retries = 0 stats = {} while not stats and retries < RETRIES: try: stats = mc.stats() try: mem_stats = mc.stats('raw memory') except MemcachedError: mem_stats = mc.stats('memory') stats.update(mem_stats) except Exception as e: print "[memebase_stats] Exception: {0}, retries = {1}"\ .format(str(e), retries) time.sleep(2) mc.reconnect() retries += 1 continue stats["time"] = time.time() stats["ip"] = mc.host d[mc.host]["snapshots"].append(stats) timings = mc.stats('timings') d[mc.host]["timings"].append(timings) dispatcher = mc.stats('dispatcher') d[mc.host]["dispatcher"].append(dispatcher) print "\nDumping disk timing stats: {0}".format(time.strftime('%X %x %Z')) if timings: # TODO dump timings for all servers for key, value in sorted(timings.iteritems()): if key.startswith("disk"): print "{0:50s}: {1}".format(key, value) start_time = str(self._task["time"]) for mc in mcs: ip = mc.host unique_id = ip+'-'+start_time current_time = time.time() if self._mb_stats["snapshots"]: # use manually captured stats self._task["membasestats"] = self._mb_stats["snapshots"] else: # use periodically captured stats for snapshot in d[mc.host]["snapshots"]: snapshot['unique_id'] = unique_id snapshot['time'] = current_time snapshot['ip'] = ip self._task["membasestats"].append(snapshot) for timing in d[mc.host]["timings"]: timing['unique_id'] = unique_id timing['time'] = current_time timing['ip'] = ip self._task["timings"].append(timing) for dispatcher in d[mc.host]["dispatcher"]: dispatcher['unique_id'] = unique_id dispatcher['time'] = current_time dispatcher['ip'] = ip self._task["dispatcher"].append(dispatcher) print " 
finished membase_stats" def ns_server_stats(self, nodes, bucket, frequency, verbose=False): self._task["ns_server_stats"] = [] self._task["ns_server_stats_system"] = [] d = {} for node in nodes: d[node] = {"snapshots": [], "system_snapshots": [] } while not self._aborted(): time.sleep(frequency) print "Collecting ns_server_stats" for node in nodes: rest = RestConnection(node) data_json = rest.fetch_bucket_stats(bucket=bucket, zoom='minute') d[node]["snapshots"].append(data_json) data_json = rest.fetch_system_stats() d[node]["system_snapshots"].append(data_json) for node in nodes: for snapshot in d[node]["snapshots"]: self._task["ns_server_stats"].append(snapshot) for snapshot in d[node]["system_snapshots"]: self._task["ns_server_stats_system"].append(snapshot) print " finished ns_server_stats" def _aborted(self): return self._task["state"] == "stopped" # Invokes optional callback when registered levels have been reached # during stats sample()'ing. # class CallbackStatsCollector(StatsCollector): def __init__(self, verbosity): # Tuples of level_name, level, callback. self.level_callbacks = [] super(CallbackStatsCollector, self).__init__(verbosity) def sample(self, cur): for level_name, level, callback in self.level_callbacks: if level < cur.get(level_name, -1): callback(cur) return super(CallbackStatsCollector, self).sample(cur) class StatUtil(object): @staticmethod def build_info(node): rest = RestConnection(node) api = rest.baseUrl + 'nodes/self' status, content, header = rest._http_request(api) json_parsed = json.loads(content) return json_parsed @staticmethod def machine_info(node): shell = RemoteMachineShellConnection(node) info = shell.extract_remote_info() return {"type": info.type, "distribution": info.distribution_type, "version": info.distribution_version, "ram": info.ram, "cpu": info.cpu, "disk": info.disk, "hostname":info.hostname}
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Core Spring Boot classes. * * @see org.springframework.boot.SpringApplication */ @NullMarked package org.springframework.boot; import org.jspecify.annotations.NullMarked;
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot/src/main/java/org/springframework/boot/package-info.java
"""Test script for ftplib module.""" # Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS # environment import ftplib import socket import io import errno import os import threading import time import unittest try: import ssl except ImportError: ssl = None from unittest import TestCase, skipUnless from test import support from test.support import requires_subprocess from test.support import threading_helper from test.support import socket_helper from test.support import warnings_helper from test.support import asynchat from test.support import asyncore from test.support.socket_helper import HOST, HOSTv6 support.requires_working_socket(module=True) TIMEOUT = support.LOOPBACK_TIMEOUT DEFAULT_ENCODING = 'utf-8' # the dummy data returned by server over the data channel when # RETR, LIST, NLST, MLSD commands are issued RETR_DATA = 'abcde\xB9\xB2\xB3\xA4\xA6\r\n' * 1000 LIST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n' NLST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n' MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n" "type=pdir;perm=e;unique==keVO1+d?3; ..\r\n" "type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n" "type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n" "type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n" "type=file;perm=awr;unique==keVO1+8G4; writable\r\n" "type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n" "type=dir;perm=;unique==keVO1+1t2; no-exec\r\n" "type=file;perm=r;unique==keVO1+EG4; two words\r\n" "type=file;perm=r;unique==keVO1+IH4; leading space\r\n" "type=file;perm=r;unique==keVO1+1G4; file1\r\n" "type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n" "type=file;perm=r;unique==keVO1+1G4; file2\r\n" "type=file;perm=r;unique==keVO1+1G4; file3\r\n" "type=file;perm=r;unique==keVO1+1G4; file4\r\n" "type=dir;perm=cpmel;unique==SGP1; dir \xAE non-ascii char\r\n" "type=file;perm=r;unique==SGP2; file \xAE non-ascii char\r\n") def default_error_handler(): # bpo-44359: Silently ignore socket errors. 
Such errors occur when a client # socket is closed, in TestFTPClass.tearDown() and makepasv() tests, and # the server gets an error on its side. pass class DummyDTPHandler(asynchat.async_chat): dtp_conn_closed = False def __init__(self, conn, baseclass): asynchat.async_chat.__init__(self, conn) self.baseclass = baseclass self.baseclass.last_received_data = bytearray() self.encoding = baseclass.encoding def handle_read(self): new_data = self.recv(1024) self.baseclass.last_received_data += new_data def handle_close(self): # XXX: this method can be called many times in a row for a single # connection, including in clear-text (non-TLS) mode. # (behaviour witnessed with test_data_connection) if not self.dtp_conn_closed: self.baseclass.push('226 transfer complete') self.shutdown() self.dtp_conn_closed = True def push(self, what): if self.baseclass.next_data is not None: what = self.baseclass.next_data self.baseclass.next_data = None if not what: return self.close_when_done() super(DummyDTPHandler, self).push(what.encode(self.encoding)) def handle_error(self): default_error_handler() def shutdown(self): self.close() class DummyFTPHandler(asynchat.async_chat): dtp_handler = DummyDTPHandler def __init__(self, conn, encoding=DEFAULT_ENCODING): asynchat.async_chat.__init__(self, conn) # tells the socket to handle urgent data inline (ABOR command) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1) self.set_terminator(b"\r\n") self.in_buffer = [] self.dtp = None self.last_received_cmd = None self.last_received_data = bytearray() self.next_response = '' self.next_data = None self.rest = None self.next_retr_data = RETR_DATA self.push('220 welcome') self.encoding = encoding # We use this as the string IPv4 address to direct the client # to in response to a PASV command. To test security behavior. # https://bugs.python.org/issue43285/. 
self.fake_pasv_server_ip = '252.253.254.255' def collect_incoming_data(self, data): self.in_buffer.append(data) def found_terminator(self): line = b''.join(self.in_buffer).decode(self.encoding) self.in_buffer = [] if self.next_response: self.push(self.next_response) self.next_response = '' cmd = line.split(' ')[0].lower() self.last_received_cmd = cmd space = line.find(' ') if space != -1: arg = line[space + 1:] else: arg = "" if hasattr(self, 'cmd_' + cmd): method = getattr(self, 'cmd_' + cmd) method(arg) else: self.push('550 command "%s" not understood.' %cmd) def handle_error(self): default_error_handler() def push(self, data): asynchat.async_chat.push(self, data.encode(self.encoding) + b'\r\n') def cmd_port(self, arg): addr = list(map(int, arg.split(','))) ip = '%d.%d.%d.%d' %tuple(addr[:4]) port = (addr[4] * 256) + addr[5] s = socket.create_connection((ip, port), timeout=TIMEOUT) self.dtp = self.dtp_handler(s, baseclass=self) self.push('200 active data connection established') def cmd_pasv(self, arg): with socket.create_server((self.socket.getsockname()[0], 0)) as sock: sock.settimeout(TIMEOUT) port = sock.getsockname()[1] ip = self.fake_pasv_server_ip ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256 self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2)) conn, addr = sock.accept() self.dtp = self.dtp_handler(conn, baseclass=self) def cmd_eprt(self, arg): af, ip, port = arg.split(arg[0])[1:-1] port = int(port) s = socket.create_connection((ip, port), timeout=TIMEOUT) self.dtp = self.dtp_handler(s, baseclass=self) self.push('200 active data connection established') def cmd_epsv(self, arg): with socket.create_server((self.socket.getsockname()[0], 0), family=socket.AF_INET6) as sock: sock.settimeout(TIMEOUT) port = sock.getsockname()[1] self.push('229 entering extended passive mode (|||%d|)' %port) conn, addr = sock.accept() self.dtp = self.dtp_handler(conn, baseclass=self) def cmd_echo(self, arg): # sends back the received string (used by the 
test suite) self.push(arg) def cmd_noop(self, arg): self.push('200 noop ok') def cmd_user(self, arg): self.push('331 username ok') def cmd_pass(self, arg): self.push('230 password ok') def cmd_acct(self, arg): self.push('230 acct ok') def cmd_rnfr(self, arg): self.push('350 rnfr ok') def cmd_rnto(self, arg): self.push('250 rnto ok') def cmd_dele(self, arg): self.push('250 dele ok') def cmd_cwd(self, arg): self.push('250 cwd ok') def cmd_size(self, arg): self.push('250 1000') def cmd_mkd(self, arg): self.push('257 "%s"' %arg) def cmd_rmd(self, arg): self.push('250 rmd ok') def cmd_pwd(self, arg): self.push('257 "pwd ok"') def cmd_type(self, arg): self.push('200 type ok') def cmd_quit(self, arg): self.push('221 quit ok') self.shutdown() def cmd_abor(self, arg): self.push('226 abor ok') def cmd_stor(self, arg): self.push('125 stor ok') def cmd_rest(self, arg): self.rest = arg self.push('350 rest ok') def cmd_retr(self, arg): self.push('125 retr ok') if self.rest is not None: offset = int(self.rest) else: offset = 0 self.dtp.push(self.next_retr_data[offset:]) self.dtp.close_when_done() self.rest = None def cmd_list(self, arg): self.push('125 list ok') self.dtp.push(LIST_DATA) self.dtp.close_when_done() def cmd_nlst(self, arg): self.push('125 nlst ok') self.dtp.push(NLST_DATA) self.dtp.close_when_done() def cmd_opts(self, arg): self.push('200 opts ok') def cmd_mlsd(self, arg): self.push('125 mlsd ok') self.dtp.push(MLSD_DATA) self.dtp.close_when_done() def cmd_setlongretr(self, arg): # For testing. Next RETR will return long line. 
self.next_retr_data = 'x' * int(arg) self.push('125 setlongretr ok') class DummyFTPServer(asyncore.dispatcher, threading.Thread): handler = DummyFTPHandler def __init__(self, address, af=socket.AF_INET, encoding=DEFAULT_ENCODING): threading.Thread.__init__(self) asyncore.dispatcher.__init__(self) self.daemon = True self.create_socket(af, socket.SOCK_STREAM) self.bind(address) self.listen(5) self.active = False self.active_lock = threading.Lock() self.host, self.port = self.socket.getsockname()[:2] self.handler_instance = None self.encoding = encoding def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: self.active_lock.acquire() asyncore.loop(timeout=0.1, count=1) self.active_lock.release() asyncore.close_all(ignore_all=True) def stop(self): assert self.active self.active = False self.join() def handle_accepted(self, conn, addr): self.handler_instance = self.handler(conn, encoding=self.encoding) def handle_connect(self): self.shutdown() handle_read = handle_connect def writable(self): return 0 def handle_error(self): default_error_handler() if ssl is not None: CERTFILE = os.path.join(os.path.dirname(__file__), "certdata", "keycert3.pem") CAFILE = os.path.join(os.path.dirname(__file__), "certdata", "pycacert.pem") class SSLConnection(asyncore.dispatcher): """An asyncore.dispatcher subclass supporting TLS/SSL.""" _ssl_accepting = False _ssl_closing = False def secure_connection(self): context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) context.load_cert_chain(CERTFILE) socket = context.wrap_socket(self.socket, suppress_ragged_eofs=False, server_side=True, do_handshake_on_connect=False) self.del_channel() self.set_socket(socket) self._ssl_accepting = True def _do_ssl_handshake(self): try: self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, 
ssl.SSL_ERROR_WANT_WRITE): return elif err.args[0] == ssl.SSL_ERROR_EOF: return self.handle_close() # TODO: SSLError does not expose alert information elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]: return self.handle_close() raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def _do_ssl_shutdown(self): self._ssl_closing = True try: self.socket = self.socket.unwrap() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return except OSError: # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return # from OpenSSL's SSL_shutdown(), corresponding to a # closed socket condition. See also: # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html pass self._ssl_closing = False if getattr(self, '_ccc', False) is False: super(SSLConnection, self).close() else: pass def handle_read_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_read_event() def handle_write_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_write_event() def send(self, data): try: return super(SSLConnection, self).send(data) except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return 0 raise def recv(self, buffer_size): try: return super(SSLConnection, self).recv(buffer_size) except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return b'' if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): self.handle_close() return b'' raise def handle_error(self): default_error_handler() def shutdown(self): if (isinstance(self.socket, ssl.SSLSocket) and self.socket._sslobj is not None): self._do_ssl_shutdown() else: self.close() 
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler): """A DummyDTPHandler subclass supporting TLS/SSL.""" def __init__(self, conn, baseclass): DummyDTPHandler.__init__(self, conn, baseclass) if self.baseclass.secure_data_channel: self.secure_connection() class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler): """A DummyFTPHandler subclass supporting TLS/SSL.""" dtp_handler = DummyTLS_DTPHandler def __init__(self, conn, encoding=DEFAULT_ENCODING): DummyFTPHandler.__init__(self, conn, encoding=encoding) self.secure_data_channel = False self._ccc = False def cmd_auth(self, line): """Set up secure control channel.""" self.push('234 AUTH TLS successful') self.secure_connection() def cmd_ccc(self, line): self.push('220 Reverting back to clear-text') self._ccc = True self._do_ssl_shutdown() def cmd_pbsz(self, line): """Negotiate size of buffer for secure data transfer. For TLS/SSL the only valid value for the parameter is '0'. Any other value is accepted but ignored. """ self.push('200 PBSZ=0 successful.') def cmd_prot(self, line): """Setup un/secure data channel.""" arg = line.upper() if arg == 'C': self.push('200 Protection set to Clear') self.secure_data_channel = False elif arg == 'P': self.push('200 Protection set to Private') self.secure_data_channel = True else: self.push("502 Unrecognized PROT type (use C or P).") class DummyTLS_FTPServer(DummyFTPServer): handler = DummyTLS_FTPHandler class TestFTPClass(TestCase): def setUp(self, encoding=DEFAULT_ENCODING): self.server = DummyFTPServer((HOST, 0), encoding=encoding) self.server.start() self.client = ftplib.FTP(timeout=TIMEOUT, encoding=encoding) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() # Explicitly clear the attribute to prevent dangling thread self.server = None asyncore.close_all(ignore_all=True) def check_data(self, received, expected): self.assertEqual(len(received), len(expected)) self.assertEqual(received, expected) def 
test_getwelcome(self): self.assertEqual(self.client.getwelcome(), '220 welcome') def test_sanitize(self): self.assertEqual(self.client.sanitize('foo'), repr('foo')) self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****')) self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****')) def test_exceptions(self): self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0') self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0') self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0') self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400') self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499') self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500') self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599') self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999') def test_all_errors(self): exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm, ftplib.error_proto, ftplib.Error, OSError, EOFError) for x in exceptions: try: raise x('exception not included in all_errors set') except ftplib.all_errors: pass def test_set_pasv(self): # passive mode is supposed to be enabled by default self.assertTrue(self.client.passiveserver) self.client.set_pasv(True) self.assertTrue(self.client.passiveserver) self.client.set_pasv(False) self.assertFalse(self.client.passiveserver) def test_voidcmd(self): self.assertEqual(self.client.voidcmd('echo 200'), '200') self.assertEqual(self.client.voidcmd('echo 299'), '299') self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199') self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300') def test_login(self): self.client.login() def test_acct(self): self.client.acct('passwd') def test_rename(self): self.client.rename('a', 'b') self.server.handler_instance.next_response = '200' self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b') def test_delete(self): self.client.delete('foo') 
self.server.handler_instance.next_response = '199' self.assertRaises(ftplib.error_reply, self.client.delete, 'foo') def test_size(self): self.client.size('foo') def test_mkd(self): dir = self.client.mkd('/foo') self.assertEqual(dir, '/foo') def test_rmd(self): self.client.rmd('foo') def test_cwd(self): dir = self.client.cwd('/foo') self.assertEqual(dir, '250 cwd ok') def test_pwd(self): dir = self.client.pwd() self.assertEqual(dir, 'pwd ok') def test_quit(self): self.assertEqual(self.client.quit(), '221 quit ok') # Ensure the connection gets closed; sock attribute should be None self.assertEqual(self.client.sock, None) def test_abort(self): self.client.abort() def test_retrbinary(self): received = [] self.client.retrbinary('retr', received.append) self.check_data(b''.join(received), RETR_DATA.encode(self.client.encoding)) def test_retrbinary_rest(self): for rest in (0, 10, 20): received = [] self.client.retrbinary('retr', received.append, rest=rest) self.check_data(b''.join(received), RETR_DATA[rest:].encode(self.client.encoding)) def test_retrlines(self): received = [] self.client.retrlines('retr', received.append) self.check_data(''.join(received), RETR_DATA.replace('\r\n', '')) def test_storbinary(self): f = io.BytesIO(RETR_DATA.encode(self.client.encoding)) self.client.storbinary('stor', f) self.check_data(self.server.handler_instance.last_received_data, RETR_DATA.encode(self.server.encoding)) # test new callback arg flag = [] f.seek(0) self.client.storbinary('stor', f, callback=lambda x: flag.append(None)) self.assertTrue(flag) def test_storbinary_rest(self): data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding) f = io.BytesIO(data) for r in (30, '30'): f.seek(0) self.client.storbinary('stor', f, rest=r) self.assertEqual(self.server.handler_instance.rest, str(r)) def test_storlines(self): data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding) f = io.BytesIO(data) self.client.storlines('stor', f) 
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA.encode(self.server.encoding)) # test new callback arg flag = [] f.seek(0) self.client.storlines('stor foo', f, callback=lambda x: flag.append(None)) self.assertTrue(flag) f = io.StringIO(RETR_DATA.replace('\r\n', '\n')) # storlines() expects a binary file, not a text file with warnings_helper.check_warnings(('', BytesWarning), quiet=True): self.assertRaises(TypeError, self.client.storlines, 'stor foo', f) def test_nlst(self): self.client.nlst() self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1]) def test_dir(self): l = [] self.client.dir(l.append) self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', '')) def test_mlsd(self): list(self.client.mlsd()) list(self.client.mlsd(path='/')) list(self.client.mlsd(path='/', facts=['size', 'type'])) ls = list(self.client.mlsd()) for name, facts in ls: self.assertIsInstance(name, str) self.assertIsInstance(facts, dict) self.assertTrue(name) self.assertIn('type', facts) self.assertIn('perm', facts) self.assertIn('unique', facts) def set_data(data): self.server.handler_instance.next_data = data def test_entry(line, type=None, perm=None, unique=None, name=None): type = 'type' if type is None else type perm = 'perm' if perm is None else perm unique = 'unique' if unique is None else unique name = 'name' if name is None else name set_data(line) _name, facts = next(self.client.mlsd()) self.assertEqual(_name, name) self.assertEqual(facts['type'], type) self.assertEqual(facts['perm'], perm) self.assertEqual(facts['unique'], unique) # plain test_entry('type=type;perm=perm;unique=unique; name\r\n') # "=" in fact value test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe") test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type") test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe") test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====") # spaces in name 
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me") test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ") test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name") test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e") # ";" in name test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me") test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name") test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;") test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;") # case sensitiveness set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n') _name, facts = next(self.client.mlsd()) for x in facts: self.assertTrue(x.islower()) # no data (directory empty) set_data('') self.assertRaises(StopIteration, next, self.client.mlsd()) set_data('') for x in self.client.mlsd(): self.fail("unexpected data %s" % x) def test_makeport(self): with self.client.makeport(): # IPv4 is in use, just make sure send_eprt has not been used self.assertEqual(self.server.handler_instance.last_received_cmd, 'port') def test_makepasv(self): host, port = self.client.makepasv() conn = socket.create_connection((host, port), timeout=TIMEOUT) conn.close() # IPv4 is in use, just make sure send_epsv has not been used self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv') def test_makepasv_issue43285_security_disabled(self): """Test the opt-in to the old vulnerable behavior.""" self.client.trust_server_pasv_ipv4_address = True bad_host, port = self.client.makepasv() self.assertEqual( bad_host, self.server.handler_instance.fake_pasv_server_ip) # Opening and closing a connection keeps the dummy server happy # instead of timing out on accept. 
socket.create_connection((self.client.sock.getpeername()[0], port), timeout=TIMEOUT).close() def test_makepasv_issue43285_security_enabled_default(self): self.assertFalse(self.client.trust_server_pasv_ipv4_address) trusted_host, port = self.client.makepasv() self.assertNotEqual( trusted_host, self.server.handler_instance.fake_pasv_server_ip) # Opening and closing a connection keeps the dummy server happy # instead of timing out on accept. socket.create_connection((trusted_host, port), timeout=TIMEOUT).close() def test_with_statement(self): self.client.quit() def is_client_connected(): if self.client.sock is None: return False try: self.client.sendcmd('noop') except (OSError, EOFError): return False return True # base test with ftplib.FTP(timeout=TIMEOUT) as self.client: self.client.connect(self.server.host, self.server.port) self.client.sendcmd('noop') self.assertTrue(is_client_connected()) self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit') self.assertFalse(is_client_connected()) # QUIT sent inside the with block with ftplib.FTP(timeout=TIMEOUT) as self.client: self.client.connect(self.server.host, self.server.port) self.client.sendcmd('noop') self.client.quit() self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit') self.assertFalse(is_client_connected()) # force a wrong response code to be sent on QUIT: error_perm # is expected and the connection is supposed to be closed try: with ftplib.FTP(timeout=TIMEOUT) as self.client: self.client.connect(self.server.host, self.server.port) self.client.sendcmd('noop') self.server.handler_instance.next_response = '550 error on quit' except ftplib.error_perm as err: self.assertEqual(str(err), '550 error on quit') else: self.fail('Exception not raised') # needed to give the threaded server some time to set the attribute # which otherwise would still be == 'noop' time.sleep(0.1) self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit') self.assertFalse(is_client_connected()) 
def test_source_address(self): self.client.quit() port = socket_helper.find_unused_port() try: self.client.connect(self.server.host, self.server.port, source_address=(HOST, port)) self.assertEqual(self.client.sock.getsockname()[1], port) self.client.quit() except OSError as e: if e.errno == errno.EADDRINUSE: self.skipTest("couldn't bind to port %d" % port) raise def test_source_address_passive_connection(self): port = socket_helper.find_unused_port() self.client.source_address = (HOST, port) try: with self.client.transfercmd('list') as sock: self.assertEqual(sock.getsockname()[1], port) except OSError as e: if e.errno == errno.EADDRINUSE: self.skipTest("couldn't bind to port %d" % port) raise def test_parse257(self): self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar') self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar') self.assertEqual(ftplib.parse257('257 ""'), '') self.assertEqual(ftplib.parse257('257 "" created'), '') self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"') # The 257 response is supposed to include the directory # name and in case it contains embedded double-quotes # they must be doubled (see RFC-959, chapter 7, appendix 2). 
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar') self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar') def test_line_too_long(self): self.assertRaises(ftplib.Error, self.client.sendcmd, 'x' * self.client.maxline * 2) def test_retrlines_too_long(self): self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2)) received = [] self.assertRaises(ftplib.Error, self.client.retrlines, 'retr', received.append) def test_storlines_too_long(self): f = io.BytesIO(b'x' * self.client.maxline * 2) self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f) def test_encoding_param(self): encodings = ['latin-1', 'utf-8'] for encoding in encodings: with self.subTest(encoding=encoding): self.tearDown() self.setUp(encoding=encoding) self.assertEqual(encoding, self.client.encoding) self.test_retrbinary() self.test_storbinary() self.test_retrlines() new_dir = self.client.mkd('/non-ascii dir \xAE') self.check_data(new_dir, '/non-ascii dir \xAE') # Check default encoding client = ftplib.FTP(timeout=TIMEOUT) self.assertEqual(DEFAULT_ENCODING, client.encoding) @skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled") class TestIPv6Environment(TestCase): def setUp(self): self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6, encoding=DEFAULT_ENCODING) self.server.start() self.client = ftplib.FTP(timeout=TIMEOUT, encoding=DEFAULT_ENCODING) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() # Explicitly clear the attribute to prevent dangling thread self.server = None asyncore.close_all(ignore_all=True) def test_af(self): self.assertEqual(self.client.af, socket.AF_INET6) def test_makeport(self): with self.client.makeport(): self.assertEqual(self.server.handler_instance.last_received_cmd, 'eprt') def test_makepasv(self): host, port = self.client.makepasv() conn = socket.create_connection((host, port), timeout=TIMEOUT) conn.close() 
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv') def test_transfer(self): def retr(): received = [] self.client.retrbinary('retr', received.append) self.assertEqual(b''.join(received), RETR_DATA.encode(self.client.encoding)) self.client.set_pasv(True) retr() self.client.set_pasv(False) retr() @skipUnless(ssl, "SSL not available") @requires_subprocess() class TestTLS_FTPClassMixin(TestFTPClass): """Repeat TestFTPClass tests starting the TLS layer for both control and data connections first. """ def setUp(self, encoding=DEFAULT_ENCODING): self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding) self.server.start() self.client = ftplib.FTP_TLS(timeout=TIMEOUT, encoding=encoding) self.client.connect(self.server.host, self.server.port) # enable TLS self.client.auth() self.client.prot_p() @skipUnless(ssl, "SSL not available") @requires_subprocess() class TestTLS_FTPClass(TestCase): """Specific TLS_FTP class tests.""" def setUp(self, encoding=DEFAULT_ENCODING): self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding) self.server.start() self.client = ftplib.FTP_TLS(timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() # Explicitly clear the attribute to prevent dangling thread self.server = None asyncore.close_all(ignore_all=True) def test_control_connection(self): self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.auth() self.assertIsInstance(self.client.sock, ssl.SSLSocket) def test_data_connection(self): # clear text with self.client.transfercmd('list') as sock: self.assertNotIsInstance(sock, ssl.SSLSocket) self.assertEqual(sock.recv(1024), LIST_DATA.encode(self.client.encoding)) self.assertEqual(self.client.voidresp(), "226 transfer complete") # secured, after PROT P self.client.prot_p() with self.client.transfercmd('list') as sock: self.assertIsInstance(sock, ssl.SSLSocket) # consume from SSL socket to finalize handshake and avoid # 
"SSLError [SSL] shutdown while in init" self.assertEqual(sock.recv(1024), LIST_DATA.encode(self.client.encoding)) self.assertEqual(self.client.voidresp(), "226 transfer complete") # PROT C is issued, the connection must be in cleartext again self.client.prot_c() with self.client.transfercmd('list') as sock: self.assertNotIsInstance(sock, ssl.SSLSocket) self.assertEqual(sock.recv(1024), LIST_DATA.encode(self.client.encoding)) self.assertEqual(self.client.voidresp(), "226 transfer complete") def test_login(self): # login() is supposed to implicitly secure the control connection self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.login() self.assertIsInstance(self.client.sock, ssl.SSLSocket) # make sure that AUTH TLS doesn't get issued again self.client.login() def test_auth_issued_twice(self): self.client.auth() self.assertRaises(ValueError, self.client.auth) def test_context(self): self.client.quit() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE self.assertRaises(TypeError, ftplib.FTP_TLS, keyfile=CERTFILE, context=ctx) self.assertRaises(TypeError, ftplib.FTP_TLS, certfile=CERTFILE, context=ctx) self.assertRaises(TypeError, ftplib.FTP_TLS, certfile=CERTFILE, keyfile=CERTFILE, context=ctx) self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.auth() self.assertIs(self.client.sock.context, ctx) self.assertIsInstance(self.client.sock, ssl.SSLSocket) self.client.prot_p() with self.client.transfercmd('list') as sock: self.assertIs(sock.context, ctx) self.assertIsInstance(sock, ssl.SSLSocket) def test_ccc(self): self.assertRaises(ValueError, self.client.ccc) self.client.login(secure=True) self.assertIsInstance(self.client.sock, ssl.SSLSocket) self.client.ccc() self.assertRaises(ValueError, self.client.sock.unwrap) @skipUnless(False, "FIXME: bpo-32706") def 
test_check_hostname(self): self.client.quit() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertEqual(ctx.check_hostname, True) ctx.load_verify_locations(CAFILE) self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT) # 127.0.0.1 doesn't match SAN self.client.connect(self.server.host, self.server.port) with self.assertRaises(ssl.CertificateError): self.client.auth() # exception quits connection self.client.connect(self.server.host, self.server.port) self.client.prot_p() with self.assertRaises(ssl.CertificateError): with self.client.transfercmd("list") as sock: pass self.client.quit() self.client.connect("localhost", self.server.port) self.client.auth() self.client.quit() self.client.connect("localhost", self.server.port) self.client.prot_p() with self.client.transfercmd("list") as sock: pass class TestTimeouts(TestCase): def setUp(self): self.evt = threading.Event() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(20) self.port = socket_helper.bind_port(self.sock) self.server_thread = threading.Thread(target=self.server) self.server_thread.daemon = True self.server_thread.start() # Wait for the server to be ready. self.evt.wait() self.evt.clear() self.old_port = ftplib.FTP.port ftplib.FTP.port = self.port def tearDown(self): ftplib.FTP.port = self.old_port self.server_thread.join() # Explicitly clear the attribute to prevent dangling thread self.server_thread = None def server(self): # This method sets the evt 3 times: # 1) when the connection is ready to be accepted. # 2) when it is safe for the caller to close the connection # 3) when we have closed the socket self.sock.listen() # (1) Signal the caller that we are ready to accept the connection. self.evt.set() try: conn, addr = self.sock.accept() except TimeoutError: pass else: conn.sendall(b"1 Hola mundo\n") conn.shutdown(socket.SHUT_WR) # (2) Signal the caller that it is safe to close the socket. 
self.evt.set() conn.close() finally: self.sock.close() def testTimeoutDefault(self): # default -- use global socket timeout self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: ftp = ftplib.FTP(HOST) finally: socket.setdefaulttimeout(None) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutNone(self): # no timeout -- do not use global socket timeout self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: ftp = ftplib.FTP(HOST, timeout=None) finally: socket.setdefaulttimeout(None) self.assertIsNone(ftp.sock.gettimeout()) self.evt.wait() ftp.close() def testTimeoutValue(self): # a value ftp = ftplib.FTP(HOST, timeout=30) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() # bpo-39259 with self.assertRaises(ValueError): ftplib.FTP(HOST, timeout=0) def testTimeoutConnect(self): ftp = ftplib.FTP() ftp.connect(HOST, timeout=30) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutDifferentOrder(self): ftp = ftplib.FTP(timeout=30) ftp.connect(HOST) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutDirectAccess(self): ftp = ftplib.FTP() ftp.timeout = 30 ftp.connect(HOST) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() class MiscTestCase(TestCase): def test__all__(self): not_exported = { 'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF', 'Error', 'parse150', 'parse227', 'parse229', 'parse257', 'print_line', 'ftpcp', 'test'} support.check__all__(self, ftplib, not_exported=not_exported) def setUpModule(): thread_info = threading_helper.threading_setup() unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info) if __name__ == '__main__': unittest.main()
python
github
https://github.com/python/cpython
Lib/test/test_ftplib.py
# Copyright 2008-2015 Canonical # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # For further info, check http://launchpad.net/filesync-server """Tests for txlog utilities.""" import datetime from mock import patch from backends.filesync.data import dbmanager from backends.filesync.data.testing.ormtestcase import ORMTestCase from backends.txlog import utils from backends.txlog.model import TransactionLog from backends.txlog.tests.test_model import txlog_disabled class TransactionLogUtilsTestCase(ORMTestCase): """Tests for the Materialised Views database access functions.""" def setUp(self): super(TransactionLogUtilsTestCase, self).setUp() self._orig_make_user = self.obj_factory.make_user # Overwrite .obj_factory.make_user with a custom version that # doesn't create TransactionLogs as that would pollute our tests. 
p = patch.object(self.obj_factory, 'make_user') self.addCleanup(p.stop) mock_factory = p.start() mock_factory.side_effect = self._make_user_without_txlog self.store = dbmanager.get_shard_store(self.obj_factory.sstore_name) def _make_user_without_txlog(self, *args, **kwargs): """Custom make_user function that does not create TransactionLogs.""" with txlog_disabled(): return self._orig_make_user(*args, **kwargs) def _create_db_worker_last_row_entry(self, worker_name, txlog): """Create a new entry on the txlog.db_worker_last_row table.""" worker_name = unicode(worker_name) self.obj_factory.sstore.execute( u"""INSERT INTO txlog.db_worker_last_row (worker_id, row_id, timestamp) VALUES (?, ?, ?)""", params=(worker_name, txlog.id, txlog.timestamp)) def _find_last_row_worker_names(self): """Find all worker names from the db_worker_last_row table.""" result = self.obj_factory.sstore.execute( u"""SELECT worker_id FROM txlog.db_worker_last_row""") return [row[0] for row in result] def test_get_last_row_with_no_data(self): """Test the get_last_row function when no data is present.""" # First, check the db directly to ensure it is empty. 
result = self.obj_factory.sstore.execute( u"""SELECT row_id FROM txlog.db_worker_last_row""") self.assertEqual(0, result.rowcount) self.assertEqual(utils.NEW_WORKER_LAST_ROW, utils.get_last_row( 'some worker', self.obj_factory.sstore_name)) def test_get_last_row_with_other_data_returns_the_oldest_one(self): """get_last_row returns the row for the oldest txlog ID in the table, if the worker name is not found for that.""" txlog1 = self.obj_factory.make_transaction_log() txlog2 = self.obj_factory.make_transaction_log() txlog3 = self.obj_factory.make_transaction_log() self._create_db_worker_last_row_entry( self.obj_factory.get_unique_unicode(), txlog3) self._create_db_worker_last_row_entry( self.obj_factory.get_unique_unicode(), txlog1) self._create_db_worker_last_row_entry( self.obj_factory.get_unique_unicode(), txlog2) self.assertEqual((txlog1.id, txlog1.timestamp), utils.get_last_row( 'some worker', self.obj_factory.sstore_name)) def test_get_last_row_with_same_data_returns_the_exact_one(self): """Test that get_last_row returns the row for the exact txlog ID in the table, if the worker name is found for that.""" txlog1 = self.obj_factory.make_transaction_log() txlog2 = self.obj_factory.make_transaction_log() txlog3 = self.obj_factory.make_transaction_log() worker_name = self.obj_factory.get_unique_unicode() self._create_db_worker_last_row_entry(worker_name, txlog3) self._create_db_worker_last_row_entry( self.obj_factory.get_unique_unicode(), txlog1) self._create_db_worker_last_row_entry( self.obj_factory.get_unique_unicode(), txlog2) self.assertEqual((txlog3.id, txlog3.timestamp), utils.get_last_row( worker_name, self.obj_factory.sstore_name)) def test_update_last_row_with_no_data(self): """Test the update_last_row function when no data is present.""" txlog = self.obj_factory.make_transaction_log() worker_name = self.obj_factory.get_unique_unicode() utils.update_last_row( worker_name=worker_name, row_id=txlog.id, timestamp=txlog.timestamp, 
store_name=self.obj_factory.sstore_name) result = self.obj_factory.sstore.execute( u"""SELECT row_id, timestamp FROM txlog.db_worker_last_row WHERE worker_id=?""", (worker_name,)).get_one() self.assertEqual((txlog.id, txlog.timestamp), result) def test_update_last_row_with_data(self): """Test the update_last_row function when data for this worker is present. """ txlog = self.obj_factory.make_transaction_log() txlog2 = self.obj_factory.make_transaction_log() worker_name = self.obj_factory.get_unique_unicode() self._create_db_worker_last_row_entry(worker_name, txlog) utils.update_last_row( worker_name=worker_name, row_id=txlog2.id, timestamp=txlog2.timestamp, store_name=self.obj_factory.sstore_name) result = self.obj_factory.sstore.execute( u"""SELECT row_id, timestamp FROM txlog.db_worker_last_row WHERE worker_id=?""", (worker_name,)).get_one() self.assertEqual((txlog2.id, txlog2.timestamp), result) def test_update_last_row_failure(self): """Test that an exception is raised if update_last_row fails to either update an existing row or insert a new one. """ class DummyStore(object): """A dummy store that returns results with a rowcount of 0.""" def execute(self, statement, params=None, noresult=False): """Dummy execute method that always returns a Result object whose rowcount property is 0. """ return type('DummyResultSet', (object,), dict(rowcount=0)) with patch.object(dbmanager, 'get_shard_store') as mock_get_shard: mock_get_shard.return_value = DummyStore() self.assertRaises( RuntimeError, utils.update_last_row, worker_name=u'test_worker_name', row_id=1, timestamp=datetime.datetime.utcnow(), store_name=self.obj_factory.sstore_name) def _convert_txlogs_to_dicts(self, txlogs): """Convert a list of TransactionLog objects into dictionaries. These dictionaries have the same keys as the ones in the dictionaries returned by get_txn_recs. 
""" dicts = [] for txlog in txlogs: dicts.append(dict( txn_id=txlog.id, node_id=str(txlog.node_id), owner_id=txlog.owner_id, volume_id=str(txlog.volume_id), op_type=txlog.op_type, path=txlog.path, generation=txlog.generation, timestamp=txlog.timestamp, mimetype=txlog.mimetype, old_path=txlog.old_path, extra_data=txlog.extra_data)) return dicts def test_get_txn_recs_no_previous_no_txns(self): """Test getting a batch of transactions when we have not previously processed any rows and the transaction_log table is empty. """ txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=5, last_id=0) self.assertEqual([], txlist) def test_get_txn_recs_no_previous_small_result_set(self): """Test getting a batch of transactions when we have not previously processed any rows and the number of rows in the transaction_log table is smaller than the number requested. """ txlogs = [self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log()] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=5, last_id=0) self.assertEqual(self._convert_txlogs_to_dicts(txlogs), txlist) def test_get_txn_recs_no_previous_exact_result_set(self): """Test getting a batch of transactions when we have not previously processed any rows and the number of rows in the transaction_log table is exactly the number requested. """ txlogs = [self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log()] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=2, last_id=0) self.assertEqual(self._convert_txlogs_to_dicts(txlogs), txlist) def test_get_txn_recs_no_previous_large_result_set(self): """Test getting a batch of transactions when we have not previously processed any rows and the number of rows in the transaction_log table is larger than the number requested. 
""" txlogs = [self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log()] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=1, last_id=0) self.assertEqual(self._convert_txlogs_to_dicts(txlogs[:1]), txlist) def test_get_txn_recs_previous_no_new(self): """Test getting a batch of transactions when we have previously processed rows and there are no newer rows in the transaction_log table. """ self.obj_factory.make_transaction_log() self.obj_factory.make_transaction_log() txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=1, last_id=2) self.assertEqual([], txlist) def test_get_txn_recs_previous_small_new(self): """Test getting a batch of transactions when we have previously processed rows and there are fewer newer rows in the transaction_log table than we requested. """ txlogs = [self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log()] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=5, last_id=1) self.assertEqual(self._convert_txlogs_to_dicts(txlogs[1:]), txlist) def test_get_txn_recs_previous_exact_new(self): """Test getting a batch of transactions when we have previously processed rows and there are the exact number of newer rows in the transaction_log table that we requested. """ txlogs = [self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log()] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=1, last_id=1) self.assertEqual(self._convert_txlogs_to_dicts(txlogs[1:]), txlist) def test_get_txn_recs_previous_large_new(self): """Test getting a batch of transactions when we have previously processed rows and there are the more newer rows in the transaction_log table than we requested. 
""" txlogs = [self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log()] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=1, last_id=1) self.assertEqual(self._convert_txlogs_to_dicts(txlogs[1:2]), txlist) def test_get_txn_recs_respects_order(self): """Test that transaction log entries are returned in order.""" txlogs = [self.obj_factory.make_transaction_log(tx_id=3), self.obj_factory.make_transaction_log(tx_id=2), self.obj_factory.make_transaction_log(tx_id=1)] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=3, last_id=0) txlogs.sort(key=lambda x: x.id) self.assertEqual(self._convert_txlogs_to_dicts(txlogs), txlist) def test_get_txn_recs_unseen(self): """Test getting a batch of transactions when there are unseen ids records those as unseen. Querying again returns unseen transactions if they are now present. """ txlogs = [self.obj_factory.make_transaction_log(tx_id=1), self.obj_factory.make_transaction_log(tx_id=3)] worker_id = self.obj_factory.get_unique_unicode() txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=3, worker_id=worker_id) self.assertEqual( self._convert_txlogs_to_dicts([txlogs[0], txlogs[1]]), txlist) unseen = self.obj_factory.make_transaction_log(tx_id=2) txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=3, last_id=txlogs[1].id, worker_id=worker_id) self.assertEqual( self._convert_txlogs_to_dicts([unseen]), txlist) def test_get_txn_recs_retry_list_no_new_or_retry(self): """Test getting a batch of transactions when there are unseen ids records those as unseen. Querying again when unseen isn't available yet returns nothing. 
""" txlogs = [self.obj_factory.make_transaction_log(tx_id=1), self.obj_factory.make_transaction_log(tx_id=3)] worker_id = self.obj_factory.get_unique_unicode() txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=3, worker_id=worker_id) self.assertEqual( self._convert_txlogs_to_dicts([txlogs[0], txlogs[1]]), txlist) txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=3, last_id=txlogs[1].id, worker_id=worker_id) self.assertEqual(list(), txlist) def test_get_txn_recs_for_partition(self): """Get txlogs for the provided partition ID. When owner_id % num_partitions == partition_id, the txlog is added to the result set, so that it matches the filter by partition. Also, any txlog that is related to sharing is also returned, no matter what the owner_id is. """ owner_id = 1 num_partitions = 8 partition_id = owner_id % num_partitions txlogs = [ self.obj_factory.make_transaction_log(), self.obj_factory.make_transaction_log(owner_id=2), # Different one self.obj_factory.make_transaction_log(), # Share txlogs, but with a different owner, are also returned. 
self.obj_factory.make_transaction_log( owner_id=2, op_type=TransactionLog.OP_SHARE_ACCEPTED), self.obj_factory.make_transaction_log( owner_id=2, op_type=TransactionLog.OP_SHARE_DELETED), ] expected_txlogs = [txlogs[0], txlogs[2], txlogs[3], txlogs[4]] txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=5, last_id=0, num_partitions=num_partitions, partition_id=partition_id ) self.assertEqual(self._convert_txlogs_to_dicts(expected_txlogs), txlist) def test_maintains_newish_txlogs_when_purging(self): """Test that txnlogs not old enough are maintained, instead of being deleted.""" now = datetime.datetime.utcnow() limit_datetime = now - datetime.timedelta(days=7) # Not so old old_datetime = limit_datetime + datetime.timedelta(seconds=1) self.obj_factory.make_transaction_log(tx_id=1) self.obj_factory.make_transaction_log(tx_id=2, timestamp=old_datetime) self.obj_factory.make_transaction_log(tx_id=3) self.obj_factory.make_transaction_log(tx_id=4, timestamp=old_datetime) self.store.commit() removed = utils.delete_old_txlogs(self.obj_factory.sstore_name, timestamp_limit=limit_datetime) self.store.rollback() # Shouldn't affect the deletion result txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=4, last_id=0) self.assertEqual(len(txlist), 4) self.assertEqual(removed, 0) ids = sorted(int(txdict['txn_id']) for txdict in txlist) self.assertEqual(ids, [1, 2, 3, 4]) def test_deletes_old_enough_txlogs(self): """Test that txnlogs old enough are deleted.""" now = datetime.datetime.utcnow() timestamp_limit = now - datetime.timedelta(days=7) # Old enough old_datetime = timestamp_limit txlogs = [ self.obj_factory.make_transaction_log(tx_id=1), self.obj_factory.make_transaction_log( tx_id=2, timestamp=old_datetime), self.obj_factory.make_transaction_log(tx_id=3), self.obj_factory.make_transaction_log( tx_id=4, timestamp=old_datetime), ] self.store.commit() removed = utils.delete_old_txlogs(self.obj_factory.sstore_name, timestamp_limit=timestamp_limit) 
self.store.rollback() # Shouldn't affect the deletion result txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=len(txlogs), last_id=0) self.assertEqual(len(txlist), 2) self.assertEqual(removed, 2) ids = sorted(int(txdict['txn_id']) for txdict in txlist) self.assertEqual(ids, [1, 3]) def test_deletes_old_txlogs_within_quantity_limit(self): """Test that txnlogs old enough are deleted and are within the quantity limit given.""" now = datetime.datetime.utcnow() timestamp_limit = now - datetime.timedelta(days=7) # Old enough old_datetime = timestamp_limit quantity_limit = 2 txlogs = [ self.obj_factory.make_transaction_log(tx_id=1), self.obj_factory.make_transaction_log( tx_id=2, timestamp=old_datetime), self.obj_factory.make_transaction_log(tx_id=3), self.obj_factory.make_transaction_log( tx_id=4, timestamp=old_datetime), self.obj_factory.make_transaction_log( tx_id=5, timestamp=old_datetime), ] self.store.commit() removed = utils.delete_old_txlogs(self.obj_factory.sstore_name, timestamp_limit=timestamp_limit, quantity_limit=quantity_limit) self.store.rollback() # Shouldn't affect the deletion result txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=len(txlogs), last_id=0) self.assertEqual(len(txlist), 3) self.assertEqual(removed, quantity_limit) ids = sorted(int(txdict['txn_id']) for txdict in txlist) self.assertEqual(ids, [1, 3, 5]) def test_deletes_txlogs_slice(self): """Delete a txlog slice by date and quantity.""" now = datetime.datetime.utcnow() timestamp_limit = now - datetime.timedelta(days=7) # Old enough old_dt = timestamp_limit quantity_limit = 2 txlogs = [ self.obj_factory.make_transaction_log(tx_id=1), self.obj_factory.make_transaction_log(tx_id=2, timestamp=old_dt), self.obj_factory.make_transaction_log(tx_id=3), self.obj_factory.make_transaction_log(tx_id=4, timestamp=old_dt), self.obj_factory.make_transaction_log(tx_id=5), ] self.store.commit() removed = utils.delete_txlogs_slice(self.obj_factory.sstore_name, 
date=now.date(), quantity_limit=quantity_limit) self.store.rollback() # Shouldn't affect the deletion result txlist = utils.get_txn_recs( self.obj_factory.sstore_name, num_recs=len(txlogs), last_id=0) self.assertEqual(len(txlist), 3) self.assertEqual(removed, quantity_limit) ids = sorted(int(txdict['txn_id']) for txdict in txlist) self.assertIn(2, ids) self.assertIn(4, ids) def test_get_row_by_time_with_no_data(self): """Test the get_row_by_time function when no data is present.""" txid, _ = utils.get_row_by_time(self.obj_factory.sstore_name, datetime.datetime.utcnow()) self.assertEqual(txid, None) def test_get_row_by_time_with_data(self): """Test get_row_by_time function when data is present.""" ts = datetime.datetime.utcnow() txlogs = [ self.obj_factory.make_transaction_log( timestamp=ts + datetime.timedelta(i, 0)) for i in range(5)] tstamp = txlogs[2].timestamp txid, newtstamp = utils.get_row_by_time(self.obj_factory.sstore_name, tstamp) self.assertEqual(txid, txlogs[2].id) self.assertEqual(newtstamp, tstamp) def test_get_row_by_time_timestamp_twice(self): """Test get_row_by_time having two lines with same timestamp.""" ts = datetime.datetime.utcnow() txlogs = [ self.obj_factory.make_transaction_log( timestamp=ts + datetime.timedelta(i, 0)) for i in range(5)] # put the timestamp of [3] into [1], the function should return the # id of [1] tstamp = txlogs[1].timestamp = txlogs[3].timestamp txid, newtstamp = utils.get_row_by_time(self.obj_factory.sstore_name, tstamp) self.assertEqual(txid, txlogs[1].id) self.assertEqual(newtstamp, tstamp) def test_get_row_by_time_not_exact(self): """Test get_row_by_time not giving an exact timestamp.""" ts = datetime.datetime.utcnow() txlogs = [ self.obj_factory.make_transaction_log( timestamp=ts + datetime.timedelta(i, 0)) for i in range(5)] # get a timestamp in the middle of [2] and [3], the function should # return the id of [3] tx2, tx3 = txlogs[2:4] delta = (txlogs[3].timestamp - txlogs[2].timestamp) / 2 tstamp = 
txlogs[2].timestamp + delta txid, newtstamp = utils.get_row_by_time(self.obj_factory.sstore_name, tstamp) self.assertEqual(txid, txlogs[3].id) self.assertEqual(newtstamp, txlogs[3].timestamp) def test_get_row_by_time_nothing_found(self): """Test get_row_by_time with a big enough timestamp.""" txlogs = [self.obj_factory.make_transaction_log() for i in range(2)] tstamp = txlogs[-1].timestamp + datetime.timedelta(seconds=1) txid, newtstamp = utils.get_row_by_time(self.obj_factory.sstore_name, tstamp) self.assertEqual(txid, None) self.assertEqual(newtstamp, None) def test_cleans_last_rows_for_workers_not_in_list(self): """Test that keep_last_rows_for_worker_names removes all rows from workers not in the list of given names.""" initial_workers = [ u'worker1', u'worker2', u'worker3', u'worker4', ] kept_workers = [ u'worker1', u'worker2', u'worker4', ] for worker_name in initial_workers: txlog = self.obj_factory.make_transaction_log() self._create_db_worker_last_row_entry(worker_name, txlog) utils.keep_last_rows_for_worker_names(self.obj_factory.sstore_name, kept_workers) actual_worker_names = self._find_last_row_worker_names() self.assertEqual(actual_worker_names, kept_workers) def test_cleans_last_rows_for_workers_not_in_list_of_strings(self): """Test that keep_last_rows_for_worker_names removes all rows from workers not in the list of given names as plain strings.""" initial_workers = [ 'worker1', 'worker2', 'worker3', 'worker4', ] kept_workers = [ 'worker1', 'worker2', 'worker4', ] for worker_name in initial_workers: txlog = self.obj_factory.make_transaction_log() self._create_db_worker_last_row_entry(worker_name, txlog) utils.keep_last_rows_for_worker_names(self.obj_factory.sstore_name, kept_workers) actual_worker_names = self._find_last_row_worker_names() self.assertEqual(actual_worker_names, kept_workers)
unknown
codeparrot/codeparrot-clean
"""Calendar printing functions Note when comparing these calendars to the ones printed by cal(1): By default, these calendars have Monday as the first day of the week, and Sunday as the last (the European convention). Use setfirstweekday() to set the first day of the week (0=Monday, 6=Sunday).""" from __future__ import with_statement import sys import datetime import locale as _locale __all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", "firstweekday", "isleap", "leapdays", "weekday", "monthrange", "monthcalendar", "prmonth", "month", "prcal", "calendar", "timegm", "month_name", "month_abbr", "day_name", "day_abbr"] # Exception raised for bad input (with string parameter for details) error = ValueError # Exceptions raised for bad input class IllegalMonthError(ValueError): def __init__(self, month): self.month = month def __str__(self): return "bad month number %r; must be 1-12" % self.month class IllegalWeekdayError(ValueError): def __init__(self, weekday): self.weekday = weekday def __str__(self): return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday # Constants for months referenced later January = 1 February = 2 # Number of days per month (except for February in leap years) mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # This module used to have hard-coded lists of day and month names, as # English strings. The classes following emulate a read-only version of # that, but supply localized names. Note that the values are computed # fresh on each call, in case the user changes locale between calls. class _localized_month: _months = [datetime.date(2001, i+1, 1).strftime for i in xrange(12)] _months.insert(0, lambda x: "") def __init__(self, format): self.format = format def __getitem__(self, i): funcs = self._months[i] if isinstance(i, slice): return [f(self.format) for f in funcs] else: return funcs(self.format) def __len__(self): return 13 class _localized_day: # January 1, 2001, was a Monday. 
_days = [datetime.date(2001, 1, i+1).strftime for i in xrange(7)] def __init__(self, format): self.format = format def __getitem__(self, i): funcs = self._days[i] if isinstance(i, slice): return [f(self.format) for f in funcs] else: return funcs(self.format) def __len__(self): return 7 # Full and abbreviated names of weekdays day_name = _localized_day('%A') day_abbr = _localized_day('%a') # Full and abbreviated names of months (1-based arrays!!!) month_name = _localized_month('%B') month_abbr = _localized_month('%b') # Constants for weekdays (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) def isleap(year): """Return 1 for leap years, 0 for non-leap years.""" return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) def leapdays(y1, y2): """Return number of leap years in range [y1, y2). Assume y1 <= y2.""" y1 -= 1 y2 -= 1 return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400) def weekday(year, month, day): """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12), day (1-31).""" return datetime.date(year, month, day).weekday() def monthrange(year, month): """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for year, month.""" if not 1 <= month <= 12: raise IllegalMonthError(month) day1 = weekday(year, month, 1) ndays = mdays[month] + (month == February and isleap(year)) return day1, ndays class Calendar(object): """ Base calendar class. This class doesn't do any formatting. It simply provides data to subclasses. """ def __init__(self, firstweekday=0): self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday def getfirstweekday(self): return self._firstweekday % 7 def setfirstweekday(self, firstweekday): self._firstweekday = firstweekday firstweekday = property(getfirstweekday, setfirstweekday) def iterweekdays(self): """ Return a iterator for one week of weekday numbers starting with the configured first one. 
""" for i in xrange(self.firstweekday, self.firstweekday + 7): yield i%7 def itermonthdates(self, year, month): """ Return an iterator for one month. The iterator will yield datetime.date values and will always iterate through complete weeks, so it will yield dates outside the specified month. """ date = datetime.date(year, month, 1) # Go back to the beginning of the week days = (date.weekday() - self.firstweekday) % 7 date -= datetime.timedelta(days=days) oneday = datetime.timedelta(days=1) while True: yield date date += oneday if date.month != month and date.weekday() == self.firstweekday: break def itermonthdays2(self, year, month): """ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ for date in self.itermonthdates(year, month): if date.month != month: yield (0, date.weekday()) else: yield (date.day, date.weekday()) def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ for date in self.itermonthdates(year, month): if date.month != month: yield 0 else: yield date.day def monthdatescalendar(self, year, month): """ Return a matrix (list of lists) representing a month's calendar. Each row represents a week; week entries are datetime.date values. """ dates = list(self.itermonthdates(year, month)) return [ dates[i:i+7] for i in xrange(0, len(dates), 7) ] def monthdays2calendar(self, year, month): """ Return a matrix representing a month's calendar. Each row represents a week; week entries are (day number, weekday number) tuples. Day numbers outside this month are zero. """ days = list(self.itermonthdays2(year, month)) return [ days[i:i+7] for i in xrange(0, len(days), 7) ] def monthdayscalendar(self, year, month): """ Return a matrix representing a month's calendar. Each row represents a week; days outside this month are zero. 
""" days = list(self.itermonthdays(year, month)) return [ days[i:i+7] for i in xrange(0, len(days), 7) ] def yeardatescalendar(self, year, width=3): """ Return the data for the specified year ready for formatting. The return value is a list of month rows. Each month row contains upto width months. Each month contains between 4 and 6 weeks and each week contains 1-7 days. Days are datetime.date objects. """ months = [ self.monthdatescalendar(year, i) for i in xrange(January, January+12) ] return [months[i:i+width] for i in xrange(0, len(months), width) ] def yeardays2calendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are (day number, weekday number) tuples. Day numbers outside this month are zero. """ months = [ self.monthdays2calendar(year, i) for i in xrange(January, January+12) ] return [months[i:i+width] for i in xrange(0, len(months), width) ] def yeardayscalendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero. """ months = [ self.monthdayscalendar(year, i) for i in xrange(January, January+12) ] return [months[i:i+width] for i in xrange(0, len(months), width) ] class TextCalendar(Calendar): """ Subclass of Calendar that outputs a calendar as a simple plain text similar to the UNIX program cal. """ def prweek(self, theweek, width): """ Print a single week (no newline). """ print self.formatweek(theweek, width), def formatday(self, day, weekday, width): """ Returns a formatted day. """ if day == 0: s = '' else: s = '%2i' % day # right-align single-digit days return s.center(width) def formatweek(self, theweek, width): """ Returns a single week in a string (no newline). 
""" return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek) def formatweekday(self, day, width): """ Returns a formatted week day name. """ if width >= 9: names = day_name else: names = day_abbr return names[day][:width].center(width) def formatweekheader(self, width): """ Return a header for a week. """ return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays()) def formatmonthname(self, theyear, themonth, width, withyear=True): """ Return a formatted month name. """ s = month_name[themonth] if withyear: s = "%s %r" % (s, theyear) return s.center(width) def prmonth(self, theyear, themonth, w=0, l=0): """ Print a month's calendar. """ print self.formatmonth(theyear, themonth, w, l), def formatmonth(self, theyear, themonth, w=0, l=0): """ Return a month's calendar string (multi-line). """ w = max(2, w) l = max(1, l) s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1) s = s.rstrip() s += '\n' * l s += self.formatweekheader(w).rstrip() s += '\n' * l for week in self.monthdays2calendar(theyear, themonth): s += self.formatweek(week, w).rstrip() s += '\n' * l return s def formatyear(self, theyear, w=2, l=1, c=6, m=3): """ Returns a year's calendar as a multi-line string. 
""" w = max(2, w) l = max(1, l) c = max(2, c) colwidth = (w + 1) * 7 - 1 v = [] a = v.append a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip()) a('\n'*l) header = self.formatweekheader(w) for (i, row) in enumerate(self.yeardays2calendar(theyear, m)): # months in this row months = xrange(m*i+1, min(m*(i+1)+1, 13)) a('\n'*l) names = (self.formatmonthname(theyear, k, colwidth, False) for k in months) a(formatstring(names, colwidth, c).rstrip()) a('\n'*l) headers = (header for k in months) a(formatstring(headers, colwidth, c).rstrip()) a('\n'*l) # max number of weeks for this row height = max(len(cal) for cal in row) for j in xrange(height): weeks = [] for cal in row: if j >= len(cal): weeks.append('') else: weeks.append(self.formatweek(cal[j], w)) a(formatstring(weeks, colwidth, c).rstrip()) a('\n' * l) return ''.join(v) def pryear(self, theyear, w=0, l=0, c=6, m=3): """Print a year's calendar.""" print self.formatyear(theyear, w, l, c, m) class HTMLCalendar(Calendar): """ This calendar returns complete HTML pages. """ # CSS classes for the day <td>s cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] def formatday(self, day, weekday): """ Return a day as a table cell. """ if day == 0: return '<td class="noday">&nbsp;</td>' # day outside month else: return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day) def formatweek(self, theweek): """ Return a complete week as a table row. """ s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) return '<tr>%s</tr>' % s def formatweekday(self, day): """ Return a weekday name as a table header. """ return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day]) def formatweekheader(self): """ Return a header for a week as a table row. """ s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) return '<tr>%s</tr>' % s def formatmonthname(self, theyear, themonth, withyear=True): """ Return a month name as a table row. 
""" if withyear: s = '%s %s' % (month_name[themonth], theyear) else: s = '%s' % month_name[themonth] return '<tr><th colspan="7" class="month">%s</th></tr>' % s def formatmonth(self, theyear, themonth, withyear=True): """ Return a formatted month as a table. """ v = [] a = v.append a('<table border="0" cellpadding="0" cellspacing="0" class="month">') a('\n') a(self.formatmonthname(theyear, themonth, withyear=withyear)) a('\n') a(self.formatweekheader()) a('\n') for week in self.monthdays2calendar(theyear, themonth): a(self.formatweek(week)) a('\n') a('</table>') a('\n') return ''.join(v) def formatyear(self, theyear, width=3): """ Return a formatted year as a table of tables. """ v = [] a = v.append width = max(width, 1) a('<table border="0" cellpadding="0" cellspacing="0" class="year">') a('\n') a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear)) for i in xrange(January, January+12, width): # months in this row months = xrange(i, min(i+width, 13)) a('<tr>') for m in months: a('<td>') a(self.formatmonth(theyear, m, withyear=False)) a('</td>') a('</tr>') a('</table>') return ''.join(v) def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None): """ Return a formatted year as a complete HTML page. 
""" if encoding is None: encoding = sys.getdefaultencoding() v = [] a = v.append a('<?xml version="1.0" encoding="%s"?>\n' % encoding) a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n') a('<html>\n') a('<head>\n') a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding) if css is not None: a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css) a('<title>Calendar for %d</title\n' % theyear) a('</head>\n') a('<body>\n') a(self.formatyear(theyear, width)) a('</body>\n') a('</html>\n') return ''.join(v).encode(encoding, "xmlcharrefreplace") class TimeEncoding: def __init__(self, locale): self.locale = locale def __enter__(self): self.oldlocale = _locale.setlocale(_locale.LC_TIME, self.locale) return _locale.getlocale(_locale.LC_TIME)[1] def __exit__(self, *args): _locale.setlocale(_locale.LC_TIME, self.oldlocale) class LocaleTextCalendar(TextCalendar): """ This class can be passed a locale name in the constructor and will return month and weekday names in the specified locale. If this locale includes an encoding all strings containing month and weekday names will be returned as unicode. 
""" def __init__(self, firstweekday=0, locale=None): TextCalendar.__init__(self, firstweekday) if locale is None: locale = _locale.getdefaultlocale() self.locale = locale def formatweekday(self, day, width): with TimeEncoding(self.locale) as encoding: if width >= 9: names = day_name else: names = day_abbr name = names[day] if encoding is not None: name = name.decode(encoding) return name[:width].center(width) def formatmonthname(self, theyear, themonth, width, withyear=True): with TimeEncoding(self.locale) as encoding: s = month_name[themonth] if encoding is not None: s = s.decode(encoding) if withyear: s = "%s %r" % (s, theyear) return s.center(width) class LocaleHTMLCalendar(HTMLCalendar): """ This class can be passed a locale name in the constructor and will return month and weekday names in the specified locale. If this locale includes an encoding all strings containing month and weekday names will be returned as unicode. """ def __init__(self, firstweekday=0, locale=None): HTMLCalendar.__init__(self, firstweekday) if locale is None: locale = _locale.getdefaultlocale() self.locale = locale def formatweekday(self, day): with TimeEncoding(self.locale) as encoding: s = day_abbr[day] if encoding is not None: s = s.decode(encoding) return '<th class="%s">%s</th>' % (self.cssclasses[day], s) def formatmonthname(self, theyear, themonth, withyear=True): with TimeEncoding(self.locale) as encoding: s = month_name[themonth] if encoding is not None: s = s.decode(encoding) if withyear: s = '%s %s' % (s, theyear) return '<tr><th colspan="7" class="month">%s</th></tr>' % s # Support for old module level interface c = TextCalendar() firstweekday = c.getfirstweekday def setfirstweekday(firstweekday): if not MONDAY <= firstweekday <= SUNDAY: raise IllegalWeekdayError(firstweekday) c.firstweekday = firstweekday monthcalendar = c.monthdayscalendar prweek = c.prweek week = c.formatweek weekheader = c.formatweekheader prmonth = c.prmonth month = c.formatmonth calendar = c.formatyear 
prcal = c.pryear # Spacing of month columns for multi-column year calendar _colwidth = 7*3 - 1 # Amount printed by prweek() _spacing = 6 # Number of spaces between columns def format(cols, colwidth=_colwidth, spacing=_spacing): """Prints multi-column formatting for year calendars""" print formatstring(cols, colwidth, spacing) def formatstring(cols, colwidth=_colwidth, spacing=_spacing): """Returns a string formatted from n strings, centered within n columns.""" spacing *= ' ' return spacing.join(c.center(colwidth) for c in cols) EPOCH = 1970 _EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal() def timegm(tuple): """Unrelated but handy function to calculate Unix timestamp from GMT.""" year, month, day, hour, minute, second = tuple[:6] days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 hours = days*24 + hour minutes = hours*60 + minute seconds = minutes*60 + second return seconds def main(args): import optparse parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]") parser.add_option( "-w", "--width", dest="width", type="int", default=2, help="width of date column (default 2, text only)" ) parser.add_option( "-l", "--lines", dest="lines", type="int", default=1, help="number of lines for each week (default 1, text only)" ) parser.add_option( "-s", "--spacing", dest="spacing", type="int", default=6, help="spacing between months (default 6, text only)" ) parser.add_option( "-m", "--months", dest="months", type="int", default=3, help="months per row (default 3, text only)" ) parser.add_option( "-c", "--css", dest="css", default="calendar.css", help="CSS to use for page (html only)" ) parser.add_option( "-L", "--locale", dest="locale", default=None, help="locale to be used from month and weekday names" ) parser.add_option( "-e", "--encoding", dest="encoding", default=None, help="Encoding to use for output" ) parser.add_option( "-t", "--type", dest="type", default="text", choices=("text", "html"), help="output type (text or html)" 
) (options, args) = parser.parse_args(args) if options.locale and not options.encoding: parser.error("if --locale is specified --encoding is required") sys.exit(1) locale = options.locale, options.encoding if options.type == "html": if options.locale: cal = LocaleHTMLCalendar(locale=locale) else: cal = HTMLCalendar() encoding = options.encoding if encoding is None: encoding = sys.getdefaultencoding() optdict = dict(encoding=encoding, css=options.css) if len(args) == 1: print cal.formatyearpage(datetime.date.today().year, **optdict) elif len(args) == 2: print cal.formatyearpage(int(args[1]), **optdict) else: parser.error("incorrect number of arguments") sys.exit(1) else: if options.locale: cal = LocaleTextCalendar(locale=locale) else: cal = TextCalendar() optdict = dict(w=options.width, l=options.lines) if len(args) != 3: optdict["c"] = options.spacing optdict["m"] = options.months if len(args) == 1: result = cal.formatyear(datetime.date.today().year, **optdict) elif len(args) == 2: result = cal.formatyear(int(args[1]), **optdict) elif len(args) == 3: result = cal.formatmonth(int(args[1]), int(args[2]), **optdict) else: parser.error("incorrect number of arguments") sys.exit(1) if options.encoding: result = result.encode(options.encoding) print result if __name__ == "__main__": main(sys.argv)
unknown
codeparrot/codeparrot-clean
# coding=utf-8 """ [Hosted Graphite](https://www.hostedgraphite.com/) is the powerful open-source application metrics system used by hundreds of companies. We take away the headaches of scaling, maintenance, and upgrades and let you do what you do best - write great software. #### Configuration Enable this handler * handlers = diamond.handler.hostedgraphite.HostedGraphiteHandler, * apikey = API_KEY """ from Handler import Handler from graphite import GraphiteHandler class HostedGraphiteHandler(Handler): def __init__(self, config=None): """ Create a new instance of the HostedGraphiteHandler class """ # Initialize Handler Handler.__init__(self, config) self.key = self.config['apikey'].lower().strip() self.graphite = GraphiteHandler(self.config) def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(HostedGraphiteHandler, self).get_default_config_help() config.update({ 'apikey': 'Api key to use', 'host': 'Hostname', 'port': 'Port', 'proto': 'udp or tcp', 'timeout': '', 'batch': 'How many to store before sending to the graphite server', 'max_backlog_multiplier': 'how many batches to store before trimming', # NOQA 'trim_backlog_multiplier': 'Trim down how many batches', }) return config def get_default_config(self): """ Return the default config for the handler """ config = super(HostedGraphiteHandler, self).get_default_config() config.update({ 'apikey': '', 'host': 'carbon.hostedgraphite.com', 'port': 2003, 'proto': 'tcp', 'timeout': 15, 'batch': 1, 'max_backlog_multiplier': 5, 'trim_backlog_multiplier': 4, }) return config def process(self, metric): """ Process a metric by sending it to graphite """ metric = self.key + '.' + str(metric) self.graphite.process(metric) def _process(self, metric): """ Process a metric by sending it to graphite """ metric = self.key + '.' + str(metric) self.graphite._process(metric) def _flush(self): self.graphite._flush() def flush(self): self.graphite.flush()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.client.tests import io.ktor.client.call.* import io.ktor.client.request.* import io.ktor.client.request.forms.* import io.ktor.client.test.base.* import io.ktor.http.* import io.ktor.http.content.* import io.ktor.utils.io.* import kotlinx.io.readByteArray import kotlin.test.Test import kotlin.test.assertEquals import kotlin.test.assertTrue import kotlin.test.fail /** * Tests client request with multi-part form data. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.tests.MultiPartFormDataTest) */ class MultiPartFormDataTest : ClientLoader() { private val fileDispositionHeaders = Headers.build { append( HttpHeaders.ContentDisposition, """form-data; name="file"; filename="test.png"""" ) } @Test fun testMultiPartFormData() = clientTests(except("native:*")) { test { client -> val result = client.preparePost("$TEST_SERVER/multipart") { setBody( MultiPartFormDataContent( formData { append("file", ByteArray(1024 * 1024), fileDispositionHeaders) } ) ) }.execute() assertEquals(HttpStatusCode.OK, result.status) } } @Test fun testEmptyMultiPartFormData() = clientTests { test { client -> val response = client.submitFormWithBinaryData("$TEST_SERVER/multipart/empty", emptyList()) assertTrue(response.status.isSuccess()) } } @Test fun testReceiveMultiPartFormData() = clientTests { test { client -> val response = client.post("$TEST_SERVER/multipart/receive") val multipart = response.body<MultiPartData>() var textFound = false var fileFound = false multipart.forEachPart { part -> when (part) { is PartData.FormItem -> { assertEquals("text", part.name) assertEquals("Hello, World!", part.value) textFound = true } is PartData.FileItem -> { assertEquals("file", part.name) assertEquals("test.bin", part.originalFileName) val bytes = part.provider().readRemaining().readByteArray() assertEquals(1024, bytes.size) for (i in 
bytes.indices) { assertEquals(i.toByte(), bytes[i]) } fileFound = true } else -> fail("Unexpected part type: ${part::class.simpleName}") } part.dispose() } assertTrue(textFound, "Text part not found") assertTrue(fileFound, "File part not found") } } }
kotlin
github
https://github.com/ktorio/ktor
ktor-client/ktor-client-tests/common/test/io/ktor/client/tests/MultiPartFormDataTest.kt
#! /usr/bin/env python import os import unittest from pprint import pprint from peyotl.api import TreeCollectionsAPI from peyotl.test.support import test_collections_api, raise_http_error_with_more_detail from peyotl.test.support.pathmap import get_test_ot_service_domains from peyotl.utility import get_logger from requests.exceptions import HTTPError _LOG = get_logger(__name__) @unittest.skipIf('RUN_WEB_SERVICE_TESTS' not in os.environ, 'RUN_WEB_SERVICE_TESTS is not in your environment, so tests that use ' 'Open Tree of Life web services are disabled.') class TestTreeCollectionsAPI(unittest.TestCase): def setUp(self): self.domains = get_test_ot_service_domains() def testPushFailureState(self): tca = TreeCollectionsAPI(self.domains, get_from='api') sl = tca.push_failure_state if sl[0] is not True: pprint('\npush-failure (possibly a stale result? re-run to find out!):\n') pprint(sl) self.assertTrue(sl[0] is True) def testFetchCollectionRemote(self): # drive RESTful API via wrapper tca = TreeCollectionsAPI(self.domains, get_from='api') try: c = tca.get_collection('jimallman/my-test-collection') except HTTPError as err: raise_http_error_with_more_detail(err) except Exception as err: raise err else: # N.B. we get the JSON "wrapper" with history, etc. cn = c['data']['name'] self.assertTrue(cn == u'My test collection') def testRemoteSugar(self): tca = TreeCollectionsAPI(self.domains, get_from='api') try: test_collections_api(self, tca) except HTTPError as err: raise_http_error_with_more_detail(err) except Exception as err: raise err def testExternalSugar(self): tca = TreeCollectionsAPI(self.domains, get_from='external') test_collections_api(self, tca) def testConfig(self): tca = TreeCollectionsAPI(self.domains, get_from='api') x = tca.store_config self.assertTrue('assumed_doc_version' in x.keys()) # TODO: add testExternalURL and support for this call in collections API? if __name__ == "__main__": unittest.main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python """Tests for grr.lib.email_alerts.""" import mock from grr.lib import email_alerts from grr.lib import flags from grr.lib import test_lib from grr.lib.rdfvalues import standard as rdf_standard class SendEmailTests(test_lib.GRRBaseTest): def setUp(self): super(SendEmailTests, self).setUp() # We have to stop mail_stubber, otherwise email_alerts.EMAIL_ALERTER will # be just a stub and there will be nothing to test. self.mail_stubber.Stop() def testSplitEmailsAndAppendEmailDomain(self): self.assertEqual( email_alerts.EMAIL_ALERTER.SplitEmailsAndAppendEmailDomain(""), []) def testSendEmail(self): # This is already patched out in tests but in this specific test we # are interested in the results so we just add another patcher. self.smtp_patcher = mock.patch("smtplib.SMTP") self.mock_smtp = self.smtp_patcher.start() try: testdomain = "test.com" with test_lib.ConfigOverrider({"Logging.domain": testdomain}): smtp_conn = self.mock_smtp.return_value # Single fully qualified address to_address = "testto@example.com" from_address = "me@example.com" subject = "test" message = "" email_alerts.EMAIL_ALERTER.SendEmail(to_address, from_address, subject, message) c_from, c_to, msg = smtp_conn.sendmail.call_args[0] self.assertItemsEqual(from_address, c_from) self.assertItemsEqual([to_address], c_to) self.assertFalse("CC:" in msg) # Single fully qualified address as rdf_standard.DomainEmailAddress to_address = rdf_standard.DomainEmailAddress("testto@%s" % testdomain) from_address = "me@example.com" subject = "test" message = "" email_alerts.EMAIL_ALERTER.SendEmail(to_address, from_address, subject, message) c_from, c_to, msg = smtp_conn.sendmail.call_args[0] self.assertItemsEqual(from_address, c_from) self.assertItemsEqual([to_address], c_to) self.assertFalse("CC:" in msg) # Multiple unqualified to addresses, one cc to_address = "testto,abc,def" to_address_expected = [ x + testdomain for x in ["testto@", "abc@", "def@", "testcc@"] ] cc_address = "testcc" 
email_alerts.EMAIL_ALERTER.SendEmail( to_address, from_address, subject, message, cc_addresses=cc_address) c_from, c_to, message = smtp_conn.sendmail.call_args[0] self.assertItemsEqual(from_address, c_from) self.assertItemsEqual(to_address_expected, c_to) self.assertTrue("CC: testcc@%s" % testdomain in message) # Multiple unqualified to addresses as DomainEmailAddress, one cc to_address = [ rdf_standard.DomainEmailAddress("testto@%s" % testdomain), rdf_standard.DomainEmailAddress("abc@%s" % testdomain), rdf_standard.DomainEmailAddress("def@%s" % testdomain) ] to_address_expected = [ x + testdomain for x in ["testto@", "abc@", "def@", "testcc@"] ] cc_address = "testcc" email_alerts.EMAIL_ALERTER.SendEmail( to_address, from_address, subject, message, cc_addresses=cc_address) c_from, c_to, message = smtp_conn.sendmail.call_args[0] self.assertItemsEqual(from_address, c_from) self.assertItemsEqual(to_address_expected, c_to) self.assertTrue("CC: testcc@%s" % testdomain in message) # Multiple unqualified to addresses, two cc, message_id set to_address = "testto,abc,def" to_address_expected = [ x + testdomain for x in ["testto@", "abc@", "def@", "testcc@", "testcc2@"] ] cc_address = "testcc,testcc2" email_msg_id = "123123" email_alerts.EMAIL_ALERTER.SendEmail( to_address, from_address, subject, message, cc_addresses=cc_address, message_id=email_msg_id) c_from, c_to, message = smtp_conn.sendmail.call_args[0] self.assertItemsEqual(from_address, c_from) self.assertItemsEqual(to_address_expected, c_to) self.assertTrue("CC: testcc@%s,testcc2@%s" % (testdomain, testdomain) in message) self.assertTrue("Message-ID: %s" % email_msg_id) # Multiple address types, two cc, no default domain with test_lib.ConfigOverrider({"Logging.domain": None}): to_address = [ "testto@localhost", "hij", rdf_standard.DomainEmailAddress("klm@localhost") ] cc_address = "testcc,testcc2@localhost" to_address_expected = [ "testto@localhost", "hij@localhost", "klm@localhost", "testcc@localhost", 
"testcc2@localhost" ] email_alerts.EMAIL_ALERTER.SendEmail( to_address, from_address, subject, message, cc_addresses=cc_address) c_from, c_to, message = smtp_conn.sendmail.call_args[0] self.assertItemsEqual(from_address, c_from) self.assertItemsEqual(to_address_expected, c_to) self.assertTrue("CC: testcc@%s,testcc2@%s" % (testdomain, testdomain) in message) finally: self.smtp_patcher.stop() def main(argv): test_lib.GrrTestProgram(argv=argv) if __name__ == "__main__": flags.StartMain(main)
unknown
codeparrot/codeparrot-clean
# -*- encoding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Recruitment', 'version': '1.0', 'category': 'Human Resources/Recruitment', 'sequence': 90, 'summary': 'Track your recruitment pipeline', 'description': "", 'website': 'https://www.odoo.com/page/recruitment', 'depends': [ 'hr', 'calendar', 'fetchmail', 'utm', 'attachment_indexation', 'web_tour', 'digest', ], 'data': [ 'security/hr_recruitment_security.xml', 'security/ir.model.access.csv', 'data/hr_recruitment_data.xml', 'data/digest_data.xml', 'data/hr_recruitment_templates.xml', 'views/hr_recruitment_views.xml', 'views/res_config_settings_views.xml', 'views/hr_recruitment_templates.xml', 'views/hr_department_views.xml', 'views/hr_job_views.xml', 'views/mail_activity_views.xml', 'views/digest_views.xml', ], 'demo': [ 'data/hr_recruitment_demo.xml', ], 'installable': True, 'auto_install': False, 'application': True, }
unknown
codeparrot/codeparrot-clean
//! Module to handle integer operations. //! This module exists because some integer types are not supported on some gcc platforms, e.g. //! 128-bit integers on 32-bit platforms and thus require to be handled manually. // cSpell:words cmpti divti modti mulodi muloti udivti umodti use gccjit::{ BinaryOp, CType, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp, }; use rustc_abi::{CanonAbi, Endian, ExternAbi}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, BuilderMethods, OverflowOp}; use rustc_middle::ty::{self, Ty}; use rustc_target::callconv::{ArgAbi, ArgAttributes, FnAbi, PassMode}; use rustc_type_ir::{Interner, TyKind}; use crate::builder::{Builder, ToGccComp}; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { // 128-bit unsigned %: __umodti3 self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b) } pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { // 128-bit signed %: __modti3 self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b) } pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> { let typ = a.get_type(); if self.is_native_int_type_or_bool(typ) { let operation = if typ.is_bool() { UnaryOp::LogicalNegate } else { UnaryOp::BitwiseNegate }; self.cx.context.new_unary_op(self.location, operation, typ, a) } else { let element_type = typ.dyncast_array().expect("element type"); self.concat_low_high_rvalues( typ, self.cx.context.new_unary_op( self.location, UnaryOp::BitwiseNegate, element_type, self.low(a), ), self.cx.context.new_unary_op( self.location, UnaryOp::BitwiseNegate, element_type, self.high(a), ), ) } } pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> { let a_type = a.get_type(); if self.is_native_int_type(a_type) || a_type.is_vector() { 
self.cx.context.new_unary_op(self.location, UnaryOp::Minus, a.get_type(), a) } else { self.gcc_add(self.gcc_not(a), self.gcc_int(a_type, 1)) } } pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b, self.location) } pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); let a_native = self.is_native_int_type(a_type); let b_native = self.is_native_int_type(b_type); if a_native && b_native { // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number. // TODO(antoyo): cast to unsigned to do a logical shift if that does not work. if a_type.is_signed(self) != b_type.is_signed(self) { let b = self.context.new_cast(self.location, b, a_type); a >> b } else { let a_size = a_type.get_size(); let b_size = b_type.get_size(); match a_size.cmp(&b_size) { std::cmp::Ordering::Equal => a >> b, _ => { // NOTE: it is OK to cast even if b has a type bigger than a because b has // been masked by codegen_ssa before calling Builder::lshr or // Builder::ashr. let b = self.context.new_cast(self.location, b, a_type); a >> b } } } } else if a_type.is_vector() && b_type.is_vector() { a >> b } else if a_native && !b_native { self.gcc_lshr(a, self.gcc_int_cast(b, a_type)) } else { // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most // significant half of the number) which uses lshr. 
let native_int_type = a_type.dyncast_array().expect("get element type"); let func = self.current_func(); let then_block = func.new_block("then"); let else_block = func.new_block("else"); let after_block = func.new_block("after"); let b0_block = func.new_block("b0"); let actual_else_block = func.new_block("actual_else"); let result = func.new_local(self.location, a_type, "shiftResult"); let sixty_four = self.gcc_int(native_int_type, 64); let sixty_three = self.gcc_int(native_int_type, 63); let zero = self.gcc_zero(native_int_type); let b = self.gcc_int_cast(b, native_int_type); let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero); self.llbb().end_with_conditional(self.location, condition, then_block, else_block); let shift_value = self.gcc_sub(b, sixty_four); let high = self.high(a); let sign = if a_type.is_signed(self) { high >> sixty_three } else { zero }; let array_value = self.concat_low_high_rvalues(a_type, high >> shift_value, sign); then_block.add_assignment(self.location, result, array_value); then_block.end_with_jump(self.location, after_block); let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero); else_block.end_with_conditional(self.location, condition, b0_block, actual_else_block); b0_block.add_assignment(self.location, result, a); b0_block.end_with_jump(self.location, after_block); let shift_value = self.gcc_sub(sixty_four, b); // NOTE: cast low to its unsigned type in order to perform a logical right shift. 
let unsigned_type = native_int_type.to_unsigned(self.cx); let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type); let shifted_low = casted_low >> self.context.new_cast(self.location, b, unsigned_type); let shifted_low = self.context.new_cast(self.location, shifted_low, native_int_type); let array_value = self.concat_low_high_rvalues( a_type, (high << shift_value) | shifted_low, high >> b, ); actual_else_block.add_assignment(self.location, result, array_value); actual_else_block.end_with_jump(self.location, after_block); // NOTE: since jumps were added in a place rustc does not expect, the current block in the // state need to be updated. self.switch_to_block(after_block); result.to_rvalue() } } fn additive_operation( &self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>, ) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)) || (a_type.is_vector() && b_type.is_vector()) { if a_type != b_type { if a_type.is_vector() { // Vector types need to be bitcast. // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. 
b = self.context.new_bitcast(self.location, b, a_type); } else { b = self.context.new_cast(self.location, b, a_type); } } self.context.new_binary_op(self.location, operation, a_type, a, b) } else { debug_assert!(a_type.dyncast_array().is_some()); debug_assert!(b_type.dyncast_array().is_some()); let signed = a_type.is_compatible_with(self.i128_type); let func_name = match (operation, signed) { (BinaryOp::Plus, true) => "__rust_i128_add", (BinaryOp::Plus, false) => "__rust_u128_add", (BinaryOp::Minus, true) => "__rust_i128_sub", (BinaryOp::Minus, false) => "__rust_u128_sub", _ => unreachable!("unexpected additive operation {:?}", operation), }; let param_a = self.context.new_parameter(self.location, a_type, "a"); let param_b = self.context.new_parameter(self.location, b_type, "b"); let func = self.context.new_function( self.location, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false, ); self.context.new_call(self.location, func, &[a, b]) } } pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.additive_operation(BinaryOp::Plus, a, b) } pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b) } pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.additive_operation(BinaryOp::Minus, a, b) } fn multiplicative_operation( &self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, mut b: RValue<'gcc>, ) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)) || (a_type.is_vector() && b_type.is_vector()) { if !a_type.is_compatible_with(b_type) { if a_type.is_vector() { // Vector types need to be bitcast. // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. 
b = self.context.new_bitcast(self.location, b, a_type); } else { b = self.context.new_cast(self.location, b, a_type); } } self.context.new_binary_op(self.location, operation, a_type, a, b) } else { debug_assert!(a_type.dyncast_array().is_some()); debug_assert!(b_type.dyncast_array().is_some()); let sign = if signed { "" } else { "u" }; let func_name = format!("__{}{}ti3", sign, operation_name); let param_a = self.context.new_parameter(self.location, a_type, "a"); let param_b = self.context.new_parameter(self.location, b_type, "b"); let func = self.context.new_function( self.location, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false, ); self.context.new_call(self.location, func, &[a, b]) } } pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { // TODO(antoyo): check if the types are signed? // 128-bit, signed: __divti3 // TODO(antoyo): convert the arguments to signed? self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b) } pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { // 128-bit, unsigned: __udivti3 self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b) } pub fn gcc_checked_binop( &self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value, ) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) { use rustc_middle::ty::IntTy::*; use rustc_middle::ty::UintTy::*; use rustc_middle::ty::{Int, Uint}; let new_kind = match *typ.kind() { Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)), t @ (Uint(_) | Int(_)) => t, _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), }; // TODO(antoyo): remove duplication with intrinsic? 
let name = if self.is_native_int_type(lhs.get_type()) { match oop { OverflowOp::Add => "__builtin_add_overflow", OverflowOp::Sub => "__builtin_sub_overflow", OverflowOp::Mul => "__builtin_mul_overflow", } } else { let (func_name, width) = match oop { OverflowOp::Add => match new_kind { Int(I128) => ("__rust_i128_addo", 128), Uint(U128) => ("__rust_u128_addo", 128), _ => unreachable!(), }, OverflowOp::Sub => match new_kind { Int(I128) => ("__rust_i128_subo", 128), Uint(U128) => ("__rust_u128_subo", 128), _ => unreachable!(), }, OverflowOp::Mul => match new_kind { Int(I32) => ("__mulosi4", 32), Int(I64) => ("__mulodi4", 64), Int(I128) => ("__rust_i128_mulo", 128), // TODO(antoyo): use __muloti4d instead? Uint(U128) => ("__rust_u128_mulo", 128), _ => unreachable!(), }, }; return self.operation_with_overflow(func_name, lhs, rhs, width); }; let intrinsic = self.context.get_builtin_function(name); let res = self .current_func() // TODO(antoyo): is it correct to use rhs type instead of the parameter typ? .new_local(self.location, rhs.get_type(), "binopResult") .get_address(self.location); let new_type = type_kind_to_gcc_type(new_kind); let new_type = self.context.new_c_type(new_type); let lhs = self.context.new_cast(self.location, lhs, new_type); let rhs = self.context.new_cast(self.location, rhs, new_type); let res = self.context.new_cast(self.location, res, new_type.make_pointer()); let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None); (res.dereference(self.location).to_rvalue(), overflow) } /// Non-`__builtin_*` overflow operations with a `fn(T, T, &mut i32) -> T` signature. 
pub fn operation_with_overflow( &self, func_name: &str, lhs: RValue<'gcc>, rhs: RValue<'gcc>, width: u64, ) -> (RValue<'gcc>, RValue<'gcc>) { let a_type = lhs.get_type(); let b_type = rhs.get_type(); debug_assert!(a_type.dyncast_array().is_some()); debug_assert!(b_type.dyncast_array().is_some()); let overflow_type = self.i32_type; let overflow_param_type = overflow_type.make_pointer(); let res_type = a_type; let overflow_value = self.current_func().new_local(self.location, overflow_type, "overflow"); let overflow_addr = overflow_value.get_address(self.location); let param_a = self.context.new_parameter(self.location, a_type, "a"); let param_b = self.context.new_parameter(self.location, b_type, "b"); let param_overflow = self.context.new_parameter(self.location, overflow_param_type, "overflow"); let a_elem_type = a_type.dyncast_array().expect("non-array a value"); debug_assert!(a_elem_type.is_integral()); let res_ty = match width { 32 => self.tcx.types.i32, 64 => self.tcx.types.i64, 128 => self.tcx.types.i128, _ => unreachable!("unexpected integer size"), }; let layout = self .tcx .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(res_ty)) .unwrap(); let arg_abi = ArgAbi { layout, mode: PassMode::Direct(ArgAttributes::new()) }; let mut fn_abi = FnAbi { args: vec![arg_abi.clone(), arg_abi.clone(), arg_abi.clone()].into_boxed_slice(), ret: arg_abi, c_variadic: false, fixed_count: 3, conv: CanonAbi::C, can_unwind: false, }; fn_abi.adjust_for_foreign_abi(self.cx, ExternAbi::C { unwind: false }); let ret_indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. 
}); let call = if ret_indirect { let res_value = self.current_func().new_local(self.location, res_type, "result_value"); let res_addr = res_value.get_address(self.location); let res_param_type = res_type.make_pointer(); let param_res = self.context.new_parameter(self.location, res_param_type, "result"); let func = self.context.new_function( self.location, FunctionType::Extern, self.type_void(), &[param_res, param_a, param_b, param_overflow], func_name, false, ); let _void = self.context.new_call(self.location, func, &[res_addr, lhs, rhs, overflow_addr]); res_value.to_rvalue() } else { let func = self.context.new_function( self.location, FunctionType::Extern, res_type, &[param_a, param_b, param_overflow], func_name, false, ); self.context.new_call(self.location, func, &[lhs, rhs, overflow_addr]) }; // NOTE: we must assign the result of the operation to a variable at this point to make // sure it will be evaluated by libgccjit now. // Otherwise, it will only be evaluated when the rvalue for the call is used somewhere else // and overflow_value will not be initialized at the correct point in the program. 
let result = self.current_func().new_local(self.location, res_type, "result"); self.block.add_assignment(self.location, result, call); ( result.to_rvalue(), self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue(), ) } pub fn gcc_icmp( &mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>, ) -> RValue<'gcc> { let a_type = lhs.get_type(); let b_type = rhs.get_type(); if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) { // This algorithm is based on compiler-rt's __cmpti2: // https://github.com/llvm-mirror/compiler-rt/blob/f0745e8476f069296a7c71accedd061dce4cdf79/lib/builtins/cmpti2.c#L21 let result = self.current_func().new_local(self.location, self.int_type, "icmp_result"); let block1 = self.current_func().new_block("block1"); let block2 = self.current_func().new_block("block2"); let block3 = self.current_func().new_block("block3"); let block4 = self.current_func().new_block("block4"); let block5 = self.current_func().new_block("block5"); let block6 = self.current_func().new_block("block6"); let block7 = self.current_func().new_block("block7"); let block8 = self.current_func().new_block("block8"); let after = self.current_func().new_block("after"); let native_int_type = a_type.dyncast_array().expect("get element type"); // NOTE: cast low to its unsigned type in order to perform a comparison correctly (e.g. // the sign is only on high). 
let unsigned_type = native_int_type.to_unsigned(self.cx); let lhs_low = self.context.new_cast(self.location, self.low(lhs), unsigned_type); let rhs_low = self.context.new_cast(self.location, self.low(rhs), unsigned_type); let mut lhs_high = self.high(lhs); let mut rhs_high = self.high(rhs); match op { IntPredicate::IntUGT | IntPredicate::IntUGE | IntPredicate::IntULT | IntPredicate::IntULE => { lhs_high = self.context.new_cast(self.location, lhs_high, unsigned_type); rhs_high = self.context.new_cast(self.location, rhs_high, unsigned_type); } // TODO(antoyo): we probably need to handle signed comparison for unsigned // integers. _ => (), } let condition = self.context.new_comparison( self.location, ComparisonOp::LessThan, lhs_high, rhs_high, ); self.llbb().end_with_conditional(self.location, condition, block1, block2); block1.add_assignment( self.location, result, self.context.new_rvalue_zero(self.int_type), ); block1.end_with_jump(self.location, after); let condition = self.context.new_comparison( self.location, ComparisonOp::GreaterThan, lhs_high, rhs_high, ); block2.end_with_conditional(self.location, condition, block3, block4); block3.add_assignment( self.location, result, self.context.new_rvalue_from_int(self.int_type, 2), ); block3.end_with_jump(self.location, after); let condition = self.context.new_comparison( self.location, ComparisonOp::LessThan, lhs_low, rhs_low, ); block4.end_with_conditional(self.location, condition, block5, block6); block5.add_assignment( self.location, result, self.context.new_rvalue_zero(self.int_type), ); block5.end_with_jump(self.location, after); let condition = self.context.new_comparison( self.location, ComparisonOp::GreaterThan, lhs_low, rhs_low, ); block6.end_with_conditional(self.location, condition, block7, block8); block7.add_assignment( self.location, result, self.context.new_rvalue_from_int(self.int_type, 2), ); block7.end_with_jump(self.location, after); block8.add_assignment( self.location, result, 
self.context.new_rvalue_one(self.int_type), ); block8.end_with_jump(self.location, after); // NOTE: since jumps were added in a place rustc does not expect, the current block in the // state need to be updated. self.switch_to_block(after); let cmp = result.to_rvalue(); let (op, limit) = match op { IntPredicate::IntEQ => { return self.context.new_comparison( self.location, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type), ); } IntPredicate::IntNE => { return self.context.new_comparison( self.location, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type), ); } // TODO(antoyo): cast to u128 for unsigned comparison. See below. IntPredicate::IntUGT => (ComparisonOp::Equals, 2), IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1), IntPredicate::IntULT => (ComparisonOp::Equals, 0), IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1), IntPredicate::IntSGT => (ComparisonOp::Equals, 2), IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1), IntPredicate::IntSLT => (ComparisonOp::Equals, 0), IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1), }; self.context.new_comparison( self.location, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit), ) } else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() { // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize. lhs = self.context.new_bitcast(self.location, lhs, self.usize_type); rhs = self.context.new_bitcast(self.location, rhs, self.usize_type); self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs) } else { if a_type != b_type { // NOTE: because libgccjit cannot compare function pointers. 
if a_type.dyncast_function_ptr_type().is_some() && b_type.dyncast_function_ptr_type().is_some() { lhs = self.context.new_cast(self.location, lhs, self.usize_type.make_pointer()); rhs = self.context.new_cast(self.location, rhs, self.usize_type.make_pointer()); } // NOTE: hack because we try to cast a vector type to the same vector type. else if format!("{:?}", a_type) != format!("{:?}", b_type) { rhs = self.context.new_cast(self.location, rhs, a_type); } } match op { IntPredicate::IntUGT | IntPredicate::IntUGE | IntPredicate::IntULT | IntPredicate::IntULE => { if !a_type.is_vector() { let unsigned_type = a_type.to_unsigned(self.cx); lhs = self.context.new_cast(self.location, lhs, unsigned_type); rhs = self.context.new_cast(self.location, rhs, unsigned_type); } } // TODO(antoyo): we probably need to handle signed comparison for unsigned // integers. _ => (), } self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs) } } pub fn gcc_xor(&self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); if a_type.is_vector() && b_type.is_vector() { let b = self.bitcast_if_needed(b, a_type); a ^ b } else if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { if !a_type.is_compatible_with(b_type) { b = self.context.new_cast(self.location, b, a_type); } a ^ b } else { self.concat_low_high_rvalues( a_type, self.low(a) ^ self.low(b), self.high(a) ^ self.high(b), ) } } pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); let a_native = self.is_native_int_type(a_type); let b_native = self.is_native_int_type(b_type); if a_native && b_native { // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number. 
if a_type.is_unsigned(self) && b_type.is_signed(self) { let a = self.context.new_cast(self.location, a, b_type); let result = a << b; self.context.new_cast(self.location, result, a_type) } else if a_type.is_signed(self) && b_type.is_unsigned(self) { let b = self.context.new_cast(self.location, b, a_type); a << b } else { let a_size = a_type.get_size(); let b_size = b_type.get_size(); match a_size.cmp(&b_size) { std::cmp::Ordering::Equal => a << b, _ => { // NOTE: it is OK to cast even if b has a type bigger than a because b has // been masked by codegen_ssa before calling Builder::shl. let b = self.context.new_cast(self.location, b, a_type); a << b } } } } else if a_type.is_vector() && b_type.is_vector() { a << b } else if a_native && !b_native { self.gcc_shl(a, self.gcc_int_cast(b, a_type)) } else { // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl. let native_int_type = a_type.dyncast_array().expect("get element type"); let func = self.current_func(); let then_block = func.new_block("then"); let else_block = func.new_block("else"); let after_block = func.new_block("after"); let b0_block = func.new_block("b0"); let actual_else_block = func.new_block("actual_else"); let result = func.new_local(self.location, a_type, "shiftResult"); let b = self.gcc_int_cast(b, native_int_type); let sixty_four = self.gcc_int(native_int_type, 64); let zero = self.gcc_zero(native_int_type); let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero); self.llbb().end_with_conditional(self.location, condition, then_block, else_block); let array_value = self.concat_low_high_rvalues(a_type, zero, self.low(a) << (b - sixty_four)); then_block.add_assignment(self.location, result, array_value); then_block.end_with_jump(self.location, after_block); let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero); else_block.end_with_conditional(self.location, condition, b0_block, actual_else_block); 
b0_block.add_assignment(self.location, result, a); b0_block.end_with_jump(self.location, after_block); // NOTE: cast low to its unsigned type in order to perform a logical right shift. // TODO(antoyo): adjust this ^ comment. let unsigned_type = native_int_type.to_unsigned(self.cx); let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type); let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type); let high_low = self.context.new_cast(self.location, casted_low >> shift_value, native_int_type); let array_value = self.concat_low_high_rvalues( a_type, self.low(a) << b, (self.high(a) << b) | high_low, ); actual_else_block.add_assignment(self.location, result, array_value); actual_else_block.end_with_jump(self.location, after_block); // NOTE: since jumps were added in a place rustc does not expect, the current block in the // state need to be updated. self.switch_to_block(after_block); result.to_rvalue() } } pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> { let arg_type = arg.get_type(); if !self.is_native_int_type(arg_type) { let native_int_type = arg_type.dyncast_array().expect("get element type"); let lsb = self.low(arg); let swapped_lsb = self.gcc_bswap(lsb, width / 2); let swapped_lsb = self.context.new_cast(self.location, swapped_lsb, native_int_type); let msb = self.high(arg); let swapped_msb = self.gcc_bswap(msb, width / 2); let swapped_msb = self.context.new_cast(self.location, swapped_msb, native_int_type); // NOTE: we also need to swap the two elements here, in addition to swapping inside // the elements themselves like done above. return self.concat_low_high_rvalues(arg_type, swapped_msb, swapped_lsb); } // TODO(antoyo): check if it's faster to use string literals and a // match instead of format!. let bswap = self.cx.context.get_builtin_function(format!("__builtin_bswap{}", width)); // FIXME(antoyo): this cast should not be necessary. 
Remove // when having proper sized integer types. let param_type = bswap.get_param(0).to_rvalue().get_type(); if param_type != arg_type { arg = self.bitcast(arg, param_type); } self.cx.context.new_call(self.location, bswap, &[arg]) } } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { if self.is_native_int_type_or_bool(typ) { self.context.new_rvalue_from_long(typ, int) } else { // NOTE: set the sign in high. self.concat_low_high(typ, int, -(int.is_negative() as i64)) } } pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> { if typ.is_u128(self) { // FIXME(antoyo): libgccjit cannot create 128-bit values yet. let num = self.context.new_rvalue_from_long(self.u64_type, int as i64); self.gcc_int_cast(num, typ) } else if self.is_native_int_type_or_bool(typ) { self.context.new_rvalue_from_long(typ, int as i64) } else { self.concat_low_high(typ, int as i64, 0) } } pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { let low = num as u64; let high = (num >> 64) as u64; if num >> 64 != 0 { // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()? if self.is_native_int_type(typ) { let low = self.context.new_rvalue_from_long(self.u64_type, low as i64); let high = self.context.new_rvalue_from_long(typ, high as i64); let sixty_four = self.context.new_rvalue_from_long(typ, 64); let shift = high << sixty_four; shift | self.context.new_cast(None, low, typ) } else { self.concat_low_high(typ, low as i64, high as i64) } } else if typ.is_i128(self) { // FIXME(antoyo): libgccjit cannot create 128-bit values yet. 
let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64); self.gcc_int_cast(num, typ) } else { self.gcc_uint(typ, num as u64) } } pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> { if self.is_native_int_type_or_bool(typ) { self.context.new_rvalue_zero(typ) } else { self.concat_low_high(typ, 0, 0) } } pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 { if self.is_native_int_type_or_bool(typ) { typ.get_size() as u64 * 8 } else { // NOTE: the only unsupported types are u128 and i128. 128 } } fn bitwise_operation( &self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>, loc: Option<Location<'gcc>>, ) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); let a_native = self.is_native_int_type_or_bool(a_type); let b_native = self.is_native_int_type_or_bool(b_type); if a_type.is_vector() && b_type.is_vector() { let b = self.bitcast_if_needed(b, a_type); self.context.new_binary_op(loc, operation, a_type, a, b) } else if a_native && b_native { if a_type != b_type { b = self.context.new_cast(loc, b, a_type); } self.context.new_binary_op(loc, operation, a_type, a, b) } else { assert!( !a_native && !b_native, "both types should either be native or non-native for or operation" ); let native_int_type = a_type.dyncast_array().expect("get element type"); self.concat_low_high_rvalues( a_type, self.context.new_binary_op( loc, operation, native_int_type, self.low(a), self.low(b), ), self.context.new_binary_op( loc, operation, native_int_type, self.high(a), self.high(b), ), ) } } pub fn gcc_or( &self, a: RValue<'gcc>, b: RValue<'gcc>, loc: Option<Location<'gcc>>, ) -> RValue<'gcc> { self.bitwise_operation(BinaryOp::BitwiseOr, a, b, loc) } // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead? 
    /// Cast `value` to the integer type `dest_typ`.
    ///
    /// Handles all four combinations of native vs. non-native (128-bit,
    /// represented as a two-element array of native halves) operands:
    /// native→native is a plain cast, non-native→native truncates to the low
    /// half, native→non-native sign-extends into the high half, and
    /// non-native→non-native is a same-size bitcast.
    pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        let value_type = value.get_type();
        if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type)
        {
            // TODO: use self.location.
            self.context.new_cast(None, value, dest_typ)
        } else if self.is_native_int_type_or_bool(dest_typ) {
            // Truncating cast: only the low half of the 128-bit value survives.
            self.context.new_cast(None, self.low(value), dest_typ)
        } else if self.is_native_int_type_or_bool(value_type) {
            let dest_element_type = dest_typ.dyncast_array().expect("get element type");

            // NOTE: set the sign of the value.
            // The high half becomes 0 for non-negative values and -1 (all ones)
            // for negative ones, i.e. a sign extension.
            let zero = self.context.new_rvalue_zero(value_type);
            let is_negative =
                self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
            let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
            self.concat_low_high_rvalues(
                dest_typ,
                self.context.new_cast(None, value, dest_element_type),
                self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
            )
        } else {
            // Since u128 and i128 are the only types that can be unsupported, we know the type of
            // value and the destination type have the same size, so a bitcast is fine.
            // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
            self.context.new_bitcast(None, value, dest_typ)
        }
    }

    /// Convert an integer to a float type. For native integers this is a
    /// plain cast; for non-native 128-bit integers it calls the libgcc-style
    /// runtime helpers `__float[un]ti{sf,df,tf}` selected by `signed` and the
    /// destination float kind.
    fn int_to_float_cast(
        &self,
        signed: bool,
        value: RValue<'gcc>,
        dest_typ: Type<'gcc>,
    ) -> RValue<'gcc> {
        let value_type = value.get_type();
        if self.is_native_int_type_or_bool(value_type) {
            return self.context.new_cast(None, value, dest_typ);
        }

        debug_assert!(value_type.dyncast_array().is_some());
        let name_suffix = match self.type_kind(dest_typ) {
            // cSpell:disable
            TypeKind::Float => "tisf",
            TypeKind::Double => "tidf",
            TypeKind::FP128 => "titf",
            // cSpell:enable
            kind => panic!("cannot cast a non-native integer to type {:?}", kind),
        };
        let sign = if signed { "" } else { "un" };
        let func_name = format!("__float{}{}", sign, name_suffix);
        let param = self.context.new_parameter(None, value_type, "n");
        let func = self.context.new_function(
            None,
            FunctionType::Extern,
            dest_typ,
            &[param],
            func_name,
            false,
        );
        self.context.new_call(None, func, &[value])
    }

    /// Signed integer → float conversion (see `int_to_float_cast`).
    pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        self.int_to_float_cast(true, value, dest_typ)
    }

    /// Unsigned integer → float conversion (see `int_to_float_cast`).
    pub fn gcc_uint_to_float_cast(
        &self,
        value: RValue<'gcc>,
        dest_typ: Type<'gcc>,
    ) -> RValue<'gcc> {
        self.int_to_float_cast(false, value, dest_typ)
    }

    /// Convert a float to an integer type. For native destinations this is a
    /// plain cast; for non-native 128-bit destinations it calls the
    /// libgcc-style runtime helpers `__fix[uns]{sf,df}ti` selected by `signed`
    /// and the source float kind.
    fn float_to_int_cast(
        &self,
        signed: bool,
        value: RValue<'gcc>,
        dest_typ: Type<'gcc>,
    ) -> RValue<'gcc> {
        let value_type = value.get_type();
        if self.is_native_int_type_or_bool(dest_typ) {
            return self.context.new_cast(None, value, dest_typ);
        }

        debug_assert!(dest_typ.dyncast_array().is_some());
        let name_suffix = match self.type_kind(value_type) {
            // cSpell:disable
            TypeKind::Float => "sfti",
            TypeKind::Double => "dfti",
            // cSpell:enable
            kind => panic!("cannot cast a {:?} to non-native integer", kind),
        };
        let sign = if signed { "" } else { "uns" };
        let func_name = format!("__fix{}{}", sign, name_suffix);
        let param = self.context.new_parameter(None, value_type, "n");
        let func = self.context.new_function(
            None,
            FunctionType::Extern,
            dest_typ,
            &[param],
            func_name,
            false,
        );
        self.context.new_call(None, func, &[value])
    }

    /// Float → signed integer conversion (see `float_to_int_cast`).
    pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        self.float_to_int_cast(true, value, dest_typ)
    }

    /// Float → unsigned integer conversion (see `float_to_int_cast`).
    pub fn gcc_float_to_uint_cast(
        &self,
        value: RValue<'gcc>,
        dest_typ: Type<'gcc>,
    ) -> RValue<'gcc> {
        self.float_to_int_cast(false, value, dest_typ)
    }

    /// Most-significant half of a non-native integer. The array slot depends
    /// on the target's endianness: little-endian stores it at index 1,
    /// big-endian at index 0.
    fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        let index = match self.sess().target.options.endian {
            Endian::Little => 1,
            Endian::Big => 0,
        };
        self.context
            .new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
            .to_rvalue()
    }

    /// Least-significant half of a non-native integer (mirror of `high`).
    fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        let index = match self.sess().target.options.endian {
            Endian::Little => 0,
            Endian::Big => 1,
        };
        self.context
            .new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
            .to_rvalue()
    }

    /// Build a non-native integer of type `typ` from its two halves, placing
    /// them in the array order dictated by the target's endianness.
    fn concat_low_high_rvalues(
        &self,
        typ: Type<'gcc>,
        low: RValue<'gcc>,
        high: RValue<'gcc>,
    ) -> RValue<'gcc> {
        let (first, last) = match self.sess().target.options.endian {
            Endian::Little => (low, high),
            Endian::Big => (high, low),
        };

        let values = [first, last];
        self.context.new_array_constructor(None, typ, &values)
    }

    /// Same as `concat_low_high_rvalues` but from constant `i64` halves.
    fn concat_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
        let (first, last) = match self.sess().target.options.endian {
            Endian::Little => (low, high),
            Endian::Big => (high, low),
        };

        let native_int_type = typ.dyncast_array().expect("get element type");
        let values = [
            self.context.new_rvalue_from_long(native_int_type, first),
            self.context.new_rvalue_from_long(native_int_type, last),
        ];
        self.context.new_array_constructor(None, typ, &values)
    }
}

/// Map a rustc integer type kind to the corresponding libgccjit `CType`.
/// Only `Int`/`Uint` kinds are supported; anything else is unimplemented.
fn type_kind_to_gcc_type<I: Interner>(kind: TyKind<I>) -> CType {
    use rustc_middle::ty::IntTy::*;
    use rustc_middle::ty::UintTy::*;
    use rustc_middle::ty::{Int, Uint};

    match kind {
        Int(I8) => CType::Int8t,
        Int(I16) => CType::Int16t,
        Int(I32) => CType::Int32t,
        Int(I64) => CType::Int64t,
        Int(I128) => CType::Int128t,
        Uint(U8) => CType::UInt8t,
        Uint(U16) => CType::UInt16t,
        Uint(U32) => CType::UInt32t,
        Uint(U64) => CType::UInt64t,
        Uint(U128) => CType::UInt128t,
        _ => unimplemented!("Kind: {:?}", kind),
    }
}
rust
github
https://github.com/rust-lang/rust
compiler/rustc_codegen_gcc/src/int.rs
/*
 * Copyright 2012-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.autoconfigure.condition;

import java.util.Collection;

import org.junit.jupiter.api.Test;

import org.springframework.boot.test.context.assertj.AssertableApplicationContext;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.ImportResource;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Tests for {@link ConditionalOnClass @ConditionalOnClass}.
 *
 * @author Dave Syer
 * @author Stephane Nicoll
 */
class ConditionalOnClassTests {

	private final ApplicationContextRunner contextRunner = new ApplicationContextRunner();

	// Condition references a class that is present (this test class itself),
	// so the conditional "bar" bean must be registered.
	@Test
	void testVanillaOnClassCondition() {
		this.contextRunner.withUserConfiguration(BasicConfiguration.class, FooConfiguration.class)
			.run(this::hasBarBean);
	}

	// Condition references class name "FOO", which does not exist on the
	// classpath: the conditional "bar" bean must be skipped while the
	// unconditional "foo" bean is still registered.
	@Test
	void testMissingOnClassCondition() {
		this.contextRunner.withUserConfiguration(MissingConfiguration.class, FooConfiguration.class).run((context) -> {
			assertThat(context).doesNotHaveBean("bar");
			assertThat(context).hasBean("foo");
			assertThat(context.getBean("foo")).isEqualTo("foo");
		});
	}

	// The conditional configuration and an XML-defined bean can coexist: the
	// condition still matches when the other beans come from an XML resource.
	@Test
	void testOnClassConditionWithXml() {
		this.contextRunner.withUserConfiguration(BasicConfiguration.class, XmlConfiguration.class)
			.run(this::hasBarBean);
	}

	// Same as above, but the XML import and the conditional configuration are
	// combined into a single configuration class.
	@Test
	void testOnClassConditionWithCombinedXml() {
		this.contextRunner.withUserConfiguration(CombinedXmlConfiguration.class).run(this::hasBarBean);
	}

	// A matching @ConditionalOnClass outcome message must not mention
	// @ConditionalOnMissingClass (a regression check on the report wording).
	@Test
	void onClassConditionOutputShouldNotContainConditionalOnMissingClassInMessage() {
		this.contextRunner.withUserConfiguration(BasicConfiguration.class).run((context) -> {
			Collection<ConditionEvaluationReport.ConditionAndOutcomes> conditionAndOutcomes = ConditionEvaluationReport
				.get(context.getSourceApplicationContext().getBeanFactory())
				.getConditionAndOutcomesBySource()
				.values();
			String message = conditionAndOutcomes.iterator().next().iterator().next().getOutcome().getMessage();
			assertThat(message).doesNotContain("@ConditionalOnMissingClass did not find unwanted class");
		});
	}

	// Shared assertion: the conditional "bar" bean exists and has value "bar".
	private void hasBarBean(AssertableApplicationContext context) {
		assertThat(context).hasBean("bar");
		assertThat(context.getBean("bar")).isEqualTo("bar");
	}

	// Guarded by a class that is always present.
	@Configuration(proxyBeanMethods = false)
	@ConditionalOnClass(ConditionalOnClassTests.class)
	static class BasicConfiguration {

		@Bean
		String bar() {
			return "bar";
		}

	}

	// Guarded by a class name that never resolves.
	@Configuration(proxyBeanMethods = false)
	@ConditionalOnClass(name = "FOO")
	static class MissingConfiguration {

		@Bean
		String bar() {
			return "bar";
		}

	}

	// Unconditional companion bean used to prove the context itself started.
	@Configuration(proxyBeanMethods = false)
	static class FooConfiguration {

		@Bean
		String foo() {
			return "foo";
		}

	}

	@Configuration(proxyBeanMethods = false)
	@ImportResource("org/springframework/boot/autoconfigure/condition/foo.xml")
	static class XmlConfiguration {

	}

	@Configuration(proxyBeanMethods = false)
	@Import(BasicConfiguration.class)
	@ImportResource("org/springframework/boot/autoconfigure/condition/foo.xml")
	static class CombinedXmlConfiguration {

	}

}
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnClassTests.java
'''
https://leetcode.com/problems/sum-root-to-leaf-numbers/description/

Given a binary tree whose nodes hold single digits, sum the numbers
formed by every root-to-leaf path.

Input: [1,2,3]
    1
   / \
  2   3
Output: 25
Explanation:
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Therefore, sum = 12 + 13 = 25.

Input: [4,9,0,5,1]
    4
   / \
  9   0
 / \
5   1
Output: 1026
Explanation:
The root-to-leaf path 4->9->5 represents the number 495.
The root-to-leaf path 4->9->1 represents the number 491.
The root-to-leaf path 4->0 represents the number 40.
Therefore, sum = 495 + 491 + 40 = 1026.
'''


class Node:
    """Binary-tree node holding one decimal digit."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


result = []  # the integer formed by each root-to-leaf path


def findPath(root, path):
    """Walk the tree rooted at `root`, appending each completed
    root-to-leaf number to the module-level `result` list.

    `path` is the list of digits on the way down to (but not
    including) `root`.

    Fixes vs. the original:
    * a node is a leaf only when BOTH children are None (the original
      tested with `or`, so a node with exactly one child was treated
      as a leaf and its subtree was silently dropped);
    * `None` children are never recursed into, so trees with
      one-child nodes no longer raise AttributeError.
    """
    if root is None:
        return
    path = path + [root.data]  # copy: callers' lists are never mutated
    if root.left is None and root.right is None:
        result.append(int("".join(map(str, path))))
        return
    findPath(root.left, path)
    findPath(root.right, path)


# Sample tree from the second example above.
root = Node(4)
root.left = Node(9)
root.left.left = Node(5)
root.left.right = Node(1)
root.right = Node(0)

findPath(root, [])
print(sum(result))  # Python 3 call syntax (original used Python 2 `print`)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ A list of Colombian departaments as `choices` in a formfield. This exists in this standalone file so that it's only imported into memory when explicitly needed. """ from __future__ import unicode_literals DEPARTMENT_CHOICES = ( ('AMA', 'Amazonas'), ('ANT', 'Antioquia'), ('ARA', 'Arauca'), ('ATL', 'Atlántico'), ('DC', 'Bogotá'), ('BOL', 'Bolívar'), ('BOY', 'Boyacá'), ('CAL', 'Caldas'), ('CAQ', 'Caquetá'), ('CAS', 'Casanare'), ('CAU', 'Cauca'), ('CES', 'Cesar'), ('CHO', 'Chocó'), ('COR', 'Córdoba'), ('CUN', 'Cundinamarca'), ('GUA', 'Guainía'), ('GUV', 'Guaviare'), ('HUI', 'Huila'), ('LAG', 'La Guajira'), ('MAG', 'Magdalena'), ('MET', 'Meta'), ('NAR', 'Nariño'), ('NSA', 'Norte de Santander'), ('PUT', 'Putumayo'), ('QUI', 'Quindío'), ('RIS', 'Risaralda'), ('SAP', 'San Andrés and Providencia'), ('SAN', 'Santander'), ('SUC', 'Sucre'), ('TOL', 'Tolima'), ('VAC', 'Valle del Cauca'), ('VAU', 'Vaupés'), ('VID', 'Vichada'), )
unknown
codeparrot/codeparrot-clean
import re, os, sys, string import datetime import getopt import getpass version = "1.0.2" lines = "" iesDefs = {} ieofielist = {} outdir = './' filenames = [] verbosity = 0 prefix = "" FAIL = '\033[91m' WARN = '\033[93m' ENDC = '\033[0m' fileprefix = "" fileprefix_first_upper = "" def printFail(string): sys.stderr.write(FAIL + string + ENDC + "\n") def printWarning(string): print WARN + string + ENDC def printDebug(string): if verbosity > 0: print string def outputHeaderToFile(f, filename): now = datetime.datetime.now() f.write("""/******************************************************************************* Eurecom OpenAirInterface Copyright(c) 1999 - 2013 Eurecom This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". 
Contact Information Openair Admin: openair_admin@eurecom.fr Openair Tech : openair_tech@eurecom.fr Forums : http://forums.eurecom.fr/openairinterface Address : EURECOM, Campus SophiaTech, 450 Route des Chappes 06410 Biot FRANCE *******************************************************************************/ """) f.write("/*******************************************************************************\n") f.write(" * This file had been created by asn1tostruct.py script v%s\n" % (version)) f.write(" * Please do not modify this file but regenerate it via script.\n") f.write(" * Created on: %s by %s\n * from %s\n" % (str(now), getpass.getuser(), filenames)) f.write(" ******************************************************************************/\n") def lowerFirstCamelWord(word): """ puts the first word in a CamelCase Word in lowercase. I.e. CustomerID becomes customerID, XMLInfoTest becomes xmlInfoTest """ newstr = '' swapped = word.swapcase() idx = 0 # if it's all-caps, return an all-lowered version lowered = word.lower() if swapped == lowered: return lowered for c in swapped: if c in string.lowercase: newstr += c idx += 1 else: break if idx < 2: newstr += word[idx:] else: newstr = newstr[:-1]+ word[idx-1:] return newstr def usage(): print "Python parser for asn1 v%s" % (version) print "Usage: python asn1tostruct.py [options]" print "Available options:" print "-d Enable script debug" print "-f [file] Input file to parse" print "-o [dir] Output files to given directory" print "-h Print this help and return" try: opts, args = getopt.getopt(sys.argv[1:], "df:ho:", ["debug", "file", "help", "outdir"]) except getopt.GetoptError as err: # print help information and exit: usage() sys.exit(2) for o, a in opts: if o in ("-f", "--file"): filenames.append(a) if o in ("-d", "--debug"): verbosity = 1 if o in ("-o", "--outdir"): outdir = a if outdir.rfind('/') != len(outdir): outdir += '/' if o in ("-h", "--help"): usage() sys.exit(2) for filename in filenames: file = 
open(filename, 'r') for line in file: # Removing any comment if line.find('--') >= 0: line = line[:line.find('--')] # Removing any carriage return lines += re.sub('\r', '', line) for m in re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+SEQUENCE\s+\(\s*SIZE\s*\(\s*\d+\s*\.\.\s*[0-9a-zA-Z-]+\s*\)\s*\)\s*OF\s+[a-zA-Z-]+\s*\{\s*\{\s*([0-9a-zA-Z-]+)\s*\}\s*\}', lines, re.MULTILINE): ieofielist[m[0]] = m[1] for m in re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+E-RAB-IE-ContainerList\s*\{\s*\{\s*([a-zA-Z0-9-]+)\s*\}\s*\}', lines, re.MULTILINE): ieofielist[m[0]] = m[1] for i in re.findall(r'([a-zA-Z0-9-]+)\s+([A-Z0-9-]+)\s*::=\s*\{\s+([\,\|\{\}\t\n\.{3}\ \-a-zA-Z0-9]+)\s+}\n', lines, re.MULTILINE): ies = [] maxLength = 0 # TODO: handle extensions if i[1].find('EXTENSION') >= 0: continue if fileprefix == "": fileprefix = i[1][:i[1].find('-')].lower() for j in re.findall(r'\s*\{\s*([a-zA-Z0-9-\ \t]+)\s*\}\s*[\|,]*', i[2], re.MULTILINE): for k in re.findall(r'ID\s*([a-zA-Z0-9\-]+)\s*CRITICALITY\s*([a-zA-Z0-9\-]+)\s+[A-Z]+\s+([a-zA-Z0-9\-]+)\s*PRESENCE\s*([a-zA-Z0-9\-]+)', j, re.MULTILINE): printDebug("Got new ie for message " + i[0] + ": " + str(k)) if len(k[2]) > maxLength: maxLength = len(k[2]) ies.append(k) if len(ies) > 0: iesDefs[i[0]] = { "length": maxLength, "ies": ies } else: printWarning("Didn't find any information element for message: " + i[0]) if len(iesDefs) == 0: printFail("No Information Element parsed, exiting") sys.exit(0) fileprefix_first_upper = fileprefix[0].upper() + fileprefix[1:] f = open(outdir + fileprefix + '_ies_defs.h', 'w') outputHeaderToFile(f, filename) f.write("#include \"%s_common.h\"\n\n" % (fileprefix)) f.write("#ifndef %s_IES_DEFS_H_\n#define %s_IES_DEFS_H_\n\n" % (fileprefix.upper(), fileprefix.upper())) f.write("/* Define the version of script used to generate this file */\n") f.write("#define %s_SCRIPT_VERSION (%s)\n\n" % (fileprefix.upper(), re.sub('\.', '', version))) for key in iesDefs: if key not in ieofielist.values(): continue for (i, j) in 
ieofielist.items(): if j == key: break f.write("typedef struct %sIEs_s {\n" % (re.sub('-', '_', i))) f.write(" A_SEQUENCE_OF(struct %s_s) %s;\n" % (re.sub('IEs', '', re.sub('-', '_', ieofielist[i])), lowerFirstCamelWord(re.sub('IEs', '', re.sub('-', '_', ieofielist[i]))))) f.write("} %sIEs_t;\n\n" % (re.sub('-', '_', i))) for key in iesDefs: keyupperunderscore = re.sub('-', '_', key.upper()) keylowerunderscore = re.sub('-', '_', key.lower()) shift = 0 if len(iesDefs[key]["ies"]) == 0: continue # Presence mask for ie in iesDefs[key]["ies"]: ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper() if ie[3] == "optional" or ie[3] == "conditional": f.write("#define {0:<{pad}} {1}\n".format("%s_%s_PRESENT" % (keyupperunderscore, ieupperunderscore), "(1 << %d)" % shift, pad=iesDefs[key]["length"] + len(keyupperunderscore) + 9)) shift += 1 if (shift > 0): f.write("\n") f.write("typedef struct %s_s {\n" % (re.sub('-', '_', key))) if (shift > 0): f.write(" {0:<{pad}} {1};\n".format("uint16_t", "presenceMask", pad=iesDefs[key]["length"] + 2)) for ie in iesDefs[key]["ies"]: ieunderscore = re.sub('-', '_', ie[2]) iename = re.sub('id-', '', ie[0]) ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename)) if ie[2] in ieofielist: f.write(" %sIEs_t %s;" % (re.sub('-', '_', ie[2]), ienameunderscore)) else: f.write(" {0:<{pad}} {1};".format("%s_t" % ieunderscore, ienameunderscore, pad=iesDefs[key]["length"] + 2)) if ie[3] == "optional": f.write(" ///< Optional field") elif ie[3] == "conditional": f.write(" ///< Conditional field") f.write("\n") f.write("} %s_t;\n\n" % (re.sub('-', '_', key))) f.write("typedef struct %s_message_s {\n" % (fileprefix)) f.write(" %s_ProcedureCode_t procedureCode;\n" % (fileprefix_first_upper)) f.write(" %s_Criticality_t criticality;\n" % (fileprefix_first_upper)) f.write(" uint8_t direction;\n") f.write(" union {\n") messageList = iesDefs.keys() messageList.sort() for message in messageList: if message in ieofielist.values(): 
continue if len(iesDefs[message]["ies"]) == 0: continue f.write(" %s_t %s;\n" % (re.sub('-', '_', message), lowerFirstCamelWord(re.sub('-', '_', message)))) f.write(" } msg;\n") f.write("} %s_message;\n\n" % (fileprefix)) for key in iesDefs: if key in ieofielist.values(): continue structName = re.sub('ies', '', key) asn1cStruct = re.sub('-', '_', re.sub('IEs', '', re.sub('-IEs', '', key))) asn1cStruct = re.sub('Item', 'List', asn1cStruct) keylowerunderscore = re.sub('-', '_', key.lower()) firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct))) f.write("/** \\brief Decode function for %s ies.\n" % (key)) if len(iesDefs[key]["ies"]) != 0: f.write(" * \\param %s Pointer to ASN1 structure in which data will be stored\n" % (lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" * \\param any_p Pointer to the ANY value to decode.\n") f.write(" **/\n") f.write("int %s_decode_%s(\n" % (fileprefix, keylowerunderscore)) if len(iesDefs[key]["ies"]) != 0: f.write(" %s_t *%s,\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" ANY_t *any_p);\n\n") if len(iesDefs[key]["ies"]) == 0: continue f.write("/** \\brief Encode function for %s ies.\n" % (key)) f.write(" * \\param %s Pointer to the ASN1 structure.\n" % (firstlower)) f.write(" * \\param %s Pointer to the IES structure.\n" % (lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" **/\n") f.write("int %s_encode_%s(\n" % (fileprefix, re.sub('-', '_', structName.lower()))) f.write(" %s_t *%s,\n" % (asn1cStruct, firstlower)) f.write(" %s_t *%s);\n\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key)))) for key in iesDefs: if key not in ieofielist.values(): continue asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key)) asn1cStruct = re.sub('Item', 'List', asn1cStruct) firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct))) f.write("/** \\brief Encode function for %s ies.\n" % (key)) f.write(" * \\param %s 
Pointer to the ASN1 structure.\n" % (firstlower)) f.write(" * \\param %s Pointer to the IES structure.\n" % (lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" **/\n") f.write("int %s_encode_%s(\n" % (fileprefix, firstlower.lower())) f.write(" %s_t *%s,\n" % (asn1cStruct, firstlower)) f.write(" %sIEs_t *%sIEs);\n\n" % (asn1cStruct, firstlower)) f.write("/** \\brief Decode function for %s ies.\n" % (key)) f.write(" * \\param any_p Pointer to the ANY value to decode.\n") f.write(" * \\param callback Callback function called when any_p is successfully decoded.\n") f.write(" **/\n") f.write("int %s_decode_%s(\n" % (fileprefix, firstlower.lower())) f.write(" %sIEs_t *%sIEs,\n" % (asn1cStruct, firstlower)) f.write(" %s_t *%s);\n\n" % (asn1cStruct, lowerFirstCamelWord(asn1cStruct))) for key in iesDefs: asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key)) asn1cStruct = re.sub('Item', 'List', asn1cStruct) firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct))) if key in ieofielist.values(): f.write("/** \\brief Display %s encapsulated IE using XER encoding.\n" % (asn1cStruct)) f.write(" * \\param %s Pointer to the IES structure.\n" % (lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" * \\param file File descriptor to write output.\n") f.write(" **/\n") f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, re.sub('item', 'list', firstlower.lower()))) f.write(" asn_app_consume_bytes_f *cb,\n") f.write(" void *app_key,\n") f.write(" %sIEs_t *%sIEs);\n\n" % (re.sub('item', 'list', asn1cStruct), firstlower)) else: f.write("/** \\brief Display %s message using XER encoding.\n" % (asn1cStruct)) f.write(" * \\param message_p Pointer to root message.\n") f.write(" * \\param file File descriptor to write output.\n") f.write(" **/\n") f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, firstlower.lower())) f.write(" asn_app_consume_bytes_f *cb,\n") f.write(" void *app_key,\n") f.write(" %s_message *message_p);\n\n" % 
(fileprefix)) f.write("int %s_xer__print2sp(const void *buffer, size_t size, void *app_key);\n\n" % (fileprefix.lower())) f.write("int %s_xer__print2fp(const void *buffer, size_t size, void *app_key);\n\n" % (fileprefix.lower())) f.write("extern size_t %s_string_total_size;\n\n" % (fileprefix.lower())) f.write("#endif /* %s_IES_DEFS_H_ */\n\n" % (fileprefix.upper())) #Generate Decode functions f = open(outdir + fileprefix + '_decoder.c', 'w') outputHeaderToFile(f, filename) f.write("#include \"%s_common.h\"\n#include \"%s_ies_defs.h\"\n\n" % (fileprefix, fileprefix)) for key in iesDefs: if key in ieofielist.values(): continue structName = re.sub('ies', '', key) asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key)) if asn1cStruct.rfind('_') == len(asn1cStruct) - 1: asn1cStruct = asn1cStruct[:-1] asn1cStruct = re.sub('Item', 'List', asn1cStruct) ielistname = re.sub('UE', 'ue', asn1cStruct) ielistnamefirstlower = ielistname[:1].lower() + ielistname[1:] asn1cStructfirstlower = asn1cStruct[:1].lower() + asn1cStruct[1:] keyName = re.sub('-', '_', key) keyupperunderscore = keyName.upper() firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct))) iesaccess = "" if key not in ieofielist.values(): iesaccess = "%s_ies." 
% (firstlower) f.write("int %s_decode_%s(\n" % (fileprefix, re.sub('-', '_', structName.lower()))) if len(iesDefs[key]["ies"]) != 0: f.write(" %s_t *%s,\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" ANY_t *any_p) {\n\n") f.write(" %s_t %s;\n %s_t *%s_p = &%s;\n" % (asn1cStruct, asn1cStructfirstlower, asn1cStruct, asn1cStructfirstlower, asn1cStructfirstlower)) f.write(" int i, decoded = 0;\n") if len(iesDefs[key]["ies"]) != 0: f.write(" int tempDecoded = 0;\n") f.write(" assert(any_p != NULL);\n") if len(iesDefs[key]["ies"]) != 0: f.write(" assert(%s != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" %s_DEBUG(\"Decoding message %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n\n" % (fileprefix.upper(), re.sub('-', '_', keyName))) f.write(" ANY_to_type_aper(any_p, &asn_DEF_%s, (void**)&%s_p);\n\n" % (asn1cStruct, asn1cStructfirstlower)) f.write(" for (i = 0; i < %s_p->%slist.count; i++) {\n" % (asn1cStructfirstlower, iesaccess)) f.write(" %s_IE_t *ie_p;\n" % (fileprefix[0].upper() + fileprefix[1:])) f.write(" ie_p = %s_p->%slist.array[i];\n" % (asn1cStructfirstlower, iesaccess)) f.write(" switch(ie_p->id) {\n") for ie in iesDefs[key]["ies"]: iename = re.sub('id-', '', ie[0]) ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename)) ienameunderscorefirstlower = lowerFirstCamelWord(ienameunderscore) ietypesubst = re.sub('-', '', ie[2]) ietypeunderscore = re.sub('-', '_', ie[2]) ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper() if ie[3] == "optional": f.write(" /* Optional field */\n") elif ie[3] == "conditional": f.write(" /* Conditional field */\n") f.write(" case %s_ProtocolIE_ID_%s:\n" % (fileprefix_first_upper, re.sub('-', '_', ie[0]))) f.write(" {\n") f.write(" %s_t *%s_p = NULL;\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst))) if ie[3] != "mandatory": f.write(" %s->presenceMask |= %s_%s_PRESENT;\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), keyupperunderscore, 
ieupperunderscore)) f.write(" tempDecoded = ANY_to_type_aper(&ie_p->value, &asn_DEF_%s, (void**)&%s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst))) f.write(" if (tempDecoded < 0 || %s_p == NULL) {\n" % (lowerFirstCamelWord(ietypesubst))) f.write(" %s_ERROR(\"Decoding of IE %s failed\\n\");\n" % (fileprefix.upper(), ienameunderscore)) f.write(" if (%s_p)\n" % (lowerFirstCamelWord(ietypesubst))) f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst))) f.write(" return -1;\n") f.write(" }\n") f.write(" decoded += tempDecoded;\n") f.write(" if (asn1_xer_print)\n") f.write(" xer_fprint(stdout, &asn_DEF_%s, %s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst))) if ie[2] in ieofielist.keys(): f.write(" if (%s_decode_%s(&%s->%s, %s_p) < 0) {\n" % (fileprefix, ietypeunderscore.lower(), lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore, lowerFirstCamelWord(ietypesubst))) f.write(" %s_ERROR(\"Decoding of encapsulated IE %s failed\\n\");\n" % (fileprefix.upper(), lowerFirstCamelWord(ietypesubst))) f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst))) f.write(" }\n") else: f.write(" memcpy(&%s->%s, %s_p, sizeof(%s_t));\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore, lowerFirstCamelWord(ietypesubst), ietypeunderscore)) #f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst))) f.write(" } break;\n") f.write(" default:\n") f.write(" %s_ERROR(\"Unknown protocol IE id (%%d) for message %s\\n\", (int)ie_p->id);\n" % (fileprefix.upper(), re.sub('-', '_', structName.lower()))) f.write(" return -1;\n") f.write(" }\n") f.write(" }\n") f.write(" return decoded;\n") f.write("}\n\n") for key in iesDefs: if key not in ieofielist.values(): continue keyname = re.sub('IEs', '', re.sub('Item', 'List', key)) f.write("int %s_decode_%s(\n" % (fileprefix, re.sub('-', '_', keyname).lower())) f.write(" 
%sIEs_t *%sIEs,\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '_', keyname)))) f.write(" %s_t *%s) {\n\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '_', keyname)))) f.write(" int i, decoded = 0;\n") f.write(" int tempDecoded = 0;\n\n") f.write(" assert(%s != NULL);\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname)))); f.write(" assert(%sIEs != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname)))); f.write(" for (i = 0; i < %s->list.count; i++) {\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname)))) f.write(" %s_IE_t *ie_p = %s->list.array[i];\n" % (fileprefix[0].upper() + fileprefix[1:], lowerFirstCamelWord(re.sub('-', '_', keyname)))) f.write(" switch (ie_p->id) {\n") for ie in iesDefs[key]["ies"]: iename = re.sub('id-', '', ie[0]) ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename)) f.write(" case %s_ProtocolIE_ID_%s:\n" % (fileprefix_first_upper, re.sub('-', '_', ie[0]))) f.write(" {\n") f.write(" %s_t *%s_p = NULL;\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" tempDecoded = ANY_to_type_aper(&ie_p->value, &asn_DEF_%s, (void**)&%s_p);\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" if (tempDecoded < 0 || %s_p == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" %s_ERROR(\"Decoding of IE %s for message %s failed\\n\");\n" % (fileprefix.upper(), ienameunderscore, re.sub('-', '_', keyname))) f.write(" if (%s_p)\n" % (lowerFirstCamelWord(re.sub('-', '', ie[2])))) #f.write(" free(%s_p);\n" % (lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" return -1;\n") f.write(" }\n") f.write(" decoded += tempDecoded;\n") f.write(" if (asn1_xer_print)\n") f.write(" xer_fprint(stdout, &asn_DEF_%s, %s_p);\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" 
ASN_SEQUENCE_ADD(&%sIEs->%s, %s_p);\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname)), re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))), lowerFirstCamelWord(re.sub('-', '', ie[2])))) f.write(" } break;\n") f.write(" default:\n") f.write(" %s_ERROR(\"Unknown protocol IE id (%%d) for message %s\\n\", (int)ie_p->id);\n" % (fileprefix.upper(), re.sub('-', '_', structName.lower()))) f.write(" return -1;\n") f.write(" }\n") f.write(" }\n") f.write(" return decoded;\n") f.write("}\n\n") #Generate IES Encode functions f = open(outdir + fileprefix + '_encoder.c', 'w') outputHeaderToFile(f,filename) f.write("#include \"%s_common.h\"\n" % (fileprefix)) f.write("#include \"%s_ies_defs.h\"\n\n" % (fileprefix)) for key in iesDefs: if key in ieofielist.values(): continue structName = re.sub('ies', '', key) asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key)) asn1cStruct = re.sub('Item', 'List', asn1cStruct) if asn1cStruct.rfind('_') == len(asn1cStruct) - 1: asn1cStruct = asn1cStruct[:-1] asn1cStructfirstlower = asn1cStruct[:1].lower() + asn1cStruct[1:] firstwordlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct))) iesaccess = "" if key not in ieofielist.values(): iesaccess = "%s_ies." % (firstwordlower) keyName = re.sub('-', '_', key) keyupperunderscore = keyName.upper() # No IE to encode... 
if len(iesDefs[key]["ies"]) == 0: continue f.write("int %s_encode_%s(\n" % (fileprefix, re.sub('-', '_', structName.lower()))) f.write(" %s_t *%s,\n" % (asn1cStruct, firstwordlower)) f.write(" %s_t *%s) {\n\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key)))) f.write(" %s_IE_t *ie;\n\n" % (fileprefix_first_upper)) f.write(" assert(%s != NULL);\n" % (firstwordlower)); f.write(" assert(%s != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', key)))); for ie in iesDefs[key]["ies"]: iename = re.sub('-', '_', re.sub('id-', '', ie[0])) ienameunderscore = re.sub('-', '_', iename) ienamefirstwordlower = lowerFirstCamelWord(iename) ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper() ietypeunderscore = re.sub('-', '_', ie[2]) if ie[3] != "mandatory": if ie[3] == "optional": f.write(" /* Optional field */\n") elif ie[3] == "conditional": f.write(" /* Conditional field */\n") f.write(" if (%s->presenceMask & %s_%s_PRESENT) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), keyupperunderscore, ieupperunderscore)) #f.write(" == %s_%s_PRESENT) {\n" % (keyupperunderscore, ieupperunderscore)) f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n" % (fileprefix, fileprefix_first_upper, re.sub('-', '_', ie[0]))) f.write(" %s_Criticality_%s,\n" % (fileprefix_first_upper, ie[1])) f.write(" &asn_DEF_%s,\n" % (ietypeunderscore)) f.write(" &%s->%s)) == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower)) f.write(" return -1;\n") f.write(" }\n") f.write(" ASN_SEQUENCE_ADD(&%s->%slist, ie);\n" % (firstwordlower, iesaccess)) f.write(" }\n\n") else: if ie[2] in ieofielist.keys(): f.write(" %s_t %s;\n\n" % (ietypeunderscore, ienamefirstwordlower)) f.write(" memset(&%s, 0, sizeof(%s_t));\n" % (ienamefirstwordlower, ietypeunderscore)) f.write("\n") f.write(" if (%s_encode_%s(&%s, &%s->%s) < 0) return -1;\n" % (fileprefix, ietypeunderscore.lower(), ienamefirstwordlower, lowerFirstCamelWord(re.sub('-', '_', key)), 
ienamefirstwordlower)) f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n" % (fileprefix, fileprefix_first_upper, re.sub('-', '_', ie[0]))) f.write(" %s_Criticality_%s,\n" % (fileprefix_first_upper, ie[1])) f.write(" &asn_DEF_%s,\n" % (ietypeunderscore)) if ie[2] in ieofielist.keys(): f.write(" &%s)) == NULL) {\n" % (ienamefirstwordlower)) else: f.write(" &%s->%s)) == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower)) f.write(" return -1;\n") f.write(" }\n") f.write(" ASN_SEQUENCE_ADD(&%s->%slist, ie);\n\n" % (firstwordlower, iesaccess)) if ie[2] in ieofielist.keys(): f.write(" /* Free any dynamic allocation that is no more used */\n") f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s);\n\n" % (ietypeunderscore, ienamefirstwordlower)) f.write(" return 0;\n") f.write("}\n\n") for (key, value) in iesDefs.items(): if key not in ieofielist.values(): continue ie = value["ies"][0] ietypeunderscore = re.sub('-', '_', ie[2]) asn1cStruct = re.sub('-', '_', re.sub('IEs', '', re.sub('-IEs', '', key))) asn1cStruct = re.sub('Item', 'List', asn1cStruct) firstwordlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct))) for (i, j) in ieofielist.items(): if j == key: break f.write("int %s_encode_%s(\n" % (fileprefix, re.sub('-', '_', i).lower())) f.write(" %s_t *%s,\n" % (asn1cStruct, firstwordlower)) f.write(" %sIEs_t *%sIEs) {\n\n" % (re.sub('-', '_', i), lowerFirstCamelWord(re.sub('-', '_', i)))) f.write(" int i;\n") f.write(" %s_IE_t *ie;\n\n" % (fileprefix_first_upper)) f.write(" assert(%s != NULL);\n" % (firstwordlower)); f.write(" assert(%sIEs != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', i)))); f.write(" for (i = 0; i < %sIEs->%s.count; i++) {\n" % (firstwordlower, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))))) f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n" % (fileprefix, fileprefix_first_upper, re.sub('-', '_', ie[0]))) f.write(" %s_Criticality_%s,\n" % 
(fileprefix_first_upper, ie[1])) f.write(" &asn_DEF_%s,\n" % (ietypeunderscore)) f.write(" %sIEs->%s.array[i])) == NULL) {\n" % (firstwordlower, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))))) f.write(" return -1;\n") f.write(" }\n") f.write(" ASN_SEQUENCE_ADD(&%s->list, ie);\n" % (firstwordlower)) f.write(" }\n") f.write(" return 0;\n") f.write("}\n\n") #Generate xer print functions f = open(outdir + fileprefix + '_xer_print.c', 'w') outputHeaderToFile(f, filename) f.write("#include <stdlib.h>\n") f.write("#include <stdio.h>\n\n") f.write("#include <asn_application.h>\n#include <asn_internal.h>\n\n") f.write("#include \"%s_common.h\"\n#include \"%s_ies_defs.h\"\n\n" % (fileprefix, fileprefix)) f.write("size_t %s_string_total_size = 0;\n\n" % (fileprefix.lower())) f.write("""int %s_xer__print2fp(const void *buffer, size_t size, void *app_key) { FILE *stream = (FILE *)app_key; if(fwrite(buffer, 1, size, stream) != size) return -1; return 0; } """ % (fileprefix.lower())) f.write("""int %s_xer__print2sp(const void *buffer, size_t size, void *app_key) { char *string = (char *)app_key; /* Copy buffer to the formatted string */ memcpy(&string[%s_string_total_size], buffer, size); %s_string_total_size += size; return 0; } """ % (fileprefix.lower(), fileprefix.lower(), fileprefix.lower())) f.write("""static asn_enc_rval_t xer_encode_local(asn_TYPE_descriptor_t *td, void *sptr, asn_app_consume_bytes_f *cb, void *app_key, int indent) { asn_enc_rval_t er, tmper; const char *mname; size_t mlen; int xcan = 2; if(!td || !sptr) goto cb_failed; mname = td->xml_tag; mlen = strlen(mname); _i_ASN_TEXT_INDENT(0, indent); _ASN_CALLBACK3("<", 1, mname, mlen, ">", 1); tmper = td->xer_encoder(td, sptr, indent + 1, XER_F_BASIC, cb, app_key); if(tmper.encoded == -1) return tmper; _ASN_CALLBACK3("</", 2, mname, mlen, ">\\n", xcan); er.encoded = 4 + xcan + (2 * mlen) + tmper.encoded; _ASN_ENCODED_OK(er); cb_failed: _ASN_ENCODE_FAILED; } """) for (key, value) in 
iesDefs.items(): keyName = re.sub('-', '_', key) keyupperunderscore = keyName.upper() iesStructName = lowerFirstCamelWord(re.sub('-', '_', key)) ie = value["ies"][0] ietypeunderscore = re.sub('-', '_', ie[2]) if key in ieofielist.values(): f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, re.sub('ies', '', re.sub('item', 'list', re.sub('-', '_', key).lower())))) else: f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, re.sub('ies', '', re.sub('-', '_', key).lower()))) #f.write(" FILE *file,\n") f.write(" asn_app_consume_bytes_f *cb,\n") f.write(" void *app_key,\n") if key in ieofielist.values(): iesStructName = lowerFirstCamelWord(re.sub('Item', 'List', re.sub('-', '_', key))) f.write(" %sIEs_t *%s) {\n\n" % (re.sub('IEs', '', re.sub('Item', 'List', re.sub('-', '_', key))), iesStructName)) f.write(" int i;\n") f.write(" asn_enc_rval_t er;\n") else: f.write(" %s_message *message_p)\n{\n" % (fileprefix)) f.write(" %s_t *%s;\n" % (re.sub('-', '_', key), iesStructName)) f.write(" asn_enc_rval_t er;\n") #f.write(" void *app_key = (void *)file;\n") #f.write(" asn_app_consume_bytes_f *cb = %s_xer__print2fp;\n\n" % (fileprefix.lower())) f.write(" %s = &message_p->msg.%s;\n\n" % (iesStructName, iesStructName)) if key in ieofielist.values(): # Increase indentation level f.write(" for (i = 0; i < %s->%s.count; i++) {\n" % (iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))))) #f.write(" xer_fprint(file, &asn_DEF_%s, %s->%s.array[i]);\n" % (ietypeunderscore, iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))))) f.write(" er = xer_encode(&asn_DEF_%s, %s->%s.array[i], XER_F_BASIC, cb, app_key);\n" % (ietypeunderscore, iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))))) f.write(" }\n") else: f.write(" cb(\"<%s-PDU>\\n\", %d, app_key);\n" % (key, len("<%s-PDU>\n" % (key)))) f.write(" xer_encode_local(&asn_DEF_%s_Criticality, &message_p->criticality, cb, app_key, 1);\n" % 
fileprefix_first_upper) f.write(" xer_encode_local(&asn_DEF_%s_ProcedureCode, &message_p->procedureCode, cb, app_key, 1);\n" % fileprefix_first_upper) f.write(" cb(\" <%s>\\n\", %d, app_key);\n" % (key, len(" <%s>\n" % (key)))) for ie in iesDefs[key]["ies"]: iename = re.sub('-', '_', re.sub('id-', '', ie[0])) ienameunderscore = re.sub('-', '_', iename) ienamefirstwordlower = lowerFirstCamelWord(iename) ietypeunderscore = re.sub('-', '_', ie[2]) ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper() if ie[3] != "mandatory": if ie[3] == "optional": f.write(" /* Optional field */\n") elif ie[3] == "conditional": f.write(" /* Conditional field */\n") f.write(" if (%s->presenceMask & %s_%s_PRESENT)\n " % (iesStructName, keyupperunderscore, ieupperunderscore)) # Is it an encapsulated IE ? if ie[2] in ieofielist.keys(): f.write(" %s_xer_print_%s(cb, app_key, &%s->%s);\n" % (fileprefix, re.sub('ies', '', re.sub('-', '_', ie[2]).lower()), iesStructName, ienamefirstwordlower)) else: f.write(" xer_encode_local(&asn_DEF_%s, &%s->%s, cb, app_key, 2);\n" % (ietypeunderscore, iesStructName, ienamefirstwordlower)) f.write(" cb(\" </%s>\\n\", %d, app_key);\n" % (key, len(" </%s>\n" % (key)))) f.write(" cb(\"</%s-PDU>\\n\", %d, app_key);\n" % (key, len("</%s-PDU>\n" % (key)))) f.write(" _ASN_ENCODED_OK(er);\n") #if key not in ieofielist.values(): #f.write("cb_failed:\n") #f.write(" return er;\n") f.write("}\n\n")
unknown
codeparrot/codeparrot-clean
import re
import shlex

from django.conf import settings
from django.utils.encoding import smart_bytes
from pipeline.compilers import CompilerBase
from pipeline.exceptions import CompilerError


class BrowserifyCompiler(CompilerBase):
    """Pipeline compiler that bundles ``*.browserify.js`` files with browserify."""

    output_extension = 'browserified.js'

    def match_file(self, path):
        """Return True if ``path`` is a browserify source file.

        Allow for cache busting hashes between ".browserify" and ".js".
        """
        return re.search(r'\.browserify(\.[a-fA-F0-9]+)?\.js$', path) is not None

    def compile_file(self, infile, outfile, outdated=False, force=False):
        """Run browserify over ``infile``, writing the bundle to ``outfile``.

        Binary and extra arguments come from the ``PIPELINE`` setting
        (``BROWSERIFY_BINARY`` / ``BROWSERIFY_ARGUMENTS``).
        Raises CompilerError if browserify exits non-zero.
        """
        pipeline_settings = getattr(settings, 'PIPELINE', {})
        # Quote the file paths: the command runs through a shell, so an
        # unquoted path containing spaces or shell metacharacters would
        # break the command line (or allow injection).
        command = "%s %s %s > %s" % (
            pipeline_settings.get('BROWSERIFY_BINARY', '/usr/bin/env browserify'),
            pipeline_settings.get('BROWSERIFY_ARGUMENTS', ''),
            shlex.quote(infile),
            shlex.quote(outfile),
        )
        return self.execute_command(command)

    def execute_command(self, command, content=None, cwd=None):
        """This is like the one in SubProcessCompiler, except it checks the exit code."""
        import subprocess
        pipe = subprocess.Popen(command, shell=True, cwd=cwd,
                                stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if content:
            content = smart_bytes(content)
        stdout, stderr = pipe.communicate(content)
        if self.verbose:
            print(stderr)
        if pipe.returncode != 0:
            # Surface browserify's own diagnostics instead of silently
            # returning a partial (or empty) bundle.
            raise CompilerError(stderr)
        return stdout
unknown
codeparrot/codeparrot-clean
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ha.protocolPB;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;

import javax.net.SocketFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

import org.apache.hadoop.thirdparty.protobuf.RpcController;

import static org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc;

/**
 * Client-side translator that adapts the {@link ZKFCProtocol} Java interface
 * onto the protobuf-based {@link ZKFCProtocolPB} RPC proxy.
 */
public class ZKFCProtocolClientSideTranslatorPB implements
  ZKFCProtocol, Closeable, ProtocolTranslator {

  // Protobuf RPC engine does not use the controller; always pass null.
  private final static RpcController NULL_CONTROLLER = null;
  private final ZKFCProtocolPB rpcProxy;

  /**
   * Build a translator backed by an RPC proxy to the ZKFC at {@code addr}.
   *
   * @param addr          address of the ZK failover controller
   * @param conf          configuration used to create the proxy
   * @param socketFactory factory for the underlying connection
   * @param timeout       RPC timeout in milliseconds
   * @throws IOException if the proxy cannot be created
   */
  public ZKFCProtocolClientSideTranslatorPB(
      InetSocketAddress addr, Configuration conf,
      SocketFactory socketFactory, int timeout) throws IOException {
    RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
        ProtobufRpcEngine2.class);
    rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
        RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
        UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
  }

  /** Ask the ZKFC to cede the active state for the given duration. */
  @Override
  public void cedeActive(int millisToCede) throws IOException,
      AccessControlException {
    CedeActiveRequestProto req = CedeActiveRequestProto.newBuilder()
        .setMillisToCede(millisToCede)
        .build();
    // ipc(...) unwraps ServiceException into the underlying IOException.
    ipc(() -> rpcProxy.cedeActive(NULL_CONTROLLER, req));
  }

  /** Request a graceful failover to this node's ZKFC. */
  @Override
  public void gracefulFailover() throws IOException, AccessControlException {
    ipc(() -> rpcProxy.gracefulFailover(NULL_CONTROLLER,
        GracefulFailoverRequestProto.getDefaultInstance()));
  }

  /** Release the underlying RPC proxy. */
  @Override
  public void close() {
    RPC.stopProxy(rpcProxy);
  }

  @Override
  public Object getUnderlyingProxyObject() {
    return rpcProxy;
  }
}
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
# $Id: __init__.py 4813 2006-11-13 03:41:08Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
This package contains the Python Source Reader modules.
"""

__docformat__ = 'reStructuredText'

import sys
import docutils.readers
from docutils.readers.python import moduleparser
from docutils import parsers
from docutils import nodes
from docutils.readers.python import pynodes
from docutils import readers


class Reader(docutils.readers.Reader):

    """Reader for Python source files, built on ``moduleparser``."""

    config_section = 'python reader'
    config_section_dependencies = ('readers',)
    default_parser = 'restructuredtext'

    def parse(self):
        """Parse `self.input` into a document tree."""
        self.document = document = self.new_document()
        module_section = moduleparser.parse_module(self.input,
                                                   self.source.source_path)
        # First pass: record __docformat__ assignments on their scopes.
        module_section.walk(DocformatVisitor(self.document))
        # Second pass: parse each docstring with the recorded format.
        visitor = DocstringFormattingVisitor(
            document=document,
            default_parser=self.default_parser)
        module_section.walk(visitor)
        self.document.append(module_section)


class DocformatVisitor(nodes.SparseNodeVisitor):

    """
    This sets docformat attributes in a module.  Wherever an assignment
    to __docformat__ is found, we look for the enclosing scope -- a class,
    a module, or a function -- and set the docformat attribute there.

    We can't do this during the DocstringFormattingVisitor walking,
    because __docformat__ may appear below a docstring in that format
    (typically below the module docstring).
    """

    def visit_attribute(self, node):
        assert isinstance(node[0], pynodes.object_name)
        name = node[0][0].data
        if name != '__docformat__':
            return
        value = None
        # BUG FIX: iterate over the attribute node's children; the old
        # code referenced an undefined name ``children`` (NameError).
        for child in node:
            if isinstance(child, pynodes.expression_value):
                value = child[0].data
                break
        assert value.startswith("'") or value.startswith('"'), (
            "__docformat__ must be assigned a string literal (not %s); "
            "line: %s" % (value, node['lineno']))
        # BUG FIX: strip the quotes from the assigned *value*; the old
        # code sliced ``name`` (i.e. '__docformat__' itself) instead.
        docformat = value[1:-1]
        looking_in = node.parent
        while not isinstance(looking_in, (pynodes.module_section,
                                          pynodes.function_section,
                                          pynodes.class_section)):
            looking_in = looking_in.parent
        looking_in['docformat'] = docformat


class DocstringFormattingVisitor(nodes.SparseNodeVisitor):

    """Parse each docstring node with the docformat in effect for it."""

    def __init__(self, document, default_parser):
        self.document = document
        self.default_parser = default_parser
        # Cache of parser instances, keyed by (unaliased) parser name.
        self.parsers = {}

    def visit_docstring(self, node):
        text = node[0].data
        docformat = self.find_docformat(node)
        del node[0]
        node['docformat'] = docformat
        parser = self.get_parser(docformat)
        parser.parse(text, self.document)
        # Move the parsed children from the scratch document to the node.
        for child in self.document.children:
            node.append(child)
        self.document.current_source = self.document.current_line = None
        del self.document[:]

    def get_parser(self, parser_name):
        """
        Get a parser based on its name.  We reuse parsers during this
        visitation, so parser instances are cached.
        """
        parser_name = parsers._parser_aliases.get(parser_name, parser_name)
        # ``dict.has_key`` is gone in Python 3; use ``in`` instead.
        if parser_name not in self.parsers:
            cls = parsers.get_parser_class(parser_name)
            self.parsers[parser_name] = cls()
        return self.parsers[parser_name]

    def find_docformat(self, node):
        """
        Find the __docformat__ closest to this node (i.e., look in the
        class or module)
        """
        while node:
            if node.get('docformat'):
                return node['docformat']
            node = node.parent
        return self.default_parser


if __name__ == '__main__':
    try:
        import locale
        locale.setlocale(locale.LC_ALL, '')
    except Exception:
        # Locale setup is best-effort; proceed with the default locale.
        pass
    from docutils.core import publish_cmdline, default_description
    description = ('Generates pseudo-XML from Python modules '
                   '(for testing purposes).  ' + default_description)
    publish_cmdline(description=description, reader=Reader())
unknown
codeparrot/codeparrot-clean
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011-2012 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json

from oslo.config import cfg
import webob.exc

from glance.api import policy
from glance.common import wsgi
import glance.context
import glance.openstack.common.log as logging

# Configuration options controlling how request contexts are built.
context_opts = [
    cfg.BoolOpt('owner_is_tenant', default=True,
                help=_('When true, this option sets the owner of an image '
                       'to be the tenant. Otherwise, the owner of the '
                       ' image will be the authenticated user issuing the '
                       'request.')),
    cfg.StrOpt('admin_role', default='admin',
               help=_('Role used to identify an authenticated user as '
                      'administrator.')),
    cfg.BoolOpt('allow_anonymous_access', default=False,
                help=_('Allow unauthenticated users to access the API with '
                       'read-only privileges. This only applies when using '
                       'ContextMiddleware.')),
]

CONF = cfg.CONF
CONF.register_opts(context_opts)

LOG = logging.getLogger(__name__)


class BaseContextMiddleware(wsgi.Middleware):
    def process_response(self, resp):
        """Echo the request id back to the client as a response header."""
        try:
            request_id = resp.request.context.request_id
        except AttributeError:
            LOG.warn(_('Unable to retrieve request id from context'))
        else:
            resp.headers['x-openstack-request-id'] = 'req-%s' % request_id
        return resp


class ContextMiddleware(BaseContextMiddleware):
    def __init__(self, app):
        # Shared policy enforcer, attached to every context this
        # middleware creates.
        self.policy_enforcer = policy.Enforcer()
        super(ContextMiddleware, self).__init__(app)

    def process_request(self, req):
        """Convert authentication information into a request context

        Generate a glance.context.RequestContext object from the available
        authentication headers and store on the 'context' attribute
        of the req object.

        :param req: wsgi request object that will be given the context object
        :raises webob.exc.HTTPUnauthorized: when value of the X-Identity-Status
                                            header is not 'Confirmed' and
                                            anonymous access is disallowed
        """
        if req.headers.get('X-Identity-Status') == 'Confirmed':
            req.context = self._get_authenticated_context(req)
        elif CONF.allow_anonymous_access:
            req.context = self._get_anonymous_context()
        else:
            raise webob.exc.HTTPUnauthorized()

    def _get_anonymous_context(self):
        # Anonymous requests get a read-only context with no identity.
        kwargs = {
            'user': None,
            'tenant': None,
            'roles': [],
            'is_admin': False,
            'read_only': True,
            'policy_enforcer': self.policy_enforcer,
        }
        return glance.context.RequestContext(**kwargs)

    def _get_authenticated_context(self, req):
        #NOTE(bcwaldon): X-Roles is a csv string, but we need to parse
        # it into a list to be useful
        roles_header = req.headers.get('X-Roles', '')
        roles = [r.strip().lower() for r in roles_header.split(',')]

        #NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token
        deprecated_token = req.headers.get('X-Storage-Token')

        service_catalog = None
        if req.headers.get('X-Service-Catalog') is not None:
            try:
                catalog_header = req.headers.get('X-Service-Catalog')
                service_catalog = json.loads(catalog_header)
            except ValueError:
                # A malformed catalog is a deployment problem, not a
                # client error — report it as a server-side failure.
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        kwargs = {
            'user': req.headers.get('X-User-Id'),
            'tenant': req.headers.get('X-Tenant-Id'),
            'roles': roles,
            'is_admin': CONF.admin_role.strip().lower() in roles,
            'auth_tok': req.headers.get('X-Auth-Token', deprecated_token),
            'owner_is_tenant': CONF.owner_is_tenant,
            'service_catalog': service_catalog,
            'policy_enforcer': self.policy_enforcer,
        }

        return glance.context.RequestContext(**kwargs)


class UnauthenticatedContextMiddleware(BaseContextMiddleware):
    def process_request(self, req):
        """Create a context without an authorized user."""
        kwargs = {
            'user': None,
            'tenant': None,
            'roles': [],
            'is_admin': True,
        }

        req.context = glance.context.RequestContext(**kwargs)
unknown
codeparrot/codeparrot-clean
#This file is part of Tryton.  The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from trytond.pool import Pool
from trytond.config import CONFIG
from trytond.transaction import Transaction
from trytond.exceptions import NotLogged


def _get_pool(dbname):
    # Lazily initialise the pool the first time a database is seen.
    database_list = Pool.database_list()
    pool = Pool(dbname)
    if dbname not in database_list:
        pool.init()
    return pool


def login(dbname, loginname, password, cache=True):
    """Authenticate ``loginname``/``password`` against ``dbname``.

    Returns False on failure, the user id when ``cache`` is false, and a
    ``(user_id, session_key)`` pair otherwise.
    """
    with Transaction().start(dbname, 0) as transaction:
        pool = _get_pool(dbname)
        User = pool.get('res.user')
        user_id = User.get_login(loginname, password)
        transaction.cursor.commit()
    if not user_id:
        return False
    if not cache:
        return user_id
    # Open a session owned by the freshly authenticated user.
    with Transaction().start(dbname, user_id) as transaction:
        Session = pool.get('ir.session')
        session, = Session.create([{}])
        transaction.cursor.commit()
    return user_id, session.key


def logout(dbname, user, session):
    """Delete the session identified by ``session`` and return its login."""
    with Transaction().start(dbname, 0) as transaction:
        pool = _get_pool(dbname)
        Session = pool.get('ir.session')
        record, = Session.search([
                ('key', '=', session),
                ])
        name = record.create_uid.login
        Session.delete([record])
        transaction.cursor.commit()
    return name


def check_super(passwd):
    """Validate the server admin password or raise ``AccessDenied``."""
    if passwd != CONFIG['admin_passwd']:
        raise Exception('AccessDenied')
    return True


def check(dbname, user, session):
    """Verify that ``session`` is valid for ``user`` on ``dbname``."""
    if user == 0:
        # The root pseudo-user may never log in through a session.
        raise Exception('AccessDenied')
    if not user:
        raise NotLogged()
    with Transaction().start(dbname, user) as transaction:
        pool = _get_pool(dbname)
        Session = pool.get('ir.session')
        try:
            if not Session.check(user, session):
                raise NotLogged()
            return user
        finally:
            # Session.check touches the session timestamp; persist it.
            transaction.cursor.commit()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Validates that the cluster is healthy.
# Error codes are:
# 0 - success
# 1 - fatal (cluster is unlikely to work)
# 2 - non-fatal (encountered some errors, but cluster should be working correctly)

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then
  source "${KUBE_ROOT}/cluster/env.sh"
fi

source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"

# Run kubectl and retry upon failure.
function kubectl_retry() {
  tries=3
  while ! "${KUBE_ROOT}/cluster/kubectl.sh" "$@"; do
    tries=$((tries-1))
    if [[ ${tries} -le 0 ]]; then
      echo "('kubectl $*' failed, giving up)" >&2
      return 1
    fi
    echo "(kubectl failed, will retry ${tries} times)" >&2
    sleep 1
  done
}

ALLOWED_NOTREADY_NODES="${ALLOWED_NOTREADY_NODES:-0}"
CLUSTER_READY_ADDITIONAL_TIME_SECONDS="${CLUSTER_READY_ADDITIONAL_TIME_SECONDS:-30}"

# Work out EXPECTED_NUM_NODES for the current provider.
if [[ "${KUBERNETES_PROVIDER:-}" == "gce" ]]; then
  if [[ "${KUBE_CREATE_NODES}" == "true" ]]; then
    EXPECTED_NUM_NODES="$(get-num-nodes)"
  else
    EXPECTED_NUM_NODES="0"
  fi
  echo "Validating gce cluster, MULTIZONE=${MULTIZONE:-}"
  # In multizone mode we need to add instances for all nodes in the region.
  if [[ "${MULTIZONE:-}" == "true" ]]; then
    EXPECTED_NUM_NODES=$(gcloud -q compute instances list --project="${PROJECT}" --format="[no-heading]" \
      --filter="(name ~ '${NODE_INSTANCE_PREFIX}.*' OR name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}.*') AND zone:($(gcloud -q compute zones list --project="${PROJECT}" --filter=region="${REGION}" --format="csv[no-heading](name)" | tr "\n" "," | sed "s/,$//"))" | wc -l)
    echo "Computing number of nodes, NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX}, REGION=${REGION}, EXPECTED_NUM_NODES=${EXPECTED_NUM_NODES}"
  fi
else
  EXPECTED_NUM_NODES="${NUM_NODES}"
fi

# If the master kubelet registers itself, masters count as nodes too.
if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
  if [[ "${KUBERNETES_PROVIDER:-}" == "gce" ]]; then
    NUM_MASTERS=$(get-master-replicas-count)
  else
    NUM_MASTERS=1
  fi
  EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+NUM_MASTERS))
fi

# Minimum node count needed to declare the cluster usable.
REQUIRED_NUM_NODES=$((EXPECTED_NUM_NODES - ALLOWED_NOTREADY_NODES))

# Make several attempts to deal with slow cluster birth.
return_value=0
attempt=0
# Set the timeout to ~25minutes (100 x 15 second) to avoid timeouts for 1000-node clusters.
PAUSE_BETWEEN_ITERATIONS_SECONDS=15
MAX_ATTEMPTS=100
ADDITIONAL_ITERATIONS=$(((CLUSTER_READY_ADDITIONAL_TIME_SECONDS + PAUSE_BETWEEN_ITERATIONS_SECONDS - 1)/PAUSE_BETWEEN_ITERATIONS_SECONDS))
while true; do
  # Pause between iterations of this large outer loop.
  if [[ ${attempt} -gt 0 ]]; then
    sleep 15
  fi
  attempt=$((attempt+1))

  # The "kubectl get nodes -o template" exports node information.
  #
  # Echo the output and gather 2 counts:
  #  - Total number of nodes.
  #  - Number of "ready" nodes.
  #
  # Suppress errors from kubectl output because during cluster bootstrapping
  # for clusters where the master node is registered, the apiserver will become
  # available and then get restarted as the kubelet configures the docker bridge.
  #
  # We are assigning the result of kubectl_retry get nodes operation to the res
  # variable in that way, to prevent stopping the whole script on an error.
  #
  # Bash command substitution $(kubectl_...) removes all trailing whitespaces
  # which are important for line counting.
  # Use trick from https://unix.stackexchange.com/a/383411 to avoid
  # newline truncation.
  node=$(kubectl_retry get nodes --no-headers; ret=$?; echo .; exit "$ret") && res="$?" || res="$?"
  node="${node%.}"
  if [ "${res}" -ne "0" ]; then
    if [[ "${attempt}" -gt "${last_run:-$MAX_ATTEMPTS}" ]]; then
      echo -e "${color_red:-} Failed to get nodes.${color_norm:-}"
      exit 1
    else
      continue
    fi
  fi
  found=$(echo -n "${node}" | wc -l)
  # Use grep || true so that empty result doesn't return nonzero exit code.
  ready=$(echo -n "${node}" | grep -c -v "NotReady" || true)

  if (( "${found}" == "${EXPECTED_NUM_NODES}" )) && (( "${ready}" == "${EXPECTED_NUM_NODES}")); then
    break
  elif (( "${found}" > "${EXPECTED_NUM_NODES}" )); then
    if [[ "${KUBE_USE_EXISTING_MASTER:-}" != "true" ]]; then
      echo -e "${color_red}Found ${found} nodes, but expected ${EXPECTED_NUM_NODES}. Your cluster may not behave correctly.${color_norm}"
    fi
    break
  elif (( "${ready}" > "${EXPECTED_NUM_NODES}")); then
    echo -e "${color_red}Found ${ready} ready nodes, but expected ${EXPECTED_NUM_NODES}. Your cluster may not behave correctly.${color_norm}"
    break
  else
    if [[ "${REQUIRED_NUM_NODES}" -le "${ready}" ]]; then
      echo -e "${color_green:-}Found ${REQUIRED_NUM_NODES} Nodes, allowing additional ${ADDITIONAL_ITERATIONS} iterations for other Nodes to join.${color_norm}"
      last_run="${last_run:-$((attempt + ADDITIONAL_ITERATIONS - 1))}"
    fi
    if [[ "${attempt}" -gt "${last_run:-$MAX_ATTEMPTS}" ]]; then
      echo -e "${color_yellow:-}Detected ${ready} ready nodes, found ${found} nodes out of expected ${EXPECTED_NUM_NODES}. Your cluster may not be fully functional.${color_norm}"
      kubectl_retry get nodes
      if [[ "${REQUIRED_NUM_NODES}" -gt "${ready}" ]]; then
        exit 1
      else
        return_value=2
        break
      fi
    else
      echo -e "${color_yellow}Waiting for ${EXPECTED_NUM_NODES} ready nodes. ${ready} ready nodes, ${found} registered. Retrying.${color_norm}"
    fi
  fi
done
echo "Found ${found} node(s)."
kubectl_retry get nodes

attempt=0
while true; do
  # The "kubectl componentstatuses -o template" exports components health information.
  #
  # Echo the output and gather 2 counts:
  #  - Total number of componentstatuses.
  #  - Number of "healthy" components.
  cs_status=$(kubectl_retry get componentstatuses -o template --template='{{range .items}}{{with index .conditions 0}}{{.type}}:{{.status}}{{end}}{{"\n"}}{{end}}') || true
  componentstatuses=$(echo "${cs_status}" | grep -c 'Healthy:') || true
  healthy=$(echo "${cs_status}" | grep -c 'Healthy:True') || true

  if ((componentstatuses > healthy)) || ((componentstatuses == 0)); then
    if ((attempt < 5)); then
      echo -e "${color_yellow}Cluster not working yet.${color_norm}"
      attempt=$((attempt+1))
      sleep 30
    else
      echo -e " ${color_yellow}Validate output:${color_norm}"
      kubectl_retry get cs
      echo -e "${color_red}Validation returned one or more failed components. Cluster is probably broken.${color_norm}"
      exit 1
    fi
  else
    break
  fi
done

echo "Validate output:"
kubectl_retry get cs || true
if [ "${return_value}" == "0" ]; then
  echo -e "${color_green}Cluster validation succeeded${color_norm}"
else
  echo -e "${color_yellow}Cluster validation encountered some problems, but cluster should be in working order${color_norm}"
fi

exit "${return_value}"
github
https://github.com/kubernetes/kubernetes
cluster/validate-cluster.sh
#!/usr/bin/env bash

## /!\ This file must be used at the root of the prometheus project
## This script provides utils method to help to release and verify the readiness of each libs under the folder ui/

set -e

current=$(pwd)
root_ui_folder=${current}/web/ui

cd "${root_ui_folder}"

files=("../../LICENSE" "../../CHANGELOG.md")
workspaces=$(jq -r '.workspaces[]' < package.json)

# Copy the repository LICENSE / CHANGELOG into every npm workspace so they
# are included in the published packages.
function copy() {
  for file in "${files[@]}"; do
    for workspace in ${workspaces}; do
      if [ -f "${file}" ]; then
        cp "${file}" "${workspace}"/"$(basename "${file}")"
      fi
    done
  done
}

# Publish every public workspace to npm; pass "dry-run" to only simulate.
function publish() {
  dry_run="${1}"
  cmd="npm publish --access public"
  if [[ "${dry_run}" == "dry-run" ]]; then
    cmd+=" --dry-run"
  fi
  for workspace in ${workspaces}; do
    # package "mantine-ui" is private so we shouldn't try to publish it.
    if [[ "${workspace}" != "mantine-ui" ]]; then
      cd "${workspace}"
      eval "${cmd}"
      cd "${root_ui_folder}"
    fi
  done
}

# Verify that every workspace's package.json version matches the release tag.
function checkPackage() {
  version=${1}
  if [[ "${version}" == v* ]]; then
    version="${version:1}"
  fi
  for workspace in ${workspaces}; do
    cd "${workspace}"
    package_version=$(npm run env | grep npm_package_version | cut -d= -f2-)
    if [ "${version}" != "${package_version}" ]; then
      echo "version of ${workspace} is not the correct one"
      echo "expected one: ${version}"
      echo "current one: ${package_version}"
      echo "please use ./ui_release --bump-version ${version}"
      exit 1
    fi
    cd "${root_ui_folder}"
  done
}

# Remove the files previously installed by copy().
function clean() {
  for file in "${files[@]}"; do
    for workspace in ${workspaces}; do
      f="${workspace}"/"$(basename "${file}")"
      if [ -f "${f}" ]; then
        rm "${f}"
      fi
    done
  done
}

# Set the given version in every workspace and its @prometheus-io/* deps.
function bumpVersion() {
  version="${1}"
  if [[ "${version}" == v* ]]; then
    version="${version:1}"
  fi
  # upgrade the @prometheus-io/* dependencies on all packages
  for workspace in ${workspaces}; do
    # sed -i syntax is different on mac and linux
    if [[ "$OSTYPE" == "darwin"* ]]; then
      sed -E -i "" "s|(\"@prometheus-io/.+\": )\".+\"|\1\"${version}\"|" "${workspace}"/package.json
    else
      sed -E -i "s|(\"@prometheus-io/.+\": )\".+\"|\1\"${version}\"|" "${workspace}"/package.json
    fi
  done
  # increase the version on all packages
  npm version "${version}" --workspaces --include-workspace-root
}

if [[ "$1" == "--copy" ]]; then
  copy
fi

if [[ $1 == "--publish" ]]; then
  publish "${@:2}"
fi

if [[ $1 == "--check-package" ]]; then
  checkPackage "${@:2}"
fi

if [[ $1 == "--bump-version" ]]; then
  bumpVersion "${@:2}"
fi

if [[ $1 == "--clean" ]]; then
  clean
fi
github
https://github.com/prometheus/prometheus
scripts/ui_release.sh
/*
 * Copyright 2012-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.testcontainers.service.connection;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.testcontainers.containers.Container;
import org.testcontainers.utility.DockerImageName;

import org.springframework.boot.autoconfigure.service.connection.ConnectionDetails;
import org.springframework.context.annotation.Bean;
import org.springframework.core.annotation.AliasFor;

/**
 * Indicates that a field or method is a {@link ContainerConnectionSource} which provides
 * a service that can be connected to.
 * <p>
 * If the underling connection supports SSL, the {@link PemKeyStore @PemKeyStore},
 * {@link PemTrustStore @PemTrustStore}, {@link JksKeyStore @JksKeyStore},
 * {@link JksTrustStore @JksTrustStore}, {@link Ssl @Ssl} annotations may be used to
 * provide additional configuration.
 * <p>
 * The {@link #value()} and {@link #name()} attributes are mutual aliases; either may be
 * used to declare the service name.
 *
 * @author Moritz Halbritter
 * @author Andy Wilkinson
 * @author Phillip Webb
 * @since 3.1.0
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE })
public @interface ServiceConnection {

	/**
	 * The name of the service being connected to. Container names are used to determine
	 * the connection details that should be created when a technology-specific
	 * {@link Container} subclass is not available.
	 * <p>
	 * If not specified, and if the {@link Container} instance is available, the
	 * {@link DockerImageName#getRepository() repository} part of the
	 * {@link Container#getDockerImageName() docker image name} will be used. Note that
	 * {@link Container} instances are <em>not</em> available early enough when the
	 * container is defined as a {@link Bean @Bean} method. All
	 * {@link ServiceConnection @ServiceConnection} {@link Bean @Bean} methods that need
	 * to match on the connection name <em>must</em> declare this attribute.
	 * <p>
	 * This attribute is an alias for {@link #name()}.
	 * @return the name of the service
	 * @see #name()
	 */
	@AliasFor("name")
	String value() default "";

	/**
	 * The name of the service being connected to. Container names are used to determine
	 * the connection details that should be created when a technology-specific
	 * {@link Container} subclass is not available.
	 * <p>
	 * If not specified, and if the {@link Container} instance is available, the
	 * {@link DockerImageName#getRepository() repository} part of the
	 * {@link Container#getDockerImageName() docker image name} will be used. Note that
	 * {@link Container} instances are <em>not</em> available early enough when the
	 * container is defined as a {@link Bean @Bean} method. All
	 * {@link ServiceConnection @ServiceConnection} {@link Bean @Bean} methods that need
	 * to match on the connection name <em>must</em> declare this attribute.
	 * <p>
	 * This attribute is an alias for {@link #value()}.
	 * @return the name of the service
	 * @see #value()
	 */
	@AliasFor("value")
	String name() default "";

	/**
	 * A restriction to types of {@link ConnectionDetails} that can be created from this
	 * connection. The default value does not restrict the types that can be created.
	 * @return the connection detail types that can be created to establish the connection
	 */
	Class<? extends ConnectionDetails>[] type() default {};

}
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-testcontainers/src/main/java/org/springframework/boot/testcontainers/service/connection/ServiceConnection.java
/*! * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.dev/license */ import {cssValueLexer} from './css-value-lexer'; describe('css-value-lexer', () => { it('should extract the tokens for a simple static value', () => { const tokens = cssValueLexer('block'); expect(tokens).toEqual(['block']); }); it('should extract the tokens for a hex color value', () => { const tokens = cssValueLexer('#ff0000'); expect(tokens).toEqual(['#ff0000']); }); it('should extract the tokens for an RGB color value', () => { const tokens = cssValueLexer('rgb(255, 255, 0)'); expect(tokens).toEqual(['rgb', 255, 255, 0]); }); it('should extract the tokens for an RGBA color value', () => { const tokens = cssValueLexer('rgba(255, 255, 0, 0.5)'); expect(tokens).toEqual(['rgba', 255, 255, 0, 0.5]); }); it('should extract the tokens for a single numeric integer value', () => { const tokens = cssValueLexer('42px'); expect(tokens).toEqual([42, 'px']); }); it('should extract the tokens for a single numeric decimal value', () => { const tokens = cssValueLexer('66.6%'); expect(tokens).toEqual([66.6, '%']); }); it('should extract the tokens for a single numeric negative value', () => { const tokens = cssValueLexer('-50%'); expect(tokens).toEqual([-50, '%']); }); it('should extract the tokens for a single unitless numberic value', () => { const tokens = cssValueLexer('1337'); expect(tokens).toEqual([1337]); }); it('should extract the tokens for a single unitless numeric negative value', () => { const tokens = cssValueLexer('-33.3'); expect(tokens).toEqual([-33.3]); }); it('should extract the tokens for a list of numeric values', () => { const tokens = cssValueLexer('42px 13.37rem 0%'); expect(tokens).toEqual([42, 'px', 13.37, 'rem', 0, '%']); }); it('should extract the tokens for a numeric value with negative numbers', () => { const tokens = cssValueLexer('42px -13.37px 0rem 
-25%'); expect(tokens).toEqual([42, 'px', -13.37, 'px', 0, 'rem', -25, '%']); }); it('should extract the tokens for a simple transform value', () => { const tokens = cssValueLexer('translateX(42%)'); expect(tokens).toEqual(['translateX', 42, '%']); }); it('should extract the tokens for a transform value with a single function with multiple parameters', () => { const tokens = cssValueLexer('translate(42%, 0px)'); expect(tokens).toEqual(['translate', 42, '%', 0, 'px']); }); it('should extract the tokens for a transform value with multiple functions with multiple parameters', () => { const tokens = cssValueLexer('translate(42%, 0px) scale(1.5) rotate(180deg)'); expect(tokens).toEqual(['translate', 42, '%', 0, 'px', 'scale', 1.5, 'rotate', 180, 'deg']); }); it('should extract the tokens for a transform value with negative numbers', () => { const tokens = cssValueLexer('translate(42%, -13.37px) scale(-2)'); expect(tokens).toEqual(['translate', 42, '%', -13.37, 'px', 'scale', -2]); }); });
typescript
github
https://github.com/angular/angular
adev/src/app/features/home/animation/parser/css-value-lexer.spec.ts
#include "git-compat-util.h" #include "hex.h" #include "strbuf.h" #include "trace2/tr2_tbuf.h" #include "trace2/tr2_sid.h" #define TR2_ENVVAR_PARENT_SID "GIT_TRACE2_PARENT_SID" static struct strbuf tr2sid_buf = STRBUF_INIT; static int tr2sid_nr_git_parents; /* * Compute the final component of the SID representing the current process. * This should uniquely identify the process and be a valid filename (to * allow writing trace2 data to per-process files). It should also be fixed * length for possible use as a database key. * * "<yyyymmdd>T<hhmmss>.<fraction>Z-<host>-<process>" * * where <host> is a 9 character string: * "H<first_8_chars_of_sha1_of_hostname>" * "Localhost" when no hostname. * * where <process> is a 9 character string containing the least significant * 32 bits in the process-id. * "P<pid>" * (This is an abribrary choice. On most systems pid_t is a 32 bit value, * so limit doesn't matter. On larger systems, a truncated value is fine * for our purposes here.) */ static void tr2_sid_append_my_sid_component(void) { const struct git_hash_algo *algo = &hash_algos[GIT_HASH_SHA1]; struct tr2_tbuf tb_now; struct git_hash_ctx ctx; pid_t pid = getpid(); unsigned char hash[GIT_MAX_RAWSZ + 1]; char hex[GIT_MAX_HEXSZ + 1]; char hostname[HOST_NAME_MAX + 1]; tr2_tbuf_utc_datetime(&tb_now); strbuf_addstr(&tr2sid_buf, tb_now.buf); strbuf_addch(&tr2sid_buf, '-'); if (xgethostname(hostname, sizeof(hostname))) strbuf_add(&tr2sid_buf, "Localhost", 9); else { algo->init_fn(&ctx); git_hash_update(&ctx, hostname, strlen(hostname)); git_hash_final(hash, &ctx); hash_to_hex_algop_r(hex, hash, algo); strbuf_addch(&tr2sid_buf, 'H'); strbuf_add(&tr2sid_buf, hex, 8); } strbuf_addf(&tr2sid_buf, "-P%08"PRIx32, (uint32_t)pid); } /* * Compute a "unique" session id (SID) for the current process. This allows * all events from this process to have a single label (much like a PID). * * Export this into our environment so that all child processes inherit it. 
* * If we were started by another git instance, use our parent's SID as a * prefix. (This lets us track parent/child relationships even if there * is an intermediate shell process.) * * Additionally, count the number of nested git processes. */ static void tr2_sid_compute(void) { const char *parent_sid; if (tr2sid_buf.len) return; parent_sid = getenv(TR2_ENVVAR_PARENT_SID); if (parent_sid && *parent_sid) { const char *p; for (p = parent_sid; *p; p++) if (*p == '/') tr2sid_nr_git_parents++; strbuf_addstr(&tr2sid_buf, parent_sid); strbuf_addch(&tr2sid_buf, '/'); tr2sid_nr_git_parents++; } tr2_sid_append_my_sid_component(); setenv(TR2_ENVVAR_PARENT_SID, tr2sid_buf.buf, 1); } const char *tr2_sid_get(void) { if (!tr2sid_buf.len) tr2_sid_compute(); return tr2sid_buf.buf; } int tr2_sid_depth(void) { if (!tr2sid_buf.len) tr2_sid_compute(); return tr2sid_nr_git_parents; } void tr2_sid_release(void) { strbuf_release(&tr2sid_buf); }
c
github
https://github.com/git/git
trace2/tr2_sid.c
#ifndef SRC_ENV_PROPERTIES_H_ #define SRC_ENV_PROPERTIES_H_ #if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS // PER_ISOLATE_* macros: We have a lot of per-isolate properties // and adding and maintaining their getters and setters by hand would be // difficult so let's make the preprocessor generate them for us. // // In each macro, `V` is expected to be the name of a macro or function which // accepts the number of arguments provided in each tuple in the macro body, // typically two. The named function will be invoked against each tuple. // // Make sure that any macro V defined for use with the PER_ISOLATE_* macros is // undefined again after use. // Private symbols are per-isolate primitives but Environment proxies them // for the sake of convenience. Strings should be ASCII-only and have a // "node:" prefix to avoid name clashes with third-party code. #define PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(V) \ V(arrow_message_private_symbol, "node:arrowMessage") \ V(contextify_context_private_symbol, "node:contextify:context") \ V(decorated_private_symbol, "node:decorated") \ V(transfer_mode_private_symbol, "node:transfer_mode") \ V(host_defined_option_symbol, "node:host_defined_option_symbol") \ V(js_transferable_wrapper_private_symbol, "node:js_transferable_wrapper") \ V(entry_point_module_private_symbol, "node:entry_point_module") \ V(entry_point_promise_private_symbol, "node:entry_point_promise") \ V(module_source_private_symbol, "node:module_source") \ V(module_export_names_private_symbol, "node:module_export_names") \ V(module_circular_visited_private_symbol, "node:module_circular_visited") \ V(module_export_private_symbol, "node:module_export") \ V(module_first_parent_private_symbol, "node:module_first_parent") \ V(module_last_parent_private_symbol, "node:module_last_parent") \ V(napi_type_tag, "node:napi:type_tag") \ V(napi_wrapper, "node:napi:wrapper") \ V(untransferable_object_private_symbol, "node:untransferableObject") \ V(exit_info_private_symbol, 
"node:exit_info_private_symbol") \ V(promise_trace_id, "node:promise_trace_id") \ V(source_map_data_private_symbol, "node:source_map_data_private_symbol") // Symbols are per-isolate primitives but Environment proxies them // for the sake of convenience. #define PER_ISOLATE_SYMBOL_PROPERTIES(V) \ V(fs_use_promises_symbol, "fs_use_promises_symbol") \ V(async_id_symbol, "async_id_symbol") \ V(constructor_key_symbol, "constructor_key_symbol") \ V(handle_onclose_symbol, "handle_onclose") \ V(no_message_symbol, "no_message_symbol") \ V(messaging_deserialize_symbol, "messaging_deserialize_symbol") \ V(imported_cjs_symbol, "imported_cjs_symbol") \ V(messaging_transfer_symbol, "messaging_transfer_symbol") \ V(messaging_clone_symbol, "messaging_clone_symbol") \ V(messaging_transfer_list_symbol, "messaging_transfer_list_symbol") \ V(oninit_symbol, "oninit") \ V(owner_symbol, "owner_symbol") \ V(onpskexchange_symbol, "onpskexchange") \ V(resource_symbol, "resource_symbol") \ V(trigger_async_id_symbol, "trigger_async_id_symbol") \ V(builtin_source_text_module_hdo, "builtin_source_text_module_hdo") \ V(embedder_module_hdo, "embedder_module_hdo") \ V(source_text_module_default_hdo, "source_text_module_default_hdo") \ V(vm_context_no_contextify, "vm_context_no_contextify") \ V(vm_dynamic_import_default_internal, "vm_dynamic_import_default_internal") \ V(vm_dynamic_import_main_context_default, \ "vm_dynamic_import_main_context_default") \ V(vm_dynamic_import_missing_flag, "vm_dynamic_import_missing_flag") \ V(vm_dynamic_import_no_callback, "vm_dynamic_import_no_callback") // Strings are per-isolate primitives but Environment proxies them // for the sake of convenience. Strings should be ASCII-only. 
#define PER_ISOLATE_STRING_PROPERTIES(V) \ V(__filename_string, "__filename") \ V(__dirname_string, "__dirname") \ V(ack_string, "ack") \ V(address_string, "address") \ V(aliases_string, "aliases") \ V(allow_bare_named_params_string, "allowBareNamedParameters") \ V(allow_unknown_named_params_string, "allowUnknownNamedParameters") \ V(alpn_callback_string, "ALPNCallback") \ V(args_string, "args") \ V(async_ids_stack_string, "async_ids_stack") \ V(attributes_string, "attributes") \ V(backup_string, "backup") \ V(base_string, "base") \ V(base_url_string, "baseURL") \ V(buffer_string, "buffer") \ V(bytes_parsed_string, "bytesParsed") \ V(bytes_read_string, "bytesRead") \ V(bytes_written_string, "bytesWritten") \ V(cached_data_produced_string, "cachedDataProduced") \ V(cached_data_rejected_string, "cachedDataRejected") \ V(cached_data_string, "cachedData") \ V(change_string, "change") \ V(changes_string, "changes") \ V(chunks_sent_since_last_write_string, "chunksSentSinceLastWrite") \ V(clone_unsupported_type_str, "Cannot clone object of unsupported type.") \ V(clone_transfer_needed_str, \ "Object that needs transfer was found in message but not listed in " \ "transferList") \ V(clone_untransferable_str, "Found invalid value in transferList.") \ V(code_string, "code") \ V(config_string, "config") \ V(constants_string, "constants") \ V(crypto_dh_string, "dh") \ V(crypto_dsa_string, "dsa") \ V(crypto_ec_string, "ec") \ V(crypto_ed25519_string, "ed25519") \ V(crypto_ed448_string, "ed448") \ V(crypto_ml_dsa_44_string, "ml-dsa-44") \ V(crypto_ml_dsa_65_string, "ml-dsa-65") \ V(crypto_ml_dsa_87_string, "ml-dsa-87") \ V(crypto_ml_kem_512_string, "ml-kem-512") \ V(crypto_ml_kem_768_string, "ml-kem-768") \ V(crypto_ml_kem_1024_string, "ml-kem-1024") \ V(crypto_slh_dsa_sha2_128f_string, "slh-dsa-sha2-128f") \ V(crypto_slh_dsa_sha2_128s_string, "slh-dsa-sha2-128s") \ V(crypto_slh_dsa_sha2_192f_string, "slh-dsa-sha2-192f") \ V(crypto_slh_dsa_sha2_192s_string, "slh-dsa-sha2-192s") \ 
V(crypto_slh_dsa_sha2_256f_string, "slh-dsa-sha2-256f") \ V(crypto_slh_dsa_sha2_256s_string, "slh-dsa-sha2-256s") \ V(crypto_slh_dsa_shake_128f_string, "slh-dsa-shake-128f") \ V(crypto_slh_dsa_shake_128s_string, "slh-dsa-shake-128s") \ V(crypto_slh_dsa_shake_192f_string, "slh-dsa-shake-192f") \ V(crypto_slh_dsa_shake_192s_string, "slh-dsa-shake-192s") \ V(crypto_slh_dsa_shake_256f_string, "slh-dsa-shake-256f") \ V(crypto_slh_dsa_shake_256s_string, "slh-dsa-shake-256s") \ V(crypto_x25519_string, "x25519") \ V(crypto_x448_string, "x448") \ V(crypto_rsa_string, "rsa") \ V(crypto_rsa_pss_string, "rsa-pss") \ V(cwd_string, "cwd") \ V(data_string, "data") \ V(default_is_true_string, "defaultIsTrue") \ V(defensive_string, "defensive") \ V(deserialize_info_string, "deserializeInfo") \ V(dest_string, "dest") \ V(destroyed_string, "destroyed") \ V(detached_string, "detached") \ V(dh_string, "DH") \ V(dirname_string, "dirname") \ V(divisor_length_string, "divisorLength") \ V(dns_a_string, "A") \ V(dns_aaaa_string, "AAAA") \ V(dns_caa_string, "CAA") \ V(dns_cname_string, "CNAME") \ V(dns_mx_string, "MX") \ V(dns_naptr_string, "NAPTR") \ V(dns_ns_string, "NS") \ V(dns_ptr_string, "PTR") \ V(dns_soa_string, "SOA") \ V(dns_srv_string, "SRV") \ V(dns_tlsa_string, "TLSA") \ V(dns_txt_string, "TXT") \ V(done_string, "done") \ V(duration_string, "duration") \ V(ecdh_string, "ECDH") \ V(emit_string, "emit") \ V(emit_warning_string, "emitWarning") \ V(encoding_string, "encoding") \ V(env_pairs_string, "envPairs") \ V(env_var_settings_string, "envVarSettings") \ V(err_sqlite_error_string, "ERR_SQLITE_ERROR") \ V(errcode_string, "errcode") \ V(errno_string, "errno") \ V(error_string, "error") \ V(errstr_string, "errstr") \ V(events_waiting, "eventsWaiting") \ V(events, "events") \ V(exclusive_string, "exclusive") \ V(exponent_string, "exponent") \ V(exports_string, "exports") \ V(external_stream_string, "_externalStream") \ V(family_string, "family") \ V(fatal_exception_string, 
"_fatalException") \ V(fd_string, "fd") \ V(fields_string, "fields") \ V(file_string, "file") \ V(filename_string, "filename") \ V(filter_string, "filter") \ V(flags_string, "flags") \ V(flowlabel_string, "flowlabel") \ V(frames_received_string, "framesReceived") \ V(frames_sent_string, "framesSent") \ V(function_string, "function") \ V(get_string, "get") \ V(get_data_clone_error_string, "_getDataCloneError") \ V(get_shared_array_buffer_id_string, "_getSharedArrayBufferId") \ V(gid_string, "gid") \ V(groups_string, "groups") \ V(has_regexp_groups_string, "hasRegExpGroups") \ V(has_top_level_await_string, "hasTopLevelAwait") \ V(hash_string, "hash") \ V(h2_string, "h2") \ V(handle_string, "handle") \ V(hash_algorithm_string, "hashAlgorithm") \ V(help_text_string, "helpText") \ V(homedir_string, "homedir") \ V(host_string, "host") \ V(hostname_string, "hostname") \ V(href_string, "href") \ V(http_1_1_string, "http/1.1") \ V(id_string, "id") \ V(identity_string, "identity") \ V(ignore_case_string, "ignoreCase") \ V(ignore_string, "ignore") \ V(inherit_string, "inherit") \ V(input_string, "input") \ V(inverse_string, "inverse") \ V(ipv4_string, "IPv4") \ V(ipv6_string, "IPv6") \ V(isclosing_string, "isClosing") \ V(issuercert_string, "issuerCertificate") \ V(iterator_string, "Iterator") \ V(jwk_akp_string, "AKP") \ V(jwk_alg_string, "alg") \ V(jwk_crv_string, "crv") \ V(jwk_d_string, "d") \ V(jwk_dp_string, "dp") \ V(jwk_dq_string, "dq") \ V(jwk_e_string, "e") \ V(jwk_ec_string, "EC") \ V(jwk_k_string, "k") \ V(jwk_kty_string, "kty") \ V(jwk_n_string, "n") \ V(jwk_oct_string, "oct") \ V(jwk_okp_string, "OKP") \ V(jwk_p_string, "p") \ V(jwk_priv_string, "priv") \ V(jwk_pub_string, "pub") \ V(jwk_q_string, "q") \ V(jwk_qi_string, "qi") \ V(jwk_rsa_string, "RSA") \ V(jwk_x_string, "x") \ V(jwk_y_string, "y") \ V(kill_signal_string, "killSignal") \ V(kind_string, "kind") \ V(last_insert_rowid_string, "lastInsertRowid") \ V(length_string, "length") \ V(library_string, 
"library") \ V(loop_count, "loopCount") \ V(max_buffer_string, "maxBuffer") \ V(max_concurrent_streams_string, "maxConcurrentStreams") \ V(message_port_constructor_string, "MessagePort") \ V(message_port_string, "messagePort") \ V(message_string, "message") \ V(messageerror_string, "messageerror") \ V(mgf1_hash_algorithm_string, "mgf1HashAlgorithm") \ V(module_string, "module") \ V(modulus_length_string, "modulusLength") \ V(name_string, "name") \ V(named_curve_string, "namedCurve") \ V(next_string, "next") \ V(node_string, "node") \ V(object_string, "Object") \ V(ocsp_request_string, "OCSPRequest") \ V(oncertcb_string, "oncertcb") \ V(onchange_string, "onchange") \ V(onclienthello_string, "onclienthello") \ V(oncomplete_string, "oncomplete") \ V(onconflict_string, "onConflict") \ V(onconnection_string, "onconnection") \ V(ondone_string, "ondone") \ V(onerror_string, "onerror") \ V(onexit_string, "onexit") \ V(onhandshakedone_string, "onhandshakedone") \ V(onhandshakestart_string, "onhandshakestart") \ V(onkeylog_string, "onkeylog") \ V(onmessage_string, "onmessage") \ V(onnewsession_string, "onnewsession") \ V(onocspresponse_string, "onocspresponse") \ V(onreadstart_string, "onreadstart") \ V(onreadstop_string, "onreadstop") \ V(onshutdown_string, "onshutdown") \ V(onsignal_string, "onsignal") \ V(onunpipe_string, "onunpipe") \ V(onwrite_string, "onwrite") \ V(ongracefulclosecomplete_string, "ongracefulclosecomplete") \ V(openssl_error_stack, "opensslErrorStack") \ V(options_string, "options") \ V(original_string, "original") \ V(output_string, "output") \ V(overlapped_string, "overlapped") \ V(parse_error_string, "Parse Error") \ V(password_string, "password") \ V(path_string, "path") \ V(pathname_string, "pathname") \ V(pending_handle_string, "pendingHandle") \ V(permission_string, "permission") \ V(phase_string, "phase") \ V(pid_string, "pid") \ V(ping_rtt_string, "pingRTT") \ V(pipe_source_string, "pipeSource") \ V(pipe_string, "pipe") \ V(pipe_target_string, 
"pipeTarget") \ V(port1_string, "port1") \ V(port2_string, "port2") \ V(port_string, "port") \ V(primordials_string, "primordials") \ V(process_string, "process") \ V(progress_string, "progress") \ V(promise_string, "promise") \ V(protocol_string, "protocol") \ V(prototype_string, "prototype") \ V(psk_string, "psk") \ V(public_exponent_string, "publicExponent") \ V(rate_string, "rate") \ V(read_host_object_string, "_readHostObject") \ V(readable_string, "readable") \ V(read_bigints_string, "readBigInts") \ V(reason_string, "reason") \ V(remaining_pages_string, "remainingPages") \ V(rename_string, "rename") \ V(required_module_facade_url_string, \ "node:internal/require_module_default_facade") \ V(required_module_facade_source_string, \ "export * from 'original'; export { default } from 'original'; export " \ "const __esModule = true;") \ V(require_string, "require") \ V(resource_string, "resource") \ V(result_string, "result") \ V(return_arrays_string, "returnArrays") \ V(salt_length_string, "saltLength") \ V(search_string, "search") \ V(servername_string, "servername") \ V(session_id_string, "sessionId") \ V(set_string, "set") \ V(shared_string, "shared") \ V(shell_string, "shell") \ V(signal_string, "signal") \ V(sink_string, "sink") \ V(size_string, "size") \ V(sni_context_err_string, "Invalid SNI context") \ V(sni_context_string, "sni_context") \ V(source_string, "source") \ V(source_map_url_string, "sourceMapURL") \ V(source_url_string, "sourceURL") \ V(specifier_string, "specifier") \ V(stack_string, "stack") \ V(start_string, "start") \ V(state_string, "state") \ V(stats_string, "stats") \ V(status_string, "status") \ V(stdio_string, "stdio") \ V(step_string, "step") \ V(stream_average_duration_string, "streamAverageDuration") \ V(stream_count_string, "streamCount") \ V(synthetic_string, "synthetic") \ V(syscall_string, "syscall") \ V(table_string, "table") \ V(target_string, "target") \ V(thread_id_string, "threadId") \ V(thread_name_string, "threadName") \ 
V(ticketkeycallback_string, "onticketkeycallback") \ V(timeout_string, "timeout") \ V(time_to_first_byte_string, "timeToFirstByte") \ V(time_to_first_byte_sent_string, "timeToFirstByteSent") \ V(time_to_first_header_string, "timeToFirstHeader") \ V(tls_ticket_string, "tlsTicket") \ V(total_pages_string, "totalPages") \ V(transfer_string, "transfer") \ V(transfer_unsupported_type_str, \ "Cannot transfer object of unsupported type.") \ V(ttl_string, "ttl") \ V(type_string, "type") \ V(uid_string, "uid") \ V(unknown_string, "<unknown>") \ V(url_string, "url") \ V(username_string, "username") \ V(value_string, "value") \ V(verify_error_string, "verifyError") \ V(version_string, "version") \ V(windows_hide_string, "windowsHide") \ V(windows_verbatim_arguments_string, "windowsVerbatimArguments") \ V(wrap_string, "wrap") \ V(writable_string, "writable") \ V(write_host_object_string, "_writeHostObject") \ V(write_queue_size_string, "writeQueueSize") #define PER_ISOLATE_TEMPLATE_PROPERTIES(V) \ V(a_record_template, v8::DictionaryTemplate) \ V(aaaa_record_template, v8::DictionaryTemplate) \ V(async_wrap_ctor_template, v8::FunctionTemplate) \ V(binding_data_default_template, v8::ObjectTemplate) \ V(blob_constructor_template, v8::FunctionTemplate) \ V(blob_reader_constructor_template, v8::FunctionTemplate) \ V(blocklist_constructor_template, v8::FunctionTemplate) \ V(caa_record_template, v8::DictionaryTemplate) \ V(callsite_template, v8::DictionaryTemplate) \ V(cipherinfo_detail_template, v8::DictionaryTemplate) \ V(cipherinfo_template, v8::DictionaryTemplate) \ V(cname_record_template, v8::DictionaryTemplate) \ V(compiled_function_cjs_template, v8::DictionaryTemplate) \ V(compiled_function_template, v8::DictionaryTemplate) \ V(contextify_global_template, v8::ObjectTemplate) \ V(contextify_wrapper_template, v8::ObjectTemplate) \ V(cpu_usage_template, v8::DictionaryTemplate) \ V(crypto_key_object_handle_constructor, v8::FunctionTemplate) \ V(env_proxy_template, 
v8::ObjectTemplate) \ V(env_proxy_ctor_template, v8::FunctionTemplate) \ V(ephemeral_key_template, v8::DictionaryTemplate) \ V(dir_instance_template, v8::ObjectTemplate) \ V(dns_ns_record_template, v8::DictionaryTemplate) \ V(fd_constructor_template, v8::ObjectTemplate) \ V(fdclose_constructor_template, v8::ObjectTemplate) \ V(filehandlereadwrap_template, v8::ObjectTemplate) \ V(free_list_statistics_template, v8::DictionaryTemplate) \ V(fsreqpromise_constructor_template, v8::ObjectTemplate) \ V(handle_wrap_ctor_template, v8::FunctionTemplate) \ V(heap_statistics_template, v8::DictionaryTemplate) \ V(v8_heap_statistics_template, v8::DictionaryTemplate) \ V(histogram_ctor_template, v8::FunctionTemplate) \ V(http2settings_constructor_template, v8::ObjectTemplate) \ V(http2stream_constructor_template, v8::ObjectTemplate) \ V(http2ping_constructor_template, v8::ObjectTemplate) \ V(i18n_converter_template, v8::ObjectTemplate) \ V(intervalhistogram_constructor_template, v8::FunctionTemplate) \ V(iter_template, v8::DictionaryTemplate) \ V(js_transferable_constructor_template, v8::FunctionTemplate) \ V(libuv_stream_wrap_ctor_template, v8::FunctionTemplate) \ V(lock_holder_constructor_template, v8::FunctionTemplate) \ V(lock_info_template, v8::DictionaryTemplate) \ V(lock_query_template, v8::DictionaryTemplate) \ V(message_port_constructor_template, v8::FunctionTemplate) \ V(module_wrap_constructor_template, v8::FunctionTemplate) \ V(mx_record_template, v8::DictionaryTemplate) \ V(naptr_record_template, v8::DictionaryTemplate) \ V(object_stats_template, v8::DictionaryTemplate) \ V(page_stats_template, v8::DictionaryTemplate) \ V(pipe_constructor_template, v8::FunctionTemplate) \ V(script_context_constructor_template, v8::FunctionTemplate) \ V(secure_context_constructor_template, v8::FunctionTemplate) \ V(shutdown_wrap_template, v8::ObjectTemplate) \ V(soa_record_template, v8::DictionaryTemplate) \ V(socketaddress_constructor_template, v8::FunctionTemplate) \ 
V(space_stats_template, v8::DictionaryTemplate) \ V(sqlite_column_template, v8::DictionaryTemplate) \ V(sqlite_run_result_template, v8::DictionaryTemplate) \ V(sqlite_statement_sync_constructor_template, v8::FunctionTemplate) \ V(sqlite_statement_sync_iterator_constructor_template, v8::FunctionTemplate) \ V(sqlite_session_constructor_template, v8::FunctionTemplate) \ V(srv_record_template, v8::DictionaryTemplate) \ V(streambaseoutputstream_constructor_template, v8::ObjectTemplate) \ V(tcp_constructor_template, v8::FunctionTemplate) \ V(tlsa_record_template, v8::DictionaryTemplate) \ V(tty_constructor_template, v8::FunctionTemplate) \ V(txt_record_template, v8::DictionaryTemplate) \ V(urlpatterncomponentresult_template, v8::DictionaryTemplate) \ V(urlpatterninit_template, v8::DictionaryTemplate) \ V(urlpatternresult_template, v8::DictionaryTemplate) \ V(write_wrap_template, v8::ObjectTemplate) \ V(worker_cpu_profile_taker_template, v8::ObjectTemplate) \ V(worker_cpu_usage_taker_template, v8::ObjectTemplate) \ V(worker_heap_profile_taker_template, v8::ObjectTemplate) \ V(worker_heap_snapshot_taker_template, v8::ObjectTemplate) \ V(worker_heap_statistics_taker_template, v8::ObjectTemplate) \ V(x509_constructor_template, v8::FunctionTemplate) \ V(x509_dictionary_template, v8::DictionaryTemplate) #define PER_REALM_STRONG_PERSISTENT_VALUES(V) \ V(async_hooks_after_function, v8::Function) \ V(async_hooks_before_function, v8::Function) \ V(async_hooks_callback_trampoline, v8::Function) \ V(async_hooks_binding, v8::Object) \ V(async_hooks_destroy_function, v8::Function) \ V(async_hooks_init_function, v8::Function) \ V(async_hooks_promise_resolve_function, v8::Function) \ V(buffer_prototype_object, v8::Object) \ V(crypto_key_object_private_constructor, v8::Function) \ V(crypto_key_object_public_constructor, v8::Function) \ V(crypto_key_object_secret_constructor, v8::Function) \ V(enhance_fatal_stack_after_inspector, v8::Function) \ V(enhance_fatal_stack_before_inspector, 
v8::Function) \ V(get_source_map_error_source, v8::Function) \ V(host_import_module_dynamically_callback, v8::Function) \ V(host_import_meta_resolve_initializer, v8::Function) \ V(host_initialize_import_meta_object_callback, v8::Function) \ V(http2session_on_altsvc_function, v8::Function) \ V(http2session_on_error_function, v8::Function) \ V(http2session_on_frame_error_function, v8::Function) \ V(http2session_on_goaway_data_function, v8::Function) \ V(http2session_on_headers_function, v8::Function) \ V(http2session_on_origin_function, v8::Function) \ V(http2session_on_ping_function, v8::Function) \ V(http2session_on_priority_function, v8::Function) \ V(http2session_on_settings_function, v8::Function) \ V(http2session_on_stream_close_function, v8::Function) \ V(http2session_on_stream_trailers_function, v8::Function) \ V(internal_binding_loader, v8::Function) \ V(immediate_callback_function, v8::Function) \ V(inspector_console_extension_installer, v8::Function) \ V(inspector_disable_async_hooks, v8::Function) \ V(inspector_disable_network_tracking, v8::Function) \ V(inspector_enable_async_hooks, v8::Function) \ V(inspector_enable_network_tracking, v8::Function) \ V(maybe_cache_generated_source_map, v8::Function) \ V(messaging_deserialize_create_object, v8::Function) \ V(message_port, v8::Object) \ V(builtin_module_require, v8::Function) \ V(performance_entry_callback, v8::Function) \ V(prepare_stack_trace_callback, v8::Function) \ V(process_object, v8::Object) \ V(process_emit_warning_sync, v8::Function) \ V(primordials, v8::Object) \ V(primordials_safe_map_prototype_object, v8::Object) \ V(primordials_safe_set_prototype_object, v8::Object) \ V(primordials_safe_weak_map_prototype_object, v8::Object) \ V(primordials_safe_weak_set_prototype_object, v8::Object) \ V(promise_reject_callback, v8::Function) \ V(snapshot_serialize_callback, v8::Function) \ V(snapshot_deserialize_callback, v8::Function) \ V(snapshot_deserialize_main, v8::Function) \ V(source_map_cache_getter, 
v8::Function) \ V(tick_callback_function, v8::Function) \ V(timers_callback_function, v8::Function) \ V(tls_wrap_constructor_function, v8::Function) \ V(trace_category_state_function, v8::Function) \ V(udp_constructor_function, v8::Function) \ V(wasm_streaming_compilation_impl, v8::Function) \ V(wasm_streaming_object_constructor, v8::Function) #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS #endif // SRC_ENV_PROPERTIES_H_
c
github
https://github.com/nodejs/node
src/env_properties.h
/* Copyright 2017 - 2025 R. Thomas * Copyright 2017 - 2025 Quarkslab * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "LIEF/PE/LoadConfigurations/DynamicRelocation/DynamicRelocationBase.hpp" #include "LIEF/PE/LoadConfigurations/DynamicRelocation/DynamicFixup.hpp" #include "frozen.hpp" namespace LIEF::PE { DynamicRelocation::DynamicRelocation(uint32_t version) : version_(version) {} DynamicRelocation::DynamicRelocation(const DynamicRelocation& other) : version_(other.version_), symbol_(other.symbol_) { if (other.fixups_ != nullptr) { fixups_ = other.fixups_->clone(); } } DynamicRelocation& DynamicRelocation::operator=(const DynamicRelocation& other) { if (this == &other) { return *this; } version_ = other.version_; symbol_ = other.symbol_; if (other.fixups_ != nullptr) { fixups_ = other.fixups_->clone(); } return *this; } DynamicRelocation::DynamicRelocation(DynamicRelocation&&) = default; DynamicRelocation& DynamicRelocation::operator=(DynamicRelocation&&) = default; DynamicRelocation::~DynamicRelocation() = default; DynamicRelocation& DynamicRelocation::fixups(std::unique_ptr<DynamicFixup> F) { fixups_ = std::move(F); return *this; } const char* to_string(DynamicRelocation::IMAGE_DYNAMIC_RELOCATION e) { using IMAGE_DYNAMIC_RELOCATION = DynamicRelocation::IMAGE_DYNAMIC_RELOCATION; #define ENTRY(X) std::pair(IMAGE_DYNAMIC_RELOCATION::X, #X) STRING_MAP enums2str { ENTRY(RELOCATION_GUARD_RF_PROLOGUE), ENTRY(RELOCATION_GUARD_RF_EPILOGUE), 
ENTRY(RELOCATION_GUARD_IMPORT_CONTROL_TRANSFER), ENTRY(RELOCATION_GUARD_INDIR_CONTROL_TRANSFER), ENTRY(RELOCATION_GUARD_SWITCHTABLE_BRANCH), ENTRY(RELOCATION_ARM64X), ENTRY(RELOCATION_FUNCTION_OVERRIDE), ENTRY(RELOCATION_ARM64_KERNEL_IMPORT_CALL_TRANSFER), }; #undef ENTRY if (auto it = enums2str.find(e); it != enums2str.end()) { return it->second; } return "UNKNOWN"; } }
cpp
github
https://github.com/nodejs/node
deps/LIEF/src/PE/LoadConfigurations/DynamicRelocation/DynamicRelocationBase.cpp
import django from django.contrib import admin from .models import EmailConfirmation, EmailAddress from .adapter import get_adapter class EmailAddressAdmin(admin.ModelAdmin): list_display = ('email', 'user', 'primary', 'verified') list_filter = ('primary', 'verified') search_fields = [] raw_id_fields = ('user',) def __init__(self, *args, **kwargs): super(EmailAddressAdmin, self).__init__(*args, **kwargs) if not self.search_fields and django.VERSION[:2] < (1, 7): self.search_fields = self.get_search_fields(None) def get_search_fields(self, request): base_fields = get_adapter().get_user_search_fields() return ['email'] + list(map(lambda a: 'user__' + a, base_fields)) class EmailConfirmationAdmin(admin.ModelAdmin): list_display = ('email_address', 'created', 'sent', 'key') list_filter = ('sent',) raw_id_fields = ('email_address',) admin.site.register(EmailConfirmation, EmailConfirmationAdmin) admin.site.register(EmailAddress, EmailAddressAdmin)
unknown
codeparrot/codeparrot-clean
import {CONST_NUMBER0, invoke} from 'shared-runtime'; function Foo() { const x = [{value: 0}, {value: 1}, {value: 2}]; const param = CONST_NUMBER0; const foo = () => { return x[param].value; }; return invoke(foo); } export const FIXTURE_ENTRYPOINT = { fn: Foo, params: [{}], };
typescript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/lambda-array-access-member-expr-captured.ts
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import xml.etree.ElementTree as ET from textwrap import dedent from twitter.common.collections import OrderedSet from pants.backend.core.register import build_file_aliases as register_core from pants.backend.jvm.ivy_utils import (IvyInfo, IvyModule, IvyModuleRef, IvyResolveMappingError, IvyUtils) from pants.backend.jvm.jar_dependency_utils import M2Coordinate from pants.backend.jvm.register import build_file_aliases as register_jvm from pants.backend.jvm.targets.exclude import Exclude from pants.backend.jvm.targets.jar_dependency import JarDependency from pants.backend.jvm.targets.jar_library import JarLibrary from pants.ivy.ivy_subsystem import IvySubsystem from pants.util.contextutil import temporary_dir, temporary_file_path from pants_test.base_test import BaseTest def coord(org, name, classifier=None, rev=None, ext=None): rev = rev or '0.0.1' return M2Coordinate(org=org, name=name, rev=rev, classifier=classifier, ext=ext) class IvyUtilsTestBase(BaseTest): @property def alias_groups(self): return register_core().merge(register_jvm()) class IvyUtilsGenerateIvyTest(IvyUtilsTestBase): def setUp(self): super(IvyUtilsGenerateIvyTest, self).setUp() self.add_to_build_file('src/java/targets', dedent(""" jar_library( name='a', jars=[ jar('org1', 'name1', 'rev1'), jar('org2', 'name2', 'rev2', force=True), ], ) """)) self.b_org = 'com.example' self.b_name = 'b' self.add_to_build_file('src/java/targets', dedent(""" java_library( name='b', dependencies=[':a'], provides=artifact('{org}', '{name}', repo=repository()), sources=['z.java'], ) """.format(org=self.b_org, name=self.b_name))) self.add_to_build_file('3rdparty', dedent(""" jar_library( name='example-morx', jars = [ jar(org='commons-lang', 
name='commons-lang', rev='2.5', classifier='morx'), ] ) jar_library( name='example-fleem', jars = [ jar(org='commons-lang', name='commons-lang', rev='2.5', classifier='fleem'), ] ) """)) self.add_to_build_file('src/java/targets', dedent(""" java_library( name='c', dependencies=[ '3rdparty:example-morx', '3rdparty:example-fleem', ], sources=['w.java'], ) """.format(org=self.b_org, name=self.b_name))) self.add_to_build_file('src/java/targets', dedent(""" java_library( name='e', dependencies=[ '3rdparty:example-morx', '3rdparty:example-fleem', ], excludes=[exclude(org='commons-lang', name='commons-lang')], sources=['w.java'], ) """.format(org=self.b_org, name=self.b_name))) self.a = self.target('src/java/targets:a') self.b = self.target('src/java/targets:b') self.c = self.target('src/java/targets:c') self.e = self.target('src/java/targets:e') def test_exclude_exported(self): jars, excludes = IvyUtils.calculate_classpath([self.b]) for jar in jars: self.assertEqual(jar.excludes, (Exclude(self.b_org, self.b_name),)) self.assertEqual(excludes, set()) def test_exclude_exported_disabled_when_no_excludes_gathered(self): _, excludes = IvyUtils.calculate_classpath([self.b], gather_excludes=False) self.assertSetEqual(excludes, set()) def test_excludes_generated_when_requested(self): _, excludes = IvyUtils.calculate_classpath([self.e], gather_excludes=True) self.assertSetEqual(excludes, {Exclude(org='commons-lang', name='commons-lang')}) def test_excludes_empty_when_not_requested(self): _, excludes = IvyUtils.calculate_classpath([self.e], gather_excludes=False) self.assertSetEqual(excludes, set()) def test_classifiers(self): jars, _ = IvyUtils.calculate_classpath([self.c]) jars.sort(key=lambda jar: jar.classifier) self.assertEquals(['fleem', 'morx'], [jar.classifier for jar in jars]) def test_module_ref_str_minus_classifier(self): module_ref = IvyModuleRef(org='org', name='name', rev='rev') self.assertEquals("IvyModuleRef(org:name:rev::jar)", str(module_ref)) def 
test_force_override(self): jars = list(self.a.payload.jars) with temporary_file_path() as ivyxml: IvyUtils.generate_ivy([self.a], jars=jars, excludes=[], ivyxml=ivyxml, confs=['default']) doc = ET.parse(ivyxml).getroot() conf = self.find_single(doc, 'configurations/conf') self.assert_attributes(conf, name='default') dependencies = list(doc.findall('dependencies/dependency')) self.assertEqual(2, len(dependencies)) dep1 = dependencies[0] self.assert_attributes(dep1, org='org1', name='name1', rev='rev1') conf = self.find_single(dep1, 'conf') self.assert_attributes(conf, name='default', mapped='default') dep2 = dependencies[1] self.assert_attributes(dep2, org='org2', name='name2', rev='rev2', force='true') conf = self.find_single(dep1, 'conf') self.assert_attributes(conf, name='default', mapped='default') override = self.find_single(doc, 'dependencies/override') self.assert_attributes(override, org='org2', module='name2', rev='rev2') def test_resove_conflict_no_conflicts(self): v1 = JarDependency('org.example', 'foo', '1', force=False) v1_force = JarDependency('org.example', 'foo', '1', force=True) v2 = JarDependency('org.example', 'foo', '2', force=False) # If neither version is forced, use the latest version. self.assertIs(v2, IvyUtils._resolve_conflict(v1, v2)) self.assertIs(v2, IvyUtils._resolve_conflict(v2, v1)) # If an earlier version is forced, use the forced version. self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v2)) self.assertIs(v1_force, IvyUtils._resolve_conflict(v2, v1_force)) # If the same version is forced, use the forced version. self.assertIs(v1_force, IvyUtils._resolve_conflict(v1, v1_force)) self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v1)) # If the same force is in play in multiple locations, allow it. 
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v1_force)) def test_resolve_conflict_conflict(self): v1_force = JarDependency('org.example', 'foo', '1', force=True) v2_force = JarDependency('org.example', 'foo', '2', force=True) with self.assertRaises(IvyUtils.IvyResolveConflictingDepsError): IvyUtils._resolve_conflict(v1_force, v2_force) with self.assertRaises(IvyUtils.IvyResolveConflictingDepsError): IvyUtils._resolve_conflict(v2_force, v1_force) def test_get_resolved_jars_for_jar_library(self): ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_diamond.xml') lib = self.make_target(spec=':org1-name1', target_type=JarLibrary, jars=[JarDependency(org='org1', name='name1', rev='0.0.1', classifier='tests')]) resolved_jars = ivy_info.get_resolved_jars_for_jar_library(lib) expected = {'ivy2cache_path/org1/name1.jar': coord(org='org1', name='name1', classifier='tests'), 'ivy2cache_path/org2/name2.jar': coord(org='org2', name='name2'), 'ivy2cache_path/org3/name3.tar.gz': coord(org='org3', name='name3', ext='tar.gz')} self.maxDiff = None coordinate_by_path = {rj.cache_path: rj.coordinate for rj in resolved_jars} self.assertEqual(expected, coordinate_by_path) def test_resolved_jars_with_different_version(self): # If a jar is resolved as a different version than the requested one, the coordinates of # the resolved jar should match the artifact, not the requested coordinates. 
lib = self.make_target(spec=':org1-name1', target_type=JarLibrary, jars=[ JarDependency(org='org1', name='name1', rev='0.0.1', classifier='tests')]) ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_resolve_to_other_version.xml') resolved_jars = ivy_info.get_resolved_jars_for_jar_library(lib) self.maxDiff = None self.assertEqual([coord(org='org1', name='name1', classifier='tests', rev='0.0.2')], [jar.coordinate for jar in resolved_jars]) def test_does_not_visit_diamond_dep_twice(self): ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_diamond.xml') ref = IvyModuleRef("toplevel", "toplevelmodule", "latest") seen = set() def collector(r): self.assertNotIn(r, seen) seen.add(r) return {r} result = ivy_info.traverse_dependency_graph(ref, collector) self.assertEqual({IvyModuleRef("toplevel", "toplevelmodule", "latest"), IvyModuleRef(org='org1', name='name1', rev='0.0.1', classifier='tests'), IvyModuleRef(org='org2', name='name2', rev='0.0.1'), IvyModuleRef(org='org3', name='name3', rev='0.0.1', ext='tar.gz')}, result) def test_does_not_follow_cycle(self): ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_cycle.xml') ref = IvyModuleRef("toplevel", "toplevelmodule", "latest") seen = set() def collector(r): self.assertNotIn(r, seen) seen.add(r) return {r} result = ivy_info.traverse_dependency_graph(ref, collector) self.assertEqual( { IvyModuleRef("toplevel", "toplevelmodule", "latest"), IvyModuleRef(org='org1', name='name1', rev='0.0.1'), IvyModuleRef(org='org2', name='name2', rev='0.0.1'), IvyModuleRef(org='org3', name='name3', rev='0.0.1') }, result) def test_memo_reused_across_calls(self): ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_diamond.xml') ref = IvyModuleRef(org='org1', name='name1', rev='0.0.1') def collector(r): return {r} memo = dict() result1 = ivy_info.traverse_dependency_graph(ref, collector, memo=memo) result2 = ivy_info.traverse_dependency_graph(ref, collector, memo=memo) 
self.assertIs(result1, result2) self.assertEqual( { IvyModuleRef(org='org1', name='name1', rev='0.0.1'), IvyModuleRef(org='org2', name='name2', rev='0.0.1'), IvyModuleRef(org='org3', name='name3', rev='0.0.1', ext='tar.gz') }, result1) def test_parse_fails_when_same_classifier_different_type(self): with self.assertRaises(IvyResolveMappingError): self.parse_ivy_report('ivy_utils_resources/report_with_same_classifier_different_type.xml') def find_single(self, elem, xpath): results = list(elem.findall(xpath)) self.assertEqual(1, len(results)) return results[0] def assert_attributes(self, elem, **kwargs): self.assertEqual(dict(**kwargs), dict(elem.attrib)) def test_symlink_cachepath(self): self.maxDiff = None with temporary_dir() as mock_cache_dir: with temporary_dir() as symlink_dir: with temporary_dir() as classpath_dir: input_path = os.path.join(classpath_dir, 'inpath') output_path = os.path.join(classpath_dir, 'classpath') foo_path = os.path.join(mock_cache_dir, 'foo.jar') with open(foo_path, 'w') as foo: foo.write("test jar contents") with open(input_path, 'w') as inpath: inpath.write(foo_path) result_map = IvyUtils.symlink_cachepath(mock_cache_dir, input_path, symlink_dir, output_path) symlink_foo_path = os.path.join(symlink_dir, 'foo.jar') self.assertEquals( { os.path.realpath(foo_path): symlink_foo_path }, result_map) with open(output_path, 'r') as outpath: self.assertEquals(symlink_foo_path, outpath.readline()) self.assertTrue(os.path.islink(symlink_foo_path)) self.assertTrue(os.path.exists(symlink_foo_path)) # Now add an additional path to the existing map bar_path = os.path.join(mock_cache_dir, 'bar.jar') with open(bar_path, 'w') as bar: bar.write("test jar contents2") with open(input_path, 'w') as inpath: inpath.write(os.pathsep.join([foo_path, bar_path])) result_map = IvyUtils.symlink_cachepath(mock_cache_dir, input_path, symlink_dir, output_path) symlink_bar_path = os.path.join(symlink_dir, 'bar.jar') self.assertEquals( { os.path.realpath(foo_path): 
symlink_foo_path, os.path.realpath(bar_path): symlink_bar_path, }, result_map) with open(output_path, 'r') as outpath: self.assertEquals(symlink_foo_path + os.pathsep + symlink_bar_path, outpath.readline()) self.assertTrue(os.path.islink(symlink_foo_path)) self.assertTrue(os.path.exists(symlink_foo_path)) self.assertTrue(os.path.islink(symlink_bar_path)) self.assertTrue(os.path.exists(symlink_bar_path)) # Reverse the ordering and make sure order is preserved in the output path with open(input_path, 'w') as inpath: inpath.write(os.pathsep.join([bar_path, foo_path])) IvyUtils.symlink_cachepath(mock_cache_dir, input_path, symlink_dir, output_path) with open(output_path, 'r') as outpath: self.assertEquals(symlink_bar_path + os.pathsep + symlink_foo_path, outpath.readline()) def test_missing_ivy_report(self): self.set_options_for_scope(IvySubsystem.options_scope, cache_dir='DOES_NOT_EXIST', use_nailgun=False) # Hack to initialize Ivy subsystem self.context() with self.assertRaises(IvyUtils.IvyResolveReportError): IvyUtils.parse_xml_report('INVALID_CACHE_DIR', 'INVALID_REPORT_UNIQUE_NAME', 'default') def parse_ivy_report(self, rel_path): path = os.path.join('tests/python/pants_test/backend/jvm/tasks', rel_path) ivy_info = IvyUtils._parse_xml_report(conf='default', path=path) self.assertIsNotNone(ivy_info) return ivy_info def test_ivy_module_ref_cmp(self): self.assertEquals( IvyModuleRef('foo', 'bar', '1.2.3'), IvyModuleRef('foo', 'bar', '1.2.3')) self.assertTrue( IvyModuleRef('foo1', 'bar', '1.2.3') < IvyModuleRef('foo2', 'bar', '1.2.3')) self.assertTrue( IvyModuleRef('foo2', 'bar', '1.2.3') >IvyModuleRef('foo1', 'bar', '1.2.3')) self.assertTrue( IvyModuleRef('foo', 'bar1', '1.2.3') < IvyModuleRef('foo', 'bar2', '1.2.3')) self.assertTrue( IvyModuleRef('foo', 'bar2', '1.2.3') > IvyModuleRef('foo', 'bar1', '1.2.3')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3') < IvyModuleRef('foo', 'bar', '1.2.4')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.4') > 
IvyModuleRef('foo', 'bar', '1.2.3')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3', ext='jar') < IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz') > IvyModuleRef('foo', 'bar', '1.2.3', ext='jar')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3', ext='jar', classifier='javadoc') < IvyModuleRef('foo', 'bar', '1.2.3', ext='jar', classifier='sources')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz', classifier='sources') > IvyModuleRef('foo', 'bar', '1.2.3', ext='jar', classifier='javadoc')) # make sure rev is sorted last self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.4', classifier='javadoc') < IvyModuleRef('foo', 'bar', '1.2.3', classifier='sources')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3', classifier='sources') > IvyModuleRef('foo', 'bar', '1.2.4', classifier='javadoc')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.4', ext='jar') < IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz')) self.assertTrue( IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz') > IvyModuleRef('foo', 'bar', '1.2.4', ext='jar')) def test_traverse_dep_graph_sorted(self): """Make sure the modules are returned in a deterministic order by name""" def make_ref(org, name): return IvyModuleRef(org=org, name=name, rev='1.0') ref1 = make_ref('foo', '1') ref2 = make_ref('foo', 'child1') ref3 = make_ref('foo', 'child2') ref4 = make_ref('foo', 'child3') ref5 = make_ref('foo', 'grandchild1') ref6 = make_ref('foo', 'grandchild2') module1 = IvyModule(ref1, '/foo', []) module2 = IvyModule(ref2, '/foo', [ref1]) module3 = IvyModule(ref3, '/foo', [ref1]) module4 = IvyModule(ref4, '/foo', [ref1]) module5 = IvyModule(ref5, '/foo', [ref3]) module6 = IvyModule(ref6, '/foo', [ref3]) def assert_order(inputs): info = IvyInfo('default') for module in inputs: info.add_module(module) def collector(dep): return OrderedSet([dep]) result = [ref for ref in info.traverse_dependency_graph(ref1, collector)] self.assertEquals([ref1, 
ref2, ref3, ref5, ref6, ref4], result) # Make sure the order remains unchanged no matter what order we insert the into the structure assert_order([module1, module2, module3, module4, module5, module6]) assert_order([module6, module5, module4, module3, module2, module1]) assert_order([module5, module1, module2, module6, module3, module4]) assert_order([module6, module4, module3, module1 ,module2, module5]) assert_order([module4, module2, module1, module3, module6, module5]) assert_order([module4, module2, module5, module6, module1, module3])
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: ucs_mac_pool short_description: Configures MAC address pools on Cisco UCS Manager description: - Configures MAC address pools and MAC address blocks on Cisco UCS Manager. - Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe). extends_documentation_fragment: ucs options: state: description: - If C(present), will verify MAC pool is present and will create if needed. - If C(absent), will verify MAC pool is absent and will delete if needed. choices: [present, absent] default: present name: description: - The name of the MAC pool. - This name can be between 1 and 32 alphanumeric characters. - "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)." - You cannot change this name after the MAC pool is created. required: yes descrption: description: - A description of the MAC pool. - Enter up to 256 characters. - "You can use any characters or spaces except the following:" - "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)." aliases: [ descr ] order: description: - The Assignment Order field. - "This can be one of the following:" - "default - Cisco UCS Manager selects a random identity from the pool." - "sequential - Cisco UCS Manager selects the lowest available identity from the pool." choices: [default, sequential] default: default first_addr: description: - The first MAC address in the block of addresses. - This is the From field in the UCS Manager MAC Blocks menu. last_addr: description: - The last MAC address in the block of addresses. 
- This is the To field in the UCS Manager Add MAC Blocks menu. org_dn: description: - The distinguished name (dn) of the organization where the resource is assigned. default: org-root requirements: - ucsmsdk author: - David Soper (@dsoper2) - CiscoUcs (@CiscoUcs) version_added: '2.5' ''' EXAMPLES = r''' - name: Configure MAC address pool ucs_mac_pool: hostname: 172.16.143.150 username: admin password: password name: mac-A first_addr: 00:25:B5:00:66:00 last_addr: 00:25:B5:00:67:F3 order: sequential - name: Remove MAC address pool ucs_mac_pool: hostname: 172.16.143.150 username: admin password: password name: mac-A state: absent ''' RETURN = r''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec def main(): argument_spec = ucs_argument_spec argument_spec.update( org_dn=dict(type='str', default='org-root'), name=dict(type='str', required=True), descr=dict(type='str', default=''), order=dict(type='str', default='default', choices=['default', 'sequential']), first_addr=dict(type='str'), last_addr=dict(type='str'), state=dict(default='present', choices=['present', 'absent'], type='str'), ) module = AnsibleModule( argument_spec, supports_check_mode=True, ) # UCSModule verifies ucsmsdk is present and exits on failure. Imports are below ucs object creation. 
ucs = UCSModule(module) err = False from ucsmsdk.mometa.macpool.MacpoolPool import MacpoolPool from ucsmsdk.mometa.macpool.MacpoolBlock import MacpoolBlock changed = False try: mo_exists = False props_match = False # dn is <org_dn>/mac-pool-<name> dn = module.params['org_dn'] + '/mac-pool-' + module.params['name'] mo = ucs.login_handle.query_dn(dn) if mo: mo_exists = True if module.params['state'] == 'absent': if mo_exists: if not module.check_mode: ucs.login_handle.remove_mo(mo) ucs.login_handle.commit() changed = True else: if mo_exists: # check top-level mo props kwargs = dict(assignment_order=module.params['order']) kwargs['descr'] = module.params['descr'] if (mo.check_prop_match(**kwargs)): # top-level props match, check next level mo/props if module.params['last_addr'] and module.params['first_addr']: # mac address block specified, check properties block_dn = dn + '/block-' + module.params['first_addr'].upper() + '-' + module.params['last_addr'].upper() mo_1 = ucs.login_handle.query_dn(block_dn) if mo_1: props_match = True else: # no MAC address block specified, but top-level props matched props_match = True if not props_match: if not module.check_mode: # create if mo does not already exist mo = MacpoolPool( parent_mo_or_dn=module.params['org_dn'], name=module.params['name'], descr=module.params['descr'], assignment_order=module.params['order'], ) if module.params['last_addr'] and module.params['first_addr']: mo_1 = MacpoolBlock( parent_mo_or_dn=mo, to=module.params['last_addr'], r_from=module.params['first_addr'], ) ucs.login_handle.add_mo(mo, True) ucs.login_handle.commit() changed = True except Exception as e: err = True ucs.result['msg'] = "setup error: %s " % str(e) ucs.result['changed'] = changed if err: module.fail_json(**ucs.result) module.exit_json(**ucs.result) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# Copyright (C) 2007-2015 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. """The administrivia rule.""" __all__ = [ 'Administrivia', ] from email.iterators import typed_subpart_iterator from mailman.config import config from mailman.core.i18n import _ from mailman.interfaces.rules import IRule from zope.interface import implementer # The list of email commands we search for in the Subject header and payload. # We probably should get this information from the actual implemented # commands. EMAIL_COMMANDS = { # keyword: (minimum #args, maximum #args) 'confirm': (1, 1), 'help': (0, 0), 'info': (0, 0), 'lists': (0, 0), 'options': (0, 0), 'password': (2, 2), 'remove': (0, 0), 'set': (3, 3), 'subscribe': (0, 3), 'unsubscribe': (0, 1), 'who': (0, 2), } @implementer(IRule) class Administrivia: """The administrivia rule.""" name = 'administrivia' description = _('Catch mis-addressed email commands.') record = True def check(self, mlist, msg, msgdata): """See `IRule`.""" # The list must have the administrivia check enabled. if not mlist.administrivia: return False # First check the Subject text. lines_to_check = [] subject = str(msg.get('subject', '')) if subject != '': lines_to_check.append(subject) # Search only the first text/plain subpart of the message. 
There's # really no good way to find email commands in any other content type. for part in typed_subpart_iterator(msg, 'text', 'plain'): payload = part.get_payload() lines = payload.splitlines() # Count lines without using enumerate() because blank lines in the # payload don't count against the maximum examined. lineno = 0 for line in lines: line = line.strip() if len(line) == 0: continue lineno += 1 if lineno > int(config.mailman.email_commands_max_lines): break lines_to_check.append(line) # Only look at the first text/plain part. break # For each line we're checking, split the line into words. Then see # if it looks like a command with the min-to-max number of arguments. for line in lines_to_check: words = [word.lower() for word in line.split()] if words[0] not in EMAIL_COMMANDS: # This is not an administrivia command. continue minargs, maxargs = EMAIL_COMMANDS[words[0]] if minargs <= len(words) - 1 <= maxargs: return True return False
unknown
codeparrot/codeparrot-clean
import sys import os import shutil me_filename = 'mediaelement' mep_filename = 'mediaelementplayer' combined_filename = 'mediaelement-and-player' # BUILD MediaElement (single file) print('building MediaElement.js') me_files = [] me_files.append('me-header.js') me_files.append('me-namespace.js') me_files.append('me-utility.js') me_files.append('me-plugindetector.js') me_files.append('me-featuredetection.js') me_files.append('me-mediaelements.js') me_files.append('me-shim.js') code = '' for item in me_files: src_file = open('js/' + item,'r') code += src_file.read() + "\n" tmp_file = open('../build/' + me_filename + '.js','w') tmp_file.write(code) tmp_file.close() # BUILD MediaElementPlayer (single file) print('building MediaElementPlayer.js') mep_files = [] mep_files.append('mep-header.js') mep_files.append('mep-library.js') mep_files.append('mep-player.js') mep_files.append('mep-feature-playpause.js') mep_files.append('mep-feature-stop.js') mep_files.append('mep-feature-progress.js') mep_files.append('mep-feature-time.js') mep_files.append('mep-feature-volume.js') mep_files.append('mep-feature-fullscreen.js') mep_files.append('mep-feature-tracks.js') mep_files.append('mep-feature-contextmenu.js') code = '' for item in mep_files: src_file = open('js/' + item,'r') code += src_file.read() + "\n" tmp_file = open('../build/' + mep_filename + '.js','w') tmp_file.write(code) tmp_file.close() # MINIFY both scripts print('Minifying JavaScript') # os.system("java -jar yuicompressor-2.4.2.jar ../build/" + me_filename + ".js -o ../build/" + me_filename + ".min.js --charset utf-8 -v") # os.system("java -jar yuicompressor-2.4.2.jar ../build/" + mep_filename + ".js -o ../build/" + mep_filename + ".min.js --charset utf-8 -v") os.system("java -jar compiler.jar --js ../build/" + me_filename + ".js --js_output_file ../build/" + me_filename + ".min.js") os.system("java -jar compiler.jar --js ../build/" + mep_filename + ".js --js_output_file ../build/" + mep_filename + ".min.js") # 
PREPEND intros def addHeader(headerFilename, filename): # get the header text tmp_file = open(headerFilename) header_txt = tmp_file.read(); tmp_file.close() # read the current contents of the file tmp_file = open(filename) file_txt = tmp_file.read() tmp_file.close() # open the file again for writing tmp_file = open(filename, 'w') tmp_file.write(header_txt) # write the original contents tmp_file.write(file_txt) tmp_file.close() addHeader('js/me-header.js', '../build/' + me_filename + '.min.js') addHeader('js/mep-header.js', '../build/' + mep_filename + '.min.js') # COMBINE into single script print('Combining scripts') code = '' src_file = open('../build/' + me_filename + '.js','r') code += src_file.read() + "\n" src_file = open('../build/' + mep_filename + '.js','r') code += src_file.read() + "\n" tmp_file = open('../build/' + combined_filename + '.js','w') tmp_file.write(code) tmp_file.close() code = '' src_file = open('../build/' + me_filename + '.min.js','r') code += src_file.read() + "\n" src_file = open('../build/' + mep_filename + '.min.js','r') code += src_file.read() + "\n" tmp_file = open('../build/' + combined_filename + '.min.js','w') tmp_file.write(code) tmp_file.close() # MINIFY CSS print('Minifying CSS') src_file = open('css/mediaelementplayer.css','r') tmp_file = open('../build/mediaelementplayer.css','w') tmp_file.write(src_file.read()) tmp_file.close() os.system("java -jar yuicompressor-2.4.2.jar ../build/mediaelementplayer.css -o ../build/mediaelementplayer.min.css --charset utf-8 -v") #COPY skin files print('Copying Skin Files') shutil.copy2('css/controls.png','../build/controls.png') shutil.copy2('css/bigplay.png','../build/bigplay.png') shutil.copy2('css/loading.gif','../build/loading.gif') shutil.copy2('css/mejs-skins.css','../build/mejs-skins.css') shutil.copy2('css/controls-ted.png','../build/controls-ted.png') shutil.copy2('css/controls-wmp.png','../build/controls-wmp.png') 
shutil.copy2('css/controls-wmp-bg.png','../build/controls-wmp-bg.png') print('DONE!')
unknown
codeparrot/codeparrot-clean
//---------------------------------------------------------------------------// // Copyright (c) 2013 Kyle Lutz <kyle.r.lutz@gmail.com> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #ifndef BOOST_COMPUTE_EXCEPTION_HPP #define BOOST_COMPUTE_EXCEPTION_HPP /// \file /// /// Meta-header to include all Boost.Compute exception headers. #include <boost/compute/exception/context_error.hpp> #include <boost/compute/exception/no_device_found.hpp> #include <boost/compute/exception/opencl_error.hpp> #include <boost/compute/exception/unsupported_extension_error.hpp> #endif // BOOST_COMPUTE_EXCEPTION_HPP
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/compute/exception.hpp
#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2008-2010 (ita) import sys from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_dmd(conf): """ Find the program *dmd*, *dmd2*, or *ldc* and set the variable *D* """ conf.find_program(['dmd', 'dmd2', 'ldc'], var='D') # make sure that we're dealing with dmd1, dmd2, or ldc(1) out = conf.cmd_and_log(conf.env.D + ['--help']) if out.find("D Compiler v") == -1: out = conf.cmd_and_log(conf.env.D + ['-version']) if out.find("based on DMD v1.") == -1: conf.fatal("detected compiler is not dmd/ldc") @conf def common_flags_ldc(conf): """ Set the D flags required by *ldc* """ v = conf.env v['DFLAGS'] = ['-d-version=Posix'] v['LINKFLAGS'] = [] v['DFLAGS_dshlib'] = ['-relocation-model=pic'] @conf def common_flags_dmd(conf): """ Set the flags required by *dmd* or *dmd2* """ v = conf.env # _DFLAGS _DIMPORTFLAGS # Compiler is dmd so 'gdc' part will be ignored, just # ensure key is there, so wscript can append flags to it #v['DFLAGS'] = ['-version=Posix'] v['D_SRC_F'] = ['-c'] v['D_TGT_F'] = '-of%s' # linker v['D_LINKER'] = v['D'] v['DLNK_SRC_F'] = '' v['DLNK_TGT_F'] = '-of%s' v['DINC_ST'] = '-I%s' v['DSHLIB_MARKER'] = v['DSTLIB_MARKER'] = '' v['DSTLIB_ST'] = v['DSHLIB_ST'] = '-L-l%s' v['DSTLIBPATH_ST'] = v['DLIBPATH_ST'] = '-L-L%s' v['LINKFLAGS_dprogram']= ['-quiet'] v['DFLAGS_dshlib'] = ['-fPIC'] v['LINKFLAGS_dshlib'] = ['-L-shared'] v['DHEADER_ext'] = '.di' v.DFLAGS_d_with_header = ['-H', '-Hf'] v['D_HDR_F'] = '%s' def configure(conf): """ Configuration for *dmd*, *dmd2*, and *ldc* """ conf.find_dmd() if sys.platform == 'win32': out = conf.cmd_and_log(conf.env.D + ['--help']) if out.find("D Compiler v2.") > -1: conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead') conf.load('ar') conf.load('d') conf.common_flags_dmd() conf.d_platform_flags() if str(conf.env.D).find('ldc') > -1: conf.common_flags_ldc()
unknown
codeparrot/codeparrot-clean
#ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #include "common.hpp" namespace np::highway::qsort_simd { #include "highway_qsort.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template <typename T> void QSort, (T *arr, npy_intp size)) #include "highway_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template <typename T> void QSort, (T *arr, npy_intp size)) } // np::highway::qsort_simd #endif // NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP
unknown
github
https://github.com/numpy/numpy
numpy/_core/src/npysort/highway_qsort.hpp
<?php namespace Illuminate\Notifications; use Illuminate\Bus\Queueable; use Illuminate\Contracts\Queue\ShouldBeEncrypted; use Illuminate\Contracts\Queue\ShouldQueue; use Illuminate\Contracts\Queue\ShouldQueueAfterCommit; use Illuminate\Database\Eloquent\Collection as EloquentCollection; use Illuminate\Database\Eloquent\Model; use Illuminate\Queue\InteractsWithQueue; use Illuminate\Queue\SerializesModels; use Illuminate\Support\Collection; class SendQueuedNotifications implements ShouldQueue { use InteractsWithQueue, Queueable, SerializesModels; /** * The notifiable entities that should receive the notification. * * @var \Illuminate\Support\Collection */ public $notifiables; /** * The notification to be sent. * * @var \Illuminate\Notifications\Notification */ public $notification; /** * All of the channels to send the notification to. * * @var array */ public $channels; /** * The number of times the job may be attempted. * * @var int */ public $tries; /** * The number of seconds the job can run before timing out. * * @var int */ public $timeout; /** * The maximum number of unhandled exceptions to allow before failing. * * @var int */ public $maxExceptions; /** * Indicates if the job should be encrypted. * * @var bool */ public $shouldBeEncrypted = false; /** * Create a new job instance. * * @param \Illuminate\Notifications\Notifiable|\Illuminate\Support\Collection $notifiables * @param \Illuminate\Notifications\Notification $notification * @param array|null $channels */ public function __construct($notifiables, $notification, ?array $channels = null) { $this->channels = $channels; $this->notification = $notification; $this->notifiables = $this->wrapNotifiables($notifiables); $this->tries = property_exists($notification, 'tries') ? $notification->tries : null; $this->timeout = property_exists($notification, 'timeout') ? $notification->timeout : null; $this->maxExceptions = property_exists($notification, 'maxExceptions') ? 
$notification->maxExceptions : null; if ($notification instanceof ShouldQueueAfterCommit) { $this->afterCommit = true; } else { $this->afterCommit = property_exists($notification, 'afterCommit') ? $notification->afterCommit : null; } $this->shouldBeEncrypted = $notification instanceof ShouldBeEncrypted; } /** * Wrap the notifiable(s) in a collection. * * @param \Illuminate\Notifications\Notifiable|\Illuminate\Support\Collection $notifiables * @return \Illuminate\Support\Collection */ protected function wrapNotifiables($notifiables) { if ($notifiables instanceof Collection) { return $notifiables; } elseif ($notifiables instanceof Model) { return EloquentCollection::wrap($notifiables); } return Collection::wrap($notifiables); } /** * Send the notifications. * * @param \Illuminate\Notifications\ChannelManager $manager * @return void */ public function handle(ChannelManager $manager) { $manager->sendNow($this->notifiables, $this->notification, $this->channels); } /** * Get the display name for the queued job. * * @return string */ public function displayName() { return get_class($this->notification); } /** * Call the failed method on the notification instance. * * @param \Throwable $e * @return void */ public function failed($e) { if (method_exists($this->notification, 'failed')) { $this->notification->failed($e); } } /** * Get the number of seconds before a released notification will be available. * * @return mixed */ public function backoff() { if (! method_exists($this->notification, 'backoff') && ! isset($this->notification->backoff)) { return; } return $this->notification->backoff ?? $this->notification->backoff(); } /** * Determine the time at which the job should timeout. * * @return \DateTime|null */ public function retryUntil() { if (! method_exists($this->notification, 'retryUntil') && ! isset($this->notification->retryUntil)) { return; } return $this->notification->retryUntil ?? $this->notification->retryUntil(); } /** * Prepare the instance for cloning. 
* * @return void */ public function __clone() { $this->notifiables = clone $this->notifiables; $this->notification = clone $this->notification; } }
php
github
https://github.com/laravel/framework
src/Illuminate/Notifications/SendQueuedNotifications.php
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # FIXME: Store this as a .patch file in some new fixtures directory or similar. 
DIFF_TEST_DATA = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h index f5d5e74..3b6aa92 100644 --- a/WebCore/rendering/style/StyleFlexibleBoxData.h +++ b/WebCore/rendering/style/StyleFlexibleBoxData.h @@ -47,7 +47,6 @@ public: unsigned align : 3; // EBoxAlignment unsigned pack: 3; // EBoxAlignment - unsigned orient: 1; // EBoxOrient unsigned lines : 1; // EBoxLines private: diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp index ce21720..324929e 100644 --- a/WebCore/rendering/style/StyleRareInheritedData.cpp +++ b/WebCore/rendering/style/StyleRareInheritedData.cpp @@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData() , textSizeAdjust(RenderStyle::initialTextSizeAdjust()) , resize(RenderStyle::initialResize()) , userSelect(RenderStyle::initialUserSelect()) + , boxOrient(RenderStyle::initialBoxOrient()) { } @@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o) , textSizeAdjust(o.textSizeAdjust) , resize(o.resize) , userSelect(o.userSelect) + , boxOrient(o.boxOrient) { } @@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const && khtmlLineBreak == o.khtmlLineBreak && textSizeAdjust == o.textSizeAdjust && resize == o.resize - && userSelect == o.userSelect; + && userSelect == o.userSelect + && boxOrient == o.boxOrient; } bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const diff --git a/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum new file mode 100644 index 0000000..6db26bd --- /dev/null +++ b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum @@ -0,0 +1 @@ +61a373ee739673a9dcd7bac62b9f182e \ No newline at end of file '''
unknown
codeparrot/codeparrot-clean
from __future__ import absolute_import from typing import Any, Optional, Text import logging import re from email.header import decode_header import email.message as message from django.conf import settings from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \ internal_send_message from zerver.lib.notifications import convert_html_to_markdown from zerver.lib.redis_utils import get_redis_client from zerver.lib.upload import upload_message_image from zerver.lib.utils import generate_random_token from zerver.lib.str_utils import force_text from zerver.models import Stream, Recipient, get_user_profile_by_email, \ get_user_profile_by_id, get_display_recipient, get_recipient, \ Message, Realm, UserProfile from six import binary_type import six import talon from talon import quotations talon.init() logger = logging.getLogger(__name__) def redact_stream(error_message): # type: (Text) -> Text domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1] stream_match = re.search(u'\\b(.*?)@' + domain, error_message) if stream_match: stream_name = stream_match.groups()[0] return error_message.replace(stream_name, "X" * len(stream_name)) return error_message def report_to_zulip(error_message): # type: (Text) -> None if settings.ERROR_BOT is None: return error_bot = get_user_profile_by_email(settings.ERROR_BOT) error_stream = Stream.objects.get(name="errors", realm=error_bot.realm) send_zulip(settings.ERROR_BOT, error_stream, u"email mirror error", u"""~~~\n%s\n~~~""" % (error_message,)) def log_and_report(email_message, error_message, debug_info): # type: (message.Message, Text, Dict[str, Any]) -> None scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"), redact_stream(error_message)) if "to" in debug_info: scrubbed_error = u"Stream: %s\n%s" % (redact_stream(debug_info["to"]), scrubbed_error) if "stream" in debug_info: scrubbed_error = u"Realm: %s\n%s" % (debug_info["stream"].realm.domain, scrubbed_error) 
logger.error(scrubbed_error) report_to_zulip(scrubbed_error) # Temporary missed message addresses redis_client = get_redis_client() def missed_message_redis_key(token): # type: (Text) -> Text return 'missed_message:' + token def is_missed_message_address(address): # type: (Text) -> bool msg_string = get_email_gateway_message_string_from_address(address) return is_mm_32_format(msg_string) def is_mm_32_format(msg_string): # type: (Text) -> bool ''' Missed message strings are formatted with a little "mm" prefix followed by a randomly generated 32-character string. ''' return msg_string.startswith('mm') and len(msg_string) == 34 def get_missed_message_token_from_address(address): # type: (Text) -> Text msg_string = get_email_gateway_message_string_from_address(address) if msg_string is None: raise ZulipEmailForwardError('Address not recognized by gateway.') if not is_mm_32_format(msg_string): raise ZulipEmailForwardError('Could not parse missed message address') # strip off the 'mm' before returning the redis key return msg_string[2:] def create_missed_message_address(user_profile, message): # type: (UserProfile, Message) -> Text if settings.EMAIL_GATEWAY_PATTERN == '': logging.warning("EMAIL_GATEWAY_PATTERN is an empty string, using " "NOREPLY_EMAIL_ADDRESS in the 'from' field.") return settings.NOREPLY_EMAIL_ADDRESS if message.recipient.type == Recipient.PERSONAL: # We need to reply to the sender so look up their personal recipient_id recipient_id = get_recipient(Recipient.PERSONAL, message.sender_id).id else: recipient_id = message.recipient_id data = { 'user_profile_id': user_profile.id, 'recipient_id': recipient_id, 'subject': message.subject, } while True: token = generate_random_token(32) key = missed_message_redis_key(token) if redis_client.hsetnx(key, 'uses_left', 1): break with redis_client.pipeline() as pipeline: pipeline.hmset(key, data) pipeline.expire(key, 60 * 60 * 24 * 5) pipeline.execute() address = u'mm' + token return settings.EMAIL_GATEWAY_PATTERN % 
(address,) def mark_missed_message_address_as_used(address): # type: (Text) -> None token = get_missed_message_token_from_address(address) key = missed_message_redis_key(token) with redis_client.pipeline() as pipeline: pipeline.hincrby(key, 'uses_left', -1) pipeline.expire(key, 60 * 60 * 24 * 5) new_value = pipeline.execute()[0] if new_value < 0: redis_client.delete(key) raise ZulipEmailForwardError('Missed message address has already been used') def send_to_missed_message_address(address, message): # type: (Text, message.Message) -> None token = get_missed_message_token_from_address(address) key = missed_message_redis_key(token) result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject') if not all(val is not None for val in result): raise ZulipEmailForwardError('Missing missed message address data') user_profile_id, recipient_id, subject = result user_profile = get_user_profile_by_id(user_profile_id) recipient = Recipient.objects.get(id=recipient_id) display_recipient = get_display_recipient(recipient) # Testing with basestring so we don't depend on the list return type from # get_display_recipient if not isinstance(display_recipient, six.string_types): recipient_str = ','.join([user['email'] for user in display_recipient]) else: recipient_str = display_recipient body = filter_footer(extract_body(message)) body += extract_and_upload_attachments(message, user_profile.realm) if not body: body = '(No email body)' if recipient.type == Recipient.STREAM: recipient_type_name = 'stream' else: recipient_type_name = 'private' internal_send_message(user_profile.realm, user_profile.email, recipient_type_name, recipient_str, subject, body) logging.info("Successfully processed email from %s to %s" % ( user_profile.email, recipient_str)) ## Sending the Zulip ## class ZulipEmailForwardError(Exception): pass def send_zulip(sender, stream, topic, content): # type: (Text, Stream, Text, Text) -> None internal_send_message( stream.realm, sender, "stream", 
stream.name, topic[:60], content[:2000]) def valid_stream(stream_name, token): # type: (Text, Text) -> bool try: stream = Stream.objects.get(email_token=token) return stream.name.lower() == stream_name.lower() except Stream.DoesNotExist: return False def get_message_part_by_type(message, content_type): # type: (message.Message, Text) -> Text charsets = message.get_charsets() for idx, part in enumerate(message.walk()): if part.get_content_type() == content_type: content = part.get_payload(decode=True) assert isinstance(content, binary_type) if charsets[idx]: text = content.decode(charsets[idx], errors="ignore") return text def extract_body(message): # type: (message.Message) -> Text # If the message contains a plaintext version of the body, use # that. plaintext_content = get_message_part_by_type(message, "text/plain") if plaintext_content: return quotations.extract_from_plain(plaintext_content) # If we only have an HTML version, try to make that look nice. html_content = get_message_part_by_type(message, "text/html") if html_content: html_content = quotations.extract_from_html(html_content) return convert_html_to_markdown(html_content) raise ZulipEmailForwardError("Unable to find plaintext or HTML message body") def filter_footer(text): # type: (Text) -> Text # Try to filter out obvious footers. possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")] if len(possible_footers) != 1: # Be conservative and don't try to scrub content if there # isn't a trivial footer structure. return text return text.partition("--")[0].strip() def extract_and_upload_attachments(message, realm): # type: (message.Message, Realm) -> Text user_profile = get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT) attachment_links = [] payload = message.get_payload() if not isinstance(payload, list): # This is not a multipart message, so it can't contain attachments. 
return "" for part in payload: content_type = part.get_content_type() filename = part.get_filename() if filename: attachment = part.get_payload(decode=True) if isinstance(attachment, binary_type): s3_url = upload_message_image(filename, content_type, attachment, user_profile, target_realm=realm) formatted_link = u"[%s](%s)" % (filename, s3_url) attachment_links.append(formatted_link) else: logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." % (filename, message.get("From"))) return u"\n".join(attachment_links) def extract_and_validate(email): # type: (Text) -> Stream try: stream_name, token = decode_email_address(email) except (TypeError, ValueError): raise ZulipEmailForwardError("Malformed email recipient " + email) if not valid_stream(stream_name, token): raise ZulipEmailForwardError("Bad stream token from email recipient " + email) return Stream.objects.get(email_token=token) def find_emailgateway_recipient(message): # type: (message.Message) -> Text # We can't use Delivered-To; if there is a X-Gm-Original-To # it is more accurate, so try to find the most-accurate # recipient list in descending priority order recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"] recipients = [] # type: List[Text] for recipient_header in recipient_headers: r = message.get_all(recipient_header, None) if r: recipients = r break pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')] match_email_re = re.compile(".*?".join(pattern_parts)) for recipient_email in recipients: if match_email_re.match(recipient_email): return recipient_email raise ZulipEmailForwardError("Missing recipient in mirror email") def process_stream_message(to, subject, message, debug_info): # type: (Text, Text, message.Message, Dict[str, Any]) -> None stream = extract_and_validate(to) body = filter_footer(extract_body(message)) body += extract_and_upload_attachments(message, stream.realm) debug_info["stream"] = stream 
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body) logging.info("Successfully processed email to %s (%s)" % ( stream.name, stream.realm.domain)) def process_missed_message(to, message, pre_checked): # type: (Text, message.Message, bool) -> None if not pre_checked: mark_missed_message_address_as_used(to) send_to_missed_message_address(to, message) def process_message(message, rcpt_to=None, pre_checked=False): # type: (message.Message, Optional[Text], bool) -> None subject_header = message.get("Subject", "(no subject)") encoded_subject, encoding = decode_header(subject_header)[0] if encoding is None: subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None else: try: subject = encoded_subject.decode(encoding) except (UnicodeDecodeError, LookupError): subject = u"(unreadable subject)" debug_info = {} try: if rcpt_to is not None: to = rcpt_to else: to = find_emailgateway_recipient(message) debug_info["to"] = to if is_missed_message_address(to): process_missed_message(to, message, pre_checked) else: process_stream_message(to, subject, message, debug_info) except ZulipEmailForwardError as e: # TODO: notify sender of error, retry if appropriate. log_and_report(message, str(e), debug_info)
unknown
codeparrot/codeparrot-clean
__author__ = 'Krivenko' from sys import maxsize class Contact: def __init__(self,firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None, address=None, homephone=None, mobilephone=None, workphone=None,secondaryphone=None, fax=None,byear=None, ayear=None, address2=None, phone2=None, notes=None,droplist=None,droplist2=None, droplist3=None,droplist4=None,id=None,allemail=None,allphones=None,alladress=None,email=None,email2=None,email3=None,): self.firstname=firstname self.middlename=middlename self.lastname=lastname self.nickname=nickname self.title=title self.company=company self.address=address self.homephone=homephone self.mobilephone=mobilephone self.workphone=workphone self.fax=fax self.byear=byear self.ayear=ayear self.address2=address2 self.phone2=phone2 self.notes=notes self.droplist=droplist self.droplist2=droplist2 self.droplist3=droplist3 self.droplist4=droplist4 self.id=id self.alladress=alladress self.allemail=allemail self.allphones=allphones self.secondaryphone=secondaryphone self.email=email self.email2=email2 self.email3=email3 def __repr__(self): return "%s:%s:%s" %(self.id,self.lastname,self.firstname,) def __eq__(self, other): return (self.id is None or other.id is None or self.id==other.id) and\ (self.firstname== other.firstname or self.firstname is None or other.firstname is None) and \ ( self.lastname==other.lastname or self.lastname is None or other.lastname is None)and\ (self.alladress==other.alladress or self.alladress is None or other.alladress is None)and\ (self.allphones==other.allphones or self.allphones is None or other.allphones is None) def id_or_max(self): if self.id: return int(self.id) else: return maxsize
unknown
codeparrot/codeparrot-clean
from functools import wraps from django.utils.cache import patch_vary_headers from django.utils.decorators import available_attrs def vary_on_headers(*headers): """ A view decorator that adds the specified headers to the Vary header of the response. Usage: @vary_on_headers('Cookie', 'Accept-language') def index(request): ... Note that the header names are not case-sensitive. """ def decorator(func): @wraps(func, assigned=available_attrs(func)) def inner_func(*args, **kwargs): response = func(*args, **kwargs) patch_vary_headers(response, headers) return response return inner_func return decorator def vary_on_cookie(func): """ A view decorator that adds "Cookie" to the Vary header of a response. This indicates that a page's contents depends on cookies. Usage: @vary_on_cookie def index(request): ... """ @wraps(func, assigned=available_attrs(func)) def inner_func(*args, **kwargs): response = func(*args, **kwargs) patch_vary_headers(response, ('Cookie',)) return response return inner_func
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'reno.sphinxext', 'openstackdocstheme', ] # openstackdocstheme options repository_name = 'openstack/cyborg' bug_project = 'openstack-cyborg' bug_tag = '' # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Cyborg Release Notes' copyright = u'2018, Cyborg developers' author = u'cyborg developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "" # The full version, including alpha/beta/rc tags. release = "" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for # a list of builtin themes. # html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars # html_sidebars = {} # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'CyborgReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'CyborgReleaseNotes.tex', u'Cyborg Release Notes Documentation', u'Cyborg developers', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ (master_doc, 'CyborgReleaseNotes', u'Cyborg Release Notes Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'CyborgReleaseNotes', u'Cyborg Release Notes Documentation', author, 'CyborgReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/']
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python import os import sys from redis import __version__ try: from setuptools import setup from setuptools.command.test import test as TestCommand class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, because outside the eggs aren't loaded import pytest errno = pytest.main(self.test_args) sys.exit(errno) except ImportError: from distutils.core import setup def PyTest(x): x f = open(os.path.join(os.path.dirname(__file__), 'README.rst')) long_description = f.read() f.close() setup( name='redis', version=__version__, description='Python client for Redis key-value store', long_description=long_description, url='http://github.com/andymccurdy/redis-py', author='Andy McCurdy', author_email='sedrik@gmail.com', maintainer='Andy McCurdy', maintainer_email='sedrik@gmail.com', keywords=['Redis', 'key-value store'], license='MIT', packages=['redis'], tests_require=['pytest>=2.5.0'], cmdclass={'test': PyTest}, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ] )
unknown
codeparrot/codeparrot-clean
#from opengmcore import _opengmcore.adder as adder from opengmcore import * from __version__ import version from functionhelper import * from _inf_param import _MetaInfParam , InfParam from _visu import visualizeGm from _misc import defaultAccumulator from __version__ import version import time from _inference_interface_generator import _inject_interface , InferenceBase import inference import hdf5 import benchmark # initialize solver/ inference dictionaries _solverDicts=[ (inference.adder.minimizer.solver.__dict__ , 'adder', 'minimizer' ), (inference.adder.maximizer.solver.__dict__, 'adder', 'maximizer' ), (inference.multiplier.integrator.solver.__dict__,'adder', 'integrator'), (inference.multiplier.minimizer.solver.__dict__, 'multiplier', 'minimizer' ), (inference.multiplier.maximizer.solver.__dict__, 'multiplier', 'maximizer' ), (inference.multiplier.integrator.solver.__dict__,'multiplier', 'integrator') ] for infClass,infName in _inject_interface(_solverDicts): inference.__dict__[infName]=infClass class Timer(object): def __init__(self, name=None): self.name = name def __enter__(self): if self.name: print '[%s]' % self.name self.tstart = time.time() def __exit__(self, type, value, traceback): #if self.name: # print '[%s]' % self.name, print ' Elapsed: %s' % (time.time() - self.tstart) def saveGm(gm, f, d='gm'): """ save a graphical model to a hdf5 file: Args: gm : graphical model to save f : filepath g : dataset (defaut : 'gm') """ hdf5.saveGraphicalModel(gm, f, d) def loadGm(f, d='gm', operator='adder'): """ save a graphical model to a hdf5 file: Args: f : filepath g : dataset (defaut : 'gm') operator : operator of the graphical model ('adder' / 'multiplier') """ if(operator=='adder'): gm=adder.GraphicalModel() elif(operator=='multiplier'): gm=multiplier.GraphicalModel() else: raise RuntimeError("unknown operator: "+ operator) hdf5.loadGraphicalModel(gm,f,d) return gm class TestModels(object): @staticmethod def chain3(nVar,nLabels): 
model=adder.GraphicalModel([nLabels]*nVar) unaries = numpy.random.rand(nVar,nLabels) model.addFactors(model.addFunctions(unaries),numpy.arange(nVar)) numpy.random.seed(42) for x0 in range(nVar-2): f=numpy.random.rand(nLabels,nLabels,nLabels) model.addFactor(model.addFunction(f),[x0,x0+1,x0+2]) return model @staticmethod def chain4(nVar,nLabels): model=adder.GraphicalModel([nLabels]*nVar) unaries = numpy.random.rand(nVar,nLabels) model.addFactors(model.addFunctions(unaries),numpy.arange(nVar)) numpy.random.seed(42) for x0 in range(nVar-3): f=numpy.random.rand(nLabels,nLabels,nLabels,nLabels) model.addFactor(model.addFunction(f),[x0,x0+1,x0+2,x0+3]) return model @staticmethod def chainN(nVar,nLabels,order,nSpecialUnaries=0,beta=1.0): model=adder.GraphicalModel([nLabels]*nVar) unaries = numpy.random.rand(nVar,nLabels) for sn in range(nSpecialUnaries): r=int(numpy.random.rand(1)*nVar-1) rl=int(numpy.random.rand(1)*nLabels-1) unaries[r,rl]=0.0 model.addFactors(model.addFunctions(unaries),numpy.arange(nVar)) numpy.random.seed(42) for x0 in range(nVar-(order-1)): f=numpy.random.rand( *([nLabels]*order)) f*=beta vis=numpy.arange(order) vis+=x0 model.addFactor(model.addFunction(f),vis) return model @staticmethod def secondOrderGrid(dx,dy,nLabels): nVar=dx*dy model=adder.GraphicalModel([nLabels]*nVar) unaries = numpy.random.rand(nVar,nLabels) model.addFactors(model.addFunctions(unaries),numpy.arange(nVar)) vis2Order=secondOrderGridVis(dx,dy,True) nF2=len(vis2Order)#.shape[0] f2s=numpy.random.rand(nF2,nLabels) model.addFactors(model.addFunctions(f2s),vis2Order) return model class GenericTimingVisitor(object): def __init__(self,visitNth=1,reserve=0,verbose=True,multiline=True): self.visitNth=visitNth self.reserve=reserve self.verbose=verbose self.multiline=multiline self.values_ = None self.runtimes_ = None self.bounds_ = None self.iterations_ = None self.t0 = None self.t1 = None self.iterNr = 0 def getValues(self): return numpy.require(self.values_,dtype=value_type) def 
getTimes(self): return numpy.require(self.runtimes_,dtype=value_type) def getBounds(self): return numpy.require(self.bounds_,dtype=value_type) def getIterations(self): return numpy.require(self.iterations_,dtype=value_type) def begin(self,inf): v = inf.value() b = inf.bound() self.values_ =[v] self.bounds_ =[b] self.runtimes_ =[0.0] self.iterations_=[self.iterNr] if self.verbose : print 'Begin : %d Value : %f Bound : %f '%(self.iterNr,v,b) # start the timing self.t0 =time.time() self.t1 =time.time() def visit(self,inf): if(self.iterNr==0 or self.iterNr%self.visitNth==0): # "stop the timing" self.t1=time.time() # get the runtime of the run rt=self.t1-self.t0 v = inf.value() b = inf.bound() if self.verbose : print 'Step : %d Value : %f Bound : %f '%(self.iterNr,v,b) # store results self.values_.append(v) self.bounds_.append(b) self.runtimes_.append(rt) self.iterations_.append(self.iterNr) # increment iteration number self.iterNr+=1 # restart the timing self.t0=time.time() else: # increment iteration number self.iterNr+=1 def end(self,inf): # "stop the timing" self.t1=time.time() # get the runtime of the run rt=self.t1-self.t0 v = inf.value() b = inf.bound() if self.verbose : print 'End : %d Value : %f Bound : %f '%(self.iterNr,v,b) # store results self.values_.append(v) self.bounds_.append(b) self.runtimes_.append(rt) self.iterations_.append(self.iterNr) class __RandomFusion__(object): def __init__(self,gm,accumulator=None,parameter=InfParam()): if accumulator is None: self.accumulator=defaultAccumulator(gm=gm) else: self.accumulator=accumulator kwargs=parameter.kwargs self.gm_=gm self.steps = kwargs.get('steps', 100) self.fusionSolver = kwargs.get('fuisionSolver', 'lf2') self.arg_ = None self.value_ = None self.fusionMover=inference.adder.minimizer.FusionMover(self.gm_) self.nLabels = self.gm_.numberOfLabels(0) self.nVar = self.gm_.numberOfVariables def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True): return 
GenericTimingVisitor(visitNth,reserve,verbose,multiline) def setStartingPoint(self,arg): self.arg_=arg self.value_=gm.evaluate(self.arg_) def infer(self,visitor=None): if(self.arg_ is None): self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type) self.value_ = self.value_=self.gm_.evaluate(self.arg_) # start inference if visitor is not None: visitor.begin(self) # start fusion moves for x in range(self.steps): randState=numpy.random.randint(low=0, high=self.nLabels, size=self.nVar).astype(label_type) r = self.fusionMover.fuse(self.arg_,randState,self.fusionSolver) self.arg_=r[0] self.value_=r[1] visitor.visit(self) # end inference if visitor is not None: visitor.end(self) def name(self): return "RandomFusion" def bound(self): return -1.0*float('inf') def arg(self): return self.arg_ def value(self): return self.value_ class __CheapInitialization__(object): def __init__(self,gm,accumulator=None,parameter=InfParam()): if accumulator is None: self.accumulator=defaultAccumulator(gm=gm) else: self.accumulator=accumulator kwargs=parameter.kwargs self.gm_=gm self.arg_ = None self.value_ = None self.initType = kwargs.get('initType', 'localOpt') def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True): return GenericTimingVisitor(visitNth,reserve,verbose,multiline) def setStartingPoint(self,arg): self.arg_=arg self.value_=gm.evaluate(self.arg_) def infer(self,visitor=None): if(self.arg_ is None): self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type) self.value_ = self.value_=self.gm_.evaluate(self.arg_) # start inference if visitor is not None: visitor.begin(self) if(self.initType=='localOpt'): print "move local opt" self.arg_ = self.gm_.moveLocalOpt('minimizer') print "done" if visitor is not None: visitor.visit(self) # end inference if visitor is not None: visitor.end(self) def name(self): return "CheapInitialization" def bound(self): return -1.0*float('inf') def arg(self): return self.arg_ def value(self): return self.value_ 
inference.__dict__['CheapInitialization']=__CheapInitialization__ inference.__dict__['RandomFusion']=__RandomFusion__ if __name__ == "__main__": pass
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2012 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Documentation on PRESUBMIT.py can be found at: # http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts import json import hashlib import os import re import gclient_utils def CheckChange(input_api, message_constructor): """Checks for files with a modified contents. Some checking of validator happens on builbots, but comprehensive enumeration tests must be run locally. There are two dangers: 1. Source code for autogenerated files can be modified without regeneration of said files. 2. Source of validator can be changed without running the aforementioned tests. This function catches the situation when source files for validator_x86_??.c are changed but files are not regenerated and it also catches the situation when code is changed without running the dfacheckvalidator tests. """ errors = [] changelist = input_api.change root_path = changelist.RepositoryRoot() if input_api.change.scm == 'svn': try: # With SVN you can decide to commit not all modified files but some of # them thus separate GetAllModifiedFiles() and GetModifiedFiles() lists # are provided. We need to remove root_path from the name of file. 
assert all(filename.startswith(root_path + os.path.sep) for filename in changelist.GetAllModifiedFiles()) all_filenames = [filename[len(root_path + os.path.sep):] for filename in changelist.GetAllModifiedFiles()] assert all(filename.startswith(root_path + os.path.sep) for filename in changelist.GetModifiedFiles()) modified_filenames = [filename[len(root_path + os.path.sep):] for filename in changelist.GetModifiedFiles()] except: # If gcl is not available (which happens in CQ bots) then we'll try to use # AffectedFiles() instead of GetAllModifiedFiles() all_filenames = [file.LocalPath() for file in changelist.AffectedFiles()] modified_filenames = all_filenames else: # With GIT you must commit all modified files thus only AffectedFiles() # list is provided. all_filenames = [file.LocalPath() for file in changelist.AffectedFiles()] modified_filenames = all_filenames json_filename = os.path.join( 'src', 'trusted', 'validator_ragel', 'gen', 'protected_files.json') protected_files = json.loads( gclient_utils.FileRead(os.path.join(root_path, json_filename))) need_dfagen = False need_dfacheckvalidator = False canonical_prefix = 'native_client/' for filename in sorted(all_filenames): canonical_filename = canonical_prefix + filename.replace('\\', '/') if canonical_filename in protected_files['validator']: file_contents = gclient_utils.FileRead(os.path.join(root_path, filename)) sha512 = hashlib.sha512(file_contents).hexdigest() if sha512 != protected_files['validator'][canonical_filename]: errors.append(message_constructor( 'Incorrect {0} hash:\n expected {1}\n got {2}'.format( canonical_filename, protected_files['validator'][canonical_filename], sha512))) need_dfacheckvalidator = True if canonical_filename in protected_files['generating']: for automaton_filename in protected_files['generated']: if (os.stat(os.path.join(root_path, filename)).st_mtime > os.stat(os.path.join(root_path, automaton_filename[len(canonical_prefix):])).st_mtime): errors.append(message_constructor( 
'File {0} is older then {1}'.format( automaton_filename, canonical_filename))) need_dfagen = True if (canonical_filename in protected_files['validator'] or canonical_filename in protected_files['generating'] or filename == json_filename): if filename not in modified_filenames: errors.append(message_constructor( 'File {0} is changed but is excluded from this CL'.format( canonical_filename))) if need_dfagen: errors.append(message_constructor( 'Please run "./scons dfagen" before commit!')) if need_dfacheckvalidator: errors.append(message_constructor( 'Please run "./scons dfacheckvalidator" before commit!')) return errors def CheckChangeOnUpload(input_api, output_api): return CheckChange(input_api, message_constructor=output_api.PresubmitPromptWarning) def CheckChangeOnCommit(input_api, output_api): return CheckChange(input_api, message_constructor=output_api.PresubmitError)
unknown
codeparrot/codeparrot-clean
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.playbook.attribute import FieldAttribute from ansible.playbook.task import Task try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['TaskInclude'] class TaskInclude(Task): """ A task include is derived from a regular task to handle the special circumstances related to the `- include: ...` task. 
    """

    # =================================================================================
    # ATTRIBUTES

    # Whether the include is evaluated statically at parse time (True),
    # dynamically at runtime (False), or auto-detected (None, the default).
    _static = FieldAttribute(isa='bool', default=None)

    def __init__(self, block=None, role=None, task_include=None):
        super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
        # Tracks whether this include was resolved at parse time; defaults to
        # dynamic and is flipped externally by the parsing/iteration code.
        self.statically_loaded = False

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        # Factory: build an empty TaskInclude bound to its parent objects,
        # then populate it from the raw task data.
        t = TaskInclude(block=block, role=role, task_include=task_include)
        return t.load_data(data, variable_manager=variable_manager, loader=loader)

    def copy(self, exclude_parent=False, exclude_tasks=False):
        # Carry over the statically_loaded flag, which the base Task.copy()
        # does not know about.
        new_me = super(TaskInclude, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
        new_me.statically_loaded = self.statically_loaded
        return new_me

    def get_vars(self):
        '''
        We override the parent Task() classes get_vars here because
        we need to include the args of the include into the vars as
        they are params to the included tasks.
        '''
        all_vars = dict()
        if self._parent:
            all_vars.update(self._parent.get_vars())

        all_vars.update(self.vars)
        all_vars.update(self.args)

        # 'tags' and 'when' are directives that apply to the include task
        # itself, not variables to be passed down to the included tasks.
        if 'tags' in all_vars:
            del all_vars['tags']
        if 'when' in all_vars:
            del all_vars['when']

        return all_vars
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright 2014 Objectif Libre # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Stéphane Albert # try: import simplejson as json except ImportError: import json import decimal from collections import defaultdict from sqlalchemy import and_ from oslo_db.sqlalchemy import utils import sqlalchemy from cloudkitty import db from cloudkitty import storage from cloudkitty.storage.sqlalchemy import migration from cloudkitty.storage.sqlalchemy import models from cloudkitty import utils as ck_utils import sqlalchemy.ext.declarative import sqlalchemy.orm.interfaces import sqlalchemy.exc import datetime class SQLAlchemyStorage(storage.BaseStorage): """SQLAlchemy Storage Backend """ frame_model = models.RatedDataFrame def __init__(self, **kwargs): super(SQLAlchemyStorage, self).__init__(**kwargs) self._session = {} @staticmethod def init(): migration.upgrade('head') def _pre_commit(self, tenant_id): self._check_session(tenant_id) if not self._has_data.get(tenant_id): empty_frame = {'vol': {'qty': 0, 'unit': 'None'}, 'rating': {'price': 0}, 'desc': ''} self._append_time_frame('_NO_DATA_', empty_frame, tenant_id) def _commit(self, tenant_id): self._session[tenant_id].commit() def _post_commit(self, tenant_id): super(SQLAlchemyStorage, self)._post_commit(tenant_id) del self._session[tenant_id] def _check_session(self, tenant_id): session = self._session.get(tenant_id) if not session: self._session[tenant_id] = db.get_session() self._session[tenant_id].begin() 
def _dispatch(self, data, tenant_id): self._check_session(tenant_id) for service in data: for frame in data[service]: self._append_time_frame(service, frame, tenant_id) self._has_data[tenant_id] = True def get_state(self, tenant_id=None): session = db.get_session() q = utils.model_query( self.frame_model, session) if tenant_id: q = q.filter( self.frame_model.tenant_id == tenant_id) q = q.order_by( self.frame_model.begin.desc()) r = q.first() if r: return ck_utils.dt2ts(r.begin) # Modified by Muralidharan.s for applying a logic for getting # Total value based on Instance def get_total(self, begin=None, end=None, tenant_id=None, service=None, instance_id=None): model = models.RatedDataFrame # Boundary calculation if not begin: begin = ck_utils.get_month_start() if not end: end = ck_utils.get_next_month() session = db.get_session() q = session.query( sqlalchemy.func.sum(model.rate).label('rate')) if tenant_id: q = q.filter( models.RatedDataFrame.tenant_id == tenant_id) if service: q = q.filter( models.RatedDataFrame.res_type == service) if instance_id: q = q.filter( models.RatedDataFrame.desc.like('%'+instance_id+'%')) q = q.filter( model.begin >= begin, model.end <= end) rate = q.scalar() return rate # For listing invoice # admin and non-admin tenant will be able to list the own invoice # only admin tenant will be able to get the invoice of all tenant (--all-tenants) def list_invoice(self, tenant_name, all_tenants=None): model = models.InvoiceDetails session = db.get_session() # fetch the details for tenant q = session.query(model).order_by(model.id).filter(model.tenant_name == tenant_name) # Fetch the invoice for all tenants if all_tenants: q = session.query(model).order_by(model.id) # Fetch all the values r = q.all() return [entry.to_cloudkitty() for entry in r] # For getting a invoice details as needed # admin tenant section # can get invoice based on tenant id, tenant name, invoice id and payment status def get_invoice(self, tenant_id=None, tenant=None, 
invoice_id=None, payment_status=None): model = models.InvoiceDetails session = db.get_session() # Fetch the invoice using tenant ID if tenant_id: q = session.query(model).order_by(model.id).filter(model.tenant_id == tenant_id) # Fetch the invoices using tenant name input if tenant: q = session.query(model).order_by(model.id).filter(model.tenant_name == tenant) # Fetch the invoice using invoice ID if invoice_id: q = session.query(model).order_by(model.id).filter(model.invoice_id == invoice_id) # Fetch the invoice using Payment status if payment_status: q = session.query(model).order_by(model.id).filter(model.payment_status == payment_status) # Fetch all the values r = q.all() return [entry.to_cloudkitty() for entry in r] # Invoice for non-admin tenant # get the invoice for non-admin tenant # can be able to fetch using invoice-id and payment_status def get_invoice_for_tenant(self, tenant_name, invoice_id=None, payment_status=None): model = models.InvoiceDetails session = db.get_session() # Fetch the invoice using invoice ID if invoice_id: q = session.query(model).order_by(model.id).filter(and_(model.invoice_id == invoice_id, model.tenant_name == tenant_name)) # Fetch the invoice using payment_status if payment_status: q = session.query(model).order_by(model.id).filter(and_(model.payment_status == payment_status, model.tenant_name == tenant_name)) # Fetch all the values r = q.all() return [entry.to_cloudkitty() for entry in r] # For showing a invoice details as needed # admin tenant section def show_invoice_for_tenant(self, tenant_name, invoice_id): model = models.InvoiceDetails session = db.get_session() # Fetch the invoice using tenant ID if invoice_id: q = session.query(model).order_by(model.id).filter(and_(model.invoice_id == invoice_id, model.tenant_name == tenant_name)) # Fetch all the values r = q.all() return [entry.to_cloudkitty() for entry in r] # For showing a invoice details as needed # non-admin tenant section def show_invoice(self, invoice_id): model = 
models.InvoiceDetails session = db.get_session() # Fetch the invoice using tenant ID if invoice_id: q = session.query(model).order_by(model.id).filter(model.invoice_id == invoice_id) # Fetch all the values r = q.all() return [entry.to_cloudkitty() for entry in r] # add invoice to the table def add_invoice(self, invoice_id, invoice_date, invoice_period_from, invoice_period_to, tenant_id, invoice_data, tenant_name, total_cost, paid_cost, balance_cost, payment_status): """Create a new invoice entry. """ session = db.get_session() # Add invoice details invoice = models.InvoiceDetails( invoice_date = invoice_date, invoice_period_from = invoice_period_from, invoice_period_to = invoice_period_to, tenant_id = tenant_id, invoice_id = invoice_id, invoice_data = invoice_data, tenant_name = tenant_name, total_cost = total_cost, paid_cost = paid_cost, balance_cost = balance_cost, payment_status = payment_status) try: with session.begin(): session.add(invoice) except sqlalchemy.exc.IntegrityError as exc: reason = exc.message return invoice # update invoice entried in table def update_invoice(self, invoice_id, total_cost, paid_cost, balance_cost, payment_status): """ Update the invoice details """ session = db.get_session() with session.begin(): try: q = utils.model_query( models.InvoiceDetails, session) if invoice_id: q = q.filter(models.InvoiceDetails.invoice_id == invoice_id) q = q.with_lockmode('update') invoice_details = q.one() if total_cost: invoice_details.total_cost = total_cost if paid_cost: invoice_details.paid_cost = paid_cost if balance_cost: invoice_details.balance_cost = balance_cost if payment_status: invoice_details.payment_status = payment_status except sqlalchemy.orm.exc.NoResultFound: invoice_details = None # invoice_details none if invoice_details is None: return invoice_details # invoice details not none # loop through invoice detail and return else: invoice_detail = {} #return [invoice_detail for invoice_detail in invoice_details if total_cost: 
invoice_detail['total_cost'] = invoice_details.total_cost if balance_cost: invoice_detail['balance_cost'] = invoice_details.balance_cost if paid_cost: invoice_detail['paid_cost'] = invoice_details.paid_cost if payment_status: invoice_detail['payment_status'] = invoice_details.payment_status return invoice_detail # delete invoice entries in table def delete_invoice(self, invoice_id): """ delete the invoice details """ session = db.get_session() with session.begin(): try: q = utils.model_query( models.InvoiceDetails, session) if invoice_id: q = q.filter(models.InvoiceDetails.invoice_id == invoice_id).delete() except sqlalchemy.orm.exc.NoResultFound: invoice_deleted = None def get_tenants(self, begin=None, end=None): # Boundary calculation if not begin: begin = ck_utils.get_month_start() if not end: end = ck_utils.get_next_month() session = db.get_session() q = utils.model_query( self.frame_model, session) q = q.filter( self.frame_model.begin >= begin, self.frame_model.end <= end) tenants = q.distinct().values( self.frame_model.tenant_id) return [tenant.tenant_id for tenant in tenants] def add_time_frame_custom(self, **kwargs): """Create a new time frame custom . :param begin: Start of the dataframe. :param end: End of the dataframe. :param tenant_id: tenant_id of the dataframe owner. :param unit: Unit of the metric. :param qty: Quantity of the metric. :param res_type: Type of the resource. :param rate: Calculated rate for this dataframe. :param desc: Resource description (metadata). 
""" session = db.get_session() # Add invoice details frame = models.RatedDataFrame( begin = kwargs.get('begin'), end = kwargs.get('end'), tenant_id = kwargs.get('tenant_id'), unit = kwargs.get('unit'), qty = kwargs.get('qty'), res_type = kwargs.get('res_type'), rate = decimal.Decimal(kwargs.get('rate')), desc = json.dumps(kwargs.get('desc'))) try: with session.begin(): session.add(frame) except sqlalchemy.exc.IntegrityError as exc: reason = exc.message def get_time_frame(self, begin, end, **filters): session = db.get_session() q = utils.model_query( self.frame_model, session) q = q.filter( self.frame_model.begin >= ck_utils.ts2dt(begin), self.frame_model.end <= ck_utils.ts2dt(end)) for filter_name, filter_value in filters.items(): if filter_value: q = q.filter( getattr(self.frame_model, filter_name) == filter_value) if not filters.get('res_type'): q = q.filter(self.frame_model.res_type != '_NO_DATA_') count = q.count() if not count: raise storage.NoTimeFrame() r = q.all() return [entry.to_cloudkitty(self._collector) for entry in r] def _append_time_frame(self, res_type, frame, tenant_id): vol_dict = frame['vol'] qty = vol_dict['qty'] unit = vol_dict['unit'] rating_dict = frame.get('rating', {}) rate = rating_dict.get('price') if not rate: rate = decimal.Decimal(0) desc = json.dumps(frame['desc']) self.add_time_frame(begin=self.usage_start_dt.get(tenant_id), end=self.usage_end_dt.get(tenant_id), tenant_id=tenant_id, unit=unit, qty=qty, res_type=res_type, rate=rate, desc=desc) def add_time_frame(self, **kwargs): """Create a new time frame. :param begin: Start of the dataframe. :param end: End of the dataframe. :param tenant_id: tenant_id of the dataframe owner. :param unit: Unit of the metric. :param qty: Quantity of the metric. :param res_type: Type of the resource. :param rate: Calculated rate for this dataframe. :param desc: Resource description (metadata). """ frame = self.frame_model(**kwargs) self._session[kwargs.get('tenant_id')].add(frame)
unknown
codeparrot/codeparrot-clean
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: templategroups.notifications.alerting.grafana.app spec: group: notifications.alerting.grafana.app names: kind: TemplateGroup plural: templategroups scope: Namespaced versions: - name: v0alpha1 schema: openAPIV3Schema: properties: spec: properties: content: type: string title: type: string required: - title - content type: object status: properties: additionalFields: description: additionalFields is reserved for future use type: object x-kubernetes-preserve-unknown-fields: true operatorStates: additionalProperties: properties: descriptiveState: description: descriptiveState is an optional more descriptive state field which has no requirements on format type: string details: description: details contains any extra information that is operator-specific type: object x-kubernetes-preserve-unknown-fields: true lastEvaluation: description: lastEvaluation is the ResourceVersion last evaluated type: string state: description: |- state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation. enum: - success - in_progress - failed type: string required: - lastEvaluation - state type: object description: |- operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field. type: object type: object required: - spec type: object served: true storage: true subresources: status: {}
unknown
github
https://github.com/grafana/grafana
apps/alerting/notifications/definitions/templategroup.notifications.alerting.grafana.app.yaml
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral, Real import numpy as np import scipy.sparse as sp from scipy.sparse.linalg import svds from sklearn.base import ( BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context, ) from sklearn.utils import check_array, check_random_state from sklearn.utils._arpack import _init_arpack_v0 from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.extmath import _randomized_svd, safe_sparse_dot, svd_flip from sklearn.utils.sparsefuncs import mean_variance_axis from sklearn.utils.validation import check_is_fitted, validate_data __all__ = ["TruncatedSVD"] class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). Contrary to PCA, this estimator does not center the data before computing the singular value decomposition. This means it can work with sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithms: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or `X.T * X`, whichever is more efficient. Read more in the :ref:`User Guide <LSA>`. Parameters ---------- n_components : int, default=2 Desired dimensionality of output data. If algorithm='arpack', must be strictly less than the number of features. If algorithm='randomized', must be less than or equal to the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. 
algorithm : {'arpack', 'randomized'}, default='randomized' SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, default=5 Number of iterations for randomized SVD solver. Not used by ARPACK. The default is larger than the default in :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse matrices that may have large slowly decaying spectrum. n_oversamples : int, default=10 Number of oversamples for randomized SVD solver. Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` for a complete description. .. versionadded:: 1.1 power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' Power iteration normalizer for randomized SVD solver. Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd` for more details. .. versionadded:: 1.1 random_state : int, RandomState instance or None, default=None Used during randomized svd. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. tol : float, default=0.0 Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- components_ : ndarray of shape (n_components, n_features) The right singular vectors of the input data. explained_variance_ : ndarray of shape (n_components,) The variance of the training samples transformed by a projection to each component. explained_variance_ratio_ : ndarray of shape (n_components,) Percentage of variance explained by each of the selected components. singular_values_ : ndarray of shape (n_components,) The singular values corresponding to each of the selected components. The singular values are equal to the 2-norms of the ``n_components`` variables in the lower-dimensional space. n_features_in_ : int Number of features seen during :term:`fit`. .. 
versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- DictionaryLearning : Find a dictionary that sparsely encodes data. FactorAnalysis : A simple linear generative model with Gaussian latent variables. IncrementalPCA : Incremental principal components analysis. KernelPCA : Kernel Principal component analysis. NMF : Non-Negative Matrix Factorization. PCA : Principal component analysis. Notes ----- SVD suffers from a problem called "sign indeterminacy", which means the sign of the ``components_`` and the output from transform depend on the algorithm and random state. To work around this, fit instances of this class to data once, then keep the instance around to do transformations. References ---------- :arxiv:`Halko, et al. (2009). "Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions" <0909.4061>` Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from scipy.sparse import csr_matrix >>> import numpy as np >>> np.random.seed(0) >>> X_dense = np.random.rand(100, 100) >>> X_dense[:, 2 * np.arange(50)] = 0 >>> X = csr_matrix(X_dense) >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42) >>> svd.fit(X) TruncatedSVD(n_components=5, n_iter=7, random_state=42) >>> print(svd.explained_variance_ratio_) [0.0157 0.0512 0.0499 0.0479 0.0453] >>> print(svd.explained_variance_ratio_.sum()) 0.2102 >>> print(svd.singular_values_) [35.2410 4.5981 4.5420 4.4486 4.3288] """ _parameter_constraints: dict = { "n_components": [Interval(Integral, 1, None, closed="left")], "algorithm": [StrOptions({"arpack", "randomized"})], "n_iter": [Interval(Integral, 0, None, closed="left")], "n_oversamples": [Interval(Integral, 1, None, closed="left")], "power_iteration_normalizer": [StrOptions({"auto", "OR", "LU", "none"})], "random_state": 
["random_state"], "tol": [Interval(Real, 0, None, closed="left")], } def __init__( self, n_components=2, *, algorithm="randomized", n_iter=5, n_oversamples=10, power_iteration_normalizer="auto", random_state=None, tol=0.0, ): self.algorithm = algorithm self.n_components = n_components self.n_iter = n_iter self.n_oversamples = n_oversamples self.power_iteration_normalizer = power_iteration_normalizer self.random_state = random_state self.tol = tol def fit(self, X, y=None): """Fit model on training data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the transformer object. """ self.fit_transform(X) return self @_fit_context(prefer_skip_nested_validation=True) def fit_transform(self, X, y=None): """Fit model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : Ignored Not used, present here for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = validate_data(self, X, accept_sparse=["csr", "csc"], ensure_min_features=2) random_state = check_random_state(self.random_state) if self.algorithm == "arpack": v0 = _init_arpack_v0(min(X.shape), random_state) U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] # u_based_decision=False is needed to be consistent with PCA. U, VT = svd_flip(U[:, ::-1], VT[::-1], u_based_decision=False) elif self.algorithm == "randomized": if self.n_components > X.shape[1]: raise ValueError( f"n_components({self.n_components}) must be <=" f" n_features({X.shape[1]})." 
) U, Sigma, VT = _randomized_svd( X, self.n_components, n_iter=self.n_iter, n_oversamples=self.n_oversamples, power_iteration_normalizer=self.power_iteration_normalizer, random_state=random_state, flip_sign=False, ) U, VT = svd_flip(U, VT, u_based_decision=False) self.components_ = VT # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T, # X @ V is not the same as U @ Sigma if self.algorithm == "randomized" or ( self.algorithm == "arpack" and self.tol > 0 ): X_transformed = safe_sparse_dot(X, self.components_.T) else: X_transformed = U * Sigma # Calculate explained variance & explained variance ratio self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis(X, axis=0) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var self.singular_values_ = Sigma # Store the singular values. return X_transformed def transform(self, X): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data. Returns ------- X_new : ndarray of shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False) return safe_sparse_dot(X, self.components_.T) def inverse_transform(self, X): """Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like of shape (n_samples, n_components) New data. Returns ------- X_original : ndarray of shape (n_samples, n_features) Note that this is always a dense array. 
        """
        # Project back to the original feature space:
        # X_original ~= X @ components_. TruncatedSVD never centers the data,
        # so no mean is added back here.
        X = check_array(X)
        return np.dot(X, self.components_)

    def __sklearn_tags__(self):
        # Advertise estimator capabilities: sparse-input support and dtype
        # preservation for float64/float32 transforms.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        tags.transformer_tags.preserves_dtype = ["float64", "float32"]
        return tags

    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        # One output feature per retained right singular vector.
        return self.components_.shape[0]
python
github
https://github.com/scikit-learn/scikit-learn
sklearn/decomposition/_truncated_svd.py
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: aci_config_rollback short_description: Provides rollback and rollback preview functionality (config:ImportP) description: - Provides rollback and rollback preview functionality for Cisco ACI fabrics. - Config Rollbacks are done using snapshots C(aci_snapshot) with the configImportP class. seealso: - module: aci_config_snapshot - name: APIC Management Information Model reference description: More information about the internal APIC class B(config:ImportP). link: https://developer.cisco.com/docs/apic-mim-ref/ author: - Jacob McGill (@jmcgill298) version_added: '2.4' options: compare_export_policy: description: - The export policy that the C(compare_snapshot) is associated to. type: str compare_snapshot: description: - The name of the snapshot to compare with C(snapshot). type: str description: description: - The description for the Import Policy. type: str aliases: [ descr ] export_policy: description: - The export policy that the C(snapshot) is associated to. type: str required: yes fail_on_decrypt: description: - Determines if the APIC should fail the rollback if unable to decrypt secured data. - The APIC defaults to C(yes) when unset. type: bool import_mode: description: - Determines how the import should be handled by the APIC. - The APIC defaults to C(atomic) when unset. type: str choices: [ atomic, best-effort ] import_policy: description: - The name of the Import Policy to use for config rollback. type: str import_type: description: - Determines how the current and snapshot configuration should be compared for replacement. - The APIC defaults to C(replace) when unset. 
type: str choices: [ merge, replace ] snapshot: description: - The name of the snapshot to rollback to, or the base snapshot to use for comparison. - The C(aci_snapshot) module can be used to query the list of available snapshots. type: str required: yes state: description: - Use C(preview) for previewing the diff between two snapshots. - Use C(rollback) for reverting the configuration to a previous snapshot. type: str choices: [ preview, rollback ] default: rollback extends_documentation_fragment: aci ''' EXAMPLES = r''' --- - name: Create a Snapshot aci_config_snapshot: host: apic username: admin password: SomeSecretPassword export_policy: config_backup state: present delegate_to: localhost - name: Query Existing Snapshots aci_config_snapshot: host: apic username: admin password: SomeSecretPassword export_policy: config_backup state: query delegate_to: localhost - name: Compare Snapshot Files aci_config_rollback: host: apic username: admin password: SomeSecretPassword export_policy: config_backup snapshot: run-2017-08-28T06-24-01 compare_export_policy: config_backup compare_snapshot: run-2017-08-27T23-43-56 state: preview delegate_to: localhost - name: Rollback Configuration aci_config_rollback: host: apic username: admin password: SomeSecretPassword import_policy: rollback_config export_policy: config_backup snapshot: run-2017-08-28T06-24-01 state: rollback delegate_to: localhost - name: Rollback Configuration aci_config_rollback: host: apic username: admin password: SomeSecretPassword import_policy: rollback_config export_policy: config_backup snapshot: run-2017-08-28T06-24-01 description: Rollback 8-27 changes import_mode: atomic import_type: replace fail_on_decrypt: yes state: rollback delegate_to: localhost ''' RETURN = r''' preview: description: A preview between two snapshots returned: when state is preview type: str error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown 
managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: str sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' filter_string: description: The filter string used for the request returned: failure or debug type: str sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: failure or debug type: str sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: str sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: str sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_bytes from ansible.module_utils.urls import fetch_url # Optional, only used for rollback preview try: import lxml.etree from xmljson import cobra XML_TO_JSON = True except ImportError: XML_TO_JSON = False def main(): argument_spec = aci_argument_spec() argument_spec.update( compare_export_policy=dict(type='str'), compare_snapshot=dict(type='str'), description=dict(type='str', aliases=['descr']), export_policy=dict(type='str'), fail_on_decrypt=dict(type='bool'), import_mode=dict(type='str', choices=['atomic', 'best-effort']), import_policy=dict(type='str'), import_type=dict(type='str', choices=['merge', 'replace']), snapshot=dict(type='str', required=True), state=dict(type='str', default='rollback', choices=['preview', 'rollback']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, required_if=[ ['state', 'preview', ['compare_export_policy', 'compare_snapshot']], 
['state', 'rollback', ['import_policy']], ], ) aci = ACIModule(module) description = module.params['description'] export_policy = module.params['export_policy'] fail_on_decrypt = aci.boolean(module.params['fail_on_decrypt']) import_mode = module.params['import_mode'] import_policy = module.params['import_policy'] import_type = module.params['import_type'] snapshot = module.params['snapshot'] state = module.params['state'] if state == 'rollback': if snapshot.startswith('run-'): snapshot = snapshot.replace('run-', '', 1) if not snapshot.endswith('.tar.gz'): snapshot += '.tar.gz' filename = 'ce2_{0}-{1}'.format(export_policy, snapshot) aci.construct_url( root_class=dict( aci_class='configImportP', aci_rn='fabric/configimp-{0}'.format(import_policy), module_object=import_policy, target_filter={'name': import_policy}, ), ) aci.get_existing() aci.payload( aci_class='configImportP', class_config=dict( adminSt='triggered', descr=description, failOnDecryptErrors=fail_on_decrypt, fileName=filename, importMode=import_mode, importType=import_type, name=import_policy, snapshot='yes', ), ) aci.get_diff(aci_class='configImportP') aci.post_config() elif state == 'preview': aci.url = '%(protocol)s://%(host)s/mqapi2/snapshots.diff.xml' % module.params aci.filter_string = ( '?s1dn=uni/backupst/snapshots-[uni/fabric/configexp-%(export_policy)s]/snapshot-%(snapshot)s&' 's2dn=uni/backupst/snapshots-[uni/fabric/configexp-%(compare_export_policy)s]/snapshot-%(compare_snapshot)s' ) % module.params # Generate rollback comparison get_preview(aci) aci.exit_json() def get_preview(aci): ''' This function is used to generate a preview between two snapshots and add the parsed results to the aci module return data. 
    '''
    # Build the full snapshot-diff URL (base URL + s1dn/s2dn filter string
    # assembled in main()) and fetch it with the module's HTTP settings.
    uri = aci.url + aci.filter_string
    resp, info = fetch_url(aci.module, uri,
                           headers=aci.headers,
                           method='GET',
                           timeout=aci.module.params['timeout'],
                           use_proxy=aci.module.params['use_proxy'])
    aci.method = 'GET'
    aci.response = info['msg']
    aci.status = info['status']

    # Handle APIC response
    if info['status'] == 200:
        xml_to_json(aci, resp.read())
    else:
        # NOTE(review): fetch_url returns resp=None on transport-level
        # failures, in which case resp.read() would raise AttributeError
        # before fail_json runs -- confirm only HTTP-level errors reach here.
        aci.result['raw'] = resp.read()
        aci.fail_json(msg="Request failed: %(code)s %(text)s (see 'raw' output)" % aci.error)


def xml_to_json(aci, response_data):
    ''' This function is used to convert preview XML data into JSON. '''
    # Fall back to the raw XML string when the optional lxml/xmljson
    # converters are unavailable (XML_TO_JSON is resolved at import time).
    if XML_TO_JSON:
        xml = lxml.etree.fromstring(to_bytes(response_data))
        xmldata = cobra.data(xml)
        aci.result['preview'] = xmldata
    else:
        aci.result['preview'] = response_data


if __name__ == "__main__":
    main()
unknown
codeparrot/codeparrot-clean
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "fmt" "log/slog" "os" "reflect" "slices" "strings" "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" apiv1 "k8s.io/api/core/v1" disv1 "k8s.io/api/discovery/v1" networkv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Required to get the GCP auth provider working. "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( // metaLabelPrefix is the meta prefix used for all meta labels. // in this discovery. metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_" namespaceLabel = metaLabelPrefix + "namespace" presentValue = model.LabelValue("true") ) // DefaultSDConfig is the default Kubernetes SD configuration. 
var DefaultSDConfig = SDConfig{
	HTTPClientConfig: config.DefaultHTTPClientConfig,
}

// Register this SD mechanism with the discovery manager at load time.
func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// Role is role of the service in Kubernetes.
type Role string

// The valid options for Role.
const (
	RoleNode          Role = "node"
	RolePod           Role = "pod"
	RoleService       Role = "service"
	RoleEndpoint      Role = "endpoints"
	RoleEndpointSlice Role = "endpointslice"
	RoleIngress       Role = "ingress"
)

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// It rejects any value that is not one of the Role constants above.
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
	if err := unmarshal((*string)(c)); err != nil {
		return err
	}
	switch *c {
	case RoleNode, RolePod, RoleService, RoleEndpoint, RoleEndpointSlice, RoleIngress:
		return nil
	default:
		return fmt.Errorf("unknown Kubernetes SD role %q", *c)
	}
}

// String implements fmt.Stringer.
func (c Role) String() string {
	return string(c)
}

// Metric label values used when counting add/delete/update events.
const (
	MetricLabelRoleAdd    = "add"
	MetricLabelRoleDelete = "delete"
	MetricLabelRoleUpdate = "update"
)

// SDConfig is the configuration for Kubernetes service discovery.
type SDConfig struct {
	APIServer          config.URL              `yaml:"api_server,omitempty"`
	Role               Role                    `yaml:"role"`
	KubeConfig         string                  `yaml:"kubeconfig_file"`
	HTTPClientConfig   config.HTTPClientConfig `yaml:",inline"`
	NamespaceDiscovery NamespaceDiscovery      `yaml:"namespaces,omitempty"`
	Selectors          []SelectorConfig        `yaml:"selectors,omitempty"`
	AttachMetadata     AttachMetadataConfig    `yaml:"attach_metadata,omitempty"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return newDiscovererMetrics(reg, rmi)
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "kubernetes" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return New(opts.Logger, opts.Metrics, c)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
	c.HTTPClientConfig.SetDirectory(dir)
	c.KubeConfig = config.JoinDir(dir, c.KubeConfig)
}

// roleSelector holds one label/field selector pair per discoverable resource.
type roleSelector struct {
	node          resourceSelector
	pod           resourceSelector
	service       resourceSelector
	endpoints     resourceSelector
	endpointslice resourceSelector
	ingress       resourceSelector
}

// SelectorConfig is the YAML-facing form of a per-role selector.
type SelectorConfig struct {
	Role  Role   `yaml:"role,omitempty"`
	Label string `yaml:"label,omitempty"`
	Field string `yaml:"field,omitempty"`
}

// resourceSelector is the parsed, internal form of a SelectorConfig.
type resourceSelector struct {
	label string
	field string
}

// AttachMetadataConfig is the configuration for attaching additional metadata
// coming from namespaces or nodes on which the targets are scheduled.
type AttachMetadataConfig struct {
	Node      bool `yaml:"node"`
	Namespace bool `yaml:"namespace"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// Beyond decoding, it validates mutually-exclusive settings
// (api_server / kubeconfig_file / custom HTTP config / own_namespace)
// and checks that every selector is allowed for the configured role
// and parses as a valid field/label selector.
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultSDConfig
	type plain SDConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}
	if c.Role == "" {
		return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
	}
	err = c.HTTPClientConfig.Validate()
	if err != nil {
		return err
	}
	if c.APIServer.URL != nil && c.KubeConfig != "" {
		// Api-server and kubeconfig_file are mutually exclusive
		return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
	}
	if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
		// Kubeconfig_file and custom http config are mutually exclusive
		return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
	}
	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
		return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
	}
	if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
		return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
	}
	if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
		return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
	}

	foundSelectorRoles := make(map[Role]struct{})
	// For each role, the set of selector roles that may be configured with it
	// (endpoint-ish roles also watch the backing pods and services).
	allowedSelectors := map[Role][]string{
		RolePod:           {string(RolePod)},
		RoleService:       {string(RoleService)},
		RoleEndpointSlice: {string(RolePod), string(RoleService), string(RoleEndpointSlice)},
		RoleEndpoint:      {string(RolePod), string(RoleService), string(RoleEndpoint)},
		RoleNode:          {string(RoleNode)},
		RoleIngress:       {string(RoleIngress)},
	}

	for _, selector := range c.Selectors {
		if _, ok := foundSelectorRoles[selector.Role]; ok {
			return fmt.Errorf("duplicated selector role: %s", selector.Role)
		}
		foundSelectorRoles[selector.Role] = struct{}{}

		if _, ok := allowedSelectors[c.Role]; !ok {
			return fmt.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role)
		}
		if !slices.Contains(allowedSelectors[c.Role], string(selector.Role)) {
			return fmt.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", "))
		}

		// Validate the selector syntax eagerly so misconfiguration fails at
		// config load instead of at watch time.
		_, err := fields.ParseSelector(selector.Field)
		if err != nil {
			return err
		}
		_, err = labels.Parse(selector.Label)
		if err != nil {
			return err
		}
	}
	return nil
}

// NamespaceDiscovery is the configuration for discovering
// Kubernetes namespaces.
type NamespaceDiscovery struct {
	IncludeOwnNamespace bool     `yaml:"own_namespace"`
	Names               []string `yaml:"names"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(any) error) error {
	*c = NamespaceDiscovery{}
	type plain NamespaceDiscovery
	return unmarshal((*plain)(c))
}

// Discovery implements the discoverer interface for discovering
// targets from Kubernetes.
type Discovery struct {
	sync.RWMutex                              // guards discoverers during Run setup
	client             kubernetes.Interface   // Kubernetes API client
	role               Role                   // resource kind being discovered
	logger             *slog.Logger
	namespaceDiscovery *NamespaceDiscovery
	discoverers        []discovery.Discoverer // one per namespace/role informer set
	selectors          roleSelector
	ownNamespace       string // namespace of this pod, if own_namespace is enabled
	attachMetadata     AttachMetadataConfig
	metrics            *kubernetesMetrics
}

// getNamespaces returns the namespaces to watch: the configured names plus,
// optionally, the pod's own namespace; an empty result means all namespaces.
func (d *Discovery) getNamespaces() []string {
	namespaces := d.namespaceDiscovery.Names
	includeOwnNamespace := d.namespaceDiscovery.IncludeOwnNamespace

	if len(namespaces) == 0 && !includeOwnNamespace {
		return []string{apiv1.NamespaceAll}
	}

	if includeOwnNamespace && d.ownNamespace != "" {
		return append(namespaces, d.ownNamespace)
	}

	return namespaces
}

// New creates a new Kubernetes discovery for the given role.
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
	m, ok := metrics.(*kubernetesMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}

	if l == nil {
		l = promslog.NewNopLogger()
	}

	var (
		kcfg         *rest.Config
		err          error
		ownNamespace string
	)
	// Build the client config from, in order of precedence: an explicit
	// kubeconfig file, the in-cluster service account, or an explicit
	// api_server URL with the configured HTTP client settings.
	switch {
	case conf.KubeConfig != "":
		kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
		if err != nil {
			return nil, err
		}
	case conf.APIServer.URL == nil:
		// Use the Kubernetes provided pod service account
		// as described in https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#using-official-client-libraries
		kcfg, err = rest.InClusterConfig()
		if err != nil {
			return nil, err
		}

		if conf.NamespaceDiscovery.IncludeOwnNamespace {
			ownNamespaceContents, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
			if err != nil {
				return nil, fmt.Errorf("could not determine the pod's namespace: %w", err)
			}
			if len(ownNamespaceContents) == 0 {
				return nil, errors.New("could not read own namespace name (empty file)")
			}
			ownNamespace = string(ownNamespaceContents)
		}

		l.Info("Using pod service account via in-cluster config")
	default:
		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
		if err != nil {
			return nil, err
		}
		kcfg = &rest.Config{
			Host:      conf.APIServer.String(),
			Transport: rt,
		}
	}

	kcfg.UserAgent = version.PrometheusUserAgent()
	// Prefer protobuf over JSON for lower API-server and client overhead.
	kcfg.ContentType = "application/vnd.kubernetes.protobuf"

	c, err := kubernetes.NewForConfig(kcfg)
	if err != nil {
		return nil, err
	}

	d := &Discovery{
		client:             c,
		logger:             l,
		role:               conf.Role,
		namespaceDiscovery: &conf.NamespaceDiscovery,
		discoverers:        make([]discovery.Discoverer, 0),
		selectors:          mapSelector(conf.Selectors),
		ownNamespace:       ownNamespace,
		attachMetadata:     conf.AttachMetadata,
		metrics:            m,
	}

	return d, nil
}

// mapSelector converts the YAML selector list into the per-role
// roleSelector lookup structure used at watch time.
func mapSelector(rawSelector []SelectorConfig) roleSelector {
	rs := roleSelector{}
	for _, resourceSelectorRaw := range rawSelector {
		switch resourceSelectorRaw.Role {
		case RoleEndpointSlice:
			rs.endpointslice.field = resourceSelectorRaw.Field
			rs.endpointslice.label = resourceSelectorRaw.Label
		case RoleEndpoint:
			rs.endpoints.field = resourceSelectorRaw.Field
			rs.endpoints.label = resourceSelectorRaw.Label
		case RoleIngress:
			rs.ingress.field = resourceSelectorRaw.Field
			rs.ingress.label = resourceSelectorRaw.Label
		case RoleNode:
			rs.node.field = resourceSelectorRaw.Field
			rs.node.label = resourceSelectorRaw.Label
		case RolePod:
			rs.pod.field = resourceSelectorRaw.Field
			rs.pod.label = resourceSelectorRaw.Label
		case RoleService:
			rs.service.field = resourceSelectorRaw.Field
			rs.service.label = resourceSelectorRaw.Label
		}
	}
	return rs
}

// Disable the informer's resync, which just periodically resends already processed updates and distort SD metrics.
const resyncDisabled = 0

// Run implements the discoverer interface.
// Run sets up, per configured role and namespace, the informers and role
// discoverers, starts them, and then fans their target-group updates into ch
// until ctx is canceled.
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	d.Lock()
	namespaces := d.getNamespaces()

	switch d.role {
	case RoleEndpointSlice:
		for _, namespace := range namespaces {
			var informer cache.SharedIndexInformer
			e := d.client.DiscoveryV1().EndpointSlices(namespace)
			elw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.endpointslice.field
					options.LabelSelector = d.selectors.endpointslice.label
					return e.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.endpointslice.field
					options.LabelSelector = d.selectors.endpointslice.label
					return e.Watch(ctx, options)
				},
			}
			informer = d.newIndexedEndpointSlicesInformer(elw, &disv1.EndpointSlice{})

			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.Watch(ctx, options)
				},
			}
			p := d.client.CoreV1().Pods(namespace)
			plw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.Watch(ctx, options)
				},
			}
			var nodeInf cache.SharedInformer
			if d.attachMetadata.Node {
				// NOTE(review): this case passes context.Background() while the
				// RoleEndpoint case below passes ctx — the informer lifetime is
				// still bounded by ctx.Done(); confirm the inconsistency is benign.
				nodeInf = d.newNodeInformer(context.Background())
				go nodeInf.Run(ctx.Done())
			}
			var namespaceInf cache.SharedInformer
			if d.attachMetadata.Namespace {
				namespaceInf = d.newNamespaceInformer(context.Background())
				go namespaceInf.Run(ctx.Done())
			}
			eps := NewEndpointSlice(
				d.logger.With("role", "endpointslice"),
				informer,
				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
				nodeInf,
				namespaceInf,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, eps)
			go eps.endpointSliceInf.Run(ctx.Done())
			go eps.serviceInf.Run(ctx.Done())
			go eps.podInf.Run(ctx.Done())
		}
	case RoleEndpoint:
		for _, namespace := range namespaces {
			e := d.client.CoreV1().Endpoints(namespace)
			elw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.endpoints.field
					options.LabelSelector = d.selectors.endpoints.label
					return e.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.endpoints.field
					options.LabelSelector = d.selectors.endpoints.label
					return e.Watch(ctx, options)
				},
			}
			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.Watch(ctx, options)
				},
			}
			p := d.client.CoreV1().Pods(namespace)
			plw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.Watch(ctx, options)
				},
			}
			var nodeInf cache.SharedInformer
			if d.attachMetadata.Node {
				nodeInf = d.newNodeInformer(ctx)
				go nodeInf.Run(ctx.Done())
			}
			var namespaceInf cache.SharedInformer
			if d.attachMetadata.Namespace {
				namespaceInf = d.newNamespaceInformer(ctx)
				go namespaceInf.Run(ctx.Done())
			}

			eps := NewEndpoints(
				d.logger.With("role", "endpoint"),
				d.newIndexedEndpointsInformer(elw),
				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
				nodeInf,
				namespaceInf,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, eps)
			go eps.endpointsInf.Run(ctx.Done())
			go eps.serviceInf.Run(ctx.Done())
			go eps.podInf.Run(ctx.Done())
		}
	case RolePod:
		// Node/namespace metadata informers are shared across all namespaces.
		var nodeInformer cache.SharedInformer
		if d.attachMetadata.Node {
			nodeInformer = d.newNodeInformer(ctx)
			go nodeInformer.Run(ctx.Done())
		}
		var namespaceInformer cache.SharedInformer
		if d.attachMetadata.Namespace {
			namespaceInformer = d.newNamespaceInformer(ctx)
			go namespaceInformer.Run(ctx.Done())
		}

		for _, namespace := range namespaces {
			p := d.client.CoreV1().Pods(namespace)
			plw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.Watch(ctx, options)
				},
			}
			pod := NewPod(
				d.logger.With("role", "pod"),
				d.newIndexedPodsInformer(plw),
				nodeInformer,
				namespaceInformer,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, pod)
			go pod.podInf.Run(ctx.Done())
		}
	case RoleService:
		var namespaceInformer cache.SharedInformer
		if d.attachMetadata.Namespace {
			namespaceInformer = d.newNamespaceInformer(ctx)
			go namespaceInformer.Run(ctx.Done())
		}

		for _, namespace := range namespaces {
			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.Watch(ctx, options)
				},
			}
			svc := NewService(
				d.logger.With("role", "service"),
				d.newIndexedServicesInformer(slw),
				namespaceInformer,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, svc)
			go svc.informer.Run(ctx.Done())
		}
	case RoleIngress:
		var namespaceInformer cache.SharedInformer
		if d.attachMetadata.Namespace {
			namespaceInformer = d.newNamespaceInformer(ctx)
			go namespaceInformer.Run(ctx.Done())
		}

		for _, namespace := range namespaces {
			i := d.client.NetworkingV1().Ingresses(namespace)
			ilw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.ingress.field
					options.LabelSelector = d.selectors.ingress.label
					return i.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.ingress.field
					options.LabelSelector = d.selectors.ingress.label
					return i.Watch(ctx, options)
				},
			}
			ingress := NewIngress(
				d.logger.With("role", "ingress"),
				d.newIndexedIngressesInformer(ilw),
				namespaceInformer,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, ingress)
			go ingress.informer.Run(ctx.Done())
		}
	case RoleNode:
		nodeInformer := d.newNodeInformer(ctx)
		node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount)
		d.discoverers = append(d.discoverers, node)
		go node.informer.Run(ctx.Done())
	default:
		d.logger.Error("unknown Kubernetes discovery kind", "role", d.role)
	}

	// Fan out: run every role discoverer, all writing into the same channel.
	var wg sync.WaitGroup
	for _, dd := range d.discoverers {
		wg.Add(1)
		go func(d discovery.Discoverer) {
			defer wg.Done()
			d.Run(ctx, ch)
		}(dd)
	}

	d.Unlock()

	wg.Wait()
	<-ctx.Done()
}

// lv is shorthand for converting a string to a model.LabelValue.
func lv(s string) model.LabelValue {
	return model.LabelValue(s)
}

// send writes a single target group to ch, aborting if ctx is canceled.
func send(ctx context.Context, ch chan<- []*targetgroup.Group, tg *targetgroup.Group) {
	if tg == nil {
		return
	}
	select {
	case <-ctx.Done():
	case ch <- []*targetgroup.Group{tg}:
	}
}

// retryOnError retries f at the given interval until it succeeds (returns
// false) or ctx is canceled (returns true).
func retryOnError(ctx context.Context, interval time.Duration, f func() error) (canceled bool) {
	var err error
	err = f()
	for {
		if err == nil {
			return false
		}
		select {
		case <-ctx.Done():
			return true
		case <-time.After(interval):
			err = f()
		}
	}
}

// newNodeInformer creates a shared informer for nodes, applying the
// configured node selectors.
func (d *Discovery) newNodeInformer(_ context.Context) cache.SharedInformer {
	nlw := &cache.ListWatch{
		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = d.selectors.node.field
			options.LabelSelector = d.selectors.node.label
			return d.client.CoreV1().Nodes().List(ctx, options)
		},
		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = d.selectors.node.field
			options.LabelSelector = d.selectors.node.label
			return d.client.CoreV1().Nodes().Watch(ctx, options)
		},
	}
	return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
}

// newNamespaceInformer creates a shared informer for all namespaces.
func (d *Discovery) newNamespaceInformer(_ context.Context) cache.SharedInformer {
	// We don't filter on NamespaceDiscovery.
	nlw := &cache.ListWatch{
		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
			return d.client.CoreV1().Namespaces().List(ctx, options)
		},
		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
			return d.client.CoreV1().Namespaces().Watch(ctx, options)
		},
	}
	return d.mustNewSharedInformer(nlw, &apiv1.Namespace{}, resyncDisabled)
}

// newIndexedPodsInformer creates a pods informer, optionally indexed by node
// name and/or namespace when that metadata is being attached.
func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)
	if d.attachMetadata.Node {
		indexers[nodeIndex] = func(obj any) ([]string, error) {
			pod, ok := obj.(*apiv1.Pod)
			if !ok {
				return nil, errors.New("object is not a pod")
			}
			return []string{pod.Spec.NodeName}, nil
		}
	}

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
}

// newIndexedEndpointsInformer creates an Endpoints informer indexed by
// backing pod, and optionally by node name and/or namespace.
func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)
	indexers[podIndex] = func(obj any) ([]string, error) {
		e, ok := obj.(*apiv1.Endpoints)
		if !ok {
			return nil, errors.New("object is not endpoints")
		}

		var pods []string
		for _, target := range e.Subsets {
			for _, addr := range target.Addresses {
				if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
					pods = append(pods, namespacedName(addr.TargetRef.Namespace, addr.TargetRef.Name))
				}
			}
		}
		return pods, nil
	}

	if d.attachMetadata.Node {
		indexers[nodeIndex] = func(obj any) ([]string, error) {
			e, ok := obj.(*apiv1.Endpoints)
			if !ok {
				return nil, errors.New("object is not endpoints")
			}

			var nodes []string
			for _, target := range e.Subsets {
				for _, addr := range target.Addresses {
					if addr.TargetRef != nil {
						switch addr.TargetRef.Kind {
						case "Pod":
							if addr.NodeName != nil {
								nodes = append(nodes, *addr.NodeName)
							}
						case "Node":
							nodes = append(nodes, addr.TargetRef.Name)
						}
					}
				}
			}
			return nodes, nil
		}
	}

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
}

// newIndexedEndpointSlicesInformer creates an EndpointSlice informer indexed
// by owning service, and optionally by node name and/or namespace.
func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)
	indexers[serviceIndex] = func(obj any) ([]string, error) {
		e, ok := obj.(*disv1.EndpointSlice)
		if !ok {
			return nil, errors.New("object is not an endpointslice")
		}

		svcName, exists := e.Labels[disv1.LabelServiceName]
		if !exists {
			return nil, nil
		}

		return []string{namespacedName(e.Namespace, svcName)}, nil
	}

	if d.attachMetadata.Node {
		indexers[nodeIndex] = func(obj any) ([]string, error) {
			e, ok := obj.(*disv1.EndpointSlice)
			if !ok {
				return nil, errors.New("object is not an endpointslice")
			}

			var nodes []string
			for _, target := range e.Endpoints {
				if target.TargetRef != nil {
					switch target.TargetRef.Kind {
					case "Pod":
						if target.NodeName != nil {
							nodes = append(nodes, *target.NodeName)
						}
					case "Node":
						nodes = append(nodes, target.TargetRef.Name)
					}
				}
			}

			return nodes, nil
		}
	}

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers)
}

// newIndexedServicesInformer creates a services informer, optionally indexed
// by namespace.
func (d *Discovery) newIndexedServicesInformer(slw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(slw, &apiv1.Service{}, resyncDisabled, indexers)
}

// newIndexedIngressesInformer creates an ingresses informer, optionally
// indexed by namespace.
func (d *Discovery) newIndexedIngressesInformer(ilw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(ilw, &networkv1.Ingress{}, resyncDisabled, indexers)
}

// informerWatchErrorHandler counts watch failures before delegating to the
// default client-go error handler.
func (d *Discovery) informerWatchErrorHandler(ctx context.Context, r *cache.Reflector, err error) {
	d.metrics.failuresCount.Inc()
	cache.DefaultWatchErrorHandler(ctx, r, err)
}

// mustNewSharedInformer builds a shared informer wired to the discovery's
// watch-error handler, panicking only on API misuse.
func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer {
	informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod)
	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
	// Such a scenario would suggest an incorrect use of the API, thus the panic.
	if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
		panic(err)
	}
	return informer
}

// mustNewSharedIndexInformer is mustNewSharedInformer with indexers attached.
func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers)
	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
	// Such a scenario would suggest an incorrect use of the API, thus the panic.
	if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
		panic(err)
	}
	return informer
}

// addObjectAnnotationsAndLabels adds sanitized *_label_*, *_labelpresent_*,
// *_annotation_* and *_annotationpresent_* meta labels for the object.
func addObjectAnnotationsAndLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, resource string) {
	for k, v := range objectMeta.Labels {
		ln := strutil.SanitizeLabelName(k)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_label_"+ln)] = lv(v)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_labelpresent_"+ln)] = presentValue
	}

	for k, v := range objectMeta.Annotations {
		ln := strutil.SanitizeLabelName(k)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_annotation_"+ln)] = lv(v)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_annotationpresent_"+ln)] = presentValue
	}
}

// addObjectMetaLabels adds the object's name plus its label/annotation meta
// labels under the role's prefix.
func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) {
	labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name)

	addObjectAnnotationsAndLabels(labelSet, objectMeta, string(role))
}

// addNamespaceMetaLabels adds the namespace object's label/annotation meta labels.
func addNamespaceMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta) {
	// Omitting the namespace name because should be already injected elsewhere.
	addObjectAnnotationsAndLabels(labelSet, objectMeta, "namespace")
}

// namespacedName joins a namespace and name into the "<ns>/<name>" key form.
func namespacedName(namespace, name string) string {
	return namespace + "/" + name
}

// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
func nodeName(o any) (string, error) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
	if err != nil {
		return "", err
	}
	return key, nil
}
go
github
https://github.com/prometheus/prometheus
discovery/kubernetes/kubernetes.go
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution(object):
    """Decide whether a binary tree is height-balanced.

    A tree is balanced when, for every node, the heights of its left and
    right subtrees differ by at most one.
    """

    def depth(self, root):
        """Return the height of *root* (0 for an empty subtree).

        Previously this returned ``False`` for an empty node; ``0`` is
        value-equal (``False == 0``) but type-correct.

        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        return max(self.depth(root.left), self.depth(root.right)) + 1

    def isBalanced(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        # Single O(n) post-order pass; the original recomputed subtree
        # depths at every node, which is O(n^2) on skewed trees.
        return self._checked_height(root) != -1

    def _checked_height(self, node):
        # Height of *node*, or -1 as soon as any subtree is unbalanced so
        # the remaining work can be short-circuited.
        if not node:
            return 0
        left = self._checked_height(node.left)
        if left == -1:
            return -1
        right = self._checked_height(node.right)
        if right == -1:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1
unknown
codeparrot/codeparrot-clean
'''
Copyright 2013 Cosnita Radu Viorel

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.oauth2.sha512salt_passwords_hasher
'''
from fantastico.oauth2.passwords_hasher import PasswordsHasher
import base64
import hashlib
from fantastico.utils.dictionary_object import DictionaryObject

class Sha512SaltPasswordsHasher(PasswordsHasher):
    '''This class provides the sha512salt implementation for password hashing. In addition, the result is encoded using base64. In order
    to use this hasher try the code snippet below:

    .. code-block:: python

        sha512_hasher = PasswordsHasherFactory().get_hasher(PasswordsHasherFactory.SHA512_SALT)

        hashed_passwd = sha512_hasher.hash_password("abcd", DictionaryObject({"salt": 123}))'''

    # Salt used when the caller provides no hash context or an empty salt.
    _DEFAULT_SALT = 9999

    def hash_password(self, plain_passwd, hash_ctx=None):
        '''This method provides the sha512 with salt algorithm for a given plain password. In addition, the hash is base64 encoded.

        :param plain_passwd: the clear-text password; ``None`` is treated as
            an empty string and surrounding whitespace is stripped.
        :param hash_ctx: optional context carrying a ``salt`` entry; when
            missing or without a salt, ``_DEFAULT_SALT`` is used.
        :returns: the base64-encoded hex digest as a ``str``.
        '''
        # Fall back to the default salt when no usable salt is supplied.
        if not hash_ctx or not hash_ctx.dictionary.get("salt"):
            hash_ctx = DictionaryObject({"salt": self._DEFAULT_SALT})

        plain_passwd = (plain_passwd or "").strip()
        salt = hash_ctx.salt

        # The salt is appended to the password before hashing.
        text = (plain_passwd + str(salt)).encode()
        hashed_text = hashlib.sha512(text).hexdigest()

        # NOTE(review): the hex digest (already ASCII) is base64-encoded on
        # top, yielding a 172-char value — presumably intentional for storage
        # compatibility; changing it would invalidate stored hashes.
        return base64.b64encode(hashed_text.encode()).decode()
unknown
codeparrot/codeparrot-clean
- Feature Name: Follower Reads Adoption - Implementation Status: completed - Start Date: 2018-12-27 - Authors: Andrew Werner - RFC PR: #33474 - Cockroach Issue: #16593 # Summary Follower reads are consistent reads at historical timestamps from follower replicas. They make the non-leaseholder replicas in a range suitable sources for historical reads. Historical reads include both `AS OF SYSTEM TIME` queries as well as transactions with a read timestamp sufficiently in the past (for example long-running analytics queries). Most of the required machinery to safely perform these reads was implemented in the [Follower Reads RFC](20180603_follower_reads.md). Follower reads can greatly improve query performance by avoiding the need to make wide area RPCs and by reducing traffic on lease holders. This document proposes mechanisms to expose follower reads through a new SQL function to determine a reasonable read time stamp and the accompanying change to make that function legal in use with `AS OF SYSTEM TIME` queries as well as tweaks to make the SQL physical planner and kv DistSender aware that follower reads are possible. Lastly this proposal suggests extending the `BEGIN TRANSACTION` and `SET TRANSACTION` statements to allow read-only transactions to be performed at a single transaction timestamp that can read from followers. Given the intention to make follower reads an enterprise feature, some of the complexity in this proposal stems from the need to inject behavior from CCL code. # Motivation Given that cockroachdb stores multiple replicas of data, a client might expect that it be able to serve reads from any of those replicas. In order to provide its high level of consistency and isolation, cockroachdb currently requires that all reads for a range go to the current lease holder. For reads against data written sufficiently far in the past, consistency and isolation morally should not be a concern as no concurrent writes should be possible. 
There are many queries which do not require a completely up-to-date view of the database such as analytical workloads for report generation. Enabling reads to be performed within the same data center can greatly increase throughput for large reads and greatly reduce latency for small ones. Increasing performance and lowering cost to run large analytical queries is valuable, especially for geo-distributed deployments. Providing a convenient mechanism to request local rather than lease-holder historical reads will be a compelling enterprise feature. # Guide-level explanation The Follower Reads RFC lays out the mechanisms to understand closed timestamps and presents the rules which the store follows to determine whether a replica can serve a read. This document deals with how clients can make use of this behavior offered by the storage layer. The work described in the Follower Reads RFC provides the mechanism which enables a replica to determine if a query timestamp is adequately old to allow for a follower read through the use of its `Closed Timestamp` mechanism. That work has already enabled replicas to fulfil read requests in the past. In order to expose this functionality to clients all that needs to be done is to convince the SQL physical planner to direct historical reads to local nodes and to coax the `DistSender` to send requests to followers when appropriate. The `Closed Timestamp` is tracked on a per-range basis which attempts to lag behind "real" time by some target duration controlled by the cluster setting `kv.closed_timestamp.target_duration`. As of writing this value defaults to 30 seconds but could likely be lowered to 5-10 seconds (at some threshold it may potentially interfere with on-going transactions). The closed timestamp subsystem seeks to update the closed timestamp at an interval defined as a fraction of the `target_duration` which is termed the `close_fraction`. 
For example, if the `target_duration` is 30s and the `close_fraction` is 0.2 then the subsystem will try to keep the closed timestamp 30s behind real time and will try to update the value every 6s. This proposal seeks to achieve follower reads by employing stateless approximation of when a follower read is possible by assuming that a read may be directed to a follower if it occurs at some multiple of the target duration which is controlled by a hidden cluster setting which we'll refer to as the `target_multiple`. While this may ultimately lead to failure to perform reads at a follower it leads to a simple implementation that controls a fixed tradeoff between staleness (the amount of time behind the "present" at which `follower_read_timestamp()` reads occur) and the risk of needing to perform a leaseholder read (which will happen seamlessly due to a NotLeaseHolderError). The `target_multiple` defines the tradeoff between staleness and likelihood of follower read failing. In order to ease the burden of the client determining an adequately old timestamp for use with an `AS OF SYSTEM TIME` query, this RFC introduces a new SQL function `follower_read_timestamp()` which is effectively a syntactic short-hand for multiplying the above mentioned cluster settings then extends `AS OF SYSTEM TIME` to allow for a non-constant expression. After this change and the enabling of `kv.closed_timestamp.follower_reads.enabled` clients can trivially encourage their `SELECT` statements to be directed to physically close replicas. For example, imagine that the kv.kv table exists, the below query would perform a read against the nearest replica: ``` SELECT * FROM kv.kv AS OF SYSTEM TIME follower_read_timestamp() ORDER BY k LIMIT 10; ``` The physical planning of SQL evaluation currently tries to send DistSQL processors to be run on nodes which are currently the leaseholder for ranges of interest. 
This allocation is performed via the `distsqlplan.SpanResolver` which internally uses a `leaseHolderOracle` which provides a ReplicaDesc given a RangeDesc according to a policy. This RFC refactors the oracle logic into its own package and provides (via injection) a new follower read aware policy. The `kv.DistSender` currently attempts to send all writes and reads at consistency levels other than INCONSISTENT to the current lease holder for a range falling back to replica closeness. This RFC adds an injectable `CanUseFollowerReads` function which defaults to returning `false` that the DistSender code will consult when determining whether to locate the current lease holder. While these changes enable and ease performing individual SELECT queries against follower replicas, it does not enable running entire (read-only) transactions at a single point in time and thus benefitting from the performance gains offered by follower reads. This document proposes an extension to the `SET TRANSACTION` and `BEGIN TRANSACTION` statements to allow a clause analagous to `AS OF SYSTEM TIME` for `SELECT` statements today. This change will ease the multi-statement historical reads, potentially enabling use of existing code which relies on a transaction object, and will provide a mechanism to run historical reads with a HIGH transaction priority, eliminating the possibility of blocking on a long-running read-write transaction. `SET TRANSACTION` must be the first statement following `BEGIN`. Note that `SET TRANSACTION AS OF SYSTEM TIME` implies `READ ONLY`. 
A historical read only transaction thus will look something like the following: ```sql BEGIN; SET TRANSACTION AS OF SYSTEM TIME follower_read_timestamp(), PRIORITY HIGH; SELECT * FROM kv.kv; COMMIT; ``` or alternatively: ```sql BEGIN TRANSACTION AS OF SYSTEM TIME follower_read_timestamp(), PRIORITY HIGH; SELECT * FROM kv.kv; COMMIT; ``` # Reference-level explanation This section will focus on the specific details of plumbing the functionality required to expose follower reads through the codebase. Because follower reads will be implemented as an enterprise feature the core enabling logic will live in a CCL licensed package `pkg/ccl/followerreadsccl`. This package will then inject the needed abstractions to provide the following six changes: 1. Define the `target_multiple` internal setting. 2. Add the `follower_read_timestamp()` SQL builtin. 3. Extend the SQL evaluation to support `follower_read_timestamp()` with `AS OF SYSTEM TIME`. 4. Abstract the replica selection mechanism for SQL physical planning. 5. Modify DistSender logic to determine when it may safely send a read to a follower. 6. Extend the `SET TRANSACTION` and `BEGIN TRANSACTION` statements to enable setting the TxnTime. ## Detailed Design ### The `target_multiple` Cluster setting The new cluster setting will be defined inside of `followerreadsccl` and thus will only exist in CCL builds. The setting will be a float value greater than or equal to one which is combined with `kv.closed_timestamp.target_duration` to determine at which time `follower_read_timestamp()` should evaluate. A function in followerreadsccl like below will use the recent time: ```go // recentDuration returns the duration to be used as the offset to create a // follower_read_timestamp(). The same value plus a unit of clock uncertainty, // then should be used to determine if a query can use follower reads. 
func recentDuration(st *cluster.Settings) time.Duration { targetMultiple := TargetMultiple.Get(&st.SV) targetDuration := closedts.TargetDuration.Get(&st.SV) closeFraction := closedts.CloseFraction.Get(&st.SV) return -1 * time.Duration(float64(targetDuration) * (1 + closeFraction * targetMultiple)) } ``` The setting represents the tradeoff between staleness of `follower_read_timestamp()` queries and the chance that such queries may fail to be performed on a follower. The initial choice of value is `3` which likely is rather conservative. Given that the current target duration for closed timestamps is 30s and the close fraction is .2, queries performed with `follower_read_timestamp()` should lag "real" time by roughly 48s. If we can lower the target duration to 10s which would lead to a 16s real time delay. Note that while this setting does control a tradeoff, it is deeply related to implementation details and will be hidden from customers. ### SQL `follower_read_timestamp()` builtin A new SQL builtin `follower_read_timestamp()` is added to call through to an injected function which determines a timestamp which likely can be used to perform a follower read. This function will only be functional with the CCL version of CRDB and an enterprise license. ### `follower_read_timestamp()` expressions in `AS OF SYSTEM TIME` clauses Prior to this change, SQL evaluation enforced that the expression passed to `AS OF SYSTEM TIME` clauses be constant. This requirement prevented the use of the above mentioned function. Allowing for some evaluation of non-constant expressions enables useful functionality such as taking the max of two timestamps allowing enforcement of a maximum staleness. Allowing `AS OF SYSTEM TIME` clauses to accept all expressions is problematic as arbitrary SQL scalar evaluation is not available at time of AS OF clause expression evaluation. 
The intention is to only permit the `follower_read_timestamp()` function to be used and for all other expressions to be rejected. ### Abstract replica selection for SQL physical planning. The physical planning of SQL query evaluation attempts to place evaluation near the lease holder for ranges when known, falling back to a policy which seeks to pack requests on nearby nodes. This logic is encapsulated in an interface called a `leaseHolderOracle` (henceforth Oracle) which is constructed based on a policy. Today's policy is called the `binPackingLeaseHolderChoice`. All of this logic currently resides in the `sql/distsqlplan` package and is used by the `SpanResolver`. A span resolver uses a `*client.Txn` to create a `SpanResolverIterator` which iterates through ranges and provides replica selection. This proposal moves the Oracle logic into a new package `sql/distsqlplan/replicaoracle` which will extend the current abstraction for selecting a replica given a policy to additional be able to take into account the current transaction. The package will also provide a mechanism to register new policies which we'll see that followerreadsccl will exploit. In addition to today's existing binPacking and random policies the new package will include a policy which selects the closest replica. Prior to this change the policy is used to statically construct an Oracle which is used throughout the life of the SpanResolver. An Oracle provides a single method: ```go // ChoosePreferredReplica returns a choice for one range. Implementors are free to // use the queryState param, which has info about the number of // ranges already handled by each node for the current SQL query. The state is // not updated with the result of this method; the caller is in charge of // that. // // A RangeUnavailableError can be returned if there's no information in gossip // about any of the nodes that might be tried. 
ChoosePreferredReplica( context.Context, roachpb.RangeDescriptor, OracleQueryState, ) (kv.ReplicaInfo, error) ``` The change will add a layer of indirection such that rather than holding an Oracle, the SpanResolver will hold an OracleFactory with the following interface: ```go // OracleFactory creates an oracle for a Txn. type OracleFactory interface { // Oracle provides an Oracle to select an appropriate replica for a range. Oracle(*client.Txn) Oracle } ``` For the existing policies the OracleFactory can be implemented by the same concrete struct which implement today's Oracles by merely returning themselves in calls to `Oracle()`. This mechanism allows different policies to be used for different Txns, namely the use of the closest policy for historical queries and the binPacking policy for all others. This `FollowerReadsAwarePolicy` will check to see if the OrigTimestamp of a Txn is before now less `recentDuration` plus a clock uncertainty duration. The `followerreadsccl` code will then register this new policy and set it to the global var `distsqlplan.ReplicaOraclePolicy`. ### Expose DistSender For Determining Follower Read Safety The last hurdle to exposing follower reads is that the `kv.DistSender` attempts to send batch requests to current lease holders which may prevent reads from going to nearby follower replicas. In order to inform the DistSender that it can send a batch request to a follower we add a new global var in the kv package ```go var CanSendToFollower = func( clusterID uuid.UUID, _ *cluster.Settings, _ *roachpb.BatchRequest, ) bool { return false } ``` Which is adopted by the DistSender when it decides whether to look up a cached lease holder in `DistSender.sendSingleRange`. The followerreadsccl package can then inject a new implementation of this function which ensures that the batch request is a read only transaction and then verifies that it meets the criteria for a follower read. 
Note that the read-only property of a batch request is that all contained requests are neither write nor admin. This simplistic policy has potentially negative implications for export requests. Right now export relies on balanced range leadership to spread load through the cluster. Export requests are also commonly run at historical timestamps. If we were to blindly allow all sufficiently old, read-only batches to be sent to the nearest replica then export load may all flood a single node in a multi-node cluster. To deal with this we'll explicitly exclude batches which contain export requests from consideration for follower reads. ### Extending `SET TRANSACTION` to support `AS OF SYSTEM TIME` It is easy to imagine cases where a client would like to run multiple reads at the same historical timestamp. Today this would require adding an `AS OF` clause to each `SELECT`; this is cumbersome even when the exact timestamp is known, but is made worse when the timestamp which the client would like to use is generated by the server. In order to reuse the same timestamp the client would need to jump through the hoop of first generating the timestamp and then plumbing it into each individual statement. To simplify this workflow we'll expose a mechanism to run an entire transaction at a single timestamp by extending the `SET TRANSACTION` statement to support the `AS OF SYSTEM TIME` clause. While it might on some level seem reasonable to allow for arbitrary read-write queries to be performed at historical timestamps, due to the mechanisms of closed timestamps, write operations could never successfully commit. Because the MinProposalTracker would effectively block write operations, we'll enforce that `SET TRANSACTION AS OF SYSTEM TIME` implies `SET TRANSACTION READ ONLY`. Allowing the setting of timestamp on transactions relates to another problem; you cannot set the priority on an implicit transaction.
The problem arises when reasoning about follower reads which encounter unresolved intents which might impact the value they read. Like always, a read must resolve unresolved intents which might impact it if the intent is from a transaction which is still running and that transaction is not at a lower priority than this read. If a client were to run a `follower_read_timestamp()` read only transaction as proposed above, then that client could additionally set the transaction priority to HIGH which will ensure that the read never waits on pending long-running update transactions. ## Drawbacks There are very few drawbacks to implementing the high level idea. Most of the groundwork has already been laid. Any obvious downsides come from the impurity of the injection required to realize the functionality as an enterprise feature. ## Rationale and Alternatives ### Stateful closed timestamp tracking One potential downside of this approach is that in an edge case it may have the potential to detrimentally affect cluster performance in the case of bursty traffic and a large volume of follower reads. Imagine a situation where a large percentage of client traffic is due to follower reads and the cluster is heavily loaded such that all transactions are performing acceptably but if the workload were to be shifted entirely such that all requests were forced to go to leaseholders it would not be capable of acceptably serving the traffic. If then, a burst of load or some other cluster event were to lead one or more replicas to fall behind in their ability to publish closed timestamps, all traffic which was spread over all of the replicas would begin receiving all of the load that had been going to followers. It is possible that this concern is not realistic in most common cases. Furthermore it seems straightforward to mitigate by increasing the target multiple. The problem seems worse as the replication factor increases beyond 3 to numbers like 7 or 9.
Furthermore even if the increased load during this bursty period does not meaningfully affect OLTP traffic, it may lead to potentially massively increased latency for queries which in the previous regime had been fast. A more sophisticated mechanism which statefully tracks closed timestamps on a per-range basis on all nodes would allow `follower_read_timestamp` to always evaluate to a timestamp which is known to be closed. Such an approach may, in the common case, be less pessimistic than this proposal's target_multiple and, unlike the optimistic approach, would be sure to always safely perform follower reads. That being said, a stateful approach which tracks follower reads would require nodes to track closed timestamps for all replicas at planning time and may additionally require new mechanisms to mark timestamps as known to be safe for follower reads. Furthermore the state tracking may be prohibitively expensive on large clusters. ### Node-local Dynamic Adjustment of `targetMultiple` Another less invasive approach might be to dynamically update the target multiple by detecting NotLeaseHolderErrors for queries which were expected to hit followers. This could mitigate the flood of consistent reads in the face of lagging closed timestamps but would make the semantics of `follower_read_timestamp()` harder to understand and would require increased participation from the DistSender to provide the feedback.
unknown
github
https://github.com/cockroachdb/cockroach
docs/RFCS/20181227_follower_reads_implementation.md
# 05.10.2007, c # last revision: 25.02.2008 from sfepy import data_dir filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh' # Whole domain $Y$. region_1000 = { 'name' : 'Y', 'select' : 'all', } # Domain $Y_1$. region_1 = { 'name' : 'Y1', 'select' : 'elements of group 1', } # Domain $Y_2$. region_2 = { 'name' : 'Y2', 'select' : 'elements of group 2', } region_10 = { 'name' : 'Bottom', 'select' : 'nodes in (z < %f)' % -0.499, } region_11 = { 'name' : 'Top', 'select' : 'nodes in (z > %f)' % 0.499, } material_1 = { 'name' : 'solid', 'values' : { 'lam' : 1e1, 'mu' : 1e0, 'density' : 1e-1, }, } field_1 = { 'name' : '3_displacement', 'dtype' : 'real', 'shape' : 'vector', 'region' : 'Y', 'approx_order' : 1, } variable_1 = { 'name' : 'u', 'kind' : 'unknown field', 'field' : '3_displacement', 'order' : 0, } variable_2 = { 'name' : 'v', 'kind' : 'test field', 'field' : '3_displacement', 'dual' : 'u', } ebc_1 = { 'name' : 'Fix', 'region' : 'Bottom', 'dofs' : {'u.all' : 0.0}, } ebc_2 = { 'name' : 'Load', 'region' : 'Top', 'dofs' : {'u.[0,1]' : 0.2, 'u.2' : 0.5}, } lcbc_1 = { 'name' : 'rigid1', 'region' : 'Y2', 'dofs' : {'u.[0,1]' : 'rigid'}, } ## lcbc_2 = { ## 'name' : 'rigid1', ## 'region' : 'Y3', ## 'dofs' : {'u.all' : 'rigid'}, ## } integral_1 = { 'name' : 'i1', 'kind' : 'v', 'quadrature' : 'gauss_o2_d3', } equations = { 'balance' : """dw_lin_elastic_iso.i1.Y( solid.lam, solid.mu, v, u ) = 0""", } solver_0 = { 'name' : 'ls', 'kind' : 'ls.scipy_direct', } solver_1 = { 'name' : 'newton', 'kind' : 'nls.newton', 'i_max' : 1, 'eps_a' : 1e-10, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red). 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 1.1, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6, 'is_plot' : False, 'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max) } ## # FE assembling parameters. 
fe = { 'chunk_size' : 1000 } from testsBasic import TestLCBC output_name = 'test_lcbc_3d.vtk' ## # 03.10.2007, c class Test( TestLCBC ): pass
unknown
codeparrot/codeparrot-clean
bugfixes: - password lookup plugin - replace random.SystemRandom() with secrets.SystemRandom() when generating passwords (https://github.com/ansible/ansible/issues/85956, https://github.com/ansible/ansible/pull/85971).
unknown
github
https://github.com/ansible/ansible
changelogs/fragments/replace-random-with-secrets.yml
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package jsonformat import ( "fmt" "testing" "github.com/google/go-cmp/cmp" "github.com/mitchellh/colorstring" "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/command/jsonprovider" "github.com/hashicorp/terraform/internal/command/jsonstate" "github.com/hashicorp/terraform/internal/configs/configschema" "github.com/hashicorp/terraform/internal/providers" testing_provider "github.com/hashicorp/terraform/internal/providers/testing" "github.com/hashicorp/terraform/internal/states" "github.com/hashicorp/terraform/internal/states/statefile" "github.com/hashicorp/terraform/internal/terminal" "github.com/hashicorp/terraform/internal/terraform" ) func TestState(t *testing.T) { color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} tests := []struct { State *states.State Schemas *terraform.Schemas Want string }{ 0: { State: &states.State{}, Schemas: &terraform.Schemas{}, Want: "The state file is empty. 
No resources are represented.\n", }, 1: { State: basicState(t), Schemas: testSchemas(), Want: basicStateOutput, }, 2: { State: nestedState(t), Schemas: testSchemas(), Want: nestedStateOutput, }, 3: { State: deposedState(t), Schemas: testSchemas(), Want: deposedNestedStateOutput, }, 4: { State: onlyDeposedState(t), Schemas: testSchemas(), Want: onlyDeposedOutput, }, 5: { State: stateWithMoreOutputs(t), Schemas: testSchemas(), Want: stateWithMoreOutputsOutput, }, } for i, tt := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { root, outputs, err := jsonstate.MarshalForRenderer(&statefile.File{ State: tt.State, }, tt.Schemas) if err != nil { t.Errorf("found err: %v", err) return } streams, done := terminal.StreamsForTesting(t) renderer := Renderer{ Colorize: color, Streams: streams, } renderer.RenderHumanState(State{ StateFormatVersion: jsonstate.FormatVersion, RootModule: root, RootModuleOutputs: outputs, ProviderFormatVersion: jsonprovider.FormatVersion, ProviderSchemas: jsonprovider.MarshalForRenderer(tt.Schemas), }) result := done(t).All() if diff := cmp.Diff(result, tt.Want); diff != "" { t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tt.Want, result, diff) } }) } } func testProvider() *testing_provider.MockProvider { p := new(testing_provider.MockProvider) p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { return providers.ReadResourceResponse{NewState: req.PriorState} } p.GetProviderSchemaResponse = testProviderSchema() return p } func testProviderSchema() *providers.GetProviderSchemaResponse { return &providers.GetProviderSchemaResponse{ Provider: providers.Schema{ Body: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "region": {Type: cty.String, Optional: true}, }, }, }, ResourceTypes: map[string]providers.Schema{ "test_resource": { Body: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "id": {Type: cty.String, Computed: true}, "foo": {Type: 
cty.String, Optional: true}, "woozles": {Type: cty.String, Optional: true}, }, BlockTypes: map[string]*configschema.NestedBlock{ "nested": { Nesting: configschema.NestingList, Block: configschema.Block{ Attributes: map[string]*configschema.Attribute{ "compute": {Type: cty.String, Optional: true}, "value": {Type: cty.String, Optional: true}, }, }, }, }, }, }, }, DataSources: map[string]providers.Schema{ "test_data_source": { Body: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "compute": {Type: cty.String, Optional: true}, "value": {Type: cty.String, Computed: true}, }, }, }, }, } } func testSchemas() *terraform.Schemas { provider := testProvider() return &terraform.Schemas{ Providers: map[addrs.Provider]providers.ProviderSchema{ addrs.NewDefaultProvider("test"): provider.GetProviderSchema(), }, } } const basicStateOutput = `# data.test_data_source.data: data "test_data_source" "data" { compute = "sure" } # test_resource.baz[0]: resource "test_resource" "baz" { woozles = "confuzles" } Outputs: bar = "bar value" ` const nestedStateOutput = `# test_resource.baz[0]: resource "test_resource" "baz" { woozles = "confuzles" nested { value = "42" } } ` const deposedNestedStateOutput = `# test_resource.baz[0]: resource "test_resource" "baz" { woozles = "confuzles" nested { value = "42" } } # test_resource.baz[0]: (deposed object 1234) resource "test_resource" "baz" { woozles = "confuzles" nested { value = "42" } } ` const onlyDeposedOutput = `# test_resource.baz[0]: (deposed object 1234) resource "test_resource" "baz" { woozles = "confuzles" nested { value = "42" } } # test_resource.baz[0]: (deposed object 5678) resource "test_resource" "baz" { woozles = "confuzles" nested { value = "42" } } ` const stateWithMoreOutputsOutput = `# test_resource.baz[0]: resource "test_resource" "baz" { woozles = "confuzles" } Outputs: bool_var = true int_var = 42 map_var = { "first" = "foo" "second" = "bar" } sensitive_var = (sensitive value) string_var = "string value" 
` func basicState(t *testing.T) *states.State { state := states.NewState() rootModule := state.RootModule() if rootModule == nil { t.Errorf("root module is nil; want valid object") } state.SetOutputValue( addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), cty.StringVal("bar value"), false, ) rootModule.SetResourceInstanceCurrent( addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "baz", }.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"woozles":"confuzles"}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) rootModule.SetResourceInstanceCurrent( addrs.Resource{ Mode: addrs.DataResourceMode, Type: "test_data_source", Name: "data", }.Instance(addrs.NoKey), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"compute":"sure"}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) return state } func stateWithMoreOutputs(t *testing.T) *states.State { state := states.NewState() rootModule := state.RootModule() if rootModule == nil { t.Errorf("root module is nil; want valid object") } state.SetOutputValue( addrs.OutputValue{Name: "string_var"}.Absolute(addrs.RootModuleInstance), cty.StringVal("string value"), false, ) state.SetOutputValue( addrs.OutputValue{Name: "int_var"}.Absolute(addrs.RootModuleInstance), cty.NumberIntVal(42), false, ) state.SetOutputValue( addrs.OutputValue{Name: "bool_var"}.Absolute(addrs.RootModuleInstance), cty.True, false, ) state.SetOutputValue( addrs.OutputValue{Name: "sensitive_var"}.Absolute(addrs.RootModuleInstance), cty.StringVal("secret!!!"), true, ) state.SetOutputValue( addrs.OutputValue{Name: "map_var"}.Absolute(addrs.RootModuleInstance), cty.MapVal(map[string]cty.Value{ "first": cty.StringVal("foo"), "second": cty.StringVal("bar"), }), false, ) 
rootModule.SetResourceInstanceCurrent( addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "baz", }.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"woozles":"confuzles"}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) return state } func nestedState(t *testing.T) *states.State { state := states.NewState() rootModule := state.RootModule() if rootModule == nil { t.Errorf("root module is nil; want valid object") } rootModule.SetResourceInstanceCurrent( addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "baz", }.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) return state } func deposedState(t *testing.T) *states.State { state := nestedState(t) rootModule := state.RootModule() rootModule.SetResourceInstanceDeposed( addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "baz", }.Instance(addrs.IntKey(0)), states.DeposedKey("1234"), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) return state } // replicate a corrupt resource where only a deposed exists func onlyDeposedState(t *testing.T) *states.State { state := states.NewState() rootModule := state.RootModule() if rootModule == nil { t.Errorf("root module is nil; want valid object") } rootModule.SetResourceInstanceDeposed( addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "baz", }.Instance(addrs.IntKey(0)), states.DeposedKey("1234"), 
&states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) rootModule.SetResourceInstanceDeposed( addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "baz", }.Instance(addrs.IntKey(0)), states.DeposedKey("5678"), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, SchemaVersion: 0, AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("test"), Module: addrs.RootModule, }, ) return state }
go
github
https://github.com/hashicorp/terraform
internal/command/jsonformat/state_test.go
# keyword.py - $Keyword$ expansion for Mercurial # # Copyright 2007-2012 Christian Ebert <blacktrash@gmx.net> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. # # $Id$ # # Keyword expansion hack against the grain of a Distributed SCM # # There are many good reasons why this is not needed in a distributed # SCM, still it may be useful in very small projects based on single # files (like LaTeX packages), that are mostly addressed to an # audience not running a version control system. # # For in-depth discussion refer to # <http://mercurial.selenic.com/wiki/KeywordPlan>. # # Keyword expansion is based on Mercurial's changeset template mappings. # # Binary files are not touched. # # Files to act upon/ignore are specified in the [keyword] section. # Customized keyword template mappings in the [keywordmaps] section. # # Run "hg help keyword" and "hg kwdemo" to get info on configuration. '''expand keywords in tracked files This extension expands RCS/CVS-like or self-customized $Keywords$ in tracked text files selected by your configuration. Keywords are only expanded in local repositories and not stored in the change history. The mechanism can be regarded as a convenience for the current user or for archive distribution. Keywords expand to the changeset data pertaining to the latest change relative to the working directory parent of each file. Configuration is done in the [keyword], [keywordset] and [keywordmaps] sections of hgrc files. Example:: [keyword] # expand keywords in every python file except those matching "x*" **.py = x* = ignore [keywordset] # prefer svn- over cvs-like default keywordmaps svn = True .. note:: The more specific you are in your filename patterns the less you lose speed in huge repositories. For [keywordmaps] template mapping and expansion demonstration and control run :hg:`kwdemo`. See :hg:`help templates` for a list of available templates and filters. 
Three additional date template filters are provided: :``utcdate``: "2006/09/18 15:13:13" :``svnutcdate``: "2006-09-18 15:13:13Z" :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)" The default template mappings (view with :hg:`kwdemo -d`) can be replaced with customized keywords and templates. Again, run :hg:`kwdemo` to control the results of your configuration changes. Before changing/disabling active keywords, you must run :hg:`kwshrink` to avoid storing expanded keywords in the change history. To force expansion after enabling it, or a configuration change, run :hg:`kwexpand`. Expansions spanning more than one line and incremental expansions, like CVS' $Log$, are not supported. A keyword template map "Log = {desc}" expands to the first line of the changeset description. ''' from mercurial import commands, context, cmdutil, dispatch, filelog, extensions from mercurial import localrepo, match, patch, templatefilters, templater, util from mercurial import scmutil from mercurial.hgweb import webcommands from mercurial.i18n import _ import os, re, shutil, tempfile commands.optionalrepo += ' kwdemo' commands.inferrepo += ' kwexpand kwfiles kwshrink' cmdtable = {} command = cmdutil.command(cmdtable) testedwith = 'internal' # hg commands that do not act on keywords nokwcommands = ('add addremove annotate bundle export grep incoming init log' ' outgoing push tip verify convert email glog') # hg commands that trigger expansion only when writing to working dir, # not when reading filelog, and unexpand when reading from working dir restricted = 'merge kwexpand kwshrink record qrecord resolve transplant' # names of extensions using dorecord recordextensions = 'record' colortable = { 'kwfiles.enabled': 'green bold', 'kwfiles.deleted': 'cyan bold underline', 'kwfiles.enabledunknown': 'green', 'kwfiles.ignored': 'bold', 'kwfiles.ignoredunknown': 'none' } # date like in cvs' $Date def utcdate(text): ''':utcdate: Date. 
Returns a UTC-date in this format: "2009/08/18 11:00:13". ''' return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S') # date like in svn's $Date def svnisodate(text): ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13 +0200 (Tue, 18 Aug 2009)". ''' return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)') # date like in svn's $Id def svnutcdate(text): ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18 11:00:13Z". ''' return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ') templatefilters.filters.update({'utcdate': utcdate, 'svnisodate': svnisodate, 'svnutcdate': svnutcdate}) # make keyword tools accessible kwtools = {'templater': None, 'hgcmd': ''} def _defaultkwmaps(ui): '''Returns default keywordmaps according to keywordset configuration.''' templates = { 'Revision': '{node|short}', 'Author': '{author|user}', } kwsets = ({ 'Date': '{date|utcdate}', 'RCSfile': '{file|basename},v', 'RCSFile': '{file|basename},v', # kept for backwards compatibility # with hg-keyword 'Source': '{root}/{file},v', 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}', 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}', }, { 'Date': '{date|svnisodate}', 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}', 'LastChangedRevision': '{node|short}', 'LastChangedBy': '{author|user}', 'LastChangedDate': '{date|svnisodate}', }) templates.update(kwsets[ui.configbool('keywordset', 'svn')]) return templates def _shrinktext(text, subfunc): '''Helper for keyword expansion removal in text. 
Depending on subfunc also returns number of substitutions.''' return subfunc(r'$\1$', text) def _preselect(wstatus, changed): '''Retrieves modified and added files from a working directory state and returns the subset of each contained in given changed files retrieved from a change context.''' modified, added = wstatus[:2] modified = [f for f in modified if f in changed] added = [f for f in added if f in changed] return modified, added class kwtemplater(object): ''' Sets up keyword templates, corresponding keyword regex, and provides keyword substitution functions. ''' def __init__(self, ui, repo, inc, exc): self.ui = ui self.repo = repo self.match = match.match(repo.root, '', [], inc, exc) self.restrict = kwtools['hgcmd'] in restricted.split() self.postcommit = False kwmaps = self.ui.configitems('keywordmaps') if kwmaps: # override default templates self.templates = dict((k, templater.parsestring(v, False)) for k, v in kwmaps) else: self.templates = _defaultkwmaps(self.ui) @util.propertycache def escape(self): '''Returns bar-separated and escaped keywords.''' return '|'.join(map(re.escape, self.templates.keys())) @util.propertycache def rekw(self): '''Returns regex for unexpanded keywords.''' return re.compile(r'\$(%s)\$' % self.escape) @util.propertycache def rekwexp(self): '''Returns regex for expanded keywords.''' return re.compile(r'\$(%s): [^$\n\r]*? 
\$' % self.escape) def substitute(self, data, path, ctx, subfunc): '''Replaces keywords in data with expanded template.''' def kwsub(mobj): kw = mobj.group(1) ct = cmdutil.changeset_templater(self.ui, self.repo, False, None, '', False) ct.use_template(self.templates[kw]) self.ui.pushbuffer() ct.show(ctx, root=self.repo.root, file=path) ekw = templatefilters.firstline(self.ui.popbuffer()) return '$%s: %s $' % (kw, ekw) return subfunc(kwsub, data) def linkctx(self, path, fileid): '''Similar to filelog.linkrev, but returns a changectx.''' return self.repo.filectx(path, fileid=fileid).changectx() def expand(self, path, node, data): '''Returns data with keywords expanded.''' if not self.restrict and self.match(path) and not util.binary(data): ctx = self.linkctx(path, node) return self.substitute(data, path, ctx, self.rekw.sub) return data def iskwfile(self, cand, ctx): '''Returns subset of candidates which are configured for keyword expansion but are not symbolic links.''' return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)] def overwrite(self, ctx, candidates, lookup, expand, rekw=False): '''Overwrites selected files expanding/shrinking keywords.''' if self.restrict or lookup or self.postcommit: # exclude kw_copy candidates = self.iskwfile(candidates, ctx) if not candidates: return kwcmd = self.restrict and lookup # kwexpand/kwshrink if self.restrict or expand and lookup: mf = ctx.manifest() if self.restrict or rekw: re_kw = self.rekw else: re_kw = self.rekwexp if expand: msg = _('overwriting %s expanding keywords\n') else: msg = _('overwriting %s shrinking keywords\n') for f in candidates: if self.restrict: data = self.repo.file(f).read(mf[f]) else: data = self.repo.wread(f) if util.binary(data): continue if expand: if lookup: ctx = self.linkctx(f, mf[f]) data, found = self.substitute(data, f, ctx, re_kw.subn) elif self.restrict: found = re_kw.search(data) else: data, found = _shrinktext(data, re_kw.subn) if found: self.ui.note(msg % f) fp = 
self.repo.wopener(f, "wb", atomictemp=True) fp.write(data) fp.close() if kwcmd: self.repo.dirstate.normal(f) elif self.postcommit: self.repo.dirstate.normallookup(f) def shrink(self, fname, text): '''Returns text with all keyword substitutions removed.''' if self.match(fname) and not util.binary(text): return _shrinktext(text, self.rekwexp.sub) return text def shrinklines(self, fname, lines): '''Returns lines with keyword substitutions removed.''' if self.match(fname): text = ''.join(lines) if not util.binary(text): return _shrinktext(text, self.rekwexp.sub).splitlines(True) return lines def wread(self, fname, data): '''If in restricted mode returns data read from wdir with keyword substitutions removed.''' if self.restrict: return self.shrink(fname, data) return data class kwfilelog(filelog.filelog): ''' Subclass of filelog to hook into its read, add, cmp methods. Keywords are "stored" unexpanded, and processed on reading. ''' def __init__(self, opener, kwt, path): super(kwfilelog, self).__init__(opener, path) self.kwt = kwt self.path = path def read(self, node): '''Expands keywords when reading filelog.''' data = super(kwfilelog, self).read(node) if self.renamed(node): return data return self.kwt.expand(self.path, node, data) def add(self, text, meta, tr, link, p1=None, p2=None): '''Removes keyword substitutions when adding to filelog.''' text = self.kwt.shrink(self.path, text) return super(kwfilelog, self).add(text, meta, tr, link, p1, p2) def cmp(self, node, text): '''Removes keyword substitutions for comparison.''' text = self.kwt.shrink(self.path, text) return super(kwfilelog, self).cmp(node, text) def _status(ui, repo, wctx, kwt, *pats, **opts): '''Bails out if [keyword] configuration is not active. 
Returns status of working directory.''' if kwt: return repo.status(match=scmutil.match(wctx, pats, opts), clean=True, unknown=opts.get('unknown') or opts.get('all')) if ui.configitems('keyword'): raise util.Abort(_('[keyword] patterns cannot match')) raise util.Abort(_('no [keyword] patterns configured')) def _kwfwrite(ui, repo, expand, *pats, **opts): '''Selects files and passes them to kwtemplater.overwrite.''' wctx = repo[None] if len(wctx.parents()) > 1: raise util.Abort(_('outstanding uncommitted merge')) kwt = kwtools['templater'] wlock = repo.wlock() try: status = _status(ui, repo, wctx, kwt, *pats, **opts) modified, added, removed, deleted, unknown, ignored, clean = status if modified or added or removed or deleted: raise util.Abort(_('outstanding uncommitted changes')) kwt.overwrite(wctx, clean, True, expand) finally: wlock.release() @command('kwdemo', [('d', 'default', None, _('show default keyword template maps')), ('f', 'rcfile', '', _('read maps from rcfile'), _('FILE'))], _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')) def demo(ui, repo, *args, **opts): '''print [keywordmaps] configuration and an expansion example Show current, custom, or default keyword template maps and their expansions. Extend the current configuration by specifying maps as arguments and using -f/--rcfile to source an external hgrc file. Use -d/--default to disable current configuration. See :hg:`help templates` for information on templates and filters. 
''' def demoitems(section, items): ui.write('[%s]\n' % section) for k, v in sorted(items): ui.write('%s = %s\n' % (k, v)) fn = 'demo.txt' tmpdir = tempfile.mkdtemp('', 'kwdemo.') ui.note(_('creating temporary repository at %s\n') % tmpdir) repo = localrepo.localrepository(repo.baseui, tmpdir, True) ui.setconfig('keyword', fn, '') svn = ui.configbool('keywordset', 'svn') # explicitly set keywordset for demo output ui.setconfig('keywordset', 'svn', svn) uikwmaps = ui.configitems('keywordmaps') if args or opts.get('rcfile'): ui.status(_('\n\tconfiguration using custom keyword template maps\n')) if uikwmaps: ui.status(_('\textending current template maps\n')) if opts.get('default') or not uikwmaps: if svn: ui.status(_('\toverriding default svn keywordset\n')) else: ui.status(_('\toverriding default cvs keywordset\n')) if opts.get('rcfile'): ui.readconfig(opts.get('rcfile')) if args: # simulate hgrc parsing rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args] fp = repo.opener('hgrc', 'w') fp.writelines(rcmaps) fp.close() ui.readconfig(repo.join('hgrc')) kwmaps = dict(ui.configitems('keywordmaps')) elif opts.get('default'): if svn: ui.status(_('\n\tconfiguration using default svn keywordset\n')) else: ui.status(_('\n\tconfiguration using default cvs keywordset\n')) kwmaps = _defaultkwmaps(ui) if uikwmaps: ui.status(_('\tdisabling current template maps\n')) for k, v in kwmaps.iteritems(): ui.setconfig('keywordmaps', k, v) else: ui.status(_('\n\tconfiguration using current keyword template maps\n')) if uikwmaps: kwmaps = dict(uikwmaps) else: kwmaps = _defaultkwmaps(ui) uisetup(ui) reposetup(ui, repo) ui.write('[extensions]\nkeyword =\n') demoitems('keyword', ui.configitems('keyword')) demoitems('keywordset', ui.configitems('keywordset')) demoitems('keywordmaps', kwmaps.iteritems()) keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n' repo.wopener.write(fn, keywords) repo[None].add([fn]) ui.note(_('\nkeywords written to %s:\n') % fn) ui.note(keywords) 
repo.dirstate.setbranch('demobranch') for name, cmd in ui.configitems('hooks'): if name.split('.', 1)[0].find('commit') > -1: repo.ui.setconfig('hooks', name, '') msg = _('hg keyword configuration and expansion example') ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore repo.commit(text=msg) ui.status(_('\n\tkeywords expanded\n')) ui.write(repo.wread(fn)) shutil.rmtree(tmpdir, ignore_errors=True) @command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...')) def expand(ui, repo, *pats, **opts): '''expand keywords in the working directory Run after (re)enabling keyword expansion. kwexpand refuses to run if given files contain local changes. ''' # 3rd argument sets expansion to True _kwfwrite(ui, repo, True, *pats, **opts) @command('kwfiles', [('A', 'all', None, _('show keyword status flags of all files')), ('i', 'ignore', None, _('show files excluded from expansion')), ('u', 'unknown', None, _('only show unknown (not tracked) files')), ] + commands.walkopts, _('hg kwfiles [OPTION]... [FILE]...')) def files(ui, repo, *pats, **opts): '''show files configured for keyword expansion List which files in the working directory are matched by the [keyword] configuration patterns. Useful to prevent inadvertent keyword expansion and to speed up execution by including only files that are actual candidates for expansion. See :hg:`help keyword` on how to construct patterns both for inclusion and exclusion of files. 
With -A/--all and -v/--verbose the codes used to show the status of files are:: K = keyword expansion candidate k = keyword expansion candidate (not tracked) I = ignored i = ignored (not tracked) ''' kwt = kwtools['templater'] wctx = repo[None] status = _status(ui, repo, wctx, kwt, *pats, **opts) cwd = pats and repo.getcwd() or '' modified, added, removed, deleted, unknown, ignored, clean = status files = [] if not opts.get('unknown') or opts.get('all'): files = sorted(modified + added + clean) kwfiles = kwt.iskwfile(files, wctx) kwdeleted = kwt.iskwfile(deleted, wctx) kwunknown = kwt.iskwfile(unknown, wctx) if not opts.get('ignore') or opts.get('all'): showfiles = kwfiles, kwdeleted, kwunknown else: showfiles = [], [], [] if opts.get('all') or opts.get('ignore'): showfiles += ([f for f in files if f not in kwfiles], [f for f in unknown if f not in kwunknown]) kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split() kwstates = zip(kwlabels, 'K!kIi', showfiles) fm = ui.formatter('kwfiles', opts) fmt = '%.0s%s\n' if opts.get('all') or ui.verbose: fmt = '%s %s\n' for kwstate, char, filenames in kwstates: label = 'kwfiles.' + kwstate for f in filenames: fm.startitem() fm.write('kwstatus path', fmt, char, repo.pathto(f, cwd), label=label) fm.end() @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...')) def shrink(ui, repo, *pats, **opts): '''revert expanded keywords in the working directory Must be run before changing/disabling active keywords. kwshrink refuses to run if given files contain local changes. 
''' # 3rd argument sets expansion to False _kwfwrite(ui, repo, False, *pats, **opts) def uisetup(ui): ''' Monkeypatches dispatch._parse to retrieve user command.''' def kwdispatch_parse(orig, ui, args): '''Monkeypatch dispatch._parse to obtain running hg command.''' cmd, func, args, options, cmdoptions = orig(ui, args) kwtools['hgcmd'] = cmd return cmd, func, args, options, cmdoptions extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse) def reposetup(ui, repo): '''Sets up repo as kwrepo for keyword substitution. Overrides file method to return kwfilelog instead of filelog if file matches user configuration. Wraps commit to overwrite configured files with updated keyword substitutions. Monkeypatches patch and webcommands.''' try: if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split() or '.hg' in util.splitpath(repo.root) or repo._url.startswith('bundle:')): return except AttributeError: pass inc, exc = [], ['.hg*'] for pat, opt in ui.configitems('keyword'): if opt != 'ignore': inc.append(pat) else: exc.append(pat) if not inc: return kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc) class kwrepo(repo.__class__): def file(self, f): if f[0] == '/': f = f[1:] return kwfilelog(self.sopener, kwt, f) def wread(self, filename): data = super(kwrepo, self).wread(filename) return kwt.wread(filename, data) def commit(self, *args, **opts): # use custom commitctx for user commands # other extensions can still wrap repo.commitctx directly self.commitctx = self.kwcommitctx try: return super(kwrepo, self).commit(*args, **opts) finally: del self.commitctx def kwcommitctx(self, ctx, error=False): n = super(kwrepo, self).commitctx(ctx, error) # no lock needed, only called from repo.commit() which already locks if not kwt.postcommit: restrict = kwt.restrict kwt.restrict = True kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()), False, True) kwt.restrict = restrict return n def rollback(self, dryrun=False, force=False): wlock = self.wlock() try: if 
not dryrun: changed = self['.'].files() ret = super(kwrepo, self).rollback(dryrun, force) if not dryrun: ctx = self['.'] modified, added = _preselect(self[None].status(), changed) kwt.overwrite(ctx, modified, True, True) kwt.overwrite(ctx, added, True, False) return ret finally: wlock.release() # monkeypatches def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None): '''Monkeypatch/wrap patch.patchfile.__init__ to avoid rejects or conflicts due to expanded keywords in working dir.''' orig(self, ui, gp, backend, store, eolmode) # shrink keywords read from working dir self.lines = kwt.shrinklines(self.fname, self.lines) def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None, opts=None, prefix=''): '''Monkeypatch patch.diff to avoid expansion.''' kwt.restrict = True return orig(repo, node1, node2, match, changes, opts, prefix) def kwweb_skip(orig, web, req, tmpl): '''Wraps webcommands.x turning off keyword expansion.''' kwt.match = util.never return orig(web, req, tmpl) def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts): '''Wraps cmdutil.amend expanding keywords after amend.''' wlock = repo.wlock() try: kwt.postcommit = True newid = orig(ui, repo, commitfunc, old, extra, pats, opts) if newid != old.node(): ctx = repo[newid] kwt.restrict = True kwt.overwrite(ctx, ctx.files(), False, True) kwt.restrict = False return newid finally: wlock.release() def kw_copy(orig, ui, repo, pats, opts, rename=False): '''Wraps cmdutil.copy so that copy/rename destinations do not contain expanded keywords. 
Note that the source of a regular file destination may also be a symlink: hg cp sym x -> x is symlink cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords) For the latter we have to follow the symlink to find out whether its target is configured for expansion and we therefore must unexpand the keywords in the destination.''' wlock = repo.wlock() try: orig(ui, repo, pats, opts, rename) if opts.get('dry_run'): return wctx = repo[None] cwd = repo.getcwd() def haskwsource(dest): '''Returns true if dest is a regular file and configured for expansion or a symlink which points to a file configured for expansion. ''' source = repo.dirstate.copied(dest) if 'l' in wctx.flags(source): source = scmutil.canonpath(repo.root, cwd, os.path.realpath(source)) return kwt.match(source) candidates = [f for f in repo.dirstate.copies() if 'l' not in wctx.flags(f) and haskwsource(f)] kwt.overwrite(wctx, candidates, False, False) finally: wlock.release() def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts): '''Wraps record.dorecord expanding keywords after recording.''' wlock = repo.wlock() try: # record returns 0 even when nothing has changed # therefore compare nodes before and after kwt.postcommit = True ctx = repo['.'] wstatus = repo[None].status() ret = orig(ui, repo, commitfunc, *pats, **opts) recctx = repo['.'] if ctx != recctx: modified, added = _preselect(wstatus, recctx.files()) kwt.restrict = False kwt.overwrite(recctx, modified, False, True) kwt.overwrite(recctx, added, False, True, True) kwt.restrict = True return ret finally: wlock.release() def kwfilectx_cmp(orig, self, fctx): # keyword affects data size, comparing wdir and filelog size does # not make sense if (fctx._filerev is None and (self._repo._encodefilterpats or kwt.match(fctx.path()) and 'l' not in fctx.flags() or self.size() - 4 == fctx.size()) or self.size() == fctx.size()): return self._filelog.cmp(self._filenode, fctx.data()) return True extensions.wrapfunction(context.filectx, 'cmp', 
kwfilectx_cmp) extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init) extensions.wrapfunction(patch, 'diff', kw_diff) extensions.wrapfunction(cmdutil, 'amend', kw_amend) extensions.wrapfunction(cmdutil, 'copy', kw_copy) for c in 'annotate changeset rev filediff diff'.split(): extensions.wrapfunction(webcommands, c, kwweb_skip) for name in recordextensions.split(): try: record = extensions.find(name) extensions.wrapfunction(record, 'dorecord', kw_dorecord) except KeyError: pass repo.__class__ = kwrepo
unknown
codeparrot/codeparrot-clean
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Project Name: MakeHuman # Product Home Page: http://www.makehuman.org/ # Code Home Page: http://code.google.com/p/makehuman/ # Authors: Thomas Larsson # Script copyright (C) MakeHuman Team 2001-2014 # Coding Standards: See http://www.makehuman.org/node/165 import json import gzip def loadJson(filepath): try: with gzip.open(filepath, 'rb') as fp: bytes = fp.read() except IOError: bytes = None if bytes: string = bytes.decode("utf-8") struct = json.loads(string) else: with open(filepath, "rU") as fp: struct = json.load(fp) return struct def saveJson(struct, filepath, binary=False): if binary: bytes = json.dumps(struct) with gzip.open(realpath, 'wb') as fp: fp.write(bytes) else: string = encodeJsonData(struct, "") with open(filepath, "w", encoding="utf-8") as fp: fp.write(string) fp.write("\n") def encodeJsonData(data, pad=""): if data == None: return "none" elif isinstance(data, bool): if data == True: return "true" else: return "false" elif isinstance(data, float): if abs(data) < 1e-6: return "0" else: return "%.5g" % data elif isinstance(data, int): return str(data) elif isinstance(data, str): return "\"%s\"" % data elif isinstance(data, (list, tuple)): if data == []: return "[]" elif 
leafList(data): string = "[" for elt in data: string += encodeJsonData(elt) + ", " return string[:-2] + "]" else: string = "[" for elt in data: string += "\n " + pad + encodeJsonData(elt, pad+" ") + "," return string[:-1] + "\n%s]" % pad elif isinstance(data, dict): if data == {}: return "{}" string = "{" for key,value in data.items(): string += "\n %s\"%s\" : " % (pad, key) + encodeJsonData(value, pad+" ") + "," return string[:-1] + "\n%s}" % pad def leafList(data): for elt in data: if isinstance(elt, (list,tuple,dict)): return False return True
unknown
codeparrot/codeparrot-clean