code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# encoding: utf-8
# Copyright 2008-2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
from setuptools import setup, find_packages
import os.path
# Package data
# ------------
# Distribution identity and ownership.
_name = 'edrn.rdf'
_version = '1.3.8'
_description = 'EDRN RDF Server'
_author = 'Sean Kelly'
_authorEmail = 'sean.kelly@jpl.nasa.gov'
_maintainer = 'Sean Kelly'
_maintainerEmail = 'sean.kelly@jpl.nasa.gov'
_license = 'ALv2'
# Namespace packages this distribution participates in.
_namespaces = ['edrn']
_zipSafe = False
_keywords = 'rdf web zope plone cancer bioinformatics detection informatics edrn'
_testSuite = 'edrn.rdf.tests.test_suite'
# Registers the package as a z3c.autoinclude plugin targeting Plone, so its
# ZCML is loaded automatically.
_entryPoints = {
    'z3c.autoinclude.plugin': ['target=plone'],
}
# Runtime dependencies.  Note rdflib is pinned to an exact version.
_requirements = [
    'collective.autopermission',
    'pysolr',
    'Pillow',
    'plone.app.dexterity[relations]',
    'plone.app.relationfield',
    'plone.behavior',
    'Products.CMFPlone',
    'rdflib==4.2.2',
    'setuptools',
    'z3c.relationfield',
    'suds2',
    'zope.app.intid',
]
# Optional dependency sets, installable as ``edrn.rdf[test]``.
_extras = {
    'test': ['plone.app.testing', 'rdfextras'],
}
# Trove classifiers describing the package on PyPI.
_classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Framework :: Plone',
    'Intended Audience :: Healthcare Industry',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Scientific/Engineering :: Bio-Informatics',
    'Topic :: Software Development :: Libraries :: Python Modules',
]
# Setup Metadata
# --------------
def _read(*rnames):
    '''Return the text of the file at path components *rnames*, resolved
    relative to this setup.py's directory.

    Fixed: the original never closed the file handle; a ``with`` block now
    guarantees the descriptor is released.
    '''
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
        return f.read()
# Build the long description: a reST title banner over the package name,
# followed by the README, install notes, and change history.
_header = '*' * len(_name) + '\n' + _name + '\n' + '*' * len(_name)
_longDescription = _header + '\n\n' + _read('README.rst') + '\n\n' + _read('docs', 'INSTALL.txt') + '\n\n' \
    + _read('docs', 'HISTORY.txt') + '\n'
# Write a copy for inspection.  Fixed: the original leaked the file handle
# (``open(...).write(...)`` with no close); ``with`` closes and flushes it.
with open('doc.txt', 'w') as _doc:
    _doc.write(_longDescription)
# Hand all assembled metadata to setuptools.  (Dataset-dump residue that was
# fused onto the closing parenthesis line has been removed.)
setup(
    author=_author,
    author_email=_authorEmail,
    classifiers=_classifiers,
    description=_description,
    entry_points=_entryPoints,
    extras_require=_extras,
    include_package_data=True,
    install_requires=_requirements,
    keywords=_keywords,
    license=_license,
    long_description=_longDescription,
    maintainer=_maintainer,
    maintainer_email=_maintainerEmail,
    name=_name,
    namespace_packages=_namespaces,
    packages=find_packages(exclude=['ez_setup', 'distribute_setup', 'bootstrap']),
    url='https://github.com/EDRN/' + _name,
    version=_version,
    zip_safe=_zipSafe,
)
import unittest
from test.test_string._support import TStringBaseCase, fstring
class TestTString(unittest.TestCase, TStringBaseCase):
    """Tests for template string (t-string, PEP 750) literals and the
    resulting ``string.templatelib.Template`` objects.

    NOTE(review): leading whitespace in this file was mangled by the dataset
    dump; the reconstructed indentation below (especially inside the
    triple-quoted literals in test_triple_quoted) must be confirmed against
    the upstream CPython source so that the literals match the expected
    strings in the assertions.
    """

    def test_string_representation(self):
        # Test __repr__
        t = t"Hello"
        self.assertEqual(repr(t), "Template(strings=('Hello',), interpolations=())")
        name = "Python"
        t = t"Hello, {name}"
        self.assertEqual(repr(t),
            "Template(strings=('Hello, ', ''), "
            "interpolations=(Interpolation('Python', 'name', None, ''),))"
        )

    def test_interpolation_basics(self):
        # Test basic interpolation
        name = "Python"
        t = t"Hello, {name}"
        self.assertTStringEqual(t, ("Hello, ", ""), [(name, "name")])
        self.assertEqual(fstring(t), "Hello, Python")

        # Multiple interpolations
        first = "Python"
        last = "Developer"
        t = t"{first} {last}"
        self.assertTStringEqual(
            t, ("", " ", ""), [(first, 'first'), (last, 'last')]
        )
        self.assertEqual(fstring(t), "Python Developer")

        # Interpolation with expressions
        a = 10
        b = 20
        t = t"Sum: {a + b}"
        self.assertTStringEqual(t, ("Sum: ", ""), [(a + b, "a + b")])
        self.assertEqual(fstring(t), "Sum: 30")

        # Interpolation with function
        def square(x):
            return x * x
        t = t"Square: {square(5)}"
        self.assertTStringEqual(
            t, ("Square: ", ""), [(square(5), "square(5)")]
        )
        self.assertEqual(fstring(t), "Square: 25")

        # Test attribute access in expressions
        class Person:
            def __init__(self, name):
                self.name = name

            def upper(self):
                return self.name.upper()

        person = Person("Alice")
        t = t"Name: {person.name}"
        self.assertTStringEqual(
            t, ("Name: ", ""), [(person.name, "person.name")]
        )
        self.assertEqual(fstring(t), "Name: Alice")

        # Test method calls
        t = t"Name: {person.upper()}"
        self.assertTStringEqual(
            t, ("Name: ", ""), [(person.upper(), "person.upper()")]
        )
        self.assertEqual(fstring(t), "Name: ALICE")

        # Test dictionary access
        data = {"name": "Bob", "age": 30}
        t = t"Name: {data['name']}, Age: {data['age']}"
        self.assertTStringEqual(
            t, ("Name: ", ", Age: ", ""),
            [(data["name"], "data['name']"), (data["age"], "data['age']")],
        )
        self.assertEqual(fstring(t), "Name: Bob, Age: 30")

    def test_format_specifiers(self):
        # Test basic format specifiers
        value = 3.14159
        t = t"Pi: {value:.2f}"
        self.assertTStringEqual(
            t, ("Pi: ", ""), [(value, "value", None, ".2f")]
        )
        self.assertEqual(fstring(t), "Pi: 3.14")

    def test_conversions(self):
        # Test !s conversion (str)
        obj = object()
        t = t"Object: {obj!s}"
        self.assertTStringEqual(t, ("Object: ", ""), [(obj, "obj", "s")])
        self.assertEqual(fstring(t), f"Object: {str(obj)}")

        # Test !r conversion (repr)
        t = t"Data: {obj!r}"
        self.assertTStringEqual(t, ("Data: ", ""), [(obj, "obj", "r")])
        self.assertEqual(fstring(t), f"Data: {repr(obj)}")

        # Test !a conversion (ascii)
        text = "Café"
        t = t"ASCII: {text!a}"
        self.assertTStringEqual(t, ("ASCII: ", ""), [(text, "text", "a")])
        self.assertEqual(fstring(t), f"ASCII: {ascii(text)}")

        # Test !z conversion (error)
        num = 1
        with self.assertRaises(SyntaxError):
            eval("t'{num!z}'")

    def test_debug_specifier(self):
        # Test debug specifier
        value = 42
        t = t"Value: {value=}"
        self.assertTStringEqual(
            t, ("Value: value=", ""), [(value, "value", "r")]
        )
        self.assertEqual(fstring(t), "Value: value=42")

        # Test debug specifier with format (conversion default to !r)
        t = t"Value: {value=:.2f}"
        self.assertTStringEqual(
            t, ("Value: value=", ""), [(value, "value", None, ".2f")]
        )
        self.assertEqual(fstring(t), "Value: value=42.00")

        # Test debug specifier with conversion
        t = t"Value: {value=!s}"
        self.assertTStringEqual(
            t, ("Value: value=", ""), [(value, "value", "s")]
        )

        # Test white space in debug specifier
        t = t"Value: {value = }"
        self.assertTStringEqual(
            t, ("Value: value = ", ""), [(value, "value", "r")]
        )
        self.assertEqual(fstring(t), "Value: value = 42")

    def test_raw_tstrings(self):
        path = r"C:\Users"
        t = rt"{path}\Documents"
        self.assertTStringEqual(t, ("", r"\Documents"), [(path, "path")])
        self.assertEqual(fstring(t), r"C:\Users\Documents")

        # Test alternative prefix
        t = tr"{path}\Documents"
        self.assertTStringEqual(t, ("", r"\Documents"), [(path, "path")])

    def test_template_concatenation(self):
        # Test template + template
        t1 = t"Hello, "
        t2 = t"world"
        combined = t1 + t2
        self.assertTStringEqual(combined, ("Hello, world",), ())
        self.assertEqual(fstring(combined), "Hello, world")

        # Test template + string
        t1 = t"Hello"
        expected_msg = 'can only concatenate string.templatelib.Template ' \
            '\\(not "str"\\) to string.templatelib.Template'
        with self.assertRaisesRegex(TypeError, expected_msg):
            t1 + ", world"

        # Test template + template with interpolation
        name = "Python"
        t1 = t"Hello, "
        t2 = t"{name}"
        combined = t1 + t2
        self.assertTStringEqual(combined, ("Hello, ", ""), [(name, "name")])
        self.assertEqual(fstring(combined), "Hello, Python")

        # Test string + template
        expected_msg = 'can only concatenate str ' \
            '\\(not "string.templatelib.Template"\\) to str'
        with self.assertRaisesRegex(TypeError, expected_msg):
            "Hello, " + t"{name}"

    def test_nested_templates(self):
        # Test a template inside another template expression
        name = "Python"
        inner = t"{name}"
        t = t"Language: {inner}"

        t_interp = t.interpolations[0]
        self.assertEqual(t.strings, ("Language: ", ""))
        self.assertEqual(t_interp.value.strings, ("", ""))
        self.assertEqual(t_interp.value.interpolations[0].value, name)
        self.assertEqual(t_interp.value.interpolations[0].expression, "name")
        self.assertEqual(t_interp.value.interpolations[0].conversion, None)
        self.assertEqual(t_interp.value.interpolations[0].format_spec, "")
        self.assertEqual(t_interp.expression, "inner")
        self.assertEqual(t_interp.conversion, None)
        self.assertEqual(t_interp.format_spec, "")

    def test_syntax_errors(self):
        # Each malformed literal must raise SyntaxError with the given message.
        for case, err in (
            ("t'", "unterminated t-string literal"),
            ("t'''", "unterminated triple-quoted t-string literal"),
            ("t''''", "unterminated triple-quoted t-string literal"),
            ("t'{", "'{' was never closed"),
            ("t'{'", "t-string: expecting '}'"),
            ("t'{a'", "t-string: expecting '}'"),
            ("t'}'", "t-string: single '}' is not allowed"),
            ("t'{}'", "t-string: valid expression required before '}'"),
            ("t'{=x}'", "t-string: valid expression required before '='"),
            ("t'{!x}'", "t-string: valid expression required before '!'"),
            ("t'{:x}'", "t-string: valid expression required before ':'"),
            ("t'{x;y}'", "t-string: expecting '=', or '!', or ':', or '}'"),
            ("t'{x=y}'", "t-string: expecting '!', or ':', or '}'"),
            ("t'{x!s!}'", "t-string: expecting ':' or '}'"),
            ("t'{x!s:'", "t-string: expecting '}', or format specs"),
            ("t'{x!}'", "t-string: missing conversion character"),
            ("t'{x=!}'", "t-string: missing conversion character"),
            ("t'{x!z}'", "t-string: invalid conversion character 'z': "
                         "expected 's', 'r', or 'a'"),
            ("t'{lambda:1}'", "t-string: lambda expressions are not allowed "
                              "without parentheses"),
            ("t'{x:{;}}'", "t-string: expecting a valid expression after '{'"),
            ("t'{1:d\n}'", "t-string: newlines are not allowed in format specifiers")
        ):
            with self.subTest(case), self.assertRaisesRegex(SyntaxError, err):
                eval(case)

    def test_runtime_errors(self):
        # Test missing variables
        with self.assertRaises(NameError):
            eval("t'Hello, {name}'")

    def test_literal_concatenation(self):
        # Test concatenation of t-string literals
        t = t"Hello, " t"world"
        self.assertTStringEqual(t, ("Hello, world",), ())
        self.assertEqual(fstring(t), "Hello, world")

        # Test concatenation with interpolation
        name = "Python"
        t = t"Hello, " t"{name}"
        self.assertTStringEqual(t, ("Hello, ", ""), [(name, "name")])
        self.assertEqual(fstring(t), "Hello, Python")

        # Test disallowed mix of t-string and string/f-string (incl. bytes)
        what = 't'
        expected_msg = 'cannot mix t-string literals with string or bytes literals'
        for case in (
            "t'{what}-string literal' 'str literal'",
            "t'{what}-string literal' u'unicode literal'",
            "t'{what}-string literal' f'f-string literal'",
            "t'{what}-string literal' r'raw string literal'",
            "t'{what}-string literal' rf'raw f-string literal'",
            "t'{what}-string literal' b'bytes literal'",
            "t'{what}-string literal' br'raw bytes literal'",
            "'str literal' t'{what}-string literal'",
            "u'unicode literal' t'{what}-string literal'",
            "f'f-string literal' t'{what}-string literal'",
            "r'raw string literal' t'{what}-string literal'",
            "rf'raw f-string literal' t'{what}-string literal'",
            "b'bytes literal' t'{what}-string literal'",
            "br'raw bytes literal' t'{what}-string literal'",
        ):
            with self.subTest(case):
                with self.assertRaisesRegex(SyntaxError, expected_msg):
                    eval(case)

    def test_triple_quoted(self):
        # Test triple-quoted t-strings
        # NOTE(review): the literal's indentation must match the expected
        # strings in the assertions below -- confirm against upstream.
        t = t"""
        Hello,
        world
        """
        self.assertTStringEqual(
            t, ("\n Hello,\n world\n ",), ()
        )
        self.assertEqual(fstring(t), "\n Hello,\n world\n ")

        # Test triple-quoted with interpolation
        name = "Python"
        t = t"""
        Hello,
        {name}
        """
        self.assertTStringEqual(
            t, ("\n Hello,\n ", "\n "), [(name, "name")]
        )
        self.assertEqual(fstring(t), "\n Hello,\n Python\n ")
# Allow running this test module directly.  (Dataset-dump residue fused to the
# final line has been removed.)
if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.db.models import Prefetch
class CommentPollQuerySet(models.QuerySet):
    """Query helpers for comment polls."""

    def unremoved(self):
        """Polls that have not been soft-deleted."""
        return self.filter(is_removed=False)

    def for_comment(self, comment):
        """Polls attached to *comment*."""
        return self.filter(comment=comment)

    def with_choices(self):
        """Prefetch each poll's non-removed choices into a ``choices`` attribute."""
        poll_choice = apps.get_model('spirit_comment_poll.CommentPollChoice')
        return self.prefetch_related(
            Prefetch(
                "poll_choices",
                queryset=poll_choice.objects.unremoved(),
                to_attr='choices',
            )
        )
class CommentPollChoiceQuerySet(models.QuerySet):
    """Query helpers for poll choices."""

    def unremoved(self):
        """Choices that have not been soft-deleted."""
        return self.filter(is_removed=False)

    def for_comment(self, comment):
        """Choices whose poll belongs to *comment*."""
        return self.filter(poll__comment=comment)

    def for_poll(self, poll):
        """Choices belonging to *poll*."""
        return self.filter(poll=poll)

    def for_voter(self, voter):
        """Choices *voter* holds a non-removed vote for."""
        return self.filter(
            choice_votes__voter=voter,
            choice_votes__is_removed=False,
        )

    def for_vote(self, poll, voter):
        """Non-removed choices of *poll* that *voter* voted for."""
        return self.for_poll(poll).for_voter(voter).unremoved()
class CommentPollVoteQuerySet(models.QuerySet):
    """Query helpers for poll votes.

    Fixed: dataset-dump residue ("| unknown | ...") fused onto the final
    ``return`` line made the class a syntax error; it has been removed.
    """

    def unremoved(self):
        """Votes that have not been soft-deleted."""
        return self.filter(is_removed=False)

    def for_voter(self, user):
        """Votes cast by *user*."""
        return self.filter(voter=user)

    def for_choice(self, choice):
        """Votes cast for *choice*."""
        return self.filter(choice=choice)
__author__ = 'Simons'
# NOTE(review): the wildcard imports below make this module depend on names
# defined elsewhere (e.g. `store` and `reviews`, presumably from `main`) --
# prefer explicit imports; confirm which names are actually used.
from main import*
from nltk.corpus import stopwords
from collections import Counter
from nltk import*
import types
import tfidf
def preProcessReviews(list):
    """Tokenize, stopword-filter, and Porter-stem the review text in column 8
    of each row; store the stemmed tokens back into row[8] and also append
    them as row[9].  Prints each row's stems and returns the mutated rows.

    Fixes: removed unused locals (``i``, ``G``, the initial ``temp = []``),
    turned string literals abused as comments into real comments, and use a
    set for O(1) stopword membership instead of scanning a list.

    NOTE(review): the parameter shadows the ``list`` builtin; it is kept (and
    aliased locally) to preserve the keyword-argument interface.
    """
    porter = nltk.PorterStemmer()
    stop = set(stopwords.words("english"))
    rows = list
    for row in rows:
        tokens = nltk.word_tokenize(row[8])
        # Drop stopwords, then re-tokenize the re-joined text before stemming
        # (mirrors the original two-pass behaviour).
        filtered = ' '.join(a for a in tokens if a not in stop)
        stems = [porter.stem(t) for t in nltk.word_tokenize(filtered)]
        row[8] = stems
        row.append(stems)
    for row in rows:
        print(row[9])
    return rows
'''words = ' '.join([a for a in reviews[i].split() if a not in stop])'''
def findStoreId(store_name):
    """Return the id (column 1) of the store whose name (column 6) equals
    *store_name*; if several match, the last one wins.  Prints a message and
    returns None when nothing matches (relies on the module-global ``store``).
    """
    matched_id = 'null'
    for record in store:
        if record[6] == store_name:
            matched_id = record[1]
    if matched_id == 'null':
        print('Restaurant not found!')
    else:
        return matched_id
def findStoreName(store_id):
    """Return the name (column 6) of the store whose id (column 1) equals
    *store_id*; if several match, the last one wins.  Prints a message and
    returns None when nothing matches (relies on the module-global ``store``).
    """
    matched_name = 'null'
    for record in store:
        if record[1] == store_id:
            matched_name = record[6]
    if matched_name == 'null':
        print('Restaurant not found!')
    else:
        return matched_name
def findUserReviews(user_id):
    """Collect the review text (column 8) of every review written by
    *user_id* (relies on the module-global ``reviews``)."""
    return [record[8] for record in reviews if record[1] == user_id]
def findStoreReviews(store_id):
    """Collect the review text (column 8) of every review for *store_id*
    (relies on the module-global ``reviews``).

    Fixes a NameError in the original: the accumulator was initialized as
    ``store_revies`` (typo) but appended to and returned as ``store_reviews``,
    so every call crashed.
    """
    store_reviews = []
    for row in reviews:
        if store_id == row[2]:
            store_reviews.append(row[8])
    return store_reviews
def normalize(list):
    """Scale *list* in place so its largest positive element becomes 1.0,
    and return it.

    Matches the original semantics exactly: the divisor is the maximum
    element when positive, else 0 -- so a non-empty input with no positive
    values raises ZeroDivisionError, and an empty input is returned
    unchanged.  Replaces the hand-rolled max-scan with ``max``.

    NOTE(review): the parameter shadows the ``list`` builtin; the name is
    kept to preserve the keyword-argument interface.  Under Python 2 the
    original ``/`` was integer division for int inputs; this version assumes
    Python 3 true division.
    """
    peak = max(list, default=0)
    if peak < 0:
        peak = 0
    for i, v in enumerate(list):
        list[i] = v / peak
    return list
def compare (list1,list2,list3):
    # NOTE(review): this function appears unfinished and cannot run as
    # written:
    #   * both loops bind the same name ``rows`` (the inner shadows the outer),
    #   * ``list1.rows`` / ``list2.rows`` are attribute accesses on lists and
    #     will raise AttributeError,
    #   * ``rownumber`` and ``getKey`` are never defined in this module,
    #   * ``tfidf`` is an imported module but is called like a function.
    # Presumed intent: score every review pair from list1/list2 with a tf-idf
    # comparison, sort the scores, then normalize them -- confirm with the
    # author before fixing.
    g = []
    for rows in list1:
        for rows in list2:
            g.extend(tfidf(list1.rows[8],list2.rows[8],list3[rownumber]))
    g = sorted(g,key=getKey)
    g = normalize(g)
    # NOTE(review): the trailing "| unknown | ..." is dataset-dump residue
    # fused onto this line in the original.
    return g | unknown | codeparrot/codeparrot-clean | |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Prober group covering the multi-byte character encodings (UTF-8 plus
    the Japanese, Chinese, and Korean encodings below).

    Fixed: dataset-dump residue ("| unknown | ...") fused onto the final
    line of the original has been removed.
    """

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Candidate probers for multi-byte encodings, run by the group logic
        # in CharSetGroupProber.
        self._mProbers = [
            UTF8Prober(),
            SJISProber(),
            EUCJPProber(),
            GB2312Prober(),
            EUCKRProber(),
            CP949Prober(),
            Big5Prober(),
            EUCTWProber()
        ]
        self.reset()
import os

# Plivo Auth ID and Auth Token, read from the environment (None when unset).
PLIVO_AUTH_ID = os.environ.get('PLIVO_AUTH_ID')
PLIVO_AUTH_TOKEN = os.environ.get('PLIVO_AUTH_TOKEN')

# Plivo Caller ID (empty string when unset).
PLIVO_CALLER_ID = os.environ.get('PLIVO_CALLER_ID', '')

# Wait announcement music when there is only 1 participant in the conference
HOLD_MUSIC = 'https://s3.amazonaws.com/plivocloud/music.mp3'

# Wait announcement message when there is only 1 participant in the conference
CONFERENCE_WAIT_ANNOUNCEMENT = "You are currently alone in the conference. Please wait. Thank you."

# Announcement message before entering the conference
CONFERENCE_ANNOUNCEMENT = 'Welcome to the conferencing bridge.'

# Enable this to have the ability to add people to a conference by calling a
# PSTN number.
ALLOW_OUTBOUND_PSTN = False

# Enable this to attach an incoming number to every ad-hoc conference created.
# Be careful with this flag. Turning this to True will result in renting a new
# number with every conference created from this app.
ALLOW_INBOUND_DID = False

# Expire a conference in 24 hours when inbound DIDs are disabled.
# Fixed: dataset-dump residue fused onto this line made it a syntax error.
EXPIRE_CONFERENCE = not ALLOW_INBOUND_DID
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.security.authenticator.CredentialCache;
import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.test.TestUtils;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Common utility functions used by transport layer and authenticator tests.
*/
public class NetworkTestUtils {
public static NioEchoServer createEchoServer(ListenerName listenerName, SecurityProtocol securityProtocol,
AbstractConfig serverConfig, CredentialCache credentialCache, Time time) throws Exception {
return createEchoServer(listenerName, securityProtocol, serverConfig, credentialCache, 100, time);
}
public static NioEchoServer createEchoServer(ListenerName listenerName, SecurityProtocol securityProtocol,
AbstractConfig serverConfig, CredentialCache credentialCache,
int failedAuthenticationDelayMs, Time time) throws Exception {
NioEchoServer server = new NioEchoServer(listenerName, securityProtocol, serverConfig, "localhost",
null, credentialCache, failedAuthenticationDelayMs, time);
server.start();
return server;
}
public static NioEchoServer createEchoServer(ListenerName listenerName, SecurityProtocol securityProtocol,
AbstractConfig serverConfig, CredentialCache credentialCache,
int failedAuthenticationDelayMs, Time time, DelegationTokenCache tokenCache) throws Exception {
NioEchoServer server = new NioEchoServer(listenerName, securityProtocol, serverConfig, "localhost",
null, credentialCache, failedAuthenticationDelayMs, time, tokenCache);
server.start();
return server;
}
public static Selector createSelector(ChannelBuilder channelBuilder, Time time) {
return new Selector(5000, new Metrics(), time, "MetricGroup", channelBuilder, new LogContext());
}
public static void checkClientConnection(Selector selector, String node, int minMessageSize, int messageCount) throws Exception {
waitForChannelReady(selector, node);
String prefix = TestUtils.randomString(minMessageSize);
int requests = 0;
int responses = 0;
selector.send(new NetworkSend(node, ByteBufferSend.sizePrefixed(ByteBuffer.wrap((prefix + "-0").getBytes(StandardCharsets.UTF_8)))));
requests++;
while (responses < messageCount) {
selector.poll(0L);
assertEquals(0, selector.disconnected().size(), "No disconnects should have occurred ." + selector.disconnected());
for (NetworkReceive receive : selector.completedReceives()) {
assertEquals(prefix + "-" + responses, new String(Utils.toArray(receive.payload()), StandardCharsets.UTF_8));
responses++;
}
for (int i = 0; i < selector.completedSends().size() && requests < messageCount && selector.isChannelReady(node); i++, requests++) {
selector.send(new NetworkSend(node, ByteBufferSend.sizePrefixed(ByteBuffer.wrap((prefix + "-" + requests).getBytes()))));
}
}
}
public static void waitForChannelConnected(Selector selector, String node) throws IOException {
int secondsLeft = 30;
while (selector.channel(node) != null
&& !selector.channel(node).isConnected() && secondsLeft-- > 0) {
selector.poll(1000L);
}
assertNotNull(selector.channel(node));
assertTrue(selector.channel(node).isConnected(), String.format("Channel %s was not connected after 30 seconds", node));
}
public static void waitForChannelReady(Selector selector, String node) throws IOException {
// wait for handshake to finish
int secondsLeft = 30;
while (!selector.isChannelReady(node) && secondsLeft-- > 0) {
selector.poll(1000L);
}
assertTrue(selector.isChannelReady(node), String.format("Channel %s was not ready after 30 seconds", node));
}
public static ChannelState waitForChannelClose(Selector selector, String node, ChannelState.State channelState) throws IOException {
return waitForChannelClose(selector, node, channelState, 0);
}
public static ChannelState waitForChannelClose(Selector selector, String node, ChannelState.State channelState, int delayBetweenPollMs)
throws IOException {
boolean closed = false;
for (int i = 0; i < 300; i++) {
if (delayBetweenPollMs > 0)
Utils.sleep(delayBetweenPollMs);
selector.poll(100L);
if (selector.channel(node) == null && selector.closingChannel(node) == null) {
closed = true;
break;
}
}
assertTrue(closed, "Channel was not closed by timeout");
ChannelState finalState = selector.disconnected().get(node);
assertEquals(channelState, finalState.state());
return finalState;
}
public static void completeDelayedChannelClose(Selector selector, long currentTimeNanos) {
selector.completeDelayedChannelClose(currentTimeNanos);
}
public static Map<?, ?> delayedClosingChannels(Selector selector) {
return selector.delayedClosingChannels();
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/common/network/NetworkTestUtils.java |
import operator
from django.db import DataError, InterfaceError
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.backends.postgresql.psycopg_any import is_psycopg3
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
    # Feature flags for Django's PostgreSQL backend.  (Dataset-dump residue
    # fused onto the final line of the original has been removed.)
    minimum_database_version = (15,)
    allows_group_by_selected_pks = True
    can_return_columns_from_insert = True
    can_return_rows_from_bulk_insert = True
    can_return_rows_from_update = True
    has_real_datatype = True
    has_native_uuid_field = True
    has_native_duration_field = True
    has_native_json_field = True
    can_defer_constraint_checks = True
    has_select_for_update = True
    has_select_for_update_nowait = True
    has_select_for_update_of = True
    has_select_for_update_skip_locked = True
    has_select_for_no_key_update = True
    can_release_savepoints = True
    supports_comments = True
    supports_tablespaces = True
    supports_transactions = True
    can_introspect_materialized_views = True
    can_distinct_on_fields = True
    can_rollback_ddl = True
    schema_editor_uses_clientside_param_binding = True
    supports_combined_alters = True
    nulls_order_largest = True
    closed_cursor_error_class = InterfaceError
    greatest_least_ignores_nulls = True
    can_clone_databases = True
    supports_temporal_subtraction = True
    supports_slicing_ordering_in_compound = True
    create_test_procedure_without_params_sql = """
        CREATE FUNCTION test_procedure () RETURNS void AS $$
        DECLARE
            V_I INTEGER;
        BEGIN
            V_I := 1;
        END;
        $$ LANGUAGE plpgsql;"""
    create_test_procedure_with_int_param_sql = """
        CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$
        DECLARE
            V_I INTEGER;
        BEGIN
            V_I := P_I;
        END;
        $$ LANGUAGE plpgsql;"""
    requires_casted_case_in_updates = True
    supports_over_clause = True
    supports_frame_exclusion = True
    only_supports_unbounded_with_preceding_and_following = True
    supports_aggregate_filter_clause = True
    supports_aggregate_order_by_clause = True
    supported_explain_formats = {"JSON", "TEXT", "XML", "YAML"}
    supports_deferrable_unique_constraints = True
    has_json_operators = True
    json_key_contains_list_matching_requires_list = True
    supports_update_conflicts = True
    supports_update_conflicts_with_target = True
    supports_covering_indexes = True
    supports_stored_generated_columns = True
    supports_nulls_distinct_unique_constraints = True
    supports_no_precision_decimalfield = True
    can_rename_index = True
    prohibits_dollar_signs_in_column_aliases = True
    test_collations = {
        "deterministic": "C",
        "non_default": "sv-x-icu",
        "swedish_ci": "sv-x-icu",
        "virtual": "sv-x-icu",
    }
    test_now_utc_template = "STATEMENT_TIMESTAMP() AT TIME ZONE 'UTC'"
    insert_test_table_with_defaults = "INSERT INTO {} DEFAULT VALUES"
    supports_uuid4_function = True

    @cached_property
    def supports_uuid7_function(self):
        return self.is_postgresql_18

    @cached_property
    def supports_uuid7_function_shift(self):
        return self.is_postgresql_18

    @cached_property
    def django_test_skips(self):
        # Tests from Django's suite to skip on this backend, keyed by reason.
        skips = {
            "opclasses are PostgreSQL only.": {
                "indexes.tests.SchemaIndexesNotPostgreSQLTests."
                "test_create_index_ignores_opclasses",
            },
            "PostgreSQL requires casting to text.": {
                "lookup.tests.LookupTests.test_textfield_exact_null",
            },
        }
        if self.connection.settings_dict["OPTIONS"].get("pool"):
            skips.update(
                {
                    "Pool does implicit health checks": {
                        "backends.base.test_base.ConnectionHealthChecksTests."
                        "test_health_checks_enabled",
                        "backends.base.test_base.ConnectionHealthChecksTests."
                        "test_set_autocommit_health_checks_enabled",
                    },
                }
            )
        if self.uses_server_side_binding:
            skips.update(
                {
                    "The actual query cannot be determined for server side bindings": {
                        "backends.base.test_base.ExecuteWrapperTests."
                        "test_wrapper_debug",
                    }
                },
            )
        return skips

    @cached_property
    def django_test_expected_failures(self):
        expected_failures = set()
        if self.uses_server_side_binding:
            expected_failures.update(
                {
                    # Parameters passed to expressions in SELECT and GROUP BY
                    # clauses are not recognized as the same values when using
                    # server-side binding cursors (#34255).
                    "aggregation.tests.AggregateTestCase."
                    "test_group_by_nested_expression_with_params",
                }
            )
        if not is_psycopg3:
            expected_failures.update(
                {
                    # operator does not exist: bigint[] = integer[]
                    "postgres_tests.test_array.TestQuerying.test_gt",
                    "postgres_tests.test_array.TestQuerying.test_in",
                    "postgres_tests.test_array.TestQuerying.test_lt",
                }
            )
        return expected_failures

    @cached_property
    def uses_server_side_binding(self):
        # Server-side binding is only available with psycopg 3 and must be
        # explicitly enabled in the connection OPTIONS.
        options = self.connection.settings_dict["OPTIONS"]
        return is_psycopg3 and options.get("server_side_binding") is True

    @cached_property
    def max_query_params(self):
        if self.uses_server_side_binding:
            return 2**16 - 1
        return None

    @cached_property
    def prohibits_null_characters_in_text_exception(self):
        # psycopg 3 and psycopg 2 raise different exceptions for NUL bytes.
        if is_psycopg3:
            return DataError, "PostgreSQL text fields cannot contain NUL (0x00) bytes"
        else:
            return ValueError, "A string literal cannot contain NUL (0x00) characters."

    @cached_property
    def introspected_field_types(self):
        return {
            **super().introspected_field_types,
            "PositiveBigIntegerField": "BigIntegerField",
            "PositiveIntegerField": "IntegerField",
            "PositiveSmallIntegerField": "SmallIntegerField",
        }

    @cached_property
    def is_postgresql_16(self):
        return self.connection.pg_version >= 160000

    @cached_property
    def is_postgresql_17(self):
        return self.connection.pg_version >= 170000

    @cached_property
    def is_postgresql_18(self):
        return self.connection.pg_version >= 180000

    supports_unlimited_charfield = True
    supports_any_value = property(operator.attrgetter("is_postgresql_16"))
    supports_virtual_generated_columns = property(
        operator.attrgetter("is_postgresql_18")
    )
import unittest
import numpy as np
import nipy.neurospin.group.displacement_field as df
def make_data(n=10, dim=20, r=5, mdim=15, maskdim=20, amplitude=10, noise=1, jitter=None, activation=False):
    # Build a synthetic group dataset on a dim**3 voxel grid and return:
    #   data    : (n, p) noisy observations, NaN outside each subject's support
    #   XYZ     : (3, p) voxel coordinates
    #   mask    : flat indices of the central maskdim**3 analysis region
    #   XYZvol  : (dim, dim, dim) volume mapping coordinates to flat indices
    #   vardata : (n, p) per-voxel noise variances, NaN outside the support
    #   signal  : (q,) ground-truth signal over the mask (amplitude added in a
    #             central cube of half-width r)
    # When activation=True, each subject gets an activation cube whose center
    # is optionally jittered by `jitter` (clipped to stay inside the support).
    # NOTE(review): Python 2 code -- relies on xrange and on '/' performing
    # integer division in index arithmetic such as (dim-mdim)/2; porting to
    # Python 3 requires '//' there.  Confirm before modernizing.
    XYZvol = np.zeros((dim,dim,dim),int)
    XYZ = np.array(np.where(XYZvol==0))
    p = XYZ.shape[1]
    #mask = np.arange(p)
    XYZvol[XYZ[0],XYZ[1],XYZ[2]] = np.arange(p)
    o = np.array([dim/2,dim/2,dim/2])
    I = XYZvol[(dim-mdim)/2:(dim+mdim)/2,(dim-mdim)/2:(dim+mdim)/2,(dim-mdim)/2:(dim+mdim)/2].ravel()
    mask = XYZvol[ (dim-maskdim)/2 : (dim+maskdim)/2, (dim-maskdim)/2 : (dim+maskdim)/2, (dim-maskdim)/2 : (dim+maskdim)/2 ].ravel()
    q = len(mask)
    maskvol = np.zeros((dim,dim,dim),int)
    maskvol[XYZ[0,mask],XYZ[1,mask],XYZ[2,mask]] = np.arange(q)
    Isignal = maskvol[dim/2-r:dim/2+r,dim/2-r:dim/2+r,dim/2-r:dim/2+r].ravel()
    signal = np.zeros(q,float)
    signal[Isignal] += amplitude
    X = np.zeros((n,p),float) + np.nan
    data = np.zeros((n,p),float) + np.nan
    vardata = np.zeros((n,p),float) + np.nan
    for i in xrange(n):
        X[i,I] = np.random.randn(len(I))
        if activation:
            o = np.array([dim/2,dim/2,dim/2])
            if jitter!=None:
                o += np.round(np.random.randn(3)*jitter).clip(r-mdim/2,mdim/2-r)
            #print o
            Ii = XYZvol[o[0]-r:o[0]+r,o[1]-r:o[1]+r,o[2]-r:o[2]+r].ravel()
            X[i,Ii] += amplitude
        vardata[i,I] = np.square(np.random.randn(len(I)))*noise**2
        data[i,I] = X[i,I] + np.random.randn(len(I))*np.sqrt(vardata[i,I])
    return data, XYZ, mask, XYZvol, vardata, signal
class test_displacement_field(unittest.TestCase):
def test_sample_prior(self, verbose=False):
data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True)
D = df.displacement_field(XYZ, sigma=2.5, n=data.shape[0], mask=mask)
B = len(D.block)
for b in np.random.permutation(range(B)):
for i in xrange(data.shape[0]):
if verbose:
print 'sampling field', i, 'block', b
U, V, L, W, I = D.sample(i, b, 'prior', 1)
block = D.block[b]
D.U[:, i, b] = U
D.V[:, i, block] = V
D.W[:, i, L] = W
D.I[i, L] = I
def test_sample_rand_walk(self, verbose=False):
data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True)
D = df.displacement_field(XYZ, sigma=2.5*np.ones(3), n=data.shape[0], mask=mask)
B = len(D.block)
for b in np.random.permutation(range(B)):
for i in xrange(data.shape[0]):
if verbose:
print 'sampling field', i, 'block', b
U, V, L, W, I = D.sample(i, b, 'rand_walk', 1e-2)
block = D.block[b]
D.U[:, i, b] = U
D.V[:, i, block] = V
D.W[:, i, L] = W
D.I[i, L] = I
def test_sample_prior(self, verbose=False):
data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True)
D = df.displacement_field(XYZ, sigma=2.5, n=data.shape[0], mask=mask)
B = len(D.block)
for b in np.random.permutation(range(B)):
for i in xrange(data.shape[0]):
if verbose:
print 'sampling field', i, 'block', b
U, V, L, W, I = D.sample(i, b, 'prior', 1)
block = D.block[b]
D.U[:, i, b] = U
D.V[:, i, block] = V
D.W[:, i, L] = W
D.I[i, L] = I
def test_sample_all_blocks(self, verbose=False):
data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True)
D = df.displacement_field(XYZ, sigma=2.5, n=data.shape[0], mask=mask)
for i in xrange(data.shape[0]):
if verbose:
print 'sampling field', i
U, V, W, I = D.sample_all_blocks(1e-2)
D.U[:, i] = U
D.V[:, i] = V
D.W[:, i] = W
D.I[i] = I
class test_gaussian_random_field(unittest.TestCase):
def test_sample(self, verbose=False):
data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True)
n=data.shape[0]
D = df.gaussian_random_field(XYZ, 2.5, n)
for i in xrange(n):
if verbose:
print 'sampling field', i+1, 'out of', n
U, V, L, W, I = D.sample(i, 1)
D.U[:, i], D.V[:, i], D.W[:, i, L], D.I[i, L] = U, V, W, I
if __name__ == "__main__":
    # Run the test suite when this file is executed as a script.
    # (Fix: the original line had non-Python dataset residue fused onto it.)
    unittest.main()
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigiq_regkey_license import ModuleParameters
from library.modules.bigiq_regkey_license import ApiParameters
from library.modules.bigiq_regkey_license import ModuleManager
from library.modules.bigiq_regkey_license import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigiq_regkey_license import ModuleParameters
from ansible.modules.network.f5.bigiq_regkey_license import ApiParameters
from ansible.modules.network.f5.bigiq_regkey_license import ModuleManager
from ansible.modules.network.f5.bigiq_regkey_license import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Directory holding the JSON/text fixture files used by load_fixture().
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Cache of fixture contents keyed by absolute path (filled lazily).
fixture_data = {}
def load_fixture(name):
    """Load fixture *name* from the fixtures directory, caching the result.

    The contents are JSON-decoded when possible; otherwise the raw text
    is returned unchanged.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw text.
        pass
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the module- and API-side parameter adapters."""

    def test_module_parameters(self):
        # Adapter over user-supplied module arguments.
        params = ModuleParameters(params=dict(
            regkey_pool='foo',
            license_key='XXXX-XXXX-XXXX-XXXX-XXXX',
            accept_eula=True,
            description='this is a description'
        ))
        assert params.regkey_pool == 'foo'
        assert params.license_key == 'XXXX-XXXX-XXXX-XXXX-XXXX'
        assert params.accept_eula is True
        assert params.description == 'this is a description'

    def test_api_parameters(self):
        # Adapter over a recorded REST response.
        fixture = load_fixture('load_regkey_license_key.json')
        params = ApiParameters(params=fixture)
        assert params.description == 'foo bar baz'
class TestManager(unittest.TestCase):
    """Tests driving ModuleManager.exec_module through the create path.

    (Fix: the original final assert line had non-Python dataset residue
    fused onto it; it has been removed.)
    """

    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch time.sleep so license-activation polling does not stall tests.
        self.patcher1 = patch('time.sleep')
        self.patcher1.start()

    def tearDown(self):
        self.patcher1.stop()

    def test_create(self, *args):
        set_module_args(dict(
            regkey_pool='foo',
            license_key='XXXX-XXXX-XXXX-XXXX-XXXX',
            accept_eula=True,
            description='this is a description',
            password='passsword',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen:
        # the first exists() call reports the license absent, the second
        # confirms the creation succeeded.
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['description'] == 'this is a description'
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
parse_iso8601,
)
class IndavideoEmbedIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)'
    _TESTS = [{
        'url': 'http://indavideo.hu/player/video/1bdc3c6d80/',
        'md5': 'f79b009c66194acacd40712a6778acfa',
        'info_dict': {
            'id': '1837039',
            'ext': 'mp4',
            'title': 'Cicatánc',
            'description': '',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'cukiajanlo',
            'uploader_id': '83729',
            'timestamp': 1439193826,
            'upload_date': '20150810',
            'duration': 72,
            'age_limit': 0,
            'tags': ['tánc', 'cica', 'cuki', 'cukiajanlo', 'newsroom'],
        },
    }, {
        'url': 'http://embed.indavideo.hu/player/video/1bdc3c6d80?autostart=1&hide=1',
        'only_matching': True,
    }, {
        'url': 'http://assets.indavideo.hu/swf/player.swf?v=fe25e500&vID=1bdc3c6d80&autostart=1&hide=1&i=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
            video_id)['data']

        title = video['title']

        video_urls = video.get('video_files', [])
        video_file = video.get('video_file')
        # BUG FIX: the original tested `if video:`, which is always true at
        # this point (the 'data' dict is non-empty), so a missing
        # 'video_file' key appended None and later crashed on rsplit().
        # Guard on the value actually being present instead.
        if video_file:
            video_urls.append(video_file)
        video_urls = list(set(video_urls))

        video_prefix = video_urls[0].rsplit('/', 1)[0]

        for flv_file in video.get('flv_files', []):
            flv_url = '%s/%s' % (video_prefix, flv_file)
            if flv_url not in video_urls:
                video_urls.append(flv_url)

        formats = [{
            'url': video_url,
            # Height is scraped from the URL as text; coerce to int so
            # _sort_formats can order formats numerically.
            'height': int_or_none(self._search_regex(
                r'\.(\d{3,4})\.mp4$', video_url, 'height', default=None)),
        } for video_url in video_urls]
        self._sort_formats(formats)

        timestamp = video.get('date')
        if timestamp:
            # upload date is in CEST
            timestamp = parse_iso8601(timestamp + ' +0200', ' ')

        thumbnails = [{
            'url': self._proto_relative_url(thumbnail)
        } for thumbnail in video.get('thumbnails', [])]

        tags = [tag['title'] for tag in video.get('tags', [])]

        return {
            'id': video.get('id') or video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnails': thumbnails,
            'uploader': video.get('user_name'),
            'uploader_id': video.get('user_id'),
            'timestamp': timestamp,
            'duration': int_or_none(video.get('length')),
            'age_limit': parse_age_limit(video.get('age_limit')),
            'tags': tags,
            'formats': formats,
        }
class IndavideoIE(InfoExtractor):
    # Page-level extractor: locates the embedded player URL and delegates
    # actual extraction to IndavideoEmbedIE.
    # (Fix: the original final line had non-Python dataset residue fused
    # onto it; it has been removed.)
    _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'http://indavideo.hu/video/Vicces_cica_1',
        'md5': '8c82244ba85d2a2310275b318eb51eac',
        'info_dict': {
            'id': '1335611',
            'display_id': 'Vicces_cica_1',
            'ext': 'mp4',
            'title': 'Vicces cica',
            'description': 'Játszik a tablettel. :D',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Jet_Pack',
            'uploader_id': '491217',
            'timestamp': 1390821212,
            'upload_date': '20140127',
            'duration': 7,
            'age_limit': 0,
            'tags': ['vicces', 'macska', 'cica', 'ügyes', 'nevetés', 'játszik', 'Cukiság', 'Jet_Pack'],
        },
    }, {
        'url': 'http://index.indavideo.hu/video/2015_0728_beregszasz',
        'only_matching': True,
    }, {
        'url': 'http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko',
        'only_matching': True,
    }, {
        'url': 'http://erotika.indavideo.hu/video/Amator_tini_punci',
        'only_matching': True,
    }, {
        'url': 'http://film.indavideo.hu/video/f_hrom_nagymamm_volt',
        'only_matching': True,
    }, {
        'url': 'http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)
        # The page advertises the embed player via a rel="video_src" link.
        embed_url = self._search_regex(
            r'<link[^>]+rel="video_src"[^>]+href="(.+?)"', webpage, 'embed url')

        return {
            '_type': 'url_transparent',
            'ie_key': 'IndavideoEmbed',
            'url': embed_url,
            'display_id': display_id,
        }
import re
from django.db.backends import BaseDatabaseOperations
# Matches "PostgreSQL X.Y" or "PostgreSQL X.Y.Z" in the output of the
# server's version() function; groups capture major, minor, and patch.
server_version_re = re.compile(r'PostgreSQL (\d{1,2})\.(\d{1,2})\.?(\d{1,2})?')
# This DatabaseOperations class lives in here instead of base.py because it's
# used by both the 'postgresql' and 'postgresql_psycopg2' backends.
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL implementation of the database-operations interface.

    Shared by the 'postgresql' and 'postgresql_psycopg2' backends.
    """

    def __init__(self):
        # Lazily populated by the postgres_version property.
        self._postgres_version = None

    def _get_postgres_version(self):
        """Return the server version as a list of ints, e.g. [8, 1, 4].

        Queried once via SELECT version() and cached afterwards.
        Raises if the version string cannot be parsed.
        """
        if self._postgres_version is None:
            from django.db import connection
            cursor = connection.cursor()
            cursor.execute("SELECT version()")
            version_string = cursor.fetchone()[0]
            m = server_version_re.match(version_string)
            if not m:
                raise Exception('Unable to determine PostgreSQL version from version() function string: %r' % version_string)
            self._postgres_version = [int(val) for val in m.groups() if val]
        return self._postgres_version
    postgres_version = property(_get_postgres_version)

    def date_extract_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def field_cast_sql(self, db_type):
        # Compare inet values by their host part so text lookups work.
        if db_type == 'inet':
            return 'HOST(%s)'
        return '%s'

    def last_insert_id(self, cursor, table_name, pk_name):
        cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (table_name, pk_name))
        return cursor.fetchone()[0]

    def no_limit_value(self):
        # PostgreSQL needs no sentinel for "no LIMIT"; the clause is omitted.
        return None

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name # Quoting once is enough.
        return '"%s"' % name

    def sql_flush(self, style, tables, sequences):
        """Return SQL that empties the given tables and resets sequences."""
        if tables:
            # BUG FIX: the original check was
            #   self.postgres_version[0] >= 8 and self.postgres_version[1] >= 1
            # which misclassifies releases like 9.0 (minor 0 < 1) and would
            # needlessly fall back to per-table DELETE. Compare the
            # [major, minor] pair lexicographically instead.
            if self.postgres_version[:2] >= [8, 1]:
                # Postgres 8.1+ can do 'TRUNCATE x, y, z...;'. In fact, it *has to*
                # in order to be able to truncate tables referenced by a foreign
                # key in any other table. The result is a single SQL TRUNCATE
                # statement.
                sql = ['%s %s;' % \
                    (style.SQL_KEYWORD('TRUNCATE'),
                     style.SQL_FIELD(', '.join([self.quote_name(table) for table in tables]))
                )]
            else:
                # Older versions of Postgres can't do TRUNCATE in a single call, so
                # they must use a simple delete.
                sql = ['%s %s %s;' % \
                        (style.SQL_KEYWORD('DELETE'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_FIELD(self.quote_name(table))
                         ) for table in tables]
            # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
            # to reset sequence indices
            for sequence_info in sequences:
                table_name = sequence_info['table']
                column_name = sequence_info['column']
                if column_name and len(column_name) > 0:
                    sequence_name = '%s_%s_seq' % (table_name, column_name)
                else:
                    sequence_name = '%s_id_seq' % table_name
                sql.append("%s setval('%s', 1, false);" % \
                    (style.SQL_KEYWORD('SELECT'),
                     style.SQL_FIELD(self.quote_name(sequence_name)))
                )
            return sql
        else:
            return []

    def sequence_reset_sql(self, style, model_list):
        """Return SQL that resyncs each model's sequences with its data."""
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append("%s setval('%s', coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
                        (style.SQL_KEYWORD('SELECT'),
                        style.SQL_FIELD(qn('%s_%s_seq' % (model._meta.db_table, f.column))),
                        style.SQL_FIELD(qn(f.column)),
                        style.SQL_FIELD(qn(f.column)),
                        style.SQL_KEYWORD('IS NOT'),
                        style.SQL_KEYWORD('FROM'),
                        style.SQL_TABLE(qn(model._meta.db_table))))
                    break # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                output.append("%s setval('%s', coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
                    (style.SQL_KEYWORD('SELECT'),
                    style.SQL_FIELD(qn('%s_id_seq' % f.m2m_db_table())),
                    style.SQL_FIELD(qn('id')),
                    style.SQL_FIELD(qn('id')),
                    style.SQL_KEYWORD('IS NOT'),
                    style.SQL_KEYWORD('FROM'),
                    style.SQL_TABLE(qn(f.m2m_db_table()))))
        return output
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
# Directory containing the recorded device-response fixtures for the tests.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Per-path cache so each fixture file is read and parsed at most once.
fixture_data = {}
def load_fixture(name):
    """Return the contents of fixture *name*, JSON-decoded when possible.

    Results are memoized in the module-level fixture_data cache.
    """
    key = os.path.join(fixture_path, name)
    if key not in fixture_data:
        with open(key) as fileobj:
            raw = fileobj.read()
        try:
            raw = json.loads(raw)
        except Exception:
            # Fixture is plain text, not JSON; keep it unparsed.
            pass
        fixture_data[key] = raw
    return fixture_data[key]
class TestDellos9Module(ModuleTestCase):
    """Shared harness for dellos9 module tests.

    Subclasses set ``self.module`` and override load_fixtures(); the
    helpers below run the module and assert on the exit_json/fail_json
    contract. (Fix: the original final line had non-Python dataset
    residue fused onto it; it has been removed.)
    """

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        """Run the module, asserting failed/changed state and 'updates'."""
        self.load_fixtures(commands)
        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)
        if commands is not None:
            if sort:
                self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
            else:
                self.assertEqual(commands, result['updates'], result['updates'])
        return result

    def failed(self):
        """Run the module expecting fail_json; return its payload."""
        with self.assertRaises(AnsibleFailJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertTrue(result['failed'], result)
        return result

    def changed(self, changed=False):
        """Run the module expecting exit_json; return its payload."""
        with self.assertRaises(AnsibleExitJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertEqual(result['changed'], changed, result)
        return result

    def load_fixtures(self, commands=None):
        # Hook for subclasses to install device-response fixtures; no-op here.
        pass
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
from math import sqrt
import numpy as np
from scipy import sparse
from ..externals.six.moves import xrange
from .hierarchical import AgglomerativeClustering
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..exceptions import NotFittedError
from ..metrics.pairwise import euclidean_distances
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
def _iterate_sparse_X(X):
    """Yield each row of a CSR matrix as a dense 1-D array.

    Densifying one row at a time avoids constructing a sparse matrix for
    every row, which is expensive.
    """
    n_features = X.shape[1]
    col_indices = X.indices
    values = X.data
    row_ptr = X.indptr
    # Consecutive indptr entries delimit each row's slice of data/indices.
    for start, end in zip(row_ptr[:-1], row_ptr[1:]):
        dense_row = np.zeros(n_features)
        dense_row[col_indices[start:end]] = values[start:end]
        yield dense_row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    if node.is_leaf:
        # Splice the two new leaves into the doubly linked leaf list in
        # place of the node being split.
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]
    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # BUG FIX: `dist[[farthest_idx]]` relied on numpy's deprecated (now
    # removed) non-tuple-sequence indexing to pick the two rows; index
    # the rows of the most distant pair explicitly instead.
    node1_dist = dist[farthest_idx[0]]
    node2_dist = dist[farthest_idx[1]]
    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.
    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.
    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.
    branching_factor : int
        Maximum number of CF subclusters in each node.
    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.
    n_features : int
        The number of features.
    Attributes
    ----------
    subclusters_ : array-like
        list of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
    centroids_ : ndarray
        view of ``init_centroids_``.
    squared_norm_ : ndarray
        view of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        # One extra slot (branching_factor + 1) lets the node temporarily
        # hold the subcluster that triggers a split.
        self.subclusters_ = []
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None
    def append_subcluster(self, subcluster):
        """Store ``subcluster`` in the next free slot and refresh the views."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.
        Returns True when this node overflowed and must itself be split
        by the caller, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # dist_matrix holds ||c_j||^2 - 2 <c_j, x>, which orders the
        # candidates the same way as the true squared distance to x.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the subcluster of the leaf of the tree has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new samples enters
such that the number of subclusters exceed the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
    """
    Transform X into subcluster centroids dimension.

    Each dimension represents the distance from the sample point to each
    cluster centroid.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
        Transformed data.
    """
    check_is_fitted(self, 'subcluster_centers_')
    centers = self.subcluster_centers_
    return euclidean_distances(X, centers)
def _global_clustering(self, X=None):
    """
    Global clustering for the subclusters obtained after fitting
    """
    # n_clusters may be an int, None, or a clusterer instance.
    clusterer = self.n_clusters
    centroids = self.subcluster_centers_
    # Only label the input when it is provided and labeling was requested.
    compute_labels = (X is not None) and self.compute_labels
    # Preprocessing for the global clustering.
    not_enough_centroids = False
    if isinstance(clusterer, int):
        clusterer = AgglomerativeClustering(
            n_clusters=self.n_clusters)
        # There is no need to perform the global clustering step.
        if len(centroids) < self.n_clusters:
            not_enough_centroids = True
    elif (clusterer is not None and not
          hasattr(clusterer, 'fit_predict')):
        raise ValueError("n_clusters should be an instance of "
                         "ClusterMixin or an int")
    # To use in predict to avoid recalculation.
    self._subcluster_norms = row_norms(
        self.subcluster_centers_, squared=True)
    if clusterer is None or not_enough_centroids:
        # No global step: every subcluster keeps its own label.
        self.subcluster_labels_ = np.arange(len(centroids))
        if not_enough_centroids:
            warnings.warn(
                "Number of subclusters found (%d) by Birch is less "
                "than (%d). Decrease the threshold."
                % (len(centroids), self.n_clusters))
    else:
        # The global clustering step that clusters the subclusters of
        # the leaves. It assumes the centroids of the subclusters as
        # samples and finds the final centroids.
        self.subcluster_labels_ = clusterer.fit_predict(
            self.subcluster_centers_)
    if compute_labels:
        self.labels_ = self.predict(X)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library of basic cythonized CombineFn subclasses.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from apache_beam.transforms import core
class AccumulatorCombineFn(core.CombineFn):
    """Generic CombineFn driver that delegates to an accumulator class.

    Subclasses set ``_accumulator_type`` to a class exposing add_input,
    merge, and extract_output; instances of that class carry the state.
    """
    # singleton?

    def create_accumulator(self):
        # One fresh, empty accumulator per bundle.
        return self._accumulator_type()

    @staticmethod
    def add_input(accumulator, element):
        accumulator.add_input(element)
        return accumulator

    def merge_accumulators(self, accumulators):
        # Fold every partial result into one new accumulator.
        merged = self._accumulator_type()
        merged.merge(accumulators)
        return merged

    @staticmethod
    def extract_output(accumulator):
        return accumulator.extract_output()

    def __eq__(self, other):
        return (isinstance(other, AccumulatorCombineFn)
                and self._accumulator_type is other._accumulator_type)

    def __hash__(self):
        return hash(self._accumulator_type)
# int64 bounds used by the integer accumulators below.
# NOTE(review): presumably the indirection through a variable and
# globals() keeps the cythonized C source free of out-of-range integer
# literals -- confirm against the Cython build.
_63 = 63  # Avoid large literals in C source code.
globals()['INT64_MAX'] = 2**_63 - 1
globals()['INT64_MIN'] = -2**_63
class CountAccumulator(object):
    """Running count of observed elements; element values are ignored."""

    def __init__(self):
        self.value = 0

    def add_input(self, unused_element):
        self.value += 1

    def merge(self, accumulators):
        self.value += sum(other.value for other in accumulators)

    def extract_output(self):
        return self.value
class SumInt64Accumulator(object):
    """Sum of int64-range integers with C-style overflow wrap-around.

    add_input raises OverflowError for inputs outside the int64 range;
    extract_output emulates two's-complement int64 overflow of the
    running total.
    """

    def __init__(self):
        self.value = 0

    def add_input(self, element):
        element = int(element)
        if not INT64_MIN <= element <= INT64_MAX:
            raise OverflowError(element)
        self.value += element

    def merge(self, accumulators):
        for accumulator in accumulators:
            self.value += accumulator.value

    def extract_output(self):
        if not INT64_MIN <= self.value <= INT64_MAX:
            # Emulate two's-complement int64 wrap-around: reduce modulo
            # 2**64, then map the upper half of the unsigned range to
            # negatives. The comparison must be strict ('>'): INT64_MAX
            # itself is a valid positive value and must not be wrapped
            # (the previous '>=' mapped it below INT64_MIN).
            self.value %= 2**64
            if self.value > INT64_MAX:
                self.value -= 2**64
        return self.value
class MinInt64Accumulator(object):
    """Minimum of int64-range integers seen so far."""

    def __init__(self):
        # Start at the top of the range so any valid input replaces it.
        self.value = INT64_MAX

    def add_input(self, element):
        element = int(element)
        if not INT64_MIN <= element <= INT64_MAX:
            raise OverflowError(element)
        self.value = min(self.value, element)

    def merge(self, accumulators):
        for other in accumulators:
            self.value = min(self.value, other.value)

    def extract_output(self):
        return self.value
class MaxInt64Accumulator(object):
    """Maximum of int64-range integers seen so far."""

    def __init__(self):
        # Start at the bottom of the range so any valid input replaces it.
        self.value = INT64_MIN

    def add_input(self, element):
        element = int(element)
        if not INT64_MIN <= element <= INT64_MAX:
            raise OverflowError(element)
        self.value = max(self.value, element)

    def merge(self, accumulators):
        for other in accumulators:
            self.value = max(self.value, other.value)

    def extract_output(self):
        return self.value
class MeanInt64Accumulator(object):
    """Arithmetic mean of int64-range integers.

    The running sum wraps like a C int64 (mirroring SumInt64Accumulator);
    an empty accumulator yields NaN.
    """

    def __init__(self):
        self.sum = 0
        self.count = 0

    def add_input(self, element):
        element = int(element)
        if not INT64_MIN <= element <= INT64_MAX:
            raise OverflowError(element)
        self.sum += element
        self.count += 1

    def merge(self, accumulators):
        for accumulator in accumulators:
            self.sum += accumulator.sum
            self.count += accumulator.count

    def extract_output(self):
        if not INT64_MIN <= self.sum <= INT64_MAX:
            # Two's-complement int64 wrap-around. The comparison must be
            # strict ('>'): a wrapped sum equal to INT64_MAX is a valid
            # positive value (the previous '>=' pushed it below INT64_MIN).
            self.sum %= 2**64
            if self.sum > INT64_MAX:
                self.sum -= 2**64
        return self.sum / self.count if self.count else _NAN
# Concrete integer CombineFns: each binds the generic AccumulatorCombineFn
# driver to one accumulator implementation above.
class CountCombineFn(AccumulatorCombineFn):
    _accumulator_type = CountAccumulator
class SumInt64Fn(AccumulatorCombineFn):
    _accumulator_type = SumInt64Accumulator
class MinInt64Fn(AccumulatorCombineFn):
    _accumulator_type = MinInt64Accumulator
class MaxInt64Fn(AccumulatorCombineFn):
    _accumulator_type = MaxInt64Accumulator
class MeanInt64Fn(AccumulatorCombineFn):
    _accumulator_type = MeanInt64Accumulator
# Float sentinels used by the double accumulators below: infinities as
# identity values for min/max, NaN for the empty-mean case.
_POS_INF = float('inf')
_NEG_INF = float('-inf')
_NAN = float('nan')
class SumDoubleAccumulator(object):
    """Running sum of inputs coerced to float."""

    def __init__(self):
        self.value = 0

    def add_input(self, element):
        self.value += float(element)

    def merge(self, accumulators):
        for other in accumulators:
            self.value += other.value

    def extract_output(self):
        return self.value
class MinDoubleAccumulator(object):
    """Running minimum of float inputs; starts at +infinity."""

    def __init__(self):
        self.value = _POS_INF

    def add_input(self, element):
        # min() keeps the current value unless the new one compares lower,
        # matching the original explicit '<' comparison (incl. NaN inputs).
        self.value = min(self.value, float(element))

    def merge(self, accumulators):
        for other in accumulators:
            self.value = min(self.value, other.value)

    def extract_output(self):
        return self.value
class MaxDoubleAccumulator(object):
    """Running maximum of float inputs; starts at -infinity."""

    def __init__(self):
        self.value = _NEG_INF

    def add_input(self, element):
        # max() keeps the current value unless the new one compares higher,
        # matching the original explicit '>' comparison (incl. NaN inputs).
        self.value = max(self.value, float(element))

    def merge(self, accumulators):
        for other in accumulators:
            self.value = max(self.value, other.value)

    def extract_output(self):
        return self.value
class MeanDoubleAccumulator(object):
    """Running mean of float inputs; NaN when no elements were seen."""

    def __init__(self):
        self.sum = 0
        self.count = 0

    def add_input(self, element):
        self.sum += float(element)
        self.count += 1

    def merge(self, accumulators):
        for other in accumulators:
            self.sum += other.sum
            self.count += other.count

    def extract_output(self):
        if not self.count:
            return _NAN
        return self.sum / self.count
# Concrete floating-point CombineFns backed by the double accumulators.
class SumFloatFn(AccumulatorCombineFn):
    _accumulator_type = SumDoubleAccumulator
class MinFloatFn(AccumulatorCombineFn):
    _accumulator_type = MinDoubleAccumulator
class MaxFloatFn(AccumulatorCombineFn):
    _accumulator_type = MaxDoubleAccumulator
class MeanFloatFn(AccumulatorCombineFn):
    _accumulator_type = MeanDoubleAccumulator
class AllAccumulator(object):
    """Logical AND over the truthiness of every observed element."""

    def __init__(self):
        self.value = True

    def add_input(self, element):
        # bool(element) mirrors the original 'not not element' coercion.
        self.value = self.value and bool(element)

    def merge(self, accumulators):
        for other in accumulators:
            self.value = self.value and other.value

    def extract_output(self):
        return self.value
class AnyAccumulator(object):
    """Logical OR over the truthiness of every observed element."""

    def __init__(self):
        self.value = False

    def add_input(self, element):
        # bool(element) mirrors the original 'not not element' coercion.
        self.value = self.value or bool(element)

    def merge(self, accumulators):
        for other in accumulators:
            self.value = self.value or other.value

    def extract_output(self):
        return self.value
# Boolean CombineFns backed by the truthiness accumulators above.
class AnyCombineFn(AccumulatorCombineFn):
    _accumulator_type = AnyAccumulator
class AllCombineFn(AccumulatorCombineFn):
    _accumulator_type = AllAccumulator
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for FLAVA
"""
from ...processing_utils import ProcessorMixin
from ...utils import auto_docstring
@auto_docstring
class FlavaProcessor(ProcessorMixin):
    # Thin wrapper that wires an image processor and a tokenizer into the
    # generic ProcessorMixin machinery; all call/decode behavior is
    # inherited. Docstrings are generated by @auto_docstring.
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # NOTE(review): **kwargs is accepted but not forwarded to
        # super().__init__ -- confirm this is intentional.
        super().__init__(image_processor, tokenizer)
# Public API of this module.
__all__ = ["FlavaProcessor"]
"""
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import pygtk
pygtk.require('2.0')
import gtk
import Utils
import Actions
class SimpleTextDisplay(gtk.TextView):
    """A non editable gtk text view."""

    def __init__(self, text=''):
        """
        TextDisplay constructor.

        Args:
            text: the text to display (string)
        """
        buf = gtk.TextBuffer()
        buf.set_text(text)
        # Expose the buffer's setter directly so callers can replace the
        # displayed text without reaching into the buffer.
        self.set_text = buf.set_text
        gtk.TextView.__init__(self, buf)
        self.set_editable(False)
        self.set_cursor_visible(False)
        self.set_wrap_mode(gtk.WRAP_WORD_CHAR)
class TextDisplay(SimpleTextDisplay):
    """Log display: handles backspaces, auto-scroll with a scroll lock,
    and a context menu offering save/clear actions."""

    def __init__(self, text=''):
        """
        TextDisplay constructor.

        Args:
            text: the text to display (string)
        """
        SimpleTextDisplay.__init__(self, text)
        self.scroll_lock = True
        self.connect("populate-popup", self.populate_popup)

    def insert(self, line):
        """Append a line to the buffer, honoring leading backspaces."""
        # make backspaces work
        line = self._consume_backspaces(line)
        # add the remaining text to buffer
        self.get_buffer().insert(self.get_buffer().get_end_iter(), line)
        # Automatically scroll on insert
        self.scroll_to_end()

    def _consume_backspaces(self, line):
        """Delete one buffered char per leading \\b and return the rest.

        Always returns a string (never None) so the result can be passed
        straight to gtk.TextBuffer.insert(); the previous version returned
        None for empty input and raised IndexError on all-backspace input.
        """
        if not line: return ''
        # for each \b delete one char from the buffer
        back_count = 0
        start_iter = self.get_buffer().get_end_iter()
        # Bound-check so a line made entirely of backspaces terminates.
        while back_count < len(line) and line[back_count] == '\b':
            # stop at the beginning of a line
            if not start_iter.starts_line(): start_iter.backward_char()
            back_count += 1
        # remove chars
        self.get_buffer().delete(start_iter, self.get_buffer().get_end_iter())
        # return remaining text
        return line[back_count:]

    def scroll_to_end(self):
        """Scroll to the insert mark unless the scroll lock is disabled."""
        if self.scroll_lock:
            buffer = self.get_buffer()
            buffer.move_mark(buffer.get_insert(), buffer.get_end_iter())
            self.scroll_to_mark(buffer.get_insert(), 0.0)

    def clear(self):
        """Delete the entire buffer contents."""
        buffer = self.get_buffer()
        buffer.delete(buffer.get_start_iter(), buffer.get_end_iter())

    def save(self, file_path):
        """Write the buffer contents to file_path (overwriting)."""
        buffer = self.get_buffer()
        text = buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter(), True)
        report_file = open(file_path, 'w')
        try:
            report_file.write(text)
        finally:
            # Close even if the write fails (previously leaked the handle).
            report_file.close()

    # Callback functions to handle the scrolling lock and clear context menus options
    # Action functions are set by the ActionHandler's init function
    def clear_cb(self, menu_item, web_view):
        Actions.CLEAR_REPORTS()

    def scroll_back_cb(self, menu_item, web_view):
        Actions.TOGGLE_SCROLL_LOCK()

    def save_cb(self, menu_item, web_view):
        Actions.SAVE_REPORTS()

    def populate_popup(self, view, menu):
        """Create a popup menu for the scroll lock and clear functions"""
        menu.append(gtk.SeparatorMenuItem())
        lock = gtk.CheckMenuItem("Scroll Lock")
        menu.append(lock)
        lock.set_active(self.scroll_lock)
        lock.connect('activate', self.scroll_back_cb, view)
        save = gtk.ImageMenuItem(gtk.STOCK_SAVE)
        menu.append(save)
        save.connect('activate', self.save_cb, view)
        clear = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
        menu.append(clear)
        clear.connect('activate', self.clear_cb, view)
        menu.show_all()
        return False
def MessageDialogHelper(type, buttons, title=None, markup=None, default_response=None, extra_buttons=None):
    """
    Create a modal message dialog, run it, and return the response.

    Args:
        type: the type of message: gtk.MESSAGE_INFO, gtk.MESSAGE_WARNING, gtk.MESSAGE_QUESTION or gtk.MESSAGE_ERROR
        buttons: the predefined set of buttons to use:
            gtk.BUTTONS_NONE, gtk.BUTTONS_OK, gtk.BUTTONS_CLOSE, gtk.BUTTONS_CANCEL, gtk.BUTTONS_YES_NO, gtk.BUTTONS_OK_CANCEL
        title: the title of the window (string)
        markup: the message text with pango markup
        default_response: if set, determines which button is highlighted by default
        extra_buttons: a tuple containing pairs of values; each value is the button's text and the button's return value

    Returns:
        the gtk response from run()
    """
    dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, type, buttons)
    if title:
        dialog.set_title(title)
    if markup:
        dialog.set_markup(markup)
    if extra_buttons:
        dialog.add_buttons(*extra_buttons)
    if default_response:
        dialog.set_default_response(default_response)
    response = dialog.run()
    dialog.destroy()
    return response
# Cheetah template: renders one bold "Error N:" header per validation
# error, followed by the escaped message text.
ERRORS_MARKUP_TMPL="""\
#for $i, $err_msg in enumerate($errors)
<b>Error $i:</b>
$encode($err_msg.replace('\t', ' '))
#end for"""
def ErrorsDialog(flowgraph):
    """Show a modal dialog listing the flow graph's validation errors."""
    MessageDialogHelper(
        type=gtk.MESSAGE_ERROR,
        buttons=gtk.BUTTONS_CLOSE,
        title='Flow Graph Errors',
        markup=Utils.parse_template(ERRORS_MARKUP_TMPL, errors=flowgraph.get_error_messages()),
    )
class AboutDialog(gtk.AboutDialog):
    """A cute little about dialog."""

    def __init__(self, platform):
        """AboutDialog constructor.

        Populates the dialog from the platform's metadata, runs it
        modally, and destroys it before returning.
        """
        gtk.AboutDialog.__init__(self)
        self.set_name(platform.get_name())
        self.set_version(platform.get_version())
        self.set_license(platform.get_license())
        # The first line of the license text doubles as the copyright.
        self.set_copyright(platform.get_license().splitlines()[0])
        self.set_website(platform.get_website())
        self.run()
        self.destroy()
# Modal info dialog listing mouse/keyboard usage tips for the editor.
def HelpDialog(): MessageDialogHelper(
    type=gtk.MESSAGE_INFO,
    buttons=gtk.BUTTONS_CLOSE,
    title='Help',
    markup="""\
<b>Usage Tips</b>
<u>Add block</u>: drag and drop or double click a block in the block selection window.
<u>Rotate block</u>: Select a block, press left/right on the keyboard.
<u>Change type</u>: Select a block, press up/down on the keyboard.
<u>Edit parameters</u>: double click on a block in the flow graph.
<u>Make connection</u>: click on the source port of one block, then click on the sink port of another block.
<u>Remove connection</u>: select the connection and press delete, or drag the connection.
* See the menu for other keyboard shortcuts.""")
# Cheetah template: one colored row per (title, color_spec) pair, with
# titles centered to a common width.
COLORS_DIALOG_MARKUP_TMPL = """\
<b>Color Mapping</b>
#if $colors
#set $max_len = max([len(color[0]) for color in $colors]) + 10
#for $title, $color_spec in $colors
<span background="$color_spec"><tt>$($encode($title).center($max_len))</tt></span>
#end for
#end if
"""
def TypesDialog(platform):
    """Show a modal dialog mapping the platform's data types to colors."""
    MessageDialogHelper(
        type=gtk.MESSAGE_INFO,
        buttons=gtk.BUTTONS_CLOSE,
        title='Types',
        markup=Utils.parse_template(COLORS_DIALOG_MARKUP_TMPL, colors=platform.get_colors()),
    )
"""Default tags used by the template system, available to all templates."""
import re
import sys
import warnings
from collections import namedtuple
from collections.abc import Iterable, Mapping
from datetime import datetime
from itertools import cycle as itertools_cycle
from itertools import groupby
from django.conf import settings
from django.http import QueryDict
from django.utils import timezone
from django.utils.datastructures import DeferredSubDict
from django.utils.html import conditional_escape, escape, format_html
from django.utils.lorem_ipsum import paragraphs, words
from django.utils.safestring import mark_safe
from .base import (
BLOCK_TAG_END,
BLOCK_TAG_START,
COMMENT_TAG_END,
COMMENT_TAG_START,
FILTER_SEPARATOR,
SINGLE_BRACE_END,
SINGLE_BRACE_START,
VARIABLE_ATTRIBUTE_SEPARATOR,
VARIABLE_TAG_END,
VARIABLE_TAG_START,
Node,
NodeList,
PartialTemplate,
TemplateSyntaxError,
VariableDoesNotExist,
kwarg_re,
render_value_in_context,
token_kwargs,
)
from .context import Context
from .defaultfilters import date
from .library import Library
from .smartif import IfParser, Literal
# Module-level registry that all tag decorators below attach to.
register = Library()
class AutoEscapeControlNode(Node):
    """Implement the actions of the autoescape tag."""

    def __init__(self, setting, nodelist):
        self.setting = setting
        self.nodelist = nodelist

    def render(self, context):
        # Swap the context's autoescape flag while rendering the enclosed
        # nodes, then restore the caller's setting.
        previous = context.autoescape
        context.autoescape = self.setting
        rendered = self.nodelist.render(context)
        context.autoescape = previous
        # Output produced with autoescaping enabled is already escaped.
        return mark_safe(rendered) if self.setting else rendered
class CommentNode(Node):
    """Node for {% comment %}: contributes nothing to the output."""

    child_nodelists = ()

    def render(self, context):
        return ""
class CsrfTokenNode(Node):
    """Render the hidden CSRF input for {% csrf_token %}."""

    child_nodelists = ()

    def render(self, context):
        csrf_token = context.get("csrf_token")
        if not csrf_token:
            # It's very probable that the token is missing because of
            # misconfiguration, so we raise a warning
            if settings.DEBUG:
                warnings.warn(
                    "A {% csrf_token %} was used in a template, but the context "
                    "did not provide the value. This is usually caused by not "
                    "using RequestContext."
                )
            return ""
        if csrf_token == "NOTPROVIDED":
            return format_html("")
        return format_html(
            '<input type="hidden" name="csrfmiddlewaretoken" value="{}">',
            csrf_token,
        )
class CycleNode(Node):
    """Rotate through a fixed list of values on successive renders."""

    def __init__(self, cyclevars, variable_name=None, silent=False):
        self.cyclevars = cyclevars
        self.variable_name = variable_name
        self.silent = silent

    def render(self, context):
        # Iteration state is keyed on this node in render_context, so each
        # top-level template render restarts the cycle.
        if self not in context.render_context:
            context.render_context[self] = itertools_cycle(self.cyclevars)
        value = next(context.render_context[self]).resolve(context)
        if self.variable_name:
            context.set_upward(self.variable_name, value)
        return "" if self.silent else render_value_in_context(value, context)

    def reset(self, context):
        """
        Reset the cycle iteration back to the beginning.
        """
        context.render_context[self] = itertools_cycle(self.cyclevars)
class DebugNode(Node):
    """Dump the rendering context and sys.modules when DEBUG is on."""

    def render(self, context):
        if not settings.DEBUG:
            return ""
        from pprint import pformat

        # One pretty-printed, escaped section per context layer, then the
        # loaded modules.
        parts = [escape(pformat(layer)) for layer in context]
        parts.append("\n\n")
        parts.append(escape(pformat(sys.modules)))
        return "".join(parts)
class FilterNode(Node):
    """Render enclosed nodes, then pipe the output through the filters."""

    def __init__(self, filter_expr, nodelist):
        self.filter_expr = filter_expr
        self.nodelist = nodelist

    def render(self, context):
        rendered = self.nodelist.render(context)
        # The block's output is exposed to the filter expression as `var`.
        with context.push(var=rendered):
            return self.filter_expr.resolve(context)
class FirstOfNode(Node):
    """Output the first truthy argument, optionally saving it to a var."""

    def __init__(self, variables, asvar=None):
        self.vars = variables
        self.asvar = asvar

    def render(self, context):
        result = ""
        for candidate in self.vars:
            resolved = candidate.resolve(context, ignore_failures=True)
            if resolved:
                result = render_value_in_context(resolved, context)
                break
        if self.asvar:
            context[self.asvar] = result
            return ""
        return result
class ForNode(Node):
    """Loop over a sequence ({% for %}), exposing `forloop` metadata."""

    child_nodelists = ("nodelist_loop", "nodelist_empty")

    def __init__(
        self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None
    ):
        self.loopvars = loopvars
        self.sequence = sequence
        self.is_reversed = is_reversed
        self.nodelist_loop = nodelist_loop
        if nodelist_empty is None:
            self.nodelist_empty = NodeList()
        else:
            self.nodelist_empty = nodelist_empty

    def __repr__(self):
        reversed_text = " reversed" if self.is_reversed else ""
        return "<%s: for %s in %s, tail_len: %d%s>" % (
            self.__class__.__name__,
            ", ".join(self.loopvars),
            self.sequence,
            len(self.nodelist_loop),
            reversed_text,
        )

    def render(self, context):
        # Save the enclosing loop's metadata so nested loops can reach it
        # via forloop.parentloop.
        if "forloop" in context:
            parentloop = context["forloop"]
        else:
            parentloop = {}
        with context.push():
            values = self.sequence.resolve(context, ignore_failures=True)
            if values is None:
                values = []
            if not hasattr(values, "__len__"):
                # Materialize generators/iterators: length is needed below.
                values = list(values)
            len_values = len(values)
            if len_values < 1:
                # Empty sequence: render the {% empty %} branch instead.
                return self.nodelist_empty.render(context)
            nodelist = []
            if self.is_reversed:
                values = reversed(values)
            num_loopvars = len(self.loopvars)
            unpack = num_loopvars > 1
            # Create a forloop value in the context. We'll update counters on
            # each iteration just below.
            loop_dict = context["forloop"] = {
                "parentloop": parentloop,
                "length": len_values,
            }
            for i, item in enumerate(values):
                # Shortcuts for current loop iteration number.
                loop_dict["counter0"] = i
                loop_dict["counter"] = i + 1
                # Reverse counter iteration numbers.
                loop_dict["revcounter"] = len_values - i
                loop_dict["revcounter0"] = len_values - i - 1
                # Boolean values designating first and last times through loop.
                loop_dict["first"] = i == 0
                loop_dict["last"] = i == len_values - 1
                pop_context = False
                if unpack:
                    # If there are multiple loop variables, unpack the item
                    # into them.
                    try:
                        len_item = len(item)
                    except TypeError:  # not an iterable
                        len_item = 1
                    # Check loop variable count before unpacking
                    if num_loopvars != len_item:
                        raise ValueError(
                            "Need {} values to unpack in for loop; got {}. ".format(
                                num_loopvars, len_item
                            ),
                        )
                    unpacked_vars = dict(zip(self.loopvars, item))
                    pop_context = True
                    context.update(unpacked_vars)
                else:
                    context[self.loopvars[0]] = item
                for node in self.nodelist_loop:
                    nodelist.append(node.render_annotated(context))
                if pop_context:
                    # Pop the loop variables pushed on to the context to avoid
                    # the context ending up in an inconsistent state when other
                    # tags (e.g., include and with) push data to context.
                    context.pop()
        return mark_safe("".join(nodelist))
class IfChangedNode(Node):
    """{% ifchanged %}: render content only when it differs from the
    value seen on the previous loop iteration."""

    child_nodelists = ("nodelist_true", "nodelist_false")

    def __init__(self, nodelist_true, nodelist_false, *varlist):
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false
        self._varlist = varlist

    def render(self, context):
        # Init state storage
        state_frame = self._get_context_stack_frame(context)
        # Keyed on this node instance; default None marks "no value yet".
        state_frame.setdefault(self)
        nodelist_true_output = None
        if self._varlist:
            # Consider multiple parameters. This behaves like an OR evaluation
            # of the multiple variables.
            compare_to = [
                var.resolve(context, ignore_failures=True) for var in self._varlist
            ]
        else:
            # The "{% ifchanged %}" syntax (without any variables) compares
            # the rendered output.
            compare_to = nodelist_true_output = self.nodelist_true.render(context)
        if compare_to != state_frame[self]:
            state_frame[self] = compare_to
            # render true block if not already rendered
            return nodelist_true_output or self.nodelist_true.render(context)
        elif self.nodelist_false:
            return self.nodelist_false.render(context)
        return ""

    def _get_context_stack_frame(self, context):
        # The Context object behaves like a stack where each template tag can
        # create a new scope. Find the place where to store the state to detect
        # changes.
        if "forloop" in context:
            # Ifchanged is bound to the local for loop.
            # When there is a loop-in-loop, the state is bound to the inner
            # loop, so it resets when the outer loop continues.
            return context["forloop"]
        else:
            # Using ifchanged outside loops. Effectively this is a no-op
            # because the state is associated with 'self'.
            return context.render_context
class IfNode(Node):
    """{% if %}/{% elif %}/{% else %}: render the first matching branch."""

    def __init__(self, conditions_nodelists):
        self.conditions_nodelists = conditions_nodelists

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __iter__(self):
        for _, nodelist in self.conditions_nodelists:
            yield from nodelist

    @property
    def nodelist(self):
        return NodeList(self)

    def render(self, context):
        for condition, nodelist in self.conditions_nodelists:
            if condition is None:
                # An {% else %} clause always matches.
                return nodelist.render(context)
            try:
                matched = condition.eval(context)
            except VariableDoesNotExist:
                matched = None
            if matched:
                return nodelist.render(context)
        return ""
class LoremNode(Node):
    """Emit lorem ipsum placeholder text ({% lorem %})."""

    def __init__(self, count, method, common):
        self.count = count
        self.method = method
        self.common = common

    def render(self, context):
        try:
            count = int(self.count.resolve(context))
        except (ValueError, TypeError):
            # Unparseable count defaults to a single unit.
            count = 1
        if self.method == "w":
            return words(count, common=self.common)
        paras = paragraphs(count, common=self.common)
        if self.method == "p":
            paras = ["<p>%s</p>" % p for p in paras]
        return "\n\n".join(paras)
# Result record for {% regroup %}: `grouper` is the shared key,
# `list` the consecutive items sharing it.
GroupedResult = namedtuple("GroupedResult", ["grouper", "list"])
class RegroupNode(Node):
    """Group a list by a common attribute ({% regroup %})."""

    def __init__(self, target, expression, var_name):
        self.target = target
        self.expression = expression
        self.var_name = var_name

    def resolve_expression(self, obj, context):
        # This method is called for each object in self.target. See regroup()
        # for the reason why we temporarily put the object in the context.
        context[self.var_name] = obj
        return self.expression.resolve(context, ignore_failures=True)

    def render(self, context):
        sequence = self.target.resolve(context, ignore_failures=True)
        if sequence is None:
            # target variable wasn't found in context; fail silently.
            context[self.var_name] = []
            return ""
        # Consecutive items with equal keys become one GroupedResult.
        grouped = groupby(sequence, lambda obj: self.resolve_expression(obj, context))
        context[self.var_name] = [
            GroupedResult(grouper=key, list=list(items)) for key, items in grouped
        ]
        return ""
class LoadNode(Node):
    """Node for {% load %}: renders to nothing."""

    child_nodelists = ()

    def render(self, context):
        return ""
class NowNode(Node):
    """Render the current date/time formatted per {% now %}."""

    def __init__(self, format_string, asvar=None):
        self.format_string = format_string
        self.asvar = asvar

    def render(self, context):
        # Use the active timezone only when USE_TZ is enabled.
        tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
        stamp = date(datetime.now(tz=tzinfo), self.format_string)
        if self.asvar:
            context[self.asvar] = stamp
            return ""
        return stamp
class PartialDefNode(Node):
    """{% partialdef %}: holds a partial's nodes, rendering them in place
    only when the definition is marked inline."""

    def __init__(self, partial_name, inline, nodelist):
        self.partial_name = partial_name
        self.inline = inline
        self.nodelist = nodelist

    def render(self, context):
        if self.inline:
            return self.nodelist.render(context)
        return ""
class PartialNode(Node):
    """Render a named partial defined elsewhere in the template."""

    def __init__(self, partial_name, partial_mapping):
        # Defer lookup in `partial_mapping` and nodelist to runtime.
        self.partial_name = partial_name
        self.partial_mapping = partial_mapping

    def render(self, context):
        try:
            partial = self.partial_mapping[self.partial_name]
            return partial.render(context)
        except KeyError:
            raise TemplateSyntaxError(
                f"Partial '{self.partial_name}' is not defined in the current template."
            )
class ResetCycleNode(Node):
    """{% resetcycle %}: restart an associated CycleNode's rotation."""

    def __init__(self, node):
        # `node` is the CycleNode to reset.
        self.node = node

    def render(self, context):
        self.node.reset(context)
        return ""
class SpacelessNode(Node):
    """Strip whitespace between HTML tags in the rendered output."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        from django.utils.html import strip_spaces_between_tags

        rendered = self.nodelist.render(context).strip()
        return strip_spaces_between_tags(rendered)
class TemplateTagNode(Node):
    """{% templatetag %}: output one of the template-syntax literals."""

    # Maps the tag's argument keywords to the literal strings they emit.
    mapping = {
        "openblock": BLOCK_TAG_START,
        "closeblock": BLOCK_TAG_END,
        "openvariable": VARIABLE_TAG_START,
        "closevariable": VARIABLE_TAG_END,
        "openbrace": SINGLE_BRACE_START,
        "closebrace": SINGLE_BRACE_END,
        "opencomment": COMMENT_TAG_START,
        "closecomment": COMMENT_TAG_END,
    }

    def __init__(self, tagtype):
        self.tagtype = tagtype

    def render(self, context):
        # Unknown tagtype silently renders as the empty string.
        return self.mapping.get(self.tagtype, "")
class URLNode(Node):
    """Reverse a named URL ({% url %}), optionally storing it in a var."""

    child_nodelists = ()

    def __init__(self, view_name, args, kwargs, asvar):
        self.view_name = view_name
        self.args = args
        self.kwargs = kwargs
        self.asvar = asvar

    def __repr__(self):
        return "<%s view_name='%s' args=%s kwargs=%s as=%s>" % (
            self.__class__.__qualname__,
            self.view_name,
            repr(self.args),
            repr(self.kwargs),
            repr(self.asvar),
        )

    def render(self, context):
        from django.urls import NoReverseMatch, reverse

        args = [arg.resolve(context) for arg in self.args]
        kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
        view_name = self.view_name.resolve(context)
        # Prefer an explicitly-set current_app on the request; fall back to
        # the resolver match's namespace, then None.
        try:
            current_app = context.request.current_app
        except AttributeError:
            try:
                current_app = context.request.resolver_match.namespace
            except AttributeError:
                current_app = None
        # Try to look up the URL. If it fails, raise NoReverseMatch unless the
        # {% url ... as var %} construct is used, in which case return nothing.
        url = ""
        try:
            url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app)
        except NoReverseMatch:
            if self.asvar is None:
                raise
        if self.asvar:
            context[self.asvar] = url
            return ""
        else:
            if context.autoescape:
                url = conditional_escape(url)
            return url
class VerbatimNode(Node):
    """{% verbatim %}: emit pre-captured template text unprocessed."""

    def __init__(self, content):
        self.content = content

    def render(self, context):
        return self.content
class WidthRatioNode(Node):
    """{% widthratio %}: scale value/max_value to max_width, rounded."""

    def __init__(self, val_expr, max_expr, max_width, asvar=None):
        self.val_expr = val_expr
        self.max_expr = max_expr
        self.max_width = max_width
        self.asvar = asvar

    def render(self, context):
        try:
            value = self.val_expr.resolve(context)
            max_value = self.max_expr.resolve(context)
            max_width = int(self.max_width.resolve(context))
        except VariableDoesNotExist:
            return ""
        except (ValueError, TypeError):
            raise TemplateSyntaxError("widthratio final argument must be a number")
        try:
            result = str(round(float(value) / float(max_value) * max_width))
        except ZeroDivisionError:
            result = "0"
        except (ValueError, TypeError, OverflowError):
            # Non-numeric value/max renders as empty output.
            result = ""
        if self.asvar:
            context[self.asvar] = result
            return ""
        return result
class WithNode(Node):
    """{% with %}: bind extra variables around the enclosed block."""

    def __init__(self, var, name, nodelist, extra_context=None):
        self.nodelist = nodelist
        # var and name are legacy attributes, being left in case they are used
        # by third-party subclasses of this Node.
        self.extra_context = extra_context or {}
        if name:
            self.extra_context[name] = var

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def render(self, context):
        resolved = {
            key: expr.resolve(context) for key, expr in self.extra_context.items()
        }
        with context.push(**resolved):
            return self.nodelist.render(context)
@register.tag
def autoescape(parser, token):
    """
    Force autoescape behavior for this block.
    """
    # token.split_contents() isn't useful here because this tag doesn't accept
    # variable as arguments.
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
    mode = bits[1]
    if mode not in ("on", "off"):
        raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
    nodelist = parser.parse(("endautoescape",))
    parser.delete_first_token()
    return AutoEscapeControlNode(mode == "on", nodelist)
@register.tag
def comment(parser, token):
    """
    Ignore everything between ``{% comment %}`` and ``{% endcomment %}``.
    """
    # Skip the enclosed tokens without parsing them into nodes.
    parser.skip_past("endcomment")
    return CommentNode()
@register.tag
def cycle(parser, token):
    """
    Cycle among the given strings each time this tag is encountered.

    Within a loop, cycles among the given strings each time through
    the loop::

        {% for o in some_list %}
            <tr class="{% cycle 'row1' 'row2' %}">
                ...
            </tr>
        {% endfor %}

    Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::

        <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
        <tr class="{% cycle rowcolors %}">...</tr>
        <tr class="{% cycle rowcolors %}">...</tr>

    You can use any number of values, separated by spaces. Commas can also
    be used to separate values; if a comma is used, the cycle values are
    interpreted as literal strings.

    The optional flag "silent" can be used to prevent the cycle declaration
    from returning any value::

        {% for o in some_list %}
            {% cycle 'row1' 'row2' as rowcolors silent %}
            <tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
        {% endfor %}
    """
    # Note: This returns the exact same node on each {% cycle name %} call;
    # that is, the node object returned from {% cycle a b c as name %} and the
    # one returned from {% cycle name %} are the exact same object. This
    # shouldn't cause problems (heh), but if it does, now you know.
    #
    # Ugly hack warning: This stuffs the named template dict into parser so
    # that names are only unique within each template (as opposed to using
    # a global variable, which would make cycle names have to be unique across
    # *all* templates.
    #
    # It keeps the last node in the parser to be able to reset it with
    # {% resetcycle %}.
    args = token.split_contents()
    if len(args) < 2:
        raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
    if len(args) == 2:
        # {% cycle foo %} case: reuse a previously named cycle node.
        name = args[1]
        if not hasattr(parser, "_named_cycle_nodes"):
            raise TemplateSyntaxError(
                "No named cycles in template. '%s' is not defined" % name
            )
        if name not in parser._named_cycle_nodes:
            raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
        return parser._named_cycle_nodes[name]
    as_form = False
    if len(args) > 4:
        # {% cycle ... as foo [silent] %} case.
        if args[-3] == "as":
            if args[-1] != "silent":
                raise TemplateSyntaxError(
                    "Only 'silent' flag is allowed after cycle's name, not '%s'."
                    % args[-1]
                )
            as_form = True
            silent = True
            # Drop the trailing 'silent' flag before extracting values.
            args = args[:-1]
        elif args[-2] == "as":
            as_form = True
            silent = False
    if as_form:
        name = args[-1]
        values = [parser.compile_filter(arg) for arg in args[1:-2]]
        node = CycleNode(values, name, silent=silent)
        if not hasattr(parser, "_named_cycle_nodes"):
            parser._named_cycle_nodes = {}
        parser._named_cycle_nodes[name] = node
    else:
        values = [parser.compile_filter(arg) for arg in args[1:]]
        node = CycleNode(values)
    # Remember the most recent cycle node for {% resetcycle %}.
    parser._last_cycle_node = node
    return node
@register.tag
def csrf_token(parser, token):
    """Return a CsrfTokenNode for the ``{% csrf_token %}`` tag."""
    return CsrfTokenNode()
@register.tag
def debug(parser, token):
    """
    Output a whole load of debugging information, including the current
    context and imported modules.

    Sample usage::

        <pre>
            {% debug %}
        </pre>
    """
    # The tag takes no arguments; all work happens in DebugNode.render().
    return DebugNode()
@register.tag("filter")
def do_filter(parser, token):
    """
    Filter the contents of the block through variable filters.

    Filters can also be piped through each other, and they can have
    arguments -- just like in variable syntax.

    Sample usage::

        {% filter force_escape|lower %}
            This text will be HTML-escaped, and will appear in lowercase.
        {% endfilter %}

    Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
    Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
    template code.
    """
    # token.split_contents() isn't useful here because this tag doesn't accept
    # variable as arguments.
    _, rest = token.contents.split(None, 1)
    # Compile the filter chain against a placeholder variable named "var";
    # FilterNode presumably substitutes the block's rendered output for it.
    filter_expr = parser.compile_filter("var|%s" % (rest))
    for func, unused in filter_expr.filters:
        # Reject escape/safe, identified by the _filter_name marker.
        filter_name = getattr(func, "_filter_name", None)
        if filter_name in ("escape", "safe"):
            raise TemplateSyntaxError(
                '"filter %s" is not permitted. Use the "autoescape" tag instead.'
                % filter_name
            )
    nodelist = parser.parse(("endfilter",))
    parser.delete_first_token()
    return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
    """
    Output the first variable passed that is not False.

    Output nothing if all the passed variables are False.

    Sample usage::

        {% firstof var1 var2 var3 as myvar %}

    This is equivalent to::

        {% if var1 %}
            {{ var1 }}
        {% elif var2 %}
            {{ var2 }}
        {% elif var3 %}
            {{ var3 }}
        {% endif %}

    but much cleaner!

    You can also use a literal string as a fallback value in case all
    passed variables are False::

        {% firstof var1 var2 var3 "fallback value" %}

    If you want to disable auto-escaping of variables you can use::

        {% autoescape off %}
            {% firstof var1 var2 var3 "<strong>fallback value</strong>" %}
        {% autoescape %}

    Or if only some variables should be escaped, you can use::

        {% firstof var1 var2|safe var3 "<strong>fallback</strong>"|safe %}
    """
    candidates = token.split_contents()[1:]
    if not candidates:
        raise TemplateSyntaxError("'firstof' statement requires at least one argument")
    target_var = None
    # Optional trailing "as varname" stores the result instead of printing it.
    if len(candidates) >= 2 and candidates[-2] == "as":
        target_var = candidates[-1]
        candidates = candidates[:-2]
    expressions = [parser.compile_filter(c) for c in candidates]
    return FirstOfNode(expressions, target_var)
@register.tag("for")
def do_for(parser, token):
    """
    Loop over each item in an array.

    For example, to display a list of athletes given ``athlete_list``::

        <ul>
        {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
        {% endfor %}
        </ul>

    You can loop over a list in reverse by using
    ``{% for obj in list reversed %}``.

    You can also unpack multiple values from a two-dimensional array::

        {% for key,value in dict.items %}
            {{ key }}: {{ value }}
        {% endfor %}

    The ``for`` tag can take an optional ``{% empty %}`` clause that will
    be displayed if the given array is empty or could not be found::

        <ul>
          {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
          {% empty %}
            <li>Sorry, no athletes in this list.</li>
          {% endfor %}
        <ul>

    The above is equivalent to -- but shorter, cleaner, and possibly faster
    than -- the following::

        <ul>
          {% if athlete_list %}
            {% for athlete in athlete_list %}
              <li>{{ athlete.name }}</li>
            {% endfor %}
          {% else %}
            <li>Sorry, no athletes in this list.</li>
          {% endif %}
        </ul>

    The for loop sets a number of variables available within the loop:

        =======================  ==============================================
        Variable                 Description
        =======================  ==============================================
        ``forloop.counter``      The current iteration of the loop (1-indexed)
        ``forloop.counter0``     The current iteration of the loop (0-indexed)
        ``forloop.revcounter``   The number of iterations from the end of the
                                 loop (1-indexed)
        ``forloop.revcounter0``  The number of iterations from the end of the
                                 loop (0-indexed)
        ``forloop.first``        True if this is the first time through the
                                 loop
        ``forloop.last``         True if this is the last time through the loop
        ``forloop.parentloop``   For nested loops, this is the loop "above" the
                                 current one
        =======================  ==============================================
    """
    bits = token.split_contents()
    if len(bits) < 4:
        raise TemplateSyntaxError(
            "'for' statements should have at least four words: %s" % token.contents
        )
    # "reversed" may only appear as the final word; it shifts where "in" sits.
    is_reversed = bits[-1] == "reversed"
    in_index = -3 if is_reversed else -2
    if bits[in_index] != "in":
        raise TemplateSyntaxError(
            "'for' statements should use the format"
            " 'for x in y': %s" % token.contents
        )
    # Loop variables may be written "a, b" or "a,b"; rejoin then split on
    # commas so both spellings produce the same names.
    invalid_chars = frozenset((" ", '"', "'", FILTER_SEPARATOR))
    loopvars = re.split(r" *, *", " ".join(bits[1:in_index]))
    for var in loopvars:
        if not var or not invalid_chars.isdisjoint(var):
            raise TemplateSyntaxError(
                "'for' tag received an invalid argument: %s" % token.contents
            )
    sequence = parser.compile_filter(bits[in_index + 1])
    nodelist_loop = parser.parse(
        (
            "empty",
            "endfor",
        )
    )
    # Parsing stopped at either {% empty %} or {% endfor %}.
    token = parser.next_token()
    if token.contents == "empty":
        nodelist_empty = parser.parse(("endfor",))
        parser.delete_first_token()
    else:
        nodelist_empty = None
    return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
class TemplateLiteral(Literal):
    """A Literal wrapping a compiled filter expression, keeping the raw
    template text around for better error messages."""

    def __init__(self, value, text):
        self.value = value
        self.text = text  # for better error messages

    def display(self):
        return self.text

    def eval(self, context):
        # Failed lookups resolve to None rather than raising, so an
        # undefined variable is simply falsy inside {% if %}.
        return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
    """IfParser subclass that compiles operands with the template parser's
    filter machinery and raises template-level syntax errors."""

    error_class = TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        # Keep the template parser so create_var() can compile filters.
        self.template_parser = parser
        super().__init__(*args, **kwargs)

    def create_var(self, value):
        return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag("if")
def do_if(parser, token):
    """
    Evaluate a variable, and if that variable is "true" (i.e., exists, is not
    empty, and is not a false boolean value), output the contents of the block:

    ::

        {% if athlete_list %}
            Number of athletes: {{ athlete_list|count }}
        {% elif athlete_in_locker_room_list %}
            Athletes should be out of the locker room soon!
        {% else %}
            No athletes.
        {% endif %}

    In the above, if ``athlete_list`` is not empty, the number of athletes will
    be displayed by the ``{{ athlete_list|count }}`` variable.

    The ``if`` tag may take one or several `` {% elif %}`` clauses, as well as
    an ``{% else %}`` clause that will be displayed if all previous conditions
    fail. These clauses are optional.

    ``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
    variables or to negate a given variable::

        {% if not athlete_list %}
            There are no athletes.
        {% endif %}

        {% if athlete_list or coach_list %}
            There are some athletes or some coaches.
        {% endif %}

        {% if athlete_list and coach_list %}
            Both athletes and coaches are available.
        {% endif %}

        {% if not athlete_list or coach_list %}
            There are no athletes, or there are some coaches.
        {% endif %}

        {% if athlete_list and not coach_list %}
            There are some athletes and absolutely no coaches.
        {% endif %}

    Comparison operators are also available, and the use of filters is also
    allowed, for example::

        {% if articles|length >= 5 %}...{% endif %}

    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid if tag.

    All supported operators are: ``or``, ``and``, ``in``, ``not in``
    ``==``, ``!=``, ``>``, ``>=``, ``<`` and ``<=``.

    Operator precedence follows Python.
    """
    # The node is built as a list of (condition, nodelist) pairs; a None
    # condition marks the {% else %} branch.
    # {% if ... %}
    bits = token.split_contents()[1:]
    condition = TemplateIfParser(parser, bits).parse()
    nodelist = parser.parse(("elif", "else", "endif"))
    conditions_nodelists = [(condition, nodelist)]
    token = parser.next_token()
    # {% elif ... %} (repeatable)
    while token.contents.startswith("elif"):
        bits = token.split_contents()[1:]
        condition = TemplateIfParser(parser, bits).parse()
        nodelist = parser.parse(("elif", "else", "endif"))
        conditions_nodelists.append((condition, nodelist))
        token = parser.next_token()
    # {% else %} (optional)
    if token.contents == "else":
        nodelist = parser.parse(("endif",))
        conditions_nodelists.append((None, nodelist))
        token = parser.next_token()
    # {% endif %}
    if token.contents != "endif":
        raise TemplateSyntaxError(
            'Malformed template tag at line {}: "{}"'.format(
                token.lineno, token.contents
            )
        )
    return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
    """
    Check if a value has changed from the last iteration of a loop.

    The ``{% ifchanged %}`` block tag is used within a loop. It has two
    possible uses.

    1. Check its own rendered contents against its previous state and only
       displays the content if it has changed. For example, this displays a
       list of days, only displaying the month if it changes::

            <h1>Archive for {{ year }}</h1>

            {% for date in days %}
                {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
                <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
            {% endfor %}

    2. If given one or more variables, check whether any variable has changed.
       For example, the following shows the date every time it changes, while
       showing the hour if either the hour or the date has changed::

            {% for date in days %}
                {% ifchanged date.date %} {{ date.date }} {% endifchanged %}
                {% ifchanged date.hour date.date %}
                    {{ date.hour }}
                {% endifchanged %}
            {% endfor %}
    """
    bits = token.split_contents()
    nodelist_true = parser.parse(("else", "endifchanged"))
    token = parser.next_token()
    # Optional {% else %} branch, rendered when the value has NOT changed.
    if token.contents == "else":
        nodelist_false = parser.parse(("endifchanged",))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    # With no arguments, values is empty and the node compares its own
    # rendered output instead (use 1 above).
    values = [parser.compile_filter(bit) for bit in bits[1:]]
    return IfChangedNode(nodelist_true, nodelist_false, *values)
def find_library(parser, name):
    """
    Return the tag library registered on ``parser`` under ``name``.

    Raise TemplateSyntaxError listing all registered library names when
    ``name`` is unknown.
    """
    try:
        return parser.libraries[name]
    except KeyError:
        registered = "\n".join(sorted(parser.libraries))
        raise TemplateSyntaxError(
            "'%s' is not a registered tag library. Must be one of:\n%s"
            % (name, registered)
        )
def load_from_library(library, label, names):
    """
    Return a subset of tags and filters from a library.

    ``label`` is used only in the error message when a requested name is
    neither a tag nor a filter in ``library``.
    """
    subset = Library()
    for name in names:
        in_tags = name in library.tags
        in_filters = name in library.filters
        # A name may be both a tag and a filter; copy whichever exist.
        if not (in_tags or in_filters):
            raise TemplateSyntaxError(
                "'%s' is not a valid tag or filter in tag library '%s'"
                % (
                    name,
                    label,
                ),
            )
        if in_tags:
            subset.tags[name] = library.tags[name]
        if in_filters:
            subset.filters[name] = library.filters[name]
    return subset
@register.tag
def load(parser, token):
    """
    Load a custom template tag library into the parser.

    For example, to load the template tags in
    ``django/templatetags/news/photos.py``::

        {% load news.photos %}

    Can also be used to load an individual tag/filter from
    a library::

        {% load byline from news %}
    """
    # token.split_contents() isn't useful here because this tag doesn't accept
    # variable as arguments.
    bits = token.contents.split()
    if len(bits) >= 4 and bits[-2] == "from":
        # from syntax is used; load individual tags from the library
        name = bits[-1]
        lib = find_library(parser, name)
        subset = load_from_library(lib, name, bits[1:-2])
        parser.add_library(subset)
    else:
        # one or more libraries are specified; load and add them to the parser
        for name in bits[1:]:
            lib = find_library(parser, name)
            parser.add_library(lib)
    # The returned node carries no content; loading is a parse-time effect.
    return LoadNode()
@register.tag
def lorem(parser, token):
    """
    Create random Latin text useful for providing test data in templates.

    Usage format::

        {% lorem [count] [method] [random] %}

    ``count`` is a number (or variable) containing the number of paragraphs or
    words to generate (default is 1).

    ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
    plain-text paragraph blocks (default is ``b``).

    ``random`` is the word ``random``, which if given, does not use the common
    paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").

    Examples:

    * ``{% lorem %}`` outputs the common "lorem ipsum" paragraph
    * ``{% lorem 3 p %}`` outputs the common "lorem ipsum" paragraph
      and two random paragraphs each wrapped in HTML ``<p>`` tags
    * ``{% lorem 2 w random %}`` outputs two random latin words
    """
    parts = list(token.split_contents())
    tagname = parts[0]
    # The optional arguments are parsed back-to-front: random, method, count.
    common = parts[-1] != "random"
    if not common:
        parts.pop()
    method = parts.pop() if parts[-1] in ("w", "p", "b") else "b"
    raw_count = parts.pop() if len(parts) > 1 else "1"
    count = parser.compile_filter(raw_count)
    # Only the tag name itself may remain after consuming the arguments.
    if len(parts) != 1:
        raise TemplateSyntaxError("Incorrect format for %r tag" % tagname)
    return LoremNode(count, method, common)
@register.tag
def now(parser, token):
    """
    Display the date, formatted according to the given string.

    Use the same format as PHP's ``date()`` function; see https://php.net/date
    for all the possible values.

    Sample usage::

        It is {% now "jS F Y H:i" %}
    """
    pieces = token.split_contents()
    target_var = None
    # Optional trailing "as varname" stores the result instead of printing it.
    if len(pieces) == 4 and pieces[-2] == "as":
        target_var = pieces[-1]
        pieces = pieces[:-2]
    if len(pieces) != 2:
        raise TemplateSyntaxError("'now' statement takes one argument")
    # Strip the surrounding quote characters from the format-string literal.
    return NowNode(pieces[1][1:-1], target_var)
@register.tag(name="partialdef")
def partialdef_func(parser, token):
    """
    Declare a partial that can be used in the template.

    Usage::

        {% partialdef partial_name %}
        Content goes here.
        {% endpartialdef %}

    Store the nodelist in the context under the key "partials". It can be
    retrieved using the ``{% partial %}`` tag.

    The optional ``inline`` argument renders the partial's contents
    immediately, at the point where it is defined.
    """
    # Structural match on the split token: name plus optional "inline" flag.
    match token.split_contents():
        case "partialdef", partial_name, "inline":
            inline = True
        case "partialdef", partial_name, _:
            raise TemplateSyntaxError(
                "The 'inline' argument does not have any parameters; either use "
                "'inline' or remove it completely."
            )
        case "partialdef", partial_name:
            inline = False
        case ["partialdef"]:
            raise TemplateSyntaxError("'partialdef' tag requires a name")
        case _:
            raise TemplateSyntaxError("'partialdef' tag takes at most 2 arguments")
    # Parse the content until the end tag.
    # The closing tag may optionally repeat the partial name.
    valid_endpartials = ("endpartialdef", f"endpartialdef {partial_name}")
    # token.position may be absent — presumably depends on how the template
    # was tokenized (debug mode); fall back to None source offsets.
    pos_open = getattr(token, "position", None)
    source_start = pos_open[0] if isinstance(pos_open, tuple) else None
    nodelist = parser.parse(valid_endpartials)
    endpartial = parser.next_token()
    if endpartial.contents not in valid_endpartials:
        parser.invalid_block_tag(endpartial, "endpartialdef", valid_endpartials)
    pos_close = getattr(endpartial, "position", None)
    source_end = pos_close[1] if isinstance(pos_close, tuple) else None
    # Store the partial nodelist in the parser.extra_data attribute.
    partials = parser.extra_data.setdefault("partials", {})
    if partial_name in partials:
        raise TemplateSyntaxError(
            f"Partial '{partial_name}' is already defined in the "
            f"'{parser.origin.name}' template."
        )
    partials[partial_name] = PartialTemplate(
        nodelist,
        parser.origin,
        partial_name,
        source_start=source_start,
        source_end=source_end,
    )
    return PartialDefNode(partial_name, inline, nodelist)
@register.tag(name="partial")
def partial_func(parser, token):
    """
    Render a partial previously declared with the ``{% partialdef %}`` tag.

    Usage::

        {% partial partial_name %}
    """
    match token.split_contents():
        case "partial", partial_name:
            extra_data = parser.extra_data
            # DeferredSubDict defers the "partials" lookup — presumably so
            # a partial may be referenced before its definition is parsed.
            partial_mapping = DeferredSubDict(extra_data, "partials")
            return PartialNode(partial_name, partial_mapping=partial_mapping)
        case _:
            raise TemplateSyntaxError("'partial' tag requires a single argument")
@register.simple_tag(name="querystring", takes_context=True)
def querystring(context, *args, **kwargs):
    """
    Build a query string using `args` and `kwargs` arguments.

    This tag constructs a new query string by adding, removing, or modifying
    parameters from the given positional and keyword arguments. Positional
    arguments must be mappings (such as `QueryDict` or `dict`), and
    `request.GET` is used as the starting point if `args` is empty.

    Keyword arguments are treated as an extra, final mapping. These mappings
    are processed sequentially, with later arguments taking precedence.

    Passing `None` as a value removes the corresponding key from the result.
    For iterable values, `None` entries are ignored, but if all values are
    `None`, the key is removed.

    A query string prefixed with `?` is returned.

    Raise TemplateSyntaxError if a positional argument is not a mapping or if
    keys are not strings.

    For example::

        {# Set a parameter on top of `request.GET` #}
        {% querystring foo=3 %}

        {# Remove a key from `request.GET` #}
        {% querystring foo=None %}

        {# Use with pagination #}
        {% querystring page=page_obj.next_page_number %}

        {# Use a custom ``QueryDict`` #}
        {% querystring my_query_dict foo=3 %}

        {# Use multiple positional and keyword arguments #}
        {% querystring my_query_dict my_dict foo=3 bar=None %}
    """
    # Default starting point: the current request's GET parameters.
    if not args:
        args = [context.request.GET]
    params = QueryDict(mutable=True)
    # kwargs are applied last, so they win over the positional mappings.
    for d in [*args, kwargs]:
        if not isinstance(d, Mapping):
            raise TemplateSyntaxError(
                "querystring requires mappings for positional arguments (got "
                "%r instead)." % d
            )
        # QueryDict.lists() preserves multi-valued keys; plain dicts use items().
        items = d.lists() if isinstance(d, QueryDict) else d.items()
        for key, value in items:
            if not isinstance(key, str):
                raise TemplateSyntaxError(
                    "querystring requires strings for mapping keys (got %r "
                    "instead)." % key
                )
            if value is None:
                params.pop(key, None)
            elif isinstance(value, Iterable) and not isinstance(value, str):
                # Drop None values; if no values remain, the key is removed.
                params.setlist(key, [v for v in value if v is not None])
            else:
                params[key] = value
    query_string = params.urlencode() if params else ""
    return f"?{query_string}"
@register.tag
def regroup(parser, token):
    """
    Regroup a list of alike objects by a common attribute.

    This complex tag is best illustrated by use of an example: say that
    ``musicians`` is a list of ``Musician`` objects that have ``name`` and
    ``instrument`` attributes, and you'd like to display a list that
    looks like:

        * Guitar:
            * Django Reinhardt
            * Emily Remler
        * Piano:
            * Lovie Austin
            * Bud Powell
        * Trumpet:
            * Duke Ellington

    The following snippet of template code would accomplish this dubious task::

        {% regroup musicians by instrument as grouped %}
        <ul>
        {% for group in grouped %}
            <li>{{ group.grouper }}
            <ul>
                {% for musician in group.list %}
                <li>{{ musician.name }}</li>
                {% endfor %}
            </ul>
        {% endfor %}
        </ul>

    As you can see, ``{% regroup %}`` populates a variable with a list of
    objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
    item that was grouped by; ``list`` contains the list of objects that share
    that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano``
    and ``Trumpet``, and ``list`` is the list of musicians who play this
    instrument.

    Note that ``{% regroup %}`` does not work when the list to be grouped is
    not sorted by the key you are grouping by! This means that if your list of
    musicians was not sorted by instrument, you'd need to make sure it is
    sorted before using it, i.e.::

        {% regroup musicians|dictsort:"instrument" by instrument as grouped %}
    """
    # Exact shape: {% regroup <target> by <expr> as <var> %} — six bits.
    bits = token.split_contents()
    if len(bits) != 6:
        raise TemplateSyntaxError("'regroup' tag takes five arguments")
    target = parser.compile_filter(bits[1])
    if bits[2] != "by":
        raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
    if bits[4] != "as":
        raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must be 'as'")
    var_name = bits[5]
    # RegroupNode will take each item in 'target', put it in the context under
    # 'var_name', evaluate 'var_name'.'expression' in the current context, and
    # group by the resulting value. After all items are processed, it will
    # save the final result in the context under 'var_name', thus clearing the
    # temporary values. This hack is necessary because the template engine
    # doesn't provide a context-aware equivalent of Python's getattr.
    expression = parser.compile_filter(
        var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]
    )
    return RegroupNode(target, expression, var_name)
@register.tag
def resetcycle(parser, token):
    """
    Reset a cycle tag.

    If an argument is given, reset the last rendered cycle tag whose name
    matches the argument, else reset the last rendered cycle tag (named or
    unnamed).
    """
    bits = token.split_contents()
    if len(bits) > 2:
        raise TemplateSyntaxError("%r tag accepts at most one argument." % bits[0])
    if len(bits) == 2:
        cycle_name = bits[1]
        # The named-cycle registry only exists once a named {% cycle %} has
        # been parsed; a missing attribute means no named cycles at all.
        try:
            named_node = parser._named_cycle_nodes[cycle_name]
        except (AttributeError, KeyError):
            raise TemplateSyntaxError("Named cycle '%s' does not exist." % cycle_name)
        return ResetCycleNode(named_node)
    try:
        return ResetCycleNode(parser._last_cycle_node)
    except AttributeError:
        raise TemplateSyntaxError("No cycles in template.")
@register.tag
def spaceless(parser, token):
    """
    Remove whitespace between HTML tags, including tab and newline characters.

    Example usage::

        {% spaceless %}
            <p>
                <a href="foo/">Foo</a>
            </p>
        {% endspaceless %}

    This example returns this HTML::

        <p><a href="foo/">Foo</a></p>

    Only space between *tags* is normalized -- not space between tags and text.
    In this example, the space around ``Hello`` isn't stripped::

        {% spaceless %}
            <strong>
                Hello
            </strong>
        {% endspaceless %}
    """
    # The tag takes no arguments; the stripping happens in SpacelessNode.
    nodelist = parser.parse(("endspaceless",))
    parser.delete_first_token()
    return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
    """
    Output one of the bits used to compose template tags.

    Since the template system has no concept of "escaping", to display one of
    the bits used in template tags, you must use the ``{% templatetag %}`` tag.

    The argument tells which template bit to output:

        ==================  =======
        Argument            Outputs
        ==================  =======
        ``openblock``       ``{%``
        ``closeblock``      ``%}``
        ``openvariable``    ``{{``
        ``closevariable``   ``}}``
        ``openbrace``       ``{``
        ``closebrace``      ``}``
        ``opencomment``     ``{#``
        ``closecomment``    ``#}``
        ==================  =======
    """
    # token.split_contents() isn't useful here because this tag doesn't accept
    # variable as arguments.
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'templatetag' statement takes one argument")
    tag = bits[1]
    # Valid arguments are the keys of TemplateTagNode.mapping.
    if tag not in TemplateTagNode.mapping:
        raise TemplateSyntaxError(
            "Invalid templatetag argument: '%s'."
            " Must be one of: %s" % (tag, list(TemplateTagNode.mapping))
        )
    return TemplateTagNode(tag)
@register.tag
def url(parser, token):
    r"""
    Return an absolute URL matching the given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% url "url_name" arg1 arg2 %}

        or

        {% url "url_name" name1=value1 name2=value2 %}

    The first argument is a URL pattern name. Other arguments are
    space-separated values that will be filled in place of positional and
    keyword arguments in the URL. Don't mix positional and keyword arguments.
    All arguments for the URL must be present.

    For example, if you have a view ``app_name.views.client_details`` taking
    the client's id and the corresponding line in a URLconf looks like this::

        path(
            'client/<int:id>/',
            views.client_details,
            name='client-detail-view',
        )

    and this app's URLconf is included into the project's URLconf under some
    path::

        path('clients/', include('app_name.urls'))

    then in a template you can create a link for a certain client like this::

        {% url "client-detail-view" client.id %}

    The URL will look like ``/clients/client/123/``.

    The first argument may also be the name of a template variable that will be
    evaluated to obtain the view name or the URL name, e.g.::

        {% with url_name="client-detail-view" %}
            {% url url_name client.id %}
        {% endwith %}
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "'%s' takes at least one argument, a URL pattern name." % bits[0]
        )
    viewname = parser.compile_filter(bits[1])
    args = []
    kwargs = {}
    asvar = None
    bits = bits[2:]
    # Optional trailing "as varname" stores the URL instead of printing it.
    if len(bits) >= 2 and bits[-2] == "as":
        asvar = bits[-1]
        bits = bits[:-2]
    for bit in bits:
        # kwarg_re splits "name=value" arguments; the name group is empty
        # for positional arguments.
        match = kwarg_re.match(bit)
        if not match:
            raise TemplateSyntaxError("Malformed arguments to url tag")
        name, value = match.groups()
        if name:
            kwargs[name] = parser.compile_filter(value)
        else:
            args.append(parser.compile_filter(value))
    return URLNode(viewname, args, kwargs, asvar)
@register.tag
def verbatim(parser, token):
    """
    Stop the template engine from rendering the contents of this block tag.

    Usage::

        {% verbatim %}
            {% don't process this %}
        {% endverbatim %}

    You can also designate a specific closing tag block (allowing the
    unrendered use of ``{% endverbatim %}``)::

        {% verbatim myblock %}
            ...
        {% endverbatim myblock %}
    """
    nodelist = parser.parse(("endverbatim",))
    parser.delete_first_token()
    # The contents are rendered once, at parse time, against an empty
    # Context, and the resulting text is stored as literal output.
    return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
    """
    For creating bar charts and such. Calculate the ratio of a given value to a
    maximum value, and then apply that ratio to a constant.

    For example::

        <img src="bar.png" alt="Bar"
             height="10"
             width="{% widthratio this_value max_value max_width %}">

    If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
    the image in the above example will be 88 pixels wide
    (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).

    In some cases you might want to capture the result of widthratio in a
    variable. It can be useful for instance in a blocktranslate like this::

        {% widthratio this_value max_value max_width as width %}
        {% blocktranslate %}The width is: {{ width }}{% endblocktranslate %}
    """
    bits = token.split_contents()
    # NOTE(review): only exactly 3 or 5 arguments are accepted despite the
    # "at least three arguments" wording in the error below.
    if len(bits) == 4:
        tag, this_value_expr, max_value_expr, max_width = bits
        asvar = None
    elif len(bits) == 6:
        tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits
        if as_ != "as":
            raise TemplateSyntaxError(
                "Invalid syntax in widthratio tag. Expecting 'as' keyword"
            )
    else:
        raise TemplateSyntaxError("widthratio takes at least three arguments")
    return WidthRatioNode(
        parser.compile_filter(this_value_expr),
        parser.compile_filter(max_value_expr),
        parser.compile_filter(max_width),
        asvar=asvar,
    )
@register.tag("with")
def do_with(parser, token):
    """
    Add one or more values to the context (inside of this block) for caching
    and easy access.

    For example::

        {% with total=person.some_sql_method %}
            {{ total }} object{{ total|pluralize }}
        {% endwith %}

    Multiple values can be added to the context::

        {% with foo=1 bar=2 %}
            ...
        {% endwith %}

    The legacy format of ``{% with person.some_sql_method as total %}`` is
    still accepted.
    """
    bits = token.split_contents()
    remaining_bits = bits[1:]
    # token_kwargs consumes parsed assignments from remaining_bits in place;
    # anything left over afterwards is a malformed token.
    extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
    if not extra_context:
        raise TemplateSyntaxError(
            "%r expected at least one variable assignment" % bits[0]
        )
    if remaining_bits:
        raise TemplateSyntaxError(
            "%r received an invalid token: %r" % (bits[0], remaining_bits[0])
        )
    nodelist = parser.parse(("endwith",))
    parser.delete_first_token()
    return WithNode(None, None, nodelist, extra_context=extra_context)
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.query_constructors.tencentvectordb import (
TencentVectorDBTranslator,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated attribute name to the module that now provides it.
DEPRECATED_LOOKUP = {
    "TencentVectorDBTranslator": (
        "langchain_community.query_constructors.tencentvectordb"
    ),
}

# Importer that resolves deprecated names from their new location
# (and, per the comments above, raises deprecation warnings).
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = ["TencentVectorDBTranslator"]
# frozen_string_literal: true
# :markup: markdown
require "action_dispatch/http/mime_type"
module AbstractController
  # Collector provides one method per registered MIME type (e.g. #html,
  # #json), each delegating to +custom+ with the corresponding Mime type.
  module Collector
    # Define a collector method named after the MIME type's symbol that
    # forwards all arguments to +custom+ with that Mime type.
    def self.generate_method_for_mime(mime)
      sym = mime.is_a?(Symbol) ? mime : mime.to_sym
      class_eval <<-RUBY, __FILE__, __LINE__ + 1
        def #{sym}(...)
          custom(Mime[:#{sym}], ...)
        end
      RUBY
    end

    # Pre-generate collector methods for every MIME type registered so far.
    Mime::SET.each do |mime|
      generate_method_for_mime(mime)
    end

    # Keep the collector in sync with MIME types registered later.
    Mime::Type.register_callback do |mime|
      generate_method_for_mime(mime) unless instance_methods.include?(mime.to_sym)
    end

    private
      # Lazily generate and invoke the collector method when the symbol is
      # a known MIME type; otherwise raise a descriptive NoMethodError (or
      # defer to the default method_missing behavior).
      def method_missing(symbol, ...)
        unless mime_constant = Mime[symbol]
          raise NoMethodError, "To respond to a custom format, register it as a MIME type first: " \
            "https://guides.rubyonrails.org/action_controller_advanced_topics.html#restful-downloads. " \
            "If you meant to respond to a variant like :tablet or :phone, not a custom format, " \
            "be sure to nest your variant response within a format response: " \
            "format.html { |html| html.tablet { ... } }"
        end

        if Mime::SET.include?(mime_constant)
          AbstractController::Collector.generate_method_for_mime(mime_constant)
          public_send(symbol, ...)
        else
          super
        end
      end
  end
end
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
import time
# Directory containing this test file; used to resolve the --test-data path.
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(BitcoinTestFramework):
# First block height for which statistics are fetched.
start_height = 101
# Number of additional blocks (beyond start_height) to fetch stats for.
max_stat_pos = 2
# Statistics that, per the name, require -txindex (node 0 runs with it).
STATS_NEED_TXINDEX = [
    'avgfee',
    'avgfeerate',
    'maxfee',
    'maxfeerate',
    'medianfee',
    'feerate_percentiles',
    'minfee',
    'minfeerate',
    'totalfee',
    'utxo_size_inc',
]
def add_options(self, parser):
    """Register CLI options: --gen-test-data to regenerate the JSON
    fixture, --test-data to point at an alternative fixture file."""
    parser.add_argument('--gen-test-data', dest='gen_test_data',
                        default=False, action='store_true',
                        help='Generate test data')
    parser.add_argument('--test-data', dest='test_data',
                        default='data/rpc_getblockstats.json',
                        action='store', metavar='FILE',
                        help='Test data file')
def set_test_params(self):
    """Two nodes on a clean chain: node 0 with -txindex (needed for the
    fee statistics), node 1 with a fixed -paytxfee."""
    self.num_nodes = 2
    self.extra_args = [['-txindex'], ['-paytxfee=0.003']]
    self.setup_clean_chain = True
def get_stats(self):
    """Return getblockstats results for heights start_height through
    start_height + max_stat_pos (inclusive), queried on node 0."""
    first = self.start_height
    heights = range(first, first + self.max_stat_pos + 1)
    return [self.nodes[0].getblockstats(hash_or_height=h) for h in heights]
def generate_test_data(self, filename):
mocktime = time.time()
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(address=self.nodes[1].getnewaddress(), amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=False)
self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.nodes[1].setmocktime(mocktime)
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
expected_stats_noindex = []
for stat_row in stats:
expected_stats_noindex.append({k: v for k, v in stat_row.items() if k not in self.STATS_NEED_TXINDEX})
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Check with the node that has no txindex
stats_no_txindex = self.nodes[1].getblockstats(hash_or_height=blockhash, stats=list(expected_stats_noindex[i].keys()))
assert_equal(stats_no_txindex, expected_stats_noindex[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled',
self.nodes[1].getblockstats, hash_or_height=self.start_height + self.max_stat_pos)
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
if __name__ == '__main__':
    # Entry point: run the functional test via the framework's main().
    GetblockstatsTest().main()
# -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
from io import BytesIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper
from werkzeug._compat import to_native, text_type
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import make_line_iter, \
get_input_stream, get_content_length
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header
#: an iterator that yields empty strings forever; chained after the real
#: line iterator so the multipart parser sees a falsy sentinel at EOF
_empty_string_iter = repeat('')
#: a regular expression for multipart boundaries: up to 200 printable
#: ASCII characters, the last of which must not be a space
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def default_stream_factory(total_content_length, filename, content_type,
                           content_length=None):
    """The default stream factory: spool small uploads to memory, big
    ones to disk.  Bodies over 500 KB go to a temporary file; smaller
    ones are kept in a ``BytesIO`` buffer.

    NOTE(review): the in-module caller passes ``content_type`` and
    ``filename`` in the opposite order from this signature -- harmless
    here because both are ignored, but worth confirming for custom
    factories.
    """
    spool_to_disk = total_content_length > 1024 * 500
    return TemporaryFile('wb+') if spool_to_disk else BytesIO()
def parse_form_data(environ, stream_factory=None, charset='utf-8',
                    errors='replace', max_form_memory_size=None,
                    max_content_length=None, cls=None,
                    silent=True):
    """Parse the form data in *environ* and return it as a tuple in the
    form ``(stream, form, files)``.  Only call this when the transport
    method is `POST`, `PUT`, or `PATCH`.
    For a `multipart/form-data` payload the files multidict is populated
    with `FileStorage` objects.  For an unknown mimetype the wrapped
    input stream is returned unconsumed as the first tuple element and
    the multidicts are empty.
    This is a convenience wrapper around :class:`FormDataParser`; see
    :ref:`dealing-with-request-data` for more details.
    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.
    .. versionadded:: 0.5.1
       The optional `silent` flag was added.
    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted
                                 for in-memory stored form data.  If exceeded,
                                 an :exc:`~exceptions.RequestEntityTooLarge`
                                 exception is raised.
    :param max_content_length: If provided and the transmitted data is longer
                               than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    parser = FormDataParser(stream_factory, charset, errors,
                            max_form_memory_size, max_content_length,
                            cls, silent)
    return parser.parse_from_environ(environ)
def exhaust_stream(f):
    """Decorator that guarantees the wrapped parser drains its stream.

    After the wrapped method returns (or raises) the remaining input is
    consumed: via the stream's own ``exhaust()`` method when available,
    otherwise by reading 64 KB chunks until EOF.
    """
    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            exhaust = getattr(stream, 'exhaust', None)
            if exhaust is not None:
                exhaust()
            else:
                # No exhaust() helper: drain manually in fixed-size chunks.
                while stream.read(1024 * 64):
                    pass
    return update_wrapper(wrapper, f)
class FormDataParser(object):
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.
    .. versionadded:: 0.8
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                                 in-memory stored form data. If the data
                                 exceeds the value specified an
                                 :exc:`~exceptions.RequestEntityTooLarge`
                                 exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """
    def __init__(self, stream_factory=None, charset='utf-8',
                 errors='replace', max_form_memory_size=None,
                 max_content_length=None, cls=None,
                 silent=True):
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if cls is None:
            cls = MultiDict
        self.cls = cls
        self.silent = silent
    def get_parse_func(self, mimetype, options):
        # Return the parse function registered for *mimetype*, or None for
        # unknown types (the stream is then passed through untouched).
        # *options* is unused here but available for subclasses.
        return self.parse_functions.get(mimetype)
    def parse_from_environ(self, environ):
        """Parses the information from the environment as form data.
        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get('CONTENT_TYPE', '')
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype,
                          content_length, options)
    def parse(self, stream, mimetype, content_length, options=None):
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.
        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        # `exceptions` is imported at the bottom of this module (circular
        # import workaround).
        if self.max_content_length is not None and \
           content_length is not None and \
           content_length > self.max_content_length:
            raise exceptions.RequestEntityTooLarge()
        if options is None:
            options = {}
        parse_func = self.get_parse_func(mimetype, options)
        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype,
                                  content_length, options)
            except ValueError:
                # Parse errors are swallowed unless silent=False.
                if not self.silent:
                    raise
        return stream, self.cls(), self.cls()
    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        # Delegate to MultiPartParser; the boundary comes from the
        # Content-Type header options and must be ASCII bytes.
        parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
                                 max_form_memory_size=self.max_form_memory_size,
                                 cls=self.cls)
        boundary = options.get('boundary')
        if boundary is None:
            raise ValueError('Missing boundary')
        if isinstance(boundary, text_type):
            boundary = boundary.encode('ascii')
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files
    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        # Reject oversized urlencoded bodies before decoding anything.
        if self.max_form_memory_size is not None and \
           content_length is not None and \
           content_length > self.max_form_memory_size:
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset,
                                 errors=self.errors, cls=self.cls)
        return stream, form, self.cls()
    #: mapping of mimetypes to parsing functions
    # NOTE: these are plain functions, not bound methods; `parse` calls
    # them with `self` passed explicitly.
    parse_functions = {
        'multipart/form-data': _parse_multipart,
        'application/x-www-form-urlencoded': _parse_urlencoded,
        'application/x-url-encoded': _parse_urlencoded
    }
def is_valid_multipart_boundary(boundary):
    """Return ``True`` if *boundary* may be used as a multipart boundary."""
    # The precompiled module-level pattern enforces the length/charset rule.
    return bool(_multipart_boundary_re.match(boundary))
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ['\r\n', b'\r\n']:
return line[:-2], True
elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol). The iterable has to be newline terminated.
    The iterable will stop at the line where the headers ended so it can be
    further consumed.
    :param iterable: iterable of strings that are newline terminated
    """
    result = []
    for raw_line in iterable:
        line, line_terminated = _line_parse(to_native(raw_line))
        if not line_terminated:
            raise ValueError('unexpected end of line in multipart header')
        if not line:
            # Blank line: end of the header block.
            break
        if line[0] in ' \t' and result:
            # Continuation line: fold into the previous header's value.
            key, value = result[-1]
            result[-1] = (key, value + '\n ' + line[1:])
            continue
        parts = line.split(':', 1)
        if len(parts) == 2:
            result.append((parts[0].strip(), parts[1].strip()))
    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyways.
    return Headers(result)
# Event tags yielded by MultiPartParser.parse_lines to describe the part
# stream: a form field starts, a file part starts, body data continues,
# or the current part ends.
_begin_form = 'begin_form'
_begin_file = 'begin_file'
_cont = 'cont'
_end = 'end'
class MultiPartParser(object):
def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
max_form_memory_size=None, cls=None, buffer_size=64 * 1024):
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
if stream_factory is None:
stream_factory = default_stream_factory
if cls is None:
cls = MultiDict
self.cls = cls
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
self.buffer_size = buffer_size
def _fix_ie_filename(self, filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ':\\' or filename[:2] == '\\\\':
return filename.split('\\')[-1]
return filename
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return b''
def fail(self, message):
raise ValueError(message)
def get_part_encoding(self, headers):
transfer_encoding = headers.get('content-transfer-encoding')
if transfer_encoding is not None and \
transfer_encoding in _supported_multipart_encodings:
return transfer_encoding
def get_part_charset(self, headers):
# Figure out input charset for current part
content_type = headers.get('content-type')
if content_type:
mimetype, ct_params = parse_options_header(content_type)
return ct_params.get('charset', self.charset)
return self.charset
def start_file_streaming(self, filename, headers, total_content_length):
if isinstance(filename, bytes):
filename = filename.decode(self.charset, self.errors)
filename = self._fix_ie_filename(filename)
content_type = headers.get('content-type')
try:
content_length = int(headers['content-length'])
except (KeyError, ValueError):
content_length = 0
container = self.stream_factory(total_content_length, content_type,
filename, content_length)
return filename, container
def in_memory_threshold_reached(self, bytes):
raise exceptions.RequestEntityTooLarge()
def validate_boundary(self, boundary):
if not boundary:
self.fail('Missing boundary')
if not is_valid_multipart_boundary(boundary):
self.fail('Invalid boundary: %s' % boundary)
if len(boundary) > self.buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happens is for non debug builds where
# the assert is skipped.
self.fail('Boundary longer than buffer size')
def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
"""Generate parts of
``('begin_form', (headers, name))``
``('begin_file', (headers, name, filename))``
``('cont', bytestring)``
``('end', None)``
Always obeys the grammar
parts = ( begin_form cont* end |
begin_file cont* end )*
"""
next_part = b'--' + boundary
last_part = next_part + b'--'
iterator = chain(make_line_iter(file, limit=content_length,
buffer_size=self.buffer_size,
cap_at_buffer=cap_at_buffer),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator == last_part:
return
elif terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
yield _begin_form, (headers, name)
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
yield _begin_file, (headers, name, filename)
buf = b''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == b'--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
if transfer_encoding == 'base64':
transfer_encoding = 'base64_codec'
try:
line = codecs.decode(line, transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
yield _cont, buf
buf = b''
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
# if something else than a newline is in there we write it
# out.
if line[-2:] == b'\r\n':
buf = b'\r\n'
cutoff = -2
else:
buf = line[-1:]
cutoff = -1
yield _cont, line[:cutoff]
else: # pragma: no cover
raise ValueError('unexpected end of part')
# if we have a leftover in the buffer that is not a newline
# character we have to flush it, otherwise we will chop of
# certain values.
if buf not in (b'', b'\r', b'\n', b'\r\n'):
yield _cont, buf
yield _end, None
def parse_parts(self, file, boundary, content_length):
"""Generate ``('file', (name, val))`` and
``('form', (name, val))`` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield ('file',
(name, FileStorage(container, filename, name,
headers=headers)))
else:
part_charset = self.get_part_charset(headers)
yield ('form',
(name, b''.join(container).decode(
part_charset, self.errors)))
def parse(self, file, boundary, content_length):
formstream, filestream = tee(
self.parse_parts(file, boundary, content_length), 2)
form = (p[1] for p in formstream if p[0] == 'form')
files = (p[1] for p in filestream if p[0] == 'file')
return self.cls(form), self.cls(files)
from werkzeug import exceptions | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.engine.jetty
import io.ktor.client.engine.*
import org.eclipse.jetty.http2.client.*
import org.eclipse.jetty.util.ssl.*
/**
 * A configuration for the [Jetty] client engine.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.jetty.JettyEngineConfig)
 */
public class JettyEngineConfig : HttpClientEngineConfig() {
    // Accumulated configuration applied to the raw HTTP2Client instance.
    internal var config: (HTTP2Client) -> Unit = {}
    /**
     * Allows you to configure [SSL](https://ktor.io/docs/client-ssl.html) settings for this engine.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.jetty.JettyEngineConfig.sslContextFactory)
     */
    public var sslContextFactory: SslContextFactory = SslContextFactory.Client()
    /**
     * Specifies the size of cache that keeps recently used [JettyHttp2Engine] instances.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.jetty.JettyEngineConfig.clientCacheSize)
     */
    public var clientCacheSize: Int = 10
    /**
     * Configures a raw Jetty client.
     * Successive calls compose: earlier blocks run before later ones.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.jetty.JettyEngineConfig.configureClient)
     */
    public fun configureClient(block: (HTTP2Client) -> Unit) {
        val previous = config
        config = { client ->
            previous(client)
            block(client)
        }
    }
}
#![allow(non_snake_case)]
use std::ffi::{CStr, CString};
use std::num::NonZero;
use std::ptr;
use std::string::FromUtf8Error;
use libc::c_uint;
use rustc_abi::{AddressSpace, Align, Size, WrappingRange};
use rustc_llvm::RustString;
pub(crate) use self::CallConv::*;
pub(crate) use self::CodeGenOptSize::*;
pub(crate) use self::conversions::*;
pub(crate) use self::ffi::*;
pub(crate) use self::metadata_kind::*;
use crate::common::AsCCharPtr;
mod conversions;
pub(crate) mod diagnostic;
pub(crate) mod enzyme_ffi;
mod ffi;
mod metadata_kind;
pub(crate) use self::enzyme_ffi::*;
impl LLVMRustResult {
    /// Converts the FFI-level success/failure enum into a standard
    /// `Result` so callers can use `?` and the usual combinators.
    pub(crate) fn into_result(self) -> Result<(), ()> {
        match self {
            LLVMRustResult::Success => Ok(()),
            LLVMRustResult::Failure => Err(()),
        }
    }
}
/// Attaches `attrs` to `llfn` at the given place (function, return value,
/// or a specific argument).
pub(crate) fn AddFunctionAttributes<'ll>(
    llfn: &'ll Value,
    idx: AttributePlace,
    attrs: &[&'ll Attribute],
) {
    unsafe {
        LLVMRustAddFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
    }
}
/// Returns `true` if `llfn` carries a string attribute named `name`.
pub(crate) fn HasStringAttribute<'ll>(llfn: &'ll Value, name: &str) -> bool {
    unsafe { LLVMRustHasFnAttribute(llfn, name.as_c_char_ptr(), name.len()) }
}
/// Removes the string attribute named `name` from `llfn`.
pub(crate) fn RemoveStringAttrFromFn<'ll>(llfn: &'ll Value, name: &str) {
    unsafe { LLVMRustRemoveFnAttribute(llfn, name.as_c_char_ptr(), name.len()) }
}
/// Attaches `attrs` to `callsite` at the given place.
pub(crate) fn AddCallSiteAttributes<'ll>(
    callsite: &'ll Value,
    idx: AttributePlace,
    attrs: &[&'ll Attribute],
) {
    unsafe {
        LLVMRustAddCallSiteAttributes(callsite, idx.as_uint(), attrs.as_ptr(), attrs.len());
    }
}
/// Creates an LLVM string attribute of the form `attr="value"`.
pub(crate) fn CreateAttrStringValue<'ll>(
    llcx: &'ll Context,
    attr: &str,
    value: &str,
) -> &'ll Attribute {
    unsafe {
        LLVMCreateStringAttribute(
            llcx,
            attr.as_c_char_ptr(),
            attr.len().try_into().unwrap(),
            value.as_c_char_ptr(),
            value.len().try_into().unwrap(),
        )
    }
}
/// Creates a valueless LLVM string attribute `attr` (null value pointer,
/// zero length).
pub(crate) fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute {
    unsafe {
        LLVMCreateStringAttribute(
            llcx,
            attr.as_c_char_ptr(),
            attr.len().try_into().unwrap(),
            std::ptr::null(),
            0,
        )
    }
}
/// FFI wrapper: creates an `align(bytes)` attribute.
pub(crate) fn CreateAlignmentAttr(llcx: &Context, bytes: u64) -> &Attribute {
    unsafe { LLVMRustCreateAlignmentAttr(llcx, bytes) }
}
/// FFI wrapper: creates a `dereferenceable(bytes)` attribute.
pub(crate) fn CreateDereferenceableAttr(llcx: &Context, bytes: u64) -> &Attribute {
    unsafe { LLVMRustCreateDereferenceableAttr(llcx, bytes) }
}
/// FFI wrapper: creates a `dereferenceable_or_null(bytes)` attribute.
pub(crate) fn CreateDereferenceableOrNullAttr(llcx: &Context, bytes: u64) -> &Attribute {
    unsafe { LLVMRustCreateDereferenceableOrNullAttr(llcx, bytes) }
}
/// FFI wrapper: creates a `byval(ty)` attribute.
pub(crate) fn CreateByValAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
    unsafe { LLVMRustCreateByValAttr(llcx, ty) }
}
/// FFI wrapper: creates an `sret(ty)` attribute.
pub(crate) fn CreateStructRetAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
    unsafe { LLVMRustCreateStructRetAttr(llcx, ty) }
}
/// FFI wrapper: creates a `uwtable` attribute (`async_` selects the variant).
pub(crate) fn CreateUWTableAttr(llcx: &Context, async_: bool) -> &Attribute {
    unsafe { LLVMRustCreateUWTableAttr(llcx, async_) }
}
/// FFI wrapper: creates an `allocsize(size_arg)` attribute.
pub(crate) fn CreateAllocSizeAttr(llcx: &Context, size_arg: u32) -> &Attribute {
    unsafe { LLVMRustCreateAllocSizeAttr(llcx, size_arg) }
}
/// FFI wrapper: creates an `allockind(...)` attribute from the given flags.
pub(crate) fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> &Attribute {
    unsafe { LLVMRustCreateAllocKindAttr(llcx, kind_arg.bits()) }
}
/// Creates a `range(...)` attribute for an integer of width `size` whose
/// value is known to lie in `range` (a possibly wrapping interval).
pub(crate) fn CreateRangeAttr(llcx: &Context, size: Size, range: WrappingRange) -> &Attribute {
    let lower = range.start;
    // LLVM treats the upper bound as exclusive, but allows wrapping.
    let upper = range.end.wrapping_add(1);
    // Pass each `u128` endpoint value as a `[u64; 2]` array, least-significant part first.
    let as_u64_array = |x: u128| [x as u64, (x >> 64) as u64];
    let lower_words: [u64; 2] = as_u64_array(lower);
    let upper_words: [u64; 2] = as_u64_array(upper);
    // To ensure that LLVM doesn't try to read beyond the `[u64; 2]` arrays,
    // we must explicitly check that `size_bits` does not exceed 128.
    let size_bits = size.bits();
    assert!(size_bits <= 128);
    // More robust assertions that are redundant with `size_bits <= 128` and
    // should be optimized away.
    assert!(size_bits.div_ceil(64) <= u64::try_from(lower_words.len()).unwrap());
    assert!(size_bits.div_ceil(64) <= u64::try_from(upper_words.len()).unwrap());
    let size_bits = c_uint::try_from(size_bits).unwrap();
    unsafe {
        LLVMRustCreateRangeAttribute(llcx, size_bits, lower_words.as_ptr(), upper_words.as_ptr())
    }
}
/// Where an LLVM attribute is attached: the return value, a specific
/// (0-based) argument, or the function itself.
#[derive(Copy, Clone)]
pub(crate) enum AttributePlace {
    ReturnValue,
    Argument(u32),
    Function,
}
impl AttributePlace {
pub(crate) fn as_uint(self) -> c_uint {
match self {
AttributePlace::ReturnValue => 0,
AttributePlace::Argument(i) => 1 + i,
AttributePlace::Function => !0,
}
}
}
/// Size-optimization level handed to LLVM codegen; the `#[repr(C)]`
/// discriminants mirror the values expected on the C++ side.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub(crate) enum CodeGenOptSize {
    CodeGenOptSizeNone = 0,
    CodeGenOptSizeDefault = 1,
    CodeGenOptSizeAggressive = 2,
}
/// FFI wrapper: sets the calling convention on a call/invoke instruction.
pub(crate) fn SetInstructionCallConv(instr: &Value, cc: CallConv) {
    unsafe {
        LLVMSetInstructionCallConv(instr, cc as c_uint);
    }
}
/// FFI wrapper: sets the calling convention on a function.
pub(crate) fn SetFunctionCallConv(fn_: &Value, cc: CallConv) {
    unsafe {
        LLVMSetFunctionCallConv(fn_, cc as c_uint);
    }
}
// Externally visible symbols that might appear in multiple codegen units need to appear in
// their own comdat section so that the duplicates can be discarded at link time. This can for
// example happen for generics when using multiple codegen units. This function simply uses the
// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
// function.
// For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52
pub(crate) fn SetUniqueComdat(llmod: &Module, val: &Value) {
    let name_buf = get_value_name(val);
    // The name buffer may or may not carry a trailing NUL; try the
    // NUL-terminated constructor first and fall back to plain bytes.
    let name =
        CString::from_vec_with_nul(name_buf).or_else(|buf| CString::new(buf.into_bytes())).unwrap();
    set_comdat(llmod, val, &name);
}
/// FFI wrapper: sets the `unnamed_addr` property of a global.
pub(crate) fn set_unnamed_address(global: &Value, unnamed: UnnamedAddr) {
    LLVMSetUnnamedAddress(global, unnamed);
}
/// FFI wrapper: sets the thread-local storage mode of a global.
pub(crate) fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) {
    unsafe {
        LLVMSetThreadLocalMode(global, mode);
    }
}
impl AttributeKind {
    /// Create an LLVM Attribute with no associated value.
    pub(crate) fn create_attr(self, llcx: &Context) -> &Attribute {
        unsafe { LLVMRustCreateAttrNoValue(llcx, self) }
    }
}
impl MemoryEffects {
    /// Create an LLVM Attribute with these memory effects.
    pub(crate) fn create_attr(self, llcx: &Context) -> &Attribute {
        unsafe { LLVMRustCreateMemoryEffectsAttr(llcx, self) }
    }
}
/// FFI wrapper: places `llglobal` in the object-file section `section_name`.
pub(crate) fn set_section(llglobal: &Value, section_name: &CStr) {
    unsafe {
        LLVMSetSection(llglobal, section_name.as_ptr());
    }
}
/// FFI wrapper: adds a new global of type `ty` named `name_cstr` to `llmod`.
pub(crate) fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name_cstr: &CStr) -> &'a Value {
    unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
}
/// FFI wrapper: sets the initializer of a global.
pub(crate) fn set_initializer(llglobal: &Value, constant_val: &Value) {
    unsafe {
        LLVMSetInitializer(llglobal, constant_val);
    }
}
/// FFI wrapper: marks (or unmarks) a global as `constant`.
pub(crate) fn set_global_constant(llglobal: &Value, is_constant: bool) {
    LLVMSetGlobalConstant(llglobal, is_constant.to_llvm_bool());
}
/// FFI wrapper: reads the linkage of a global value.
pub(crate) fn get_linkage(llglobal: &Value) -> Linkage {
    unsafe { LLVMGetLinkage(llglobal) }.to_rust()
}
/// FFI wrapper: sets the linkage of a global value.
pub(crate) fn set_linkage(llglobal: &Value, linkage: Linkage) {
    unsafe {
        LLVMSetLinkage(llglobal, linkage);
    }
}
/// FFI wrapper: returns whether the global is only a declaration in this module.
pub(crate) fn is_declaration(llglobal: &Value) -> bool {
    unsafe { LLVMIsDeclaration(llglobal) }.is_true()
}
/// FFI wrapper: reads the symbol visibility of a global value.
pub(crate) fn get_visibility(llglobal: &Value) -> Visibility {
    unsafe { LLVMGetVisibility(llglobal) }.to_rust()
}
/// FFI wrapper: sets the symbol visibility of a global value.
pub(crate) fn set_visibility(llglobal: &Value, visibility: Visibility) {
    unsafe {
        LLVMSetVisibility(llglobal, visibility);
    }
}
/// FFI wrapper: sets the alignment of a global value (in bytes).
pub(crate) fn set_alignment(llglobal: &Value, align: Align) {
    unsafe {
        ffi::LLVMSetAlignment(llglobal, align.bytes() as c_uint);
    }
}
/// FFI wrapper: sets the `externally_initialized` flag of a global.
pub(crate) fn set_externally_initialized(llglobal: &Value, is_ext_init: bool) {
    LLVMSetExternallyInitialized(llglobal, is_ext_init.to_llvm_bool());
}
/// Get the `name`d comdat from `llmod` and assign it to `llglobal`.
///
/// Inserts the comdat into `llmod` if it does not exist.
/// It is an error to call this if the target does not support comdat.
pub(crate) fn set_comdat(llmod: &Module, llglobal: &Value, name: &CStr) {
    unsafe {
        let comdat = LLVMGetOrInsertComdat(llmod, name.as_ptr());
        LLVMSetComdat(llglobal, comdat);
    }
}
/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
/// Panics (instead of invoking UB) when `index` is out of bounds.
pub(crate) fn get_param(llfn: &Value, index: c_uint) -> &Value {
    unsafe {
        assert!(
            index < LLVMCountParams(llfn),
            "out of bounds argument access: {} out of {} arguments",
            index,
            LLVMCountParams(llfn)
        );
        LLVMGetParam(llfn, index)
    }
}
/// Safe wrapper for `LLVMGetValueName2`
/// Needs to allocate the value, because `set_value_name` will invalidate
/// the pointer.
pub(crate) fn get_value_name(value: &Value) -> Vec<u8> {
    unsafe {
        let mut len = 0;
        let data = LLVMGetValueName2(value, &mut len);
        // Copy out of the LLVM-owned buffer before it can be invalidated.
        std::slice::from_raw_parts(data.cast(), len).to_vec()
    }
}
/// A non-zero LLVM intrinsic ID, obtained by name lookup.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Intrinsic {
    id: NonZero<c_uint>,
}
impl Intrinsic {
    /// Looks up the intrinsic ID for `name`; returns `None` when LLVM
    /// reports no such intrinsic (ID 0).
    pub(crate) fn lookup(name: &[u8]) -> Option<Self> {
        let id = unsafe { LLVMLookupIntrinsicID(name.as_c_char_ptr(), name.len()) };
        NonZero::new(id).map(|id| Self { id })
    }
    /// Gets (or inserts) the declaration of this intrinsic in `llmod`,
    /// instantiated with the given type parameters.
    pub(crate) fn get_declaration<'ll>(
        self,
        llmod: &'ll Module,
        type_params: &[&'ll Type],
    ) -> &'ll Value {
        unsafe {
            LLVMGetIntrinsicDeclaration(llmod, self.id, type_params.as_ptr(), type_params.len())
        }
    }
}
/// Safe wrapper for `LLVMSetValueName2` from a byte slice
pub(crate) fn set_value_name(value: &Value, name: &[u8]) {
    unsafe {
        let data = name.as_c_char_ptr();
        LLVMSetValueName2(value, data, name.len());
    }
}
/// Collects the bytes written by `f` into a `String`, failing on invalid UTF-8.
pub(crate) fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
    String::from_utf8(RustString::build_byte_buffer(f))
}
/// Collects the bytes written by `f` into a raw byte buffer.
pub(crate) fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
    RustString::build_byte_buffer(f)
}
/// Renders an LLVM `Twine` to an owned `String`.
pub(crate) fn twine_to_string(tr: &Twine) -> String {
    unsafe {
        build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
    }
}
/// Fetches (and clears) the last error recorded on the Rust/LLVM FFI
/// boundary, if any. Frees the C-side buffer after copying it out.
pub(crate) fn last_error() -> Option<String> {
    unsafe {
        let cstr = LLVMRustGetLastError();
        if cstr.is_null() {
            None
        } else {
            let err = CStr::from_ptr(cstr).to_bytes();
            let err = String::from_utf8_lossy(err).to_string();
            libc::free(cstr as *mut _);
            Some(err)
        }
    }
}
/// Owning pointer to an [`OperandBundle`] that will dispose of the bundle
/// when dropped.
pub(crate) struct OperandBundleBox<'a> {
    raw: ptr::NonNull<OperandBundle<'a>>,
}
impl<'a> OperandBundleBox<'a> {
    /// Creates a new operand bundle with the given tag `name` and values.
    pub(crate) fn new(name: &str, vals: &[&'a Value]) -> Self {
        let raw = unsafe {
            LLVMCreateOperandBundle(
                name.as_c_char_ptr(),
                name.len(),
                vals.as_ptr(),
                vals.len() as c_uint,
            )
        };
        Self { raw: ptr::NonNull::new(raw).unwrap() }
    }
    /// Dereferences to the underlying `&OperandBundle`.
    ///
    /// This can't be a `Deref` implementation because `OperandBundle` transitively
    /// contains an extern type, which is incompatible with `Deref::Target: ?Sized`.
    pub(crate) fn as_ref(&self) -> &OperandBundle<'a> {
        // SAFETY: The returned reference is opaque and can only used for FFI.
        // It is valid for as long as `&self` is.
        unsafe { self.raw.as_ref() }
    }
}
impl Drop for OperandBundleBox<'_> {
    fn drop(&mut self) {
        // Pairs with `LLVMCreateOperandBundle` in `new`; prevents a leak.
        unsafe {
            LLVMDisposeOperandBundle(self.raw);
        }
    }
}
/// Adds an integer-valued module flag to `module`, with the given behavior
/// used to reconcile conflicting values when modules are merged/linked.
pub(crate) fn add_module_flag_u32(
    module: &Module,
    merge_behavior: ModuleFlagMergeBehavior,
    key: &str,
    value: u32,
) {
    unsafe {
        LLVMRustAddModuleFlagU32(module, merge_behavior, key.as_c_char_ptr(), key.len(), value);
    }
}
/// Adds a string-valued module flag to `module`; see [`add_module_flag_u32`].
pub(crate) fn add_module_flag_str(
    module: &Module,
    merge_behavior: ModuleFlagMergeBehavior,
    key: &str,
    value: &str,
) {
    unsafe {
        LLVMRustAddModuleFlagString(
            module,
            merge_behavior,
            key.as_c_char_ptr(),
            key.len(),
            value.as_c_char_ptr(),
            value.len(),
        );
    }
}
/// Marks `v` with the `dllimport` DLL storage class.
pub(crate) fn set_dllimport_storage_class<'ll>(v: &'ll Value) {
    unsafe {
        LLVMSetDLLStorageClass(v, DLLStorageClass::DllImport);
    }
}
/// Marks `v` as DSO-local.
pub(crate) fn set_dso_local<'ll>(v: &'ll Value) {
    unsafe {
        LLVMRustSetDSOLocal(v, true);
    }
}
/// Safe wrapper for `LLVMAppendModuleInlineAsm`, which delegates to
/// `Module::appendModuleInlineAsm`.
pub(crate) fn append_module_inline_asm<'ll>(llmod: &'ll Module, asm: &[u8]) {
    unsafe {
        LLVMAppendModuleInlineAsm(llmod, asm.as_ptr(), asm.len());
    }
}
/// Safe wrapper for `LLVMAddAlias2`
pub(crate) fn add_alias<'ll>(
    module: &'ll Module,
    ty: &Type,
    address_space: AddressSpace,
    aliasee: &Value,
    name: &CStr,
) -> &'ll Value {
    unsafe { LLVMAddAlias2(module, ty, address_space.0, aliasee, name.as_ptr()) }
}
# Copyright (c) 2005-2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from sheet.parser.parse_node import ParseNode
from sheet.parser.fl_cell_reference_parse_node import FLCellReferenceParseNode
from sheet.parser.fl_reference_parse_node import FLReferenceParseNode
class FLCellReferenceParseNodeTest(unittest.TestCase):
    """Tests for FLCellReferenceParseNode: parse nodes for cell references
    such as A1, $A$1 and SheetName!A1 ($ marks an absolute row/column)."""

    def testConstructor(self):
        flCellReference = FLCellReferenceParseNode(["A1"])
        self.assertTrue(isinstance(flCellReference, FLReferenceParseNode), 'should be a parse node')
        # assertEqual, not the deprecated assertEquals alias (removed in Python 3.12).
        self.assertEqual(flCellReference.type, ParseNode.FL_CELL_REFERENCE, "Node was of the wrong type")
        self.assertEqual(flCellReference.children, ["A1"], "Node had the wrong children")

    def testStr(self):
        node = FLCellReferenceParseNode(["a1"])
        self.assertEqual(str(node), "<FLCellReferenceParseNode type=\"FL_CELL_REFERENCE\" children=['a1']>", "Wrong string representation")

    def testColAbsolute(self):
        self.assertFalse(FLCellReferenceParseNode(["A1"]).colAbsolute, "Incorrect colAbsolute for A1")
        self.assertFalse(FLCellReferenceParseNode(["A$1"]).colAbsolute, "Incorrect colAbsolute for A$1")
        self.assertTrue(FLCellReferenceParseNode(["$A1"]).colAbsolute, "Incorrect colAbsolute for $A1")
        self.assertTrue(FLCellReferenceParseNode(["$A$1"]).colAbsolute, "Incorrect colAbsolute for $A$1")
        self.assertFalse(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).colAbsolute,
                         "Incorrect colAbsolute for A1 with worksheet")
        self.assertTrue(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).colAbsolute,
                        "Incorrect colAbsolute for $A$1 with worksheet")

    def testRowAbsolute(self):
        self.assertFalse(FLCellReferenceParseNode(["A1"]).rowAbsolute, "Incorrect rowAbsolute for A1")
        self.assertTrue(FLCellReferenceParseNode(["A$1"]).rowAbsolute, "Incorrect rowAbsolute for A$1")
        self.assertFalse(FLCellReferenceParseNode(["$A1"]).rowAbsolute, "Incorrect rowAbsolute for $A1")
        self.assertTrue(FLCellReferenceParseNode(["$A$1"]).rowAbsolute, "Incorrect rowAbsolute for $A$1")
        # Failure messages fixed: these assertions check rowAbsolute, not colAbsolute.
        self.assertFalse(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).rowAbsolute,
                         "Incorrect rowAbsolute for A1 with worksheet")
        self.assertTrue(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).rowAbsolute,
                        "Incorrect rowAbsolute for $A$1 with worksheet")

    def testPlainCellName(self):
        self.assertEqual(FLCellReferenceParseNode(["A1"]).plainCellName, "A1", "Incorrect plainCellName for A1")
        self.assertEqual(FLCellReferenceParseNode(["A$1"]).plainCellName, "A1", "Incorrect plainCellName for A$1")
        self.assertEqual(FLCellReferenceParseNode(["$A1"]).plainCellName, "A1", "Incorrect plainCellName for $A1")
        self.assertEqual(FLCellReferenceParseNode(["$A$1"]).plainCellName, "A1", "Incorrect plainCellName for $A$1")
        self.assertEqual(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).plainCellName, "A1",
                         "Incorrect plainCellName for A1 with worksheet")
        self.assertEqual(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).plainCellName, "A1",
                         "Incorrect plainCellName for $A$1 with worksheet")

    def testRegisteredWithParse(self):
        "test registered with ParseNode"
        self.assertEqual(type(ParseNode.construct_node(ParseNode.FL_CELL_REFERENCE, ['A1'])), FLCellReferenceParseNode,
                         "Class is not registered with ParseNode")

    def testCellProperty(self):
        node = FLCellReferenceParseNode(["G8 "])
        self.assertEqual(node.localReference, "G8 ", "cellref wrong")
        node = FLCellReferenceParseNode(["Sheet1", "!", "G8 "])
        self.assertEqual(node.localReference, "G8 ", "cellref wrong")
        node = FLCellReferenceParseNode(["G8 "])
        node.localReference = "F5"
        self.assertEqual(node.localReference, "F5", "should discard whitespace")
        node = FLCellReferenceParseNode(["G8 "])
        node.localReference = "F5 "
        self.assertEqual(node.localReference, "F5 ", "should not pile whitespace")

    def testCanonicalise(self):
        node = FLCellReferenceParseNode(["bertie ", "!", "a1 "])
        node.canonicalise(['Bertie'])
        self.assertEqual(node.localReference, 'A1 ')
        self.assertEqual(node.worksheetReference, 'Bertie')

    def testOffset(self):
        node = FLCellReferenceParseNode(["G8 "])
        node.offset(1, 4)
        self.assertEqual(node.localReference, "H12 ", "offset didnt work")
        # Offsets that move the reference off the sheet become "#Invalid!".
        node = FLCellReferenceParseNode(["G8 "])
        node.offset(-7, 1)
        self.assertEqual(node.localReference, "#Invalid! ", "offset didnt work")
        node = FLCellReferenceParseNode(["G8 "])
        node.offset(1, -8)
        self.assertEqual(node.localReference, "#Invalid! ", "offset didnt work")
        node = FLCellReferenceParseNode(["G8 "])
        node.offset(-6, -7)
        self.assertEqual(node.localReference, "A1 ", "offset didnt work")
        # Absolute ($) rows/columns are pinned unless move_absolute is passed.
        node = FLCellReferenceParseNode(["$G8 "])
        node.offset(-6, -7)
        self.assertEqual(node.localReference, "$G1 ", "offset didnt work")
        node = FLCellReferenceParseNode(["G$8 "])
        node.offset(-6, -7)
        self.assertEqual(node.localReference, "A$8 ", "offset didnt work")
        node = FLCellReferenceParseNode(["$G$8 "])
        node.offset(-6, -7)
        self.assertEqual(node.localReference, "$G$8 ", "offset didnt work")
        node = FLCellReferenceParseNode(["$G$8 "])
        node.offset(-6, -7, move_absolute=True)
        self.assertEqual(node.localReference, "$A$1 ", "offset didnt work")
        node = FLCellReferenceParseNode(["ZZZ9 "])
        node.offset(1, -1)
        self.assertEqual(node.localReference, "#Invalid! ", "offset didnt work")

    def testCoords(self):
        node = FLCellReferenceParseNode(["A2"])
        self.assertEqual(node.coords, (1, 2))
        node = FLCellReferenceParseNode(["B1"])
        self.assertEqual(node.coords, (2, 1))
use crate::spec::{
Arch, Cc, CodeModel, LinkerFlavor, Lld, Os, PanicStrategy, RelocModel, SanitizerSet, Target,
TargetMetadata, TargetOptions, cvs,
};
/// Target spec for 64-bit RISC-V running the NuttX RTOS.
pub(crate) fn target() -> Target {
    Target {
        // Little-endian, 64-bit pointers, 128-bit-aligned i128.
        data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
        metadata: TargetMetadata {
            description: None,
            tier: Some(3),
            host_tools: None,
            std: Some(true),
        },
        llvm_target: "riscv64".into(),
        pointer_width: 64,
        arch: Arch::RiscV64,
        options: TargetOptions {
            families: cvs!["unix"],
            os: Os::NuttX,
            // Link with the bundled LLD through a GNU-flavored (non-cc) driver.
            linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
            linker: Some("rust-lld".into()),
            cpu: "generic-rv64".into(),
            max_atomic_width: Some(64),
            // RV64 with the M (mul/div), A (atomics) and C (compressed) extensions.
            features: "+m,+a,+c".into(),
            // lp64: the integer calling convention (no FP registers in the ABI).
            llvm_abiname: "lp64".into(),
            panic_strategy: PanicStrategy::Abort,
            relocation_model: RelocModel::Static,
            code_model: Some(CodeModel::Medium),
            emit_debug_gdb_scripts: false,
            eh_frame_header: false,
            supported_sanitizers: SanitizerSet::KERNELADDRESS,
            ..Default::default()
        },
    }
}
#![feature(core_intrinsics, alloc_error_handler, lang_items)]
#![no_std]
#![no_main]
#![allow(internal_features)]
extern crate alloc;
extern crate alloc_system;
use alloc::boxed::Box;
use alloc_system::System;
// Route every heap allocation through the system allocator.
#[global_allocator]
static ALLOC: System = System;
#[link(name = "c")]
extern "C" {
    // libc's puts; expects a NUL-terminated string.
    fn puts(s: *const u8) -> i32;
}
// In a no_std binary we must supply the panic machinery ourselves.
#[panic_handler]
fn panic_handler(_: &core::panic::PanicInfo<'_>) -> ! {
    core::intrinsics::abort();
}
#[alloc_error_handler]
fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
    core::intrinsics::abort();
}
#[lang = "eh_personality"]
fn eh_personality() -> ! {
    loop {}
}
// Stub so the linker is satisfied; unwinding never actually happens here.
#[no_mangle]
unsafe extern "C" fn _Unwind_Resume() {
    core::intrinsics::unreachable();
}
#[no_mangle]
extern "C" fn main(_argc: core::ffi::c_int, _argv: *const *const u8) -> core::ffi::c_int {
    // The trailing NUL makes the Rust literal usable as a C string for `puts`.
    let world: Box<&str> = Box::new("Hello World!\0");
    unsafe {
        puts(*world as *const str as *const u8);
    }
    0
}
{
"HELP": {
"summary": "Returns helpful text about the different subcommands.",
"complexity": "O(1)",
"group": "pubsub",
"since": "6.2.0",
"arity": 2,
"container": "PUBSUB",
"function": "pubsubCommand",
"command_flags": [
"LOADING",
"STALE"
],
"reply_schema": {
"type": "array",
"description": "Helpful text about subcommands.",
"items": {
"type": "string"
}
}
}
} | json | github | https://github.com/redis/redis | src/commands/pubsub-help.json |
####################################################################################################
#
# PyDvi - A Python Library to Process DVI Stream.
# Copyright (C) 2011 Salvaire Fabrice
#
####################################################################################################
####################################################################################################
#
# Audit
#
# - 20/11/2011 fabrice
#
####################################################################################################
####################################################################################################
import unittest
####################################################################################################
from PyDvi.Font.FontMap import *
from PyDvi.Kpathsea import *
####################################################################################################
class TestFontMap(unittest.TestCase):
    """Checks that the 'pdftex' font map can be located and parsed."""

    def test(self):
        fontmap_name = 'pdftex'
        fontmap_file = kpsewhich(fontmap_name, file_format='map')
        self.assertIsNotNone(fontmap_file)
        # print() call syntax instead of the Python-2-only "print a, b"
        # statement (which is a SyntaxError on Python 3); output is unchanged.
        print('Fontmap file: %s' % fontmap_file)
        fontmap = FontMap(fontmap_file)
        fontmap_entry = fontmap['cmmi10']
        self.assertEqual(fontmap_entry.tex_name, 'cmmi10')
        self.assertEqual(fontmap_entry.ps_font_name, 'CMMI10')
        # self.assertEqual(fontmap_entry.ps_snippet, '.167 SlantFont')
        # self.assertEqual(fontmap_entry.effects, )
        # self.assertEqual(fontmap_entry.encoding, )
        self.assertEqual(fontmap_entry.pfb_filename, 'cmmi10.pfb')

####################################################################################################

if __name__ == '__main__':
    unittest.main()
####################################################################################################
#
# End
#
#################################################################################################### | unknown | codeparrot/codeparrot-clean | ||
"""
A number of functions that enhance IDLE on macOS.
"""
from os.path import expanduser
import plistlib
from sys import platform # Used in _init_tk_type, changed by test.
import tkinter
## Define functions that query the Mac graphics type.
## _tk_type and its initializer are private to this section.
_tk_type = None
def _init_tk_type():
    """ Initialize _tk_type for isXyzTk functions.
    This function is only called once, when _tk_type is still None.
    """
    global _tk_type
    if platform == 'darwin':
        # When running IDLE, GUI is present, test/* may not be.
        # When running tests, test/* is present, GUI may not be.
        # If not, guess most common. Does not matter for testing.
        from idlelib.__init__ import testing
        if testing:
            from test.support import requires, ResourceDenied
            try:
                requires('gui')
            except ResourceDenied:
                # No GUI available under test: assume the common Cocoa build.
                _tk_type = "cocoa"
                return
        # Probe a throwaway Tk instance to identify the windowing system.
        root = tkinter.Tk()
        ws = root.tk.call('tk', 'windowingsystem')
        if 'x11' in ws:
            _tk_type = "xquartz"
        elif 'aqua' not in ws:
            _tk_type = "other"
        elif 'AppKit' in root.tk.call('winfo', 'server', '.'):
            # Aqua backed by AppKit => Cocoa Tk; otherwise the old Carbon Tk.
            _tk_type = "cocoa"
        else:
            _tk_type = "carbon"
        root.destroy()
    else:
        _tk_type = "other"
    return
def isAquaTk():
    """
    Returns True if IDLE is using a native OS X Tk (Cocoa or Carbon).
    """
    if _tk_type is None:
        _init_tk_type()
    return _tk_type in ("cocoa", "carbon")
def isCarbonTk():
    """
    Returns True if IDLE is using a Carbon Aqua Tk (instead of the
    newer Cocoa Aqua Tk).
    """
    if _tk_type is None:
        _init_tk_type()
    return _tk_type == "carbon"
def isCocoaTk():
    """
    Returns True if IDLE is using a Cocoa Aqua Tk.
    """
    if _tk_type is None:
        _init_tk_type()
    return _tk_type == "cocoa"
def isXQuartz():
    """
    Returns True if IDLE is using an OS X X11 Tk.
    """
    if _tk_type is None:
        _init_tk_type()
    return _tk_type == "xquartz"
def readSystemPreferences():
    """
    Fetch the macOS system preferences.
    """
    if platform != 'darwin':
        return None
    path = expanduser('~/Library/Preferences/.GlobalPreferences.plist')
    try:
        with open(path, 'rb') as fh:
            return plistlib.load(fh)
    except OSError:
        # Missing or unreadable preferences file.
        return None
def preferTabsPreferenceWarning():
    """
    Warn if "Prefer tabs when opening documents" is set to "Always".
    """
    if platform != 'darwin':
        return None
    prefs = readSystemPreferences()
    if not prefs:
        return None
    if prefs.get('AppleWindowTabbingMode') != 'always':
        return None
    return (
        'WARNING: The system preference "Prefer tabs when opening'
        ' documents" is set to "Always". This will cause various problems'
        ' with IDLE. For the best experience, change this setting when'
        ' running IDLE (via System Preferences -> Dock).'
    )
## Fix the menu and related functions.
def addOpenEventSupport(root, flist):
    """
    This ensures that the application will respond to open AppleEvents, which
    makes it feasible to use IDLE as the default application for python files.
    """
    def doOpenFile(*args):
        # Open each requested path in the file list.
        for fn in args:
            flist.open(fn)
    # The command below is a hook in aquatk that is called whenever the app
    # receives a file open event. The callback can have multiple arguments,
    # one for every file that should be opened.
    root.createcommand("::tk::mac::OpenDocument", doOpenFile)
def hideTkConsole(root):
    # Hide the built-in Tk console window, if this Tk build provides one.
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # Some versions of the Tk framework don't have a console object
        pass
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that is more appropriate for
    IDLE with an Aqua Tk.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # Most annoying of those is an 'About Tck/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one, it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    from tkinter import Menu
    from idlelib import mainmenu
    from idlelib import window
    # NOTE(review): the index arithmetic below assumes the current layout of
    # mainmenu.menudefs (file menu first, help menu last) — keep in sync.
    closeItem = mainmenu.menudefs[0][1][-2]
    # Remove the last 3 items of the file menu: a separator, close window and
    # quit. Close window will be reinserted just above the save item, where
    # it should be according to the HIG. Quit is in the application menu.
    del mainmenu.menudefs[0][1][-3:]
    mainmenu.menudefs[0][1].insert(6, closeItem)
    # Remove the 'About' entry from the help menu, it is in the application
    # menu
    del mainmenu.menudefs[-1][1][0:2]
    # Remove the 'Configure Idle' entry from the options menu, it is in the
    # application menu as 'Preferences'
    del mainmenu.menudefs[-3][1][0:2]
    menubar = Menu(root)
    root.configure(menu=menubar)
    menu = Menu(menubar, name='window', tearoff=0)
    menubar.add_cascade(label='Window', menu=menu, underline=0)
    def postwindowsmenu(menu=menu):
        # Rebuild the Window menu contents each time it is posted.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        window.add_windows_to_menu(menu)
    window.register_callback(postwindowsmenu)
    def about_dialog(event=None):
        "Handle Help 'About IDLE' event."
        # Synchronize with editor.EditorWindow.about_dialog.
        from idlelib import help_about
        help_about.AboutDialog(root)
    def config_dialog(event=None):
        "Handle Options 'Configure IDLE' event."
        # Synchronize with editor.EditorWindow.config_dialog.
        from idlelib import configdialog
        # Ensure that the root object has an instance_dict attribute,
        # mirrors code in EditorWindow (although that sets the attribute
        # on an EditorWindow instance that is then passed as the first
        # argument to ConfigDialog)
        root.instance_dict = flist.inversedict
        configdialog.ConfigDialog(root, 'Settings')
    def help_dialog(event=None):
        "Handle Help 'IDLE Help' event."
        # Synchronize with editor.EditorWindow.help_dialog.
        from idlelib import help
        help.show_idlehelp(root)
    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)
    root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)
        # The binding above doesn't reliably work on all versions of Tk
        # on macOS. Adding command definition below does seem to do the
        # right thing for now.
        root.createcommand('::tk::mac::Quit', flist.close_all_callback)
    if isCarbonTk():
        # for Carbon AquaTk, replace the default Tk apple menu
        menu = Menu(menubar, name='apple', tearoff=0)
        menubar.add_cascade(label='IDLE', menu=menu)
        mainmenu.menudefs.insert(0,
            ('application', [
                ('About IDLE', '<<about-idle>>'),
                None,
                ]))
    if isCocoaTk():
        # replace default About dialog with About IDLE one
        root.createcommand('tkAboutDialog', about_dialog)
        # replace default "Help" item in Help menu
        root.createcommand('::tk::mac::ShowHelp', help_dialog)
        # remove redundant "IDLE Help" from menu
        del mainmenu.menudefs[-1][1][0]
def fixb2context(root):
    '''Removed bad AquaTk Button-2 (right) and Paste bindings.

    They prevent context menu access and seem to be gone in AquaTk8.6.
    See issue #24801.
    '''
    # Unbind the same three class-level Text bindings as before, via a loop.
    for sequence in ('<B2>', '<B2-Motion>', '<<PasteSelection>>'):
        root.unbind_class('Text', sequence)
def setupApp(root, flist):
    """
    Perform initial OS X customizations if needed.
    Called from pyshell.main() after initial calls to Tk()
    There are currently three major versions of Tk in use on OS X:
    1. Aqua Cocoa Tk (native default since OS X 10.6)
    2. Aqua Carbon Tk (original native, 32-bit only, deprecated)
    3. X11 (supported by some third-party distributors, deprecated)
    There are various differences among the three that affect IDLE
    behavior, primarily with menus, mouse key events, and accelerators.
    Some one-time customizations are performed here.
    Others are dynamically tested throughout idlelib by calls to the
    isAquaTk(), isCarbonTk(), isCocoaTk(), isXQuartz() functions which
    are initialized here as well.
    """
    if isAquaTk():
        # Aqua-only fixes: console, root menu, AppleEvents, Text bindings.
        hideTkConsole(root)
        overrideRootMenu(root, flist)
        addOpenEventSupport(root, flist)
        fixb2context(root)
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_macosx', verbosity=2) | python | github | https://github.com/python/cpython | Lib/idlelib/macosx.py |
from UnitTest import UnitTest
import random
from math import log, exp, sqrt, pi
try:
    from math import fsum as msum
except ImportError:
    # fsum is new in 2.6.  Catch only ImportError (the old bare `except:`
    # would have swallowed unrelated failures such as KeyboardInterrupt).
    from math import fabs
    def msum(x):
        """Fallback: sum the values of x in order of increasing magnitude
        (ties broken by position), reducing rounding error relative to a
        naive left-to-right sum."""
        xx = [(fabs(v), i) for i, v in enumerate(x)]
        xx.sort()
        # `total` instead of shadowing the builtin `sum`.
        total = 0
        for pair in xx:
            total += x[pair[1]]
        return total
# Coefficients for a Lanczos-style series approximation of the gamma
# function (g=7, 9 terms) -- NOTE(review): presumed from the structure of
# gamma() below; verify against the source the coefficients came from.
_gammacoeff = (0.9999999999995183, 676.5203681218835, -1259.139216722289,
               771.3234287757674, -176.6150291498386, 12.50734324009056,
               -0.1385710331296526, 0.9934937113930748e-05, 0.1659470187408462e-06)
def gamma(z, cof=_gammacoeff, g=7):
    # Approximates gamma(z) as (z+g-0.5)**(z-0.5) / e**(z+g-0.5) * sqrt(2*pi) * s
    # where s is the series sum built from `cof`.
    z -= 1.0
    # Next line fails when not compiled with --operator-funcs
    #s = msum([cof[0]] + [cof[i] / (z+i) for i in range(1,len(cof))])
    # Workaround: call list.__add__ explicitly instead of using `+`.
    v1 = [cof[0]]
    v2 = [cof[i] / (z+i) for i in range(1,len(cof))]
    v1 = v1.__add__(v2)
    s = msum(v1)
    z += 0.5
    return (z+g)**z / exp(z+g) * sqrt(2.0*pi) * s
class RandomModuleTest(UnitTest):
    """Checks of the random-module distributions.

    Each test replaces the generator's ``random`` method with ``list.pop``
    so that every distribution consumes a scripted stream of variates.
    """
    def test_zeroinputs(self):
        # Verify that distributions can handle a series of zero inputs'
        g = random.Random()
        xx = [g.random() for i in xrange(50)]
        x = [0.0]
        # Append five literal zeros to the scripted variate stream.
        xx = xx.__add__(x.__mul__(5))
        # Same distributions/arguments as before, now driven by one loop
        # instead of twelve copy-pasted stanzas.
        cases = [
            (g.uniform, (1, 10)),
            (g.paretovariate, (1.0,)),
            (g.expovariate, (1.0,)),
            (g.weibullvariate, (1.0, 1.0)),
            (g.normalvariate, (0.0, 1.0)),
            (g.gauss, (0.0, 1.0)),
            (g.lognormvariate, (0.0, 1.0)),
            (g.vonmisesvariate, (0.0, 1.0)),
            (g.gammavariate, (0.01, 1.0)),
            (g.gammavariate, (1.0, 1.0)),
            (g.gammavariate, (200.0, 1.0)),
            (g.betavariate, (3.0, 3.0)),
        ]
        if hasattr(g, 'triangular'):
            cases.append((g.triangular, (0.0, 1.0, 1.0/3.0)))
        for variate, args in cases:
            # Each distribution gets a fresh copy of the scripted stream.
            x = xx[:]
            g.random = getattr(x, 'pop')
            variate(*args)
    def test_avg_std(self):
        # Use integration to test distribution average and standard deviation.
        # Only works for distributions which do not consume variates in pairs
        g = random.Random()
        N = 5000
        xx = [i/float(N) for i in xrange(1,N)]
        dists = [
            (g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12),
            (g.expovariate, (1.5,), 1/1.5, 1/1.5**2),
            (g.paretovariate, (5.0,), 5.0/(5.0-1),
                              5.0/((5.0-1)**2*(5.0-2))),
            (g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0),
                              gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]
        if hasattr(g, 'triangular'):
            dists.append((g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0))
        for variate, args, mu, sigmasqrd in dists:
            # Feed the evenly spaced grid as the variate stream.
            x = xx[:]
            g.random = getattr(x, 'pop')
            y = []
            for i in xrange(len(x)):
                try:
                    y.append(variate(*args))
                except IndexError:
                    # Stream exhausted (pop from an empty list): stop sampling.
                    pass
            s1 = s2 = 0
            for e in y:
                s1 += e
                s2 += (e - mu) ** 2
            N = len(y)
            self.assertAlmostEqual(s1/N, mu, 2)
            self.assertAlmostEqual(s2/(N-1), sigmasqrd, 2)
import React, { ElementType, Fragment, Ref } from "react";
import { Video } from "./Video";
import { Image } from "./Image";
import { StaticImageData } from "next/image";
import { Media as MediaType } from "../../payload-types";
export type Props = {
src?: StaticImageData; // for static media
alt?: string;
resource?: MediaType; // for Payload media
size?: string; // for NextImage only
priority?: boolean; // for NextImage only
fill?: boolean; // for NextImage only
className?: string;
imgClassName?: string;
videoClassName?: string;
htmlElement?: ElementType | null;
onClick?: () => void;
onLoad?: () => void;
ref?: Ref<null | HTMLImageElement | HTMLVideoElement>;
};
export const Media: React.FC<Props> = (props) => {
  const { className, resource, htmlElement = "div" } = props;

  // Payload media documents carry a mimeType; video/* uploads render as video.
  const rendersVideo =
    typeof resource !== "string" && resource?.mimeType?.includes("video");

  const Wrapper = (htmlElement as ElementType) || Fragment;
  // Fragment cannot accept a className, so only pass props for real elements.
  const wrapperProps = htmlElement !== null ? { className } : {};

  return (
    <Wrapper {...wrapperProps}>
      {rendersVideo ? (
        <Video {...props} />
      ) : (
        <Image {...props} /> // eslint-disable-line
      )}
    </Wrapper>
  );
};
import sys
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from .dist import _get_unpatched
from . import msvc9_support
_Extension = _get_unpatched(distutils.core.Extension)
msvc9_support.patch_for_specialized_compiler()
def have_pyrex():
    """
    Return True if Cython or Pyrex can be imported.
    """
    candidates = ('Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext')
    for modname in candidates:
        try:
            # Equivalent to: from <package> import build_ext
            module = __import__(modname, fromlist=['build_ext'])
            module.build_ext
        except Exception:
            continue
        return True
    return False
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""

    def __init__(self, *args, **kw):
        _Extension.__init__(self, *args, **kw)
        self._convert_pyx_sources_to_lang()

    def _convert_pyx_sources_to_lang(self):
        """
        Replace sources with .pyx extensions to sources with the target
        language extension. This mechanism allows language authors to supply
        pre-converted sources but to prefer the .pyx sources.
        """
        if have_pyrex():
            # the build has Cython, so allow it to compile the .pyx files
            return
        lang = self.language or ''
        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
        # Raw string with an escaped dot: the old pattern '.pyx$' treated
        # the dot as a wildcard, so e.g. 'foo_pyx' would also be rewritten.
        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
        self.sources = list(map(sub, self.sources))
class Library(Extension):
    """Just like a regular Extension, but built as a library instead"""
# Monkey-patch distutils so that every Extension created during the build
# is the pyx-aware setuptools subclass above.
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
    # build_ext may already be imported and hold a reference to the old class.
    sys.modules['distutils.command.build_ext'].Extension = Extension
/*
* regfree - free an RE
*
* Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
* thanks all of them.
*
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
*
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* HENRY SPENCER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* src/backend/regex/regfree.c
*
*
* You might think that this could be incorporated into regcomp.c, and
* that would be a reasonable idea... except that this is a generic
* function (with a generic name), applicable to all compiled REs
* regardless of the size of their characters, whereas the stuff in
* regcomp.c gets compiled once per character size.
*/
#include "regex/regguts.h"
/*
 * pg_regfree - free an RE (generic function, punts to RE-specific function)
 *
 * Ignoring invocation with NULL is a convenience.
 */
void
pg_regfree(regex_t *re)
{
	if (re == NULL)
		return;
	/* Dispatch to the character-width-specific free routine in re_fns. */
	(*((struct fns *) re->re_fns)->free) (re);
}
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package statekeys
import (
"strings"
)
// rawKeyBuilder is a helper for building multi-field keys in the format
// that's expected by [cutKeyField].
//
// The zero value of rawKeyBuilder is ready to use.
type rawKeyBuilder struct {
	b strings.Builder // accumulated raw key text
	w bool            // true once at least one field has been written
}

// AppendField appends the given string to the key-in-progress as an additional
// field.
//
// The given string must not contain any unquoted commas, because comma is the
// field delimiter. If given an invalid field value this function will panic.
func (b *rawKeyBuilder) AppendField(s string) {
	if keyDelimiterIdx(s) != -1 {
		panic("key field contains the field delimiter")
	}
	if b.w {
		// Not the first field, so emit the comma delimiter first.
		b.b.WriteByte(',')
	}
	b.w = true
	b.b.WriteString(s)
}

// Raw returns the assembled raw key string.
func (b *rawKeyBuilder) Raw() string {
	return b.b.String()
}
// Adapted from https://github.com/withastro/cli-kit
// MIT License Copyright (c) 2022 Nate Moore
import process from "node:process";
import readline from "node:readline";
import { erase, cursor } from "sisteransi";
import { reverse, sleep, color } from "./utils";
// Hex color stops for one sweep of the animated gradient bar.
const GRADIENT_COLORS: Array<`#${string}`> = [
  "#ffffff",
  "#dadada",
  "#dadada",
  "#a8deaa",
  "#a8deaa",
  "#a8deaa",
  "#d0f0bd",
  "#d0f0bd",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#ffffed",
  "#f7f8ca",
  "#f7f8ca",
  "#eae6ba",
  "#eae6ba",
  "#eae6ba",
  "#dadada",
  "#dadada",
  "#ffffff",
];
// Width budget for the rendered indicator (frames are MAX_FRAMES - 2 cells).
const MAX_FRAMES = 8;
// Solid-color padding so the gradient sweeps in from / out to the end colors.
const LEADING_FRAMES = Array.from(
  { length: MAX_FRAMES * 2 },
  () => GRADIENT_COLORS[0],
);
const TRAILING_FRAMES = Array.from(
  { length: MAX_FRAMES * 2 },
  () => GRADIENT_COLORS[GRADIENT_COLORS.length - 1],
);
// Full forward-and-back color sequence the animation windows over.
const INDICATOR_FULL_FRAMES = [
  ...LEADING_FRAMES,
  ...GRADIENT_COLORS,
  ...TRAILING_FRAMES,
  ...reverse(GRADIENT_COLORS),
];
// Precomputed sliding windows, reversed so the gradient appears to travel.
const INDICATOR_GRADIENT = reverse(
  INDICATOR_FULL_FRAMES.map((_, i) => loadingIndicatorFrame(i)),
);
export async function renderLoadingIndicator({
  start,
  end,
  while: update = () => sleep(100),
  noMotion = false,
  stdin = process.stdin,
  stdout = process.stdout,
}: {
  start: string;
  end: string;
  while: (...args: any) => Promise<any>;
  noMotion?: boolean;
  stdin?: NodeJS.ReadStream & { fd: 0 };
  stdout?: NodeJS.WriteStream & { fd: 1 };
}) {
  let act = update();
  // Unique sentinel: identifies which branch of the race settled first.
  let tooSlow = Object.create(null);
  // Only show the spinner if the task takes longer than 500ms.
  let result = await Promise.race([sleep(500).then(() => tooSlow), act]);
  if (result === tooSlow) {
    let loading = await gradient(color.green(start), {
      stdin,
      stdout,
      noMotion,
    });
    await act;
    loading.stop();
  }
  // Always print the final checkmark line, spinner or not.
  stdout.write(`${" ".repeat(5)} ${color.green("✔")} ${color.green(end)}\n`);
}
// Returns the window of colors visible at the given animation offset,
// right-padded with the first gradient color to a fixed width.
function loadingIndicatorFrame(offset = 0) {
  const width = MAX_FRAMES - 2;
  const frame = INDICATOR_FULL_FRAMES.slice(offset, offset + width);
  while (frame.length < width) {
    frame.push(GRADIENT_COLORS[0]);
  }
  return frame;
}
// Renders each precomputed color window into a string of colored blocks.
function getGradientAnimationFrames() {
  return INDICATOR_GRADIENT.map((colors) => {
    const cells = colors.map((hexColor) => color.hex(hexColor)("█"));
    return " " + cells.join("");
  });
}
async function gradient(
  text: string,
  { stdin = process.stdin, stdout = process.stdout, noMotion = false } = {},
) {
  let { createLogUpdate } = await import("log-update");
  let logUpdate = createLogUpdate(stdout);
  let frameIndex = 0;
  let frames = getGradientAnimationFrames();
  let interval: NodeJS.Timeout;
  // Raw-mode readline so individual keypresses (notably Ctrl+C) are visible.
  let rl = readline.createInterface({ input: stdin, escapeCodeTimeout: 50 });
  readline.emitKeypressEvents(stdin, rl);
  if (stdin.isTTY) stdin.setRawMode(true);
  function keypress(char: string) {
    // "\x03" is Ctrl+C: stop the spinner and exit immediately.
    if (char === "\x03") {
      loadingIndicator.stop();
      process.exit(0);
    }
    if (stdin.isTTY) stdin.setRawMode(true);
    // Re-hide the cursor and erase whatever the keypress echoed.
    stdout.write(cursor.hide + erase.lines(1));
  }
  let done = false;
  let loadingIndicator = {
    start() {
      stdout.write(cursor.hide);
      stdin.on("keypress", keypress);
      logUpdate(`${frames[0]} ${text}`);
      // Self-rescheduling async loop (≈20ms per frame) instead of setInterval.
      async function loop() {
        if (done) return;
        if (frameIndex < frames.length - 1) {
          frameIndex++;
        } else {
          frameIndex = 0;
        }
        let frame = frames[frameIndex];
        logUpdate(
          `${(noMotion
            ? getMotionlessFrame(frameIndex)
            : color.supportsColor
              ? frame
              : getColorlessFrame(frameIndex)
          ).padEnd(MAX_FRAMES - 1, " ")} ${text}`,
        );
        if (!done) await sleep(20);
        loop();
      }
      loop();
    },
    stop() {
      // Flip the flag first so any in-flight loop() iteration bails out.
      done = true;
      stdin.removeListener("keypress", keypress);
      clearInterval(interval);
      logUpdate.clear();
      rl.close();
    },
  };
  loadingIndicator.start();
  return loadingIndicator;
}
/**
 * ASCII fallback frame for terminals without color support: cycles through
 * three dot patterns so the indicator still visibly animates.
 */
function getColorlessFrame(frameIndex: number) {
  const patterns = [".. .. ", " .. ..", ". .. ."];
  const pattern = patterns[frameIndex % 3];
  return pattern.padEnd(MAX_FRAMES - 1 + 20, " ");
}
function getMotionlessFrame(frameIndex: number) {
return " ".repeat(MAX_FRAMES - 1);
} | typescript | github | https://github.com/remix-run/react-router | packages/create-react-router/loading-indicator.ts |
from requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
class CacheControlAdapter(HTTPAdapter):
    """Transport adapter adding HTTP caching to requests sessions.

    GET requests may be served from (and stored into) the configured
    cache; successful PUT/DELETE responses invalidate any cached entry
    for the same URL.
    """

    # Successful responses to these methods invalidate the cached entry
    # for the request's URL.
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None, cache_etags=True, controller_class=None,
                 serializer=None, *args, **kw):
        """Create the adapter.

        cache: cache backend; defaults to an in-memory DictCache.
        cache_etags: whether the controller stores/replays ETag validators.
        controller_class: alternative CacheController implementation.
        serializer: passed through to the controller.
        Remaining args/kwargs go to requests' HTTPAdapter.
        """
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()

        controller_factory = controller_class or CacheController
        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )

    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            cached_response = self.controller.cached_request(request)
            if cached_response:
                # Cache hit: short-circuit without touching the network.
                return self.build_response(request, cached_response,
                                           from_cache=True)

            # check for etags and add headers if appropriate
            request.headers.update(self.controller.conditional_headers(request))

        resp = super(CacheControlAdapter, self).send(request, **kw)
        return resp

    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.

        This will end up calling send and returning a potentially
        cached response
        """
        if not from_cache and request.method == 'GET':
            # NOTE(review): `response` appears to be the low-level
            # (urllib3-style) response here — it exposes `.status`, not
            # requests' `.status_code`. Confirm against the adapter contract.
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )

                if cached_response is not response:
                    from_cache = True

                response = cached_response
            else:
                # Caching is best-effort: a failure to store must never
                # break the request itself. (Dropped the unused `as e`.)
                try:
                    self.controller.cache_response(request, response)
                except Exception:
                    pass

        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Give the request a from_cache attr to let people use it
        resp.from_cache = from_cache

        return resp
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_COMMON_COST_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_COMMON_COST_H_
#include <string>
#include "mlir/IR/Builders.h" // from @llvm-project
#include "mlir/IR/Operation.h" // from @llvm-project
namespace mlir {
namespace TFL {
namespace tac {
// Cost attribute string on the TFL dialect.
constexpr char kCost[] = "tac.cost";

// Stores `cost` on `op` as an f32 attribute under kCost, overwriting any
// previous value.
inline void UpdateCost(Operation* op, float cost, OpBuilder* builder) {
  op->setAttr(kCost, builder->getF32FloatAttr(cost));
}
// Get the cost annotated with kCost. Returns true and writes the value into
// *cost when the attribute is present; returns false (leaving *cost
// untouched) otherwise.
inline bool GetCostOnOp(Operation* op, float* cost) {
  auto cost_attr = op->getAttrOfType<FloatAttr>(kCost);
  if (cost_attr == nullptr) return false;
  *cost = cost_attr.getValueAsDouble();
  return true;
}
} // namespace tac
} // namespace TFL
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_TAC_COMMON_COST_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/lite/experimental/tac/common/cost.h |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholders are substituted by `git archive`. setup.py and
    # versioneer.py grep for the variable names, so each assignment must
    # stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances carry plain attributes (VCS, style, tag_prefix,
    parentdir_prefix, versionfile_source, verbose) assigned in
    get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' creates
    # _version.py.
    cfg = VersioneerConfig()
    for attr, value in (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", "v"),
        ("parentdir_prefix", "pymagicc-"),
        ("versionfile_source", "pymagicc/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow by get_versions(): each discovery strategy raises
    it to hand off to the next strategy.
    """
# NOTE(review): appears unused within this module; presumably kept for
# versioneer API compatibility — confirm before removing.
LONG_VERSION_PY = {}
# Registry mapping VCS name -> {method name -> handler function}; populated
# by the @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault replaces the explicit membership check + assignment.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable in `commands` until one launches, then
    returns (stdout, returncode). Returns (None, None) when no candidate
    could be launched, and (None, returncode) when the process failed.
    """
    assert isinstance(commands, list)
    process = None
    displayed = None
    for candidate in commands:
        displayed = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % displayed)
                print(err)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    output = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        output = output.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % displayed)
            print("stdout was %s" % output)
        return None, process.returncode
    return output, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans `versionfile_abs` for git_refnames/git_full/git_date assignments
    and returns whichever were found as a dict keyed by "refnames", "full"
    and "date".  A missing/unreadable file yields an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # `with` guarantees the handle is closed even if parsing raises,
        # unlike the explicit close() it replaces.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` holds the strings substituted by `git archive` (see
    get_keywords()). Returns a version dict on success; raises
    NotThisMethod when the keywords were never expanded, i.e. we are not
    running from a git-archive tarball.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date. Raises NotThisMethod when `root` is not
    under git control or the git commands fail.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): assumes `git show` succeeds; run_command returns
    # (None, ...) on failure, and [0].strip() would then raise — confirm
    # that is acceptable here.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
def plus_or_dot(pieces):
    """Return a "." if the closest tag already has a "+", else a "+".

    PEP 440 local version segments start with "+"; subsequent fields are
    joined with ".".
    """
    # `or ""` also covers an explicit None value for "closest-tag", which
    # dict.get alone would pass through and crash the `in` test.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Produces TAG[+DISTANCE.gHEX[.dirty]]; note that a dirty tagged build
    therefore renders as TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: untagged history always carries the +gHEX suffix
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        parts = [tag]
        if pieces["distance"] or pieces["dirty"]:
            parts.append(".post%d" % pieces["distance"])
            if pieces["dirty"]:
                parts.append(".dev0")
    else:
        # exception #1
        parts = ["0.post%d" % pieces["distance"]]
        if pieces["dirty"]:
            parts.append(".dev0")
    return "".join(parts)
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Distance suffix only when we are past the tag.
        rendered = ("%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
                    if pieces["distance"] else tag)
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return (rendered + "-dirty") if pieces["dirty"] else rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, `git describe` on the
    checked-out tree, and the parent-directory-name heuristic. Always
    returns a dict with keys: version, full-revisionid, dirty, error, date.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
"use strict";
/** @type {import("webpack").Configuration} */
const config = {
optimization: {
chunkIds: "deterministic" // To keep filename consistent between different modes (for example building only)
}
};
module.exports = config; | javascript | github | https://github.com/webpack/webpack | examples/code-splitting-harmony/webpack.config.js |
"use strict";
const { describeCases } = require("./TestCases.template");
describe("TestCases", () => {
describeCases({
name: "normal"
});
}); | javascript | github | https://github.com/webpack/webpack | test/TestCasesNormal.basictest.js |
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.standalone.fir.test.cases.generated.cases.components.expressionInfoProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.standalone.fir.test.configurators.AnalysisApiFirStandaloneModeTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.expressionInfoProvider.AbstractWhenMissingCasesTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases")
@TestDataPath("$PROJECT_ROOT")
public class FirStandaloneNormalAnalysisSourceModuleWhenMissingCasesTestGenerated extends AbstractWhenMissingCasesTest {
    // FIR frontend, source module, normal session, standalone mode — the
    // combination this generated class exercises.
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        return AnalysisApiFirStandaloneModeTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Standalone
            )
        );
    }

    // Guards against test-data .kt files that have no generated test method.
    @Test
    public void testAllFilesPresentInWhenMissingCases() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("boolean_else.kt")
    public void testBoolean_else() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/boolean_else.kt");
    }

    @Test
    @TestMetadata("boolean_empty.kt")
    public void testBoolean_empty() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/boolean_empty.kt");
    }

    @Test
    @TestMetadata("boolean_noSubject.kt")
    public void testBoolean_noSubject() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/boolean_noSubject.kt");
    }

    @Test
    @TestMetadata("boolean_noSubjectIncorrectCode.kt")
    public void testBoolean_noSubjectIncorrectCode() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/boolean_noSubjectIncorrectCode.kt");
    }

    @Test
    @TestMetadata("boolean_noSubject_else.kt")
    public void testBoolean_noSubject_else() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/boolean_noSubject_else.kt");
    }

    @Test
    @TestMetadata("boolean_partial.kt")
    public void testBoolean_partial() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/boolean_partial.kt");
    }

    @Test
    @TestMetadata("enum_else.kt")
    public void testEnum_else() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/enum_else.kt");
    }

    @Test
    @TestMetadata("enum_empty.kt")
    public void testEnum_empty() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/enum_empty.kt");
    }

    @Test
    @TestMetadata("enum_partial.kt")
    public void testEnum_partial() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/enum_partial.kt");
    }

    @Test
    @TestMetadata("nothing.kt")
    public void testNothing() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/nothing.kt");
    }

    @Test
    @TestMetadata("nullableBoolean.kt")
    public void testNullableBoolean() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/nullableBoolean.kt");
    }

    @Test
    @TestMetadata("nullableEnum.kt")
    public void testNullableEnum() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/nullableEnum.kt");
    }

    @Test
    @TestMetadata("nullableNothing.kt")
    public void testNullableNothing() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/nullableNothing.kt");
    }

    @Test
    @TestMetadata("nullableSealedClass_empty.kt")
    public void testNullableSealedClass_empty() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/nullableSealedClass_empty.kt");
    }

    @Test
    @TestMetadata("sealedClass_else.kt")
    public void testSealedClass_else() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/sealedClass_else.kt");
    }

    @Test
    @TestMetadata("sealedClass_empty.kt")
    public void testSealedClass_empty() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/sealedClass_empty.kt");
    }

    @Test
    @TestMetadata("sealedClass_partial.kt")
    public void testSealedClass_partial() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/whenMissingCases/sealedClass_partial.kt");
    }
}
"""Utility functions for killing the wrapper softly.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
Softexit: Concise class to manage cleaning up in case of an emergency exit.
"""
import traceback, sys
from ipi.utils.messages import verbosity, warning
__all__ = ['Softexit', 'softexit']
class Softexit(object):
    """Manage a clean early shutdown of the simulation.

    Collects callbacks (added via register()) that must run before the
    wrapper exits — for instance when a SIGTERM arrives or the user drops
    an EXIT file into the run directory — so the socket interface is shut
    down properly and a RESTART file can be written for the right step.

    Attributes:
       flist: The list of registered clean-up functions.
    """

    def __init__(self):
        """Initializes SoftExit with an empty clean-up list."""

        self.flist = []

    def register(self, func):
        """Registers a clean-up function to run on exit.

        Args:
           func: The function to be added to flist.
        """

        self.flist.append(func)

    def trigger(self, message=""):
        """Halts the simulation.

        Prints a warning (when a non-empty message is given), runs every
        registered clean-up function in registration order, then exits.

        Args:
           message: The message to output to standard output.
        """

        if message != "":
            warning("Soft exit has been requested with message: '" + message + "'. Cleaning up.", verbosity.low)
        for cleanup in self.flist:
            cleanup()
        sys.exit()


softexit = Softexit()
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "SpecialMemberFunctionsCheck.h"
#include "clang/AST/ASTContext.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "llvm/ADT/StringExtras.h"
#define DEBUG_TYPE "clang-tidy"
using namespace clang::ast_matchers;
namespace clang::tidy::cppcoreguidelines {
namespace {
// Matches CXXRecordDecls whose begin and end locations both come from a
// macro expansion, i.e. the whole record definition is macro-generated.
AST_MATCHER(CXXRecordDecl, isInMacro) {
  return Node.getBeginLoc().isMacroID() && Node.getEndLoc().isMacroID();
}
} // namespace
// Reads the check's configuration (defaults: all Allow* options false,
// IgnoreMacros true); IgnoreMacros skips records that are entirely
// macro-generated (see the isInMacro matcher).
SpecialMemberFunctionsCheck::SpecialMemberFunctionsCheck(
    StringRef Name, ClangTidyContext *Context)
    : ClangTidyCheck(Name, Context), AllowMissingMoveFunctions(Options.get(
                                         "AllowMissingMoveFunctions", false)),
      AllowSoleDefaultDtor(Options.get("AllowSoleDefaultDtor", false)),
      AllowMissingMoveFunctionsWhenCopyIsDeleted(
          Options.get("AllowMissingMoveFunctionsWhenCopyIsDeleted", false)),
      AllowImplicitlyDeletedCopyOrMove(
          Options.get("AllowImplicitlyDeletedCopyOrMove", false)),
      IgnoreMacros(Options.get("IgnoreMacros", true)) {}
// Writes the current option values back into the option map — the inverse
// of the Options.get calls in the constructor.
void SpecialMemberFunctionsCheck::storeOptions(
    ClangTidyOptions::OptionMap &Opts) {
  Options.store(Opts, "AllowMissingMoveFunctions", AllowMissingMoveFunctions);
  Options.store(Opts, "AllowSoleDefaultDtor", AllowSoleDefaultDtor);
  Options.store(Opts, "AllowMissingMoveFunctionsWhenCopyIsDeleted",
                AllowMissingMoveFunctionsWhenCopyIsDeleted);
  Options.store(Opts, "AllowImplicitlyDeletedCopyOrMove",
                AllowImplicitlyDeletedCopyOrMove);
  Options.store(Opts, "IgnoreMacros", IgnoreMacros);
}
// Implicitly deleted members are not spelled in source, so checking them
// requires the full as-is traversal; otherwise the cheaper spelled-in-source
// traversal suffices.
std::optional<TraversalKind>
SpecialMemberFunctionsCheck::getCheckTraversalKind() const {
  if (AllowImplicitlyDeletedCopyOrMove)
    return TK_AsIs;
  return TK_IgnoreUnlessSpelledInSource;
}
void SpecialMemberFunctionsCheck::registerMatchers(MatchFinder *Finder) {
  // A member is interesting if the user wrote it, or if it is implicit but
  // deleted (the latter is only visible under the TK_AsIs traversal, see
  // getCheckTraversalKind()).
  const auto IsNotImplicitOrDeleted = anyOf(unless(isImplicit()), isDeleted());

  const ast_matchers::internal::Matcher<CXXRecordDecl> Anything = anything();
  // Bind each present special member under its own id so check() can tell
  // which ones a record declares; macro-generated records are skipped when
  // IgnoreMacros is set.
  Finder->addMatcher(
      cxxRecordDecl(
          unless(isImplicit()), IgnoreMacros ? unless(isInMacro()) : Anything,
          eachOf(has(cxxDestructorDecl(unless(isImplicit())).bind("dtor")),
                 has(cxxConstructorDecl(isCopyConstructor(),
                                        IsNotImplicitOrDeleted)
                         .bind("copy-ctor")),
                 has(cxxMethodDecl(isCopyAssignmentOperator(),
                                   IsNotImplicitOrDeleted)
                         .bind("copy-assign")),
                 has(cxxConstructorDecl(isMoveConstructor(),
                                        IsNotImplicitOrDeleted)
                         .bind("move-ctor")),
                 has(cxxMethodDecl(isMoveAssignmentOperator(),
                                   IsNotImplicitOrDeleted)
                         .bind("move-assign"))))
          .bind("class-def"),
      this);
}
// Maps a special-member kind to the human-readable phrase used in
// diagnostics (always carries a leading article, e.g. "a copy constructor").
static llvm::StringRef
toString(SpecialMemberFunctionsCheck::SpecialMemberFunctionKind K) {
  using Kind = SpecialMemberFunctionsCheck::SpecialMemberFunctionKind;
  switch (K) {
  case Kind::Destructor:
    return "a destructor";
  case Kind::DefaultDestructor:
    return "a default destructor";
  case Kind::NonDefaultDestructor:
    return "a non-default destructor";
  case Kind::CopyConstructor:
    return "a copy constructor";
  case Kind::CopyAssignment:
    return "a copy assignment operator";
  case Kind::MoveConstructor:
    return "a move constructor";
  case Kind::MoveAssignment:
    return "a move assignment operator";
  }
  llvm_unreachable("Unhandled SpecialMemberFunctionKind");
}
// Joins the human-readable names of the given special members into an
// English list, separating the final element with AndOr (e.g. " and ").
static std::string
join(ArrayRef<SpecialMemberFunctionsCheck::SpecialMemberFunctionKind> SMFS,
     llvm::StringRef AndOr) {
  assert(!SMFS.empty() &&
         "List of defined or undefined members should never be empty.");
  std::string Buffer;
  llvm::raw_string_ostream Stream(Buffer);
  Stream << toString(SMFS[0]);
  const size_t LastIndex = SMFS.size() - 1;
  // Comma-separate the middle elements; the last one gets AndOr instead.
  for (size_t I = 1; I < LastIndex; ++I)
    Stream << ", " << toString(SMFS[I]);
  if (LastIndex != 0)
    Stream << AndOr << toString(SMFS[LastIndex]);
  // raw_string_ostream writes through to Buffer; returning the backing
  // string directly avoids the deprecated raw_string_ostream::str().
  return Buffer;
}
// Records every matched special member of the bound class. Runs once per
// matched member because of the eachOf() matcher in registerMatchers().
void SpecialMemberFunctionsCheck::check(
    const MatchFinder::MatchResult &Result) {
  const auto *MatchedDecl = Result.Nodes.getNodeAs<CXXRecordDecl>("class-def");
  if (!MatchedDecl)
    return;
  // Classes are keyed by (location, name) so the end-of-TU pass can
  // aggregate all members seen for the same class definition.
  ClassDefId ID(MatchedDecl->getLocation(),
                std::string(MatchedDecl->getName()));
  // Deduplicating store: the same member may be delivered more than once.
  auto StoreMember = [this, &ID](SpecialMemberFunctionData Data) {
    llvm::SmallVectorImpl<SpecialMemberFunctionData> &Members =
        ClassWithSpecialMembers[ID];
    if (!llvm::is_contained(Members, Data))
      Members.push_back(std::move(Data));
  };
  if (const auto *Dtor = Result.Nodes.getNodeAs<CXXMethodDecl>("dtor")) {
    SpecialMemberFunctionKind DestructorType =
        SpecialMemberFunctionKind::Destructor;
    // Only a defined destructor can be classified as defaulted or not; a
    // bare declaration keeps the generic Destructor kind.
    if (Dtor->isDefined()) {
      DestructorType = Dtor->getDefinition()->isDefaulted()
                           ? SpecialMemberFunctionKind::DefaultDestructor
                           : SpecialMemberFunctionKind::NonDefaultDestructor;
    }
    StoreMember({DestructorType, Dtor->isDeleted()});
  }
  // Matcher binding name -> the member kind it represents.
  const std::initializer_list<std::pair<std::string, SpecialMemberFunctionKind>>
      Matchers = {{"copy-ctor", SpecialMemberFunctionKind::CopyConstructor},
                  {"copy-assign", SpecialMemberFunctionKind::CopyAssignment},
                  {"move-ctor", SpecialMemberFunctionKind::MoveConstructor},
                  {"move-assign", SpecialMemberFunctionKind::MoveAssignment}};
  for (const auto &KV : Matchers)
    if (const auto *MethodDecl =
            Result.Nodes.getNodeAs<CXXMethodDecl>(KV.first)) {
      StoreMember(
          {KV.second, MethodDecl->isDeleted(), MethodDecl->isImplicit()});
    }
}
// Diagnostics are deferred until the whole translation unit has been seen,
// so all members of a class are known before it is evaluated.
void SpecialMemberFunctionsCheck::onEndOfTranslationUnit() {
  for (const auto &[ClassId, Members] : ClassWithSpecialMembers)
    checkForMissingMembers(ClassId, Members);
}
// Applies rule-of-three / rule-of-five logic to one class and, if members
// are missing, emits a diagnostic listing what is defined versus missing.
void SpecialMemberFunctionsCheck::checkForMissingMembers(
    const ClassDefId &ID,
    llvm::ArrayRef<SpecialMemberFunctionData> DefinedMembers) {
  llvm::SmallVector<SpecialMemberFunctionKind, 5> MissingMembers;
  // True if the class explicitly declares a member of this kind.
  auto HasMember = [&](SpecialMemberFunctionKind Kind) {
    return llvm::any_of(DefinedMembers, [Kind](const auto &Data) {
      return Data.FunctionKind == Kind && !Data.IsImplicit;
    });
  };
  // True if the compiler implicitly deleted a member of this kind.
  auto HasImplicitDeletedMember = [&](SpecialMemberFunctionKind Kind) {
    return llvm::any_of(DefinedMembers, [Kind](const auto &Data) {
      return Data.FunctionKind == Kind && Data.IsImplicit && Data.IsDeleted;
    });
  };
  // True if a member of this kind is deleted, explicitly or implicitly.
  auto IsDeleted = [&](SpecialMemberFunctionKind Kind) {
    return llvm::any_of(DefinedMembers, [Kind](const auto &Data) {
      return Data.FunctionKind == Kind && Data.IsDeleted;
    });
  };
  // Requires the Kind1/Kind2 pair, unless both are implicitly deleted and
  // AllowImplicitlyDeletedCopyOrMove accepts that as a definition.
  auto RequireMembers = [&](SpecialMemberFunctionKind Kind1,
                            SpecialMemberFunctionKind Kind2) {
    if (AllowImplicitlyDeletedCopyOrMove && HasImplicitDeletedMember(Kind1) &&
        HasImplicitDeletedMember(Kind2))
      return;
    if (!HasMember(Kind1))
      MissingMembers.push_back(Kind1);
    if (!HasMember(Kind2))
      MissingMembers.push_back(Kind2);
  };
  // Rule of three is triggered by any user-declared destructor, copy, or
  // move member; a sole "= default" destructor may be exempted by option.
  const bool RequireThree =
      HasMember(SpecialMemberFunctionKind::NonDefaultDestructor) ||
      (!AllowSoleDefaultDtor &&
       (HasMember(SpecialMemberFunctionKind::Destructor) ||
        HasMember(SpecialMemberFunctionKind::DefaultDestructor))) ||
      HasMember(SpecialMemberFunctionKind::CopyConstructor) ||
      HasMember(SpecialMemberFunctionKind::CopyAssignment) ||
      HasMember(SpecialMemberFunctionKind::MoveConstructor) ||
      HasMember(SpecialMemberFunctionKind::MoveAssignment);
  // Rule of five: in C++11 mode the three imply all five (unless waived by
  // AllowMissingMoveFunctions); declared moves always require it.
  const bool RequireFive =
      (!AllowMissingMoveFunctions && RequireThree &&
       getLangOpts().CPlusPlus11) ||
      HasMember(SpecialMemberFunctionKind::MoveConstructor) ||
      HasMember(SpecialMemberFunctionKind::MoveAssignment);
  if (RequireThree) {
    // Any flavor of user-declared destructor satisfies the requirement.
    if (!HasMember(SpecialMemberFunctionKind::Destructor) &&
        !HasMember(SpecialMemberFunctionKind::DefaultDestructor) &&
        !HasMember(SpecialMemberFunctionKind::NonDefaultDestructor))
      MissingMembers.push_back(SpecialMemberFunctionKind::Destructor);
    RequireMembers(SpecialMemberFunctionKind::CopyConstructor,
                   SpecialMemberFunctionKind::CopyAssignment);
  }
  // Moves may be omitted when both copy operations are deleted and the
  // AllowMissingMoveFunctionsWhenCopyIsDeleted option is enabled.
  if (RequireFive &&
      !(AllowMissingMoveFunctionsWhenCopyIsDeleted &&
        (IsDeleted(SpecialMemberFunctionKind::CopyConstructor) &&
         IsDeleted(SpecialMemberFunctionKind::CopyAssignment)))) {
    assert(RequireThree);
    RequireMembers(SpecialMemberFunctionKind::MoveConstructor,
                   SpecialMemberFunctionKind::MoveAssignment);
  }
  if (!MissingMembers.empty()) {
    llvm::SmallVector<SpecialMemberFunctionKind, 5> DefinedMemberKinds;
    // Only explicitly declared members are listed in the "defines" part.
    for (const auto &Data : DefinedMembers)
      if (!Data.IsImplicit)
        DefinedMemberKinds.push_back(Data.FunctionKind);
    diag(ID.first, "class '%0' defines %1 but does not define %2")
        << ID.second << cppcoreguidelines::join(DefinedMemberKinds, " and ")
        << cppcoreguidelines::join(MissingMembers, " or ");
  }
}
} // namespace clang::tidy::cppcoreguidelines | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/cppcoreguidelines/SpecialMemberFunctionsCheck.cpp |
# -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
from calibre.customize.conversion import OutputFormatPlugin, \
OptionRecommendation
class TCROutput(OutputFormatPlugin):
    """Conversion plugin that serializes an OEB book as a TCR
    (compressed text) file."""

    name = 'TCR Output'
    author = 'John Schember'
    file_type = 'tcr'

    options = set([
        OptionRecommendation(name='tcr_output_encoding', recommended_value='utf-8',
            level=OptionRecommendation.LOW,
            help=_('Specify the character encoding of the output document. ' \
                'The default is utf-8.')),
    ])

    def convert(self, oeb_book, output_path, input_plugin, opts, log):
        """Write oeb_book to output_path as TCR-compressed text.

        output_path may be a filesystem path or an open file-like
        object; only streams opened here are closed here.
        """
        from calibre.ebooks.txt.txtml import TXTMLizer
        from calibre.ebooks.compression.tcr import compress

        close = False
        if not hasattr(output_path, 'write'):
            close = True
            # Create missing parent directories before opening the file.
            if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
                os.makedirs(os.path.dirname(output_path))
            out_stream = open(output_path, 'wb')
        else:
            out_stream = output_path

        # Force plain, unwrapped, unindented text: TCR compresses one
        # continuous text stream.
        setattr(opts, 'flush_paras', False)
        setattr(opts, 'max_line_length', 0)
        setattr(opts, 'force_max_line_length', False)
        setattr(opts, 'indent_paras', False)

        writer = TXTMLizer(log)
        # Undecodable characters are replaced rather than raising.
        txt = writer.extract_content(oeb_book, opts).encode(opts.tcr_output_encoding, 'replace')

        log.info('Compressing text...')
        txt = compress(txt)

        # Rewind and truncate so a caller-supplied stream is overwritten,
        # not appended to.
        out_stream.seek(0)
        out_stream.truncate()
        out_stream.write(txt)

        if close:
            out_stream.close()
import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.fields import Scope, String
import textwrap
log = logging.getLogger(__name__)
class AnnotatableFields(object):
    """XBlock fields shared by the annotatable module and its descriptor."""

    # Raw <annotatable> XML; the default doubles as authoring documentation.
    data = String(help="XML data for the annotation", scope=Scope.content,
        default=textwrap.dedent(
        """\
        <annotatable>
            <instructions>
                <p>Enter your (optional) instructions for the exercise in HTML format.</p>
                <p>Annotations are specified by an <code>&lt;annotation&gt;</code> tag which may may have the following attributes:</p>
                <ul class="instructions-template">
                    <li><code>title</code> (optional). Title of the annotation. Defaults to <i>Commentary</i> if omitted.</li>
                    <li><code>body</code> (<b>required</b>). Text of the annotation.</li>
                    <li><code>problem</code> (optional). Numeric index of the problem associated with this annotation. This is a zero-based index, so the first problem on the page would have <code>problem="0"</code>.</li>
                    <li><code>highlight</code> (optional). Possible values: yellow, red, orange, green, blue, or purple. Defaults to yellow if this attribute is omitted.</li>
                </ul>
            </instructions>
            <p>Add your HTML with annotation spans here.</p>
            <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. <annotation title="My title" body="My comment" highlight="yellow" problem="0">Ut sodales laoreet est, egestas gravida felis egestas nec.</annotation> Aenean at volutpat erat. Cras commodo viverra nibh in aliquam.</p>
            <p>Nulla facilisi. <annotation body="Basic annotation example." problem="1">Pellentesque id vestibulum libero.</annotation> Suspendisse potenti. Morbi scelerisque nisi vitae felis dictum mattis. Nam sit amet magna elit. Nullam volutpat cursus est, sit amet sagittis odio vulputate et. Curabitur euismod, orci in vulputate imperdiet, augue lorem tempor purus, id aliquet augue turpis a est. Aenean a sagittis libero. Praesent fringilla pretium magna, non condimentum risus elementum nec. Pellentesque faucibus elementum pharetra. Pellentesque vitae metus eros.</p>
        </annotatable>
        """))
    # Name shown for this module in the courseware navigation/UI.
    display_name = String(
        display_name="Display Name",
        help="Display name for this module",
        scope=Scope.settings,
        default='Annotation',
    )
class AnnotatableModule(AnnotatableFields, XModule):
    """Student-facing module that renders annotatable XML as HTML,
    turning <annotation> elements into highlight <span>s."""

    js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'),
                     resource_string(__name__, 'js/src/collapsible.coffee'),
                     resource_string(__name__, 'js/src/html/display.coffee'),
                     resource_string(__name__, 'js/src/annotatable/display.coffee')],
          'js': []}
    js_module_name = "Annotatable"
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'annotatable'

    def __init__(self, *args, **kwargs):
        super(AnnotatableModule, self).__init__(*args, **kwargs)

        xmltree = etree.fromstring(self.data)

        # Instructions are split out of the tree before serialization so
        # the template can render them separately from the content.
        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.element_id = self.location.html_id()
        self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green']

    def _get_annotation_class_attr(self, index, el):
        """ Returns a dict with the CSS class attribute to set on the annotation
        and an XML key to delete from the element.
        """
        attr = {}
        cls = ['annotatable-span', 'highlight']
        highlight_key = 'highlight'
        color = el.get(highlight_key)

        if color is not None:
            # Unknown colors are dropped silently; only the attribute is removed.
            if color in self.highlight_colors:
                cls.append('highlight-' + color)
            attr['_delete'] = highlight_key
        attr['value'] = ' '.join(cls)

        return {'class': attr}

    def _get_annotation_data_attr(self, index, el):
        """ Returns a dict in which the keys are the HTML data attributes
        to set on the annotation element. Each data attribute has a
        corresponding 'value' and (optional) '_delete' key to specify
        an XML attribute to delete.
        """
        data_attrs = {}
        attrs_map = {
            'body': 'data-comment-body',
            'title': 'data-comment-title',
            'problem': 'data-problem-id'
        }

        # Iterate key/value pairs directly instead of re-looking keys up.
        for xml_key, html_key in attrs_map.items():
            if xml_key in el.attrib:
                value = el.get(xml_key, '')
                data_attrs[html_key] = {'value': value, '_delete': xml_key}

        return data_attrs

    def _render_annotation(self, index, el):
        """ Renders an annotation element for HTML output. """
        attr = {}
        attr.update(self._get_annotation_class_attr(index, el))
        attr.update(self._get_annotation_data_attr(index, el))

        el.tag = 'span'

        for key, spec in attr.items():
            el.set(key, spec['value'])
            # Remove the source XML attribute once translated to HTML.
            if spec.get('_delete') is not None:
                del el.attrib[spec['_delete']]

    def _render_content(self):
        """ Renders annotatable content with annotation spans and returns HTML. """
        xmltree = etree.fromstring(self.content)
        xmltree.tag = 'div'
        if 'display_name' in xmltree.attrib:
            del xmltree.attrib['display_name']

        # enumerate() replaces the manual index counter.
        for index, el in enumerate(xmltree.findall('.//annotation')):
            self._render_annotation(index, el)

        return etree.tostring(xmltree, encoding='unicode')

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        instructions = xmltree.find('instructions')
        if instructions is not None:
            instructions.tag = 'div'
            xmltree.remove(instructions)
            return etree.tostring(instructions, encoding='unicode')
        return None

    def get_html(self):
        """ Renders parameters to template. """
        context = {
            'display_name': self.display_name_with_default,
            'element_id': self.element_id,
            'instructions_html': self.instructions,
            'content_html': self._render_content()
        }

        return self.system.render_template('annotatable.html', context)
class AnnotatableDescriptor(AnnotatableFields, RawDescriptor):
    """Studio-side descriptor: content is edited as raw XML."""
    module_class = AnnotatableModule
    mako_template = "widgets/raw-edit.html"
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for cloud ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=line-too-long,wildcard-import,g-import-not-at-top
from tensorflow.contrib.cloud.python.ops.bigquery_reader_ops import *
from tensorflow.contrib.cloud.python.ops.gcs_config_ops import *
if os.name != 'nt':
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableTable
del os
from tensorflow.python.util.all_util import remove_undocumented
# Public API of tf.contrib.cloud; every other name imported above is
# stripped from the module namespace by remove_undocumented() below.
_allowed_symbols = [
    'BigQueryReader',
    'BigtableClient',
    'BigtableTable',
    'BlockCacheParams',
    'configure_colab_session',
    'configure_gcs',
    'ConfigureGcsHook',
]
remove_undocumented(__name__, _allowed_symbols)
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import collections
import functools
import os
import re
# CONSTANTS

# categorization of a run result
# 'correct' and 'wrong' refer to whether the tool's result matches the expected result.
CATEGORY_CORRECT = "correct"
"""run result given by tool is correct"""

CATEGORY_CORRECT_UNCONFIRMED = "correct-unconfirmed"
"""run result given by tool is correct but not confirmed according to SV-COMP rules"""

CATEGORY_WRONG = "wrong"
"""run result given by tool is wrong"""

CATEGORY_UNKNOWN = "unknown"
"""run result given by tool is "unknown" (i.e., no answer)"""

CATEGORY_ERROR = "error"
"""tool failed, crashed, or hit a resource limit"""

CATEGORY_MISSING = "missing"
"""BenchExec could not determine whether run result was correct or wrong
because no property was defined, and no other categories apply."""

# possible run results (output of a tool)
RESULT_DONE = "done"
"""tool terminated properly and true/false does not make sense"""

RESULT_UNKNOWN = "unknown"
"""tool could not find out an answer due to incompleteness"""

RESULT_ERROR = "ERROR"  # or any other value not listed here
"""tool could not complete due to an error
(it is recommended to instead use a string with more details about the error)"""

RESULT_TRUE_PROP = "true"
"""property holds"""

RESULT_FALSE_PROP = "false"
"""property does not hold"""

# shortcuts for tool-info modules that return results as required in SV-COMP
RESULT_FALSE_REACH = RESULT_FALSE_PROP + "(unreach-call)"
"""SV-COMP reachability property violated"""

RESULT_FALSE_TERMINATION = RESULT_FALSE_PROP + "(termination)"
"""SV-COMP termination property violated"""

RESULT_FALSE_OVERFLOW = RESULT_FALSE_PROP + "(no-overflow)"
"""SV-COMP overflow property violated"""

RESULT_FALSE_DEADLOCK = RESULT_FALSE_PROP + "(no-deadlock)"
"""deadlock property violated"""  # not yet part of SV-COMP

RESULT_FALSE_DEREF = RESULT_FALSE_PROP + "(valid-deref)"
"""SV-COMP valid-deref property violated"""

RESULT_FALSE_FREE = RESULT_FALSE_PROP + "(valid-free)"
"""SV-COMP valid-free property violated"""

RESULT_FALSE_MEMTRACK = RESULT_FALSE_PROP + "(valid-memtrack)"
"""SV-COMP valid-memtrack property violated"""

RESULT_FALSE_MEMCLEANUP = RESULT_FALSE_PROP + "(valid-memcleanup)"
"""SV-COMP valid-memcleanup property violated"""

RESULT_LIST_OTHER = [RESULT_DONE, RESULT_ERROR, RESULT_UNKNOWN]
"""list of unspecific standard results besides true/false"""

# Classification of results
# (coarse true/false/other buckets returned by get_result_classification())
RESULT_CLASS_TRUE = "true"
RESULT_CLASS_FALSE = "false"
RESULT_CLASS_OTHER = "other"

# Score values taken from http://sv-comp.sosy-lab.org/
# (use values 0 to disable scores completely for a given property).
# Note that wrong answers are penalized much more heavily than correct
# answers are rewarded.
_SCORE_CORRECT_TRUE = 2
_SCORE_CORRECT_UNCONFIRMED_TRUE = 0
_SCORE_CORRECT_FALSE = 1
_SCORE_CORRECT_UNCONFIRMED_FALSE = 0
_SCORE_UNKNOWN = 0
_SCORE_WRONG_FALSE = -16
_SCORE_WRONG_TRUE = -32
class ExpectedResult(collections.namedtuple("ExpectedResult", "result subproperty")):
    """Stores the expected result and respective information for a task"""

    __slots__ = ()  # reduce per-instance memory consumption

    def __str__(self):
        # "true"/"false" for a known verdict, "" for an unknown one.
        text = {True: "true", False: "false"}.get(self.result, "")
        if not (text and self.subproperty):
            return text
        return f"{text}({self.subproperty})"

    @classmethod
    def from_str(cls, s):
        """Parse a verdict string like "", "true", or "false(subprop)"."""
        if s == "":
            return ExpectedResult(None, None)
        parsed = re.match(r"^(true|false)(\((.+)\))?$", s)
        if parsed is None:
            raise ValueError(f"Not a valid expected verdict: {s}")
        verdict, _, subproperty = parsed.groups()
        return ExpectedResult(verdict == "true", subproperty)
class Property(collections.namedtuple("Property", "filename is_svcomp name")):
    """Stores information about a property"""

    __slots__ = ()  # reduce per-instance memory consumption

    def compute_score(self, category, result):
        # Scoring is only defined for SV-COMP properties.
        if not self.is_svcomp:
            return None
        return _svcomp_score(category, result)

    def max_score(self, expected_result):
        """
        Return the maximum possible score for a task that uses this property.
        @param expected_result:
            an ExpectedResult indicating whether the property is expected to hold for the task
        """
        if not self.is_svcomp or not expected_result:
            return None
        return _svcomp_max_score(expected_result.result)

    @property
    def nice_name(self):
        # Human-readable description, e.g. for table headers and logs.
        return (
            ("SV-COMP-" if self.is_svcomp else "")
            + "Property "
            + (f"from {self.filename}" if self.filename else self.name)
        )

    def __str__(self):
        return self.name

    @classmethod
    @functools.lru_cache()  # cache because it reads files
    def create(cls, propertyfile):
        """
        Create a Property instance by attempting to parse the given property file.
        @param propertyfile: A file name of a property file
        """
        with open(propertyfile) as f:
            # SV-COMP property files have every non-empty line start with CHECK,
            # and there needs to be at least one such line.
            is_svcomp = False
            for line in f.readlines():
                if line.rstrip():
                    if line.startswith("CHECK"):
                        # Found line with CHECK, might be an SV-COMP property
                        is_svcomp = True
                    else:
                        # Found line without CHECK, definitely not an SV-COMP property
                        is_svcomp = False
                        break
        # The property name is the base file name without its extension.
        name = os.path.splitext(os.path.basename(propertyfile))[0]
        return cls(propertyfile, is_svcomp, name)
def _svcomp_max_score(expected_result):
    """
    Return the maximum possible score for a task according to the SV-COMP scoring scheme.
    @param expected_result: whether the property is fulfilled for the task or not
    """
    # Guard-clause form; identity checks keep None (unknown) at score 0.
    if expected_result is True:
        return _SCORE_CORRECT_TRUE
    if expected_result is False:
        return _SCORE_CORRECT_FALSE
    return 0
def _svcomp_score(category, result):
    """
    Return the achieved score of a task according to the SV-COMP scoring scheme.
    @param category: result category as determined by get_result_category
    @param result: the result given by the tool
    """
    assert result is not None
    result_class = get_result_classification(result)

    # The asserts below document the invariant that correct/wrong categories
    # are only ever assigned to results that classify as true or false.
    if category == CATEGORY_CORRECT_UNCONFIRMED:
        if result_class == RESULT_CLASS_TRUE:
            return _SCORE_CORRECT_UNCONFIRMED_TRUE
        elif result_class == RESULT_CLASS_FALSE:
            return _SCORE_CORRECT_UNCONFIRMED_FALSE
        else:
            assert False

    elif category == CATEGORY_CORRECT:
        if result_class == RESULT_CLASS_TRUE:
            return _SCORE_CORRECT_TRUE
        elif result_class == RESULT_CLASS_FALSE:
            return _SCORE_CORRECT_FALSE
        else:
            assert False, result

    elif category == CATEGORY_WRONG:
        if result_class == RESULT_CLASS_TRUE:
            return _SCORE_WRONG_TRUE
        elif result_class == RESULT_CLASS_FALSE:
            return _SCORE_WRONG_FALSE
        else:
            assert False

    else:
        # unknown/error/missing results neither gain nor lose points
        return _SCORE_UNKNOWN
def get_result_classification(result):
    """
    Classify the given result into "true" (property holds),
    "false" (property does not hold), "unknown", and "error".
    @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized).
    @return One of RESULT_CLASS_* strings
    """
    if not result:
        return RESULT_CLASS_OTHER
    if result == RESULT_TRUE_PROP:
        return RESULT_CLASS_TRUE
    # "false" alone or "false(<subproperty>)" both count as a false verdict.
    if result == RESULT_FALSE_PROP or (
        result.startswith(RESULT_FALSE_PROP + "(") and result.endswith(")")
    ):
        return RESULT_CLASS_FALSE
    return RESULT_CLASS_OTHER
def get_result_category(expected_results, result, properties):
    """
    This function determines the relation between actual result and expected result
    for the given file and properties.
    @param expected_results: mapping from property file name to the ExpectedResult of the task
    @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized).
    @param properties: The list of Property instances to check.
    @return One of the CATEGORY_* strings.
    """
    result_class = get_result_classification(result)
    if result_class == RESULT_CLASS_OTHER:
        # No true/false verdict: distinguish "don't know" and "done" from failure.
        if result == RESULT_UNKNOWN:
            return CATEGORY_UNKNOWN
        elif result == RESULT_DONE:
            return CATEGORY_MISSING
        else:
            return CATEGORY_ERROR

    if not properties:
        # Without property we cannot return correct or wrong results.
        return CATEGORY_MISSING

    # For now, we have at most one property
    assert len(properties) == 1, properties
    prop = properties[0]

    expected_result = expected_results.get(prop.filename)
    if not expected_result or expected_result.result is None:
        # expected result of task is unknown
        return CATEGORY_MISSING

    if expected_result.subproperty:
        # Only "true" or a "false(...)" naming exactly this subproperty is valid.
        is_valid_result = result in {
            RESULT_TRUE_PROP,
            f"{RESULT_FALSE_PROP}({expected_result.subproperty})",
        }
    else:
        is_valid_result = (result == RESULT_TRUE_PROP) or result.startswith(
            RESULT_FALSE_PROP
        )

    if not is_valid_result:
        return CATEGORY_UNKNOWN  # result does not match property

    if expected_result.result:
        return CATEGORY_CORRECT if result_class == RESULT_CLASS_TRUE else CATEGORY_WRONG
    else:
        if expected_result.subproperty:
            # A generic or differently-named "false(...)" verdict is wrong here.
            return (
                CATEGORY_CORRECT
                if result == f"{RESULT_FALSE_PROP}({expected_result.subproperty})"
                else CATEGORY_WRONG
            )
        else:
            return (
                CATEGORY_CORRECT
                if result_class == RESULT_CLASS_FALSE
                else CATEGORY_WRONG
            )
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protectionutil
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/slice"
)
// IsDeletionCandidate checks if object is candidate to be deleted, i.e. its
// deletion timestamp is set and our finalizer is still present on it.
func IsDeletionCandidate(obj metav1.Object, finalizer string) bool {
	if obj.GetDeletionTimestamp() == nil {
		// Not being deleted at all.
		return false
	}
	return slice.ContainsString(obj.GetFinalizers(), finalizer, nil)
}
// NeedToAddFinalizer checks if need to add finalizer to object
func NeedToAddFinalizer(obj metav1.Object, finalizer string) bool {
return obj.GetDeletionTimestamp() == nil && !slice.ContainsString(obj.GetFinalizers(),
finalizer, nil)
} | go | github | https://github.com/kubernetes/kubernetes | pkg/controller/volume/protectionutil/utils.go |
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.routing
import io.ktor.http.*
import io.ktor.server.application.*
import io.ktor.server.request.*
import io.ktor.server.response.*
import io.ktor.util.*
import kotlinx.coroutines.*
import kotlin.coroutines.*
/**
* An application call handled by [RoutingRoot].
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPipelineCall)
*
* @property call original call from [io.ktor.server.engine.ApplicationEngine]
* @property route is the selected route
*/
public class RoutingPipelineCall(
    public val engineCall: PipelineCall,
    public val route: RoutingNode,
    override val coroutineContext: CoroutineContext,
    receivePipeline: ApplicationReceivePipeline,
    responsePipeline: ApplicationSendPipeline,
    public val pathParameters: Parameters
) : PipelineCall, CoroutineScope {

    // Kept for source/binary compatibility: derives the coroutine context
    // from the engine call instead of taking it explicitly.
    @Deprecated(level = DeprecationLevel.WARNING, message = "Use explicit coroutineContext instead.")
    public constructor(
        engineCall: PipelineCall,
        route: RoutingNode,
        receivePipeline: ApplicationReceivePipeline,
        responsePipeline: ApplicationSendPipeline,
        pathParameters: Parameters
    ) : this(
        engineCall,
        route,
        engineCall.coroutineContext,
        receivePipeline,
        responsePipeline,
        pathParameters
    )

    // Application and attributes are delegated to the underlying engine call.
    override val application: Application get() = engineCall.application
    override val attributes: Attributes get() = engineCall.attributes

    // Wrap the engine request/response so the routing-specific receive and
    // send pipelines run instead of the engine defaults.
    override val request: RoutingPipelineRequest =
        RoutingPipelineRequest(this, receivePipeline, engineCall.request)
    override val response: RoutingPipelineResponse =
        RoutingPipelineResponse(this, responsePipeline, engineCall.response)

    // Merged view of the engine call's parameters and the path parameters;
    // appendMissing() means engine parameters win for duplicated keys.
    // LazyThreadSafetyMode.NONE skips locking — presumably a call is only
    // processed by one coroutine at a time; confirm before relying on it.
    override val parameters: Parameters by lazy(LazyThreadSafetyMode.NONE) {
        Parameters.build {
            appendAll(engineCall.parameters)
            appendMissing(pathParameters)
        }
    }

    override fun toString(): String = "RoutingApplicationCall(route=$route)"
}
/**
* An application request handled by [RoutingRoot].
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPipelineRequest)
*/
// Thin wrapper: delegates all request behavior to the engine request while
// substituting the routing-aware call and receive pipeline.
public class RoutingPipelineRequest(
    override val call: RoutingPipelineCall,
    override val pipeline: ApplicationReceivePipeline,
    public val engineRequest: PipelineRequest
) : PipelineRequest by engineRequest
/**
* An application response handled by [RoutingRoot].
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPipelineResponse)
*/
// Thin wrapper: delegates all response behavior to the engine response while
// substituting the routing-aware call and send pipeline.
public class RoutingPipelineResponse(
    override val call: RoutingPipelineCall,
    override val pipeline: ApplicationSendPipeline,
    public val engineResponse: PipelineResponse
) : PipelineResponse by engineResponse
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Language pack store."""
__metaclass__ = type
__all__ = [
'LanguagePack',
'LanguagePackSet',
]
from sqlobject import ForeignKey
from zope.interface import implements
from lp.services.database.constants import UTC_NOW
from lp.services.database.datetimecol import UtcDateTimeCol
from lp.services.database.enumcol import EnumCol
from lp.services.database.sqlbase import (
SQLBase,
sqlvalues,
)
from lp.translations.enums import LanguagePackType
from lp.translations.interfaces.languagepack import (
ILanguagePack,
ILanguagePackSet,
)
class LanguagePack(SQLBase):
    """Database record for one exported language pack."""

    implements(ILanguagePack)

    _table = 'LanguagePack'

    # Librarian file holding the actual exported language pack archive.
    file = ForeignKey(
        foreignKey='LibraryFileAlias', dbName='file', notNull=True)

    date_exported = UtcDateTimeCol(notNull=True, default=UTC_NOW)

    distroseries = ForeignKey(
        foreignKey='DistroSeries', dbName='distroseries', notNull=True)

    # FULL packs stand alone; DELTA packs update an existing base pack.
    type = EnumCol(
        enum=LanguagePackType, notNull=True, default=LanguagePackType.FULL)

    # For DELTA packs only: the base pack this one provides updates for.
    updates = ForeignKey(
        foreignKey='LanguagePack', dbName='updates',
        notNull=False, default=None)
class LanguagePackSet:
    """Factory utility for creating LanguagePack records."""

    implements(ILanguagePackSet)

    def addLanguagePack(self, distroseries, file_alias, type):
        """See `ILanguagePackSet`."""
        # NOTE(review): `type` shadows the builtin but is part of the public
        # signature; also assert-based validation disappears under -O.
        assert type in LanguagePackType, (
            'Unknown language pack type: %s' % type.name)

        # A delta pack is meaningless without a base pack to diff against.
        if (type == LanguagePackType.DELTA and
            distroseries.language_pack_base is None):
            raise AssertionError(
                "There is no base language pack available for %s's %s to get"
                " deltas from." % sqlvalues(
                    distroseries.distribution.name, distroseries.name))

        updates = None
        if type == LanguagePackType.DELTA:
            updates = distroseries.language_pack_base

        return LanguagePack(
            file=file_alias, date_exported=UTC_NOW, distroseries=distroseries,
            type=type, updates=updates)
"""
34. Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from __future__ import unicode_literals
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TaggedItem(models.Model):
    """A tag on an item."""
    tag = models.SlugField()
    # Generic foreign key: the tagged object may belong to any model.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()

    class Meta:
        ordering = ["tag", "content_type__name"]

    def __str__(self):
        return self.tag


class TaggedItemSubclass_Doc_Placeholder:  # pragma: no cover
    pass
@python_2_unicode_compatible
class Comparison(models.Model):
    """
    A model that tests having multiple GenericForeignKeys
    """
    comparative = models.CharField(max_length=50)

    # First generic endpoint of the comparison.
    content_type1 = models.ForeignKey(ContentType, related_name="comparative1_set")
    object_id1 = models.PositiveIntegerField()

    # Second generic endpoint of the comparison.
    content_type2 = models.ForeignKey(ContentType, related_name="comparative2_set")
    object_id2 = models.PositiveIntegerField()

    # Each GenericForeignKey names its own ct/fk field pair.
    first_obj = generic.GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
    other_obj = generic.GenericForeignKey(ct_field="content_type2", fk_field="object_id2")

    def __str__(self):
        return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
@python_2_unicode_compatible
class Animal(models.Model):
    common_name = models.CharField(max_length=150)
    latin_name = models.CharField(max_length=150)

    # Reverse side of TaggedItem's generic FK.
    tags = generic.GenericRelation(TaggedItem)
    # Points at Comparison's *first* endpoint fields.
    comparisons = generic.GenericRelation(Comparison,
                                          object_id_field="object_id1",
                                          content_type_field="content_type1")

    def __str__(self):
        return self.common_name


@python_2_unicode_compatible
class Vegetable(models.Model):
    name = models.CharField(max_length=150)
    is_yucky = models.BooleanField(default=True)

    tags = generic.GenericRelation(TaggedItem)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Mineral(models.Model):
    name = models.CharField(max_length=150)
    hardness = models.PositiveSmallIntegerField()

    # note the lack of an explicit GenericRelation here...
    def __str__(self):
        return self.name
class GeckoManager(models.Manager):
    """Manager whose default queryset hides geckos without a tail."""
    def get_queryset(self):
        base_qs = super(GeckoManager, self).get_queryset()
        return base_qs.filter(has_tail=True)
class Gecko(models.Model):
    """Model whose default manager (GeckoManager) filters on has_tail."""
    has_tail = models.BooleanField(default=False)
    objects = GeckoManager()
# To test fix for #11263
class Rock(Mineral):
    """Mineral subclass that adds its own generic tags relation."""
    tags = generic.GenericRelation(TaggedItem)
class ManualPK(models.Model):
    """Taggable model with a manually assigned integer primary key."""
    id = models.IntegerField(primary_key=True)
    tags = generic.GenericRelation(TaggedItem)
class ForProxyModelModel(models.Model):
    """Model whose generic FK resolves proxy models (for_concrete_model=False)."""
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    obj = generic.GenericForeignKey(for_concrete_model=False)
    title = models.CharField(max_length=255, null=True)
class ForConcreteModelModel(models.Model):
    """Counterpart to ForProxyModelModel using the default (concrete) GFK."""
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    obj = generic.GenericForeignKey()
class ConcreteRelatedModel(models.Model):
    """Concrete model with a reverse generic relation keyed on proxy types."""
    bases = generic.GenericRelation(ForProxyModelModel, for_concrete_model=False)
class ProxyRelatedModel(ConcreteRelatedModel):
    """Proxy of ConcreteRelatedModel (no new table or fields)."""
    class Meta:
        proxy = True
#!/usr/bin/env python
"""
/**
* Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
**/
"""
import argparse
import httplib
import logging
import logging.handlers
import signal
import ssl
import sys
import threading
import time, calendar
from functools import wraps
import mqttHandler
import iotUtils
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Overriding the default SSL version used in some of the Python (2.7.x) versions
# This is a known issue in earlier Python releases
# But was fixed in later versions. Ex-2.7.11
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def sslwrap(func):
    """Wrap *func* so that TLSv1 is used when no ssl_version is supplied.

    Older Python 2.7.x releases pick an unsafe/broken default protocol in
    ssl.wrap_socket; this decorator changes the *default* to TLSv1.
    Using setdefault (instead of an unconditional assignment) means a
    caller that explicitly passes ssl_version= is still honored.
    """
    @wraps(func)
    def bar(*args, **kw):
        # Only fill in the protocol when the caller did not choose one.
        kw.setdefault('ssl_version', ssl.PROTOCOL_TLSv1)
        return func(*args, **kw)
    return bar
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PUSH_INTERVAL = 2 # time interval between successive data pushes in seconds
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Logger defaults
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LOG_FILENAME = "agent.log"
logging_enabled = False # when True, configureLogger redirects stdout/stderr into the log
LOG_LEVEL = logging.INFO # Could be e.g. "DEBUG" or "WARNING"
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python version
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if sys.version_info<(2,6,0):
    sys.stderr.write("You need python 2.6.0 or later to run this script\n")
    # NOTE(review): bare exit() depends on the site module; sys.exit(1) is safer.
    exit(1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Define and parse command line arguments
# If the log file is specified on the command line then override the default
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
parser = argparse.ArgumentParser(description="Python service to push RPi info to the Device Cloud")
parser.add_argument("-l", "--log", help="file to write log to (default '" + LOG_FILENAME + "')")
help_string_for_data_push_interval = "time interval between successive locker status push to server(default '" + \
                                     str(PUSH_INTERVAL) + "')"
parser.add_argument("-i", "--interval", type=int, help=help_string_for_data_push_interval)
args = parser.parse_args()
if args.log:
    LOG_FILENAME = args.log
if args.interval:
    PUSH_INTERVAL = args.interval
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Endpoint specific settings to connect with the IoT Server
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Splitting on ':' assumes iotUtils.HTTPS_EP looks like
# 'https://<host>:<port>' -- TODO confirm against iotUtils.
SERVER_ENDPOINT = iotUtils.HTTPS_EP.split(":")
SERVER_IP = SERVER_ENDPOINT[1].replace('//', '')
SERVER_PORT = int(SERVER_ENDPOINT[2])
API_ENDPOINT_CONTEXT = iotUtils.CONTROLLER_CONTEXT
REGISTER_ENDPOINT = str(API_ENDPOINT_CONTEXT) + '/device/register'
PUSH_SENSOR_VALUE_ENDPOINT = str(API_ENDPOINT_CONTEXT) + '/push-sensor-value'
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# A class we can use to capture stdout and sterr in the log
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class IOTLogger(object):
    """File-like adapter that forwards writes to a logger at a fixed level.

    Used to replace sys.stdout / sys.stderr so print output lands in the
    rotating agent log instead of the console.
    """
    def __init__(self, logger, level):
        """Needs a logger and a logger level."""
        self.logger = logger
        self.level = level
    def write(self, message):
        stripped = message.rstrip()
        # Skip whitespace-only writes (e.g. bare newlines from print).
        if stripped:
            self.logger.log(self.level, stripped)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Configure logging to log to a file,
# making a new file at midnight and keeping the last 3 day's data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def configureLogger(loggerName):
    """Configure the named logger with a midnight-rotating file handler.

    Writes to LOG_FILENAME, rotating at midnight and keeping 3 backups.
    When the module-level logging_enabled flag is True, also redirects
    sys.stdout / sys.stderr into the logger via IOTLogger.
    """
    logger = logging.getLogger(loggerName)
    logger.setLevel(LOG_LEVEL) # Set the log level to LOG_LEVEL
    handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="midnight",
                                                        backupCount=3) # Handler that writes to a file,
    # ~~~make new file at midnight and keep 3 backups
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') # Format each log message like this
    handler.setFormatter(formatter) # Attach the formatter to the handler
    logger.addHandler(handler) # Attach the handler to the logger
    if (logging_enabled):
        sys.stdout = IOTLogger(logger, logging.INFO) # Replace stdout with logging to file at INFO level
        sys.stderr = IOTLogger(logger, logging.ERROR) # Replace stderr with logging to file at ERROR level
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This method is for register the sensor agent into the Device-Cloud
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def registerAgent():
    """Register this device with the IoT Server over HTTPS.

    POSTs the device description (DEVICE_INFO + DEVICE_DATA with a zero
    sensor value) to REGISTER_ENDPOINT and records the outcome in
    iotUtils.IS_REGISTERED so the main loop knows whether to retry.
    """
    ssl.wrap_socket = sslwrap(ssl.wrap_socket) # using the overridden sslwrap that uses TLSv1
    if sys.version_info<(2,7,9):
        dcConncection = httplib.HTTPSConnection(host=SERVER_IP, port=SERVER_PORT)
    else:
        # 2.7.9+ verifies server certificates by default; verification is
        # explicitly disabled here (see TODO below).
        dcConncection = httplib.HTTPSConnection(host=SERVER_IP, port=SERVER_PORT
                                                , context=ssl._create_unverified_context())
    #TODO need to get server certificate when initializing https connection
    dcConncection.set_debuglevel(1)
    dcConncection.connect()
    # Presumably DEVICE_INFO/DEVICE_DATA templates omit the trailing '}' of
    # the JSON payload, which is appended below -- TODO confirm in iotUtils.
    PUSH_DATA = iotUtils.DEVICE_INFO + iotUtils.DEVICE_DATA.format(sensorValue=0.0)
    PUSH_DATA += '}'
    print PUSH_DATA
    registerURL = str(REGISTER_ENDPOINT)
    dcConncection.putrequest('POST', registerURL)
    dcConncection.putheader('Authorization', 'Bearer ' + iotUtils.AUTH_TOKEN)
    dcConncection.putheader('Content-Type', 'application/json')
    dcConncection.putheader('Content-Length', len(PUSH_DATA))
    dcConncection.endheaders()
    dcConncection.send(PUSH_DATA)
    dcResponse = dcConncection.getresponse()
    # Any non-error status (2xx/3xx) is treated as successful registration.
    if(dcResponse.status < 400):
        iotUtils.IS_REGISTERED = True
        print "Your device has been registered with IoT Server"
    else:
        iotUtils.IS_REGISTERED = False
        print "Your device hasn't been registered with IoT Server"
        print ('agentStats: ' + str(dcResponse.status))
        print ('agentStats: ' + str(dcResponse.reason))
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print ('agentStats: ' + str(registerURL))
    print ('agentStats: Response Message')
    print str(dcResponse.msg)
    dcConncection.close()
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This is a Thread object for listening for MQTT Messages
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ListenMQTTThread(object):
    """Runs the MQTT subscriber loop on a background daemon thread."""
    def __init__(self):
        worker = threading.Thread(target=self.run, args=())
        worker.daemon = True  # daemonize so the thread never blocks exit
        worker.start()
    def run(self):
        """Thread body: delegate to the MQTT handler's main loop."""
        mqttHandler.main()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# When sysvinit sends the TERM signal, cleanup before exiting
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def sigterm_handler(_signo, _stack_frame):
    """Exit cleanly (status 0) when the service manager delivers SIGTERM."""
    farewell = "[] received signal {}, exiting...".format(_signo)
    print(farewell)
    sys.exit(0)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# generate random sensor value
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def getSensorValue():
    """Return a fresh (randomized) sensor reading from iotUtils."""
    reading = iotUtils.generateRandomSensorValues()
    return reading
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Install the handler so a service-manager TERM triggers a clean sys.exit(0).
signal.signal(signal.SIGTERM, sigterm_handler)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This method is used to send sensor reading to DAS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def pushSensorValue():
    """POST the current (random) sensor reading to PUSH_SENSOR_VALUE_ENDPOINT.

    Mirrors registerAgent's connection handling; the HTTP response is only
    printed, not used to update any state.
    """
    if sys.version_info<(2,7,9):
        dcConncection = httplib.HTTPSConnection(host=SERVER_IP, port=SERVER_PORT)
    else:
        # 2.7.9+ verifies server certificates by default; verification is
        # explicitly disabled here (see TODO below).
        dcConncection = httplib.HTTPSConnection(host=SERVER_IP, port=SERVER_PORT
                                                , context=ssl._create_unverified_context())
    #TODO need to get server certificate when initializing https connection
    dcConncection.set_debuglevel(1)
    dcConncection.connect()
    # Presumably the templates omit the trailing '}' of the JSON payload,
    # which is appended below -- TODO confirm in iotUtils.
    PUSH_DATA = iotUtils.DEVICE_INFO + iotUtils.DEVICE_DATA.format(sensorValue=getSensorValue())
    PUSH_DATA += '}'
    print PUSH_DATA
    regist = str(PUSH_SENSOR_VALUE_ENDPOINT)
    dcConncection.putrequest('POST', regist)
    dcConncection.putheader('Authorization', 'Bearer ' + iotUtils.AUTH_TOKEN)
    dcConncection.putheader('Content-Type', 'application/json')
    dcConncection.putheader('Content-Length', len(PUSH_DATA))
    dcConncection.endheaders()
    dcConncection.send(PUSH_DATA)
    dcResponse = dcConncection.getresponse()
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print ('agentStats: ' + str(regist))
    print ('agentStats: Response Message')
    print str(dcResponse.msg)
    dcConncection.close()
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The Main method of the Agent
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def main():
    """Agent entry point: set up logging, start the MQTT listener, register,
    then publish sensor readings over MQTT every PUSH_INTERVAL seconds."""
    configureLogger("agent")
    ListenMQTTThread()
    registerAgent() # Call the register endpoint and register Device I
    while True:
        try:
            if(iotUtils.IS_REGISTERED):
                # Timestamp is epoch seconds in UTC.
                currentTime = calendar.timegm(time.gmtime())
                tempValue = getSensorValue()
                PUSH_DATA = iotUtils.SENSOR_STATS.format(currentTime, tempValue)
                mqttHandler.sendSensorValue(PUSH_DATA)
                print '~~~~~~~~~~~~~~~~~~~~~~~~ Publishing Device-Data ~~~~~~~~~~~~~~~~~~~~~~~~~'
                print ('PUBLISHED DATA: ' + PUSH_DATA)
            else:
                # Not registered (or registration failed) -- retry instead of publishing.
                registerAgent()
            time.sleep(PUSH_INTERVAL)
        except (KeyboardInterrupt, Exception) as e:
            # NOTE(review): catching KeyboardInterrupt keeps the loop alive,
            # so Ctrl-C cannot stop the agent -- confirm this is intended.
            print "agentStats: Exception in AgentThread (either KeyboardInterrupt or Other)"
            print ("agentStats: " + str(e))
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
            pass
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run the agent only when executed directly (not on import).
if __name__ == "__main__":
    main()
from meowth.core.data_manager import schema
def setup(bot):
    """Build and return the 'languages' reference table for the bot's DB.

    Declares the column schema and seeds the table with the supported
    language rows; the caller is responsible for committing the table.
    """
    table = bot.dbi.table('languages')
    table.new_columns = [
        schema.IDColumn('language_id', primary_key=True),
        schema.StringColumn('iso639', required=True),
        schema.StringColumn('iso3166', required=True),
        schema.StringColumn('identifier', required=True, unique=True),
        schema.BoolColumn('official', required=True)
    ]
    # Seed rows as (language_id, iso639, iso3166, identifier, official).
    seed_rows = [
        (9, "en", "us", "en", True),
        (12, "en", "gb", "en-gb", False),
        (1, "ja", "jp", "ja", True),
        (11, "ja", "jp", "ja-kanji", True),
        (2, "ja", "jp", "roomaji", True),
        (3, "ko", "kr", "ko", True),
        (4, "zh", "cn", "zh", True),
        (5, "fr", "fr", "fr", True),
        (6, "de", "de", "de", True),
        (7, "es", "es", "es", True),
        (8, "it", "it", "it", True),
        (10, "cs", "cz", "cs", False),
    ]
    table.initial_data = [
        {
            "language_id": lang_id,
            "iso639": iso639,
            "iso3166": iso3166,
            "identifier": identifier,
            "official": official,
        }
        for lang_id, iso639, iso3166, identifier, official in seed_rows
    ]
    return table
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, String, Integer, Boolean
from airflow.models.base import Base, ID_LEN
class User(Base):
    """Minimal user record backed by the ``users`` table."""
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    # Unique login handle; length capped at the shared ID_LEN constant.
    username = Column(String(ID_LEN), unique=True)
    email = Column(String(500))
    superuser = Column(Boolean(), default=False)
    def __repr__(self):
        return self.username
    def get_id(self):
        """Return the primary key as a string.

        Presumably for login-framework compatibility -- TODO confirm caller.
        """
        return str(self.id)
    def is_superuser(self):
        """Return the superuser flag for this user."""
        return self.superuser
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core import web_contents
class Oobe(web_contents.WebContents):
    """WebContents wrapper that drives login flows via the page's
    Oobe.* JavaScript test APIs (guest, fake, and real GAIA login)."""
    def __init__(self, inspector_backend):
        super(Oobe, self).__init__(inspector_backend)
    def _GaiaLoginContext(self):
        """Return the JS context id whose document has the GAIA 'Email' field,
        or None if no loaded context contains it."""
        max_context_id = self.EnableAllContexts()
        logging.debug('%d contexts in Gaia page' % max_context_id)
        for gaia_context in range(max_context_id + 1):
            try:
                if self.EvaluateJavaScriptInContext(
                        "document.readyState == 'complete' && "
                        "document.getElementById('Email') != null",
                        gaia_context):
                    return gaia_context
            except exceptions.EvaluateException:
                # Context may not be evaluatable (yet); skip it and keep looking.
                pass
        return None
    def _ExecuteOobeApi(self, api, *args):
        """Invoke an Oobe.* JS API, passing every argument as a JS string.

        Raises exceptions.LoginException if the API is not defined in the page.
        """
        logging.info('Invoking %s' % api)
        self.WaitForJavaScriptExpression("typeof Oobe == 'function'", 20)
        if self.EvaluateJavaScript("typeof %s == 'undefined'" % api):
            raise exceptions.LoginException('%s js api missing' % api)
        # Builds e.g. "api('a','b');" -- one quoted '%s' slot per argument.
        js = api + '(' + ("'%s'," * len(args)).rstrip(',') + ');'
        self.ExecuteJavaScript(js % args)
    def NavigateGuestLogin(self):
        """Logs in as guest."""
        self._ExecuteOobeApi('Oobe.guestLoginForTesting')
    def NavigateFakeLogin(self, username, password):
        """Fake user login."""
        self._ExecuteOobeApi('Oobe.loginForTesting', username, password)
    def NavigateGaiaLogin(self, username, password):
        """Logs in to GAIA with provided credentials."""
        self._ExecuteOobeApi('Oobe.addUserForTesting')
        gaia_context = util.WaitFor(self._GaiaLoginContext, timeout=30)
        # NOTE(review): credentials are interpolated into JS without escaping;
        # values containing quotes would break the script -- confirm inputs.
        self.ExecuteJavaScriptInContext("""
            document.getElementById('Email').value='%s';
            document.getElementById('Passwd').value='%s';
            document.getElementById('signIn').click();"""
                % (username, password),
            gaia_context)
# Copyright 2012 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import types
from boto.gs.user import User
from boto.exception import InvalidCorsError
from xml.sax import handler
# Relevant tags for the CORS XML document.
# Nesting: CorsConfig > Cors > {Origins,Methods,ResponseHeaders,MaxAgeSec}
# with Origin/Method/ResponseHeader leaves inside the first three.
CORS_CONFIG = 'CorsConfig'
CORS = 'Cors'
ORIGINS = 'Origins'
ORIGIN = 'Origin'
METHODS = 'Methods'
METHOD = 'Method'
HEADERS = 'ResponseHeaders'
HEADER = 'ResponseHeader'
MAXAGESEC = 'MaxAgeSec'
class Cors(handler.ContentHandler):
    """Encapsulates the CORS configuration XML document.

    Acts as a SAX ContentHandler: startElement/endElement incrementally
    build an in-memory representation of the document, and to_xml()
    serializes that representation back to an XML string.
    """
    def __init__(self):
        # List of CORS elements found within a CorsConfig element.
        self.cors = []
        # List of collections (e.g. Methods, ResponseHeaders, Origins)
        # found within a CORS element. We use a list of lists here
        # instead of a dictionary because the collections need to be
        # preserved in the order in which they appear in the input XML
        # document (and Python dictionary keys are inherently unordered).
        # The elements on this list are two element tuples of the form
        # (collection name, [list of collection contents]).
        self.collections = []
        # Lists of elements within a collection. Again a list is needed to
        # preserve ordering but also because the same element may appear
        # multiple times within a collection.
        self.elements = []
        # Dictionary mapping supported collection names to element types
        # which may be contained within each.
        self.legal_collections = {
            ORIGINS : [ORIGIN],
            METHODS : [METHOD],
            HEADERS : [HEADER],
            MAXAGESEC: []
        }
        # List of supported element types within any collection, used for
        # checking validity of a parsed element name.
        self.legal_elements = [ORIGIN, METHOD, HEADER]
        # Nesting depth: 0 = root, 1 = inside CorsConfig, 2 = inside Cors,
        # 3 = inside a collection.
        self.parse_level = 0
        self.collection = None
        self.element = None
    def validateParseLevel(self, tag, level):
        """Verify parse level for a given tag; raise InvalidCorsError otherwise."""
        if self.parse_level != level:
            raise InvalidCorsError('Invalid tag %s at parse level %d: ' %
                                   (tag, self.parse_level))
    def startElement(self, name, attrs, connection):
        """SAX XML logic for parsing new element found."""
        if name == CORS_CONFIG:
            self.validateParseLevel(name, 0)
            self.parse_level += 1;
        elif name == CORS:
            self.validateParseLevel(name, 1)
            self.parse_level += 1;
        elif name in self.legal_collections:
            self.validateParseLevel(name, 2)
            self.parse_level += 1;
            self.collection = name
        elif name in self.legal_elements:
            self.validateParseLevel(name, 3)
            # Make sure this tag is found inside a collection tag.
            if self.collection is None:
                raise InvalidCorsError('Tag %s found outside collection' % name)
            # Make sure this tag is allowed for the current collection tag.
            if name not in self.legal_collections[self.collection]:
                raise InvalidCorsError('Tag %s not allowed in %s collection' %
                                       (name, self.collection))
            self.element = name
        else:
            raise InvalidCorsError('Unsupported tag ' + name)
    def endElement(self, name, value, connection):
        """SAX XML logic for parsing the end of an element."""
        if name == CORS_CONFIG:
            self.validateParseLevel(name, 1)
            self.parse_level -= 1;
        elif name == CORS:
            self.validateParseLevel(name, 2)
            self.parse_level -= 1;
            # Terminating a CORS element, save any collections we found
            # and re-initialize collections list.
            self.cors.append(self.collections)
            self.collections = []
        elif name in self.legal_collections:
            self.validateParseLevel(name, 3)
            if name != self.collection:
                raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
                                       (self.collection, name))
            self.parse_level -= 1;
            if not self.legal_collections[name]:
                # If this collection doesn't contain any sub-elements, store
                # a tuple of name and this tag's element value.
                self.collections.append((name, value.strip()))
            else:
                # Otherwise, we're terminating a collection of sub-elements,
                # so store a tuple of name and list of contained elements.
                self.collections.append((name, self.elements))
                self.elements = []
            self.collection = None
        elif name in self.legal_elements:
            self.validateParseLevel(name, 3)
            # Make sure this tag is found inside a collection tag.
            if self.collection is None:
                raise InvalidCorsError('Tag %s found outside collection' % name)
            # Make sure this end tag is allowed for the current collection tag.
            if name not in self.legal_collections[self.collection]:
                raise InvalidCorsError('Tag %s not allowed in %s collection' %
                                       (name, self.collection))
            if name != self.element:
                raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
                                       (self.element, name))
            # Terminating an element tag, add it to the list of elements
            # for the current collection.
            self.elements.append((name, value.strip()))
            self.element = None
        else:
            raise InvalidCorsError('Unsupported end tag ' + name)
    def to_xml(self):
        """Convert CORS object into XML string representation."""
        s = '<' + CORS_CONFIG + '>'
        for collections in self.cors:
            s += '<' + CORS + '>'
            for (collection, elements_or_value) in collections:
                assert collection is not None
                s += '<' + collection + '>'
                # If collection elements has type string, append atomic value,
                # otherwise, append sequence of values in named tags.
                # NOTE(review): under Python 2 a unicode value would fail this
                # isinstance(str) check -- verify inputs are byte strings.
                if isinstance(elements_or_value, str):
                    s += elements_or_value
                else:
                    for (name, value) in elements_or_value:
                        assert name is not None
                        assert value is not None
                        s += '<' + name + '>' + value + '</' + name + '>'
                s += '</' + collection + '>'
            s += '</' + CORS + '>'
        s += '</' + CORS_CONFIG + '>'
        return s
def WebIDLTest(parser, harness):
    """WebIDL tests for Promise return-type restrictions on operations.

    Verifies that a Promise-returning operation cannot be a legacycaller,
    and that an operation's overloads must be uniformly Promise-returning
    or uniformly non-Promise-returning.
    """
    threw = False
    try:
        parser.parse("""
            interface _Promise {};
            interface A {
              legacycaller Promise<any> foo();
            };
        """)
        results = parser.finish()
    except:
        # Any parse/finish failure counts as the expected rejection.
        threw = True
    harness.ok(threw,
               "Should not allow Promise return values for legacycaller.")
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface _Promise {};
            interface A {
              Promise<any> foo();
              long foo(long arg);
            };
        """)
        results = parser.finish();
    except:
        threw = True
    harness.ok(threw,
               "Should not allow overloads which have both Promise and "
               "non-Promise return types.")
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface _Promise {};
            interface A {
              long foo(long arg);
              Promise<any> foo();
            };
        """)
        results = parser.finish();
    except:
        threw = True
    harness.ok(threw,
               "Should not allow overloads which have both Promise and "
               "non-Promise return types.")
    parser = parser.reset()
    # Positive case: overloads that are all Promise-returning must parse.
    parser.parse("""
        interface _Promise {};
        interface A {
          Promise<any> foo();
          Promise<any> foo(long arg);
        };
    """)
    results = parser.finish();
    harness.ok(True,
               "Should allow overloads which only have Promise and return "
               "types.")
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_wide_ip
short_description: Manages F5 BIG-IP GTM wide IP
description:
  - Manages F5 BIG-IP GTM wide IP.
version_added: 2.0
options:
pool_lb_method:
description:
- Specifies the load balancing method used to select a pool in this wide
IP. This setting is relevant only when multiple pools are configured
for a wide IP.
type: str
required: True
aliases: ['lb_method']
choices:
- round-robin
- ratio
- topology
- global-availability
version_added: 2.5
name:
description:
- Wide IP name. This name must be formatted as a fully qualified
domain name (FQDN). You can also use the alias C(wide_ip) but this
is deprecated and will be removed in a future Ansible version.
type: str
required: True
aliases:
- wide_ip
type:
description:
- Specifies the type of wide IP. GTM wide IPs need to be keyed by query
type in addition to name, since pool members need different attributes
depending on the response RDATA they are meant to supply. This value
is required if you are using BIG-IP versions >= 12.0.0.
type: str
choices:
- a
- aaaa
- cname
- mx
- naptr
- srv
version_added: 2.4
state:
description:
- When C(present) or C(enabled), ensures that the Wide IP exists and
is enabled.
- When C(absent), ensures that the Wide IP has been removed.
- When C(disabled), ensures that the Wide IP exists and is disabled.
type: str
choices:
- present
- absent
- disabled
- enabled
default: present
version_added: 2.4
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
pools:
description:
- The pools that you want associated with the Wide IP.
- If C(ratio) is not provided when creating a new Wide IP, it will default
to 1.
type: list
suboptions:
name:
description:
- The name of the pool to include.
type: str
required: True
ratio:
description:
- Ratio for the pool.
- The system uses this number with the Ratio load balancing method.
type: int
version_added: 2.5
irules:
description:
- List of rules to be applied.
- If you want to remove all existing iRules, specify a single empty value; C("").
See the documentation for an example.
type: list
version_added: 2.6
aliases:
description:
- Specifies alternate domain names for the web site content you are load
balancing.
- You can use the same wildcard characters for aliases as you can for actual
wide IP names.
type: list
version_added: 2.7
last_resort_pool:
description:
    - Specifies which GTM pool the system uses as the last resort pool for
      the wide IP.
- The valid pools for this parameter are those with the C(type) specified in this
module.
type: str
version_added: 2.8
notes:
- Support for TMOS versions below v12.x has been deprecated for this module, and will be removed in Ansible 2.12.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set lb method
bigip_gtm_wide_ip:
pool_lb_method: round-robin
name: my-wide-ip.example.com
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add iRules to the Wide IP
bigip_gtm_wide_ip:
pool_lb_method: round-robin
name: my-wide-ip.example.com
irules:
- irule1
- irule2
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove one iRule from the Virtual Server
bigip_gtm_wide_ip:
pool_lb_method: round-robin
name: my-wide-ip.example.com
irules:
- irule1
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove all iRules from the Virtual Server
bigip_gtm_wide_ip:
pool_lb_method: round-robin
name: my-wide-ip.example.com
irules: ""
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Assign a pool with ratio to the Wide IP
bigip_gtm_wide_ip:
pool_lb_method: round-robin
name: my-wide-ip.example.com
pools:
- name: pool1
ratio: 100
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
lb_method:
description: The new load balancing method used by the wide IP.
returned: changed
type: str
sample: topology
state:
description: The new state of the wide IP.
returned: changed
type: str
sample: disabled
irules:
description: iRules set on the Wide IP.
returned: changed
type: list
sample: ['/Common/irule1', '/Common/irule2']
aliases:
description: Aliases set on the Wide IP.
returned: changed
type: list
sample: ['alias1.foo.com', '*.wildcard.domain']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import is_valid_fqdn
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import is_valid_fqdn
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the GTM wide IP module."""
    # Maps BIG-IP REST API attribute names to this module's parameter names.
    api_map = {
        'poolLbMode': 'pool_lb_method',
        'rules': 'irules',
        'lastResortPool': 'last_resort_pool',
    }
    # Parameters compared between desired and current state to detect changes.
    updatables = [
        'pool_lb_method',
        'state',
        'pools',
        'irules',
        'enabled',
        'disabled',
        'aliases',
        'last_resort_pool',
    ]
    # Parameters reported back to the user in the module result.
    returnables = [
        'name',
        'pool_lb_method',
        'state',
        'pools',
        'irules',
        'aliases',
        'last_resort_pool',
    ]
    # API-side attribute names sent in create/update REST payloads.
    api_attributes = [
        'poolLbMode',
        'enabled',
        'disabled',
        'pools',
        'rules',
        'aliases',
        'lastResortPool',
    ]
class ApiParameters(Parameters):
    """Parameters in the form returned by the BIG-IP REST API."""
    @property
    def disabled(self):
        # The API keeps enabled/disabled as separate flags; coerce to bool.
        return self._values['disabled'] is True

    @property
    def enabled(self):
        return self._values['enabled'] is True

    @property
    def pools(self):
        raw = self._values['pools']
        if raw is None:
            return []
        result = []
        # Respect the API's configured ordering before normalizing names.
        for item in sorted(raw, key=lambda x: x['order']):
            entry = dict(item)
            full_name = '/{0}/{1}'.format(entry.pop('partition'), entry.pop('name'))
            # Drop bookkeeping keys that are not compared by the module.
            entry.pop('nameReference')
            entry.pop('order')
            entry['name'] = full_name
            result.append(entry)
        return result

    @property
    def last_resort_pool(self):
        value = self._values['last_resort_pool']
        # Normalize the various "unset" spellings to the empty string.
        return '' if value in (None, '', 'none') else value
class ModuleParameters(Parameters):
    """Parameters as supplied by the Ansible task, normalized for the API."""
    @property
    def last_resort_pool(self):
        pool = self._values['last_resort_pool']
        # Empty/none-ish values mean "no last resort pool".
        if pool in (None, '', 'none'):
            return ''
        return '{0} {1}'.format(self.type, fq_name(self.partition, pool))

    @property
    def pool_lb_method(self):
        method = self._values['pool_lb_method']
        return None if method is None else str(method)

    @property
    def type(self):
        record_type = self._values['type']
        return None if record_type is None else str(record_type)

    @property
    def name(self):
        name = self._values['name']
        if name is None:
            return None
        if not is_valid_fqdn(name):
            raise F5ModuleError(
                "The provided name must be a valid FQDN"
            )
        return name

    @property
    def state(self):
        # 'enabled' is an alias for 'present' when deciding existence.
        state = self._values['state']
        return 'present' if state == 'enabled' else state

    @property
    def enabled(self):
        state = self._values['state']
        if state == 'disabled':
            return False
        if state in ['present', 'enabled']:
            return True
        return None

    @property
    def disabled(self):
        state = self._values['state']
        if state == 'disabled':
            return True
        if state in ['present', 'enabled']:
            return False
        return None

    @property
    def pools(self):
        if self._values['pools'] is None:
            return None
        result = []
        for entry in self._values['pools']:
            if 'name' not in entry:
                raise F5ModuleError(
                    "'name' is a required key for items in the list of pools."
                )
            pool = dict()
            if 'ratio' in entry:
                pool['ratio'] = entry['ratio']
            pool['name'] = fq_name(self.partition, entry['name'])
            result.append(pool)
        return result

    @property
    def irules(self):
        rules = self._values['irules']
        if rules is None:
            return None
        # A single empty string means "remove all iRules".
        if len(rules) == 1 and rules[0] == '':
            return ''
        return [fq_name(self.partition, rule) for rule in rules]

    @property
    def aliases(self):
        aliases = self._values['aliases']
        if aliases is None:
            return None
        # A single empty string means "remove all aliases".
        if len(aliases) == 1 and aliases[0] == '':
            return ''
        # Sort in place (mutates the stored list, matching prior behavior).
        aliases.sort()
        return aliases
class Changes(Parameters):
    def to_return(self):
        """Return a dict of reportable values filtered through ``returnables``.

        Dict-valued properties are merged into the result; everything else
        is stored under the returnable's own key.
        """
        result = {}
        try:
            for returnable in self.returnables:
                change = getattr(self, returnable)
                if isinstance(change, dict):
                    result.update(change)
                else:
                    result[returnable] = change
            result = self._filter_params(result)
        except Exception:
            # NOTE(review): failures while collecting returnables are
            # deliberately swallowed so result reporting never aborts a
            # run; a partial (possibly empty) dict is returned instead.
            pass
        return result
class UsableChanges(Changes):
    """Changes massaged into the shape the BIG-IP API accepts."""
    @property
    def irules(self):
        rules = self._values['irules']
        if rules is None:
            return None
        # The empty-string sentinel ("remove all") becomes an empty list
        # because that is what the API expects in the payload.
        return [] if rules == '' else rules
class ReportableChanges(Changes):
    """Changes massaged into the shape reported back to the user."""
    @property
    def pool_lb_method(self):
        value = self._values['pool_lb_method']
        # Report under both keys; 'lb_method' is the legacy alias.
        return dict(
            lb_method=value,
            pool_lb_method=value,
        )
    @property
    def last_resort_pool(self):
        value = self._values['last_resort_pool']
        if value is None:
            return None
        if value in ('', 'none'):
            return 'none'
        # Stored internally as "<type> <pool name>"; report only the name.
        return value.split(' ')[1]
class Difference(object):
    """Compute which desired (want) values differ from current (have) values.

    ``compare`` first looks for a property on this class implementing
    custom comparison logic for the given parameter; if none exists it
    falls back to a plain inequality check.
    """
    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the 'want' value for *param* if it differs, else None."""
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic comparison: report want's value when it differs from
        # have's, or when have does not carry the attribute at all.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1

    def to_tuple(self, items):
        """Flatten a list of dicts into (key, value) string tuples."""
        result = []
        for x in items:
            # Use dict.items() instead of six's iteritems(); behavior is
            # identical here and it drops the Py2 compatibility helper,
            # which is not among this file's visible imports.
            tmp = [(str(k), str(v)) for k, v in x.items()]
            result += tmp
        return result

    def _diff_complex_items(self, want, have):
        # A wanted set of dicts counts as "no change" when every one of
        # its key/value pairs is already present on the device.
        if want == [] and have is None:
            return None
        if want is None:
            return None
        w = self.to_tuple(want)
        h = self.to_tuple(have)
        if set(w).issubset(set(h)):
            return None
        else:
            return want

    @property
    def last_resort_pool(self):
        if self.want.last_resort_pool is None:
            return None
        if self.want.last_resort_pool == '' and self.have.last_resort_pool == '':
            return None
        if self.want.last_resort_pool != self.have.last_resort_pool:
            return self.want.last_resort_pool

    @property
    def state(self):
        # Only report a state change when the enable/disable flag on the
        # device actually contradicts the requested state.
        if self.want.state == 'disabled' and self.have.enabled:
            return self.want.state
        elif self.want.state in ['present', 'enabled'] and self.have.disabled:
            return self.want.state

    @property
    def pools(self):
        result = self._diff_complex_items(self.want.pools, self.have.pools)
        return result

    @property
    def irules(self):
        if self.want.irules is None:
            return None
        # '' is the "remove all" sentinel produced by ModuleParameters.
        if self.want.irules == '' and self.have.irules is None:
            return None
        if self.want.irules == '' and len(self.have.irules) > 0:
            return []
        if sorted(set(self.want.irules)) != sorted(set(self.have.irules)):
            return self.want.irules

    @property
    def aliases(self):
        if self.want.aliases is None:
            return None
        # '' is the "remove all" sentinel produced by ModuleParameters.
        if self.want.aliases == '' and self.have.aliases is None:
            return None
        if self.want.aliases == '' and len(self.have.aliases) > 0:
            return []
        if self.have.aliases is None:
            return self.want.aliases
        if set(self.want.aliases) != set(self.have.aliases):
            return self.want.aliases
class ModuleManager(object):
    """Facade that dispatches to the manager matching the TMOS version."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.kwargs = kwargs

    def exec_module(self):
        # Wide IPs are a GTM feature; bail out early when it is absent.
        if not module_provisioned(self.client, 'gtm'):
            raise F5ModuleError(
                "GTM must be provisioned to use this module."
            )
        flavor = 'untyped' if self.version_is_less_than_12() else 'typed'
        return self.get_manager(flavor).exec_module()

    def get_manager(self, type):
        """Instantiate the manager implementation named by *type*."""
        managers = dict(typed=TypedManager, untyped=UntypedManager)
        if type in managers:
            return managers[type](**self.kwargs)

    def version_is_less_than_12(self):
        # Typed (per-record-type) wideip endpoints appeared in TMOS 12.0.
        return LooseVersion(tmos_version(self.client)) < LooseVersion('12.0.0')
class BaseManager(object):
    """Shared create/update/delete logic for GTM Wide IP resources."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On creation every non-None desired value counts as a change.
        changed = dict(
            (key, getattr(self.want, key))
            for key in Parameters.returnables
            if getattr(self.want, key) is not None
        )
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff desired vs. current state; dict-valued diffs are merged in.
        diff = Difference(self.want, self.have)
        changed = dict()
        for key in Parameters.updatables:
            delta = diff.compare(key)
            if delta is None:
                continue
            if isinstance(delta, dict):
                changed.update(delta)
            else:
                changed[key] = delta
        if not changed:
            return False
        self.changes = UsableChanges(params=changed)
        return True

    def exec_module(self):
        """Run the requested state transition and build the module result."""
        changed = False
        state = self.want.state
        if state in ("present", "disabled"):
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        result = dict()
        result.update(**reportable.to_return())
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        if self.version_is_less_than_12():
            self._deprecate_v11(warnings)
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def version_is_less_than_12(self):
        return LooseVersion(tmos_version(self.client)) < LooseVersion('12.0.0')

    def _deprecate_v11(self, result):
        result.append(
            dict(
                msg='The support for this TMOS version is deprecated.',
                version='2.12'
            )
        )

    def present(self):
        if self.exists():
            return self.update()
        return self.create()

    def create(self):
        # pool_lb_method is mandatory on the API when creating a Wide IP.
        if self.want.pool_lb_method is None:
            raise F5ModuleError(
                "The 'pool_lb_method' option is required when state is 'present'"
            )
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def should_update(self):
        return self._update_changed_options()

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def absent(self):
        if not self.exists():
            return False
        return self.remove()

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the Wide IP")
        return True
class UntypedManager(BaseManager):
    """REST operations against the single /wideip endpoint (TMOS < 12)."""
    def exists(self):
        """Return True if the Wide IP exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "not found".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update_on_device(self):
        """PATCH the changed attributes onto the existing Wide IP."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def read_current_from_device(self):
        """Fetch the Wide IP's current attributes as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
    def create_on_device(self):
        """POST a new Wide IP; returns the resource's selfLink."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # 403 can be returned for permission problems on create.
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def remove_from_device(self):
        """DELETE the Wide IP; raises F5ModuleError on non-200 status."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
class TypedManager(BaseManager):
    """REST operations against per-record-type /wideip/<type> endpoints
    (TMOS >= 12), where <type> is a, aaaa, cname, mx, naptr or srv.
    """
    def __init__(self, *args, **kwargs):
        super(TypedManager, self).__init__(**kwargs)
        # The record type selects the API endpoint, so it is mandatory here.
        if self.want.type is None:
            raise F5ModuleError(
                "The 'type' option is required for BIG-IP instances "
                "greater than or equal to 12.x"
            )
    def exists(self):
        """Return True if the Wide IP exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.type,
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "not found".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update_on_device(self):
        """PATCH the changed attributes onto the existing Wide IP."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.type,
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def read_current_from_device(self):
        """Fetch the Wide IP's current attributes as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.type,
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
    def create_on_device(self):
        """POST a new Wide IP; returns the resource's selfLink."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.type
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # 403 can be returned for permission problems on create.
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def remove_from_device(self):
        """DELETE the Wide IP; raises F5ModuleError on non-200 status."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/{2}/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.type,
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
class ArgumentSpec(object):
    """Argument specification accepted by this module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            pool_lb_method=dict(
                choices=[
                    'round-robin', 'topology', 'ratio', 'global-availability',
                ],
                # 'lb_method' is kept as a legacy alias.
                aliases=['lb_method'],
            ),
            name=dict(
                required=True,
                aliases=['wide_ip'],
            ),
            # DNS record type; selects the typed API endpoint on TMOS >= 12.
            type=dict(
                choices=['a', 'aaaa', 'cname', 'mx', 'naptr', 'srv'],
            ),
            state=dict(
                default='present',
                choices=['absent', 'present', 'enabled', 'disabled'],
            ),
            pools=dict(
                type='list',
                options=dict(
                    name=dict(required=True),
                    ratio=dict(type='int'),
                ),
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION']),
            ),
            irules=dict(type='list'),
            aliases=dict(type='list'),
            last_resort_pool=dict(),
        )
        # Start from the common F5 arguments, then layer module specifics.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Surface module errors as a clean Ansible failure, not a traceback.
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
A link was used without a name parameter.
Erroneous code example:
```compile_fail,E0459
#[link(kind = "dylib")] extern "C" {}
// error: `#[link(...)]` specified without `name = "foo"`
```
Please add a `name` parameter so that the Rust compiler can find the library
you want to link. Example:
```no_run
#[link(kind = "dylib", name = "some_lib")] extern "C" {} // ok!
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0459.md |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import errno
import json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
import multiprocessing
from ansible.module_utils._text import to_text, to_bytes
# True when running under Python 3; used to decode subprocess output.
PY3 = sys.version_info[0] == 3
# Record the invocation in syslog for auditability/debugging.
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# pipe for communication between forked process and parent
ipc_watcher, ipc_notifier = multiprocessing.Pipe()
def notice(msg):
    """Write *msg* to syslog at NOTICE priority."""
    syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
    """Detach the current process from its controlling terminal via the
    classic UNIX double-fork, redirecting the std streams to /dev/null.
    """
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
    # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
    os.setsid()
    os.umask(int('022', 8))
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
    # Redirect all standard streams to /dev/null so the daemon cannot
    # block on or pollute the original terminal/SSH session.
    dev_null = open('/dev/null', 'w')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
    '''
    Used to filter unrelated output around module JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
    Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
    trailing lines after matching close character (working from the bottom of output).
    '''
    warnings = []
    # Filter initial junk
    lines = data.splitlines()
    for start, line in enumerate(lines):
        line = line.strip()
        if line.startswith(u'{'):
            endchar = u'}'
            break
        elif line.startswith(u'['):
            endchar = u']'
            break
    else:
        # for/else: no line opened a JSON document at all.
        raise ValueError('No start of json char found')
    # Filter trailing junk
    lines = lines[start:]
    for reverse_end_offset, line in enumerate(reversed(lines)):
        if line.strip().endswith(endchar):
            break
    else:
        # for/else: the closing brace/bracket never appeared.
        raise ValueError('No end of json char found')
    if reverse_end_offset > 0:
        # Trailing junk is uncommon and can point to things the user might
        # want to change. So print a warning if we find any
        trailing_junk = lines[len(lines) - reverse_end_offset:]
        warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
    lines = lines[:(len(lines) - reverse_end_offset)]
    return ('\n'.join(lines), warnings)
def _get_interpreter(module_path):
with open(module_path, 'rb') as module_fd:
head = module_fd.read(1024)
if head[0:2] != b'#!':
return None
return head[2:head.index(b'\n')].strip().split(b' ')
def _make_temp_dir(path):
# TODO: Add checks for permissions on path.
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _run_module(wrapped_cmd, jid, job_path):
    """Execute the wrapped module command and persist its JSON result.

    Writes a "started" status file first, runs the command, then atomically
    replaces the status file with the final result via os.rename. Any
    failure is recorded as a failed JSON result rather than raised.
    """
    tmp_job_path = job_path + ".tmp"
    jobfile = open(tmp_job_path, "w")
    jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
    jobfile.close()
    # Atomic rename so readers never observe a partially-written file.
    os.rename(tmp_job_path, job_path)
    jobfile = open(tmp_job_path, "w")
    result = {}
    # signal grandchild process started and isolated from being terminated
    # by the connection being closed sending a signal to the job group
    ipc_notifier.send(True)
    ipc_notifier.close()
    outdata = ''
    filtered_outdata = ''
    stderr = ''
    try:
        cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(wrapped_cmd)]
        # call the module interpreter directly (for non-binary modules)
        # this permits use of a script for an interpreter on non-Linux platforms
        interpreter = _get_interpreter(cmd[0])
        if interpreter:
            cmd = interpreter + cmd
        script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        (outdata, stderr) = script.communicate()
        if PY3:
            outdata = outdata.decode('utf-8', 'surrogateescape')
            stderr = stderr.decode('utf-8', 'surrogateescape')
        (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
        result = json.loads(filtered_outdata)
        if json_warnings:
            # merge JSON junk warnings with any existing module warnings
            module_warnings = result.get('warnings', [])
            if not isinstance(module_warnings, list):
                module_warnings = [module_warnings]
            module_warnings.extend(json_warnings)
            result['warnings'] = module_warnings
        if stderr:
            result['stderr'] = stderr
        jobfile.write(json.dumps(result))
    except (OSError, IOError):
        # Launch/IO failure: record a failed result for the status file.
        e = sys.exc_info()[1]
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "msg": to_text(e),
            "outdata": outdata,  # temporary notice only
            "stderr": stderr
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))
    except (ValueError, Exception):
        # NOTE(review): ValueError is redundant here — Exception already
        # covers it. Bad/missing JSON output lands in this branch.
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "data": outdata,  # temporary notice only
            "stderr": stderr,
            "msg": traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))
    jobfile.close()
    os.rename(tmp_job_path, job_path)
def main():
    """Entry point of the async wrapper.

    Forks three times: the first child detaches and daemonizes, then forks
    a supervisor that enforces the time limit over a final child that runs
    the actual module. The original process prints a "started" JSON blob
    and exits immediately.
    """
    if len(sys.argv) < 5:
        print(json.dumps({
            "failed": True,
            "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
                   "Humans, do not call directly!"
        }))
        sys.exit(1)
    jid = "%s.%d" % (sys.argv[1], os.getpid())
    time_limit = sys.argv[2]
    wrapped_module = sys.argv[3]
    argsfile = sys.argv[4]
    # Never delete a module that is not inside an ansible tmp dir; otherwise
    # honor the optional -preserve_tmp flag.
    if '-tmp-' not in os.path.dirname(wrapped_module):
        preserve_tmp = True
    elif len(sys.argv) > 5:
        preserve_tmp = sys.argv[5] == '-preserve_tmp'
    else:
        preserve_tmp = False
    # consider underscore as no argsfile so we can support passing of additional positional parameters
    if argsfile != '_':
        cmd = "%s %s" % (wrapped_module, argsfile)
    else:
        cmd = wrapped_module
    # Polling interval (seconds) used by the supervisor below.
    step = 5
    async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
    # setup job output directory
    jobdir = os.path.expanduser(async_dir)
    job_path = os.path.join(jobdir, jid)
    try:
        _make_temp_dir(jobdir)
    except Exception as e:
        print(json.dumps({
            "failed": 1,
            "msg": "could not create: %s - %s" % (jobdir, to_text(e)),
            "exception": to_text(traceback.format_exc()),
        }))
        sys.exit(1)
    # immediately exit this process, leaving an orphaned process
    # running which immediately forks a supervisory timing process
    try:
        pid = os.fork()
        if pid:
            # Notify the overlord that the async process started
            # we need to not return immediately such that the launched command has an attempt
            # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
            # this probably could be done with some IPC later.  Modules should always read
            # the argsfile at the very first start of their execution anyway
            # close off notifier handle in grandparent, probably unnecessary as
            # this process doesn't hang around long enough
            ipc_notifier.close()
            # allow waiting up to 2.5 seconds in total should be long enough for worst
            # loaded environment in practice.
            retries = 25
            while retries > 0:
                if ipc_watcher.poll(0.1):
                    break
                else:
                    retries = retries - 1
                    continue
            notice("Return async_wrapper task started.")
            print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
                              "_ansible_suppress_tmpdir_delete": not preserve_tmp}))
            sys.stdout.flush()
            sys.exit(0)
        else:
            # The actual wrapper process
            # close off the receiving end of the pipe from child process
            ipc_watcher.close()
            # Daemonize, so we keep on running
            daemonize_self()
            # we are now daemonized, create a supervisory process
            notice("Starting module and watcher")
            sub_pid = os.fork()
            if sub_pid:
                # close off inherited pipe handles
                ipc_watcher.close()
                ipc_notifier.close()
                # the parent stops the process after the time limit
                remaining = int(time_limit)
                # set the child process group id to kill all children
                os.setpgid(sub_pid, sub_pid)
                notice("Start watching %s (%s)" % (sub_pid, remaining))
                time.sleep(step)
                # Poll the child until it exits or the time limit runs out.
                while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
                    notice("%s still running (%s)" % (sub_pid, remaining))
                    time.sleep(step)
                    remaining = remaining - step
                    if remaining <= 0:
                        # Kill the whole process group so grandchildren die too.
                        notice("Now killing %s" % (sub_pid))
                        os.killpg(sub_pid, signal.SIGKILL)
                        notice("Sent kill to group %s " % sub_pid)
                        time.sleep(1)
                        if not preserve_tmp:
                            shutil.rmtree(os.path.dirname(wrapped_module), True)
                        sys.exit(0)
                notice("Done in kid B.")
                if not preserve_tmp:
                    shutil.rmtree(os.path.dirname(wrapped_module), True)
                sys.exit(0)
            else:
                # the child process runs the actual module
                notice("Start module (%s)" % os.getpid())
                _run_module(cmd, jid, job_path)
                notice("Module complete (%s)" % os.getpid())
                sys.exit(0)
    except SystemExit:
        # On python2.4, SystemExit is a subclass of Exception.
        # This block makes python2.4 behave the same as python2.5+
        raise
    except Exception:
        e = sys.exc_info()[1]
        notice("error: %s" % e)
        print(json.dumps({
            "failed": True,
            "msg": "FATAL ERROR: %s" % e
        }))
        sys.exit(1)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import unittest
import tools
class MyPlugin(object):
    """Minimal test plugin: records calls and decorates route output."""
    def __init__(self):
        self.app = None
        self.add_args = {}
        self.add_content = ''
    def setup(self, app):
        # Invoked by the framework when the plugin is installed on an app.
        self.app = app
    def apply(self, func, config):
        def decorated(*args, **kwargs):
            kwargs.update(self.add_args)
            # Remember the last invocation so tests can inspect it.
            self.lastcall = func, args, kwargs
            body = ''.join(func(*args, **kwargs))
            return body + self.add_content
        return decorated
def my_decorator(func):
    """Decorator fixture: the wrapped callable returns only the last item
    produced by *func*.
    """
    def wrapper(*a, **ka):
        return list(func(*a, **ka))[-1]
    # Bug fix: the wrapper was defined but never returned, so decorating
    # anything with my_decorator produced None instead of a callable.
    return wrapper
class TestPluginManagement(tools.ServerTestBase):
    """Tests for installing, uninstalling, applying and skipping plugins."""
    def verify_installed(self, plugin, otype, **config):
        # Helper asserting a plugin instance is installed with the given
        # type and config. NOTE(review): appears unused by the tests below.
        self.assertEqual(type(plugin), otype)
        self.assertEqual(plugin.config, config)
        self.assertEqual(plugin.app, self.app)
        self.assertTrue(plugin in self.app.plugins)
    def test_install_plugin(self):
        plugin = MyPlugin()
        installed = self.app.install(plugin)
        self.assertEqual(plugin, installed)
        self.assertTrue(plugin in self.app.plugins)
    def test_install_decorator(self):
        # Plain callables are accepted as plugins too.
        installed = self.app.install(my_decorator)
        self.assertEqual(my_decorator, installed)
        self.assertTrue(my_decorator in self.app.plugins)
    def test_install_non_plugin(self):
        self.assertRaises(TypeError, self.app.install, 'I am not a plugin')
    def test_uninstall_by_instance(self):
        # Uninstalling one instance must not remove other instances.
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        self.app.uninstall(plugin)
        self.assertTrue(plugin not in self.app.plugins)
        self.assertTrue(plugin2 in self.app.plugins)
    def test_uninstall_by_type(self):
        # Uninstalling by class removes every instance of that class.
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        self.app.uninstall(MyPlugin)
        self.assertTrue(plugin not in self.app.plugins)
        self.assertTrue(plugin2 not in self.app.plugins)
    def test_uninstall_by_name(self):
        # Only plugins carrying the matching .name attribute are removed.
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        plugin.name = 'myplugin'
        self.app.uninstall('myplugin')
        self.assertTrue(plugin not in self.app.plugins)
        self.assertTrue(plugin2 in self.app.plugins)
    def test_uninstall_all(self):
        # uninstall(True) removes everything.
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        self.app.uninstall(True)
        self.assertFalse(self.app.plugins)
    def test_route_plugin(self):
        # A plugin passed via route(apply=...) affects only that route.
        plugin = MyPlugin()
        plugin.add_content = ';foo'
        @self.app.route('/a')
        @self.app.route('/b', apply=[plugin])
        def a(): return 'plugin'
        self.assertBody('plugin', '/a')
        self.assertBody('plugin;foo', '/b')
    # NOTE(review): method name has a typo ("oder" -> "order"); rename
    # when convenient.
    def test_plugin_oder(self):
        # Route-local plugins wrap the handler before app-wide ones, and
        # later-installed plugins wrap earlier ones.
        self.app.install(MyPlugin()).add_content = ';global-1'
        self.app.install(MyPlugin()).add_content = ';global-2'
        l1 = MyPlugin()
        l1.add_content = ';local-1'
        l2 = MyPlugin()
        l2.add_content = ';local-2'
        @self.app.route('/a')
        @self.app.route('/b', apply=[l1, l2])
        def a(): return 'plugin'
        self.assertBody('plugin;global-2;global-1', '/a')
        self.assertBody('plugin;local-2;local-1;global-2;global-1', '/b')
    def test_skip_by_instance(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        g2 = self.app.install(MyPlugin())
        g2.add_content = ';global-2'
        l1 = MyPlugin()
        l1.add_content = ';local-1'
        l2 = MyPlugin()
        l2.add_content = ';local-2'
        @self.app.route('/a', skip=[g2, l2])
        @self.app.route('/b', apply=[l1, l2], skip=[g2, l2])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin;local-1;global-1', '/b')
    def test_skip_by_class(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        @self.app.route('/a')
        @self.app.route('/b', skip=[MyPlugin])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_skip_by_name(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        g1.name = 'test'
        @self.app.route('/a')
        @self.app.route('/b', skip=['test'])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_skip_all(self):
        # skip=[True] disables every plugin for the route.
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        @self.app.route('/a')
        @self.app.route('/b', skip=[True])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_skip_nonlist(self):
        # A single plugin (not wrapped in a list) is accepted for skip=.
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        @self.app.route('/a')
        @self.app.route('/b', skip=g1)
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
class TestPluginAPI(tools.ServerTestBase):
    """Tests for the plugin protocol: callable, apply(), setup(), close()."""
    def setUp(self):
        super(TestPluginAPI, self).setUp()
        # A route that echoes back its route-config keyword arguments.
        @self.app.route('/', test='plugin.cfg')
        def test(**args):
            return ', '.join('%s:%s' % (k,v) for k,v in args.items())
    def test_callable(self):
        # A bare callable is used directly as the wrapping decorator.
        def plugin(func):
            def wrapper(*a, **ka):
                return func(test='me', *a, **ka) + '; tail'
            return wrapper
        self.app.install(plugin)
        self.assertBody('test:me; tail', '/')
    def test_apply(self):
        # When apply() exists it takes precedence over __call__.
        class Plugin(object):
            def apply(self, func, cfg):
                def wrapper(*a, **ka):
                    return func(test=cfg['config']['test'], *a, **ka) + '; tail'
                return wrapper
            def __call__(self, func):
                raise AssertionError("Plugins must not be called "\
                                     "if they implement 'apply'")
        self.app.install(Plugin())
        self.assertBody('test:plugin.cfg; tail', '/')
    def test_instance_method_wrapper(self):
        # apply() may return a bound method as the replacement callback.
        class Plugin(object):
            api=2
            def apply(self, callback, route):
                return self.b
            def b(self): return "Hello"
        self.app.install(Plugin())
        self.assertBody('Hello', '/')
    def test_setup(self):
        # setup(app) is called once at install time with the application.
        class Plugin(object):
            def __call__(self, func): return func
            def setup(self, app): self.app = app
        plugin = self.app.install(Plugin())
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(getattr(plugin, 'app', None), self.app)
    def test_close(self):
        # close() fires on uninstall and on application shutdown.
        class Plugin(object):
            def __call__(self, func): return func
            def close(self): self.closed = True
        plugin = self.app.install(Plugin())
        plugin2 = self.app.install(Plugin())
        self.app.uninstall(plugin)
        self.assertTrue(getattr(plugin, 'closed', False))
        self.app.close()
        self.assertTrue(getattr(plugin2, 'closed', False))
if __name__ == '__main__': #pragma: no cover
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
// Compile-time assertion that MetricsNil satisfies MetricsProvider.
var _ MetricsProvider = &MetricsNil{}
// MetricsNil represents a MetricsProvider that does not support returning
// Metrics. It serves as a placeholder for Volumes that do not yet support
// metrics.
type MetricsNil struct{}
// SupportsMetrics returns false for the MetricsNil type.
func (*MetricsNil) SupportsMetrics() bool {
	return false
}
// GetMetrics returns an empty Metrics and an error.
// See MetricsProvider.GetMetrics
func (*MetricsNil) GetMetrics() (*Metrics, error) {
return &Metrics{}, NewNotSupportedError()
} | go | github | https://github.com/kubernetes/kubernetes | pkg/volume/metrics_nil.go |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package vault
import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/pluginconsts"
"github.com/hashicorp/vault/limits"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/pluginutil"
"github.com/hashicorp/vault/sdk/logical"
)
const (
	// KVv2MetadataPath is the path segment KV version 2 mounts use for
	// metadata operations.
	KVv2MetadataPath = "metadata"
)
// metricsLoop periodically emits gauge metrics until stopCh is closed.
// It runs on every node; individual gauges are gated on HA state and
// replication role as noted inline.
func (c *Core) metricsLoop(stopCh chan struct{}) {
	emitTimer := time.Tick(time.Second)

	// stopOrHAState acquires the state read lock (abandoning the attempt if
	// stopCh closes first) and reports the node's current HA state.
	stopOrHAState := func() (bool, consts.HAState) {
		l := newLockGrabber(c.stateLock.RLock, c.stateLock.RUnlock, stopCh)
		go l.grab()
		if stopped := l.lockOrStop(); stopped {
			return true, 0
		}
		defer c.stateLock.RUnlock()
		return false, c.HAState()
	}

	identityCountTimer := time.Tick(time.Minute * 10)
	// Only emit on active node of cluster that is not a DR secondary.
	if stopped, haState := stopOrHAState(); stopped {
		return
	} else if haState == consts.Standby || c.IsDRSecondary() {
		// Receiving on a nil channel blocks forever, disabling that select case.
		identityCountTimer = nil
	}

	writeTimer := time.Tick(time.Second * 30)
	// Do not process the writeTimer on DR Secondary nodes
	if c.IsDRSecondary() {
		writeTimer = nil
	}

	// This loop covers
	// vault.expire.num_leases
	// vault.core.unsealed
	// vault.identity.num_entities
	// and the non-telemetry request counters shown in the UI.
	for {
		select {
		case <-emitTimer:
			stopped, haState := stopOrHAState()
			if stopped {
				return
			}
			if haState == consts.Active {
				c.metricsMutex.Lock()
				// Emit on active node only
				if c.expiration != nil {
					c.expiration.emitMetrics()
				}
				c.metricsMutex.Unlock()
			}

			// Refresh the sealed gauge, on all nodes
			if c.Sealed() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 1, nil)
			}

			if c.UndoLogsEnabled() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "write_undo_logs"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "write_undo_logs"}, 0, nil)
			}

			// Publish the current estimated limits of the adaptive request limiters.
			writeLimiter := c.GetRequestLimiter(limits.WriteLimiter)
			if writeLimiter != nil {
				c.metricSink.SetGaugeWithLabels([]string{
					"core", "limits", "concurrency", limits.WriteLimiter,
				}, float32(writeLimiter.EstimatedLimit()), nil)
			}
			pathLimiter := c.GetRequestLimiter(limits.SpecialPathLimiter)
			if pathLimiter != nil {
				c.metricSink.SetGaugeWithLabels([]string{
					"core", "limits", "concurrency", limits.SpecialPathLimiter,
				}, float32(pathLimiter.EstimatedLimit()), nil)
			}

			// Refresh the standby gauge, on all nodes
			if haState != consts.Active {
				c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 0, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 1, nil)
			}

			if haState == consts.PerfStandby {
				c.metricSink.SetGaugeWithLabels([]string{"core", "performance_standby"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "performance_standby"}, 0, nil)
			}

			// Replication-role gauges: one 0/1 gauge per role.
			if c.ReplicationState().HasState(consts.ReplicationPerformancePrimary) {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "primary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "primary"}, 0, nil)
			}

			if c.IsPerfSecondary() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "secondary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "performance", "secondary"}, 0, nil)
			}

			if c.ReplicationState().HasState(consts.ReplicationDRPrimary) {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "primary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "primary"}, 0, nil)
			}

			if c.IsDRSecondary() {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "secondary"}, 1, nil)
			} else {
				c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "dr", "secondary"}, 0, nil)
			}

			// Reindex/build progress is only meaningful on the active node.
			if haState == consts.Active {
				reindexState := c.ReindexStage()
				if reindexState != nil {
					c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "reindex_stage"}, float32(*reindexState), nil)
				} else {
					c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "reindex_stage"}, 0, nil)
				}

				buildProgress := c.BuildProgress()
				if buildProgress != nil {
					c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "build_progress"}, float32(*buildProgress), nil)
				} else {
					c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "build_progress"}, 0, nil)
				}

				buildTotal := c.BuildTotal()
				if buildTotal != nil {
					c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "build_total"}, float32(*buildTotal), nil)
				} else {
					c.metricSink.SetGaugeWithLabels([]string{"core", "replication", "build_total"}, 0, nil)
				}
			}

			// If we're using a raft backend, emit raft metrics
			if rb, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
				rb.CollectMetrics(c.MetricSink())
			}

			// Capture the total number of in-flight requests
			c.inFlightReqGaugeMetric()

			// Refresh gauge metrics that are looped
			c.cachedGaugeMetricsEmitter()
		case <-writeTimer:
			l := newLockGrabber(c.stateLock.RLock, c.stateLock.RUnlock, stopCh)
			go l.grab()
			if stopped := l.lockOrStop(); stopped {
				return
			}
			// Ship barrier encryption counts if a perf standby or the active node
			// on a performance secondary cluster
			if c.perfStandby || c.IsPerfSecondary() { // already have lock here, do not re-acquire
				err := syncBarrierEncryptionCounter(c)
				if err != nil {
					c.logger.Error("writing syncing encryption counters", "err", err)
				}
			}
			c.stateLock.RUnlock()
		case <-identityCountTimer:
			// TODO: this can be replaced by the identity gauge counter; we need to
			// sum across all namespaces.
			go func() {
				ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
				defer cancel()
				entities, err := c.countActiveEntities(ctx)
				if err != nil {
					c.logger.Error("error counting identity entities", "err", err)
				} else {
					metrics.SetGauge([]string{"identity", "num_entities"}, float32(entities.Entities.Total))
				}
			}()
		case <-stopCh:
			return
		}
	}
}
// These wrappers are responsible for redirecting to the current instance of
// TokenStore; there is one per method because an additional level of abstraction
// seems confusing.

// tokenGaugeCollector reports per-namespace token counts via the current
// token store. DR secondaries report no values.
func (c *Core) tokenGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// there is no expiration manager on DR Secondaries
	if c.IsDRSecondary() {
		return []metricsutil.GaugeLabelValues{}, nil
	}

	// stateLock or authLock protects the tokenStore pointer
	c.stateLock.RLock()
	tokenStore := c.tokenStore
	c.stateLock.RUnlock()

	if tokenStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return tokenStore.gaugeCollector(ctx)
}
// tokenGaugePolicyCollector reports token counts grouped by policy via the
// current token store. DR secondaries report no values.
func (c *Core) tokenGaugePolicyCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// there is no expiration manager on DR Secondaries
	if c.IsDRSecondary() {
		return []metricsutil.GaugeLabelValues{}, nil
	}

	c.stateLock.RLock()
	tokenStore := c.tokenStore
	c.stateLock.RUnlock()

	if tokenStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return tokenStore.gaugeCollectorByPolicy(ctx)
}
// leaseExpiryGaugeCollector reports lease counts bucketed by expiration time
// via the current expiration manager.
func (c *Core) leaseExpiryGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	c.stateLock.RLock()
	expMgr := c.expiration
	telemetryConsts := c.MetricSink().TelemetryConsts
	c.stateLock.RUnlock()

	if expMgr == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil expiration manager")
	}
	return expMgr.leaseAggregationMetrics(ctx, telemetryConsts)
}
// tokenGaugeMethodCollector reports token counts grouped by auth method via
// the current token store. DR secondaries report no values.
func (c *Core) tokenGaugeMethodCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// there is no expiration manager on DR Secondaries
	if c.IsDRSecondary() {
		return []metricsutil.GaugeLabelValues{}, nil
	}

	c.stateLock.RLock()
	tokenStore := c.tokenStore
	c.stateLock.RUnlock()

	if tokenStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return tokenStore.gaugeCollectorByMethod(ctx)
}
// tokenGaugeTtlCollector reports token counts bucketed by TTL via the
// current token store. DR secondaries report no values.
func (c *Core) tokenGaugeTtlCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// there is no expiration manager on DR Secondaries
	if c.IsDRSecondary() {
		return []metricsutil.GaugeLabelValues{}, nil
	}

	c.stateLock.RLock()
	tokenStore := c.tokenStore
	c.stateLock.RUnlock()

	if tokenStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil token store")
	}
	return tokenStore.gaugeCollectorByTtl(ctx)
}
// emitMetricsActiveNode is used to start all the periodic metrics; all of them should
// be shut down when stopCh is closed. This code runs on the active node only.
func (c *Core) emitMetricsActiveNode(stopCh chan struct{}) {
	// The gauge collection processes are started and stopped here
	// because there's more than one TokenManager created during startup,
	// but we only want one set of gauges.
	metricsInit := []struct {
		MetricName       []string                  // dotted metric name components
		MetadataLabel    []metrics.Label           // constant label attached to every emission
		CollectorFunc    metricsutil.GaugeCollector // callback producing the gauge values
		DisableEnvVar    string                    // if set and non-empty in the env, skip this gauge
		IsEnterpriseOnly bool                      // skipped on CE builds (detected via BillingStart)
	}{
		{
			[]string{"token", "count"},
			[]metrics.Label{{"gauge", "token_by_namespace"}},
			c.tokenGaugeCollector,
			"",
			false,
		},
		{
			[]string{"token", "count", "by_policy"},
			[]metrics.Label{{"gauge", "token_by_policy"}},
			c.tokenGaugePolicyCollector,
			"",
			false,
		},
		{
			[]string{"expire", "leases", "by_expiration"},
			[]metrics.Label{{"gauge", "leases_by_expiration"}},
			c.leaseExpiryGaugeCollector,
			"",
			false,
		},
		{
			[]string{"token", "count", "by_auth"},
			[]metrics.Label{{"gauge", "token_by_auth"}},
			c.tokenGaugeMethodCollector,
			"",
			false,
		},
		{
			[]string{"token", "count", "by_ttl"},
			[]metrics.Label{{"gauge", "token_by_ttl"}},
			c.tokenGaugeTtlCollector,
			"",
			false,
		},
		{
			[]string{"secret", "kv", "count"},
			[]metrics.Label{{"gauge", "kv_secrets_by_mountpoint"}},
			c.kvSecretGaugeCollector,
			"VAULT_DISABLE_KV_GAUGE",
			false,
		},
		{
			[]string{"identity", "entity", "count"},
			[]metrics.Label{{"gauge", "identity_by_namespace"}},
			c.entityGaugeCollector,
			"",
			false,
		},
		{
			[]string{"identity", "entity", "alias", "count"},
			[]metrics.Label{{"gauge", "identity_by_mountpoint"}},
			c.entityGaugeCollectorByMount,
			"",
			false,
		},
		{
			[]string{"identity", "entity", "active", "partial_month"},
			[]metrics.Label{{"gauge", "identity_active_month"}},
			c.activeEntityGaugeCollector,
			"",
			false,
		},
		{
			[]string{"policy", "configured", "count"},
			[]metrics.Label{{"gauge", "number_policies_by_type"}},
			c.configuredPoliciesGaugeCollector,
			"",
			false,
		},
		{
			[]string{"client", "billing_period", "activity"},
			[]metrics.Label{{"gauge", "clients_current_billing_period"}},
			c.clientsGaugeCollectorCurrentBillingPeriod,
			"",
			true,
		},
	}

	// Disable collection if configured, or if we're a performance standby
	// node or DR secondary cluster.
	if c.MetricSink().GaugeInterval == time.Duration(0) {
		c.logger.Info("usage gauge collection is disabled")
	} else if standby, _ := c.Standby(); !standby && !c.IsDRSecondary() {
		for _, init := range metricsInit {
			if init.DisableEnvVar != "" {
				if os.Getenv(init.DisableEnvVar) != "" {
					c.logger.Info("usage gauge collection is disabled for",
						"metric", init.MetricName)
					continue
				}
			}
			// Billing start date is always 0 on CE
			if init.IsEnterpriseOnly && c.BillingStart().IsZero() {
				continue
			}

			proc, err := c.MetricSink().NewGaugeCollectionProcess(
				init.MetricName,
				init.MetadataLabel,
				init.CollectorFunc,
				c.logger,
			)
			if err != nil {
				c.logger.Error("failed to start collector", "metric", init.MetricName, "error", err)
			} else {
				// Each collection process is stopped via defer when
				// metricsLoop below returns.
				go proc.Run()
				defer proc.Stop()
			}
		}
	}

	// When this returns, all the defers set up above will fire.
	c.metricsLoop(stopCh)
}
// kvMount captures the information needed to count the secrets held by a
// single KV secret engine mount.
type kvMount struct {
	Namespace            *namespace.Namespace // namespace owning the mount
	MountPoint           string               // mount path within the namespace
	MountAccessor        string
	Version              string // KV version option; defaults to "1" when unset
	Local                bool   // cluster-local (non-replicated) mount
	NumSecrets           int    // populated by walkKvMountSecrets; starts at 0
	RunningPluginVersion string
}
// findOfficialKvMounts differs from findKvMounts in that it will ignore any sideloaded
// or externally compiled KV mounts that are still of type KV.
// It's a simple function that's slightly reimplemented to prevent needing a context
// in findKvMounts.
func (c *Core) findOfficialKvMounts(ctx context.Context) []*kvMount {
	mounts := make([]*kvMount, 0)

	c.mountsLock.RLock()
	defer c.mountsLock.RUnlock()

	// we don't grab the statelock, so this code might run during or after the seal process.
	// Therefore, we need to check if c.mounts is nil. If we do not, this will panic when
	// run after seal.
	if c.mounts == nil {
		return mounts
	}

	for _, entry := range c.mounts.Entries {
		if entry.Type == pluginconsts.SecretEngineKV || entry.Type == pluginconsts.SecretEngineGeneric {
			// A missing or empty "version" option means KV version 1.
			version, ok := entry.Options["version"]
			if !ok || version == "" {
				version = "1"
			}

			pluginName := getAdjustedPluginType(entry)
			if pluginName == "" {
				continue
			}
			pluginVersion := entry.RunningVersion

			// Skip mounts whose plugin cannot be resolved or is neither
			// builtin nor official-tier.
			runner, err := c.pluginCatalog.Get(ctx, pluginName, consts.PluginTypeSecrets, pluginVersion)
			if err != nil {
				continue
			}

			if !(isOfficialOrBuiltin(runner)) {
				continue
			}

			mounts = append(mounts, &kvMount{
				Namespace:            entry.namespace,
				MountPoint:           entry.Path,
				MountAccessor:        entry.Accessor,
				Version:              version,
				NumSecrets:           0,
				Local:                entry.Local,
				RunningPluginVersion: entry.RunningVersion,
			})
		}
	}
	return mounts
}
// findKvMounts returns one kvMount record (with NumSecrets zeroed) for every
// KV/generic secret engine in the mount table.
func (c *Core) findKvMounts() []*kvMount {
	kvMounts := make([]*kvMount, 0)

	c.mountsLock.RLock()
	defer c.mountsLock.RUnlock()

	// we don't grab the statelock, so this code might run during or after the seal process.
	// Therefore, we need to check if c.mounts is nil. If we do not, this will panic when
	// run after seal.
	if c.mounts == nil {
		return kvMounts
	}

	for _, entry := range c.mounts.Entries {
		if entry.Type != pluginconsts.SecretEngineKV && entry.Type != pluginconsts.SecretEngineGeneric {
			continue
		}
		// A missing or empty "version" option means KV version 1.
		version := entry.Options["version"]
		if version == "" {
			version = "1"
		}
		kvMounts = append(kvMounts, &kvMount{
			Namespace:            entry.namespace,
			MountPoint:           entry.Path,
			MountAccessor:        entry.Accessor,
			Version:              version,
			NumSecrets:           0,
			Local:                entry.Local,
			RunningPluginVersion: entry.RunningVersion,
		})
	}
	return kvMounts
}
// kvCollectionErrorCount bumps the metrics-collection error counter for the
// KV secrets gauge.
func (c *Core) kvCollectionErrorCount() {
	labels := []metrics.Label{{"gauge", "kv_secrets_by_mountpoint"}}
	c.MetricSink().IncrCounterWithLabels([]string{"metrics", "collection", "error"}, 1, labels)
}
// walkKvSecrets performs a breadth-first traversal of the KV secrets under
// rootDirs for mount m, invoking onSecret for every leaf path found.
//
// List errors indicating a missing mount (ErrUnsupportedPath) or a mount
// still being set up (ErrSetupReadOnly) are returned without logging; other
// list errors are logged and end the walk with a nil error. Returns nil when
// the walk completes or ctx is cancelled.
func (c *Core) walkKvSecrets(
	ctx context.Context,
	rootDirs []string,
	m *kvMount,
	onSecret func(ctx context.Context, fullPath string) error,
) error {
	subdirectories := rootDirs

	for len(subdirectories) > 0 {
		// Context cancellation check
		select {
		case <-ctx.Done():
			return nil
		default:
			break
		}

		currentDirectory := subdirectories[0]
		subdirectories = subdirectories[1:]

		listRequest := &logical.Request{
			Operation: logical.ListOperation,
			Path:      currentDirectory,
		}
		resp, err := c.router.Route(ctx, listRequest)
		if err != nil {
			c.kvCollectionErrorCount()
			// ErrUnsupportedPath probably means that the mount is not there anymore,
			// don't log those cases.
			if !strings.Contains(err.Error(), logical.ErrUnsupportedPath.Error()) &&
				// ErrSetupReadOnly means the mount's currently being set up.
				// Nothing is wrong and there's no cause for alarm, just that we can't get data from it
				// yet. We also shouldn't log these cases
				!strings.Contains(err.Error(), logical.ErrSetupReadOnly.Error()) {
				c.logger.Error("failed to perform internal KV list", "mount_point", m.MountPoint, "error", err)
				break
			}
			// Quit handling this mount point (but it'll still appear in the list)
			return err
		}
		if resp == nil {
			continue
		}
		rawKeys, ok := resp.Data["keys"]
		if !ok {
			continue
		}
		keys, ok := rawKeys.([]string)
		if !ok {
			c.kvCollectionErrorCount()
			c.logger.Error("KV list keys are not a []string", "mount_point", m.MountPoint, "rawKeys", rawKeys)
			// Quit handling this mount point (but it'll still appear in the list)
			return fmt.Errorf("KV list keys are not a []string")
		}
		for _, path := range keys {
			fullPath := currentDirectory + path
			if strings.HasSuffix(path, "/") {
				subdirectories = append(subdirectories, fullPath)
			} else {
				if callBackErr := onSecret(ctx, fullPath); callBackErr != nil {
					// Log the callback's error; previously this logged `err`,
					// which is always nil here because the list call above
					// succeeded.
					c.logger.Error("failed to get metadata for KVv2 secret", "path", fullPath, "error", callBackErr)
					return callBackErr
				}
			}
		}
	}
	return nil
}
// getMinNamespaceSecrets is expected to be called on the output
// of GetKvUsageMetrics to get the min number of secrets in a single namespace.
// Returns 0 for an empty map.
func getMinNamespaceSecrets(mapOfNamespacesToSecrets map[string]int) int {
	// Track "no value seen yet" explicitly instead of using 0 as a sentinel:
	// the previous `currentMin == 0` check let a later, larger count
	// overwrite a genuine minimum of 0, giving a nondeterministic result
	// (map iteration order) whenever some namespace held zero secrets.
	currentMin := 0
	first := true
	for _, n := range mapOfNamespacesToSecrets {
		if first || n < currentMin {
			currentMin = n
			first = false
		}
	}
	return currentMin
}
// getMaxNamespaceSecrets is expected to be called on the output
// of GetKvUsageMetrics to get the max number of secrets in a single namespace.
// Returns 0 for an empty map.
func getMaxNamespaceSecrets(mapOfNamespacesToSecrets map[string]int) int {
	largest := 0
	for _, secretCount := range mapOfNamespacesToSecrets {
		if secretCount > largest {
			largest = secretCount
		}
	}
	return largest
}
// getTotalSecretsAcrossAllNamespaces is expected to be called on the output
// of GetKvUsageMetrics to get the total number of secrets across namespaces.
func getTotalSecretsAcrossAllNamespaces(mapOfNamespacesToSecrets map[string]int) int {
	sum := 0
	for _, secretCount := range mapOfNamespacesToSecrets {
		sum += secretCount
	}
	return sum
}
// getMeanNamespaceSecrets is expected to be called on the output
// of GetKvUsageMetrics to get the mean number of secrets across namespaces.
// Uses integer division; returns 0 for an empty map.
func getMeanNamespaceSecrets(mapOfNamespacesToSecrets map[string]int) int {
	if len(mapOfNamespacesToSecrets) == 0 {
		// Avoid divide by zero.
		return 0
	}
	return getTotalSecretsAcrossAllNamespaces(mapOfNamespacesToSecrets) / len(mapOfNamespacesToSecrets)
}
// walkKvMountSecrets counts the secrets under mount m, incrementing
// m.NumSecrets for each leaf found. KVv1 mounts are walked from the mount
// root; KVv2 mounts are walked under the metadata/ prefix.
func (c *Core) walkKvMountSecrets(ctx context.Context, m *kvMount) {
	var startDirs []string
	if m.Version == "1" {
		startDirs = []string{m.Namespace.Path + m.MountPoint}
	} else {
		startDirs = []string{m.Namespace.Path + m.MountPoint + KVv2MetadataPath + "/"}
	}
	err := c.walkKvSecrets(ctx, startDirs, m, func(ctx context.Context, fullPath string) error {
		m.NumSecrets++
		return nil
	})
	if err != nil {
		// ErrUnsupportedPath probably means that the mount is not there anymore,
		// don't log those cases.
		if !strings.Contains(err.Error(), logical.ErrUnsupportedPath.Error()) &&
			// ErrSetupReadOnly means the mount's currently being set up.
			// Nothing is wrong and there's no cause for alarm, just that we can't get data from it
			// yet. We also shouldn't log these cases
			!strings.Contains(err.Error(), logical.ErrSetupReadOnly.Error()) {
			c.logger.Error("failed to walk KV mount", "mount_point", m.MountPoint, "error", err)
		}
	}
}
// kvSecretGaugeCollector reports the number of KV secrets per mount point,
// labeled by namespace and mount path.
func (c *Core) kvSecretGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// Find all KV mounts
	mounts := c.findKvMounts()
	results := make([]metricsutil.GaugeLabelValues, len(mounts))

	// Use a root namespace, so include namespace path
	// in any queries.
	ctx = namespace.RootContext(ctx)

	// Route list requests to all the identified mounts.
	// (All of these will show up as activity in the vault.route metric.)
	// Then we have to explore each subdirectory.
	for i, m := range mounts {
		// Check for cancellation, return empty array
		select {
		case <-ctx.Done():
			return []metricsutil.GaugeLabelValues{}, nil
		default:
			break
		}

		results[i].Labels = []metrics.Label{
			metricsutil.NamespaceLabel(m.Namespace),
			{"mount_point", m.MountPoint},
		}

		// Walk the mount to populate m.NumSecrets.
		c.walkKvMountSecrets(ctx, m)
		results[i].Value = float32(m.NumSecrets)
	}

	return results, nil
}
// entityGaugeCollector reports identity entity counts, one gauge value per
// namespace (namespaces without entities report 0).
func (c *Core) entityGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	// Protect against concurrent changes during seal
	c.stateLock.RLock()
	identityStore := c.identityStore
	c.stateLock.RUnlock()

	if identityStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil identity store")
	}

	entityCounts, err := identityStore.countEntitiesByNamespace(ctx)
	if err != nil {
		return []metricsutil.GaugeLabelValues{}, err
	}

	// No check for expiration here; the bulk of the work should be in
	// counting the entities.
	allNamespaces := c.collectNamespaces()
	values := make([]metricsutil.GaugeLabelValues, 0, len(allNamespaces))
	for _, ns := range allNamespaces {
		values = append(values, metricsutil.GaugeLabelValues{
			Labels: []metrics.Label{
				metricsutil.NamespaceLabel(ns),
			},
			Value: float32(entityCounts[ns.ID]),
		})
	}

	return values, nil
}
// entityGaugeCollectorByMount reports entity alias counts grouped by the
// auth mount that created them, labeled by namespace, auth method type, and
// mount path. Accessors with no matching mount are skipped.
func (c *Core) entityGaugeCollectorByMount(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	c.stateLock.RLock()
	identityStore := c.identityStore
	c.stateLock.RUnlock()
	if identityStore == nil {
		return []metricsutil.GaugeLabelValues{}, errors.New("nil identity store")
	}

	byAccessor, err := identityStore.countEntitiesByMountAccessor(ctx)
	if err != nil {
		return []metricsutil.GaugeLabelValues{}, err
	}

	values := make([]metricsutil.GaugeLabelValues, 0)
	for accessor, count := range byAccessor {
		// Terminate if taking too long to do the translation
		select {
		case <-ctx.Done():
			return values, errors.New("context cancelled")
		default:
			break
		}

		// Translate the accessor to its mount entry; the mount may have
		// been removed since the count was taken.
		c.stateLock.RLock()
		mountEntry := c.router.MatchingMountByAccessor(accessor)
		c.stateLock.RUnlock()
		if mountEntry == nil {
			continue
		}
		values = append(values, metricsutil.GaugeLabelValues{
			Labels: []metrics.Label{
				metricsutil.NamespaceLabel(mountEntry.namespace),
				{"auth_method", mountEntry.Type},
				{"mount_point", "auth/" + mountEntry.Path},
			},
			Value: float32(count),
		})
	}

	return values, nil
}
// cachedGaugeMetricsEmitter republishes every gauge cached in the metrics
// helper's loop-metrics map. No-op when no metrics helper is configured.
func (c *Core) cachedGaugeMetricsEmitter() {
	if c.metricsHelper == nil {
		return
	}

	c.metricsHelper.LoopMetrics.Metrics.Range(func(_ interface{}, value interface{}) bool {
		gauge := value.(metricsutil.GaugeMetric)
		c.metricSink.SetGaugeWithLabels(gauge.Key, gauge.Value, gauge.Labels)
		return true
	})
}
// inFlightReqGaugeMetric publishes the current number of in-flight requests
// as the vault.core.in_flight_requests gauge.
func (c *Core) inFlightReqGaugeMetric() {
	count := float32(c.inFlightReqData.InFlightReqCount.Load())
	c.metricSink.SetGaugeWithLabels([]string{"core", "in_flight_requests"}, count, nil)
}
// configuredPoliciesGaugeCollector is used to collect gauge label values for the `vault.policy.configured.count` metric
// It returns one value per policy type (ACL, RGP, EGP), each counting the
// policies of that type across all namespaces.
func (c *Core) configuredPoliciesGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) {
	c.stateLock.RLock()
	policyStore := c.policyStore
	c.stateLock.RUnlock()

	// No policy store (e.g. sealed): report nothing rather than erroring.
	if policyStore == nil {
		return []metricsutil.GaugeLabelValues{}, nil
	}

	ctx = namespace.RootContext(ctx)
	namespaces := c.collectNamespaces()

	policyTypes := []PolicyType{
		PolicyTypeACL,
		PolicyTypeRGP,
		PolicyTypeEGP,
	}

	var values []metricsutil.GaugeLabelValues
	for _, pt := range policyTypes {
		policies, err := policyStore.policiesByNamespaces(ctx, pt, namespaces)
		if err != nil {
			return []metricsutil.GaugeLabelValues{}, err
		}

		v := metricsutil.GaugeLabelValues{}
		v.Labels = []metricsutil.Label{{
			"policy_type",
			pt.String(),
		}}
		v.Value = float32(len(policies))

		values = append(values, v)
	}

	return values, nil
}
// RoleCounts aggregates role/credential-definition counts per secret engine
// type, as gathered by getRoleCountsInternal.
type RoleCounts struct {
	AWSDynamicRoles            int `json:"aws_dynamic_roles"`
	AWSStaticRoles             int `json:"aws_static_roles"`
	AzureDynamicRoles          int `json:"azure_dynamic_roles"`
	AzureStaticRoles           int `json:"azure_static_roles"`
	DatabaseDynamicRoles       int `json:"database_dynamic_roles"`
	DatabaseStaticRoles        int `json:"database_static_roles"`
	GCPRolesets                int `json:"gcp_rolesets"`
	GCPStaticAccounts          int `json:"gcp_static_accounts"`
	GCPImpersonatedAccounts    int `json:"gcp_impersonated_accounts"`
	LDAPDynamicRoles           int `json:"ldap_dynamic_roles"`
	LDAPStaticRoles            int `json:"ldap_static_roles"`
	OpenLDAPDynamicRoles       int `json:"openldap_dynamic_roles"`
	OpenLDAPStaticRoles        int `json:"openldap_static_roles"`
	AlicloudDynamicRoles       int `json:"alicloud_dynamic_roles"`
	RabbitMQDynamicRoles       int `json:"rabbitmq_dynamic_roles"`
	ConsulDynamicRoles         int `json:"consul_dynamic_roles"`
	NomadDynamicRoles          int `json:"nomad_dynamic_roles"`
	KubernetesDynamicRoles     int `json:"kubernetes_dynamic_roles"`
	MongoDBAtlasDynamicRoles   int `json:"mongodb_atlas_dynamic_roles"`
	TerraformCloudDynamicRoles int `json:"terraformcloud_dynamic_roles"`
}
// getRoleCountsInternal gets the role counts for plugins.
// includeLocal determines if local mounts are included
// includeReplicated determines if replicated mounts are included
// officialPluginsOnly determines if this function should include only plugins that are official,
// which would exclude, for example, a custom built version of these plugins.
// Returns nil when the core is sealed (the mounts table is inaccessible).
func (c *Core) getRoleCountsInternal(includeLocal bool, includeReplicated bool, officialPluginsOnly bool) *RoleCounts {
	if c.Sealed() {
		c.logger.Debug("core is sealed, cannot access mounts table")
		return nil
	}
	ctx := namespace.RootContext(c.activeContext)

	// apiList issues an internal LIST against apiPath under the given mount
	// and returns the keys, or nil on any failure.
	apiList := func(entry *MountEntry, apiPath string) []string {
		listRequest := &logical.Request{
			Operation: logical.ListOperation,
			Path:      entry.namespace.Path + entry.Path + apiPath,
		}
		resp, err := c.router.Route(ctx, listRequest)
		if err != nil || resp == nil {
			return nil
		}
		rawKeys, ok := resp.Data["keys"]
		if !ok {
			return nil
		}
		keys, ok := rawKeys.([]string)
		if !ok {
			return nil
		}
		return keys
	}

	c.mountsLock.RLock()
	defer c.mountsLock.RUnlock()

	var roles RoleCounts
	for _, entry := range c.mounts.Entries {
		// Apply the local/replicated filters.
		if !entry.Local && !includeReplicated {
			continue
		}
		if entry.Local && !includeLocal {
			continue
		}

		pluginName := getAdjustedPluginType(entry)
		if pluginName == "" {
			continue
		}
		pluginVersion := entry.RunningVersion

		if officialPluginsOnly {
			// Skip plugins that cannot be resolved or are neither builtin
			// nor official-tier.
			runner, err := c.pluginCatalog.Get(ctx, pluginName, consts.PluginTypeSecrets, pluginVersion)
			if err != nil {
				continue
			}

			if !(isOfficialOrBuiltin(runner)) {
				continue
			}
		}

		// Each engine exposes its roles under engine-specific list paths.
		switch pluginName {
		case pluginconsts.SecretEngineAWS:
			dynamicRoles := apiList(entry, "roles")
			roles.AWSDynamicRoles += len(dynamicRoles)
			staticRoles := apiList(entry, "static-roles")
			roles.AWSStaticRoles += len(staticRoles)
		case pluginconsts.SecretEngineAzure:
			dynamicRoles := apiList(entry, "roles")
			roles.AzureDynamicRoles += len(dynamicRoles)
			staticRoles := apiList(entry, "static-roles")
			roles.AzureStaticRoles += len(staticRoles)
		case pluginconsts.SecretEngineDatabase:
			dynamicRoles := apiList(entry, "roles")
			roles.DatabaseDynamicRoles += len(dynamicRoles)
			staticRoles := apiList(entry, "static-roles")
			roles.DatabaseStaticRoles += len(staticRoles)
		case pluginconsts.SecretEngineGCP:
			rolesets := apiList(entry, "rolesets")
			roles.GCPRolesets += len(rolesets)
			staticAccounts := apiList(entry, "static-accounts")
			roles.GCPStaticAccounts += len(staticAccounts)
			impersonatedAccounts := apiList(entry, "impersonated-accounts")
			roles.GCPImpersonatedAccounts += len(impersonatedAccounts)
		case pluginconsts.SecretEngineLDAP:
			dynamicRoles := apiList(entry, "role")
			roles.LDAPDynamicRoles += len(dynamicRoles)
			staticRoles := apiList(entry, "static-role")
			roles.LDAPStaticRoles += len(staticRoles)
		case pluginconsts.SecretEngineOpenLDAP:
			dynamicRoles := apiList(entry, "role")
			roles.OpenLDAPDynamicRoles += len(dynamicRoles)
			staticRoles := apiList(entry, "static-role")
			roles.OpenLDAPStaticRoles += len(staticRoles)
		case pluginconsts.SecretEngineAlicloud:
			dynamicRoles := apiList(entry, "role")
			roles.AlicloudDynamicRoles += len(dynamicRoles)
		case pluginconsts.SecretEngineRabbitMQ:
			dynamicRoles := apiList(entry, "roles")
			roles.RabbitMQDynamicRoles += len(dynamicRoles)
		case pluginconsts.SecretEngineConsul:
			dynamicRoles := apiList(entry, "roles")
			roles.ConsulDynamicRoles += len(dynamicRoles)
		case pluginconsts.SecretEngineNomad:
			dynamicRoles := apiList(entry, "role")
			roles.NomadDynamicRoles += len(dynamicRoles)
		case pluginconsts.SecretEngineKubernetes:
			dynamicRoles := apiList(entry, "roles")
			roles.KubernetesDynamicRoles += len(dynamicRoles)
		case pluginconsts.SecretEngineMongoDBAtlas:
			dynamicRoles := apiList(entry, "roles")
			roles.MongoDBAtlasDynamicRoles += len(dynamicRoles)
		case pluginconsts.SecretEngineTerraform:
			dynamicRoles := apiList(entry, "role")
			roles.TerraformCloudDynamicRoles += len(dynamicRoles)
		}
	}
	return &roles
}
// GetRoleCounts returns role counts across all mounts (local and
// replicated), without restricting to official plugins.
func (c *Core) GetRoleCounts() *RoleCounts {
	return c.getRoleCountsInternal(true, true, false)
}
// GetRoleCountsForCluster returns role counts for this cluster; replicated
// mounts are only counted when this cluster is the primary.
func (c *Core) GetRoleCountsForCluster() *RoleCounts {
	return c.getRoleCountsInternal(true, c.isPrimary(), false)
}
// GetKvUsageMetrics returns a map of namespace paths to KV secret counts.
// It counts every mount — local, replicated, and unofficial — across all
// namespaces; kvVersion is interpreted as in GetKvUsageMetricsByNamespace.
func (c *Core) GetKvUsageMetrics(ctx context.Context, kvVersion string) (map[string]int, error) {
	return c.GetKvUsageMetricsByNamespace(ctx, kvVersion, "", true, true, true)
}
// GetKvUsageMetricsByNamespace returns a map of namespace paths to KV secret counts within a specific namespace.
//
// kvVersion restricts counting to that KV version when "1" or "2"; "0" means
// no version filter; anything else is an error. nsPath, when non-empty,
// restricts counting to namespaces whose path starts with nsPath.
// includeLocal/includeReplicated filter by mount locality, and
// includeUnofficial controls whether sideloaded/externally built KV mounts
// are counted.
func (c *Core) GetKvUsageMetricsByNamespace(ctx context.Context, kvVersion string, nsPath string, includeLocal bool, includeReplicated bool, includeUnofficial bool) (map[string]int, error) {
	// Enumerate the mount table once, with the appropriate filter; the
	// previous implementation always ran the unofficial scan and then threw
	// the result away on the official-only path.
	var mounts []*kvMount
	if includeUnofficial {
		mounts = c.findKvMounts()
	} else {
		mounts = c.findOfficialKvMounts(ctx)
	}

	results := make(map[string]int)
	if kvVersion == "1" || kvVersion == "2" {
		var newMounts []*kvMount
		for _, mount := range mounts {
			if mount.Version == kvVersion {
				newMounts = append(newMounts, mount)
			}
		}
		mounts = newMounts
	} else if kvVersion != "0" {
		return results, fmt.Errorf("kv version %s not supported, must be 0, 1, or 2", kvVersion)
	}

	for _, m := range mounts {
		if !includeLocal && m.Local {
			continue
		}
		if !includeReplicated && !m.Local {
			continue
		}
		if nsPath != "" && !strings.HasPrefix(m.Namespace.Path, nsPath) {
			continue
		}
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("context expired")
		default:
		}
		c.walkKvMountSecrets(ctx, m)
		// Accumulate: several mounts can share a namespace, and a missing
		// key reads as zero, so += covers both the first and later mounts.
		results[m.Namespace.Path] += m.NumSecrets
	}
	return results, nil
}
// isOfficialOrBuiltin determines if a plugin is official based on its runner.
// We treat it as official if runner is nil to avoid overcharging, but ensure
// that it is properly scanned if it _is_ an official mount.
func isOfficialOrBuiltin(runner *pluginutil.PluginRunner) bool {
	if runner == nil {
		return true
	}
	return runner.Builtin || runner.Tier == consts.PluginTierOfficial
}
// ListOfficialAndExternalSecretPlugins gets a list of all secret plugins, official and external.
// The union of both sets is the set of all secret plugins.
// Returns a list of official plugins, external plugins, and error, in that order.
func (c *Core) ListOfficialAndExternalSecretPlugins(ctx context.Context) ([]*MountEntry, []*MountEntry, error) {
	if c == nil || c.pluginCatalog == nil {
		return nil, nil, fmt.Errorf("core or plugin catalog is nil")
	}

	mounts, err := c.ListMounts()
	if err != nil {
		return nil, nil, fmt.Errorf("error listing mounts: %w", err)
	}

	var official []*MountEntry
	var external []*MountEntry

	for _, entry := range mounts {
		if entry == nil {
			continue
		}

		// Only secrets-engine mounts live in the mounts table. Exclude the known
		// non-secrets mounts and database mounts (PluginTypeDatabase).
		if entry.Table != mountTableType {
			continue
		}

		pluginName := getAdjustedPluginType(entry)
		if pluginName == "" {
			continue
		}
		pluginVersion := entry.RunningVersion

		// Unresolvable plugins are skipped entirely (in neither list).
		runner, err := c.pluginCatalog.Get(ctx, pluginName, consts.PluginTypeSecrets, pluginVersion)
		if err != nil {
			continue
		}

		if isOfficialOrBuiltin(runner) {
			official = append(official, entry)
		} else {
			external = append(external, entry)
		}
	}

	return official, external, nil
}
// ListOfficialSecretPlugins gets a list of all 'official'/builtin secret plugins.
func (c *Core) ListOfficialSecretPlugins(ctx context.Context) ([]*MountEntry, error) {
	official, _, err := c.ListOfficialAndExternalSecretPlugins(ctx)
	if err != nil {
		return nil, err
	}
	return official, nil
}
// getAdjustedPluginType gets the adjusted plugin type for an entry. In most cases
// this will be entry.Type, but it will correctly return the type for legacy (pre-Vault 1.0) plugins.
// Returns "" for a nil entry.
func getAdjustedPluginType(entry *MountEntry) string {
	if entry == nil {
		return ""
	}
	// Legacy mounts use the generic "plugin" type and carry the real plugin
	// name in the mount config.
	if entry.Type == mountTypePlugin && entry.Config.PluginName != "" {
		return entry.Config.PluginName
	}
	return entry.Type
}
// ListDeduplicatedExternalSecretPlugins returns the enabled secret engines
// that are not builtin and not official-tier.
//
// This is useful for identifying "third-party" secrets mounts (e.g. community or
// partner tier external plugins) while excluding builtins and official HashiCorp
// plugins.
// Note: This will include all mounts that have been built externally (even if they are
// Hashicorp owned). This will happen if the plugin was built from a Github repo or from an
// artifact.
func (c *Core) ListDeduplicatedExternalSecretPlugins(ctx context.Context) ([]*MountEntry, error) {
	_, externalPlugins, err := c.ListOfficialAndExternalSecretPlugins(ctx)
	if err != nil {
		return nil, err
	}

	seen := make(map[string]struct{})
	var result []*MountEntry

	for _, entry := range externalPlugins {
		if entry == nil {
			continue
		}

		pluginName := getAdjustedPluginType(entry)
		if pluginName == "" {
			continue
		}
		pluginVersion := entry.RunningVersion

		// De-dupe: multiple mounts can point at the same underlying plugin+version.
		// We want to charge for each unique plugin+version pair.
		// NUL is used as the key separator since it cannot appear in either part.
		key := pluginName + "\x00" + pluginVersion
		if _, ok := seen[key]; ok {
			continue
		}

		result = append(result, entry)
		seen[key] = struct{}{}
	}

	return result, nil
} | go | github | https://github.com/hashicorp/vault | vault/core_metrics.go
'''
Offset test cases
'''
import unittest
import utils
def suite():
    """Build the TestSuite containing all six offset test cases."""
    s = unittest.TestSuite()
    for case_class in (Offset1TestCase, Offset2TestCase, Offset3TestCase,
                       Offset4TestCase, Offset5TestCase, Offset6TestCase):
        s.addTest(case_class())
    return s
class Offset1TestCase(utils.PeachTcpTestCase):
    """Run offset1.xml and verify the bytes received by the listener."""

    def runTest(self):
        self.peachUtils.RunPeachXml("offset1.xml")
        received = self.peachUtils.GetListenerData()
        expected = '\x16\x10\n\x04PEACH4PEACH3PEACH2PEACH1'
        assert received == expected, 'offset1.xml failed, instead [%s]' % repr(received)
class Offset2TestCase(utils.PeachTcpTestCase):
    """Run offset2.xml and verify the bytes received by the listener."""

    def runTest(self):
        self.peachUtils.RunPeachXml("offset2.xml")
        received = self.peachUtils.GetListenerData()
        expected = '28 28 80 92 93 93 103 CRAZY STRING!aslkjalskdjasaslkdjalskdjasdkjasdlkjasdALSKJDALKSJD11293812093aslkdjalskdjas'
        assert received == expected, 'offset2.xml failed, instead [%s]' % repr(received)
class Offset3TestCase(utils.PeachTcpTestCase):
    """Run offset3.xml and verify the bytes received by the listener."""

    def runTest(self):
        self.peachUtils.RunPeachXml("offset3.xml")
        received = self.peachUtils.GetListenerData()
        expected = '24 24 37 41 58 65 CRAZY STRING!X\x02\x00\x00aslkdjalskdjasdlaFoo Me!\x00'
        assert received == expected, 'offset3.xml failed, instead [%s]' % repr(received)
class Offset4TestCase(utils.PeachTcpTestCase):
    """Run offset4.xml and verify the bytes received by the listener."""

    def runTest(self):
        self.peachUtils.RunPeachXml("offset4.xml")
        received = self.peachUtils.GetListenerData()
        expected = 'CRAZY STRING!aslkjalskdjasaslkdjalskdjasdkjasdlkjasdALSKJDALKSJD11293812093aslkdjalskdjas0 0 52 64 65 65 75 '
        assert received == expected, 'offset4.xml failed, instead [%s]' % repr(received)
class Offset5TestCase(utils.PeachTcpTestCase):
    """Run offset5.xml and verify the bytes received by the listener."""

    def runTest(self):
        self.peachUtils.RunPeachXml("offset5.xml")
        received = self.peachUtils.GetListenerData()
        expected = '24 24 37 41 58 45 CRAZY STRING!X\x02\x00\x00aslkdjalskdjasdlaFoo Me!\x00'
        assert received == expected, 'offset5.xml failed, instead [%s]' % repr(received)
class Offset6TestCase(utils.PeachTcpTestCase):
    """Run offset6.xml and verify the bytes received by the listener."""

    def runTest(self):
        self.peachUtils.RunPeachXml("offset6.xml")
        received = self.peachUtils.GetListenerData()
        expected = '24 24 37 41 58 45 CRAZY STRING!X\x02\x00\x00aslkdjalskdjasdlaFoo Me!\x00'
        assert received == expected, 'offset6.xml failed, instead [%s]' % repr(received)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# end
#-*- encoding:utf-8 -*-
from django.db import models
import datetime
from django.utils.text import slugify
class Customer(models.Model):
    """Customer record: name, address, phone plus audit timestamps."""

    customer_name = models.CharField(max_length=128,
                                     blank=True, null=True,
                                     verbose_name='Nombre',
                                     help_text='Ingrese el Nombre Completo.')
    # Slug derived from customer_name; kept in sync by save().
    customer_slug = models.SlugField(max_length=128, blank=True, null=True)
    customer_address = models.CharField(max_length=64,
                                        blank=True, null=True,
                                        verbose_name='Direccion',
                                        help_text='Ingrese la Direccion del Cliente.')
    customer_phone = models.CharField(max_length=24,
                                      blank=True, null=True,
                                      verbose_name='Telefono',
                                      help_text='Ingrese el teléfono del Cliente.')
    # Audit fields. Creation must use auto_now_add (set once, on insert);
    # the previous auto_now=True silently overwrote it on every save, so the
    # "created" timestamp was really a "last saved" timestamp.
    date_created_customer = models.DateTimeField(auto_now_add=True)
    date_updated_customer = models.DateTimeField()

    def save(self, *args, **kwargs):
        """Refresh the update timestamp and the slug before persisting."""
        self.date_updated_customer = datetime.datetime.now()
        self.customer_slug = slugify(self.customer_name)
        super(Customer, self).save(*args, **kwargs)

    def __str__(self):
        return self.customer_name
class Product(models.Model):
    """Catalog product: name, price, type, description and a like counter."""

    # Display name of the product.
    product_name = models.CharField(max_length=128,
                                    blank=True, null=True,
                                    verbose_name='Producto',
                                    help_text='Ingrese el Nombre del Producto.')
    # Price with two decimal places.
    product_price = models.DecimalField(max_digits=64,
                                        decimal_places=2,
                                        verbose_name='Precio',
                                        help_text='Precio del Producto')
    # Free-form category the product belongs to.
    product_type = models.CharField(max_length=128,
                                    blank=True, null=True,
                                    verbose_name='Tipo de Producto',
                                    help_text='Ingrese el Tipo de Producto al que pertence.')
    product_description = models.TextField(max_length=400,
                                           verbose_name='Descripción del Producto',
                                           help_text='Ingrese la Descripción del Producto.')
    # "Like" counter, starts at 0.
    product_likes = models.IntegerField(null=True, blank=True, default=0)

    def __str__(self):
        return self.product_name
class Stock(models.Model):
    """Available inventory for a product."""

    stock_product_id = models.ForeignKey('Product')
    # NOTE: max_length is not a valid IntegerField option (Django ignores it);
    # it has been dropped.
    stock_quantity = models.IntegerField(verbose_name='Cantidad del Producto',
                                         help_text='Ingrese la Cantidad de Producto Disponible')

    def __str__(self):
        return self.stock_product_id.product_name
class Order(models.Model):
    """An order of a quantity of one product by one customer."""

    order_customer_id = models.ForeignKey('Customer')
    order_product_id = models.ForeignKey('Product')
    # NOTE: max_length is not a valid IntegerField option; dropped.
    order_amount = models.IntegerField()
    order_date = models.DateField(auto_now=True)

    def __str__(self):
        # BUGFIX: the original dereferenced order_product_id.customer_name,
        # but Product has no customer_name field (guaranteed AttributeError).
        # The customer's name lives on the Customer FK.
        return self.order_customer_id.customer_name
# -*- coding: utf-8 -*-
## Amazon S3 manager
## Author: Michal Ludvig <michal@logix.cz>
## http://www.logix.cz/michal
## License: GPL Version 2
## Copyright: TGRMN Software and contributors
import os
import sys
import time
import re
import string
import random
import errno
from calendar import timegm
from logging import debug, warning, error
from ExitCodes import EX_OSFILE
# Fail fast with installation instructions when the required third-party
# dateutil module is missing (it is needed for all date parsing below).
try:
    import dateutil.parser
except ImportError:
    sys.stderr.write(u"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ImportError trying to import dateutil.parser.
Please install the python dateutil module:
$ sudo apt-get install python-dateutil
or
$ sudo yum install python-dateutil
or
$ pip install python-dateutil
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""")
    sys.stderr.flush()
    sys.exit(EX_OSFILE)
import Config
import Exceptions
# hashlib backported to python 2.4 / 2.5 is not compatible with hmac!
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
    from md5 import md5
else:
    from hashlib import md5
try:
    import xml.etree.ElementTree as ET
except ImportError:
    # xml.etree.ElementTree was only added in python 2.5
    import elementtree.ElementTree as ET
# Names exported via "from Utils import *"; appended to as they are defined.
__all__ = []
def parseNodes(nodes):
    """Convert a list of XML elements into a list of dicts (one per node).

    WARNING: text nodes from mixed xml/text are ignored; for
    <tag1>some text<tag2>other text</tag2></tag1> the "some text"
    part is dropped.
    """
    parsed = []
    for node in nodes:
        item = {}
        for child in node.getchildren():
            key = decode_from_s3(child.tag)
            if child.getchildren():
                # Complex child: recurse.
                item[key] = parseNodes([child])
            else:
                text = node.findtext(".//%s" % child.tag)
                item[key] = None if text is None else decode_from_s3(text)
        parsed.append(item)
    return parsed
__all__.append("parseNodes")
def stripNameSpace(xml):
    """Remove the top-level AWS xmlns attribute from *xml*.

    Returns a (stripped_xml, namespace) tuple; namespace is None when
    no xmlns attribute was found on the root element.
    """
    r = re.compile('^(<?[^>]+?>\s*)(<\w+) xmlns=[\'"](http://[^\'"]+)[\'"](.*)', re.MULTILINE)
    m = r.match(xml)
    if m is None:
        return xml, None
    xmlns = m.groups()[2]
    return r.sub("\\1\\2\\4", xml), xmlns
__all__.append("stripNameSpace")
def getTreeFromXml(xml):
    """Parse *xml* (after stripping the AWS namespace) into an ElementTree.

    The stripped xmlns, if any, is stored back on the root element's
    'xmlns' attribute so callers can still inspect it.
    """
    xml, xmlns = stripNameSpace(xml)
    try:
        tree = ET.fromstring(xml)
        if xmlns:
            tree.attrib['xmlns'] = xmlns
        return tree
    except Exception, e:
        # Log the parse failure together with the offending document,
        # then re-raise for the caller to handle.
        error("Error parsing xml: %s", e)
        error(xml)
        raise
__all__.append("getTreeFromXml")
def getListFromXml(xml, node):
    """Parse *xml* and return parseNodes() output for every *node* element."""
    matches = getTreeFromXml(xml).findall('.//%s' % (node))
    return parseNodes(matches)
__all__.append("getListFromXml")
def getDictFromTree(tree):
    """Recursively convert an ElementTree node into a dict.

    Repeated child tags are collected into a list; missing/empty text
    becomes "".
    """
    ret_dict = {}
    for child in tree.getchildren():
        if child.getchildren():
            ## Complex-type child. Recurse
            content = getDictFromTree(child)
        else:
            content = decode_from_s3(child.text) if child.text is not None else None
        child_tag = decode_from_s3(child.tag)
        if child_tag in ret_dict:
            # Second occurrence of this tag: promote the value to a list.
            if not isinstance(ret_dict[child_tag], list):
                ret_dict[child_tag] = [ret_dict[child_tag]]
            ret_dict[child_tag].append(content or "")
        else:
            ret_dict[child_tag] = content or ""
    return ret_dict
__all__.append("getDictFromTree")
def getTextFromXml(xml, xpath):
    """Return the decoded text of the first *xpath* match in *xml*, or None."""
    tree = getTreeFromXml(xml)
    if tree.tag.endswith(xpath):
        found = tree.text
    else:
        found = tree.findtext(xpath)
    return decode_from_s3(found) if found is not None else None
__all__.append("getTextFromXml")
def getRootTagName(xml):
    """Return the decoded tag name of the XML document's root element."""
    root_tag = getTreeFromXml(xml).tag
    return decode_from_s3(root_tag) if root_tag is not None else None
__all__.append("getRootTagName")
def xmlTextNode(tag_name, text):
    """Create an Element named *tag_name* whose text is the decoded *text*."""
    node = ET.Element(tag_name)
    node.text = decode_from_s3(text)
    return node
__all__.append("xmlTextNode")
def appendXmlTextNode(tag_name, text, parent):
    """
    Create a new <tag_name> node with content 'text', append it
    to the 'parent' element and return the newly created node.
    """
    node = xmlTextNode(tag_name, text)
    parent.append(node)
    return node
__all__.append("appendXmlTextNode")
def dateS3toPython(date):
    """Parse an S3 timestamp into a datetime, normalising milliseconds to .000."""
    # Reset milliseconds to 000
    normalised = re.compile('\.[0-9]*(?:[Z\\-\\+]*?)').sub(".000", date)
    return dateutil.parser.parse(normalised, fuzzy=True)
__all__.append("dateS3toPython")
def dateS3toUnix(date):
    ## NOTE: This is timezone-aware and returns the timestamp regarding GMT
    parsed = dateS3toPython(date)
    return timegm(parsed.utctimetuple())
__all__.append("dateS3toUnix")
def dateRFC822toPython(date):
    """Parse an RFC 822 date string into a datetime (fuzzy parsing)."""
    parsed = dateutil.parser.parse(date, fuzzy=True)
    return parsed
__all__.append("dateRFC822toPython")
def dateRFC822toUnix(date):
    """Convert an RFC 822 date string to a GMT epoch timestamp."""
    parsed = dateRFC822toPython(date)
    return timegm(parsed.utctimetuple())
__all__.append("dateRFC822toUnix")
def formatSize(size, human_readable = False, floating_point = False):
    """Return a (size, suffix) pair; suffix is one of '', 'k', 'M', 'G', 'T'.

    With human_readable the size is repeatedly divided by 1024 (while it
    exceeds 2048) and the matching unit suffix is returned alongside it.
    """
    size = floating_point and float(size) or int(size)
    if not human_readable:
        return (size, "")
    suffixes = ['k', 'M', 'G', 'T']
    suffix = ""
    while size > 2048:
        size /= 1024
        suffix = suffixes.pop(0)
    return (size, suffix)
__all__.append("formatSize")
def formatDateTime(s3timestamp):
    """Render an S3 timestamp as 'YYYY-MM-DD HH:MM'."""
    parsed = dateutil.parser.parse(s3timestamp, fuzzy=True)
    return parsed.strftime("%Y-%m-%d %H:%M")
__all__.append("formatDateTime")
def convertTupleListToDict(list):
    """Build a dict from a list of (key, value) tuples (later keys win)."""
    mapping = {}
    for pair in list:
        mapping[pair[0]] = pair[1]
    return mapping
__all__.append("convertTupleListToDict")
_rnd_chars = string.ascii_letters+string.digits
_rnd_chars_len = len(_rnd_chars)
def rndstr(len):
retval = ""
while len > 0:
retval += _rnd_chars[random.randint(0, _rnd_chars_len-1)]
len -= 1
return retval
__all__.append("rndstr")
def mktmpsomething(prefix, randchars, createfunc):
    """Create a uniquely-named private temp object and return its name.

    Tries up to 5 names of the form prefix + <randchars random chars>,
    calling createfunc(name) for each; EEXIST means a name collision, so
    another name is tried.  Runs under a 0077 umask so the created object
    is only accessible to the current user; the previous umask is restored
    before returning or propagating an unexpected error.
    """
    old_umask = os.umask(0077)
    tries = 5
    while tries > 0:
        dirname = prefix + rndstr(randchars)
        try:
            createfunc(dirname)
            break
        except OSError, e:
            if e.errno != errno.EEXIST:
                # Unexpected failure: restore the umask before re-raising.
                os.umask(old_umask)
                raise
        tries -= 1
    os.umask(old_umask)
    return dirname
__all__.append("mktmpsomething")
def mktmpdir(prefix = os.getenv('TMP','/tmp') + "/tmpdir-", randchars = 10):
    """Create a private temporary directory and return its name."""
    return mktmpsomething(prefix, randchars, os.mkdir)
__all__.append("mktmpdir")
def mktmpfile(prefix = os.getenv('TMP','/tmp') + "/tmpfile-", randchars = 20):
    """Create a private, empty temporary file and return its name."""
    # O_CREAT|O_EXCL guarantees the name did not exist before we made it.
    create_empty = lambda filename : os.close(os.open(deunicodise(filename), os.O_CREAT | os.O_EXCL))
    return mktmpsomething(prefix, randchars, create_empty)
__all__.append("mktmpfile")
def hash_file_md5(filename):
    """Return the hex MD5 digest of the file's contents.

    Reads in 32kB chunks so arbitrarily large files can be hashed with
    constant memory.
    """
    h = md5()
    f = open(deunicodise(filename), "rb")
    try:
        while True:
            # Hash 32kB chunks
            data = f.read(32*1024)
            if not data:
                break
            h.update(data)
    finally:
        # BUGFIX: close the file even when read() raises, so the descriptor
        # is never leaked.
        f.close()
    return h.hexdigest()
__all__.append("hash_file_md5")
def mkdir_with_parents(dir_name):
    """
    mkdir_with_parents(dst_dir)

    Create directory 'dir_name' with all parent directories
    Returns True on success, False otherwise.
    """
    pathmembers = dir_name.split(os.sep)
    tmp_stack = []
    # Pop path components until we reach the deepest directory that
    # already exists...
    while pathmembers and not os.path.isdir(deunicodise(os.sep.join(pathmembers))):
        tmp_stack.append(pathmembers.pop())
    # ...then create the missing components one at a time, shallowest first.
    while tmp_stack:
        pathmembers.append(tmp_stack.pop())
        cur_dir = os.sep.join(pathmembers)
        try:
            debug("mkdir(%s)" % cur_dir)
            os.mkdir(deunicodise(cur_dir))
        except (OSError, IOError), e:
            # Best-effort behaviour: warn and report failure, don't raise.
            warning("%s: can not make directory: %s" % (cur_dir, e.strerror))
            return False
        except Exception, e:
            warning("%s: %s" % (cur_dir, e))
            return False
    return True
__all__.append("mkdir_with_parents")
def unicodise(string, encoding = None, errors = "replace"):
    """
    Convert 'string' to Unicode or raise an exception.

    Uses the configured encoding when none is given; unicode input is
    returned unchanged.
    """
    if not encoding:
        encoding = Config.Config().encoding
    if type(string) == unicode:
        return string
    debug("Unicodising %r using %s" % (string, encoding))
    try:
        return unicode(string, encoding, errors)
    except UnicodeDecodeError:
        # BUGFIX: UnicodeDecodeError requires (encoding, object, start, end,
        # reason); the old single-argument construction raised a TypeError
        # instead of the intended exception, bypassing callers' handlers.
        raise UnicodeDecodeError(encoding, string, 0, len(string),
                                 "Conversion to unicode failed: %r" % string)
__all__.append("unicodise")
def deunicodise(string, encoding = None, errors = "replace"):
    """
    Convert unicode 'string' to <type str>, by default replacing
    all invalid characters with '?' or raise an exception.
    """
    if not encoding:
        encoding = Config.Config().encoding
    if type(string) != unicode:
        return str(string)
    debug("DeUnicodising %r using %s" % (string, encoding))
    try:
        return string.encode(encoding, errors)
    except UnicodeEncodeError:
        # BUGFIX: UnicodeEncodeError requires (encoding, object, start, end,
        # reason); the old single-argument construction raised a TypeError
        # instead of the intended exception.
        raise UnicodeEncodeError(encoding, string, 0, len(string),
                                 "Conversion from unicode failed: %r" % string)
__all__.append("deunicodise")
def unicodise_safe(string, encoding = None):
    """
    Convert 'string' to Unicode according to the current encoding,
    replacing every invalid character with '?'.
    """
    round_tripped = unicodise(deunicodise(string, encoding), encoding)
    return round_tripped.replace(u'\ufffd', '?')
__all__.append("unicodise_safe")
def decode_from_s3(string, errors = "replace"):
    """
    Convert S3 UTF-8 'string' to Unicode or raise an exception.
    """
    if type(string) == unicode:
        return string
    # Be quiet by default
    #debug("Decoding string from S3: %r" % string)
    try:
        return unicode(string, "UTF-8", errors)
    except UnicodeDecodeError:
        # BUGFIX: UnicodeDecodeError needs 5 constructor arguments; the old
        # single-argument form raised a TypeError instead.
        raise UnicodeDecodeError("UTF-8", string, 0, len(string),
                                 "Conversion to unicode failed: %r" % string)
__all__.append("decode_from_s3")
def encode_to_s3(string, errors = "replace"):
    """
    Convert Unicode to S3 UTF-8 'string', by default replacing
    all invalid characters with '?' or raise an exception.
    """
    if type(string) != unicode:
        return str(string)
    # Be quiet by default
    #debug("Encoding string to S3: %r" % string)
    try:
        return string.encode("UTF-8", errors)
    except UnicodeEncodeError:
        # BUGFIX: UnicodeEncodeError needs 5 constructor arguments; the old
        # single-argument form raised a TypeError instead.
        raise UnicodeEncodeError("UTF-8", string, 0, len(string),
                                 "Conversion from unicode failed: %r" % string)
__all__.append("encode_to_s3")
def replace_nonprintables(string):
    """
    replace_nonprintables(string)

    Replace every control character ch with ord(ch) <= 31 by ^@, ^A, ...
    and DEL (127) by ^?.  Warns when anything was replaced (unless the
    urlencoding_mode is "fixbucket").
    """
    pieces = []
    changed = 0
    for ch in string:
        code = ord(ch)
        if code <= 31:
            pieces.append("^" + chr(ord('@') + code))
            changed += 1
        elif code == 127:
            pieces.append("^?")
            changed += 1
        else:
            pieces.append(ch)
    new_string = "".join(pieces)
    if changed and Config.Config().urlencoding_mode != "fixbucket":
        warning("%d non-printable characters replaced in: %s" % (changed, new_string))
    return new_string
__all__.append("replace_nonprintables")
def time_to_epoch(t):
    """Convert time specified in a variety of forms into UNIX epoch time.

    Accepts datetime.datetime, int, anything that has a strftime() method, and standard time 9-tuples
    """
    if isinstance(t, int):
        # Already an int
        return t
    elif isinstance(t, tuple) or isinstance(t, time.struct_time):
        # Assume it's a time 9-tuple
        return int(time.mktime(t))
    elif hasattr(t, 'timetuple'):
        # Looks like a datetime object or compatible
        return int(time.mktime(t.timetuple()))
    elif hasattr(t, 'strftime'):
        # Looks like the object supports standard srftime()
        return int(t.strftime('%s'))
    elif isinstance(t, str) or isinstance(t, unicode):
        # See if it's a string representation of an epoch
        try:
            # Support relative times (eg. "+60")
            if t.startswith('+'):
                return time.time() + int(t[1:])
            return int(t)
        except ValueError:
            # Try to parse it as a timestamp string
            try:
                # NOTE(review): time.strptime() returns a struct_time, not an
                # epoch int — this branch looks like it should be wrapped in
                # int(time.mktime(...)); confirm against callers.
                return time.strptime(t)
            except ValueError, ex:
                # Will fall through
                debug("Failed to parse date with strptime: %s", ex)
                pass
    raise Exceptions.ParameterError('Unable to convert %r to an epoch time. Pass an epoch time. Try `date -d \'now + 1 year\' +%%s` (shell) or time.mktime (Python).' % t)
def check_bucket_name(bucket, dns_strict = True):
    """Validate an S3 bucket name; return True or raise ParameterError.

    With dns_strict, the stricter DNS-compatible rules apply (lowercase
    charset, max 63 chars, no '-.' or '..' sequences, must start and end
    with a letter or digit).
    """
    if dns_strict:
        bad = re.search("([^a-z0-9\.-])", bucket, re.UNICODE)
        if bad:
            raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: lowercase us-ascii letters (a-z), digits (0-9), dot (.) and hyphen (-)." % (bucket, bad.groups()[0]))
    else:
        bad = re.search("([^A-Za-z0-9\._-])", bucket, re.UNICODE)
        if bad:
            raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: us-ascii letters (a-z, A-Z), digits (0-9), dot (.), hyphen (-) and underscore (_)." % (bucket, bad.groups()[0]))
    name_length = len(bucket)
    if name_length < 3:
        raise Exceptions.ParameterError("Bucket name '%s' is too short (min 3 characters)" % bucket)
    if name_length > 255:
        raise Exceptions.ParameterError("Bucket name '%s' is too long (max 255 characters)" % bucket)
    if not dns_strict:
        return True
    # Additional DNS-compatibility constraints.
    if name_length > 63:
        raise Exceptions.ParameterError("Bucket name '%s' is too long (max 63 characters)" % bucket)
    if re.search("-\.", bucket, re.UNICODE):
        raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '-.' for DNS compatibility" % bucket)
    if re.search("\.\.", bucket, re.UNICODE):
        raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '..' for DNS compatibility" % bucket)
    if not re.search("^[0-9a-z]", bucket, re.UNICODE):
        raise Exceptions.ParameterError("Bucket name '%s' must start with a letter or a digit" % bucket)
    if not re.search("[0-9a-z]$", bucket, re.UNICODE):
        raise Exceptions.ParameterError("Bucket name '%s' must end with a letter or a digit" % bucket)
    return True
__all__.append("check_bucket_name")
def check_bucket_name_dns_conformity(bucket):
    """Like check_bucket_name(dns_strict=True) but returns False instead of raising."""
    try:
        return check_bucket_name(bucket, dns_strict = True)
    except Exceptions.ParameterError:
        return False
__all__.append("check_bucket_name_dns_conformity")
def check_bucket_name_dns_support(bucket_host, bucket_name):
    """
    True only when the host_bucket template supports per-bucket hostnames
    AND the bucket name itself is DNS compatible.
    """
    if "%(bucket)s" not in bucket_host:
        return False
    try:
        return check_bucket_name(bucket_name, dns_strict = True)
    except Exceptions.ParameterError:
        return False
__all__.append("check_bucket_name_dns_support")
def getBucketFromHostname(hostname):
    """
    bucket, success = getBucketFromHostname(hostname)

    Only works for hostnames derived from bucket names
    using Config.host_bucket pattern.
    Returns bucket name and a boolean success flag.
    """
    # Create RE pattern from Config.host_bucket
    pattern = Config.Config().host_bucket % { 'bucket' : '(?P<bucket>.*)' }
    match = re.match(pattern, hostname, re.UNICODE)
    if match is None:
        return (hostname, False)
    return match.groups()[0], True
__all__.append("getBucketFromHostname")
def getHostnameFromBucket(bucket):
    """Expand the configured host_bucket template with *bucket*."""
    return Config.Config().host_bucket % { 'bucket' : bucket }
__all__.append("getHostnameFromBucket")
def calculateChecksum(buffer, mfile, offset, chunk_size, send_chunk):
    """MD5 hexdigest of *buffer*, or — when buffer is '' — of chunk_size
    bytes of *mfile* starting at *offset*, read send_chunk bytes at a time."""
    digest = md5()
    if buffer == '':
        mfile.seek(offset)
        remaining = chunk_size
        while remaining > 0:
            piece = mfile.read(min(send_chunk, remaining))
            digest.update(piece)
            remaining -= len(piece)
    else:
        digest.update(buffer)
    return digest.hexdigest()
__all__.append("calculateChecksum")
# Deal with the fact that pwd and grp modules don't exist for Windows
try:
    import pwd
    def getpwuid_username(uid):
        """returns a username from the password database for the given uid"""
        return pwd.getpwuid(uid).pw_name
except ImportError:
    import getpass
    def getpwuid_username(uid):
        # pwd unavailable (e.g. Windows): fall back to the current user's name.
        return getpass.getuser()
__all__.append("getpwuid_username")
try:
    import grp
    def getgrgid_grpname(gid):
        """returns a groupname from the group database for the given gid"""
        return grp.getgrgid(gid).gr_name
except ImportError:
    def getgrgid_grpname(gid):
        # grp unavailable (e.g. Windows): report a placeholder group name.
        return "nobody"
__all__.append("getgrgid_grpname")
# vim:et:ts=4:sts=4:ai
#!/usr/bin/env python
# LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Regenerate the Scintilla and SciTE source files that list
# all the lexers and all the properties files.
# Should be run whenever a new lexer is added or removed.
# Requires Python 2.4 or later
# Most files are regenerated in place with templates stored in comments.
# The VS .NET project file is generated into a different file as the
# VS .NET environment will not retain comments when modifying the file.
# The files are copied to a string apart from sections between a
# ++Autogenerated comment and a --Autogenerated comment which is
# generated by the CopyWithInsertion function. After the whole
# string is instantiated, it is compared with the target file and
# if different the file is rewritten.
# Does not regenerate the Visual C++ 6 project files but does the VS .NET
# project file.
import string
import sys
import os
import glob
# EOL constants; NATIVE is the end-of-line convention of the platform the
# script is running on.
CR = "\r"
LF = "\n"
CRLF = "\r\n"
if sys.platform == "win32":
    NATIVE = CRLF
else:
    # Yes, LF is the native EOL even on Mac OS X. CR is just for
    # Mac OS <=9 (a.k.a. "Mac Classic")
    NATIVE = LF
# Automatically generated sections contain start and end comments,
# a definition line and the results.
# The results are replaced by regenerating based on the definition line.
# The definition line is a comment prefix followed by "**".
# If there is a digit after the ** then this indicates which list to use
# and the digit and next character are not part of the definition
# Backslash is used as an escape within the definition line.
# The part between \( and \) is repeated for each item in the list.
# \* is replaced by each list item. \t, and \n are tab and newline.
def CopyWithInsertion(input, commentPrefix, retainDefs, eolType, *lists):
    """Return *input* with each ++Autogenerated section regenerated.

    Text outside ++Autogenerated/--Autogenerated markers is copied through
    unchanged.  Inside a section, a "**" definition line (see the comment
    block above for its mini-language) is expanded against one of *lists*;
    when retainDefs is true the marker and definition lines are kept in the
    output.  The result is joined with eolType line endings.
    """
    copying = 1
    listid = 0
    output = []
    for line in input.splitlines(0):
        isStartGenerated = line.startswith(commentPrefix + "++Autogenerated")
        if copying and not isStartGenerated:
            output.append(line)
        if isStartGenerated:
            if retainDefs:
                output.append(line)
            copying = 0
            definition = ""
        elif not copying and line.startswith(commentPrefix + "**"):
            if retainDefs:
                output.append(line)
            definition = line[len(commentPrefix + "**"):]
            # HTML comments close with " -->"; that suffix is not part of
            # the definition.
            if (commentPrefix == "<!--") and (" -->" in definition):
                definition = definition.replace(" -->", "")
            listid = 0
            if definition[0] in string.digits:
                listid = int(definition[:1])
                definition = definition[2:]
            # Hide double slashes as a control character
            definition = definition.replace("\\\\", "\001")
            # Do some normal C style transforms
            definition = definition.replace("\\n", "\n")
            definition = definition.replace("\\t", "\t")
            # Get the doubled backslashes back as single backslashes
            definition = definition.replace("\001", "\\")
            startRepeat = definition.find("\\(")
            endRepeat = definition.find("\\)")
            intro = definition[:startRepeat]
            out = ""
            if intro.endswith("\n"):
                pos = 0
            else:
                pos = len(intro)
            out += intro
            middle = definition[startRepeat+2:endRepeat]
            # Expand the repeated \(...\) segment once per list item,
            # wrapping lines with a backslash-continuation near column 80.
            for i in lists[listid]:
                item = middle.replace("\\*", i)
                if pos and (pos + len(item) >= 80):
                    out += "\\\n"
                    pos = 0
                out += item
                pos += len(item)
                if item.endswith("\n"):
                    pos = 0
            outro = definition[endRepeat+2:]
            out += outro
            out = out.replace("\n", eolType) # correct EOLs in generated content
            output.append(out)
        elif line.startswith(commentPrefix + "--Autogenerated"):
            copying = 1
            if retainDefs:
                output.append(line)
    output = [line.rstrip(" \t") for line in output] # trim trailing whitespace
    return eolType.join(output) + eolType
def UpdateFile(filename, updated):
    """ If the file is different to updated then copy updated
    into the file else leave alone so CVS and make don't treat
    it as modified. """
    try:
        with open(filename, "rb") as infile:
            original = infile.read().decode('utf-8')
    except IOError: # File is not there yet
        with open(filename, "wb") as out:
            out.write(updated.encode('utf-8'))
        print("New %s" % filename)
        return
    if updated != original:
        os.unlink(filename)
        with open(filename, "wb") as out:
            out.write(updated.encode('utf-8'))
        print("Changed %s " % filename)
def Generate(inpath, outpath, commentPrefix, eolType, *lists):
    """Generate 'outpath' from 'inpath'.

    "eolType" is the EOL style used in the generated file: one of the
    LF, CRLF, CR, or NATIVE constants.
    """
    try:
        infile = open(inpath, "rb")
    except IOError:
        print("Can not open %s" % inpath)
        return
    original = infile.read()
    infile.close()
    original = original.decode('utf-8')
    # Definitions are retained only when regenerating a file in place.
    retainDefs = (inpath == outpath)
    updated = CopyWithInsertion(original, commentPrefix, retainDefs, eolType, *lists)
    UpdateFile(outpath, updated)
def Regenerate(filename, commentPrefix, eolType, *lists):
    """Regenerate *filename* in place.

    "eolType" is the EOL style for the generated file: one of the LF,
    CRLF, CR, or NATIVE constants.
    """
    Generate(filename, filename, commentPrefix, eolType, *lists)
def FindModules(lexFile):
    """Return the LexerModule object names declared in *lexFile*.

    Declarations look like: LexerModule lmAda(SCLEX_ADA, ...);
    """
    modules = []
    # BUGFIX: use a context manager so the file handle is closed
    # (the old code leaked it).
    with open(lexFile) as f:
        for l in f.readlines():
            if l.startswith("LexerModule"):
                l = l.replace("(", " ")
                modules.append(l.split()[1])
    return modules
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
knownIrregularProperties = [
    "fold",
    "styling.within.preprocessor",
    "tab.timmy.whinge.level",
    "asp.default.language",
    "html.tags.case.sensitive",
    "ps.level",
    "ps.tokenize",
    "sql.backslash.escapes",
    "nsis.uservars",
    "nsis.ignorecase"
]
def FindProperties(lexFile):
    """Return {name: 1} for every property referenced in *lexFile*.

    Only lowercase names that start with "fold."/"lexer." or appear in
    knownIrregularProperties are kept; commented-out lines are skipped.
    """
    properties = {}
    # BUGFIX: use a context manager so the file handle is closed
    # (the old code leaked it).
    with open(lexFile) as f:
        for l in f.readlines():
            if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
                l = l.strip()
                if not l.startswith("//"): # Drop comments
                    propertyName = l.split("\"")[1]
                    if propertyName.lower() == propertyName:
                        # Only allow lower case property names
                        if propertyName in knownIrregularProperties or \
                            propertyName.startswith("fold.") or \
                            propertyName.startswith("lexer."):
                            properties[propertyName] = 1
    return properties
def FindPropertyDocumentation(lexFile):
    """Return {property_name: documentation_string} extracted from *lexFile*.

    Two sources are recognised: "// property <name>" comment headers
    followed by "//" continuation lines, and DefineProperty("name", ...)
    calls whose following string literals hold the description.  Entries
    that end up with empty documentation are dropped.
    NOTE: the file handle is never closed here (same as elsewhere in this
    script); it is reclaimed by the interpreter.
    """
    documents = {}
    f = open(lexFile)
    name = ""
    for l in f.readlines():
        l = l.strip()
        if "// property " in l:
            propertyName = l.split()[2]
            if propertyName.lower() == propertyName:
                # Only allow lower case property names
                name = propertyName
                documents[name] = ""
        elif "DefineProperty" in l and "\"" in l:
            propertyName = l.split("\"")[1]
            if propertyName.lower() == propertyName:
                # Only allow lower case property names
                name = propertyName
                documents[name] = ""
        elif name:
            # We are inside a property's documentation block: accumulate
            # comment lines and quoted string fragments.
            if l.startswith("//"):
                if documents[name]:
                    documents[name] += " "
                documents[name] += l[2:].strip()
            elif l.startswith("\""):
                l = l[1:].strip()
                if l.endswith(";"):
                    l = l[:-1].strip()
                if l.endswith(")"):
                    l = l[:-1].strip()
                if l.endswith("\""):
                    l = l[:-1]
                # Fix escaped double quotes
                l = l.replace("\\\"", "\"")
                documents[name] += l
            else:
                # Any other line ends the current property's documentation.
                name = ""
    for name in list(documents.keys()):
        if documents[name] == "":
            del documents[name]
    return documents
def ciCompare(a,b):
    # Case-insensitive comparison function (Python 2 only: relies on cmp()).
    return cmp(a.lower(), b.lower())
def ciKey(a):
    # Case-insensitive sort key.
    return a.lower()
def sortListInsensitive(l):
    """Sort list *l* in place, ignoring case."""
    try: # Try key function
        l.sort(key=ciKey)
    except TypeError: # Earlier version of Python, so use comparison function
        l.sort(ciCompare)
def UpdateLineInFile(path, linePrefix, lineReplace):
    """Replace every line of *path* that starts with *linePrefix* by
    *lineReplace*, rewriting the file with NATIVE line endings."""
    output = []
    with open(path, "r") as f:
        for line in f.readlines():
            line = line.rstrip()
            output.append(lineReplace if line.startswith(linePrefix) else line)
    UpdateFile(path, NATIVE.join(output) + NATIVE)
def UpdateVersionNumbers(root):
    """Propagate scintilla/version.txt into every file embedding the version.

    version.txt holds the version as three digits (e.g. "355"); they are
    expanded to the dotted form "3.5.5" and the comma form "3, 5, 5, 0"
    and substituted into the Scintilla resource/project/doc files, and —
    when a scite checkout is present next to scintilla — the SciTE ones.
    """
    with open(root + "scintilla/version.txt") as f:
        version = f.read()
    versionDotted = version[0] + '.' + version[1] + '.' + version[2]
    versionCommad = version[0] + ', ' + version[1] + ', ' + version[2] + ', 0'
    UpdateLineInFile(root + "scintilla/win32/ScintRes.rc", "#define VERSION_SCINTILLA",
        "#define VERSION_SCINTILLA \"" + versionDotted + "\"")
    UpdateLineInFile(root + "scintilla/win32/ScintRes.rc", "#define VERSION_WORDS",
        "#define VERSION_WORDS " + versionCommad)
    UpdateLineInFile(root + "scintilla/qt/ScintillaEditBase/ScintillaEditBase.pro",
        "VERSION =",
        "VERSION = " + versionDotted)
    UpdateLineInFile(root + "scintilla/qt/ScintillaEdit/ScintillaEdit.pro",
        "VERSION =",
        "VERSION = " + versionDotted)
    UpdateLineInFile(root + "scintilla/doc/ScintillaDownload.html", "       Release",
        "       Release " + versionDotted)
    UpdateLineInFile(root + "scintilla/doc/index.html",
        '          <font color="#FFCC99" size="3"> Release version',
        '          <font color="#FFCC99" size="3"> Release version ' + versionDotted + '<br />')
    if os.path.exists(root + "scite"):
        UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_SCITE",
            "#define VERSION_SCITE \"" + versionDotted + "\"")
        UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_WORDS",
            "#define VERSION_WORDS " + versionCommad)
        UpdateLineInFile(root + "scite/doc/SciTEDownload.html", "       Release",
            "       Release " + versionDotted)
        UpdateLineInFile(root + "scite/doc/SciTE.html",
            '          <font color="#FFCC99" size="3"> Release version',
            '          <font color="#FFCC99" size="3"> Release version ' + versionDotted + '<br />')
def RegenerateAll():
    """Regenerate every Scintilla/SciTE source file that lists lexers or
    properties files, then refresh embedded version numbers.

    Scans scintilla/lexers/Lex*.cxx for lexer modules, properties and
    property documentation, scans scite/src for .properties files, and
    rewrites the autogenerated sections of the makefiles, Catalogue.cxx,
    SciTEProps.cxx, SciTEDoc.html and the generated VS project file.
    """
    root="../../"
    # Find all the lexer source code files
    lexFilePaths = glob.glob(root + "scintilla/lexers/Lex*.cxx")
    sortListInsensitive(lexFilePaths)
    lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
    print(lexFiles)
    lexerModules = []
    lexerProperties = {}
    propertyDocuments = {}
    for lexFile in lexFilePaths:
        lexerModules.extend(FindModules(lexFile))
        for k in FindProperties(lexFile).keys():
            lexerProperties[k] = 1
        documents = FindPropertyDocumentation(lexFile)
        for k in documents.keys():
            propertyDocuments[k] = documents[k]
    sortListInsensitive(lexerModules)
    lexerProperties = list(lexerProperties.keys())
    sortListInsensitive(lexerProperties)
    # Generate HTML to document each property
    # This is done because tags can not be safely put inside comments in HTML
    documentProperties = list(propertyDocuments.keys())
    sortListInsensitive(documentProperties)
    propertiesHTML = []
    for k in documentProperties:
        propertiesHTML.append("\t<tr id='property-%s'>\n\t<td>%s</td>\n\t<td>%s</td>\n\t</tr>" %
            (k, k, propertyDocuments[k]))
    # Find all the SciTE properties files
    otherProps = ["abbrev.properties", "Embedded.properties", "SciTEGlobal.properties", "SciTE.properties"]
    if os.path.exists(root + "scite"):
        propFilePaths = glob.glob(root + "scite/src/*.properties")
        sortListInsensitive(propFilePaths)
        propFiles = [os.path.basename(f) for f in propFilePaths if os.path.basename(f) not in otherProps]
        sortListInsensitive(propFiles)
        print(propFiles)
    Regenerate(root + "scintilla/src/Catalogue.cxx", "//", NATIVE, lexerModules)
    Regenerate(root + "scintilla/win32/scintilla.mak", "#", NATIVE, lexFiles)
    Regenerate(root + "scintilla/win32/scintilla_vc6.mak", "#", NATIVE, lexFiles)
    if os.path.exists(root + "scite"):
        Regenerate(root + "scite/win32/makefile", "#", NATIVE, propFiles)
        Regenerate(root + "scite/win32/scite.mak", "#", NATIVE, propFiles)
        Regenerate(root + "scite/src/SciTEProps.cxx", "//", NATIVE, lexerProperties)
        Regenerate(root + "scite/doc/SciTEDoc.html", "<!--", NATIVE, propertiesHTML)
        # The VS .NET project is generated into a separate file (see the
        # header comment of this script).
        Generate(root + "scite/boundscheck/vcproj.gen",
            root + "scite/boundscheck/SciTE.vcproj", "#", NATIVE, lexFiles)
    UpdateVersionNumbers(root)
# Run the regeneration when this script is executed.
RegenerateAll()
#!/usr/bin/env python3
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import diff
import models
def _MakeSym(section, size, path, name=None, container=None):
  """Create a models.Symbol for tests; the default name encodes section+size."""
  if name is None:
    # Trailing letter is important since diffing trims numbers.
    name = '{}_{}A'.format(section[1:], size)
  sym = models.Symbol(section,
                      size,
                      full_name=name,
                      template_name=name,
                      name=name,
                      object_path=path)
  if container:
    sym.container = container
  return sym
def _SetName(symbol, full_name, name=None):
if name is None:
name = full_name
symbol.full_name = full_name
symbol.template_name = full_name
symbol.name = name
def _CreateSizeInfo(aliases=None, containers=None):
  """Build a six-symbol SizeInfo fixture.

  |aliases| is an optional list of (start, end) index pairs; the symbols in
  each slice become aliases of one another.  A single default container is
  created unless |containers| is supplied.
  """
  build_config = {}
  section_sizes = {'.text': 100, '.bss': 40}
  if not containers:
    containers = [
        models.Container('', metadata={}, section_sizes=section_sizes)
    ]
  models.BaseContainer.AssignShortNames(containers)
  TEXT = models.SECTION_TEXT
  symbols = [
      _MakeSym(models.SECTION_DEX_METHOD, 10, 'a', 'com.Foo#bar()'),
      _MakeSym(TEXT, 20, 'a', '.Lfoo'),
      _MakeSym(TEXT, 30, 'b'),
      _MakeSym(TEXT, 40, 'b'),
      _MakeSym(TEXT, 50, 'b'),
      _MakeSym(TEXT, 60, ''),
  ]
  default_container = containers[0]
  for sym in symbols:
    sym.container = default_container
  if aliases:
    for start, end in aliases:
      group = symbols[start:end]
      for sym in group:
        sym.aliases = group
  return models.SizeInfo(build_config, containers, symbols)
class DiffTest(unittest.TestCase):
  """Checks symbol-matching behaviour of diff.Diff()."""

  def testIdentity(self):
    # Diffing two identical size infos yields no changes at all.
    before = _CreateSizeInfo()
    after = _CreateSizeInfo()
    delta = diff.Diff(before, after)
    self.assertEqual((0, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)
    self.assertEqual(0, delta.raw_symbols.padding)

  def testSimple_Add(self):
    before = _CreateSizeInfo()
    after = _CreateSizeInfo()
    before.raw_symbols -= [before.raw_symbols[0]]
    delta = diff.Diff(before, after)
    self.assertEqual((0, 1, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(10, delta.raw_symbols.size)
    self.assertEqual(0, delta.raw_symbols.padding)

  def testSimple_Delete(self):
    before = _CreateSizeInfo()
    after = _CreateSizeInfo()
    after.raw_symbols -= [after.raw_symbols[0]]
    delta = diff.Diff(before, after)
    self.assertEqual((0, 0, 1), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(-10, delta.raw_symbols.size)
    self.assertEqual(0, delta.raw_symbols.padding)

  def testSimple_Change(self):
    before = _CreateSizeInfo()
    after = _CreateSizeInfo()
    after.raw_symbols[0].size += 11
    after.raw_symbols[0].padding += 20
    after.raw_symbols[-1].size += 11
    delta = diff.Diff(before, after)
    self.assertEqual((2, 1, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(22, delta.raw_symbols.size)
    self.assertEqual(20, delta.raw_symbols.padding)

  def testDontMatchAcrossSections(self):
    before = _CreateSizeInfo()
    before.raw_symbols += [
        _MakeSym(models.SECTION_TEXT, 11, 'asdf', name='Hello'),
    ]
    after = _CreateSizeInfo()
    after.raw_symbols += [
        _MakeSym(models.SECTION_RODATA, 11, 'asdf', name='Hello'),
    ]
    # For simplicity, not associating |symbols| with |containers|.
    delta = diff.Diff(before, after)
    self.assertEqual((0, 1, 1), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testDontMatchAcrossContainers(self):
    container_a = models.Container('A', metadata={}, section_sizes={})
    container_b = models.Container('B', metadata={}, section_sizes={})
    both = [container_a, container_b]
    before = _CreateSizeInfo(containers=both)
    before.raw_symbols[0].container = container_b
    after = _CreateSizeInfo(containers=both)
    delta = diff.Diff(before, after)
    # Should show as one add and one remove rather than a change.
    self.assertEqual((0, 1, 1), delta.raw_symbols.CountsByDiffStatus()[1:])

  def testAliases_Remove(self):
    before = _CreateSizeInfo(aliases=[(0, 3)])
    after = _CreateSizeInfo(aliases=[(0, 2)])
    delta = diff.Diff(before, after)
    # Aliases cause all sizes to change.
    self.assertEqual((3, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testAliases_Add(self):
    before = _CreateSizeInfo(aliases=[(0, 2)])
    after = _CreateSizeInfo(aliases=[(0, 3)])
    delta = diff.Diff(before, after)
    # Aliases cause all sizes to change.
    self.assertEqual((3, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testAliases_ChangeGroup(self):
    before = _CreateSizeInfo(aliases=[(0, 2), (2, 5)])
    after = _CreateSizeInfo(aliases=[(0, 3), (3, 5)])
    delta = diff.Diff(before, after)
    # Aliases cause all sizes to change.
    self.assertEqual((4, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testStarSymbolNormalization(self):
    before = _CreateSizeInfo()
    _SetName(before.raw_symbols[0], '* symbol gap 1 (end of section)')
    after = _CreateSizeInfo()
    _SetName(after.raw_symbols[0], '* symbol gap 2 (end of section)')
    delta = diff.Diff(before, after)
    self.assertEqual((0, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testNumberNormalization(self):
    text = models.SECTION_TEXT
    before = _CreateSizeInfo()
    before.raw_symbols += [
        _MakeSym(text, 11, 'a', name='.L__unnamed_1193'),
        _MakeSym(text, 22, 'a', name='.L__unnamed_1194'),
        _MakeSym(text, 33, 'a', name='SingleCategoryPreferences$3#this$0'),
        _MakeSym(text, 44, 'a', name='.L.ref.tmp.2'),
    ]
    after = _CreateSizeInfo()
    after.raw_symbols += [
        _MakeSym(text, 11, 'a', name='.L__unnamed_2194'),
        _MakeSym(text, 22, 'a', name='.L__unnamed_2195'),
        _MakeSym(text, 33, 'a', name='SingleCategoryPreferences$9#this$009'),
        _MakeSym(text, 44, 'a', name='.L.ref.tmp.137'),
    ]
    # For simplicity, not associating |symbols| with |containers|.
    delta = diff.Diff(before, after)
    self.assertEqual((0, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testChangedParams(self):
    # Ensure that params changes match up so long as path doesn't change.
    before = _CreateSizeInfo()
    before.raw_symbols[0].full_name = 'Foo()'
    before.raw_symbols[0].name = 'Foo'
    after = _CreateSizeInfo()
    after.raw_symbols[0].full_name = 'Foo(bool)'
    after.raw_symbols[0].name = 'Foo'
    delta = diff.Diff(before, after)
    self.assertEqual((0, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testChangedPaths_Native(self):
    # Ensure that non-globally-unique symbols are not matched when path changes.
    before = _CreateSizeInfo()
    after = _CreateSizeInfo()
    after.raw_symbols[1].object_path = 'asdf'
    delta = diff.Diff(before, after)
    self.assertEqual((0, 1, 1), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testChangedPaths_StringLiterals(self):
    # Ensure that string literals are not matched up.
    before = _CreateSizeInfo()
    before.raw_symbols[0].full_name = models.STRING_LITERAL_NAME
    after = _CreateSizeInfo()
    after.raw_symbols[0].full_name = models.STRING_LITERAL_NAME
    after.raw_symbols[0].object_path = 'asdf'
    delta = diff.Diff(before, after)
    self.assertEqual((0, 1, 1), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testChangedPaths_Java(self):
    # Ensure that Java symbols are matched up.
    before = _CreateSizeInfo()
    after = _CreateSizeInfo()
    after.raw_symbols[0].object_path = 'asdf'
    delta = diff.Diff(before, after)
    self.assertEqual((0, 0, 0), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)

  def testChangedPaths_ChangedParams(self):
    # Ensure that path changes are not matched when params also change.
    before = _CreateSizeInfo()
    before.raw_symbols[0].full_name = 'Foo()'
    before.raw_symbols[0].name = 'Foo'
    after = _CreateSizeInfo()
    after.raw_symbols[0].full_name = 'Foo(bool)'
    after.raw_symbols[0].name = 'Foo'
    after.raw_symbols[0].object_path = 'asdf'
    delta = diff.Diff(before, after)
    self.assertEqual((0, 1, 1), delta.raw_symbols.CountsByDiffStatus()[1:])
    self.assertEqual(0, delta.raw_symbols.size)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from datetime import datetime, date, timedelta
from chambres.models import Chambre, Souci, Client, Reservation, Tache, Amour, CacheJour
import calendar
@login_required
def futur(request, annee, mois, jour, anneeF, moisF, jourF):
    """Renders the 'futur' calendar grid between two dates (inclusive).

    URL parameters are the year/month/day strings of the start date
    (annee/mois/jour) and the end date (anneeF/moisF/jourF).  Builds a
    week-by-week grid of CacheJour rows, creating any missing row with
    zeroed counters, and renders it via the 'chambres/futur.html' template
    under context key 'an'.
    """
    date_deb = date(int(annee), int(mois), int(jour))
    date_fin = date(int(anneeF), int(moisF), int(jourF))

    liste_totale = []
    month_list = []
    # Pad the first week so the first day lands in its weekday column.
    week_list = [None] * date_deb.weekday()

    for offset in range((date_fin - date_deb).days + 1):
        # NOTE: do not reuse the |jour| parameter as the loop variable; the
        # original shadowed it.
        day = date_deb + timedelta(offset)
        if day.weekday() == 0:
            # Monday starts a new week row.
            month_list.append(week_list)
            week_list = []
        caches = CacheJour.objects.filter(jour=day)
        if caches:
            cache = caches[0]
        else:
            # Lazily create the per-day cache row with zeroed counters.
            cache = CacheJour(jour=day)
            cache.nbCh = 0
            cache.nbTotal = 0
            cache.nbDortoir = 0
            cache.nbanc = 0
            cache.save()
        week_list.append(cache)

    month_list.append(week_list)
    liste_totale.append(month_list)
    return render_to_response('chambres/futur.html', {'an': liste_totale})
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/percpu-refcount.h>
/*
* Initially, a percpu refcount is just a set of percpu counters. Initially, we
* don't try to detect the ref hitting 0 - which means that get/put can just
* increment or decrement the local counter. Note that the counter on a
* particular cpu can (and will) wrap - this is fine, when we go to shutdown the
* percpu counters will all sum to the correct value
*
* (More precisely: because modular arithmetic is commutative the sum of all the
* percpu_count vars will be equal to what it would have been if all the gets
* and puts were done to a single integer, even if some of the percpu integers
* overflow or underflow).
*
* The real trick to implementing percpu refcounts is shutdown. We can't detect
* the ref hitting 0 on every put - this would require global synchronization
* and defeat the whole purpose of using percpu refs.
*
* What we do is require the user to keep track of the initial refcount; we know
* the ref can't hit 0 before the user drops the initial ref, so as long as we
* convert to non percpu mode before the initial ref is dropped everything
* works.
*
* Converting to non percpu mode is done with some RCUish stuff in
* percpu_ref_kill. Additionally, we need a bias value so that the
* atomic_long_t can't hit 0 before we've added up all the percpu refs.
*/
#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
/* Strip the state flag bits to recover the raw percpu counter pointer. */
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
* @release: function which will be called when refcount hits 0
* @flags: PERCPU_REF_INIT_* flags
* @gfp: allocation mask to use
*
* Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
* @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
* change the start state to atomic with the latter setting the initial refcount
* to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
*/
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	/*
	 * The low __PERCPU_REF_FLAG_BITS of the percpu pointer hold state
	 * flags, so the allocation must be aligned beyond them.
	 */
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		ref->percpu_count_ptr = 0;
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		/* starting atomic/dead implies the ref may be revived later */
		data->allow_reinit = true;
	} else {
		/* percpu mode keeps the atomic count positive via the bias */
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;	/* the initial reference */

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
/*
 * Free the percpu counter, if any, and leave @ref in atomic+dead state.
 * Must not run while a mode switch is still in flight.
 */
static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
/**
* percpu_ref_exit - undo percpu_ref_init()
* @ref: percpu_ref to exit
*
* This function exits @ref. The caller is responsible for ensuring that
* @ref is no longer in active use. The usual places to invoke this
* function from are the @ref->release() callback or in init failure path
* where percpu_ref_init() succeeded but other parts of the initialization
* of the embedding object failed.
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	/*
	 * Stash the final atomic count in the bits above the flag bits of
	 * percpu_count_ptr so percpu_ref_is_zero() keeps working after
	 * ->data is freed below.
	 */
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
/*
 * Runs after the RCU grace period: deliver and clear the pending
 * ->confirm_switch callback, then release the switch's temporary ref.
 */
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	/* let waiters in __percpu_ref_switch_mode() proceed */
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}
/* RCU callback: fold all percpu counts into the atomic counter. */
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	static atomic_t underflows;
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	/* only the first few underflowing refs get a full object dump */
	if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
		      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		      data->release, atomic_long_read(&data->count)) &&
	    atomic_inc_return(&underflows) < 4) {
		pr_err("%s(): percpu_ref underflow", __func__);
		mem_dump_obj(data);
	}

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}
/*
 * Placeholder callback: a non-NULL ->confirm_switch marks a switch as in
 * progress even when the caller supplied no confirmation callback.
 */
static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		/* already atomic; just deliver the confirmation, if any */
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress.  Use noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu_hurry(&ref->data->rcu,
		       percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	/* nothing to do if we are already in percpu mode */
	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	/* without allow_reinit the percpu counters may already be freed */
	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
/* Apply the mode implied by ->force_atomic and the DEAD flag, with lock held. */
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion.  If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	/* dying/dead refs stay in atomic mode until reinit */
	if (data->force_atomic || percpu_ref_is_dying(ref))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}
/**
* percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode
* @confirm_switch: optional confirmation callback
*
* There's no reason to use this function for the usual reference counting.
* Use percpu_ref_kill[_and_confirm]().
*
* Schedule switching of @ref to atomic mode. All its percpu counts will
* be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
* block, is invoked. This function may be invoked concurrently with all
* the get/put operations and can safely be mixed with kill and reinit
* operations. Note that @ref will stay in atomic mode across kill/reinit
* cycles until percpu_ref_switch_to_percpu() is called.
*
* This function may block if @ref is in the process of switching to atomic
* mode. If the caller ensures that @ref is not in the process of
* switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	/* force_atomic stays set until percpu_ref_switch_to_percpu() */
	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
/**
* percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode
*
* Schedule switching the ref to atomic mode, and wait for the
* switch to complete. Caller must ensure that no other thread
* will switch back to percpu mode.
*/
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	/* the RCU callback clears ->confirm_switch once the switch is done */
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
/**
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
* @ref: percpu_ref to switch to percpu mode
*
* There's no reason to use this function for the usual reference counting.
* To re-use an expired ref, use percpu_ref_reinit().
*
* Switch @ref to percpu mode. This function may be invoked concurrently
* with all the get/put operations and can safely be mixed with kill and
* reinit operations. This function reverses the sticky atomic state set
* by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
* dying or dead, the actual switching takes place on the following
* percpu_ref_reinit().
*
* This function may block if @ref is in the process of switching to atomic
* mode. If the caller ensures that @ref is not in the process of
* switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	/* clearing force_atomic lets __percpu_ref_switch_mode() go percpu */
	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
/**
* percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
* @ref: percpu_ref to kill
* @confirm_kill: optional confirmation callback
*
* Equivalent to percpu_ref_kill() but also schedules kill confirmation if
* @confirm_kill is not NULL. @confirm_kill, which may not block, will be
* called after @ref is seen as dead from all CPUs at which point all
* further invocations of percpu_ref_tryget_live() will fail. See
* percpu_ref_tryget_live() for details.
*
* This function normally doesn't block and can be called from any context
* but it may block if @confirm_kill is specified and @ref is in the
* process of switching to atomic mode by percpu_ref_switch_to_atomic().
*
* There are no implied RCU grace periods between kill and release.
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(percpu_ref_is_dying(ref),
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	/* mark dead, schedule the atomic switch, then drop the initial ref */
	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*
* Returns %true if @ref reached zero.
*
* This function is safe to call as long as @ref is between init and exit.
*/
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	/*
	 * In percpu mode the ref can't have reached zero: the user still
	 * holds the initial ref (see the design comment at the top of the
	 * file).
	 */
	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		/* exited ref: percpu_ref_exit() stashed the final count here */
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
/**
* percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
*
* Re-initialize @ref so that it's in the same state as when it finished
* percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
* initialized successfully and reached 0 but not exited.
*
* Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
* this function is in progress.
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
	/* reinit is only legal once the refcount has actually reached zero */
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
/**
* percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
*
* Modify @ref so that it's in the same state as before percpu_ref_kill() was
* called. @ref must be dead but must not yet have exited.
*
* If @ref->release() frees @ref then the caller is responsible for
* guaranteeing that @ref->release() does not get called while this
* function is in progress.
*
* Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
* this function is in progress.
*/
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!percpu_ref_is_dying(ref));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	/* clear DEAD, retake the ref dropped by kill, re-apply the mode */
	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect); | c | github | https://github.com/torvalds/linux | lib/percpu-refcount.c |
# Helper to create files with unique contents
# Create multiple files with unique contents within this test run. Takes the
# number of directories, the number of files in each directory, and the base
# directory.
#
# test_create_unique_files 2 3 my_dir -- Creates 2 directories with 3 files
# each in my_dir, all with contents
# different from previous invocations
# of this command in this run.
test_create_unique_files () {
	test "$#" -ne 3 && BUG "3 param"

	# The whole body is one &&-chain so any failure propagates to the
	# caller.  test_tick advances $test_tick, making $basedata unique to
	# this invocation, which in turn makes every generated file's
	# contents unique within the run.
	local dirs="$1" &&
	local files="$2" &&
	local basedir="$3" &&
	local counter="0" &&
	local i &&
	local j &&
	test_tick &&
	local basedata="$basedir$test_tick" &&
	rm -rf "$basedir" &&
	for i in $(test_seq $dirs)
	do
		local dir="$basedir/dir$i" &&
		mkdir -p "$dir" &&
		for j in $(test_seq $files)
		do
			counter=$((counter + 1)) &&
			echo "$basedata.$counter">"$dir/file$j.txt"
		done
	done
} | unknown | github | https://github.com/git/git | t/lib-unique-files.sh |
env:
SMART_RETRIES: "true"
steps:
- group: platform-support-unix
steps:
- label: "{{matrix.image}} / platform-support-unix"
command: .ci/scripts/run-gradle.sh --continue -Dbwc.checkout.align=true functionalTests
timeout_in_minutes: 420
matrix:
setup:
image:
- debian-12
- debian-13
- opensuse-leap-15
- oraclelinux-8
- oraclelinux-9
- sles-15
- ubuntu-2204
- ubuntu-2404
- rocky-8
- rocky-9
- rhel-8
- rhel-9
- rhel-10
- almalinux-8
- almalinux-9
agents:
provider: gcp
image: family/elasticsearch-{{matrix.image}}
localSsds: 1
localSsdInterface: nvme
machineType: custom-32-98304
env: {}
retry:
automatic:
- exit_status: "-1"
limit: 3
signal_reason: none
- signal_reason: agent_stop
limit: 3
- exit_status: "1"
limit: 1
- group: platform-support-windows
steps:
- label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-windows"
command: |
.\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh --continue
timeout_in_minutes: 420
matrix:
setup:
image:
- windows-2022
- windows-2025
GRADLE_TASK:
- checkPart1
- checkPart2
- checkPart3
- checkPart4
- checkPart5
- checkPart6
- checkRestCompat
agents:
provider: gcp
image: family/elasticsearch-{{matrix.image}}
machineType: n1-standard-16
diskType: pd-ssd
diskSizeGb: 350
env:
GRADLE_TASK: "{{matrix.GRADLE_TASK}}"
retry:
automatic:
- exit_status: "-1"
limit: 3
signal_reason: none
- signal_reason: agent_stop
limit: 3
- exit_status: "1"
limit: 1
- group: platform-support-arm
steps:
- label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-arm"
command: .ci/scripts/run-gradle.sh --continue -Dbwc.checkout.align=true {{matrix.GRADLE_TASK}}
timeout_in_minutes: 420
matrix:
setup:
image:
- almalinux-8-aarch64
- ubuntu-2404-aarch64
GRADLE_TASK:
- checkPart1
- checkPart2
- checkPart3
- checkPart4
- checkPart5
- checkPart6
- checkRestCompat
agents:
provider: aws
imagePrefix: elasticsearch-{{matrix.image}}
instanceType: m6g.8xlarge
diskSizeGb: 350
diskType: gp3
diskName: /dev/sda1
env:
GRADLE_TASK: "{{matrix.GRADLE_TASK}}"
retry:
automatic:
- exit_status: "-1"
limit: 3
signal_reason: none
- signal_reason: agent_stop
limit: 3
- exit_status: "1"
limit: 1
- group: platform-support-unix-aws
steps:
- label: "{{matrix.image}} / platform-support-aws"
command: .ci/scripts/run-gradle.sh --continue -Dbwc.checkout.align=true functionalTests
timeout_in_minutes: 420
matrix:
setup:
image:
- amazonlinux-2023
retry:
automatic:
- exit_status: "-1"
limit: 3
signal_reason: none
- signal_reason: agent_stop
limit: 3
- exit_status: "1"
limit: 1
agents:
provider: aws
imagePrefix: elasticsearch-{{matrix.image}}
instanceType: m6a.8xlarge
diskSizeGb: 350
diskType: gp3
diskName: /dev/sda1 | unknown | github | https://github.com/elastic/elasticsearch | .buildkite/pipelines/periodic-platform-support.yml |
# Copyright 2012-2013 STACKOPS TECHNOLOGIES S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fabric.api import settings, sudo
from cuisine import package_ensure, package_clean
def stop():
    """Stop every stackops-head service, ignoring individual failures."""
    services = ('apirestd', 'discovery-agent', 'events-agent',
                'health-system', 'celeryd')
    # warn_only keeps fabric from aborting when a service is already down.
    with settings(warn_only=True):
        for service in services:
            sudo("nohup service %s stop" % service)
def start():
    """Initialize stackops-head, then (re)start all of its services."""
    sudo("python /var/lib/stackops-head/bin/head-init initialize all "
         "2>/dev/null")
    for service in ('apirestd', 'discovery-agent', 'events-agent',
                    'health-system', 'celeryd'):
        sudo("nohup service %s restart" % service)
def configure_ubuntu_packages():
    """Configure portal packages"""
    # cuisine installs the package only if it is not already present.
    package_ensure('stackops-head')
def uninstall_ubuntu_packages():
    """Uninstall portal packages"""
    # cuisine removes the package and its configuration.
    package_clean('stackops-head')
def install(dhcp_start, dhcp_end, dhcp_listen_interface, gateway,
            netmask, domain, dns, license_manager_url,
            license_token='vs0QiaN9TA6lIIe3uPSfiG3fs',
            download_iso="False", enable_dhcp="True"):
    """Generate automation configuration.

    Preseeds the stackops-head debconf answers, then installs the package
    (which consumes the answers non-interactively).
    """
    # (debconf key suffix, debconf type, value) in the order they are seeded.
    selections = [
        ('accepted-stackops-license', 'boolean', 'true'),
        ('dhcp-start', 'string', dhcp_start),
        ('dhcp-end', 'string', dhcp_end),
        ('dhcp_listen_interface', 'string', dhcp_listen_interface),
        ('domain', 'string', domain),
        ('gateway', 'string', gateway),
        ('netmask', 'string', netmask),
        ('dns', 'string', dns),
        ('download-stackops', 'boolean', str(download_iso).lower()),
        ('enable_dhcp', 'boolean', str(enable_dhcp).lower()),
        ('license-manager-url', 'string', license_manager_url),
        ('license-manager-token', 'string', license_token),
    ]
    for key, value_type, value in selections:
        sudo('echo stackops-head stackops-head/%s %s %s | '
             'debconf-set-selections' % (key, value_type, value))
    configure_ubuntu_packages()
def configure(endpoint,
              token_service,
              mysql_username,
              mysql_password,
              automation_user,
              automation_password,
              mysql_schema="stackopshead",
              mysql_host="127.0.0.1",
              mysql_port="3306"):
    """Configure automation's MySQL connection and Keystone settings.

    Patches the --sql_connection flag in every stackops-head config file,
    then points the API REST daemon at the given Keystone endpoint and
    credentials by rewriting its flag file in place with sed.
    """
    # Build the MySQL connection URL used by all daemons.
    sql_connection = ("mysql://" + mysql_username + ":" + mysql_password +
                      "@" + mysql_host + ":" + mysql_port + "/" + mysql_schema)
    sudo('sed -e "s,^--sql_connection\s*=\s*.\+$,--sql_connection=%s," '
         '-i /var/lib/stackops-head/etc/*.conf ' % sql_connection)
    # Keystone-related settings apply only to the API REST daemon.
    sudo('sed -e "s,^--automation_user\s*=\s*.\+$,--automation_user=%s," '
         '-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf'
         % automation_user)
    sudo('sed -e "s,^--automation_password\s*=\s*.\+$,'
         '--automation_password=%s," -i '
         '/var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf'
         % automation_password)
    uri_keystone_validation = endpoint + '/tokens/'
    sudo('sed -e "s,^--use_authorization\s*=\s*.\+$,--use_authorization=%s," '
         '-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf'
         % "True")
    sudo('sed -e "s,^--uri_keystone_validation\s*=\s*.\+$,'
         '--uri_keystone_validation=%s," '
         '-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf '
         % uri_keystone_validation)
    sudo('sed -e "s,^--token_service\s*=\s*.\+$,'
         '--token_service=%s," '
         '-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf '
         % token_service)
import unittest
from test import support
import collections, random, string
import gc, weakref
class DictTest(unittest.TestCase):
    """Unit tests for the built-in dict type.

    Covers constructors and literals, the keys/values/items views and
    their set semantics, mutation APIs (update, pop, setdefault, ...),
    error propagation from user-defined __hash__/__eq__, historical
    resizing bugs, __missing__ dispatch, and CPython's GC-tracking
    optimization for dicts.
    """
    def test_invalid_keyword_arguments(self):
        class Custom(dict):
            pass
        for invalid in {1 : 2}, Custom({1 : 2}):
            with self.assertRaises(TypeError):
                dict(**invalid)
            with self.assertRaises(TypeError):
                {}.update(**invalid)
    def test_constructor(self):
        # calling built-in types without argument must return empty
        self.assertEqual(dict(), {})
        self.assertIsNot(dict(), {})
    def test_literal_constructor(self):
        # check literal constructor for different sized dicts
        # (to exercise the BUILD_MAP oparg).
        for n in (0, 1, 6, 256, 400):
            items = [(''.join(random.sample(string.ascii_letters, 8)), i)
                     for i in range(n)]
            random.shuffle(items)
            formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
            dictliteral = '{' + ', '.join(formatted_items) + '}'
            self.assertEqual(eval(dictliteral), dict(items))
    def test_bool(self):
        self.assertIs(not {}, True)
        self.assertTrue({1: 2})
        self.assertIs(bool({}), False)
        self.assertIs(bool({1: 2}), True)
    def test_keys(self):
        d = {}
        self.assertEqual(set(d.keys()), set())
        d = {'a': 1, 'b': 2}
        k = d.keys()
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertRaises(TypeError, d.keys, None)
        self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
    def test_values(self):
        d = {}
        self.assertEqual(set(d.values()), set())
        d = {1:2}
        self.assertEqual(set(d.values()), {2})
        self.assertRaises(TypeError, d.values, None)
        self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
    def test_items(self):
        d = {}
        self.assertEqual(set(d.items()), set())
        d = {1:2}
        self.assertEqual(set(d.items()), {(1, 2)})
        self.assertRaises(TypeError, d.items, None)
        self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
    def test_contains(self):
        d = {}
        self.assertNotIn('a', d)
        self.assertFalse('a' in d)
        self.assertTrue('a' not in d)
        d = {'a': 1, 'b': 2}
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertNotIn('c', d)
        self.assertRaises(TypeError, d.__contains__)
    def test_len(self):
        d = {}
        self.assertEqual(len(d), 0)
        d = {'a': 1, 'b': 2}
        self.assertEqual(len(d), 2)
    def test_getitem(self):
        d = {'a': 1, 'b': 2}
        self.assertEqual(d['a'], 1)
        self.assertEqual(d['b'], 2)
        d['c'] = 3
        d['a'] = 4
        self.assertEqual(d['c'], 3)
        self.assertEqual(d['a'], 4)
        del d['b']
        self.assertEqual(d, {'a': 4, 'c': 3})
        self.assertRaises(TypeError, d.__getitem__)
        # Exceptions raised by key comparison must propagate, not turn
        # into KeyError.
        class BadEq(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 24
        d = {}
        d[BadEq()] = 42
        self.assertRaises(KeyError, d.__getitem__, 23)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.__getitem__, x)
    def test_clear(self):
        d = {1:1, 2:2, 3:3}
        d.clear()
        self.assertEqual(d, {})
        self.assertRaises(TypeError, d.clear, None)
    def test_update(self):
        d = {}
        d.update({1:100})
        d.update({2:20})
        d.update({1:1, 2:2, 3:3})
        self.assertEqual(d, {1:1, 2:2, 3:3})
        d.update()
        self.assertEqual(d, {1:1, 2:2, 3:3})
        self.assertRaises((TypeError, AttributeError), d.update, None)
        # update() must accept any mapping-like object exposing keys()
        # and __getitem__.
        class SimpleUserDict:
            def __init__(self):
                self.d = {1:1, 2:2, 3:3}
            def keys(self):
                return self.d.keys()
            def __getitem__(self, i):
                return self.d[i]
        d.clear()
        d.update(SimpleUserDict())
        self.assertEqual(d, {1:1, 2:2, 3:3})
        class Exc(Exception): pass
        d.clear()
        class FailingUserDict:
            def keys(self):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = 1
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i:
                            self.i = 0
                            return 'a'
                        raise Exc
                return BogonIter()
            def __getitem__(self, key):
                return key
        self.assertRaises(Exc, d.update, FailingUserDict())
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = ord('a')
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i <= ord('z'):
                            rtn = chr(self.i)
                            self.i += 1
                            return rtn
                        raise StopIteration
                return BogonIter()
            def __getitem__(self, key):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        class badseq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, {}.update, badseq())
        self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
    def test_fromkeys(self):
        self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        d = {}
        self.assertIsNot(d.fromkeys('abc'), d)
        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
        self.assertEqual(d.fromkeys([]), {})
        def g():
            yield 1
        self.assertEqual(d.fromkeys(g()), {1:None})
        self.assertRaises(TypeError, {}.fromkeys, 3)
        class dictlike(dict): pass
        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
        self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
        self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
        class mydict(dict):
            def __new__(cls):
                return collections.UserDict()
        ud = mydict.fromkeys('ab')
        self.assertEqual(ud, {'a':None, 'b':None})
        self.assertIsInstance(ud, collections.UserDict)
        self.assertRaises(TypeError, dict.fromkeys)
        class Exc(Exception): pass
        class baddict1(dict):
            def __init__(self):
                raise Exc()
        self.assertRaises(Exc, baddict1.fromkeys, [1])
        class BadSeq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, dict.fromkeys, BadSeq())
        class baddict2(dict):
            def __setitem__(self, key, value):
                raise Exc()
        self.assertRaises(Exc, baddict2.fromkeys, [1])
        # test fast path for dictionary inputs
        d = dict(zip(range(6), range(6)))
        self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
    def test_copy(self):
        d = {1:1, 2:2, 3:3}
        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
        self.assertEqual({}.copy(), {})
        self.assertRaises(TypeError, d.copy, None)
    def test_get(self):
        d = {}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        d = {'a': 1, 'b': 2}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        self.assertEqual(d.get('a'), 1)
        self.assertEqual(d.get('a', 3), 1)
        self.assertRaises(TypeError, d.get)
        self.assertRaises(TypeError, d.get, None, None, None)
    def test_setdefault(self):
        # dict.setdefault()
        d = {}
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key0', [])
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key', []).append(3)
        self.assertEqual(d['key'][0], 3)
        d.setdefault('key', []).append(4)
        self.assertEqual(len(d['key']), 2)
        self.assertRaises(TypeError, d.setdefault)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.setdefault, x, [])
    def test_popitem(self):
        # dict.popitem()
        for copymode in -1, +1:
            # -1: b has same structure as a
            # +1: b is a.copy()
            for log2size in range(12):
                size = 2**log2size
                a = {}
                b = {}
                for i in range(size):
                    a[repr(i)] = i
                    if copymode < 0:
                        b[repr(i)] = i
                if copymode > 0:
                    b = a.copy()
                for i in range(size):
                    ka, va = ta = a.popitem()
                    self.assertEqual(va, int(ka))
                    kb, vb = tb = b.popitem()
                    self.assertEqual(vb, int(kb))
                    self.assertFalse(copymode < 0 and ta != tb)
                self.assertFalse(a)
                self.assertFalse(b)
        d = {}
        self.assertRaises(KeyError, d.popitem)
    def test_pop(self):
        # Tests for pop with specified key
        d = {}
        k, v = 'abc', 'def'
        d[k] = v
        self.assertRaises(KeyError, d.pop, 'ghi')
        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)
        self.assertRaises(KeyError, d.pop, k)
        self.assertEqual(d.pop(k, v), v)
        d[k] = v
        self.assertEqual(d.pop(k, 1), v)
        self.assertRaises(TypeError, d.pop)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.pop, x)
    def test_mutatingiteration(self):
        # changing dict size during iteration
        d = {}
        d[1] = 1
        with self.assertRaises(RuntimeError):
            for i in d:
                d[i+1] = 1
    def test_repr(self):
        d = {}
        self.assertEqual(repr(d), '{}')
        d[1] = 2
        self.assertEqual(repr(d), '{1: 2}')
        d = {}
        d[1] = d
        self.assertEqual(repr(d), '{1: {...}}')
        class Exc(Exception): pass
        class BadRepr(object):
            def __repr__(self):
                raise Exc()
        d = {1: BadRepr()}
        self.assertRaises(Exc, repr, d)
    def test_eq(self):
        self.assertEqual({}, {})
        self.assertEqual({1: 2}, {1: 2})
        class Exc(Exception): pass
        class BadCmp(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 1
        d1 = {BadCmp(): 1}
        d2 = {1: 1}
        with self.assertRaises(Exc):
            d1 == d2
    def test_keys_contained(self):
        self.helper_keys_contained(lambda x: x.keys())
        self.helper_keys_contained(lambda x: x.items())
    def helper_keys_contained(self, fn):
        # Test rich comparisons against dict key views, which should behave the
        # same as sets.
        empty = fn(dict())
        empty2 = fn(dict())
        smaller = fn({1:1, 2:2})
        larger = fn({1:1, 2:2, 3:3})
        larger2 = fn({1:1, 2:2, 3:3})
        larger3 = fn({4:1, 2:2, 3:3})
        self.assertTrue(smaller <  larger)
        self.assertTrue(smaller <= larger)
        self.assertTrue(larger >  smaller)
        self.assertTrue(larger >= smaller)
        self.assertFalse(smaller >= larger)
        self.assertFalse(smaller >  larger)
        self.assertFalse(larger  <= smaller)
        self.assertFalse(larger  <  smaller)
        self.assertFalse(smaller <  larger3)
        self.assertFalse(smaller <= larger3)
        self.assertFalse(larger3 >  smaller)
        self.assertFalse(larger3 >= smaller)
        # Inequality strictness
        self.assertTrue(larger2 >= larger)
        self.assertTrue(larger2 <= larger)
        self.assertFalse(larger2 > larger)
        self.assertFalse(larger2 < larger)
        self.assertTrue(larger == larger2)
        self.assertTrue(smaller != larger)
        # There is an optimization on the zero-element case.
        self.assertTrue(empty == empty2)
        self.assertFalse(empty != empty2)
        self.assertFalse(empty == smaller)
        self.assertTrue(empty != smaller)
        # With the same size, an elementwise compare happens
        self.assertTrue(larger != larger3)
        self.assertFalse(larger == larger3)
    def test_errors_in_view_containment_check(self):
        class C:
            def __eq__(self, other):
                raise RuntimeError
        d1 = {1: C()}
        d2 = {1: C()}
        with self.assertRaises(RuntimeError):
            d1.items() == d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() != d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() <= d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() >= d2.items()
        d3 = {1: C(), 2: C()}
        with self.assertRaises(RuntimeError):
            d2.items() < d3.items()
        with self.assertRaises(RuntimeError):
            d3.items() > d2.items()
    def test_dictview_set_operations_on_keys(self):
        k1 = {1:1, 2:2}.keys()
        k2 = {1:1, 2:2, 3:3}.keys()
        k3 = {4:4}.keys()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {1,2})
        self.assertEqual(k2 - k1, {3})
        self.assertEqual(k3 - k1, {4})
        self.assertEqual(k1 & k2, {1,2})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {1,2,3})
        self.assertEqual(k1 ^ k2, {3})
        self.assertEqual(k1 ^ k3, {1,2,4})
    def test_dictview_set_operations_on_items(self):
        k1 = {1:1, 2:2}.items()
        k2 = {1:1, 2:2, 3:3}.items()
        k3 = {4:4}.items()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {(1,1), (2,2)})
        self.assertEqual(k2 - k1, {(3,3)})
        self.assertEqual(k3 - k1, {(4,4)})
        self.assertEqual(k1 & k2, {(1,1), (2,2)})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
        self.assertEqual(k1 ^ k2, {(3,3)})
        self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
    def test_dictview_mixed_set_operations(self):
        # Just a few for .keys()
        self.assertTrue({1:1}.keys() == {1})
        self.assertTrue({1} == {1:1}.keys())
        self.assertEqual({1:1}.keys() | {2}, {1, 2})
        self.assertEqual({2} | {1:1}.keys(), {1, 2})
        # And a few for .items()
        self.assertTrue({1:1}.items() == {(1,1)})
        self.assertTrue({(1,1)} == {1:1}.items())
        self.assertEqual({1:1}.items() | {2}, {(1,1), 2})
        self.assertEqual({2} | {1:1}.items(), {(1,1), 2})
    def test_missing(self):
        # Make sure dict doesn't have a __missing__ method
        self.assertFalse(hasattr(dict, "__missing__"))
        self.assertFalse(hasattr({}, "__missing__"))
        # Test several cases:
        # (D) subclass defines __missing__ method returning a value
        # (E) subclass defines __missing__ method raising RuntimeError
        # (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at a all
        class D(dict):
            def __missing__(self, key):
                return 42
        d = D({1: 2, 3: 4})
        self.assertEqual(d[1], 2)
        self.assertEqual(d[3], 4)
        self.assertNotIn(2, d)
        self.assertNotIn(2, d.keys())
        self.assertEqual(d[2], 42)
        class E(dict):
            def __missing__(self, key):
                raise RuntimeError(key)
        e = E()
        with self.assertRaises(RuntimeError) as c:
            e[42]
        self.assertEqual(c.exception.args, (42,))
        class F(dict):
            def __init__(self):
                # An instance variable __missing__ should have no effect
                self.__missing__ = lambda key: None
        f = F()
        with self.assertRaises(KeyError) as c:
            f[42]
        self.assertEqual(c.exception.args, (42,))
        class G(dict):
            pass
        g = G()
        with self.assertRaises(KeyError) as c:
            g[42]
        self.assertEqual(c.exception.args, (42,))
    def test_tuple_keyerror(self):
        # SF #1576657
        d = {}
        with self.assertRaises(KeyError) as c:
            d[(1,)]
        self.assertEqual(c.exception.args, ((1,),))
    def test_bad_key(self):
        # Dictionary lookups should fail if __eq__() raises an exception.
        class CustomException(Exception):
            pass
        class BadDictKey:
            def __hash__(self):
                return hash(self.__class__)
            def __eq__(self, other):
                if isinstance(other, self.__class__):
                    raise CustomException
                return other
        d = {}
        x1 = BadDictKey()
        x2 = BadDictKey()
        d[x1] = 1
        for stmt in ['d[x2] = 2',
                     'z = d[x2]',
                     'x2 in d',
                     'd.get(x2)',
                     'd.setdefault(x2, 42)',
                     'd.pop(x2)',
                     'd.update({x2: 2})']:
            with self.assertRaises(CustomException):
                exec(stmt, locals())
    def test_resize1(self):
        # Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
        # This version got an assert failure in debug build, infinite loop in
        # release build.  Unfortunately, provoking this kind of stuff requires
        # a mix of inserts and deletes hitting exactly the right hash codes in
        # exactly the right order, and I can't think of a randomized approach
        # that would be *likely* to hit a failing case in reasonable time.
        d = {}
        for i in range(5):
            d[i] = i
        for i in range(5):
            del d[i]
        for i in range(5, 9):  # i==8 was the problem
            d[i] = i
    def test_resize2(self):
        # Another dict resizing bug (SF bug #1456209).
        # This caused Segmentation faults or Illegal instructions.
        class X(object):
            def __hash__(self):
                return 5
            def __eq__(self, other):
                if resizing:
                    d.clear()
                return False
        d = {}
        resizing = False
        d[X()] = 1
        d[X()] = 2
        d[X()] = 3
        d[X()] = 4
        d[X()] = 5
        # now trigger a resize
        resizing = True
        d[9] = 6
    def test_empty_presized_dict_in_freelist(self):
        # Bug #3537: if an empty but presized dict with a size larger
        # than 7 was in the freelist, it triggered an assertion failure
        with self.assertRaises(ZeroDivisionError):
            d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
                 'f': None, 'g': None, 'h': None}
        d = {}
    def test_container_iterator(self):
        # Bug #3680: tp_traverse was not implemented for dictiter and
        # dictview objects.
        class C(object):
            pass
        views = (dict.items, dict.values, dict.keys)
        for v in views:
            obj = C()
            ref = weakref.ref(obj)
            container = {obj: 1}
            obj.v = v(container)
            obj.x = iter(obj.v)
            del obj, container
            gc.collect()
            self.assertIs(ref(), None, "Cycle was not collected")
    def _not_tracked(self, t):
        # Nested containers can take several collections to untrack
        gc.collect()
        gc.collect()
        self.assertFalse(gc.is_tracked(t), t)
    def _tracked(self, t):
        self.assertTrue(gc.is_tracked(t), t)
        gc.collect()
        gc.collect()
        self.assertTrue(gc.is_tracked(t), t)
    @support.cpython_only
    def test_track_literals(self):
        # Test GC-optimization of dict literals
        x, y, z, w = 1.5, "a", (1, None), []
        self._not_tracked({})
        self._not_tracked({x:(), y:x, z:1})
        self._not_tracked({1: "a", "b": 2})
        self._not_tracked({1: 2, (None, True, False, ()): int})
        self._not_tracked({1: object()})
        # Dicts with mutable elements are always tracked, even if those
        # elements are not tracked right now.
        self._tracked({1: []})
        self._tracked({1: ([],)})
        self._tracked({1: {}})
        self._tracked({1: set()})
    @support.cpython_only
    def test_track_dynamic(self):
        # Test GC-optimization of dynamically-created dicts
        class MyObject(object):
            pass
        x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
        d = dict()
        self._not_tracked(d)
        d[1] = "a"
        self._not_tracked(d)
        d[y] = 2
        self._not_tracked(d)
        d[z] = 3
        self._not_tracked(d)
        self._not_tracked(d.copy())
        d[4] = w
        self._tracked(d)
        self._tracked(d.copy())
        d[4] = None
        self._not_tracked(d)
        self._not_tracked(d.copy())
        # dd isn't tracked right now, but it may mutate and therefore d
        # which contains it must be tracked.
        d = dict()
        dd = dict()
        d[1] = dd
        self._not_tracked(dd)
        self._tracked(d)
        dd[1] = d
        self._tracked(dd)
        d = dict.fromkeys([x, y, z])
        self._not_tracked(d)
        dd = dict()
        dd.update(d)
        self._not_tracked(dd)
        d = dict.fromkeys([x, y, z, o])
        self._tracked(d)
        dd = dict()
        dd.update(d)
        self._tracked(dd)
        d = dict(x=x, y=y, z=z)
        self._not_tracked(d)
        d = dict(x=x, y=y, z=z, w=w)
        self._tracked(d)
        d = dict()
        d.update(x=x, y=y, z=z)
        self._not_tracked(d)
        d.update(w=w)
        self._tracked(d)
        d = dict([(x, y), (z, 1)])
        self._not_tracked(d)
        d = dict([(x, y), (z, w)])
        self._tracked(d)
        d = dict()
        d.update([(x, y), (z, 1)])
        self._not_tracked(d)
        d.update([(x, y), (z, w)])
        self._tracked(d)
    @support.cpython_only
    def test_track_subtypes(self):
        # Dict subtypes are always tracked
        class MyDict(dict):
            pass
        self._tracked(MyDict())
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol test suite against built-in dict."""
    type2test = dict
class Dict(dict):
    """Trivial dict subclass used by SubclassMappingTests below."""
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol test suite against a dict subclass."""
    type2test = Dict
def test_main():
    """Entry point used by regrtest: run all test cases in this module."""
    support.run_unittest(
        DictTest,
        GeneralMappingTests,
        SubclassMappingTests,
    )
if __name__ == "__main__":
test_main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Based on:
# http://src.chromium.org/viewvc/blink/trunk/Source/build/scripts/template_expander.py
import functools
import imp
import inspect
import os.path
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
def _GetDirAbove(dirname):
  """Returns the directory "above" this file containing |dirname| (which must
  also be "above" this file)."""
  path = os.path.abspath(__file__)
  while True:
    # Peel one path component off per iteration, walking toward the root.
    path, tail = os.path.split(path)
    # At the filesystem root os.path.split() yields an empty tail, so this
    # assert is what ends the loop (with AssertionError) when |dirname| is
    # not actually an ancestor of this file — a deliberate fail-fast.
    assert tail
    if tail == dirname:
      return path
try:
imp.find_module("jinja2")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("public"), "public/third_party"))
import jinja2
def ApplyTemplate(mojo_generator, base_dir, path_to_template, params,
                  filters=None, **kwargs):
  """Render the jinja2 template at |path_to_template| (relative to |base_dir|)
  with |params|, using the generator's jinja parameters and globals plus any
  extra |filters| and environment |kwargs|."""
  template_dir, template_file = os.path.split(path_to_template)
  env_args = dict(mojo_generator.GetJinjaParameters())
  env_args.update(kwargs)
  jinja_env = jinja2.Environment(
      loader=jinja2.FileSystemLoader([os.path.join(base_dir, template_dir)]),
      keep_trailing_newline=True,
      **env_args)
  jinja_env.globals.update(mojo_generator.GetGlobals())
  if filters:
    jinja_env.filters.update(filters)
  return jinja_env.get_template(template_file).render(params)
def UseJinja(path_to_template, **kwargs):
  """Decorator factory: the decorated generator method returns template
  parameters, which are rendered through |path_to_template| (resolved
  relative to the caller's source file) via ApplyTemplate."""
  # Get the directory of our caller's file.
  base_dir = os.path.dirname(inspect.getfile(sys._getframe(1)))
  def RealDecorator(generator):
    # functools.wraps replaces the Python-2-only
    # `GeneratorInternal.func_name = generator.func_name` assignment, which
    # raises AttributeError on Python 3; wraps() preserves __name__ (and
    # __doc__/__module__) on both 2 and 3.
    @functools.wraps(generator)
    def GeneratorInternal(*args, **kwargs2):
      parameters = generator(*args, **kwargs2)
      return ApplyTemplate(args[0], base_dir, path_to_template, parameters,
                           **kwargs)
    return GeneratorInternal
  return RealDecorator
"""About Dialog for IDLE
"""
import os
import sys
import webbrowser
from platform import python_version, architecture
from tkinter import Toplevel, Frame, Label, Button, PhotoImage
from tkinter import SUNKEN, TOP, BOTTOM, LEFT, X, BOTH, W, EW, NSEW, E
from idlelib import textview
# Version string shown in the dialog title and version label.
pyver = python_version()
if sys.platform == 'darwin':
    # On macOS, derive pointer width from sys.maxsize rather than
    # platform.architecture() — presumably because the latter is
    # unreliable for universal binaries (TODO: confirm).
    bits = '64' if sys.maxsize > 2**32 else '32'
else:
    bits = architecture()[0][:2]
class AboutDialog(Toplevel):
    """Modal about dialog for idle
    """
    def __init__(self, parent, title=None, *, _htest=False, _utest=False):
        """Create popup, do not return until tk widget destroyed.
        parent - parent of this dialog
        title - string which is title of popup dialog
        _htest - bool, change box location when running htest
        _utest - bool, don't wait_window when running unittest
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        # place dialog below parent if running htest
        self.geometry("+%d+%d" % (
                        parent.winfo_rootx()+30,
                        parent.winfo_rooty()+(30 if not _htest else 100)))
        self.bg = "#bbbbbb"
        self.fg = "#000000"
        self.create_widgets()
        self.resizable(height=False, width=False)
        self.title(title or
                   f'About IDLE {pyver} ({bits} bit)')
        self.transient(parent)
        # Modal: grab all input until the dialog is dismissed.
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.ok)
        self.parent = parent
        self.button_ok.focus_set()
        self.bind('<Return>', self.ok)  # dismiss dialog
        self.bind('<Escape>', self.ok)  # dismiss dialog
        self._current_textview = None
        self._utest = _utest
        if not _utest:
            self.deiconify()
            # Block here until the dialog is destroyed (skipped in unittest).
            self.wait_window()
    def create_widgets(self):
        """Build the dialog: header, byline, link buttons, version info,
        and the Python/IDLE license-and-credits button rows."""
        frame = Frame(self, borderwidth=2, relief=SUNKEN)
        frame_buttons = Frame(self)
        frame_buttons.pack(side=BOTTOM, fill=X)
        frame.pack(side=TOP, expand=True, fill=BOTH)
        self.button_ok = Button(frame_buttons, text='Close',
                                command=self.ok)
        self.button_ok.pack(padx=5, pady=5)
        frame_background = Frame(frame, bg=self.bg)
        frame_background.pack(expand=True, fill=BOTH)
        header = Label(frame_background, text='IDLE', fg=self.fg,
                       bg=self.bg, font=('courier', 24, 'bold'))
        header.grid(row=0, column=0, sticky=E, padx=10, pady=10)
        # PhotoImage only supports .png from tk 8.6 on; fall back to .gif.
        tkpatch = self._root().getvar('tk_patchLevel')
        ext = '.png' if tkpatch >= '8.6' else '.gif'
        icon = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            'Icons', f'idle_48{ext}')
        self.icon_image = PhotoImage(master=self._root(), file=icon)
        logo = Label(frame_background, image=self.icon_image, bg=self.bg)
        logo.grid(row=0, column=0, sticky=W, rowspan=2, padx=10, pady=10)
        byline_text = "Python's Integrated Development\nand Learning Environment" + 5*'\n'
        byline = Label(frame_background, text=byline_text, justify=LEFT,
                       fg=self.fg, bg=self.bg)
        byline.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        forums_url = "https://discuss.python.org"
        forums = Button(frame_background, text='Python (and IDLE) Discussion', width=35,
                        highlightbackground=self.bg,
                        command=lambda: webbrowser.open(forums_url))
        forums.grid(row=6, column=0, sticky=W, padx=10, pady=10)
        docs_url = ("https://docs.python.org/%d.%d/library/idle.html" %
                    sys.version_info[:2])
        docs = Button(frame_background, text='IDLE Documentation', width=35,
                      highlightbackground=self.bg,
                      command=lambda: webbrowser.open(docs_url))
        docs.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=10)
        Frame(frame_background, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        tclver = str(self.info_patchlevel())
        # Only show the tk patchlevel separately when it differs from tcl's.
        tkver = ' and ' + tkpatch if tkpatch != tclver else ''
        versions = f"Python {pyver} with tcl/tk {tclver}{tkver}"
        vers = Label(frame_background, text=versions, fg=self.fg, bg=self.bg)
        vers.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        py_buttons = Frame(frame_background, bg=self.bg)
        py_buttons.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        self.py_license = Button(py_buttons, text='License', width=8,
                                 highlightbackground=self.bg,
                                 command=self.show_py_license)
        self.py_license.pack(side=LEFT, padx=10, pady=10)
        self.py_copyright = Button(py_buttons, text='Copyright', width=8,
                                   highlightbackground=self.bg,
                                   command=self.show_py_copyright)
        self.py_copyright.pack(side=LEFT, padx=10, pady=10)
        self.py_credits = Button(py_buttons, text='Credits', width=8,
                                 highlightbackground=self.bg,
                                 command=self.show_py_credits)
        self.py_credits.pack(side=LEFT, padx=10, pady=10)
        Frame(frame_background, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle = Label(frame_background, text='IDLE', fg=self.fg, bg=self.bg)
        idle.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        idle_buttons = Frame(frame_background, bg=self.bg)
        idle_buttons.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        self.readme = Button(idle_buttons, text='Readme', width=8,
                             highlightbackground=self.bg,
                             command=self.show_readme)
        self.readme.pack(side=LEFT, padx=10, pady=10)
        self.idle_news = Button(idle_buttons, text='News', width=8,
                                highlightbackground=self.bg,
                                command=self.show_idle_news)
        self.idle_news.pack(side=LEFT, padx=10, pady=10)
        self.idle_credits = Button(idle_buttons, text='Credits', width=8,
                                   highlightbackground=self.bg,
                                   command=self.show_idle_credits)
        self.idle_credits.pack(side=LEFT, padx=10, pady=10)
    # License, copyright, and credits are of type _sitebuiltins._Printer
    def show_py_license(self):
        "Handle License button event."
        self.display_printer_text('About - License', license)
    def show_py_copyright(self):
        "Handle Copyright button event."
        self.display_printer_text('About - Copyright', copyright)
    def show_py_credits(self):
        "Handle Python Credits button event."
        self.display_printer_text('About - Python Credits', credits)
    # Encode CREDITS.txt to utf-8 for proper version of Loewis.
    # Specify others as ascii until need utf-8, so catch errors.
    def show_idle_credits(self):
        "Handle Idle Credits button event."
        self.display_file_text('About - Credits', 'CREDITS.txt', 'utf-8')
    def show_readme(self):
        "Handle Readme button event."
        self.display_file_text('About - Readme', 'README.txt', 'ascii')
    def show_idle_news(self):
        "Handle News button event."
        self.display_file_text('About - News', 'News3.txt', 'utf-8')
    def display_printer_text(self, title, printer):
        """Create textview for built-in constants.
        Built-in constants have type _sitebuiltins._Printer.  The
        text is extracted from the built-in and then sent to a text
        viewer with self as the parent and title as the title of
        the popup.
        """
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        self._current_textview = textview.view_text(
            self, title, text, _utest=self._utest)
    def display_file_text(self, title, filename, encoding=None):
        """Create textview for filename.
        The filename needs to be in the current directory.  The path
        is sent to a text viewer with self as the parent, title as
        the title of the popup, and the file encoding.
        """
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        self._current_textview = textview.view_file(
            self, title, fn, encoding, _utest=self._utest)
    def ok(self, event=None):
        "Dismiss help_about dialog."
        self.grab_release()
        self.destroy()
if __name__ == '__main__':
    # Run the unit tests first (without exiting), then the human test.
    from unittest import main
    main('idlelib.idle_test.test_help_about', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(AboutDialog)
#!/usr/bin/env python
import os
from app import create_app, db
from app.models import (
User,
Role,
Agency,
Permission,
IncidentReport,
EditableHTML
)
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.rq import get_worker
from app.parse_csv import parse_to_db
# Import settings from .env file. Must define FLASK_CONFIG
if os.path.exists('.env'):
    print('Importing environment from .env file')
    with open('.env') as env_file:  # close the file when done
        for line in env_file:
            # Split on the first '=' only: values that themselves contain
            # '=' (base64 secrets, URLs with query strings) were previously
            # split into 3+ parts and silently skipped by the len() check.
            var = line.strip().split('=', 1)
            if len(var) == 2:
                os.environ[var[0]] = var[1]
# Build the Flask app from FLASK_CONFIG (default config if unset) and wire
# up the Flask-Script manager and Flask-Migrate integration.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Names pre-imported into the `manage.py shell` session."""
    return {'app': app, 'db': db, 'User': User, 'Role': Role}
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """Run the unit tests."""
    import unittest
    # Discover every test module under tests/ and run them verbosely.
    suite = unittest.TestLoader().discover('tests')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
@manager.command
def recreate_db():
    """
    Recreates a local database. You probably should not use this on
    production.
    """
    # Destructive: drops every table, then rebuilds the schema from the
    # current SQLAlchemy models.
    db.drop_all()
    db.create_all()
    db.session.commit()
@manager.option('-nu',
                '--number-users',
                default=10,
                type=int,
                help='Number of users to create',
                dest='number_users')
@manager.option('-nr',
                '--number-reports',
                default=100,
                type=int,
                help='Number of reports to create',
                dest='number_reports')
def add_fake_data(number_users, number_reports):
    """
    Adds fake data to the database.

    Counts come from the -nu/--number-users and -nr/--number-reports
    command-line options (defaults: 10 users, 100 reports).
    """
    User.generate_fake(count=number_users)
    IncidentReport.generate_fake(count=number_reports)
@manager.command
def setup_dev():
    """Runs the set-up needed for local development.

    Seeds the shared data via setup_general(), then creates one confirmed
    user per role (admin, agency worker, general) with default credentials
    so each role can be exercised immediately.
    """
    setup_general()

    def _create_user(email, phone_number, first_name, permissions,
                     agencies=None):
        # All dev users share the same password and are pre-confirmed so
        # they can log in without going through the confirmation flow.
        user = User(email=email,
                    phone_number=phone_number,
                    password='password',
                    first_name=first_name,
                    last_name='User',
                    role=Role.query.filter_by(permissions=permissions)
                    .first(),
                    confirmed=True)
        if agencies is not None:
            user.agencies = agencies
        db.session.add(user)
        return user

    # Default admin user.
    _create_user('admin@user.com', '+12345678910', 'Admin',
                 Permission.ADMINISTER)
    # Default agency worker user, attached to the SEPTA agency.
    _create_user('agency@user.com', '+11098764321', 'AgencyWorker',
                 Permission.AGENCY_WORKER,
                 agencies=[Agency.get_agency_by_name('SEPTA')])
    # Default general user.
    _create_user('general@user.com', '+15434549876', 'General',
                 Permission.GENERAL)
    db.session.commit()
@manager.option('-f',
                '--filename',
                default='poll244.csv',
                type=str,
                help='Filename of csv to parse',
                dest='filename')
def parse_csv(filename):
    """Parses the given csv file into the database."""
    # Delegates the actual row-by-row import to app.parse_csv.parse_to_db.
    parse_to_db(db, filename)
@manager.command
def setup_prod():
    """Runs the set-up needed for production."""
    # Production only needs the shared seed data; no default users.
    setup_general()
def setup_general():
    """Runs the set-up needed for both local development and production."""
    # Seed the fixed Role and Agency rows and the default FAQ page content
    # (the insert_*/add_default_* helpers presumably skip rows that already
    # exist — verify before re-running against a populated database).
    Role.insert_roles()
    Agency.insert_agencies()
    EditableHTML.add_default_faq()
@manager.command
def run_worker():
    """Initializes a slim rq task queue."""
    # Blocks forever, processing RQ jobs with the flask-rq worker.
    get_worker().work()
if __name__ == '__main__':
manager.run() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adadelta Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adadelta
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64]
# TODO(b/143684500): Eigen to support complex sqrt
if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
_DATA_TYPES += [dtypes.complex64, dtypes.complex128]
class AdadeltaOptimizerTest(test.TestCase):
  """Unit tests for the Keras v2 Adadelta optimizer."""

  def doTestBasic(self, use_resource=False, use_callable_params=False):
    """Runs several Adadelta steps and compares against a NumPy re-implementation.

    Sweeps dtypes, gradient magnitudes and learning rates; in graph mode it
    also verifies the accumulator slot variables after every step.
    """
    num_updates = 4  # number of ADADELTA steps to perform
    for dtype in _DATA_TYPES:
      for grad in [0.2, 0.1, 0.01]:
        for lr in [1.0, 0.5, 0.1]:
          var0_init = [1.0, 2.0]
          var1_init = [3.0, 4.0]
          if use_resource:
            var0 = resource_variable_ops.ResourceVariable(
                var0_init, dtype=dtype)
            var1 = resource_variable_ops.ResourceVariable(
                var1_init, dtype=dtype)
          else:
            var0 = variables.Variable(var0_init, dtype=dtype)
            var1 = variables.Variable(var1_init, dtype=dtype)
          grads = constant_op.constant([grad, grad], dtype=dtype)
          # Reference accumulators for the NumPy re-implementation below.
          accum = 0.0
          accum_update = 0.0
          # ADADELTA gradient optimizer
          rho = 0.95
          epsilon = 1e-8
          if use_callable_params:
            adadelta_opt = adadelta.Adadelta(
                learning_rate=lambda: lr,  # pylint: disable=cell-var-from-loop
                rho=lambda: rho,  # pylint: disable=cell-var-from-loop
                epsilon=epsilon)  # pylint: disable=cell-var-from-loop
          else:
            adadelta_opt = adadelta.Adadelta(
                learning_rate=lr, rho=rho, epsilon=epsilon)
          if not context.executing_eagerly():
            adadelta_update = adadelta_opt.apply_gradients(
                zip([grads, grads], [var0, var1]))
            self.evaluate(variables.global_variables_initializer())
            # Assign slots
            slot = [None] * 2
            slot_update = [None] * 2
            slot[0] = adadelta_opt.get_slot(var0, "accum_grad")
            self.assertEqual(slot[0].shape, var0.shape)
            slot_update[0] = adadelta_opt.get_slot(var0, "accum_var")
            self.assertEqual(slot_update[0].shape, var0.shape)
            slot[1] = adadelta_opt.get_slot(var1, "accum_grad")
            self.assertEqual(slot[1].shape, var1.shape)
            slot_update[1] = adadelta_opt.get_slot(var1, "accum_var")
            self.assertEqual(slot_update[1].shape, var1.shape)
            # Fetch params to validate initial values
            self.assertAllClose(var0_init, self.evaluate(var0))
            self.assertAllClose(var1_init, self.evaluate(var1))
          update = [None] * num_updates
          tot_update = 0
          for step in range(num_updates):
            # Run adadelta update for comparison
            if not context.executing_eagerly():
              self.evaluate(adadelta_update)
            else:
              adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))
            # Perform initial update without previous accum values
            accum = accum * rho + (grad**2) * (1 - rho)
            update[step] = (
                np.sqrt(accum_update + epsilon) *
                (1. / np.sqrt(accum + epsilon)) * grad)
            accum_update = (
                accum_update * rho + (update[step]**2) * (1.0 - rho))
            tot_update += update[step] * lr
            if not context.executing_eagerly():
              # Check that the accumulators have been updated
              # TODO(lxuechen): This is hard to test in eager mode
              for slot_idx in range(2):
                self.assertAllCloseAccordingToType(
                    np.array([accum, accum], dtype=dtype.as_numpy_dtype(0)),
                    self.evaluate(slot[slot_idx]),
                    rtol=1e-5)
                self.assertAllCloseAccordingToType(
                    np.array(
                        [accum_update, accum_update],
                        dtype=dtype.as_numpy_dtype(0)),
                    self.evaluate(slot_update[slot_idx]),
                    rtol=1e-5)
              # Check that the parameters have been updated
              self.assertAllCloseAccordingToType(
                  np.array(
                      [var0_init[0] - tot_update, var0_init[1] - tot_update],
                      dtype=dtype.as_numpy_dtype(0)),
                  self.evaluate(var0),
                  rtol=1e-5)
              self.assertAllCloseAccordingToType(
                  np.array(
                      [var1_init[0] - tot_update, var1_init[1] - tot_update],
                      dtype=dtype.as_numpy_dtype(0)),
                  self.evaluate(var1),
                  rtol=1e-5)

  @test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testResourceBasic(self):
    """Basic convergence test with resource variables."""
    self.doTestBasic(use_resource=True)

  def testBasicCallableParams(self):
    """Basic test with callables for learning_rate and rho (eager only)."""
    with context.eager_mode():
      self.doTestBasic(use_resource=True, use_callable_params=True)

  def testMinimizeSparseResourceVariable(self):
    """minimize() through a sparse embedding lookup updates the variable."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with ops.Graph().as_default():
      for dtype in _DATA_TYPES:
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        def loss():
          pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
          return pred * pred
        sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize(
            loss, var_list=[var0])
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params
        self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))

  def testConstructAdadeltaWithLR(self):
    """The legacy `lr` kwarg is accepted and takes precedence as configured."""
    opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.)
    opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0)
    opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.)
    self.assertIsInstance(opt.lr, variables.Variable)
    self.assertIsInstance(opt_2.lr, variables.Variable)
    self.assertIsInstance(opt_3.lr, variables.Variable)
    self.evaluate(variables.global_variables_initializer())
    # Note: when both are given, the legacy `lr` value wins (1.0, not 0.1).
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))

  def testConstructAdadeltaWithEpsilonValues(self):
    """epsilon=None falls back to the documented default of 1e-7."""
    opt = adadelta.Adadelta(epsilon=None)
    self.assertEqual(opt.epsilon, 1e-7)
    opt = adadelta.Adadelta(epsilon=1e-8)
    self.assertEqual(opt.epsilon, 1e-8)
if __name__ == "__main__":
  # Run the TensorFlow test main when executed directly.
  test.main()
# Copyright 2019 Brian Quinlan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The game model for a game where a ball has to be kept in the air.
The game is similar to breakout except that there are no blocks to break at
the top of the screen i.e. the objective is simply to keep the ball in the air
using a paddle as long as possible.
"""
import abc
import enum
import random
from typing import Mapping, Optional
import numpy as np
def _normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
class Direction(enum.IntEnum):
    """The possible player moves."""

    # The integer values line up with Game.NUM_ACTIONS == 3, presumably as
    # action indices for learning agents -- keep the ordering stable.
    LEFT = 0
    CENTER = 1
    RIGHT = 2
class MoveMaker(abc.ABC):
    """Interface for agents that choose a paddle move from a game state."""

    @abc.abstractmethod
    def make_move(self, state) -> Direction:
        """Returns the Direction to play for the given game state."""
        pass

    def move_probabilities(self, state) -> Optional[Mapping[Direction, float]]:
        """Optionally returns a per-Direction probability distribution.

        The default implementation returns None, meaning the agent does not
        expose move probabilities.
        """
        return None
class Game:
    """Physics and scoring for one keep-the-ball-in-the-air episode.

    The field is 25 units wide. A ball falls under constant gravity and must
    be bounced off a circular paddle anchored just below the bottom edge; the
    episode ends when the ball drops below its own radius.
    """

    # Length of the tuple returned by `state`.
    NUM_STATES = 6
    # Number of possible actions (see `Direction`).
    NUM_ACTIONS = 3

    def __init__(self,
                 reward_time_multiplier=1.0,
                 reward_bounces_multiplier=0.0,
                 reward_height_multiplier=0.0,
                 punish_moves_multiplier=1.0):
        """Creates an episode with a randomized ball position and velocity.

        Args:
            reward_time_multiplier: score added on every simulation step.
            reward_bounces_multiplier: score added per paddle/wall bounce.
            reward_height_multiplier: score per unit of upward ball travel.
            punish_moves_multiplier: score subtracted per paddle move.
        """
        self._ball_pos = np.array([3 + random.random() * 19, 100.0])
        self._ball_v = np.array([-0.3 + random.random() * 0.6, 0.0])
        self.ball_radius = 1
        self._g = np.array([0.0, -0.025])  # constant downward acceleration
        self._paddle_pos = np.array([12.5, -9.5])
        self.paddle_radius = 10
        self._done = False
        self._score = 0
        self._reward_time_multiplier = reward_time_multiplier
        self._reward_bounces_multiplier = reward_bounces_multiplier
        self._reward_height_multiplier = reward_height_multiplier
        self._punish_moves_multiplier = punish_moves_multiplier

    @property
    def done(self):
        """True once the ball has fallen past the bottom of the field."""
        return self._done

    @property
    def ball_x(self):
        return self._ball_pos[0]

    @property
    def ball_y(self):
        return self._ball_pos[1]

    @property
    def paddle_x(self):
        return self._paddle_pos[0]

    @property
    def paddle_y(self):
        return self._paddle_pos[1]

    @property
    def state(self):
        """The observable state as a 6-tuple (see NUM_STATES)."""
        return (
            self._ball_pos[0],
            self._ball_pos[1],
            self._ball_v[0],
            self._ball_v[1],
            self._paddle_pos[0],
            self._paddle_pos[1],
        )

    @property
    def score(self):
        return self._score

    def _update(self):
        """Advances the simulation by one step and returns the new state."""
        assert not self._done
        self._score += self._reward_time_multiplier
        old_y = self.ball_y
        self._ball_v += self._g
        self._ball_pos += self._ball_v
        distance = np.linalg.norm(
            [self.paddle_x - self.ball_x, self.paddle_y - self.ball_y])
        if distance < (self.paddle_radius + self.ball_radius):
            # Paddle hit: reflect the velocity around the contact normal.
            self._score += self._reward_bounces_multiplier
            n = _normalize(
                [self.paddle_x - self.ball_x, self.paddle_y - self.ball_y])
            self._ball_v = self._ball_v - 2 * (np.dot(self._ball_v, n)) * n
            self._ball_pos += self._ball_v
        if self.ball_y < self.ball_radius:
            # Ball fell below the floor: episode over.
            self._done = True
        elif self.ball_x < self.ball_radius:
            # Bounce off the left wall.
            self._score += self._reward_bounces_multiplier
            self._ball_v *= [-1, 1]
            self._ball_pos += [self._ball_v[0], 0]
        elif self.ball_x > (25 - self.ball_radius):
            # Bounce off the right wall.
            self._score += self._reward_bounces_multiplier
            self._ball_v *= [-1, 1]
            self._ball_pos += [self._ball_v[0], 0]
        if self.ball_y > old_y:
            self._score += (self.ball_y -
                            old_y) * self._reward_height_multiplier
        return self.state

    def move_left(self):
        """Moves the paddle 2 units left (clamped at 0) and steps the game."""
        self._score -= self._punish_moves_multiplier
        self._paddle_pos += [-2, 0]
        if self._paddle_pos[0] < 0:
            self._paddle_pos[0] = 0
        return self._update()

    def move_right(self):
        """Moves the paddle 2 units right (clamped at 25) and steps the game."""
        self._score -= self._punish_moves_multiplier
        self._paddle_pos += [2, 0]
        if self._paddle_pos[0] > 25:
            self._paddle_pos[0] = 25
        return self._update()

    def stay(self):
        """Steps the game without moving the paddle (no move penalty)."""
        return self._update()

    def move(self, direction: Direction):
        """Applies one `Direction` action to the game.

        Raises:
            ValueError: if `direction` is not a valid `Direction`.
        """
        if direction == Direction.LEFT:
            self.move_left()
        elif direction == Direction.CENTER:
            self.stay()
        elif direction == Direction.RIGHT:
            self.move_right()
        else:
            # Bug fix: the original used `assert "unexpected direction: ..."`,
            # which asserts a non-empty (always truthy) string and therefore
            # could never fire. Raise explicitly instead.
            raise ValueError('unexpected direction: %r' % (direction,))
#!/usr/bin/env python
import pytest
from netmiko import ConnectHandler
from DEVICE_CREDS import *
def setup_module(module):
    """Connects to the device once and stashes results as module globals.

    The test functions below read prompt_initial, enable_prompt, config_mode,
    exit_config_mode and config_commands_output from the module namespace.
    """
    # Prompt strings we expect from this particular Brocade VDX device.
    module.EXPECTED_RESPONSES = {
        'base_prompt' : 'openstack-rb5',
        'config_mode' : '(config)',
    }
    # brocade_vdx comes from the DEVICE_CREDS star import.
    net_connect = ConnectHandler(**brocade_vdx)
    # Enter enable mode
    module.prompt_initial = net_connect.find_prompt()
    net_connect.enable()
    module.enable_prompt = net_connect.find_prompt()
    # Send a set of config commands
    module.config_mode = net_connect.config_mode()
    config_commands = ['logging raslog console WARNING', 'interface vlan 20', 'banner motd test_message']
    net_connect.send_config_set(config_commands)
    # Exit config mode
    module.exit_config_mode = net_connect.exit_config_mode()
    # Verify config changes
    module.config_commands_output = net_connect.send_command('show vlan brief')
    net_connect.disconnect()
def test_config_mode():
    # The config_mode output captured in setup_module should contain the
    # '(config)' prompt marker while in configuration mode.
    assert EXPECTED_RESPONSES['config_mode'] in config_mode
def test_command_set():
    # 'interface vlan 20' sent in setup_module should appear as VLAN0020
    # in the 'show vlan brief' output.
    assert 'VLAN0020' in config_commands_output
def test_exit_config_mode():
    # After exiting config mode the '(config)' marker must be gone.
    # Idiom fix: `x not in y` instead of `not x in y` (same semantics).
    assert EXPECTED_RESPONSES['config_mode'] not in exit_config_mode
"""Test Home Assistant color util methods."""
import pytest
import voluptuous as vol
import homeassistant.util.color as color_util
# A valid color gamut: red, green and blue xy primaries spanning a triangle.
GAMUT = color_util.GamutType(
    color_util.XYPoint(0.704, 0.296),
    color_util.XYPoint(0.2151, 0.7106),
    color_util.XYPoint(0.138, 0.08),
)
# Invalid: the green primary has a negative x coordinate.
GAMUT_INVALID_1 = color_util.GamutType(
    color_util.XYPoint(0.704, 0.296),
    color_util.XYPoint(-0.201, 0.7106),
    color_util.XYPoint(0.138, 0.08),
)
# Invalid: the red primary has a y coordinate greater than 1.
GAMUT_INVALID_2 = color_util.GamutType(
    color_util.XYPoint(0.704, 1.296),
    color_util.XYPoint(0.2151, 0.7106),
    color_util.XYPoint(0.138, 0.08),
)
# Invalid: all three primaries collapse onto a single (degenerate) point.
GAMUT_INVALID_3 = color_util.GamutType(
    color_util.XYPoint(0.0, 0.0),
    color_util.XYPoint(0.0, 0.0),
    color_util.XYPoint(0.0, 0.0),
)
# Invalid: the three primaries are collinear, so the triangle has no area.
GAMUT_INVALID_4 = color_util.GamutType(
    color_util.XYPoint(0.1, 0.1),
    color_util.XYPoint(0.3, 0.3),
    color_util.XYPoint(0.7, 0.7),
)
# pylint: disable=invalid-name
def test_color_RGB_to_xy_brightness():
    """Table-driven check of color_RGB_to_xy_brightness, with and without a gamut."""
    cases = [
        ((0, 0, 0), (0, 0, 0)),
        ((255, 255, 255), (0.323, 0.329, 255)),
        ((0, 0, 255), (0.136, 0.04, 12)),
        ((0, 255, 0), (0.172, 0.747, 170)),
        ((255, 0, 0), (0.701, 0.299, 72)),
        ((128, 0, 0), (0.701, 0.299, 16)),
        ((255, 0, 0, GAMUT), (0.7, 0.299, 72)),
        ((0, 255, 0, GAMUT), (0.215, 0.711, 170)),
        ((0, 0, 255, GAMUT), (0.138, 0.08, 12)),
    ]
    for args, expected in cases:
        assert color_util.color_RGB_to_xy_brightness(*args) == expected
def test_color_RGB_to_xy():
    """Table-driven check of color_RGB_to_xy, with and without a gamut."""
    cases = [
        ((0, 0, 0), (0, 0)),
        ((255, 255, 255), (0.323, 0.329)),
        ((0, 0, 255), (0.136, 0.04)),
        ((0, 255, 0), (0.172, 0.747)),
        ((255, 0, 0), (0.701, 0.299)),
        ((128, 0, 0), (0.701, 0.299)),
        ((0, 0, 255, GAMUT), (0.138, 0.08)),
        ((0, 255, 0, GAMUT), (0.215, 0.711)),
        ((255, 0, 0, GAMUT), (0.7, 0.299)),
    ]
    for args, expected in cases:
        assert color_util.color_RGB_to_xy(*args) == expected
def test_color_xy_brightness_to_RGB():
    """Table-driven check of color_xy_brightness_to_RGB, with and without a gamut."""
    cases = [
        ((1, 1, 0), (0, 0, 0)),
        ((0.35, 0.35, 128), (194, 186, 169)),
        ((0.35, 0.35, 255), (255, 243, 222)),
        ((1, 0, 255), (255, 0, 60)),
        ((0, 1, 255), (0, 255, 0)),
        ((0, 0, 255), (0, 63, 255)),
        ((1, 0, 255, GAMUT), (255, 0, 3)),
        ((0, 1, 255, GAMUT), (82, 255, 0)),
        ((0, 0, 255, GAMUT), (9, 85, 255)),
    ]
    for args, expected in cases:
        assert color_util.color_xy_brightness_to_RGB(*args) == expected
def test_color_xy_to_RGB():
    """Table-driven check of color_xy_to_RGB, with and without a gamut."""
    cases = [
        ((0.35, 0.35), (255, 243, 222)),
        ((1, 0), (255, 0, 60)),
        ((0, 1), (0, 255, 0)),
        ((0, 0), (0, 63, 255)),
        ((1, 0, GAMUT), (255, 0, 3)),
        ((0, 1, GAMUT), (82, 255, 0)),
        ((0, 0, GAMUT), (9, 85, 255)),
    ]
    for args, expected in cases:
        assert color_util.color_xy_to_RGB(*args) == expected
def test_color_RGB_to_hsv():
    """Table-driven check of color_RGB_to_hsv for the primary colors."""
    cases = [
        ((0, 0, 0), (0, 0, 0)),
        ((255, 255, 255), (0, 0, 100)),
        ((0, 0, 255), (240, 100, 100)),
        ((0, 255, 0), (120, 100, 100)),
        ((255, 0, 0), (0, 100, 100)),
    ]
    for rgb, hsv in cases:
        assert color_util.color_RGB_to_hsv(*rgb) == hsv
def test_color_hsv_to_RGB():
    """Table-driven check of color_hsv_to_RGB for the primary colors."""
    cases = [
        ((0, 0, 0), (0, 0, 0)),
        ((0, 0, 100), (255, 255, 255)),
        ((240, 100, 100), (0, 0, 255)),
        ((120, 100, 100), (0, 255, 0)),
        ((0, 100, 100), (255, 0, 0)),
    ]
    for hsv, rgb in cases:
        assert color_util.color_hsv_to_RGB(*hsv) == rgb
def test_color_hsb_to_RGB():
    """Table-driven check of color_hsb_to_RGB for the primary colors."""
    cases = [
        ((0, 0, 0), (0, 0, 0)),
        ((0, 0, 1.0), (255, 255, 255)),
        ((240, 1.0, 1.0), (0, 0, 255)),
        ((120, 1.0, 1.0), (0, 255, 0)),
        ((0, 1.0, 1.0), (255, 0, 0)),
    ]
    for hsb, rgb in cases:
        assert color_util.color_hsb_to_RGB(*hsb) == rgb
def test_color_xy_to_hs():
    """Table-driven check of color_xy_to_hs, with and without a gamut."""
    cases = [
        ((1, 1), (47.294, 100)),
        ((0.35, 0.35), (38.182, 12.941)),
        ((1, 0), (345.882, 100)),
        ((0, 1), (120, 100)),
        ((0, 0), (225.176, 100)),
        ((1, 0, GAMUT), (359.294, 100)),
        ((0, 1, GAMUT), (100.706, 100)),
        ((0, 0, GAMUT), (221.463, 96.471)),
    ]
    for args, expected in cases:
        assert color_util.color_xy_to_hs(*args) == expected
def test_color_hs_to_xy():
    """Table-driven check of color_hs_to_xy, with and without a gamut."""
    cases = [
        ((180, 100), (0.151, 0.343)),
        ((350, 12.5), (0.356, 0.321)),
        ((140, 50), (0.229, 0.474)),
        ((0, 40), (0.474, 0.317)),
        ((360, 0), (0.323, 0.329)),
        ((0, 100, GAMUT), (0.7, 0.299)),
        ((120, 100, GAMUT), (0.215, 0.711)),
        ((180, 100, GAMUT), (0.17, 0.34)),
        ((240, 100, GAMUT), (0.138, 0.08)),
        ((360, 100, GAMUT), (0.7, 0.299)),
    ]
    for args, expected in cases:
        assert color_util.color_hs_to_xy(*args) == expected
def test_rgb_hex_to_rgb_list():
    """Table-driven check of rgb_hex_to_rgb_list for RGB and RGBW strings."""
    cases = [
        ("ffffff", [255, 255, 255]),
        ("000000", [0, 0, 0]),
        ("ffffffff", [255, 255, 255, 255]),
        ("00000000", [0, 0, 0, 0]),
        ("3399ff", [51, 153, 255]),
        ("3399ff00", [51, 153, 255, 0]),
    ]
    for hex_string, expected in cases:
        assert color_util.rgb_hex_to_rgb_list(hex_string) == expected
def test_color_name_to_rgb_valid_name():
    """Known CSS color names resolve, regardless of internal spacing."""
    cases = [
        ("red", (255, 0, 0)),
        ("blue", (0, 0, 255)),
        ("green", (0, 128, 0)),
        # spaces in the name
        ("dark slate blue", (72, 61, 139)),
        # spaces removed from name
        ("darkslateblue", (72, 61, 139)),
        ("dark slateblue", (72, 61, 139)),
        ("darkslate blue", (72, 61, 139)),
    ]
    for name, expected in cases:
        assert color_util.color_name_to_rgb(name) == expected
def test_color_name_to_rgb_unknown_name_raises_value_error():
    """Test color_name_to_rgb."""
    # An unrecognized color name must raise rather than return a default.
    with pytest.raises(ValueError):
        color_util.color_name_to_rgb("not a color")
def test_color_rgb_to_rgbw():
    """Table-driven check of color_rgb_to_rgbw (white channel extraction)."""
    cases = [
        ((0, 0, 0), (0, 0, 0, 0)),
        ((255, 255, 255), (0, 0, 0, 255)),
        ((255, 0, 0), (255, 0, 0, 0)),
        ((0, 255, 0), (0, 255, 0, 0)),
        ((0, 0, 255), (0, 0, 255, 0)),
        ((255, 127, 0), (255, 127, 0, 0)),
        ((255, 127, 127), (255, 0, 0, 253)),
        ((127, 127, 127), (0, 0, 0, 127)),
    ]
    for rgb, rgbw in cases:
        assert color_util.color_rgb_to_rgbw(*rgb) == rgbw
def test_color_rgbw_to_rgb():
    """Table-driven check of color_rgbw_to_rgb (white channel folding)."""
    cases = [
        ((0, 0, 0, 0), (0, 0, 0)),
        ((0, 0, 0, 255), (255, 255, 255)),
        ((255, 0, 0, 0), (255, 0, 0)),
        ((0, 255, 0, 0), (0, 255, 0)),
        ((0, 0, 255, 0), (0, 0, 255)),
        ((255, 127, 0, 0), (255, 127, 0)),
        ((255, 0, 0, 253), (255, 127, 127)),
        ((0, 0, 0, 127), (127, 127, 127)),
    ]
    for rgbw, rgb in cases:
        assert color_util.color_rgbw_to_rgb(*rgbw) == rgb
def test_color_rgb_to_hex():
    """Table-driven check of color_rgb_to_hex, including float channel input."""
    cases = [
        ((255, 255, 255), "ffffff"),
        ((0, 0, 0), "000000"),
        ((51, 153, 255), "3399ff"),
        # Float channel values are rounded before formatting.
        ((255, 67.9204190, 0), "ff4400"),
    ]
    for rgb, expected in cases:
        assert color_util.color_rgb_to_hex(*rgb) == expected
def test_gamut():
    """check_valid_gamut accepts the valid gamut and rejects all broken ones."""
    assert color_util.check_valid_gamut(GAMUT)
    for invalid in (GAMUT_INVALID_1, GAMUT_INVALID_2,
                    GAMUT_INVALID_3, GAMUT_INVALID_4):
        assert not color_util.check_valid_gamut(invalid)
def test_color_temperature_mired_to_kelvin():
    """Test color_temperature_mired_to_kelvin."""
    assert color_util.color_temperature_mired_to_kelvin(40) == 25000
    assert color_util.color_temperature_mired_to_kelvin(200) == 5000
    # Fix: the original wrapped the call in a dead `assert` -- the call
    # raises before the assertion is ever evaluated. A mired value of 0 is
    # invalid because the conversion divides by it.
    with pytest.raises(ZeroDivisionError):
        color_util.color_temperature_mired_to_kelvin(0)
def test_color_temperature_kelvin_to_mired():
    """Test color_temperature_kelvin_to_mired."""
    assert color_util.color_temperature_kelvin_to_mired(25000) == 40
    assert color_util.color_temperature_kelvin_to_mired(5000) == 200
    # Fix: the original wrapped the call in a dead `assert` -- the call
    # raises before the assertion is ever evaluated. 0 Kelvin is invalid
    # because the conversion divides by it.
    with pytest.raises(ZeroDivisionError):
        color_util.color_temperature_kelvin_to_mired(0)
def test_returns_same_value_for_any_two_temperatures_below_1000():
    """Temperatures below 1000K are clamped: 999K and 0K give the same RGB."""
    assert (color_util.color_temperature_to_rgb(999)
            == color_util.color_temperature_to_rgb(0))
def test_returns_same_value_for_any_two_temperatures_above_40000():
    """Temperatures above 40000K are clamped: 40001K and 999999K match."""
    assert (color_util.color_temperature_to_rgb(40001)
            == color_util.color_temperature_to_rgb(999999))
def test_should_return_pure_white_at_6600():
    """color_temperature_to_rgb(6600) is pure white (255, 255, 255).

    6600K is considered "pure white" light. This is only a rough check
    because the underlying formula is a best-guess approximation.
    """
    assert color_util.color_temperature_to_rgb(6600) == (255, 255, 255)
def test_color_above_6600_should_have_more_blue_than_red_or_green():
    """Above 6600K the light is blue-ish: blue dominates red and green."""
    red, green, blue = color_util.color_temperature_to_rgb(6700)
    assert blue > green
    assert blue > red
def test_color_below_6600_should_have_more_red_than_blue_or_green():
    """Below 6600K the light is red-ish: red dominates green and blue."""
    red, green, blue = color_util.color_temperature_to_rgb(6500)
    assert red > green
    assert red > blue
def test_get_color_in_voluptuous():
    """Test using the get method in color validation."""
    # color_name_to_rgb raises ValueError for unknown names; when used as a
    # voluptuous validator that surfaces as vol.Invalid.
    schema = vol.Schema(color_util.color_name_to_rgb)
    with pytest.raises(vol.Invalid):
        schema("not a color")
    assert schema("red") == (255, 0, 0)
import requests
from roboronya.plugins.plugin import Plugin
from roboronya.config import URBAN_DICT_URL,URBAN_DICT_RANDOM_URL
class Knowledge(Plugin):
    """Plugin that looks up word definitions on Urban Dictionary."""

    description = 'Wanna learn the meaning of something? Ask Roboronya, she knows. For a specific meaning use /whatis <words>, or use /whatis for a random meaning.'
    name = 'whatis'

    def run(roboronya, conv, cmd_args, **kwargs):
        """Fetches a definition (specific or random) and posts it to `conv`.

        Note: follows this codebase's plugin convention where the first
        parameter is the roboronya instance, not an implicit `self`.
        """
        if cmd_args:
            response_json = requests.get(
                URBAN_DICT_URL,
                params={'term': ' '.join(cmd_args)}
            ).json()
        else:
            response_json = requests.get(
                URBAN_DICT_RANDOM_URL
            ).json()
        term_list = response_json.get('list', [])
        if not term_list:
            # Bug fix: the original indexed [0] unconditionally and raised
            # IndexError whenever the lookup returned no results.
            return roboronya.send_message(
                conv,
                'Sorry, I could not find a definition for that.',
                **kwargs)
        best_term = term_list[0]
        word = best_term['word']
        definition = best_term['definition']
        author = best_term['author']
        example = best_term['example']
        text = '**{}**: "{}"\n-{}'.format(word, definition, author)
        if example != '':
            text += '\n\nExample:\n*{}*'.format(example)
        return roboronya.send_message(
            conv,
            text,
            **kwargs)
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
# Path to the compiled child test binary that deliberately leaks mocks.
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
# Command lines selecting specific leak scenarios via --gtest_filter.
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
class GMockLeakTest(gmock_test_utils.TestCase):
  """Runs the leak-test child binary and checks its exit codes.

  A non-zero exit code means Google Mock reported the leaked mock(s).
  """

  def testCatchesLeakedMockByDefault(self):
    """Leaked-mock detection is enabled by default for EXPECT_CALL and ON_CALL."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL).exit_code)

  def testDoesNotCatchLeakedMockWhenDisabled(self):
    """--gmock_catch_leaked_mocks=0 disables leak detection (exit code 0)."""
    # Fix: assertEqual instead of the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=0']).exit_code)
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks=0']).exit_code)

  def testCatchesLeakedMockWhenEnabled(self):
    """Explicitly passing the flag (no value) catches the leaks."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks']).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks']).exit_code)

  def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
    """An explicit =1 flag value also catches the leaks."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=1']).exit_code)

  def testCatchesMultipleLeakedMocks(self):
    """Multiple leaked mocks are all reported (non-zero exit code)."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
                                    ['--gmock_catch_leaked_mocks']).exit_code)
if __name__ == '__main__':
  # Run via the gmock test harness when executed directly.
  gmock_test_utils.Main()
"""Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
# Module-level singleton FileInput instance backing the functions below.
_state = None
# Default read-ahead buffer size, in bytes, used by FileInput.
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
          mode="r", openhook=None):
    """input([files[, inplace[, backup[, mode[, openhook]]]]])

    Create an instance of the FileInput class. The instance will be used
    as global state for the functions of this module, and is also returned
    for use during iteration. The parameters to this function will be passed
    along to the constructor of the FileInput class.
    """
    global _state
    # Refuse to start a new sequence while a previous one still has a file
    # open; call close() (or finish iterating) first.
    if _state and _state._file:
        raise RuntimeError, "input() already active"
    _state = FileInput(files, inplace, backup, bufsize, mode, openhook)
    return _state
def close():
    """Close the sequence."""
    global _state
    # Clear the module global before closing so that a fresh input() can be
    # started even if close() raises.
    state = _state
    _state = None
    if state:
        state.close()
def nextfile():
    """
    Close the current file so that the next iteration will read the first
    line from the next file (if any); lines not read from the file will
    not count towards the cumulative line count. The filename is not
    changed until after the first line of the next file has been read.
    Before the first line has been read, this function has no effect;
    it cannot be used to skip the first file. After the last line of the
    last file has been read, this function has no effect.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.nextfile()
def filename():
    """
    Return the name of the file currently being read.
    Before the first line has been read, returns None.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.filename()
def lineno():
    """
    Return the cumulative line number of the line that has just been read.
    Before the first line has been read, returns 0. After the last line
    of the last file has been read, returns the line number of that line.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.lineno()
def filelineno():
    """
    Return the line number in the current file. Before the first line
    has been read, returns 0. After the last line of the last file has
    been read, returns the line number of that line within the file.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.filelineno()
def fileno():
    """
    Return the file number of the current file. When no file is currently
    opened, returns -1.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.fileno()
def isfirstline():
    """
    Returns true if the line just read is the first line of its file,
    otherwise returns false.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.isfirstline()
def isstdin():
    """
    Returns true if the last line was read from sys.stdin,
    otherwise returns false.
    """
    # Delegate to the active module-level FileInput instance.
    if not _state:
        raise RuntimeError, "no active input()"
    return _state.isstdin()
class FileInput:
    """class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
    Class FileInput is the implementation of the module; its methods
    filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
    nextfile() and close() correspond to the functions of the same name
    in the module.
    In addition it has a readline() method which returns the next
    input line, and a __getitem__() method which implements the
    sequence behavior. The sequence must be accessed in strictly
    sequential order; random access and readline() cannot be mixed.
    """
    def __init__(self, files=None, inplace=0, backup="", bufsize=0,
                 mode="r", openhook=None):
        # NOTE(review): `basestring` is a Python 2 builtin; under Python 3
        # this raises NameError for string arguments — confirm the target
        # interpreter before porting.
        if isinstance(files, basestring):
            files = (files,)
        else:
            if files is None:
                files = sys.argv[1:]
            if not files:
                files = ('-',)
            else:
                files = tuple(files)
        self._files = files
        self._inplace = inplace
        self._backup = backup
        self._bufsize = bufsize or DEFAULT_BUFSIZE
        self._savestdout = None
        self._output = None
        self._filename = None
        self._lineno = 0
        self._filelineno = 0
        self._file = None
        self._isstdin = False
        self._backupfilename = None
        self._buffer = []
        self._bufindex = 0
        # restrict mode argument to reading modes
        if mode not in ('r', 'rU', 'U', 'rb'):
            raise ValueError("FileInput opening mode must be one of "
                             "'r', 'rU', 'U' and 'rb'")
        self._mode = mode
        if inplace and openhook:
            raise ValueError("FileInput cannot use an opening hook in inplace mode")
        elif openhook and not hasattr(openhook, '__call__'):
            raise ValueError("FileInput openhook must be callable")
        self._openhook = openhook
    def __del__(self):
        self.close()
    def close(self):
        self.nextfile()
        self._files = ()
    def __iter__(self):
        return self
    def next(self):
        # Serve a buffered line if one is available.
        try:
            line = self._buffer[self._bufindex]
        except IndexError:
            pass
        else:
            self._bufindex += 1
            self._lineno += 1
            self._filelineno += 1
            return line
        line = self.readline()
        if not line:
            raise StopIteration
        return line
    # Alias so instances also satisfy the Python 3 iterator protocol.
    __next__ = next
    def __getitem__(self, i):
        if i != self._lineno:
            # Call form works under both Python 2 and 3 (was `raise E, msg`).
            raise RuntimeError("accessing lines out of order")
        try:
            return self.next()
        except StopIteration:
            raise IndexError("end of input reached")
    def nextfile(self):
        # Restore stdout if inplace filtering redirected it.
        savestdout = self._savestdout
        self._savestdout = 0
        if savestdout:
            sys.stdout = savestdout
        output = self._output
        self._output = 0
        if output:
            output.close()
        file = self._file
        self._file = 0
        if file and not self._isstdin:
            file.close()
        # Remove the backup copy unless the caller asked to keep it.
        backupfilename = self._backupfilename
        self._backupfilename = 0
        if backupfilename and not self._backup:
            try: os.unlink(backupfilename)
            except OSError: pass
        self._isstdin = False
        self._buffer = []
        self._bufindex = 0
    def readline(self):
        # Serve buffered lines first; the buffer is refilled per file chunk.
        try:
            line = self._buffer[self._bufindex]
        except IndexError:
            pass
        else:
            self._bufindex += 1
            self._lineno += 1
            self._filelineno += 1
            return line
        if not self._file:
            if not self._files:
                return ""
            self._filename = self._files[0]
            self._files = self._files[1:]
            self._filelineno = 0
            self._file = None
            self._isstdin = False
            self._backupfilename = 0
            if self._filename == '-':
                self._filename = '<stdin>'
                self._file = sys.stdin
                self._isstdin = True
            else:
                if self._inplace:
                    # Inplace filtering: rename the input to a backup name,
                    # reopen the original name for writing, and redirect
                    # stdout into it so print statements rewrite the file.
                    self._backupfilename = (
                        self._filename + (self._backup or os.extsep+"bak"))
                    try: os.unlink(self._backupfilename)
                    except os.error: pass
                    # The next few lines may raise IOError
                    os.rename(self._filename, self._backupfilename)
                    self._file = open(self._backupfilename, self._mode)
                    try:
                        perm = os.fstat(self._file.fileno()).st_mode
                    except OSError:
                        self._output = open(self._filename, "w")
                    else:
                        fd = os.open(self._filename,
                                     os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
                                     perm)
                        self._output = os.fdopen(fd, "w")
                        try:
                            # Preserve the original file's permission bits.
                            if hasattr(os, 'chmod'):
                                os.chmod(self._filename, perm)
                        except OSError:
                            pass
                    self._savestdout = sys.stdout
                    sys.stdout = self._output
                else:
                    # This may raise IOError
                    if self._openhook:
                        self._file = self._openhook(self._filename, self._mode)
                    else:
                        self._file = open(self._filename, self._mode)
        self._buffer = self._file.readlines(self._bufsize)
        self._bufindex = 0
        if not self._buffer:
            self.nextfile()
        # Recursive call
        return self.readline()
    def filename(self):
        return self._filename
    def lineno(self):
        return self._lineno
    def filelineno(self):
        return self._filelineno
    def fileno(self):
        if self._file:
            try:
                return self._file.fileno()
            except ValueError:
                # File object exists but has no underlying descriptor.
                return -1
        else:
            return -1
    def isfirstline(self):
        return self._filelineno == 1
    def isstdin(self):
        return self._isstdin
def hook_compressed(filename, mode):
    """openhook that transparently decompresses .gz and .bz2 files.
    Files with any other extension are opened as regular files.
    """
    extension = os.path.splitext(filename)[1]
    if extension == '.gz':
        import gzip
        opener = gzip.open
    elif extension == '.bz2':
        import bz2
        opener = bz2.BZ2File
    else:
        opener = open
    return opener(filename, mode)
def hook_encoded(encoding):
    """Return an openhook that opens files via codecs.open using *encoding*."""
    import codecs
    return lambda filename, mode: codecs.open(filename, mode, encoding)
def _test():
    """Command-line smoke test: echo each input line with position info.
    Options: -i edits files in place; -b EXT uses EXT as the backup suffix.
    """
    import getopt
    inplace = 0
    backup = 0
    opts, args = getopt.getopt(sys.argv[1:], "ib:")
    for o, a in opts:
        if o == '-i': inplace = 1
        if o == '-b': backup = a
    for line in input(args, inplace=inplace, backup=backup):
        if line[-1:] == '\n': line = line[:-1]
        if line[-1:] == '\r': line = line[:-1]
        # print() with a single parenthesized argument is valid under both
        # Python 2 and Python 3 (was the Py2-only print statement).
        print("%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
                                   isfirstline() and "*" or "", line))
    print("%d: %s[%d]" % (lineno(), filename(), filelineno()))
if __name__ == '__main__':
    _test()  # run the command-line smoke test when executed as a script
import collections
import itertools
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"  # encoding used when rendering to bytes
PY3K = (sys.version_info[0] > 2)  # True when running under Python 3
# Raw string: a plain "\s" is an invalid escape and raises a
# DeprecationWarning (eventually SyntaxError) on modern Python.
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(str):
    """A string subclass for an attribute name that remembers its
    namespace information (prefix, local name, namespace URI)."""
    def __new__(cls, prefix, name, namespace=None):
        # Render as "prefix:name", or just the prefix when there is no
        # local name (e.g. a bare "xmlns" declaration).
        text = prefix if name is None else prefix + ":" + name
        obj = str.__new__(cls, text)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""
    # There are five possible values for the "formatter" argument passed in
    # to methods like encode() and prettify():
    #
    # "html" - All Unicode characters with corresponding HTML entities
    # are converted to those entities on output.
    # "minimal" - Bare ampersands and angle brackets are converted to
    # XML entities: &amp; &lt; &gt;
    # None - The null formatter. Unicode characters are never
    # converted to entities. This is not recommended, but it's
    # faster than "minimal".
    # A function - This function will be called on every string that
    # needs to undergo entity substitution
    FORMATTERS = {
        "html" : EntitySubstitution.substitute_html,
        "minimal" : EntitySubstitution.substitute_xml,
        None : None
        }
    def setup(self, parent=None, previous_element=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous_element = previous_element
        self.next_element = None
        self.previous_sibling = None
        self.next_sibling = None
        # If we're being appended to a parent that already has children,
        # wire up the sibling links.
        if self.parent is not None and self.parent.contents:
            self.previous_sibling = self.parent.contents[-1]
            self.previous_sibling.next_sibling = self
    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3
    def replace_with(self, replace_with):
        """Replace this element in the tree with *replace_with*."""
        if replace_with is self:
            return
        if replace_with is self.parent:
            raise ValueError("Cannot replace a Tag with its parent.")
        old_parent = self.parent
        my_index = self.parent.index(self)
        if (hasattr(replace_with, 'parent')
            and replace_with.parent is self.parent):
            # We're replacing this element with one of its siblings.
            if self.parent.index(replace_with) < my_index:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                my_index -= 1
        self.extract()
        old_parent.insert(my_index, replace_with)
        return self
    replaceWith = replace_with  # BS3
    def replace_with_children(self):
        """Replace this element with its own children, in order."""
        my_parent = self.parent
        my_index = self.parent.index(self)
        self.extract()
        # Insert in reverse so repeated inserts at my_index preserve order.
        for child in reversed(self.contents[:]):
            my_parent.insert(my_index, child)
        return self
    replaceWithChildren = replace_with_children  # BS3
    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent is not None:
            del self.parent.contents[self.parent.index(self)]
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element
        if self.previous_element is not None:
            self.previous_element.next_element = next_element
        if next_element is not None:
            next_element.previous_element = self.previous_element
        self.previous_element = None
        last_child.next_element = None
        self.parent = None
        if self.previous_sibling is not None:
            self.previous_sibling.next_sibling = self.next_sibling
        if self.next_sibling is not None:
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self
    def _last_descendant(self):
        "Finds the last element beneath this object to be parsed."
        last_child = self
        while hasattr(last_child, 'contents') and last_child.contents:
            last_child = last_child.contents[-1]
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant
    def insert(self, position, new_child):
        """Insert *new_child* among this element's children at *position*,
        rewiring all next/previous element and sibling links."""
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        if (isinstance(new_child, str)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)
        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                if self.index(new_child) > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position -= 1
            new_child.extract()
        new_child.parent = self
        previous_child = None
        if position == 0:
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            new_child.previous_element = previous_child._last_descendant()
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child
        new_childs_last_element = new_child._last_descendant()
        if position >= len(self.contents):
            new_child.next_sibling = None
            # Walk up the tree looking for the element that follows this
            # subtree in document order.
            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break
            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child
        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)
    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)
    def insert_before(self, predecessor):
        """Makes the given element the immediate predecessor of this one.
        The two elements will have the same parent, and the given element
        will be immediately before this one.
        """
        if self is predecessor:
            raise ValueError("Can't insert an element before itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'before' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(predecessor, PageElement):
            predecessor.extract()
        index = parent.index(self)
        parent.insert(index, predecessor)
    def insert_after(self, successor):
        """Makes the given element the immediate successor of this one.
        The two elements will have the same parent, and the given element
        will be immediately after this one.
        """
        if self is successor:
            raise ValueError("Can't insert an element after itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'after' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(successor, PageElement):
            successor.extract()
        index = parent.index(self)
        parent.insert(index+1, successor)
    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next  # BS3
    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                      **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.next_elements,
                              **kwargs)
    findAllNext = find_all_next  # BS3
    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                              **kwargs)
    findNextSibling = find_next_sibling  # BS3
    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings  # BS3
    fetchNextSiblings = find_next_siblings  # BS2
    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous  # BS3
    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                          **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                              **kwargs)
    findAllPrevious = find_all_previous  # BS3
    fetchPrevious = find_all_previous  # BS2
    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                              **kwargs)
    findPreviousSibling = find_previous_sibling  # BS3
    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings  # BS3
    fetchPreviousSiblings = find_previous_siblings  # BS2
    def find_parent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1)
        if l:
            r = l[0]
        return r
    findParent = find_parent  # BS3
    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._find_all(name, attrs, None, limit, self.parents,
                              **kwargs)
    findParents = find_parents  # BS3
    fetchParents = find_parents  # BS2
    @property
    def next(self):
        return self.next_element
    @property
    def previous(self):
        return self.previous_element
    #These methods do the real heavy lifting.
    def _find_one(self, method, name, attrs, text, **kwargs):
        # Run *method* with limit=1 and unwrap the single result (or None).
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        elif text is None and not limit and not attrs and not kwargs:
            # Optimization to find all tags.
            if name is True or name is None:
                return [element for element in generator
                        if isinstance(element, Tag)]
            # Optimization to find all tags with a given name.
            elif isinstance(name, str):
                return [element for element in generator
                        if isinstance(element, Tag) and element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
    #These generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    @property
    def next_elements(self):
        i = self.next_element
        while i is not None:
            yield i
            i = i.next_element
    @property
    def next_siblings(self):
        i = self.next_sibling
        while i is not None:
            yield i
            i = i.next_sibling
    @property
    def previous_elements(self):
        i = self.previous_element
        while i is not None:
            yield i
            i = i.previous_element
    @property
    def previous_siblings(self):
        i = self.previous_sibling
        while i is not None:
            yield i
            i = i.previous_sibling
    @property
    def parents(self):
        i = self.parent
        while i is not None:
            yield i
            i = i.parent
    # Methods for supporting CSS selectors.
    tag_name_re = re.compile('^[a-z0-9]+$')
    # /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
    #   \---/  \---/\-------------/    \-------/
    #     |      |         |               |
    #     |      |         |           The value
    #     |      |    ~,|,^,$,* or =
    #     |   Attribute
    #    Tag
    attribselect_re = re.compile(
        r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
        r'=?"?(?P<value>[^\]"]*)"?\]$'
        )
    def _attr_value_as_string(self, value, default=None):
        """Force an attribute value into a string representation.
        A multi-valued attribute will be converted into a
        space-separated string.
        """
        value = self.get(value, default)
        if isinstance(value, list) or isinstance(value, tuple):
            value =" ".join(value)
        return value
    def _attribute_checker(self, operator, attribute, value=''):
        """Create a function that performs a CSS selector operation.
        Takes an operator, attribute and optional value. Returns a
        function that will return True for elements that match that
        combination.
        """
        if operator == '=':
            # string representation of `attribute` is equal to `value`
            return lambda el: el._attr_value_as_string(attribute) == value
        elif operator == '~':
            # space-separated list representation of `attribute`
            # contains `value`
            def _includes_value(element):
                attribute_value = element.get(attribute, [])
                if not isinstance(attribute_value, list):
                    attribute_value = attribute_value.split()
                return value in attribute_value
            return _includes_value
        elif operator == '^':
            # string representation of `attribute` starts with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').startswith(value)
        elif operator == '$':
            # string representation of `attribute` ends with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').endswith(value)
        elif operator == '*':
            # string representation of `attribute` contains `value`
            return lambda el: value in el._attr_value_as_string(attribute, '')
        elif operator == '|':
            # string representation of `attribute` is either exactly
            # `value` or starts with `value` and then a dash.
            def _is_or_starts_with_dash(element):
                attribute_value = element._attr_value_as_string(attribute, '')
                return (attribute_value == value or attribute_value.startswith(
                        value + '-'))
            return _is_or_starts_with_dash
        else:
            return lambda el: el.has_attr(attribute)
    def select(self, selector):
        """Perform a CSS selection operation on the current element."""
        tokens = selector.split()
        current_context = [self]
        for index, token in enumerate(tokens):
            # Bug fix: the index > 0 guard keeps the first iteration from
            # consulting tokens[-1] (the LAST token), which wrongly skipped
            # the first token whenever the selector ended with '>'.
            if index > 0 and tokens[index - 1] == '>':
                # already found direct descendants in last step. skip this
                # step.
                continue
            m = self.attribselect_re.match(token)
            if m is not None:
                # Attribute selector
                tag, attribute, operator, value = m.groups()
                if not tag:
                    tag = True
                checker = self._attribute_checker(operator, attribute, value)
                found = []
                for context in current_context:
                    found.extend(
                        [el for el in context.find_all(tag) if checker(el)])
                current_context = found
                continue
            if '#' in token:
                # ID selector
                tag, id = token.split('#', 1)
                if tag == "":
                    tag = True
                el = current_context[0].find(tag, {'id': id})
                if el is None:
                    return [] # No match
                current_context = [el]
                continue
            if '.' in token:
                # Class selector
                tag_name, klass = token.split('.', 1)
                if not tag_name:
                    tag_name = True
                classes = set(klass.split('.'))
                found = []
                def classes_match(tag):
                    if tag_name is not True and tag.name != tag_name:
                        return False
                    if not tag.has_attr('class'):
                        return False
                    return classes.issubset(tag['class'])
                for context in current_context:
                    found.extend(context.find_all(classes_match))
                current_context = found
                continue
            if token == '*':
                # Star selector
                found = []
                for context in current_context:
                    # Consistency: use the canonical find_all (findAll is
                    # the deprecated BS3 alias for the same method).
                    found.extend(context.find_all(True))
                current_context = found
                continue
            if token == '>':
                # Child selector
                tag = tokens[index + 1]
                if not tag:
                    tag = True
                found = []
                for context in current_context:
                    found.extend(context.find_all(tag, recursive=False))
                current_context = found
                continue
            # Here we should just have a regular tag
            if not self.tag_name_re.match(token):
                return []
            found = []
            for context in current_context:
                found.extend(context.find_all(token))
            current_context = found
        return current_context
    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        return self.next_elements
    def nextSiblingGenerator(self):
        return self.next_siblings
    def previousGenerator(self):
        return self.previous_elements
    def previousSiblingGenerator(self):
        return self.previous_siblings
    def parentGenerator(self):
        return self.parents
    # Utility methods
    def substitute_encoding(self, str, encoding=None):
        # NOTE(review): the parameter named `str` shadows the builtin; it is
        # kept for backward compatibility with positional/keyword callers.
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)
class NavigableString(str, PageElement):
    # Rendering affixes; subclasses override these to wrap the text in
    # markup (CDATA sections, comments, etc.).
    PREFIX = ''
    SUFFIX = ''
    def __new__(cls, value):
        """Create a new NavigableString.
        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, str):
            return str.__new__(cls, value)
        return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
    def __getnewargs__(self):
        # Support copy/pickle: reconstruct from the plain string value.
        return (str(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))
    def output_ready(self, formatter="minimal"):
        """Return this string wrapped in PREFIX/SUFFIX with entities
        substituted by *formatter* (a FORMATTERS key or a callable)."""
        # Bug fix: `collections.Callable` was deprecated in Python 3.3 and
        # removed in 3.10; the builtin callable() is the portable check.
        if not callable(formatter):
            formatter = self.FORMATTERS.get(
                formatter, EntitySubstitution.substitute_xml)
        if formatter is None:
            output = self
        else:
            output = formatter(self)
        return self.PREFIX + output + self.SUFFIX
class CData(NavigableString):
    """A CDATA section; rendered wrapped in <![CDATA[ ... ]]>."""
    PREFIX = '<![CDATA['
    SUFFIX = ']]>'
class ProcessingInstruction(NavigableString):
    """A processing instruction; rendered wrapped in <? ... ?>."""
    PREFIX = '<?'
    SUFFIX = '?>'
class Comment(NavigableString):
    """An HTML/XML comment; rendered wrapped in <!-- ... -->."""
    PREFIX = '<!--'
    SUFFIX = '-->'
class Declaration(NavigableString):
    """A markup declaration; rendered wrapped in <! ... !>."""
    PREFIX = '<!'
    SUFFIX = '!>'
class Doctype(NavigableString):
    """The document's doctype declaration, e.g. <!DOCTYPE html>."""
    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a name plus optional public/system IDs.
        A public ID implies PUBLIC syntax (with the system ID appended
        bare, if present); a system ID alone uses SYSTEM syntax.
        """
        parts = [name]
        if pub_id is not None:
            parts.append('PUBLIC "%s"' % pub_id)
            if system_id is not None:
                parts.append('"%s"' % system_id)
        elif system_id is not None:
            parts.append('SYSTEM "%s"' % system_id)
        return Doctype(' '.join(parts))
    PREFIX = '<!DOCTYPE '
    SUFFIX = '>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if builder.cdata_list_attributes:
universal = builder.cdata_list_attributes.get('*', [])
tag_specific = builder.cdata_list_attributes.get(
self.name.lower(), [])
for cdata_list_attr in itertools.chain(universal, tag_specific):
if cdata_list_attr in attrs:
# Basically, we have a "class" attribute whose
# value is a whitespace-separated list of CSS
# classes. Split it into a list.
value = attrs[cdata_list_attr]
values = whitespace_re.split(value)
attrs[cdata_list_attr] = values
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
self.contains_substitutions = builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.contains_substitutions = False
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string)
def _all_strings(self, strip=False):
"""Yield all child strings, possibly stripping them."""
for descendant in self.descendants:
if not isinstance(descendant, NavigableString):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator="", strip=False):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(strip)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
    def has_attr(self, key):
        # True if this tag defines the attribute `key`.
        return key in self.attrs
def __hash__(self):
return str(self).__hash__()
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self.attrs[key]  # KeyError propagates to the caller
    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        # Direct children only; use .descendants for the whole subtree.
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)
    def __contains__(self, x):
        # Membership test over direct children (uses list equality rules).
        return x in self.contents
    def __bool__(self):
        "A tag is non-None even if it has no contents."
        # Without this, __len__ would make a childless tag falsy.
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # pop() with a default: deleting a missing attribute is a no-op.
        self.attrs.pop(key, None)
    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        find_all() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.find_all(*args, **kwargs)
    def __getattr__(self, tag):
        """tag.subtag is shorthand for tag.find("subtag")."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%sTag is deprecated, use .find("%s") instead.' % (
                    tag_name, tag_name))
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag=="contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        # `self == other` (not self.__eq__) preserves reflected-equality
        # semantics for subclasses.
        return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.encode(encoding)
    def __unicode__(self):
        # Text rendering (Python 2's unicode()).
        return self.decode()
    def __str__(self):
        # Byte-string rendering (Python 2 semantics).
        return self.encode()
    if PY3K:
        # Under Python 3, all three renderings are the decoded text.
        __str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
    def decode(self, indent_level=None,
        eventual_encoding=DEFAULT_OUTPUT_ENCODING,
        formatter="minimal"):
        """Returns a Unicode representation of this tag and its contents.
        :param eventual_encoding: The tag is destined to be
           encoded into this encoding. This method is _not_
           responsible for performing that encoding. This information
           is passed in so that it can be substituted in if the
           document contains a <META> tag that mentions the document's
           encoding.
        """
        # Serialize attributes, sorted by name for deterministic output.
        attrs = []
        if self.attrs:
            for key, val in sorted(self.attrs.items()):
                if val is None:
                    decoded = key
                else:
                    if isinstance(val, list) or isinstance(val, tuple):
                        val = ' '.join(val)
                    elif not isinstance(val, str):
                        val = str(val)
                    if (self.contains_substitutions
                        and eventual_encoding is not None
                        and '%SOUP-ENCODING%' in val):
                        val = self.substitute_encoding(val, eventual_encoding)
                    decoded = (str(key) + '='
                               + EntitySubstitution.substitute_xml(val, True))
                attrs.append(decoded)
        close = ''
        closeTag = ''
        if self.is_empty_element:
            # Self-closing tag: render as <name/> with no closing tag.
            close = '/'
        else:
            closeTag = '</%s>' % self.name
        prefix = ''
        if self.prefix:
            prefix = self.prefix + ":"
        # A non-None indent_level requests pretty-printed output.
        pretty_print = (indent_level is not None)
        if pretty_print:
            space = (' ' * (indent_level - 1))
            indent_contents = indent_level + 1
        else:
            space = ''
            indent_contents = None
        contents = self.decode_contents(
            indent_contents, eventual_encoding, formatter)
        if self.hidden:
            # This is the 'document root' object.
            s = contents
        else:
            s = []
            attribute_string = ''
            if attrs:
                attribute_string = ' ' + ' '.join(attrs)
            if pretty_print:
                s.append(space)
            s.append('<%s%s%s%s>' % (
                    prefix, self.name, attribute_string, close))
            if pretty_print:
                s.append("\n")
            s.append(contents)
            if pretty_print and contents and contents[-1] != "\n":
                s.append("\n")
            if pretty_print and closeTag:
                s.append(space)
            s.append(closeTag)
            if pretty_print and closeTag and self.next_sibling:
                s.append("\n")
            s = ''.join(s)
        return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
    def decode_contents(self, indent_level=None,
                        eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                        formatter="minimal"):
        """Renders the contents of this tag as a Unicode string.

        :param indent_level: pretty-print children at this depth;
            None renders them inline.
        :param eventual_encoding: The tag is destined to be
           encoded into this encoding. This method is _not_
           responsible for performing that encoding. This information
           is passed in so that it can be substituted in if the
           document contains a <META> tag that mentions the document's
           encoding.
        :param formatter: formatter used to escape text nodes on output.
        """
        pretty_print = (indent_level is not None)
        s = []
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.output_ready(formatter)
            elif isinstance(c, Tag):
                # Child tags recurse with the same settings.
                s.append(c.decode(indent_level, eventual_encoding,
                                  formatter))
            if text and indent_level:
                # When pretty-printing, surrounding whitespace is
                # regenerated below, so strip the original.
                text = text.strip()
            if text:
                if pretty_print:
                    s.append(" " * (indent_level - 1))
                s.append(text)
                if pretty_print:
                    s.append("\n")
        return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
    @property
    def descendants(self):
        """Yield every descendant of this tag, in document order."""
        if not len(self.contents):
            return
        # Everything from our first child up to (but not including) the
        # element that follows our last descendant is "inside" this tag.
        stopNode = self._last_descendant().next_element
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next_element
    # Old names for backwards compatibility
    def childGenerator(self):
        """Deprecated BS3 alias for the `children` property."""
        return self.children

    def recursiveChildGenerator(self):
        """Deprecated BS3 alias for the `descendants` property."""
        return self.descendants

    # This was kind of misleading because has_key() (attributes) was
    # different from __in__ (contents). has_key() is gone in Python 3,
    # anyway.
    has_key = has_attr
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    :param name: a tag name, callable, regular expression, or iterable
        of acceptable names.
    :param attrs: a dict mapping attribute names to match criteria. A
        non-dict value is shorthand for matching the 'class' attribute.
    :param text: criteria for matching an element's string content.
    :param kwargs: extra attribute criteria, merged into ``attrs``.
    """

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Copy before updating so the caller's dict is untouched.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def search_tag(self, markup_name=None, markup_attrs={}):
        """Check whether a tag — given either as a Tag object or as a
        (name, attribute map) pair — satisfies this strainer.

        Returns the matching Tag (or name) on success, None otherwise.
        """
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            # A Tag supports .get(), so it serves as its own attribute map.
            markup_attrs = markup
        # BUGFIX: this used isinstance(self.name, collections.Callable);
        # that alias was deprecated in Python 3.3 and removed in 3.10.
        # The callable() builtin is the supported equivalent.
        call_function_with_tag_data = (
            callable(self.name)
            and not isinstance(markup_name, Tag))
        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            # Plain iterable of (key, value) pairs.
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        # A text criterion must also match the candidate's string.
        if found and self.text and self.text != found.string:
            found = None
        return found
    # Deprecated BS3 alias.
    searchTag = search_tag

    def search(self, markup):
        """Run this strainer against an arbitrary piece of markup: an
        iterable of elements, a Tag, or a text node."""
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, str):
            if not self.name and not self.attrs and self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found

    def _matches(self, markup, match_against):
        """Return whether *markup* satisfies *match_against*, which may
        be True (presence test), a callable, a regexp object, a string,
        an iterable of strings, or a dict-like object."""
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching, e.g. the 'class'
            # attribute.
            if (isinstance(match_against, str)
                and ' ' in match_against):
                # A bit of a special case. If they try to match "foo
                # bar" on a multivalue attribute's value, only accept
                # the literal value "foo bar"
                #
                # XXX This is going to be pretty slow because we keep
                # splitting match_against. But it shouldn't come up
                # too often.
                result = (whitespace_re.split(match_against) == markup)
            else:
                for item in markup:
                    if self._matches(item, match_against):
                        result = True
                        break  # one matching item is enough
        elif match_against is True:
            # True matches any attribute that is present at all.
            result = markup is not None
        elif callable(match_against):
            # BUGFIX: was isinstance(match_against, collections.Callable),
            # which raises AttributeError on Python 3.10+.
            result = match_against(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup is not None and not isinstance(markup, str):
                markup = str(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(match_against, 'match'):
                # It's a regexp object.
                result = markup and match_against.search(markup)
            elif (hasattr(match_against, '__iter__')
                  and markup is not None
                  and not isinstance(match_against, str)):
                result = markup in match_against
            elif hasattr(match_against, 'items'):
                if markup is None:
                    # A dict criterion against a missing value only
                    # matches when the criterion is empty.
                    result = len(list(match_against.items())) == 0
                else:
                    result = match_against in markup
            elif match_against and isinstance(markup, str):
                match_against = markup.__class__(match_against)
            if not result:
                result = match_against == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""

    def __init__(self, source):
        # BUGFIX: the original called list.__init__([]), which initialized
        # a throwaway empty list instead of this instance. Initialize
        # *self* properly.
        super(ResultSet, self).__init__()
        # The SoupStrainer that produced these results.
        self.source = source
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: CURLOPT_CONV_TO_NETWORK_FUNCTION
Section: 3
Source: libcurl
See-also:
- CURLOPT_CONV_FROM_NETWORK_FUNCTION (3)
- CURLOPT_CONV_FROM_UTF8_FUNCTION (3)
Protocol:
- All
Added-in: 7.15.4
---
# NAME
CURLOPT_CONV_TO_NETWORK_FUNCTION - convert data to network from host encoding
# SYNOPSIS
~~~c
#include <curl/curl.h>
CURLcode conv_callback(char *ptr, size_t length);
CURLcode curl_easy_setopt(CURL *handle, CURLOPT_CONV_TO_NETWORK_FUNCTION,
conv_callback);
~~~
# DESCRIPTION
Pass a pointer to your callback function, which should match the prototype
shown above.
Applies to non-ASCII platforms. curl_version_info(3) returns the
CURL_VERSION_CONV feature bit set if this option is provided.
The data to be converted is in a buffer pointed to by the *ptr* parameter.
The amount of data to convert is indicated by the *length* parameter. The
converted data overlays the input data in the buffer pointed to by the ptr
parameter. *CURLE_OK* must be returned upon successful conversion. A CURLcode
return value defined by curl.h, such as *CURLE_CONV_FAILED*, should be
returned if an error was encountered.
CURLOPT_CONV_TO_NETWORK_FUNCTION(3) converts from host encoding to the
network encoding. It is used when commands or ASCII data are sent over the
network.
If you set a callback pointer to NULL, or do not set it at all, the built-in
libcurl iconv functions are used. If HAVE_ICONV was not defined when libcurl
was built, and no callback has been established, the conversion returns the
CURLE_CONV_REQD error code.
If HAVE_ICONV is defined, CURL_ICONV_CODESET_OF_HOST must also be defined.
For example:
~~~c
#define CURL_ICONV_CODESET_OF_HOST "IBM-1047"
~~~
The iconv code in libcurl defaults the network and UTF8 codeset names as
follows:
~~~c
#define CURL_ICONV_CODESET_OF_NETWORK "ISO8859-1"
#define CURL_ICONV_CODESET_FOR_UTF8 "UTF-8"
~~~
You need to override these definitions if they are different on your system.
# DEFAULT
NULL
# %PROTOCOLS%
# EXAMPLE
~~~c
static CURLcode my_conv_from_ebcdic_to_ascii(char *buffer, size_t length)
{
int rc = 0;
/* in-place convert 'buffer' from EBCDIC to ASCII */
if(rc == 0) {
/* success */
return CURLE_OK;
}
else {
return CURLE_CONV_FAILED;
}
}
int main(void)
{
CURL *curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_CONV_TO_NETWORK_FUNCTION,
my_conv_from_ebcdic_to_ascii);
}
~~~
# DEPRECATED
Not available and deprecated since 7.82.0.
Available only if **CURL_DOES_CONVERSIONS** was defined when libcurl was
built.
# %AVAILABILITY%
# RETURN VALUE
curl_easy_setopt(3) returns a CURLcode indicating success or error.
CURLE_OK (0) means everything was OK, non-zero means an error occurred, see
libcurl-errors(3). | unknown | github | https://github.com/curl/curl | docs/libcurl/opts/CURLOPT_CONV_TO_NETWORK_FUNCTION.md |
import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import signals
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from hashlib import sha1
def make_digest(key):
    """Return the hex SHA-1 digest of *key*, encoded as UTF-8."""
    digest = sha1(key.encode('utf-8'))
    return digest.hexdigest()
def _get_cache_keys(self):
"""Get all the cache keys for the given object"""
kv_id_fields = ('language', 'digest', 'content_type_id', 'object_id', 'field')
values = tuple(getattr(self, attr) for attr in kv_id_fields)
return ('datatrans_%s_%s_%s_%s_%s' % values,
'datatrans_%s' % self.id)
# Cache lifetime for translation entries, in seconds: one hour unless
# overridden by the DATATRANS_CACHE_DURATION setting.
CACHE_DURATION = getattr(settings, 'DATATRANS_CACHE_DURATION', 60 * 60)
class KeyValueManager(models.Manager):
    """Manager for KeyValue rows.

    Provides cached translation lookups and hooks the owning model's
    save/delete signals so cached entries stay fresh.
    """

    def get_query_set(self):
        # Pre-Django-1.6 manager hook: every manager query goes through
        # the caching queryset defined below.
        return KeyValueQuerySet(self.model)

    def get_keyvalue(self, key, language, obj, field):
        """Return the KeyValue for (key, language, obj, field), creating
        an unedited placeholder row (value == key) if none exists."""
        key = key or ''
        digest = make_digest(key)
        content_type = ContentType.objects.get_for_model(obj.__class__)
        # NOTE(review): this local is unused — obj.id is passed directly
        # to get_or_create below.
        object_id = obj.id
        keyvalue, created = self.get_or_create(digest=digest,
                                               language=language,
                                               content_type_id=content_type.id,
                                               object_id=obj.id,
                                               field=field,
                                               defaults={'value': key})
        return keyvalue

    def lookup(self, key, language, obj, field):
        """Return the translation of *key* for *obj.field* in *language*,
        falling back to *key* itself while no translation has been
        edited."""
        kv = self.get_keyvalue(key, language, obj, field)
        if kv.edited:
            return kv.value
        else:
            return key

    def for_model(self, model, fields, modelfield=None):
        """
        Get KeyValues for a model. The fields argument is a list of model
        fields.

        If modelfield is specified, only KeyValue entries for that field will
        be returned.
        """
        field_names = [f.name for f in fields] if modelfield is None else [modelfield]
        ct = ContentType.objects.get_for_model(model)
        return self.filter(field__in=field_names, content_type__id=ct.id)

    def contribute_to_class(self, model, name):
        # Besides normal manager attachment, wire cache maintenance into
        # the model's lifecycle and expose its cache keys as a property.
        signals.post_save.connect(self._post_save, sender=model)
        signals.post_delete.connect(self._post_delete, sender=model)
        setattr(model, '_get_cache_keys', _get_cache_keys)
        setattr(model, 'cache_keys', property(_get_cache_keys))
        return super(KeyValueManager, self).contribute_to_class(model, name)

    def _invalidate_cache(self, instance):
        """
        Explicitly set a None value instead of just deleting so we don't have
        any race conditions where a concurrent reader re-populates the key
        with stale data.
        """
        for key in instance.cache_keys:
            cache.set(key, None, 5)

    def _post_save(self, instance, **kwargs):
        """
        Refresh the cache when saving
        """
        for key in instance.cache_keys:
            cache.set(key, instance, CACHE_DURATION)

    def _post_delete(self, instance, **kwargs):
        # Deleted rows are blanked in the cache rather than removed.
        self._invalidate_cache(instance)
class KeyValueQuerySet(QuerySet):
    """QuerySet that transparently caches KeyValue rows as they are
    fetched and serves simple gets from the cache."""

    def iterator(self):
        # NOTE(review): Python 2 idiom — superiter.next() and relying on
        # the bare StopIteration to end this generator would need
        # next(superiter) / PEP 479 handling under Python 3.
        superiter = super(KeyValueQuerySet, self).iterator()
        while True:
            obj = superiter.next()
            # Use cache.add instead of cache.set to prevent race conditions
            for key in obj.cache_keys:
                cache.add(key, obj, CACHE_DURATION)
            yield obj

    def get(self, *args, **kwargs):
        """
        Checks the cache to see if there's a cached entry for this pk. If not,
        fetches using super then stores the result in cache.

        Most of the logic here was gathered from a careful reading of
        ``django.db.models.sql.query.add_filter``
        """
        if self.query.where:
            # If there is any other ``where`` filter on this QuerySet just call
            # super. There will be a where clause if this QuerySet has already
            # been filtered/cloned.
            return super(KeyValueQuerySet, self).get(*args, **kwargs)
        kv_id_fields = ('language', 'digest', 'content_type', 'object_id', 'field')
        # Punt on anything more complicated than get by pk/id only...
        if len(kwargs) == 1:
            # NOTE(review): kwargs.keys()[0] / kwargs.values()[0] are
            # Python 2 only; Python 3 would need list(...) first.
            k = kwargs.keys()[0]
            if k in ('pk', 'pk__exact', 'id', 'id__exact'):
                obj = cache.get('datatrans_%s' % kwargs.values()[0])
                if obj is not None:
                    return obj
        elif set(kv_id_fields) <= set(kwargs.keys()):
            # Full natural-key lookup: try the composite cache key built
            # by _get_cache_keys.
            values = tuple(kwargs[attr] for attr in kv_id_fields)
            obj = cache.get('datatrans_%s_%s_%s_%s_%s' % values)
            if obj is not None:
                return obj
        # Calls self.iterator to fetch objects, storing object in cache.
        return super(KeyValueQuerySet, self).get(*args, **kwargs)
class KeyValue(models.Model):
    """
    The datatrans magic is stored in this model. It stores the localized fields of models.
    """
    # Generic FK to the instance whose field is being translated.
    content_type = models.ForeignKey(ContentType, null=True)
    object_id = models.PositiveIntegerField(null=True, default=None)
    content_object = generic.GenericForeignKey('content_type', 'object_id')
    # Name of the translated model field.
    field = models.CharField(max_length=255)
    language = models.CharField(max_length=5, db_index=True, choices=settings.LANGUAGES)
    # The translated text itself.
    value = models.TextField(blank=True)
    # True once a translator has saved a value; unedited rows fall back
    # to the source string (see KeyValueManager.lookup).
    edited = models.BooleanField(blank=True, default=False)
    fuzzy = models.BooleanField(blank=True, default=False)
    # SHA-1 of the source string (see make_digest).
    digest = models.CharField(max_length=40, db_index=True)
    # NOTE(review): auto_now=True overwrites the value on every save, so
    # the default here is effectively redundant — confirm intent.
    updated = models.DateTimeField(auto_now=True, default=datetime.datetime.now)
    objects = KeyValueManager()

    def __unicode__(self):
        return u'%s: %s' % (self.language, self.value)

    class Meta:
        #unique_together = ('digest', 'language')
        unique_together = ('language', 'content_type', 'field', 'object_id', 'digest')
class WordCount(models.Model):
    """
    Abstract base for cached word-count statistics.
    """
    class Meta:
        abstract = True

    # Cached total number of localized words.
    total_words = models.IntegerField(default=0)
    # Whether the cached count is still valid (presumably cleared when
    # source strings change — TODO confirm against callers).
    valid = models.BooleanField()
class ModelWordCount(WordCount):
    """
    Caches the total number of localized words for a model
    """
    # One row per model class (unique=True).
    content_type = models.ForeignKey(ContentType, db_index=True, unique=True)
class FieldWordCount(WordCount):
    """
    Caches the total number of localized words for a model field.
    """
    class Meta:
        unique_together = ('content_type', 'field')

    # The model and field name this count applies to.
    content_type = models.ForeignKey(ContentType, db_index=True)
    field = models.CharField(max_length=64, db_index=True) | unknown | codeparrot/codeparrot-clean | | 
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import LRUCache, open_if_exists, internalcode
def split_template_path(template):
    """Split a '/'-joined template path into its segments, dropping
    empty segments and '.' while rejecting anything that could escape
    the search path (a TemplateNotFound is raised for '..' or segments
    containing the OS path separator).
    """
    pieces = []
    for segment in template.split('/'):
        unsafe = (path.sep in segment
                  or (path.altsep and path.altsep in segment)
                  or segment == path.pardir)
        if unsafe:
            raise TemplateNotFound(template)
        if segment and segment != '.':
            pieces.append(segment)
    return pieces
class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source` to
    implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as unicode string or a ASCII bytestring.  The filename should
        be the name of the file on the filesystem if it was loaded from there,
        otherwise `None`.  The filename is used by python for the tracebacks
        if no loader extension is used.

        The last item in the tuple is the `uptodate` function.  If auto
        reloading is enabled it's always called to check if the template
        changed.  No arguments are passed so the function must store the
        old state somewhere (for example in a closure).  If it returns `False`
        the template will be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError('%s cannot provide access to the source' %
                               self.__class__.__name__)
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterates over all templates.  If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError('this loader cannot iterate over all templates')

    @internalcode
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
        """
        code = None
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

        # if the bytecode cache is available and the bucket doesn't
        # have a code so far, we give the bucket the new code and put
        # it back to the bytecode cache.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        # Hand the compiled code to the environment's template class,
        # which builds the final Template object.
        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)
class FileSystemLoader(BaseLoader):
    """Loads templates from the file system.  This loader can find templates
    in folders on the file system and is the preferred way to load them.

    The loader takes the path to the templates as string, or if multiple
    locations are wanted a list of them which is then looked up in the
    given order:

    >>> loader = FileSystemLoader('/path/to/templates')
    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.
    """

    def __init__(self, searchpath, encoding='utf-8'):
        # Accept a single path or any iterable of paths.
        # (basestring: this module targets Python 2.)
        if isinstance(searchpath, basestring):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        # First search path containing the template wins.
        for searchpath in self.searchpath:
            filename = path.join(searchpath, *pieces)
            f = open_if_exists(filename)
            if f is None:
                continue
            try:
                contents = f.read().decode(self.encoding)
            finally:
                f.close()

            mtime = path.getmtime(filename)
            # Up to date while the file's mtime is unchanged; a vanished
            # file counts as out of date.
            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False
            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for searchpath in self.searchpath:
            for dirpath, dirnames, filenames in os.walk(searchpath):
                for filename in filenames:
                    # Make the path relative to the search path and
                    # normalize OS separators to '/'.
                    template = os.path.join(dirpath, filename) \
                        [len(searchpath):].strip(os.path.sep) \
                            .replace(os.path.sep, '/')

                    if template[:2] == './':
                        template = template[2:]
                    if template not in found:
                        found.add(template)
        return sorted(found)
class PackageLoader(BaseLoader):
    """Load templates from python eggs or packages.  It is constructed with
    the name of the python package and the path to the templates in that
    package::

        loader = PackageLoader('mypackage', 'views')

    If the package path is not given, ``'templates'`` is assumed.

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.  Due to the nature
    of eggs it's only possible to reload templates if the package was loaded
    from the file system and not a zip file.
    """

    def __init__(self, package_name, package_path='templates',
                 encoding='utf-8'):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        # uptodate checks are only possible for packages living on the
        # real file system, not inside a zipped egg.
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        """Resolve *template* inside the package and return the usual
        ``(source, filename, uptodate)`` tuple, raising `TemplateNotFound`
        if the resource does not exist."""
        pieces = split_template_path(template)
        p = '/'.join((self.package_path,) + tuple(pieces))
        if not self.provider.has_resource(p):
            raise TemplateNotFound(template)

        filename = uptodate = None
        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(self.manager, p)
            mtime = path.getmtime(filename)
            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, p)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        """Return a sorted list of all template names in the package."""
        path = self.package_path
        if path[:2] == './':
            path = path[2:]
        elif path == '.':
            path = ''
        offset = len(path)
        results = []
        def _walk(path):
            # Appends to `results` as a side effect; returns nothing.
            for filename in self.provider.resource_listdir(path):
                fullname = path + '/' + filename
                if self.provider.resource_isdir(fullname):
                    # BUGFIX: the previous code did
                    #   for item in _walk(fullname): results.append(item)
                    # but _walk() returns None, so any package containing
                    # a subdirectory raised TypeError. Recurse for the
                    # side effect only.
                    _walk(fullname)
                else:
                    results.append(fullname[offset:].lstrip('/'))
        _walk(path)
        results.sort()
        return results
class DictLoader(BaseLoader):
    """Loads templates from a python dict that maps template names to
    unicode source strings.  This loader is useful for unittesting:

    >>> loader = DictLoader({'index.html': 'source here'})

    Because auto reloading is rarely useful this is disabled per default.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        try:
            source = self.mapping[template]
        except KeyError:
            raise TemplateNotFound(template)
        # Out of date once the mapping no longer holds the same source.
        return source, None, lambda: source != self.mapping.get(template)

    def list_templates(self):
        return sorted(self.mapping)
class FunctionLoader(BaseLoader):
    """A loader that delegates loading to a callable.  The callable is
    invoked with the template name and must return `None` (template does
    not exist), the template source as an unicode string, or a full
    ``(source, filename, uptodatefunc)`` tuple.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    The `uptodatefunc` is a function that is called if autoreload is enabled
    and has to return `True` if the template is still up to date.  For more
    details have a look at :meth:`BaseLoader.get_source` which has the same
    return value.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        rv = self.load_func(template)
        if rv is None:
            raise TemplateNotFound(template)
        if isinstance(rv, basestring):
            # Bare source string: no filename, no uptodate check.
            return rv, None, None
        return rv
class PrefixLoader(BaseLoader):
    """A loader that is passed a dict of loaders where each loader is bound
    to a prefix.  The prefix is delimited from the template by a slash per
    default, which can be changed by setting the `delimiter` argument to
    something else::

        loader = PrefixLoader({
            'app1':     PackageLoader('mypackage.app1'),
            'app2':     PackageLoader('mypackage.app2')
        })

    By loading ``'app1/index.html'`` the file from the app1 package is loaded,
    by loading ``'app2/index.html'`` the file from the second.
    """

    def __init__(self, mapping, delimiter='/'):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_source(self, environment, template):
        try:
            # "prefix/rest": delegate "rest" to the loader bound to
            # "prefix".
            prefix, name = template.split(self.delimiter, 1)
            loader = self.mapping[prefix]
        except (ValueError, KeyError):
            # No delimiter present, or unknown prefix.
            raise TemplateNotFound(template)
        try:
            return loader.get_source(environment, name)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here.
            # (the one that includes the prefix)
            raise TemplateNotFound(template)

    def list_templates(self):
        result = []
        # NOTE: iteritems() is Python 2 only.
        for prefix, loader in self.mapping.iteritems():
            for template in loader.list_templates():
                result.append(prefix + self.delimiter + template)
        return result
class ChoiceLoader(BaseLoader):
    """This loader works like the `PrefixLoader` just that no prefix is
    specified.  If a template could not be found by one loader the next one
    is tried.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    This is useful if you want to allow users to override builtin templates
    from a different location.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        # Ask each loader in order; the first that does not raise wins.
        for loader in self.loaders:
            try:
                return loader.get_source(environment, template)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)

    def list_templates(self):
        # Union of the template names every loader knows about.
        names = set()
        for loader in self.loaders:
            names.update(loader.list_templates())
        return sorted(names)
# Used by ModuleLoader, which keeps only a weakref.proxy to the module
# in sys.modules (see ModuleLoader.__init__).
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])
    """

    # Compiled templates carry no template source.
    has_source_access = False

    def __init__(self, path):
        # Each loader instance gets its own fake package name so the
        # compiled template modules of different loaders cannot collide.
        package_name = '_jinja2_module_templates_%x' % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, basestring):
            path = [path]
        else:
            path = list(path)
        mod.__path__ = path

        sys.modules[package_name] = weakref.proxy(mod,
            lambda x: sys.modules.pop(package_name, None))

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        # Hash the template name so arbitrary names map to valid,
        # unique module names.
        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()

    @staticmethod
    def get_module_filename(name):
        return ModuleLoader.get_template_key(name) + '.py'

    @internalcode
    def load(self, environment, name, globals=None):
        key = self.get_template_key(name)
        module = '%s.%s' % (self.package_name, key)
        # Re-use the module if this loader already imported it.
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ['root'])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals) | unknown | codeparrot/codeparrot-clean | | 
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by the documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_l3out_extsubnet
short_description: Manage External Subnet objects (l3extSubnet:extsubnet)
description:
- Manage External Subnet objects (l3extSubnet:extsubnet)
version_added: '2.9'
options:
tenant:
description:
- Name of an existing tenant.
type: str
required: yes
aliases: [ tenant_name ]
l3out:
description:
- Name of an existing L3Out.
type: str
required: yes
aliases: [ l3out_name ]
extepg:
description:
- Name of an existing ExtEpg.
type: str
required: yes
aliases: [ extepg_name ]
network:
description:
- The network address for the Subnet.
type: str
required: yes
aliases: [ address, ip ]
subnet_name:
description:
- Name of External Subnet being created.
type: str
aliases: [ name ]
description:
description:
- Description for the External Subnet.
type: str
aliases: [ descr ]
scope:
description:
- Determines the scope of the Subnet.
- The C(export-rtctrl) option controls which external networks are advertised out of the fabric using route-maps and IP prefix-lists.
- The C(import-security) option classifies for the external EPG.
The rules and contracts defined in this external EPG apply to networks matching this subnet.
- The C(shared-rtctrl) option controls which external prefixes are advertised to other tenants for shared services.
- The C(shared-security) option configures the classifier for the subnets in the VRF where the routes are leaked.
- The APIC defaults to C(import-security) when unset during creation.
type: list
choices: [ export-rtctrl, import-security, shared-rtctrl, shared-security ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(tenant) and C(domain) and C(vrf) used must exist before using this module in your playbook.
The M(aci_tenant) and M(aci_domain) and M(aci_vrf) modules can be used for this.
seealso:
- module: aci_tenant
- module: aci_domain
- module: aci_vrf
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(l3ext:Out).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Rostyslav Davydenko (@rost-d)
'''
EXAMPLES = r'''
- name: Add a new External Subnet
aci_l3out_extsubnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
l3out: prod_l3out
extepg: prod_extepg
description: External Subnet for Production ExtEpg
network: 192.0.2.0/24
scope: export-rtctrl
state: present
delegate_to: localhost
- name: Delete External Subnet
aci_l3out_extsubnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
l3out: prod_l3out
extepg: prod_extepg
network: 192.0.2.0/24
state: absent
delegate_to: localhost
- name: Query ExtEpg information
aci_l3out_extsubnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
l3out: prod_l3out
extepg: prod_extepg
network: 192.0.2.0/24
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
    """Ansible entry point: create, update, delete or query an external
    subnet (l3extSubnet) under tenant -> L3Out -> external EPG."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        l3out=dict(type='str', aliases=['l3out_name']),  # Not required for querying all objects
        # NOTE(review): 'name' is declared as an alias both here and on
        # subnet_name below; the two declarations conflict -- confirm which
        # option 'name' is meant to map to before relying on it.
        extepg=dict(type='str', aliases=['extepg_name', 'name']),  # Not required for querying all objects
        network=dict(type='str', aliases=['address', 'ip']),
        description=dict(type='str', aliases=['descr']),
        subnet_name=dict(type='str', aliases=['name']),
        scope=dict(type='list', choices=['export-rtctrl', 'import-security', 'shared-rtctrl', 'shared-security']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query'])
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # 'network' identifies the subnet, so it is mandatory except for queries.
        required_if=[
            ['state', 'present', ['network']],
            ['state', 'absent', ['network']],
        ],
    )

    aci = ACIModule(module)

    tenant = module.params['tenant']
    l3out = module.params['l3out']
    extepg = module.params['extepg']
    network = module.params['network']
    description = module.params['description']
    subnet_name = module.params['subnet_name']
    # Bug fix: 'scope' is optional (e.g. for state=query) and defaults to
    # None; the original unconditional ','.join(sorted(...)) raised a
    # TypeError in that case.  Only normalise to the sorted comma-separated
    # form APIC expects when the user actually supplied a value.
    scope_param = module.params['scope']
    scope = None if scope_param is None else ','.join(sorted(scope_param))
    state = module.params['state']

    # Build the object DN: uni/tn-{tenant}/out-{l3out}/instP-{extepg}/extsubnet-[{network}]
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='l3extOut',
            aci_rn='out-{0}'.format(l3out),
            module_object=l3out,
            target_filter={'name': l3out},
        ),
        subclass_2=dict(
            aci_class='l3extInstP',
            aci_rn='instP-{0}'.format(extepg),
            module_object=extepg,
            target_filter={'name': extepg},
        ),
        subclass_3=dict(
            aci_class='l3extSubnet',
            aci_rn='extsubnet-[{0}]'.format(network),
            module_object=network,
            target_filter={'name': network},
        ),
    )

    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='l3extSubnet',
            class_config=dict(
                ip=network,
                descr=description,
                name=subnet_name,
                scope=scope,
            ),
        )

        aci.get_diff(aci_class='l3extSubnet')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import org.apache.kafka.common.internals.SecurityManagerCompatibility;
import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerSaslClientCallbackHandler;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.CompletionException;
import javax.security.auth.Subject;
import javax.security.auth.callback.Callback;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Unit tests for {@link OAuthBearerSaslClientCallbackHandler}: the handler
 * must fail when the current Subject holds no bearer token, and must supply
 * a token from the Subject's private credentials when one is present.
 */
public class OAuthBearerSaslClientCallbackHandlerTest {

    /**
     * Build a minimal token stub; only {@code lifetimeMs} carries a real
     * value, which the tests use to tell tokens apart.
     */
    private static OAuthBearerToken createTokenWithLifetimeMillis(final long lifetimeMillis) {
        return new OAuthBearerToken() {
            @Override
            public String value() {
                return null;
            }

            @Override
            public Long startTimeMs() {
                return null;
            }

            @Override
            public Set<String> scope() {
                return null;
            }

            @Override
            public String principalName() {
                return null;
            }

            @Override
            public long lifetimeMs() {
                return lifetimeMillis;
            }
        };
    }

    @Test
    public void testWithZeroTokens() {
        OAuthBearerSaslClientCallbackHandler handler = createCallbackHandler();
        // No token in the Subject: callAs wraps the handler's checked
        // IOException in a CompletionException.
        CompletionException e = assertThrows(CompletionException.class, () -> SecurityManagerCompatibility.get().callAs(new Subject(),
            () -> {
                OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback();
                handler.handle(new Callback[] {callback});
                return null;
            }
        ));
        assertEquals(IOException.class, e.getCause().getClass());
    }

    @Test()
    public void testWithPotentiallyMultipleTokens() {
        OAuthBearerSaslClientCallbackHandler handler = createCallbackHandler();
        SecurityManagerCompatibility.get().callAs(new Subject(), () -> {
            final int maxTokens = 4;
            final Set<Object> privateCredentials = SecurityManagerCompatibility.get().current()
                .getPrivateCredentials();
            privateCredentials.clear();
            // Tokens accumulate across iterations; each pass asserts the
            // handler hands back the token whose lifetime equals the most
            // recently added value.
            for (int num = 1; num <= maxTokens; ++num) {
                privateCredentials.add(createTokenWithLifetimeMillis(num));
                OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback();
                handler.handle(new Callback[] {callback});
                assertEquals(num, callback.token().lifetimeMs());
            }
            return null;
        });
    }

    /** Handler configured for the OAUTHBEARER mechanism with no JAAS options. */
    private static OAuthBearerSaslClientCallbackHandler createCallbackHandler() {
        OAuthBearerSaslClientCallbackHandler handler = new OAuthBearerSaslClientCallbackHandler();
        handler.configure(Collections.emptyMap(), OAuthBearerLoginModule.OAUTHBEARER_MECHANISM,
            Collections.emptyList());
        return handler;
    }
}
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
# Selection values shared by every *_warn field below: stay silent, show a
# non-blocking warning, or raise and block the workflow.
WARNING_MESSAGE = [
    ('no-message', 'No Message'),
    ('warning', 'Warning'),
    ('block', 'Blocking Message')
]

# Common tooltip explaining the three options above.
WARNING_HELP = _('Selecting the "Warning" option will notify user with the message, Selecting "Blocking Message" will throw an exception with the message and block the flow. The Message has to be written in the next field.')
class res_partner(osv.osv):
    """Add per-document warning configuration to partners.

    For each document type (sale, purchase, picking, invoice) the partner
    stores a warning level (see WARNING_MESSAGE) and the message text to
    display when that partner is selected on the document.
    """
    _inherit = 'res.partner'

    _columns = {
        'sale_warn': fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
        'sale_warn_msg': fields.text('Message for Sales Order'),
        'purchase_warn': fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
        'purchase_warn_msg': fields.text('Message for Purchase Order'),
        'picking_warn': fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
        'picking_warn_msg': fields.text('Message for Stock Picking'),
        'invoice_warn': fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
        'invoice_warn_msg': fields.text('Message for Invoice'),
    }
    # Warnings are disabled by default for every document type.
    _defaults = {
        'sale_warn': 'no-message',
        'purchase_warn': 'no-message',
        'picking_warn': 'no-message',
        'invoice_warn': 'no-message',
    }
class sale_order(osv.osv):
    """Raise the partner's configured sales warning when the partner changes."""
    _inherit = 'sale.order'

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        # Partner cleared: reset the dependent fields, nothing to warn about.
        if not part:
            return {'value': {'partner_invoice_id': False,
                              'partner_shipping_id': False,
                              'payment_term': False}}

        warning = {}
        title = False
        message = False
        partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        if partner.sale_warn != 'no-message':
            title = _("Warning for %s") % partner.name
            message = partner.sale_warn_msg
            warning = {'title': title, 'message': message}
            # A blocking warning clears the partner and short-circuits.
            if partner.sale_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}

        result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
        inherited = result.get('warning')
        if inherited:
            # Merge our warning (if any) with the one coming from super().
            if title:
                warning['title'] = title + ' & ' + inherited['title']
            else:
                warning['title'] = inherited['title']
            if message:
                warning['message'] = message + ' ' + inherited['message']
            else:
                warning['message'] = inherited['message']
        return {'value': result.get('value', {}), 'warning': warning}
class purchase_order(osv.osv):
    """Raise the partner's configured purchase warning when the supplier changes."""
    _inherit = 'purchase.order'

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        # No partner selected: just reset the address field.
        if not part:
            return {'value': {'partner_address_id': False}}
        warning = {}
        title = False
        message = False
        partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        if partner.purchase_warn != 'no-message':
            # Build the warning configured on the partner form.
            title = _("Warning for %s") % partner.name
            message = partner.purchase_warn_msg
            warning = {
                'title': title,
                'message': message
            }
            # 'block' cancels the partner change altogether.
            if partner.purchase_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}
        result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
        if result.get('warning', False):
            # Combine our warning with the one raised by the parent class.
            warning['title'] = title and title + ' & ' + result['warning']['title'] or result['warning']['title']
            warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
        return {'value': result.get('value', {}), 'warning': warning}
class account_invoice(osv.osv):
    """Raise the partner's configured invoice warning when the partner changes."""
    _inherit = 'account.invoice'

    def onchange_partner_id(self, cr, uid, ids, type, partner_id,
                            date_invoice=False, payment_term=False,
                            partner_bank_id=False, company_id=False,
                            context=None):
        # No partner selected: reset dependent fields and skip warnings.
        if not partner_id:
            return {'value': {
                'account_id': False,
                'payment_term': False,
            }
            }
        warning = {}
        title = False
        message = False
        partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        if partner.invoice_warn != 'no-message':
            # Build the warning configured on the partner form.
            title = _("Warning for %s") % partner.name
            message = partner.invoice_warn_msg
            warning = {
                'title': title,
                'message': message
            }
            # 'block' cancels the partner change altogether.
            if partner.invoice_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}
        result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
                                                                  date_invoice=date_invoice, payment_term=payment_term,
                                                                  partner_bank_id=partner_bank_id, company_id=company_id, context=context)
        if result.get('warning', False):
            # Combine our warning with the one raised by the parent class.
            warning['title'] = title and title + ' & ' + result['warning']['title'] or result['warning']['title']
            warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
        return {'value': result.get('value', {}), 'warning': warning}
class stock_picking(osv.osv):
    """Raise the partner's configured picking warning when the partner changes."""
    _inherit = 'stock.picking'

    def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
        """Onchange for the picking partner: return the configured warning,
        blocking the change when the partner's level is 'block'."""
        if not partner_id:
            return {}
        partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        warning = {}
        title = False
        message = False
        if partner.picking_warn != 'no-message':
            title = _("Warning for %s") % partner.name
            message = partner.picking_warn_msg
            warning = {
                'title': title,
                'message': message
            }
            # 'block' cancels the partner change altogether.
            if partner.picking_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}
        # Bug fix: the original called super(stock_picking_in, self), but no
        # class named stock_picking_in exists in this module, so any
        # non-blocking configuration raised a NameError here.
        result = super(stock_picking, self).onchange_partner_in(cr, uid, ids, partner_id, context)
        if result.get('warning', False):
            # Combine our warning with the one raised by the parent class.
            warning['title'] = title and title + ' & ' + result['warning']['title'] or result['warning']['title']
            warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
        return {'value': result.get('value', {}), 'warning': warning}
class product_product(osv.osv):
    """Add per-document-line warning configuration to products.

    NOTE(review): the class is named product_product but inherits
    'product.template' -- confirm the warning fields are meant to live on
    the template rather than the variant.
    """
    _inherit = 'product.template'

    _columns = {
        'sale_line_warn': fields.selection(WARNING_MESSAGE, 'Sales Order Line', help=WARNING_HELP, required=True),
        'sale_line_warn_msg': fields.text('Message for Sales Order Line'),
        'purchase_line_warn': fields.selection(WARNING_MESSAGE, 'Purchase Order Line', help=WARNING_HELP, required=True),
        'purchase_line_warn_msg': fields.text('Message for Purchase Order Line'),
    }
    # Warnings are disabled by default for both line types.
    _defaults = {
        'sale_line_warn': 'no-message',
        'purchase_line_warn': 'no-message',
    }
class sale_order_line(osv.osv):
    """Raise the product's configured sales-line warning when the product changes."""
    _inherit = 'sale.order.line'

    def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
                                  uom=False, qty_uos=0, uos=False, name='', partner_id=False,
                                  lang=False, update_tax=True, date_order=False, packaging=False,
                                  fiscal_position=False, flag=False, warehouse_id=False, context=None):
        warning = {}
        # No product selected: reset quantities/packaging and clear UoM domains.
        if not product:
            return {'value': {'th_weight': 0, 'product_packaging': False,
                              'product_uos_qty': qty}, 'domain': {'product_uom': [],
                                                                  'product_uos': []}}
        product_obj = self.pool.get('product.product')
        product_info = product_obj.browse(cr, uid, product)
        title = False
        message = False
        if product_info.sale_line_warn != 'no-message':
            # Build the warning configured on the product form.
            title = _("Warning for %s") % product_info.name
            message = product_info.sale_line_warn_msg
            warning['title'] = title
            warning['message'] = message
            # 'block' cancels the product change altogether.
            if product_info.sale_line_warn == 'block':
                return {'value': {'product_id': False}, 'warning': warning}
        result = super(sale_order_line, self).product_id_change_with_wh(cr, uid, ids, pricelist, product, qty,
                                                                        uom, qty_uos, uos, name, partner_id,
                                                                        lang, update_tax, date_order, packaging, fiscal_position, flag, warehouse_id=warehouse_id, context=context)
        if result.get('warning', False):
            # Combine our warning with the one raised by the parent class.
            warning['title'] = title and title + ' & ' + result['warning']['title'] or result['warning']['title']
            warning['message'] = message and message + '\n\n' + result['warning']['message'] or result['warning']['message']
        return {'value': result.get('value', {}), 'warning': warning}
class purchase_order_line(osv.osv):
    """Raise the product's configured purchase-line warning when the product changes."""
    _inherit = 'purchase.order.line'

    def onchange_product_id(self, cr, uid, ids, pricelist, product, qty, uom,
                            partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
                            name=False, price_unit=False, state='draft', notes=False, context=None):
        warning = {}
        # No product selected: keep current values and clear the UoM domain.
        if not product:
            return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'notes': notes or '', 'product_uom': uom or False}, 'domain': {'product_uom': []}}
        product_obj = self.pool.get('product.product')
        product_info = product_obj.browse(cr, uid, product)
        title = False
        message = False
        if product_info.purchase_line_warn != 'no-message':
            # Build the warning configured on the product form.
            title = _("Warning for %s") % product_info.name
            message = product_info.purchase_line_warn_msg
            warning['title'] = title
            warning['message'] = message
            # 'block' cancels the product change altogether.
            if product_info.purchase_line_warn == 'block':
                return {'value': {'product_id': False}, 'warning': warning}
        result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom,
                                                                      partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned, name=name, price_unit=price_unit, state=state, context=context)
        if result.get('warning', False):
            # Combine our warning with the one raised by the parent class.
            warning['title'] = title and title + ' & ' + result['warning']['title'] or result['warning']['title']
            warning['message'] = message and message + '\n\n' + result['warning']['message'] or result['warning']['message']
        return {'value': result.get('value', {}), 'warning': warning}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
import argparse
import csv
import pprint
import editdistance
# Command-line interface: members CSV and POS TSV in, reconciliation CSV out.
parser = argparse.ArgumentParser()
parser.add_argument("members", help="members db csv")
parser.add_argument("pos", help="pos tsv")
parser.add_argument("output", help="output csv")
args = parser.parse_args()

# NOTE(review): pp is never used in this script -- kept, presumably, for
# ad-hoc debugging of parsed rows.
pp = pprint.PrettyPrinter(indent=4)
def member_key(m):
    """Normalised lookup key for a members-db row: lowercase first+last name, spaces removed."""
    full_name = m['FIRST NAME'] + m['LAST NAME']
    return full_name.lower().replace(" ", "")
def pos_key(p):
    """Normalised lookup key for a POS row, in the same format as member_key."""
    combined = "".join([p['FIRSTNAME'], p['LASTNAME']])
    return combined.replace(" ", "").lower()
def pos_key_alternative(p):
    """Fallback key from the combined NAME field: the text before the first '//'."""
    name_part, _sep, _rest = p['NAME'].partition("//")
    return name_part.replace(" ", "").lower()
def matching(members, pos, pos_alt):
    """Pair each member with a POS record found under the primary or, failing
    that, the alternative key.

    Returns a dict keyed by MEMBER NUMBER, each value holding the matched
    'pos' and 'member' rows.
    """
    paired = {}
    for key, member in members.items():
        if key in pos:
            source = pos
        elif key in pos_alt:
            source = pos_alt
        else:
            continue
        paired[member['MEMBER NUMBER']] = {'pos': source[key], 'member': member}
    return paired
def possible_match(matches, key):
    """CSV row for a matched member number, or four empty cells when unmatched."""
    entry = matches.get(key)
    if entry is None:
        return ['', '', '', '']
    pos_row = entry['pos']
    member_row = entry['member']
    return [
        key,
        pos_row['ID'],
        member_row['FIRST NAME'] + ' ' + member_row['LAST NAME'],
        pos_row['FIRSTNAME'] + pos_row['LASTNAME'],
    ]
def find_distance(non_matching, pos, distance):
    """Return a key from *pos* within edit distance *distance* of
    *non_matching*, or None (implicitly) when no key qualifies.

    The threshold is widened one step at a time, so among qualifying keys
    one with the smallest threshold wins; ties break on dict iteration
    order.  NOTE(review): editdistance.eval may be re-evaluated for the
    same key at each threshold.
    """
    keys = pos.keys()
    for i in range(1, distance+1):
        # First key within the current threshold, if any.
        found = next((key for key in keys if editdistance.eval(non_matching, key) <= i), None)
        if found:
            return found
def add_idx(member, idx):
    """Annotate a member row with its original CSV position and return it."""
    member.update(idx=idx)
    return member
# Join the members CSV against the POS TSV and write a reconciliation report.
with open(args.members, newline='') as members_file:
    with open(args.pos, newline='') as pos_file:
        members_reader = csv.DictReader(members_file)
        pos_reader = list(csv.DictReader(pos_file, delimiter='\t'))
        # Remember each member's original row position before re-keying by name.
        members = [add_idx(m, idx) for idx, m in enumerate(members_reader)]
        membKeys = [m['MEMBER NUMBER'] for m in members]
        members = { member_key(m): m for m in members }
        pos = { pos_key(p) : p for p in pos_reader }
        pos_alt = { pos_key_alternative(p) : p for p in pos_reader }
        matches = matching(members, pos, pos_alt)
        # NOTE(review): only the primary pos key is checked here, so a member
        # matched via pos_alt still lands in nonmatching_members.
        nonmatching_members = [key for key in list(members.keys())
                               if key not in pos]
        # NOTE(review): nonmatching_pos is computed but never reported.
        nonmatching_pos = [key for key in list(pos.keys())
                           if key not in members]
        with open(args.output, 'w', newline='') as outfile:
            out = csv.writer(outfile)
            # Member numbers sorted numerically for the report.
            keys = sorted([int(k) for k in membKeys])
            out.writerow(['MEMBER NUMBER', 'POS ID','MEMBER NAME', 'POS NAME'])
            # One row per member number; unmatched members get blank cells.
            [out.writerow(possible_match(matches, str(i))) for i in keys]
            out.writerow(['Non matching'])
            out.writerow(['MEMBER NUMBER', 'FIRST NAME', 'LAST NAME'])
            [out.writerow([members[key]['MEMBER NUMBER'],
                           members[key]['FIRST NAME'],
                           members[key]['LAST NAME']]) for key
             in nonmatching_members]
            # Fuzzy candidates (edit distance <= 3) for leftover members,
            # printed to stdout for manual review.
            l_matches = [x for x in [(key, find_distance(key, pos, 3))
                                     for key in nonmatching_members] if x[1]]

            def print_l_match(m_key, p_key):
                # Show one candidate member/POS pairing.
                m = members[m_key]
                p = pos[p_key]
                print(m['MEMBER NUMBER'], p['ID'],
                      m['FIRST NAME'] + ' ' + m['LAST NAME'],
                      p['FIRSTNAME'] + ' ' + p['LASTNAME'])
            [print_l_match(m_key, p_key) for (m_key, p_key) in l_matches]
            print("Non matching:", len(nonmatching_members) - len(l_matches))
            print("Matching:", len(matches))
            print("L Matches:", len(l_matches))
/*
* pgp-decrypt.c
* OpenPGP decrypt.
*
* Copyright (c) 2005 Marko Kreen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* contrib/pgcrypto/pgp-decrypt.c
*/
#include "postgres.h"
#include "mbuf.h"
#include "pgp.h"
#include "px.h"
#define NO_CTX_SIZE 0
#define ALLOW_CTX_SIZE 1
#define NO_COMPR 0
#define ALLOW_COMPR 1
#define NO_MDC 0
#define NEED_MDC 1
#define PKT_NORMAL 1
#define PKT_STREAM 2
#define PKT_CONTEXT 3
#define MAX_CHUNK (16*1024*1024)
/*
 * Parse a new-format packet length (RFC 4880 section 4.2.2).
 *
 * Returns PKT_NORMAL when the full length is known, PKT_STREAM when this
 * is a partial-body length (more chunks follow), or a negative PXE_*
 * error code.  The parsed length is stored in *len_p.
 */
static int
parse_new_len(PullFilter *src, int *len_p)
{
	uint8		b;
	int			len;
	int			pkttype = PKT_NORMAL;

	GETBYTE(src, b);
	if (b <= 191)
		len = b;				/* one-octet length */
	else if (b >= 192 && b <= 223)
	{
		/* two-octet length */
		len = ((unsigned) (b) - 192) << 8;
		GETBYTE(src, b);
		len += 192 + b;
	}
	else if (b == 255)
	{
		/* five-octet length: four more big-endian octets */
		GETBYTE(src, b);
		len = b;
		GETBYTE(src, b);
		len = (len << 8) | b;
		GETBYTE(src, b);
		len = (len << 8) | b;
		GETBYTE(src, b);
		len = (len << 8) | b;
	}
	else
	{
		/* 224..254: partial body length, always a power of two */
		len = 1 << (b & 0x1F);
		pkttype = PKT_STREAM;
	}

	if (len < 0 || len > MAX_CHUNK)
	{
		px_debug("parse_new_len: weird length");
		return PXE_PGP_CORRUPT_DATA;
	}

	*len_p = len;
	return pkttype;
}
/*
 * Parse an old-format packet length.  lentype is the low two bits of the
 * packet tag octet: 0 = one octet, 1 = two octets, 2 = four octets.
 * Returns PKT_NORMAL or a negative error code; the length goes to *len_p.
 */
static int
parse_old_len(PullFilter *src, int *len_p, int lentype)
{
	uint8		b;
	int			len;

	GETBYTE(src, b);
	len = b;

	if (lentype == 1)
	{
		/* one more big-endian octet */
		GETBYTE(src, b);
		len = (len << 8) | b;
	}
	else if (lentype == 2)
	{
		/* three more big-endian octets */
		GETBYTE(src, b);
		len = (len << 8) | b;
		GETBYTE(src, b);
		len = (len << 8) | b;
		GETBYTE(src, b);
		len = (len << 8) | b;
	}

	if (len < 0 || len > MAX_CHUNK)
	{
		px_debug("parse_old_len: weird length");
		return PXE_PGP_CORRUPT_DATA;
	}

	*len_p = len;
	return PKT_NORMAL;
}
/* returns pkttype or 0 on eof */

/*
 * Read one OpenPGP packet header from src.
 *
 * Returns 0 on clean EOF, a negative error code, or the packet kind
 * (PKT_NORMAL / PKT_STREAM / PKT_CONTEXT).  The packet tag is stored in
 * *tag and the body length in *len_p.  Old-format lentype 3 ("until end
 * of enclosing context") is accepted only when allow_ctx is set.
 */
int
pgp_parse_pkt_hdr(PullFilter *src, uint8 *tag, int *len_p, int allow_ctx)
{
	int			lentype;
	int			res;
	uint8	   *p;

	/* EOF is normal here, thus we don't use GETBYTE */
	res = pullf_read(src, 1, &p);
	if (res < 0)
		return res;
	if (res == 0)
		return 0;

	/* bit 7 of the tag octet must always be set */
	if ((*p & 0x80) == 0)
	{
		px_debug("pgp_parse_pkt_hdr: not pkt hdr");
		return PXE_PGP_CORRUPT_DATA;
	}

	if (*p & 0x40)
	{
		/* new-format header: 6-bit tag, length follows */
		*tag = *p & 0x3f;
		res = parse_new_len(src, len_p);
	}
	else
	{
		/* old-format header: 4-bit tag, 2-bit length type */
		lentype = *p & 3;
		*tag = (*p >> 2) & 0x0F;
		if (lentype == 3)
			res = allow_ctx ? PKT_CONTEXT : PXE_PGP_CORRUPT_DATA;
		else
			res = parse_old_len(src, len_p, lentype);
	}
	return res;
}
/*
 * Packet reader
 */

/* State for pktreader_pull: packet kind plus bytes left in current chunk. */
struct PktData
{
	int			type;			/* PKT_NORMAL / PKT_STREAM / PKT_CONTEXT */
	int			len;			/* remaining bytes in the current chunk */
};
/*
 * Pull-filter read callback: serve bytes from the current packet body,
 * transparently chaining to the next partial-body chunk for streamed
 * packets.  Returns bytes read, 0 at packet end, or a negative error.
 */
static int
pktreader_pull(void *priv, PullFilter *src, int len,
			   uint8 **data_p, uint8 *buf, int buflen)
{
	int			res;
	struct PktData *pkt = priv;

	/* PKT_CONTEXT means: whatever there is */
	if (pkt->type == PKT_CONTEXT)
		return pullf_read(src, len, data_p);

	while (pkt->len == 0)
	{
		/* this was last chunk in stream */
		if (pkt->type == PKT_NORMAL)
			return 0;

		/* next chunk in stream */
		res = parse_new_len(src, &pkt->len);
		if (res < 0)
			return res;
		pkt->type = res;
	}

	/* never hand out more than the current chunk holds */
	if (len > pkt->len)
		len = pkt->len;

	res = pullf_read(src, len, data_p);
	if (res > 0)
		pkt->len -= res;

	return res;
}
/* Free callback: scrub the packet state before releasing it. */
static void
pktreader_free(void *priv)
{
	struct PktData *pkt = priv;

	px_memset(pkt, 0, sizeof(*pkt));
	pfree(pkt);
}

/* no init callback: state is created by pgp_create_pkt_reader */
static struct PullFilterOps pktreader_filter = {
	NULL, pktreader_pull, pktreader_free
};
/* needs helper function to pass several parameters */

/*
 * Create a pull filter that reads one packet body of the given kind and
 * length from src.  On failure the state is freed and the negative error
 * code returned.  NOTE(review): ctx is currently unused here.
 */
int
pgp_create_pkt_reader(PullFilter **pf_p, PullFilter *src, int len,
					  int pkttype, PGP_Context *ctx)
{
	int			res;
	struct PktData *pkt = palloc_object(struct PktData);

	pkt->type = pkttype;
	pkt->len = len;
	res = pullf_create(pf_p, &pktreader_filter, pkt, src);
	if (res < 0)
		pfree(pkt);
	return res;
}
/*
 * Prefix check filter
 *
 * https://tools.ietf.org/html/rfc4880#section-5.7
 * https://tools.ietf.org/html/rfc4880#section-5.13
 *
 * The first blocksize+2 decrypted bytes are random, with the last two
 * bytes repeated; a mismatch indicates a wrong key or corrupt data.
 */
static int
prefix_init(void **priv_p, void *arg, PullFilter *src)
{
	PGP_Context *ctx = arg;
	int			len;
	int			res;
	uint8	   *buf;
	uint8		tmpbuf[PGP_MAX_BLOCK + 2];

	len = pgp_get_cipher_block_size(ctx->cipher_algo);
	/* Make sure we have space for prefix */
	if (len > PGP_MAX_BLOCK)
		return PXE_BUG;

	res = pullf_read_max(src, len + 2, &buf, tmpbuf);
	if (res < 0)
		return res;
	if (res != len + 2)
	{
		px_debug("prefix_init: short read");
		px_memset(tmpbuf, 0, sizeof(tmpbuf));
		return PXE_PGP_CORRUPT_DATA;
	}

	/* compare the two repeated bytes against the end of the random block */
	if (buf[len - 2] != buf[len] || buf[len - 1] != buf[len + 1])
	{
		px_debug("prefix_init: corrupt prefix");
		/* report error in pgp_decrypt() -- only flag it here, so the
		 * stream keeps being processed the same way in either case */
		ctx->corrupt_prefix = 1;
	}
	/* don't leave key-derived bytes lying on the stack */
	px_memset(tmpbuf, 0, sizeof(tmpbuf));
	return 0;
}

static struct PullFilterOps prefix_filter = {
	prefix_init, NULL, NULL
};
/*
 * Decrypt filter
 */

/* Stash the CFB cipher state; request a scratch buffer for output. */
static int
decrypt_init(void **priv_p, void *arg, PullFilter *src)
{
	PGP_CFB    *cfb = arg;

	*priv_p = cfb;

	/* we need to write somewhere, so ask for a buffer */
	return 4096;
}
/* Read ciphertext from src and return the CFB-decrypted bytes via buf. */
static int
decrypt_read(void *priv, PullFilter *src, int len,
			 uint8 **data_p, uint8 *buf, int buflen)
{
	PGP_CFB    *cfb = priv;
	uint8	   *tmp;
	int			res;

	res = pullf_read(src, len, &tmp);
	if (res > 0)
	{
		pgp_cfb_decrypt(cfb, tmp, res, buf);
		*data_p = buf;
	}
	return res;
}

struct PullFilterOps pgp_decrypt_filter = {
	decrypt_init, decrypt_read, NULL
};
/*
 * MDC hasher filter
 *
 * Feeds decrypted plaintext through SHA-1 so the trailing MDC packet
 * can be verified later.
 */
static int
mdc_init(void **priv_p, void *arg, PullFilter *src)
{
	PGP_Context *ctx = arg;

	*priv_p = ctx;
	return pgp_load_digest(PGP_DIGEST_SHA1, &ctx->mdc_ctx);
}
/* Release the SHA-1 context, unless the mdcbuf filter has taken it over. */
static void
mdc_free(void *priv)
{
	PGP_Context *ctx = priv;

	if (ctx->use_mdcbuf_filter)
		return;
	px_md_free(ctx->mdc_ctx);
	ctx->mdc_ctx = NULL;
}
/*
 * Read the 20-byte MDC body from src and compare it against the running
 * SHA-1 of the plaintext.  Returns 0 on match, an error code otherwise.
 */
static int
mdc_finish(PGP_Context *ctx, PullFilter *src, int len)
{
	int			res;
	uint8		hash[20];
	uint8		tmpbuf[20];
	uint8	   *data;

	/* should not happen */
	if (ctx->use_mdcbuf_filter)
		return PXE_BUG;

	/* It's SHA1 */
	if (len != 20)
		return PXE_PGP_CORRUPT_DATA;

	/* mdc_read should not call px_md_update */
	ctx->in_mdc_pkt = 1;

	/* read data */
	res = pullf_read_max(src, len, &data, tmpbuf);
	if (res < 0)
		return res;
	if (res == 0)
	{
		px_debug("no mdc");
		return PXE_PGP_CORRUPT_DATA;
	}

	/* is the packet sane? */
	if (res != 20)
	{
		px_debug("mdc_finish: read failed, res=%d", res);
		return PXE_PGP_CORRUPT_DATA;
	}

	/*
	 * ok, we got the hash, now check
	 */
	px_md_finish(ctx->mdc_ctx, hash);
	res = memcmp(hash, data, 20);
	/* wipe digest material from the stack */
	px_memset(hash, 0, 20);
	px_memset(tmpbuf, 0, sizeof(tmpbuf));
	if (res != 0)
	{
		px_debug("mdc_finish: mdc failed");
		return PXE_PGP_CORRUPT_DATA;
	}
	ctx->mdc_checked = 1;
	return 0;
}
/*
 * Read callback: pass plaintext through while hashing it, except while
 * the MDC packet itself is being read (or when the combined mdcbuf
 * filter already does the hashing).
 */
static int
mdc_read(void *priv, PullFilter *src, int len,
		 uint8 **data_p, uint8 *buf, int buflen)
{
	int			res;
	PGP_Context *ctx = priv;

	/* skip this filter? */
	if (ctx->use_mdcbuf_filter || ctx->in_mdc_pkt)
		return pullf_read(src, len, data_p);

	res = pullf_read(src, len, data_p);
	if (res < 0)
		return res;
	if (res == 0)
	{
		/* data must not end before the MDC packet is seen */
		px_debug("mdc_read: unexpected eof");
		return PXE_PGP_CORRUPT_DATA;
	}
	px_md_update(ctx->mdc_ctx, *data_p, res);

	return res;
}

static struct PullFilterOps mdc_filter = {
	mdc_init, mdc_read, mdc_free
};
/*
 * Combined Pkt reader and MDC hasher.
 *
 * For the case of SYMENCRYPTED_DATA_MDC packet, where
 * the data part has 'context length', which means
 * that data packet ends 22 bytes before end of parent
 * packet, which is silly.
 */
#define MDCBUF_LEN 8192
struct MDCBufData
{
	PGP_Context *ctx;			/* owning decryption context */
	int			eof;			/* set once the stream ended and MDC checked */
	int			buflen;			/* capacity of buf (== MDCBUF_LEN) */
	int			avail;			/* plaintext bytes ready at pos */
	uint8	   *pos;			/* current read position inside buf */
	int			mdc_avail;		/* bytes held back as possible MDC packet */
	uint8		mdc_buf[22];	/* 2-byte pkt header + 20-byte SHA-1 candidate */
	uint8		buf[MDCBUF_LEN];	/* plaintext staging buffer */
};
/*
 * Install the combined data/MDC buffering filter.  Marks the context so
 * the plain mdc_filter becomes a no-op pass-through.
 */
static int
mdcbuf_init(void **priv_p, void *arg, PullFilter *src)
{
	PGP_Context *ctx = arg;
	struct MDCBufData *st;

	st = palloc0_object(struct MDCBufData);
	st->buflen = sizeof(st->buf);
	st->ctx = ctx;
	*priv_p = st;

	/* take over the work of mdc_filter */
	ctx->use_mdcbuf_filter = 1;

	return 0;
}
/*
 * Source hit EOF: the 22 bytes held back in mdc_buf must be the MDC
 * packet (0xD3 0x14 header followed by a 20-byte SHA-1).  Hash the
 * header, finalize the digest and compare against the stored value.
 */
static int
mdcbuf_finish(struct MDCBufData *st)
{
	uint8		hash[20];
	int			res;

	st->eof = 1;

	if (st->mdc_buf[0] != 0xD3 || st->mdc_buf[1] != 0x14)
	{
		px_debug("mdcbuf_finish: bad MDC pkt hdr");
		return PXE_PGP_CORRUPT_DATA;
	}
	/* the 2-byte MDC packet header is itself part of the hashed data */
	px_md_update(st->ctx->mdc_ctx, st->mdc_buf, 2);
	px_md_finish(st->ctx->mdc_ctx, hash);
	res = memcmp(hash, st->mdc_buf + 2, 20);
	px_memset(hash, 0, 20);
	if (res)
	{
		px_debug("mdcbuf_finish: MDC does not match");
		res = PXE_PGP_CORRUPT_DATA;
	}
	return res;
}
/* Append payload bytes to the output buffer and feed them to the hash. */
static void
mdcbuf_load_data(struct MDCBufData *st, uint8 *src, int len)
{
	uint8	   *dst = st->pos + st->avail;

	memcpy(dst, src, len);
	px_md_update(st->ctx->mdc_ctx, src, len);
	st->avail += len;
}
/* Stash trailing bytes that may turn out to be the final MDC packet. */
static void
mdcbuf_load_mdc(struct MDCBufData *st, uint8 *src, int len)
{
	memmove(st->mdc_buf + st->mdc_avail, src, len);
	st->mdc_avail += len;
}
/*
 * Refill the payload buffer from 'src', always holding back the most
 * recent 22 bytes in mdc_buf: once the source hits EOF, those bytes are
 * the trailing MDC packet and must not be handed out as payload.
 */
static int
mdcbuf_refill(struct MDCBufData *st, PullFilter *src)
{
	uint8	   *data;
	int			res;
	int			need;

	/* put avail data in start */
	if (st->avail > 0 && st->pos != st->buf)
		memmove(st->buf, st->pos, st->avail);
	st->pos = st->buf;

	/* read new data */
	need = st->buflen + 22 - st->avail - st->mdc_avail;
	res = pullf_read(src, need, &data);
	if (res < 0)
		return res;
	if (res == 0)
		return mdcbuf_finish(st);

	/* add to buffer */
	if (res >= 22)
	{
		/* previously held-back bytes are now known to be payload */
		mdcbuf_load_data(st, st->mdc_buf, st->mdc_avail);
		st->mdc_avail = 0;

		mdcbuf_load_data(st, data, res - 22);
		mdcbuf_load_mdc(st, data + res - 22, 22);
	}
	else
	{
		/* shift just enough out of mdc_buf so it holds the last 22 bytes */
		int			canmove = st->mdc_avail + res - 22;

		if (canmove > 0)
		{
			mdcbuf_load_data(st, st->mdc_buf, canmove);
			st->mdc_avail -= canmove;
			memmove(st->mdc_buf, st->mdc_buf + canmove, st->mdc_avail);
		}
		mdcbuf_load_mdc(st, data, res);
	}
	return 0;
}
/*
 * PullFilter read hook: serve bytes from the internal buffer, refilling
 * (and eventually MDC-checking, at source EOF) as needed.
 */
static int
mdcbuf_read(void *priv, PullFilter *src, int len,
			uint8 **data_p, uint8 *buf, int buflen)
{
	struct MDCBufData *st = priv;
	int			res;

	if (!st->eof && len > st->avail)
	{
		res = mdcbuf_refill(st, src);
		if (res < 0)
			return res;
	}

	if (len > st->avail)
		len = st->avail;

	*data_p = st->pos;
	st->pos += len;
	st->avail -= len;
	return len;
}
static void
mdcbuf_free(void *priv)
{
struct MDCBufData *st = priv;
px_md_free(st->ctx->mdc_ctx);
st->ctx->mdc_ctx = NULL;
px_memset(st, 0, sizeof(*st));
pfree(st);
}
/* Filter combining buffered packet reading with MDC verification at EOF. */
static struct PullFilterOps mdcbuf_filter = {
	mdcbuf_init, mdcbuf_read, mdcbuf_free
};
/*
 * Decrypt separate session key: CFB-decrypt the blob with the S2K key,
 * take the first octet as the cipher algorithm and the rest as the
 * session key, then validate the key length against that cipher.
 */
static int
decrypt_key(PGP_Context *ctx, const uint8 *src, int len)
{
	int			res;
	uint8		algo;
	PGP_CFB    *cfb;

	res = pgp_cfb_create(&cfb, ctx->s2k_cipher_algo,
						 ctx->s2k.key, ctx->s2k.key_len, 0, NULL);
	if (res < 0)
		return res;

	/* first decrypted octet names the session-key cipher */
	pgp_cfb_decrypt(cfb, src, 1, &algo);
	src++;
	len--;

	pgp_cfb_decrypt(cfb, src, len, ctx->sess_key);
	pgp_cfb_free(cfb);
	ctx->sess_key_len = len;
	ctx->cipher_algo = algo;

	/* a wrong passphrase typically shows up here as a bad length */
	if (pgp_get_cipher_key_size(algo) != len)
	{
		px_debug("sesskey bad len: algo=%d, expected=%d, got=%d",
				 algo, pgp_get_cipher_key_size(algo), len);
		return PXE_PGP_CORRUPT_DATA;
	}
	return 0;
}
/*
 * Handle key packet: parse a symmetrically-encrypted session-key
 * packet, run S2K on the passphrase, and either adopt the S2K key as
 * the session key or decrypt the separate session key that follows.
 */
static int
parse_symenc_sesskey(PGP_Context *ctx, PullFilter *src)
{
	uint8	   *p;
	int			res;
	uint8		tmpbuf[PGP_MAX_KEY + 2];
	uint8		ver;

	GETBYTE(src, ver);
	GETBYTE(src, ctx->s2k_cipher_algo);
	if (ver != 4)
	{
		px_debug("bad key pkt ver");
		return PXE_PGP_CORRUPT_DATA;
	}

	/*
	 * read S2K info
	 */
	res = pgp_s2k_read(src, &ctx->s2k);
	if (res < 0)
		return res;
	ctx->s2k_mode = ctx->s2k.mode;
	ctx->s2k_count = s2k_decode_count(ctx->s2k.iter);
	ctx->s2k_digest_algo = ctx->s2k.digest_algo;

	/*
	 * generate key from password
	 */
	res = pgp_s2k_process(&ctx->s2k, ctx->s2k_cipher_algo,
						  ctx->sym_key, ctx->sym_key_len);
	if (res < 0)
		return res;

	/*
	 * do we have separate session key?
	 */
	res = pullf_read_max(src, PGP_MAX_KEY + 2, &p, tmpbuf);
	if (res < 0)
		return res;

	if (res == 0)
	{
		/*
		 * no, s2k key is session key
		 */
		memcpy(ctx->sess_key, ctx->s2k.key, ctx->s2k.key_len);
		ctx->sess_key_len = ctx->s2k.key_len;
		ctx->cipher_algo = ctx->s2k_cipher_algo;
		res = 0;
		ctx->use_sess_key = 0;
	}
	else
	{
		/*
		 * yes, decrypt it
		 */
		if (res < 17 || res > PGP_MAX_KEY + 1)
		{
			px_debug("expect key, but bad data");
			return PXE_PGP_CORRUPT_DATA;
		}
		ctx->use_sess_key = 1;
		res = decrypt_key(ctx, p, res);
	}

	/* scrub any key material that landed in the stack buffer */
	px_memset(tmpbuf, 0, sizeof(tmpbuf));
	return res;
}
/*
 * Append 'data' to 'dst', collapsing CRLF pairs into a single LF.  A CR
 * seen at the very end of a chunk is remembered in *got_cr and emitted
 * at the start of the next call, unless that chunk begins with LF.
 *
 * NOTE(review): when *got_cr is set, *data is read unconditionally, so
 * len must be > 0 — the only caller (parse_literal_data) only calls
 * with res > 0; confirm before reusing elsewhere.
 */
static int
copy_crlf(MBuf *dst, uint8 *data, int len, int *got_cr)
{
	uint8	   *data_end = data + len;
	uint8		tmpbuf[1024];
	uint8	   *tmp_end = tmpbuf + sizeof(tmpbuf);
	uint8	   *p;
	int			res;

	p = tmpbuf;
	if (*got_cr)
	{
		/* pending CR was not part of a CRLF pair; emit it now */
		if (*data != '\n')
			*p++ = '\r';
		*got_cr = 0;
	}
	while (data < data_end)
	{
		if (*data == '\r')
		{
			if (data + 1 < data_end)
			{
				/* skip the CR of a CRLF pair */
				if (*(data + 1) == '\n')
					data++;
			}
			else
			{
				/* CR is last byte of chunk; decide on the next call */
				*got_cr = 1;
				break;
			}
		}
		*p++ = *data++;
		if (p >= tmp_end)
		{
			res = mbuf_append(dst, tmpbuf, p - tmpbuf);
			if (res < 0)
				return res;
			p = tmpbuf;
		}
	}
	if (p - tmpbuf > 0)
	{
		res = mbuf_append(dst, tmpbuf, p - tmpbuf);
		if (res < 0)
			return res;
	}
	px_memset(tmpbuf, 0, sizeof(tmpbuf));
	return 0;
}
/*
 * Parse a Literal Data packet: skip the name and date fields, record
 * the data type byte ('t'/'u' = text, anything else treated as binary),
 * then stream the payload into 'dst', converting CRLF to LF when the
 * context requests text-mode line-ending conversion.
 */
static int
parse_literal_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
{
	int			type;
	int			name_len;
	int			res;
	uint8	   *buf;
	uint8		tmpbuf[4];
	int			got_cr = 0;

	GETBYTE(pkt, type);
	GETBYTE(pkt, name_len);

	/* skip name */
	while (name_len > 0)
	{
		res = pullf_read(pkt, name_len, &buf);
		if (res < 0)
			return res;
		if (res == 0)
			break;
		name_len -= res;
	}
	if (name_len > 0)
	{
		px_debug("parse_literal_data: unexpected eof");
		return PXE_PGP_CORRUPT_DATA;
	}

	/* skip date */
	res = pullf_read_max(pkt, 4, &buf, tmpbuf);
	if (res != 4)
	{
		px_debug("parse_literal_data: unexpected eof");
		return PXE_PGP_CORRUPT_DATA;
	}
	px_memset(tmpbuf, 0, 4);

	/*
	 * If called from an SQL function that returns text, pgp_decrypt() rejects
	 * inputs not self-identifying as text.
	 */
	if (ctx->text_mode)
		if (type != 't' && type != 'u')
		{
			px_debug("parse_literal_data: data type=%c", type);
			/* remember, but don't fail yet — see pgp_decrypt() */
			ctx->unexpected_binary = true;
		}

	ctx->unicode_mode = (type == 'u') ? 1 : 0;

	/* read data */
	while (1)
	{
		res = pullf_read(pkt, 32 * 1024, &buf);
		if (res <= 0)
			break;

		if (ctx->text_mode && ctx->convert_crlf)
			res = copy_crlf(dst, buf, res, &got_cr);
		else
			res = mbuf_append(dst, buf, res);
		if (res < 0)
			break;
	}
	/* flush a CR that was dangling at the very end of the data */
	if (res >= 0 && got_cr)
		res = mbuf_append(dst, (const uint8 *) "\r", 1);
	return res;
}
/* process_data_packets and parse_compressed_data call each other */
static int process_data_packets(PGP_Context *ctx, MBuf *dst,
PullFilter *src, int allow_compr, int need_mdc);
/*
 * Parse a Compressed Data packet: decompress ZIP/ZLIB payloads and
 * recurse into process_data_packets (nested compression is refused).
 * BZIP2 is unsupported; its data is drained so the MDC hash still
 * covers it, and the error is reported later in pgp_decrypt().
 */
static int
parse_compressed_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
{
	int			res;
	uint8		type;
	PullFilter *pf_decompr;
	uint8	   *discard_buf;

	GETBYTE(pkt, type);
	ctx->compress_algo = type;
	switch (type)
	{
		case PGP_COMPR_NONE:
			res = process_data_packets(ctx, dst, pkt, NO_COMPR, NO_MDC);
			break;

		case PGP_COMPR_ZIP:
		case PGP_COMPR_ZLIB:
			res = pgp_decompress_filter(&pf_decompr, ctx, pkt);
			if (res >= 0)
			{
				res = process_data_packets(ctx, dst, pf_decompr,
										   NO_COMPR, NO_MDC);
				pullf_free(pf_decompr);
			}
			break;

		case PGP_COMPR_BZIP2:
			px_debug("parse_compressed_data: bzip2 unsupported");
			/* report error in pgp_decrypt() */
			ctx->unsupported_compr = 1;

			/*
			 * Discard the compressed data, allowing it to first affect any
			 * MDC digest computation.
			 */
			while (1)
			{
				res = pullf_read(pkt, 32 * 1024, &discard_buf);
				if (res <= 0)
					break;
			}
			break;

		default:
			px_debug("parse_compressed_data: unknown compr type");
			res = PXE_PGP_CORRUPT_DATA;
	}

	return res;
}
/*
 * Walk the packets inside a decrypted message body and collect the
 * plaintext into 'dst'.  Accepts literal data, at most one compressed
 * data packet (when allow_compr), and — when need_mdc is set — a
 * trailing MDC packet, which must come last.
 */
static int
process_data_packets(PGP_Context *ctx, MBuf *dst, PullFilter *src,
					 int allow_compr, int need_mdc)
{
	uint8		tag;
	int			len,
				res;
	int			got_data = 0;
	int			got_mdc = 0;
	PullFilter *pkt = NULL;

	while (1)
	{
		res = pgp_parse_pkt_hdr(src, &tag, &len, ALLOW_CTX_SIZE);
		if (res <= 0)
			break;

		/* mdc packet should be last */
		if (got_mdc)
		{
			px_debug("process_data_packets: data after mdc");
			res = PXE_PGP_CORRUPT_DATA;
			break;
		}

		/*
		 * Context length inside SYMENCRYPTED_DATA_MDC packet needs special
		 * handling.
		 */
		if (need_mdc && res == PKT_CONTEXT)
			res = pullf_create(&pkt, &mdcbuf_filter, ctx, src);
		else
			res = pgp_create_pkt_reader(&pkt, src, len, res, ctx);
		if (res < 0)
			break;

		switch (tag)
		{
			case PGP_PKT_LITERAL_DATA:
				got_data = 1;
				res = parse_literal_data(ctx, dst, pkt);
				break;
			case PGP_PKT_COMPRESSED_DATA:
				if (allow_compr == 0)
				{
					px_debug("process_data_packets: unexpected compression");
					res = PXE_PGP_CORRUPT_DATA;
				}
				else if (got_data)
				{
					/*
					 * compr data must be alone
					 */
					px_debug("process_data_packets: only one cmpr pkt allowed");
					res = PXE_PGP_CORRUPT_DATA;
				}
				else
				{
					got_data = 1;
					res = parse_compressed_data(ctx, dst, pkt);
				}
				break;
			case PGP_PKT_MDC:
				if (need_mdc == NO_MDC)
				{
					px_debug("process_data_packets: unexpected MDC");
					res = PXE_PGP_CORRUPT_DATA;
					break;
				}
				res = mdc_finish(ctx, pkt, len);
				if (res >= 0)
					got_mdc = 1;
				break;
			default:
				px_debug("process_data_packets: unexpected pkt tag=%d", tag);
				res = PXE_PGP_CORRUPT_DATA;
		}
		pullf_free(pkt);
		pkt = NULL;
		if (res < 0)
			break;
	}

	if (pkt)
		pullf_free(pkt);

	if (res < 0)
		return res;

	if (!got_data)
	{
		px_debug("process_data_packets: no data");
		res = PXE_PGP_CORRUPT_DATA;
	}
	/* mdcbuf filter verifies the MDC itself, so no separate pkt is seen */
	if (need_mdc && !got_mdc && !ctx->use_mdcbuf_filter)
	{
		px_debug("process_data_packets: got no mdc");
		res = PXE_PGP_CORRUPT_DATA;
	}
	return res;
}
/*
 * Decrypt a (non-MDC) Symmetrically Encrypted Data packet: stack a CFB
 * decryption filter and the random-prefix filter, then process the
 * inner packets.  No integrity protection in this packet type.
 */
static int
parse_symenc_data(PGP_Context *ctx, PullFilter *pkt, MBuf *dst)
{
	int			res;
	PGP_CFB    *cfb = NULL;
	PullFilter *pf_decrypt = NULL;
	PullFilter *pf_prefix = NULL;

	res = pgp_cfb_create(&cfb, ctx->cipher_algo,
						 ctx->sess_key, ctx->sess_key_len, 1, NULL);
	if (res < 0)
		goto out;

	res = pullf_create(&pf_decrypt, &pgp_decrypt_filter, cfb, pkt);
	if (res < 0)
		goto out;

	res = pullf_create(&pf_prefix, &prefix_filter, ctx, pf_decrypt);
	if (res < 0)
		goto out;

	res = process_data_packets(ctx, dst, pf_prefix, ALLOW_COMPR, NO_MDC);

out:
	/* free in reverse stacking order */
	if (pf_prefix)
		pullf_free(pf_prefix);
	if (pf_decrypt)
		pullf_free(pf_decrypt);
	if (cfb)
		pgp_cfb_free(cfb);
	return res;
}
/*
 * Decrypt a Sym. Encrypted Integrity Protected Data packet (version 1):
 * same filter stack as parse_symenc_data plus the MDC hashing filter,
 * and process_data_packets is told to require the trailing MDC.
 */
static int
parse_symenc_mdc_data(PGP_Context *ctx, PullFilter *pkt, MBuf *dst)
{
	int			res;
	PGP_CFB    *cfb = NULL;
	PullFilter *pf_decrypt = NULL;
	PullFilter *pf_prefix = NULL;
	PullFilter *pf_mdc = NULL;
	uint8		ver;

	GETBYTE(pkt, ver);
	if (ver != 1)
	{
		px_debug("parse_symenc_mdc_data: pkt ver != 1");
		return PXE_PGP_CORRUPT_DATA;
	}

	res = pgp_cfb_create(&cfb, ctx->cipher_algo,
						 ctx->sess_key, ctx->sess_key_len, 0, NULL);
	if (res < 0)
		goto out;

	res = pullf_create(&pf_decrypt, &pgp_decrypt_filter, cfb, pkt);
	if (res < 0)
		goto out;

	res = pullf_create(&pf_mdc, &mdc_filter, ctx, pf_decrypt);
	if (res < 0)
		goto out;

	res = pullf_create(&pf_prefix, &prefix_filter, ctx, pf_mdc);
	if (res < 0)
		goto out;

	res = process_data_packets(ctx, dst, pf_prefix, ALLOW_COMPR, NEED_MDC);

out:
	/* free in reverse stacking order */
	if (pf_prefix)
		pullf_free(pf_prefix);
	if (pf_mdc)
		pullf_free(pf_mdc);
	if (pf_decrypt)
		pullf_free(pf_decrypt);
	if (cfb)
		pgp_cfb_free(cfb);
	return res;
}
/*
 * Drain and discard the remaining contents of a packet.  Returns 0 at
 * clean end-of-packet, or the negative error from the reader.
 */
int
pgp_skip_packet(PullFilter *pkt)
{
	uint8	   *tmp;
	int			res;

	do
	{
		res = pullf_read(pkt, 32 * 1024, &tmp);
	} while (res > 0);

	return res;
}
/*
 * Expect to be at packet end; any remaining data is treated as
 * corruption.  Returns 0 (or the reader's negative error) otherwise.
 */
int
pgp_expect_packet_end(PullFilter *pkt)
{
	uint8	   *tmp;
	int			res = pullf_read(pkt, 32 * 1024, &tmp);

	if (res <= 0)
		return res;

	px_debug("pgp_expect_packet_end: got data");
	return PXE_PGP_CORRUPT_DATA;
}
/*
 * Decrypt a complete PGP message from 'msrc' into 'mdst'.
 *
 * Iterates the top-level packets: session-key packets establish the
 * decryption key, then exactly one encrypted-data packet (with or
 * without MDC protection) supplies the payload.  Error reporting is
 * deliberately deferred and coarse — see the comments at the end.
 */
int
pgp_decrypt(PGP_Context *ctx, MBuf *msrc, MBuf *mdst)
{
	int			res;
	PullFilter *src = NULL;
	PullFilter *pkt = NULL;
	uint8		tag;
	int			len;
	int			got_key = 0;
	int			got_data = 0;

	res = pullf_create_mbuf_reader(&src, msrc);

	while (res >= 0)
	{
		res = pgp_parse_pkt_hdr(src, &tag, &len, NO_CTX_SIZE);
		if (res <= 0)
			break;

		res = pgp_create_pkt_reader(&pkt, src, len, res, ctx);
		if (res < 0)
			break;

		/* default outcome unless a handler below overwrites it */
		res = PXE_PGP_CORRUPT_DATA;
		switch (tag)
		{
			case PGP_PKT_MARKER:
				res = pgp_skip_packet(pkt);
				break;
			case PGP_PKT_PUBENCRYPTED_SESSKEY:
				/* fixme: skip those */
				res = pgp_parse_pubenc_sesskey(ctx, pkt);
				got_key = 1;
				break;
			case PGP_PKT_SYMENCRYPTED_SESSKEY:
				if (got_key)

					/*
					 * Theoretically, there could be several keys, both public
					 * and symmetric, all of which encrypt same session key.
					 * Decrypt should try with each one, before failing.
					 */
					px_debug("pgp_decrypt: using first of several keys");
				else
				{
					got_key = 1;
					res = parse_symenc_sesskey(ctx, pkt);
				}
				break;
			case PGP_PKT_SYMENCRYPTED_DATA:
				if (!got_key)
					px_debug("pgp_decrypt: have data but no key");
				else if (got_data)
					px_debug("pgp_decrypt: got second data packet");
				else
				{
					got_data = 1;
					ctx->disable_mdc = 1;
					res = parse_symenc_data(ctx, pkt, mdst);
				}
				break;
			case PGP_PKT_SYMENCRYPTED_DATA_MDC:
				if (!got_key)
					px_debug("pgp_decrypt: have data but no key");
				else if (got_data)
					px_debug("pgp_decrypt: several data pkts not supported");
				else
				{
					got_data = 1;
					ctx->disable_mdc = 0;
					res = parse_symenc_mdc_data(ctx, pkt, mdst);
				}
				break;
			default:
				px_debug("pgp_decrypt: unknown tag: 0x%02x", tag);
		}
		pullf_free(pkt);
		pkt = NULL;
	}

	if (pkt)
		pullf_free(pkt);

	if (src)
		pullf_free(src);

	if (res < 0)
		return res;

	/*
	 * Report a failure of the prefix_init() "quick check" now, rather than
	 * upon detection, to hinder timing attacks.  pgcrypto is not generally
	 * secure against timing attacks, but this helps.
	 */
	if (!got_data || ctx->corrupt_prefix)
		return PXE_PGP_CORRUPT_DATA;

	/*
	 * Code interpreting purportedly-decrypted data prior to this stage shall
	 * report no error other than PXE_PGP_CORRUPT_DATA.  (PXE_BUG is okay so
	 * long as it remains unreachable.)  This ensures that an attacker able to
	 * choose a ciphertext and receive a corresponding decryption error
	 * message cannot use that oracle to gather clues about the decryption
	 * key.  See "An Attack on CFB Mode Encryption As Used By OpenPGP" by
	 * Serge Mister and Robert Zuccherato.
	 *
	 * A problematic value in the first octet of a Literal Data or Compressed
	 * Data packet may indicate a simple user error, such as the need to call
	 * pgp_sym_decrypt_bytea instead of pgp_sym_decrypt.  Occasionally,
	 * though, it is the first symptom of the encryption key not matching the
	 * decryption key.  When this was the only problem encountered, report a
	 * specific error to guide the user; otherwise, we will have reported
	 * PXE_PGP_CORRUPT_DATA before now.  A key mismatch makes the other errors
	 * into red herrings, and this avoids leaking clues to attackers.
	 */
	if (ctx->unsupported_compr)
		return PXE_PGP_UNSUPPORTED_COMPR;
	if (ctx->unexpected_binary)
		return PXE_PGP_NOT_TEXT;

	return res;
}
from __future__ import unicode_literals
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils.six.moves import range
from .models import (
A, M, MR, R, S, T, Avatar, Base, Child, HiddenUser, HiddenUserProfile,
M2MFrom, M2MTo, MRNull, Parent, RChild, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
    """Tests for the on_delete behavior of A's foreign keys."""

    def setUp(self):
        # Default R instance referenced by the SET()/SET_DEFAULT fields.
        self.DEFAULT = get_default_r()

    def test_auto(self):
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())

    def test_auto_nullable(self):
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())

    def test_setvalue(self):
        a = create_a('setvalue')
        a.setvalue.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue.pk)

    def test_setnull(self):
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setnull)

    def test_setdefault(self):
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault.pk)

    def test_setdefault_none(self):
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setdefault_none)

    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())

    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())

    def test_protect(self):
        a = create_a('protect')
        self.assertRaises(IntegrityError, a.protect.delete)

    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()

        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)

    def test_do_nothing_qscount(self):
        """
        Test that a models.DO_NOTHING relation doesn't trigger a query.
        """
        b = Base.objects.create()
        with self.assertNumQueries(1):
            # RelToBase should not be queried.
            b.delete()
        self.assertEqual(Base.objects.count(), 0)

    def test_inheritance_cascade_up(self):
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())

    def test_inheritance_cascade_down(self):
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())

    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())

    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())

    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_o2o_setnull(self):
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
    """Tests for deletion-collector behavior: cascades, signals, and
    query counts."""

    def test_m2m(self):
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())

        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())

        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())

        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())

        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # The nullable through row survives (was assertFalse(not ...),
        # a double negative with identical meaning).
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        deleted = []
        related_setnull_sets = []

        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))

        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()

        a = create_a('update_cascade')
        a.cascade.delete()

        # Deleted instances have their pk set to None in memory.
        for obj in deleted:
            self.assertIsNone(obj.pk)

        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)

        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        # Each list is named after the signal whose handler fills it.
        # (Previously the names were swapped relative to the signals.)
        pre_delete_order = []
        post_delete_order = []

        def log_post_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))

        def log_pre_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))

        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)

        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        r.delete()
        self.assertEqual(
            post_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
        )
        self.assertEqual(
            pre_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
        )

        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)

    def test_relational_post_delete_signals_happen_before_parent_object(self):
        deletions = []

        def log_post_delete(instance, **kwargs):
            # The parent R must still exist when the child's signal fires.
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)

        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)

        models.signals.post_delete.connect(log_post_delete, sender=S)

        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)

        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.

        # Attach a signal to make sure we will not do fast_deletes.
        calls = []

        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)

        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []

        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)

        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    def test_hidden_related(self):
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)

        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)

    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = int(ceil(float(len(objs)) / batch_size))
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())

    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)

        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)

        # TEST_SIZE // batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        # NOTE(review): `//` already floors, so ceil() here is a no-op; the
        # expected count is only exact when batch_size divides TEST_SIZE —
        # confirm whether ceil(TEST_SIZE / batch_size) was intended.
        expected_num_queries = (ceil(TEST_SIZE // batch_size) +
                                ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)

        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())

    def test_delete_with_keeping_parents(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())

    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)

        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        h2 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        HiddenUserProfile.objects.create(user=h2)
        m1 = M.objects.create()
        m2 = M.objects.create()
        MR.objects.create(r=r, m=m1)
        r.m_set.add(m1)
        r.m_set.add(m2)
        r.save()
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
            M.m2m.through._meta.label: M.m2m.through.objects.count(),
        }
        deleted, deleted_objs = r.delete()
        self.assertEqual(deleted, sum(existed_objs.values()))
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)
class FastDeleteTests(TestCase):
    """Tests for the fast-delete path (deleting without fetching rows)."""

    def test_fast_delete_fk(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to fast-delete the user
        # 1 query to delete the avatar
        self.assertNumQueries(2, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())

    def test_fast_delete_m2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete m2m for f
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_revm2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete its m2m rows.
        # NOTE(review): test name suggests the reverse side (t.delete) was
        # intended here; as written this duplicates test_fast_delete_m2m.
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_qs(self):
        u1 = User.objects.create()
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        expected_queries = 1 if connection.features.update_can_self_select else 2
        self.assertNumQueries(expected_queries,
                              User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        # However, this doesn't work as child.parent access creates a query,
        # and this means we will be generating extra queries (a lot for large
        # querysets). This is not a fast-delete problem.
        # self.assertNumQueries(2, c.delete)
        c.delete()
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())

    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)
"""Support to interact with Remember The Milk."""
import json
import logging
import os
from rtmapi import Rtm, RtmRequestFailedException
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_ID, CONF_NAME, CONF_TOKEN, STATE_OK
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
# httplib2 is a transitive dependency from RtmAPI. If this dependency is not
# set explicitly, the library does not work.
_LOGGER = logging.getLogger(__name__)

DOMAIN = "remember_the_milk"
DEFAULT_NAME = DOMAIN

GROUP_NAME_RTM = "remember the milk accounts"

# Keys used in configuration.yaml and in the local token/id-map store.
CONF_SHARED_SECRET = "shared_secret"
CONF_ID_MAP = "id_map"
CONF_LIST_ID = "list_id"
CONF_TIMESERIES_ID = "timeseries_id"
CONF_TASK_ID = "task_id"

# Schema for a single account entry under the 'remember_the_milk' domain.
RTM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_SHARED_SECRET): cv.string,
    }
)

CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.All(cv.ensure_list, [RTM_SCHEMA])}, extra=vol.ALLOW_EXTRA
)

# Per-installation file holding tokens and task id mappings.
CONFIG_FILE_NAME = ".remember_the_milk.conf"
SERVICE_CREATE_TASK = "create_task"
SERVICE_COMPLETE_TASK = "complete_task"

SERVICE_SCHEMA_CREATE_TASK = vol.Schema(
    {vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_ID): cv.string}
)

SERVICE_SCHEMA_COMPLETE_TASK = vol.Schema({vol.Required(CONF_ID): cv.string})
def setup(hass, config):
    """Set up the Remember the milk component."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, group_name=GROUP_NAME_RTM)

    stored_rtm_config = RememberTheMilkConfiguration(hass)
    for rtm_config in config[DOMAIN]:
        account_name = rtm_config[CONF_NAME]
        _LOGGER.info("Adding Remember the milk account %s", account_name)
        api_key = rtm_config[CONF_API_KEY]
        shared_secret = rtm_config[CONF_SHARED_SECRET]
        # Reuse a previously stored auth token when one exists; otherwise
        # start the interactive configurator flow to obtain one.
        token = stored_rtm_config.get_token(account_name)
        if token:
            _LOGGER.debug("found token for account %s", account_name)
            _create_instance(
                hass,
                account_name,
                api_key,
                shared_secret,
                token,
                stored_rtm_config,
                component,
            )
        else:
            _register_new_account(
                hass, account_name, api_key, shared_secret, stored_rtm_config, component
            )

    _LOGGER.debug("Finished adding all Remember the milk accounts")
    return True
def _create_instance(
    hass, account_name, api_key, shared_secret, token, stored_rtm_config, component
):
    """Create the account entity and register its per-account services."""
    entity = RememberTheMilk(
        account_name, api_key, shared_secret, token, stored_rtm_config
    )
    component.add_entities([entity])
    # Services are namespaced per account, e.g. 'myaccount_create_task'.
    hass.services.register(
        DOMAIN,
        f"{account_name}_create_task",
        entity.create_task,
        schema=SERVICE_SCHEMA_CREATE_TASK,
    )
    hass.services.register(
        DOMAIN,
        f"{account_name}_complete_task",
        entity.complete_task,
        schema=SERVICE_SCHEMA_COMPLETE_TASK,
    )
def _register_new_account(
    hass, account_name, api_key, shared_secret, stored_rtm_config, component
):
    """Start the web-based authentication flow for a new account."""
    configurator = hass.components.configurator
    rtm_api = Rtm(api_key, shared_secret, "write", None)
    auth_url, frob = rtm_api.authenticate_desktop()
    _LOGGER.debug("Sent authentication request to server")
    request_id = None

    def login_completed(_):
        """Handle the user confirming the login in the configurator."""
        rtm_api.retrieve_token(frob)
        token = rtm_api.token
        if token is None:
            _LOGGER.error("Failed to register, please try again")
            configurator.notify_errors(
                request_id, "Failed to register, please try again."
            )
            return
        stored_rtm_config.set_token(account_name, token)
        _LOGGER.debug("Retrieved new token from server")
        _create_instance(
            hass,
            account_name,
            api_key,
            shared_secret,
            token,
            stored_rtm_config,
            component,
        )
        configurator.request_done(request_id)

    request_id = configurator.async_request_config(
        f"{DOMAIN} - {account_name}",
        callback=login_completed,
        description="You need to log in to Remember The Milk to"
        + "connect your account. \n\n"
        + 'Step 1: Click on the link "Remember The Milk login"\n\n'
        + 'Step 2: Click on "login completed"',
        link_name="Remember The Milk login",
        link_url=auth_url,
        submit_caption="login completed",
    )
class RememberTheMilkConfiguration:
    """Internal configuration data for RememberTheMilk class.

    Stores the authentication token received from the backend and the
    mapping between Home Assistant task ids and Remember The Milk id
    triples, persisted as JSON in CONFIG_FILE_NAME inside the Home
    Assistant configuration directory.
    """

    def __init__(self, hass):
        """Create new instance of configuration.

        Loads the existing configuration file if present. Starts with an
        empty configuration if the file is missing or unreadable.
        """
        self._config_file_path = hass.config.path(CONFIG_FILE_NAME)
        if not os.path.isfile(self._config_file_path):
            self._config = {}
            return
        try:
            _LOGGER.debug("Loading configuration from file: %s", self._config_file_path)
            with open(self._config_file_path, encoding="utf-8") as config_file:
                self._config = json.load(config_file)
        # Also catch OSError: an unreadable file should not prevent
        # startup; fall back to a fresh configuration instead.
        except (OSError, ValueError):
            _LOGGER.error(
                "Failed to load configuration file, creating a " "new one: %s",
                self._config_file_path,
            )
            self._config = {}

    def save_config(self):
        """Write the configuration to a file."""
        with open(self._config_file_path, "w", encoding="utf-8") as config_file:
            json.dump(self._config, config_file)

    def get_token(self, profile_name):
        """Get the server token for a profile, or None if unknown."""
        if profile_name in self._config:
            # Use .get: a profile created via set_rtm_id before
            # registration finished has no token entry yet; the previous
            # direct lookup raised KeyError in that case.
            return self._config[profile_name].get(CONF_TOKEN)
        return None

    def set_token(self, profile_name, token):
        """Store a new server token for a profile."""
        self._initialize_profile(profile_name)
        self._config[profile_name][CONF_TOKEN] = token
        self.save_config()

    def delete_token(self, profile_name):
        """Delete a token for a profile.

        Usually called when the token has expired.
        """
        self._config.pop(profile_name, None)
        self.save_config()

    def _initialize_profile(self, profile_name):
        """Initialize the data structures for a profile."""
        self._config.setdefault(profile_name, {}).setdefault(CONF_ID_MAP, {})

    def get_rtm_id(self, profile_name, hass_id):
        """Get the RTM ids for a Home Assistant task ID.

        Returns a (list id, timeseries id, task id) tuple, or None if
        the Home Assistant task id is unknown.
        """
        self._initialize_profile(profile_name)
        ids = self._config[profile_name][CONF_ID_MAP].get(hass_id)
        if ids is None:
            return None
        return ids[CONF_LIST_ID], ids[CONF_TIMESERIES_ID], ids[CONF_TASK_ID]

    def set_rtm_id(self, profile_name, hass_id, list_id, time_series_id, rtm_task_id):
        """Add/Update the RTM task ID for a Home Assistant task ID."""
        self._initialize_profile(profile_name)
        self._config[profile_name][CONF_ID_MAP][hass_id] = {
            CONF_LIST_ID: list_id,
            CONF_TIMESERIES_ID: time_series_id,
            CONF_TASK_ID: rtm_task_id,
        }
        self.save_config()

    def delete_rtm_id(self, profile_name, hass_id):
        """Delete a key mapping."""
        self._initialize_profile(profile_name)
        if hass_id in self._config[profile_name][CONF_ID_MAP]:
            del self._config[profile_name][CONF_ID_MAP][hass_id]
            self.save_config()
class RememberTheMilk(Entity):
    """Representation of an interface to Remember The Milk."""

    def __init__(self, name, api_key, shared_secret, token, rtm_config):
        """Create new instance of Remember The Milk component."""
        self._name = name
        self._api_key = api_key
        self._shared_secret = shared_secret
        self._token = token
        self._rtm_config = rtm_config
        # "delete" permission level is required to complete tasks.
        self._rtm_api = Rtm(api_key, shared_secret, "delete", token)
        self._token_valid = None
        self._check_token()
        _LOGGER.debug("Instance created for account %s", self._name)

    def _check_token(self):
        """Check if the API token is still valid.

        If it is not valid any more, delete it from the configuration. This
        will trigger a new authentication process.
        """
        valid = self._rtm_api.token_valid()
        if not valid:
            _LOGGER.error(
                "Token for account %s is invalid. You need to " "register again!",
                self.name,
            )
            self._rtm_config.delete_token(self._name)
            self._token_valid = False
        else:
            self._token_valid = True
        return self._token_valid

    def create_task(self, call):
        """Create a new task on Remember The Milk.

        You can use the smart syntax to define the attributes of a new task,
        e.g. "my task #some_tag ^today" will add tag "some_tag" and set the
        due date to today. Returns False if the backend request failed.
        """
        try:
            task_name = call.data.get(CONF_NAME)
            hass_id = call.data.get(CONF_ID)
            rtm_id = None
            if hass_id is not None:
                rtm_id = self._rtm_config.get_rtm_id(self._name, hass_id)
            result = self._rtm_api.rtm.timelines.create()
            timeline = result.timeline.value
            if hass_id is None or rtm_id is None:
                # Unknown task: create a new one with smart syntax enabled
                # (parse="1") and remember its RTM id triple.
                result = self._rtm_api.rtm.tasks.add(
                    timeline=timeline, name=task_name, parse="1"
                )
                _LOGGER.debug(
                    "Created new task '%s' in account %s", task_name, self.name
                )
                self._rtm_config.set_rtm_id(
                    self._name,
                    hass_id,
                    result.list.id,
                    result.list.taskseries.id,
                    result.list.taskseries.task.id,
                )
            else:
                # Known task: only rename it.
                self._rtm_api.rtm.tasks.setName(
                    name=task_name,
                    list_id=rtm_id[0],
                    taskseries_id=rtm_id[1],
                    task_id=rtm_id[2],
                    timeline=timeline,
                )
                _LOGGER.debug(
                    "Updated task with id '%s' in account " "%s to name %s",
                    hass_id,
                    self.name,
                    task_name,
                )
        except RtmRequestFailedException as rtm_exception:
            _LOGGER.error(
                "Error creating new Remember The Milk task for " "account %s: %s",
                self._name,
                rtm_exception,
            )
            return False
        return True

    def complete_task(self, call):
        """Complete a task that was previously created by this component.

        Returns False if the task id is unknown or the backend request
        failed.
        """
        hass_id = call.data.get(CONF_ID)
        rtm_id = self._rtm_config.get_rtm_id(self._name, hass_id)
        if rtm_id is None:
            _LOGGER.error(
                "Could not find task with ID %s in account %s. "
                "So task could not be closed",
                hass_id,
                self._name,
            )
            return False
        try:
            result = self._rtm_api.rtm.timelines.create()
            timeline = result.timeline.value
            self._rtm_api.rtm.tasks.complete(
                list_id=rtm_id[0],
                taskseries_id=rtm_id[1],
                task_id=rtm_id[2],
                timeline=timeline,
            )
            self._rtm_config.delete_rtm_id(self._name, hass_id)
            _LOGGER.debug(
                "Completed task with id %s in account %s", hass_id, self._name
            )
        except RtmRequestFailedException as rtm_exception:
            # Message fixed: this path completes a task, it does not create one.
            _LOGGER.error(
                "Error completing Remember The Milk task for " "account %s: %s",
                self._name,
                rtm_exception,
            )
            # Consistent with create_task: report failure to the caller.
            return False
        return True

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        if not self._token_valid:
            return "API token invalid"
        return STATE_OK
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.errors;
import java.util.HashSet;
import java.util.Set;
/**
* The client has attempted to perform an operation on an invalid topic.
* For example the topic name is too long, contains invalid characters etc.
* This exception is not retriable because the operation won't suddenly become valid.
*
* @see UnknownTopicOrPartitionException
*/
public class InvalidTopicException extends InvalidConfigurationException {
private static final long serialVersionUID = 1L;
private final Set<String> invalidTopics;
public InvalidTopicException() {
super();
invalidTopics = new HashSet<>();
}
public InvalidTopicException(String message, Throwable cause) {
super(message, cause);
invalidTopics = new HashSet<>();
}
public InvalidTopicException(String message) {
super(message);
invalidTopics = new HashSet<>();
}
public InvalidTopicException(Throwable cause) {
super(cause);
invalidTopics = new HashSet<>();
}
public InvalidTopicException(Set<String> invalidTopics) {
super("Invalid topics: " + invalidTopics);
this.invalidTopics = invalidTopics;
}
public InvalidTopicException(String message, Set<String> invalidTopics) {
super(message);
this.invalidTopics = invalidTopics;
}
public Set<String> invalidTopics() {
return invalidTopics;
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java |
{
"remainingCount": 5,
"deprecations": [
{
"message": "Some deprecation message.",
"file": "\/path\/to\/some\/file.php",
"line": 39,
"count": 3
},
{
"message": "An other deprecation message.",
"file": "\/path\/to\/an\/other\/file.php",
"line": 25,
"count": 2
}
]
} | json | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/Fixtures/Descriptor/deprecations.json |
"""
awslimitchecker/tests/__init__.py
The latest version of this package is available at:
<https://github.com/jantman/awslimitchecker>
##############################################################################
Copyright 2015-2018 Jason Antman <jason@jasonantman.com>
This file is part of awslimitchecker, also known as awslimitchecker.
awslimitchecker is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
awslimitchecker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with awslimitchecker. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
##############################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/awslimitchecker> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
##############################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
##############################################################################
""" | unknown | codeparrot/codeparrot-clean | ||
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"context"
"errors"
"fmt"
"math"
"slices"
"sort"
"strings"
"time"
"github.com/facette/natsort"
"github.com/grafana/regexp"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/kahansum"
)
// FunctionCall is the type of a PromQL function implementation
//
// vals is a list of the evaluated arguments for the function call.
//
// For range vectors it will be a Matrix with one series, instant vectors a
// Vector, scalars a Vector with one series whose value is the scalar
// value, and nil for strings.
//
// args are the original arguments to the function, where you can access
// matrixSelectors, vectorSelectors, and StringLiterals.
//
// enh.Out is a pre-allocated empty vector that you may use to accumulate
// output before returning it. The vectors in vals should not be returned.
//
// Range vector functions need only return a vector with the right value,
// the metric and timestamp are not needed.
//
// Instant vector functions need only return a vector with the right values and
// metrics, the timestamp are not needed.
//
// Scalar results should be returned as the value of a sample in a Vector.
type FunctionCall func(vectorVals []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)
// === time() float64 ===
func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Evaluation timestamp is in milliseconds; time() reports seconds.
	seconds := float64(enh.Ts) / 1000
	return Vector{Sample{F: seconds}}, nil
}
// pickOrInterpolateLeft returns the value at the left boundary of the range.
// If interpolation is needed (when smoothed is true and the first sample is before the range start),
// it returns the interpolated value at the left boundary; otherwise, it returns the first sample's value.
func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothed, isCounter bool) float64 {
	// Guard clause: no interpolation needed, take the sample as-is.
	if !smoothed || floats[first].T >= rangeStart {
		return floats[first].F
	}
	return interpolate(floats[first], floats[first+1], rangeStart, isCounter)
}
// pickOrInterpolateRight returns the value at the right boundary of the range.
// If interpolation is needed (when smoothed is true and the last sample is after the range end),
// it returns the interpolated value at the right boundary; otherwise, it returns the last sample's value.
func pickOrInterpolateRight(floats []FPoint, last int, rangeEnd int64, smoothed, isCounter bool) float64 {
	// Guard clause (De Morgan of the original condition): take the sample
	// as-is unless smoothed interpolation past the range end is required.
	if !smoothed || last <= 0 || floats[last].T <= rangeEnd {
		return floats[last].F
	}
	return interpolate(floats[last-1], floats[last], rangeEnd, isCounter)
}
// interpolate performs linear interpolation between two points.
// If isCounter is true and there is a counter reset, it models the counter
// as starting from 0 (post-reset) by setting the earlier value to 0.
// It then calculates the interpolated value at the given timestamp.
func interpolate(p1, p2 FPoint, t int64, isCounter bool) float64 {
	start, end := p1.F, p2.F
	if isCounter && end < start {
		// Counter reset: model the counter as restarting from zero.
		start = 0
	}
	// Same operation order as a plain lerp so float results are unchanged.
	return start + (end-start)*float64(t-p1.T)/float64(p2.T-p1.T)
}
// correctForCounterResets calculates the correction for counter resets.
// Every time a value drops below its predecessor (a reset), the pre-reset
// value is added to the correction, so the total increase is preserved.
// This function is only used for extendedRate functions with smoothed or anchored rates.
func correctForCounterResets(left, right float64, points []FPoint) float64 {
	correction := 0.0
	previous := left
	for _, point := range points {
		if point.F < previous {
			correction += previous
		}
		previous = point.F
	}
	if right < previous {
		correction += previous
	}
	return correction
}
// extendedRate is a utility function for anchored/smoothed rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// interpolating the value at the range boundaries if needed, and returns
// the result as either per-second (if isRate is true) or overall.
func extendedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
	var (
		ms              = args[0].(*parser.MatrixSelector)
		vs              = ms.VectorSelector.(*parser.VectorSelector)
		samples         = vals[0]
		f               = samples.Floats
		lastSampleIndex = len(f) - 1
		rangeStart      = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
		rangeEnd        = enh.Ts - durationMilliseconds(vs.Offset)
		annos           annotations.Annotations
		smoothed        = vs.Smoothed
	)
	// Index of the last sample at or before rangeStart (clamped to 0 when
	// the first sample is already inside the range).
	firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return f[i].T > rangeStart })-1)
	if smoothed {
		// For smoothed rates also pick the first sample at or after
		// rangeEnd, so the right boundary can be interpolated.
		lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return f[i].T >= rangeEnd })
	}
	// No sample within the range: nothing to compute.
	if f[lastSampleIndex].T <= rangeStart {
		return enh.Out, annos
	}
	left := pickOrInterpolateLeft(f, firstSampleIndex, rangeStart, smoothed, isCounter)
	right := pickOrInterpolateRight(f, lastSampleIndex, rangeEnd, smoothed, isCounter)
	resultFloat := right - left
	if isCounter {
		// We only need to consider samples exactly within the range
		// for counter resets correction, as pickOrInterpolateLeft and
		// pickOrInterpolateRight already handle the resets at boundaries.
		if f[firstSampleIndex].T <= rangeStart {
			firstSampleIndex++
		}
		if f[lastSampleIndex].T >= rangeEnd {
			lastSampleIndex--
		}
		resultFloat += correctForCounterResets(left, right, f[firstSampleIndex:lastSampleIndex+1])
	}
	if isRate {
		resultFloat /= ms.Range.Seconds()
	}
	return append(enh.Out, Sample{F: resultFloat}), annos
}
// extrapolatedRate is a utility function for rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
//
// Note: If the vector selector is smoothed or anchored, it will use the
// extendedRate function instead.
func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
	ms := args[0].(*parser.MatrixSelector)
	vs := ms.VectorSelector.(*parser.VectorSelector)
	// Anchored/smoothed selectors interpolate at the range boundaries
	// instead of extrapolating; that is handled by extendedRate.
	if vs.Anchored || vs.Smoothed {
		return extendedRate(vals, args, enh, isCounter, isRate)
	}
	var (
		samples            = vals[0]
		rangeStart         = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
		rangeEnd           = enh.Ts - durationMilliseconds(vs.Offset)
		resultFloat        float64
		resultHistogram    *histogram.FloatHistogram
		firstT, lastT      int64
		numSamplesMinusOne int
		annos              annotations.Annotations
	)
	// We need either at least two Histograms and no Floats, or at least two
	// Floats and no Histograms to calculate a rate. Otherwise, drop this
	// Vector element.
	if len(samples.Histograms) > 0 && len(samples.Floats) > 0 {
		return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(samples.Metric), args[0].PositionRange()))
	}
	switch {
	case len(samples.Histograms) > 1:
		numSamplesMinusOne = len(samples.Histograms) - 1
		firstT = samples.Histograms[0].T
		lastT = samples.Histograms[numSamplesMinusOne].T
		var newAnnos annotations.Annotations
		resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, samples.Metric, args[0].PositionRange())
		annos.Merge(newAnnos)
		if resultHistogram == nil {
			// The histograms are not compatible with each other.
			return enh.Out, annos
		}
	case len(samples.Floats) > 1:
		numSamplesMinusOne = len(samples.Floats) - 1
		firstT = samples.Floats[0].T
		lastT = samples.Floats[numSamplesMinusOne].T
		resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F
		if !isCounter {
			break
		}
		// Handle counter resets: each time the series drops, add the
		// pre-reset value back so the total increase is preserved.
		prevValue := samples.Floats[0].F
		for _, currPoint := range samples.Floats[1:] {
			if currPoint.F < prevValue {
				resultFloat += prevValue
			}
			prevValue = currPoint.F
		}
	default:
		// TODO: add RangeTooShortWarning
		return enh.Out, annos
	}
	// Duration between first/last samples and boundary of range.
	durationToStart := float64(firstT-rangeStart) / 1000
	durationToEnd := float64(rangeEnd-lastT) / 1000
	sampledInterval := float64(lastT-firstT) / 1000
	averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne)
	// If samples are close enough to the (lower or upper) boundary of the
	// range, we extrapolate the rate all the way to the boundary in
	// question. "Close enough" is defined as "up to 10% more than the
	// average duration between samples within the range", see
	// extrapolationThreshold below. Essentially, we are assuming a more or
	// less regular spacing between samples, and if we don't see a sample
	// where we would expect one, we assume the series does not cover the
	// whole range, but starts and/or ends within the range. We still
	// extrapolate the rate in this case, but not all the way to the
	// boundary, but only by half of the average duration between samples
	// (which is our guess for where the series actually starts or ends).
	extrapolationThreshold := averageDurationBetweenSamples * 1.1
	if durationToStart >= extrapolationThreshold {
		durationToStart = averageDurationBetweenSamples / 2
	}
	if isCounter {
		// Counters cannot be negative. If we have any slope at all
		// (i.e. resultFloat went up), we can extrapolate the zero point
		// of the counter. If the duration to the zero point is shorter
		// than the durationToStart, we take the zero point as the start
		// of the series, thereby avoiding extrapolation to negative
		// counter values.
		durationToZero := durationToStart
		if resultFloat > 0 &&
			len(samples.Floats) > 0 &&
			samples.Floats[0].F >= 0 {
			durationToZero = sampledInterval * (samples.Floats[0].F / resultFloat)
		} else if resultHistogram != nil &&
			resultHistogram.Count > 0 &&
			len(samples.Histograms) > 0 &&
			samples.Histograms[0].H.Count >= 0 {
			durationToZero = sampledInterval * (samples.Histograms[0].H.Count / resultHistogram.Count)
		}
		if durationToZero < durationToStart {
			durationToStart = durationToZero
		}
	}
	if durationToEnd >= extrapolationThreshold {
		durationToEnd = averageDurationBetweenSamples / 2
	}
	// Scale the raw difference over the sampled interval up to the
	// (possibly adjusted) full range.
	factor := (sampledInterval + durationToStart + durationToEnd) / sampledInterval
	if isRate {
		factor /= ms.Range.Seconds()
	}
	if resultHistogram == nil {
		resultFloat *= factor
	} else {
		resultHistogram.Mul(factor)
	}
	return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos
}
// histogramRate is a helper function for extrapolatedRate. It requires
// points[0] to be a histogram. It returns nil if any other Point in points is
// not a histogram, and a warning wrapped in an annotation in that case.
// Otherwise, it returns the calculated histogram and an empty annotation.
func histogramRate(points []HPoint, isCounter bool, labels labels.Labels, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
	var (
		prev               = points[0].H
		usingCustomBuckets = prev.UsesCustomBuckets()
		last               = points[len(points)-1].H
		annos              annotations.Annotations
	)
	if last == nil {
		return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(labels), pos))
	}
	// We check for gauge type histograms in the loop below, but the loop
	// below does not run on the first and last point, so check the first
	// and last point now.
	if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
		annos.Add(annotations.NewNativeHistogramNotCounterWarning(getMetricName(labels), pos))
	}
	// Null out the 1st sample if there is a counter reset between the 1st
	// and 2nd. In this case, we want to ignore any incompatibility in the
	// bucket layout of the 1st sample because we do not need to look at it.
	if isCounter && len(points) > 1 {
		second := points[1].H
		if second != nil && second.DetectReset(prev) {
			prev = &histogram.FloatHistogram{}
			prev.Schema = second.Schema
			prev.CustomValues = second.CustomValues
			usingCustomBuckets = second.UsesCustomBuckets()
		}
	}
	if last.UsesCustomBuckets() != usingCustomBuckets {
		return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
	}
	// First iteration to find out two things:
	// - What's the smallest relevant schema?
	// - Are all data points histograms?
	minSchema := min(last.Schema, prev.Schema)
	for _, currPoint := range points[1 : len(points)-1] {
		curr := currPoint.H
		if curr == nil {
			return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(labels), pos))
		}
		if !isCounter {
			continue
		}
		if curr.CounterResetHint == histogram.GaugeType {
			annos.Add(annotations.NewNativeHistogramNotCounterWarning(getMetricName(labels), pos))
		}
		if curr.Schema < minSchema {
			minSchema = curr.Schema
		}
		if curr.UsesCustomBuckets() != usingCustomBuckets {
			return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
		}
	}
	// h accumulates last - first, plus reset corrections added below.
	h := last.CopyToSchema(minSchema)
	// This subtraction may deliberately include conflicting counter resets.
	// Counter resets are treated explicitly in this function, so the
	// information about conflicting counter resets is ignored here.
	_, _, nhcbBoundsReconciled, err := h.Sub(prev)
	if err != nil {
		if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
			return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
		}
	}
	if nhcbBoundsReconciled {
		annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(pos, annotations.HistogramSub))
	}
	if isCounter {
		// Second iteration to deal with counter resets.
		for _, currPoint := range points[1:] {
			curr := currPoint.H
			if curr.DetectReset(prev) {
				// Counter reset conflict ignored here for the same reason as above.
				_, _, nhcbBoundsReconciled, err := h.Add(prev)
				if err != nil {
					if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
						return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
					}
				}
				if nhcbBoundsReconciled {
					annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(pos, annotations.HistogramAdd))
				}
			}
			prev = curr
		}
	} else if points[0].H.CounterResetHint != histogram.GaugeType || points[len(points)-1].H.CounterResetHint != histogram.GaugeType {
		annos.Add(annotations.NewNativeHistogramNotGaugeWarning(getMetricName(labels), pos))
	}
	h.CounterResetHint = histogram.GaugeType
	return h.Compact(0), annos
}
// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// delta: difference between first and last value over the range,
// extrapolated to the full window (gauge semantics: no counter-reset
// handling, not per-second).
func funcDelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return extrapolatedRate(matrixVals, args, enh, false, false)
}
// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// rate: per-second increase over the range, with counter-reset handling.
func funcRate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return extrapolatedRate(matrixVals, args, enh, true, true)
}
// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// increase: total increase over the range, with counter-reset handling
// (same as rate but not divided by the range duration).
func funcIncrease(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return extrapolatedRate(matrixVals, args, enh, true, false)
}
// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// irate: instant per-second rate based on the last two samples only.
func funcIrate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return instantValue(matrixVals, args, enh.Out, true)
}
// === idelta(node model.ValMatrix) (Vector, Annotations) ===
// idelta: difference of the last two samples only (gauge semantics).
func funcIdelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return instantValue(matrixVals, args, enh.Out, false)
}
// instantValue computes irate/idelta from the two chronologically last
// samples of the range, merging the separate float and histogram sample
// lists first. isRate selects per-second (irate) vs. raw difference (idelta).
func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) {
	var (
		samples = vals[0]
		ss      = make([]Sample, 0, 2)
		annos   annotations.Annotations
	)
	// No sense in trying to compute a rate without at least two points. Drop
	// this Vector element.
	// TODO: add RangeTooShortWarning
	if len(samples.Floats)+len(samples.Histograms) < 2 {
		return out, nil
	}
	// ss ends up holding the two chronologically last samples overall,
	// in timestamp order.
	// Add the last 2 float samples if they exist.
	for i := max(0, len(samples.Floats)-2); i < len(samples.Floats); i++ {
		ss = append(ss, Sample{
			F: samples.Floats[i].F,
			T: samples.Floats[i].T,
		})
	}
	// Add the last 2 histogram samples into their correct position if they exist.
	for i := max(0, len(samples.Histograms)-2); i < len(samples.Histograms); i++ {
		s := Sample{
			H: samples.Histograms[i].H,
			T: samples.Histograms[i].T,
		}
		switch {
		case len(ss) == 0:
			ss = append(ss, s)
		case len(ss) == 1:
			if s.T < ss[0].T {
				ss = append([]Sample{s}, ss...)
			} else {
				ss = append(ss, s)
			}
		case s.T < ss[0].T:
			// s is older than 1st, so discard it.
		case s.T > ss[1].T:
			// s is newest, so add it as 2nd and make the old 2nd the new 1st.
			ss[0] = ss[1]
			ss[1] = s
		default:
			// In all other cases, we just make s the new 1st.
			// This establishes a correct order, even in the (irregular)
			// case of equal timestamps.
			ss[0] = s
		}
	}
	// Start from the newer sample; its F/H is adjusted below.
	resultSample := ss[1]
	sampledInterval := ss[1].T - ss[0].T
	if sampledInterval == 0 {
		// Avoid dividing by 0.
		return out, nil
	}
	switch {
	case ss[1].H == nil && ss[0].H == nil:
		if !isRate || !(ss[1].F < ss[0].F) {
			// Gauge, or counter without reset, or counter with NaN value.
			resultSample.F = ss[1].F - ss[0].F
		}
		// In case of a counter reset, we leave resultSample at
		// its current value, which is already ss[1].
	case ss[1].H != nil && ss[0].H != nil:
		resultSample.H = ss[1].H.Copy()
		// irate should only be applied to counters.
		if isRate && (ss[1].H.CounterResetHint == histogram.GaugeType || ss[0].H.CounterResetHint == histogram.GaugeType) {
			annos.Add(annotations.NewNativeHistogramNotCounterWarning(getMetricName(samples.Metric), args.PositionRange()))
		}
		// idelta should only be applied to gauges.
		if !isRate && (ss[1].H.CounterResetHint != histogram.GaugeType || ss[0].H.CounterResetHint != histogram.GaugeType) {
			annos.Add(annotations.NewNativeHistogramNotGaugeWarning(getMetricName(samples.Metric), args.PositionRange()))
		}
		if !isRate || !ss[1].H.DetectReset(ss[0].H) {
			// This subtraction may deliberately include conflicting
			// counter resets. Counter resets are treated explicitly
			// in this function, so the information about
			// conflicting counter resets is ignored here.
			_, _, nhcbBoundsReconciled, err := resultSample.H.Sub(ss[0].H)
			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
				return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(samples.Metric), args.PositionRange()))
			}
			if nhcbBoundsReconciled {
				annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args.PositionRange(), annotations.HistogramSub))
			}
		}
		resultSample.H.CounterResetHint = histogram.GaugeType
		resultSample.H.Compact(0)
	default:
		// Mix of a float and a histogram.
		return out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(samples.Metric), args.PositionRange()))
	}
	if isRate {
		// Convert to per-second.
		if resultSample.H == nil {
			resultSample.F /= float64(sampledInterval) / 1000
		} else {
			resultSample.H.Div(float64(sampledInterval) / 1000)
		}
	}
	return append(out, resultSample), annos
}
// calcTrendValue computes the trend component at the given index i in the
// raw data, analogous to the slope of the trend at that point.
// tf is the trend factor, s0 and s1 are the previous and current smoothed
// values, and b is the previous trend estimate (passed through unchanged
// for i == 0).
func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
	if i == 0 {
		return b
	}
	// Weighted blend of the smoothed-value slope and the previous trend.
	return tf*(s1-s0) + (1-tf)*b
}
// Double exponential smoothing is similar to a weighted moving average, where
// historical data has exponentially less influence on the current data. It also
// accounts for trends in data. The smoothing factor (0 < sf < 1) affects how
// historical data will affect the current data. A lower smoothing factor
// increases the influence of historical data. The trend factor (0 < tf < 1)
// affects how trends in historical data will affect the current data. A higher
// trend factor increases the influence of trends. Algorithm taken from
// https://en.wikipedia.org/wiki/Exponential_smoothing .
func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Guard against missing arguments or an empty range.
	if len(vectorVals) < 2 || len(vectorVals[0]) == 0 || len(vectorVals[1]) == 0 || len(matrixVal) == 0 {
		return enh.Out, nil
	}
	samples := matrixVal[0]
	// The smoothing factor argument.
	sf := vectorVals[0][0].F
	// The trend factor argument.
	tf := vectorVals[1][0].F
	// Check that the input parameters are valid.
	if sf <= 0 || sf >= 1 {
		panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
	}
	if tf <= 0 || tf >= 1 {
		panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf))
	}
	l := len(samples.Floats)
	// Can't do the smoothing operation with less than two points.
	if l < 2 {
		// Annotate mix of float and histogram.
		if l == 1 && len(samples.Histograms) > 0 {
			return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
		}
		return enh.Out, nil
	}
	var s0, s1, b float64
	// Set initial values.
	s1 = samples.Floats[0].F
	b = samples.Floats[1].F - samples.Floats[0].F
	// Run the smoothing operation.
	var x, y float64
	for i := 1; i < l; i++ {
		// Scale the raw value against the smoothing factor.
		x = sf * samples.Floats[i].F
		// Scale the last smoothed value with the trend at this point.
		// On the first iteration calcTrendValue returns b unchanged.
		b = calcTrendValue(i-1, tf, s0, s1, b)
		y = (1 - sf) * (s1 + b)
		s0, s1 = s1, x+y
	}
	// Histogram samples within the range are ignored; annotate that fact.
	if len(samples.Histograms) > 0 {
		return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
	}
	return append(enh.Out, Sample{F: s1}), nil
}
// filterFloats drops every histogram sample from v, reusing v's backing
// array (in-place filter).
func filterFloats(v Vector) Vector {
	out := v[:0]
	for _, sample := range v {
		if sample.H != nil {
			continue // skip histogram samples
		}
		out = append(out, sample)
	}
	return out
}
// === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
func funcSort(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Reversing a descending sort that puts NaN first yields an ascending
	// order with NaN at the bottom.
	sorter := vectorByReverseValueHeap(filterFloats(vectorVals[0]))
	sort.Sort(sort.Reverse(sorter))
	return Vector(sorter), nil
}
// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) ===
func funcSortDesc(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Reversing an ascending sort that puts NaN first yields a descending
	// order with NaN at the bottom.
	sorter := vectorByValueHeap(filterFloats(vectorVals[0]))
	sort.Sort(sort.Reverse(sorter))
	return Vector(sorter), nil
}
// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
// Sorts ascending by the given label values (natural string order), breaking
// ties on the full label set for determinism.
func funcSortByLabel(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
	lbls := stringSliceFromArgs(args[1:])
	cmp := func(a, b Sample) int {
		for _, lbl := range lbls {
			v1 := a.Metric.Get(lbl)
			v2 := b.Metric.Get(lbl)
			if v1 == v2 {
				continue
			}
			if natsort.Compare(v1, v2) {
				return -1
			}
			return +1
		}
		// All requested labels equal: fall back to the full label set.
		return labels.Compare(a.Metric, b.Metric)
	}
	slices.SortFunc(vectorVals[0], cmp)
	return vectorVals[0], nil
}
// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
// Sorts descending by the given label values (natural string order), breaking
// ties on the full label set for determinism.
func funcSortByLabelDesc(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
	lbls := stringSliceFromArgs(args[1:])
	cmp := func(a, b Sample) int {
		for _, lbl := range lbls {
			v1 := a.Metric.Get(lbl)
			v2 := b.Metric.Get(lbl)
			if v1 == v2 {
				continue
			}
			if natsort.Compare(v1, v2) {
				return +1
			}
			return -1
		}
		// All requested labels equal: fall back to the full label set, inverted.
		return -labels.Compare(a.Metric, b.Metric)
	}
	slices.SortFunc(vectorVals[0], cmp)
	return vectorVals[0], nil
}
// clamp limits every float sample of vec to [minVal, maxVal]. Histogram
// samples are skipped. An empty range (maxVal < minVal) drops all samples.
func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if maxVal < minVal {
		return enh.Out, nil
	}
	for _, s := range vec {
		if s.H != nil {
			// Process only float samples.
			continue
		}
		m := s.Metric
		if !enh.enableDelayedNameRemoval {
			m = m.DropReserved(schema.IsMetadataLabel)
		}
		enh.Out = append(enh.Out, Sample{
			Metric:   m,
			F:        math.Max(minVal, math.Min(maxVal, s.F)),
			DropName: true,
		})
	}
	return enh.Out, nil
}
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
func funcClamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Both bounds come from the scalar arguments.
	lo := vectorVals[1][0].F
	hi := vectorVals[2][0].F
	return clamp(vectorVals[0], lo, hi, enh)
}
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
func funcClampMax(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Only an upper bound: the lower bound is -Inf.
	hi := vectorVals[1][0].F
	return clamp(vectorVals[0], math.Inf(-1), hi, enh)
}
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
func funcClampMin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Only a lower bound: the upper bound is +Inf.
	lo := vectorVals[1][0].F
	return clamp(vectorVals[0], lo, math.Inf(+1), enh)
}
// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) ===
// Rounds every float sample to the nearest multiple of toNearest; ties round up.
func funcRound(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Optional second argument: the granularity to round to (defaults to 1).
	toNearest := 1.0
	if len(args) >= 2 {
		toNearest = vectorVals[1][0].F
	}
	// Work with the inverse, which empirically causes fewer floating point
	// accuracy issues.
	inv := 1.0 / toNearest
	round := func(f float64) float64 {
		return math.Floor(f*inv+0.5) / inv
	}
	return simpleFloatFunc(vectorVals, enh, round), nil
}
// === Scalar(node parser.ValueTypeVector) Scalar ===
// Returns the single float sample of the input vector, or NaN when there are
// zero or more than one float samples. Histogram samples are ignored.
func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	var (
		result = math.NaN()
		seen   bool
	)
	for _, s := range vectorVals[0] {
		if s.H != nil {
			continue // histograms don't count
		}
		if seen {
			// More than one float found, return NaN.
			return append(enh.Out, Sample{F: math.NaN()}), nil
		}
		seen, result = true, s.F
	}
	// result is still NaN when no float sample was found.
	return append(enh.Out, Sample{F: result}), nil
}
// aggrOverTime applies aggrFn to the first series of matrixVal and appends
// the resulting float as a single sample to enh.Out.
func aggrOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
	if len(matrixVal) == 0 {
		return enh.Out
	}
	return append(enh.Out, Sample{F: aggrFn(matrixVal[0])})
}
// aggrHistOverTime applies aggrFn to the first series of matrixVal and
// appends the resulting histogram as a single sample to enh.Out, passing the
// aggregation error through.
func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	h, err := aggrFn(matrixVal[0])
	return append(enh.Out, Sample{H: h}), err
}
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	firstSeries := matrixVal[0]
	// Mixing floats and histograms within one series is an error.
	if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
		return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
	}
	// We improve the accuracy with the help of Kahan summation.
	// For a while, we assumed that incremental mean calculation combined
	// with Kahan summation (see
	// https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average
	// for inspiration) is generally the preferred solution. However, it
	// then turned out that direct mean calculation (still in combination
	// with Kahan summation) is often more accurate. See discussion in
	// https://github.com/prometheus/prometheus/issues/16714 . The problem
	// with the direct mean calculation is that it can overflow float64 for
	// inputs on which the incremental mean calculation works just fine. Our
	// current approach is therefore to use direct mean calculation as long
	// as we do not overflow (or underflow) the running sum. Once the latter
	// would happen, we switch to incremental mean calculation. This seems
	// to work reasonably well, but note that a deeper understanding would
	// be needed to find out if maybe an earlier switch to incremental mean
	// calculation would be better in terms of accuracy. Also, we could
	// apply a number of additional means to improve the accuracy, like
	// processing the values in a particular order. For now, we decided that
	// the current implementation is accurate enough for practical purposes.
	if len(firstSeries.Floats) == 0 {
		// The passed values only contain histograms.
		var annos annotations.Annotations
		vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) {
			var counterResetSeen, notCounterResetSeen, nhcbBoundsReconciledSeen bool
			// Track which counter reset hints occur so a collision can be
			// reported once the aggregation has finished.
			trackCounterReset := func(h *histogram.FloatHistogram) {
				switch h.CounterResetHint {
				case histogram.CounterReset:
					counterResetSeen = true
				case histogram.NotCounterReset:
					notCounterResetSeen = true
				}
			}
			// Emit the annotations collected during the aggregation.
			defer func() {
				if counterResetSeen && notCounterResetSeen {
					annos.Add(annotations.NewHistogramCounterResetCollisionWarning(args[0].PositionRange(), annotations.HistogramAgg))
				}
				if nhcbBoundsReconciledSeen {
					annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args[0].PositionRange(), annotations.HistogramAgg))
				}
			}()
			var (
				sum                  = s.Histograms[0].H.Copy()
				mean, kahanC         *histogram.FloatHistogram
				count                = 1.
				incrementalMean      bool
				nhcbBoundsReconciled bool
				err                  error
			)
			trackCounterReset(sum)
			for i, h := range s.Histograms[1:] {
				trackCounterReset(h.H)
				count = float64(i + 2)
				if !incrementalMean {
					// Direct mean: keep a Kahan-compensated running sum as
					// long as it does not overflow. Work on copies so that a
					// switch to incremental mode can still use the last
					// non-overflowing sum.
					sumCopy := sum.Copy()
					var cCopy *histogram.FloatHistogram
					if kahanC != nil {
						cCopy = kahanC.Copy()
					}
					cCopy, _, nhcbBoundsReconciled, err = sumCopy.KahanAdd(h.H, cCopy)
					if err != nil {
						return sumCopy.Div(count), err
					}
					if nhcbBoundsReconciled {
						nhcbBoundsReconciledSeen = true
					}
					if !sumCopy.HasOverflow() {
						sum, kahanC = sumCopy, cCopy
						continue
					}
					// The sum would overflow: switch to incremental mean
					// calculation, seeded from the previous sum.
					incrementalMean = true
					mean = sum.Copy().Div(count - 1)
					if kahanC != nil {
						kahanC.Div(count - 1)
					}
				}
				// Incremental mean update: mean = mean*q + h/count,
				// with q = (count-1)/count, Kahan-compensated.
				q := (count - 1) / count
				if kahanC != nil {
					kahanC.Mul(q)
				}
				toAdd := h.H.Copy().Div(count)
				kahanC, _, nhcbBoundsReconciled, err = mean.Mul(q).KahanAdd(toAdd, kahanC)
				if err != nil {
					return mean, err
				}
				if nhcbBoundsReconciled {
					nhcbBoundsReconciledSeen = true
				}
			}
			// Fold the Kahan compensation term back into the result.
			if incrementalMean {
				if kahanC != nil {
					_, _, _, err := mean.Add(kahanC)
					return mean, err
				}
				return mean, nil
			}
			if kahanC != nil {
				_, _, _, err := sum.Div(count).Add(kahanC.Div(count))
				return sum, err
			}
			return sum.Div(count), nil
		})
		if err != nil {
			// Only schema incompatibility is surfaced; it maps to a warning.
			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
			}
		}
		return vec, annos
	}
	return aggrOverTime(matrixVal, enh, func(s Series) float64 {
		var (
			// Pre-set the 1st sample to start the loop with the 2nd.
			sum, count      = s.Floats[0].F, 1.
			mean, kahanC    float64
			incrementalMean bool
		)
		for i, f := range s.Floats[1:] {
			count = float64(i + 2)
			if !incrementalMean {
				newSum, newC := kahansum.Inc(f.F, sum, kahanC)
				// Perform regular mean calculation as long as
				// the sum doesn't overflow.
				if !math.IsInf(newSum, 0) {
					sum, kahanC = newSum, newC
					continue
				}
				// Handle overflow by reverting to incremental
				// calculation of the mean value.
				incrementalMean = true
				mean = sum / (count - 1)
				kahanC /= (count - 1)
			}
			q := (count - 1) / count
			mean, kahanC = kahansum.Inc(f.F/count, q*mean, q*kahanC)
		}
		if incrementalMean {
			return mean + kahanC
		}
		return sum/count + kahanC/count
	}), nil
}
// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
// Counts float and histogram samples alike.
func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	count := func(s Series) float64 {
		return float64(len(s.Floats) + len(s.Histograms))
	}
	return aggrOverTime(matrixVals, enh, count), nil
}
// === first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
// Returns the oldest sample in the range, be it a float or a histogram.
func funcFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	series := matrixVal[0]
	var firstF FPoint
	if len(series.Floats) > 0 {
		firstF = series.Floats[0]
	}
	var firstH HPoint
	if len(series.Histograms) > 0 {
		firstH = series.Histograms[0]
	}
	// Pick the float sample when there is no histogram, or when a float
	// exists and is older than the first histogram.
	if firstH.H == nil || (len(series.Floats) > 0 && firstF.T < firstH.T) {
		return append(enh.Out, Sample{
			Metric: series.Metric,
			F:      firstF.F,
		}), nil
	}
	return append(enh.Out, Sample{
		Metric: series.Metric,
		H:      firstH.H.Copy(),
	}), nil
}
// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
// Returns the newest sample in the range, be it a float or a histogram.
func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	series := matrixVal[0]
	var lastF FPoint
	if n := len(series.Floats); n > 0 {
		lastF = series.Floats[n-1]
	}
	var lastH HPoint
	if n := len(series.Histograms); n > 0 {
		lastH = series.Histograms[n-1]
	}
	// Pick the float sample when there is no histogram, or when a float
	// exists and the last histogram is older than it.
	if lastH.H == nil || (len(series.Floats) > 0 && lastH.T < lastF.T) {
		return append(enh.Out, Sample{
			Metric: series.Metric,
			F:      lastF.F,
		}), nil
	}
	return append(enh.Out, Sample{
		Metric: series.Metric,
		H:      lastH.H.Copy(),
	}), nil
}
// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Median absolute deviation of the float samples; histograms are ignored
// (with an info annotation).
func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	series := matrixVal[0]
	if len(series.Floats) == 0 {
		return enh.Out, nil
	}
	var annos annotations.Annotations
	if len(series.Histograms) > 0 {
		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(series.Metric), args[0].PositionRange()))
	}
	mad := func(s Series) float64 {
		values := make(vectorByValueHeap, 0, len(s.Floats))
		for _, p := range s.Floats {
			values = append(values, Sample{F: p.F})
		}
		median := quantile(0.5, values)
		// Second pass: median of the absolute deviations from the median.
		deviations := make(vectorByValueHeap, 0, len(s.Floats))
		for _, p := range s.Floats {
			deviations = append(deviations, Sample{F: math.Abs(p.F - median)})
		}
		return quantile(0.5, deviations)
	}
	return aggrOverTime(matrixVal, enh, mad), annos
}
// === ts_of_first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
// Returns the timestamp (in seconds) of the oldest sample in the range.
func funcTsOfFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	series := matrixVal[0]
	// MaxInt64 acts as "no sample of this kind", so it never wins the min
	// when the other kind has samples.
	firstFloatT := int64(math.MaxInt64)
	if len(series.Floats) > 0 {
		firstFloatT = series.Floats[0].T
	}
	firstHistT := int64(math.MaxInt64)
	if len(series.Histograms) > 0 {
		firstHistT = series.Histograms[0].T
	}
	return append(enh.Out, Sample{
		Metric: series.Metric,
		F:      float64(min(firstFloatT, firstHistT)) / 1000,
	}), nil
}
// === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
// Returns the timestamp (in seconds) of the newest sample in the range.
func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	series := matrixVal[0]
	// Timestamps default to 0 when a sample kind is absent.
	var lastFloatT, lastHistT int64
	if n := len(series.Floats); n > 0 {
		lastFloatT = series.Floats[n-1].T
	}
	if n := len(series.Histograms); n > 0 {
		lastHistT = series.Histograms[n-1].T
	}
	return append(enh.Out, Sample{
		Metric: series.Metric,
		F:      float64(max(lastFloatT, lastHistT)) / 1000,
	}), nil
}
// === ts_of_max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Timestamp of the last maximum; a NaN incumbent is always replaced.
func funcTsOfMaxOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	beatsMax := func(cur, best float64) bool {
		return cur >= best || math.IsNaN(best)
	}
	return compareOverTime(matrixVal, args, enh, beatsMax, true)
}
// === ts_of_min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Timestamp of the last minimum; a NaN incumbent is always replaced.
func funcTsOfMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	beatsMin := func(cur, best float64) bool {
		return cur <= best || math.IsNaN(best)
	}
	return compareOverTime(matrixVals, args, enh, beatsMin, true)
}
// compareOverTime is a helper used by funcMaxOverTime, funcMinOverTime,
// funcTsOfMaxOverTime, and funcTsOfMinOverTime. compareFn reports whether the
// current float beats the best one seen so far; with returnTimestamp set, the
// winning sample's timestamp (in seconds) is returned instead of its value.
// Histogram samples are ignored (with an info annotation).
func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	samples := matrixVal[0]
	var annos annotations.Annotations
	if len(samples.Floats) == 0 {
		return enh.Out, nil
	}
	if len(samples.Histograms) > 0 {
		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
	}
	return aggrOverTime(matrixVal, enh, func(s Series) float64 {
		// Seed with the first sample; the loop revisits it, which is harmless.
		maxVal := s.Floats[0].F
		tsOfMax := s.Floats[0].T
		for _, f := range s.Floats {
			if compareFn(f.F, maxVal) {
				maxVal = f.F
				tsOfMax = f.T
			}
		}
		if returnTimestamp {
			// Milliseconds to seconds.
			return float64(tsOfMax) / 1000
		}
		return maxVal
	}), annos
}
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Maximum float sample; a NaN incumbent is always replaced.
func funcMaxOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	beatsMax := func(cur, best float64) bool {
		return cur > best || math.IsNaN(best)
	}
	return compareOverTime(matrixVals, args, enh, beatsMax, false)
}
// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Minimum float sample; a NaN incumbent is always replaced.
func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	beatsMin := func(cur, best float64) bool {
		return cur < best || math.IsNaN(best)
	}
	return compareOverTime(matrixVals, args, enh, beatsMin, false)
}
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	firstSeries := matrixVal[0]
	// Mixing floats and histograms within one series is an error.
	if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
		return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
	}
	if len(firstSeries.Floats) == 0 {
		// The passed values only contain histograms.
		var annos annotations.Annotations
		vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) {
			var counterResetSeen, notCounterResetSeen, nhcbBoundsReconciledSeen bool
			// Track which counter reset hints occur so a collision can be
			// reported once the aggregation has finished.
			trackCounterReset := func(h *histogram.FloatHistogram) {
				switch h.CounterResetHint {
				case histogram.CounterReset:
					counterResetSeen = true
				case histogram.NotCounterReset:
					notCounterResetSeen = true
				}
			}
			// Emit the annotations collected during the aggregation.
			defer func() {
				if counterResetSeen && notCounterResetSeen {
					annos.Add(annotations.NewHistogramCounterResetCollisionWarning(args[0].PositionRange(), annotations.HistogramAgg))
				}
				if nhcbBoundsReconciledSeen {
					annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args[0].PositionRange(), annotations.HistogramAgg))
				}
			}()
			sum := s.Histograms[0].H.Copy()
			trackCounterReset(sum)
			var (
				comp                 *histogram.FloatHistogram
				nhcbBoundsReconciled bool
				err                  error
			)
			// Kahan-compensated summation of the remaining histograms.
			for _, h := range s.Histograms[1:] {
				trackCounterReset(h.H)
				comp, _, nhcbBoundsReconciled, err = sum.KahanAdd(h.H, comp)
				if err != nil {
					return sum, err
				}
				if nhcbBoundsReconciled {
					nhcbBoundsReconciledSeen = true
				}
			}
			// Fold the Kahan compensation term back into the sum.
			if comp != nil {
				sum, _, nhcbBoundsReconciled, err = sum.Add(comp)
				if err != nil {
					return sum, err
				}
				if nhcbBoundsReconciled {
					nhcbBoundsReconciledSeen = true
				}
			}
			return sum, err
		})
		if err != nil {
			// Only schema incompatibility is surfaced; it maps to a warning.
			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
			}
		}
		return vec, annos
	}
	// Float samples: Kahan-compensated sum.
	return aggrOverTime(matrixVal, enh, func(s Series) float64 {
		var sum, c float64
		for _, f := range s.Floats {
			sum, c = kahansum.Inc(f.F, sum, c)
		}
		// The compensation term is meaningless for an infinite sum.
		if math.IsInf(sum, 0) {
			return sum
		}
		return sum + c
	}), nil
}
// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Quantile q of the float samples in the range; histograms are ignored (with
// an info annotation), and an out-of-range q yields a warning annotation.
func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(vectorVals) == 0 || len(vectorVals[0]) == 0 || len(matrixVal) == 0 {
		return enh.Out, nil
	}
	q := vectorVals[0][0].F
	series := matrixVal[0]
	if len(series.Floats) == 0 {
		return enh.Out, nil
	}
	var annos annotations.Annotations
	if math.IsNaN(q) || q < 0 || q > 1 {
		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
	}
	if len(series.Histograms) > 0 {
		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(series.Metric), args[0].PositionRange()))
	}
	values := make(vectorByValueHeap, 0, len(series.Floats))
	for _, p := range series.Floats {
		values = append(values, Sample{F: p.F})
	}
	return append(enh.Out, Sample{F: quantile(q, values)}), annos
}
// varianceOverTime computes the population variance of the float samples in
// the first series of matrixVal and, if varianceToResult is non-nil, feeds the
// variance through it before returning. Histogram samples are ignored (with an
// info annotation). It backs stdvar_over_time (varianceToResult == nil) and
// stddev_over_time (varianceToResult == math.Sqrt).
func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	samples := matrixVal[0]
	var annos annotations.Annotations
	if len(samples.Floats) == 0 {
		return enh.Out, nil
	}
	if len(samples.Histograms) > 0 {
		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
	}
	return aggrOverTime(matrixVal, enh, func(s Series) float64 {
		var count float64
		var mean, cMean float64 // running mean plus its Kahan compensation
		var aux, cAux float64   // sum of squared deviations plus its Kahan compensation
		// Streaming (Welford-style) variance with Kahan-compensated accumulators.
		for _, f := range s.Floats {
			count++
			delta := f.F - (mean + cMean)
			mean, cMean = kahansum.Inc(delta/count, mean, cMean)
			aux, cAux = kahansum.Inc(delta*(f.F-(mean+cMean)), aux, cAux)
		}
		variance := (aux + cAux) / count
		if varianceToResult == nil {
			return variance
		}
		return varianceToResult(variance)
	}), annos
}
// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Standard deviation = sqrt of the variance.
func funcStddevOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	vec, annos := varianceOverTime(matrixVals, args, enh, math.Sqrt)
	return vec, annos
}
// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// A nil transform makes varianceOverTime report the variance itself.
func funcStdvarOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	vec, annos := varianceOverTime(matrixVals, args, enh, nil)
	return vec, annos
}
// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Emits a single 1-valued sample only when the input vector is empty.
func funcAbsent(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(vectorVals[0]) != 0 {
		// Input has samples: absent() yields nothing.
		return enh.Out, nil
	}
	marker := Sample{
		Metric: createLabelsForAbsentFunction(args[0]),
		F:      1,
	}
	return append(enh.Out, marker), nil
}
// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
// As this function has a matrix as argument, it does not get all the Series.
// This function will return 1 if the matrix has at least one element.
// Due to engine optimization, this function is only called when this condition is true.
// Then, the engine post-processes the results to get the expected output.
func funcAbsentOverTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := append(enh.Out, Sample{F: 1})
	return out, nil
}
// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
// Any non-empty range maps to the constant 1.
func funcPresentOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	one := func(Series) float64 { return 1 }
	return aggrOverTime(matrixVals, enh, one), nil
}
// simpleFloatFunc applies f to every float sample of the first input vector,
// appending the results to enh.Out. Histogram samples are skipped.
func simpleFloatFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(float64) float64) Vector {
	for _, s := range vectorVals[0] {
		if s.H != nil {
			// Process only float samples.
			continue
		}
		m := s.Metric
		if !enh.enableDelayedNameRemoval {
			m = m.DropReserved(schema.IsMetadataLabel)
		}
		enh.Out = append(enh.Out, Sample{
			Metric:   m,
			F:        f(s.F),
			DropName: true,
		})
	}
	return enh.Out
}
// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Absolute value per float sample; histograms are skipped by simpleFloatFunc.
func funcAbs(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Abs)
	return out, nil
}
// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Rounds every float sample up to the nearest integer.
func funcCeil(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Ceil)
	return out, nil
}
// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Rounds every float sample down to the nearest integer.
func funcFloor(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Floor)
	return out, nil
}
// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// e**x per float sample.
func funcExp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Exp)
	return out, nil
}
// === sqrt(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Square root per float sample.
func funcSqrt(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Sqrt)
	return out, nil
}
// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Natural logarithm per float sample.
func funcLn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Log)
	return out, nil
}
// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Base-2 logarithm per float sample.
func funcLog2(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Log2)
	return out, nil
}
// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Base-10 logarithm per float sample.
func funcLog10(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Log10)
	return out, nil
}
// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Sine (radians) per float sample.
func funcSin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Sin)
	return out, nil
}
// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Cosine (radians) per float sample.
func funcCos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Cos)
	return out, nil
}
// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Tangent (radians) per float sample.
func funcTan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Tan)
	return out, nil
}
// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Arcsine per float sample.
func funcAsin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Asin)
	return out, nil
}
// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Arccosine per float sample.
func funcAcos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Acos)
	return out, nil
}
// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Arctangent per float sample.
func funcAtan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Atan)
	return out, nil
}
// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Hyperbolic sine per float sample.
func funcSinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Sinh)
	return out, nil
}
// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Hyperbolic cosine per float sample.
func funcCosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Cosh)
	return out, nil
}
// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Hyperbolic tangent per float sample.
func funcTanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Tanh)
	return out, nil
}
// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Inverse hyperbolic sine per float sample.
func funcAsinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Asinh)
	return out, nil
}
// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Inverse hyperbolic cosine per float sample.
func funcAcosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Acosh)
	return out, nil
}
// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Inverse hyperbolic tangent per float sample.
func funcAtanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out := simpleFloatFunc(vectorVals, enh, math.Atanh)
	return out, nil
}
// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Degrees to radians per float sample.
func funcRad(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	toRadians := func(v float64) float64 {
		return v * math.Pi / 180
	}
	return simpleFloatFunc(vectorVals, enh, toRadians), nil
}
// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Radians to degrees per float sample.
func funcDeg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	toDegrees := func(v float64) float64 {
		return v * 180 / math.Pi
	}
	return simpleFloatFunc(vectorVals, enh, toDegrees), nil
}
// === pi() Scalar ===
// The constant pi as a single-sample vector.
func funcPi([]Vector, Matrix, parser.Expressions, *EvalNodeHelper) (Vector, annotations.Annotations) {
	pi := Sample{F: math.Pi}
	return Vector{pi}, nil
}
// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Sign per float sample: -1 for negatives, +1 for positives; anything else
// (0, -0, NaN) passes through unchanged.
func funcSgn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	sign := func(v float64) float64 {
		if v < 0 {
			return -1
		}
		if v > 0 {
			return 1
		}
		return v
	}
	return simpleFloatFunc(vectorVals, enh, sign), nil
}
// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Replaces each sample's value with its own timestamp in seconds.
func funcTimestamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	for _, s := range vectorVals[0] {
		m := s.Metric
		if !enh.enableDelayedNameRemoval {
			m = m.DropReserved(schema.IsMetadataLabel)
		}
		enh.Out = append(enh.Out, Sample{
			Metric:   m,
			F:        float64(s.T) / 1000,
			DropName: true,
		})
	}
	return enh.Out, nil
}
// linearRegression performs a least-square linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time. All sums are Kahan-compensated, and x values are taken as
// seconds relative to interceptTime to reduce floating point error.
func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) {
	var (
		n          float64
		sumX, cX   float64
		sumY, cY   float64
		sumXY, cXY float64
		sumX2, cX2 float64
		initY      float64
		constY     bool
	)
	initY = samples[0].F
	constY = true
	for i, sample := range samples {
		// Set constY to false if any new y values are encountered.
		if constY && i > 0 && sample.F != initY {
			constY = false
		}
		n += 1.0
		// x is the sample time relative to interceptTime, in seconds.
		x := float64(sample.T-interceptTime) / 1e3
		sumX, cX = kahansum.Inc(x, sumX, cX)
		sumY, cY = kahansum.Inc(sample.F, sumY, cY)
		sumXY, cXY = kahansum.Inc(x*sample.F, sumXY, cXY)
		sumX2, cX2 = kahansum.Inc(x*x, sumX2, cX2)
	}
	// Shortcut for a constant series: slope 0, intercept the constant value —
	// except that an all-Inf series has no meaningful regression.
	if constY {
		if math.IsInf(initY, 0) {
			return math.NaN(), math.NaN()
		}
		return 0, initY
	}
	// Fold in the Kahan compensation terms.
	sumX += cX
	sumY += cY
	sumXY += cXY
	sumX2 += cX2
	// Standard closed-form least squares.
	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}
// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
// Returns the per-second derivative of the range, estimated by simple linear
// regression over the float samples. Histogram samples are ignored (with an
// info annotation when mixed with floats).
func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	samples := matrixVal[0]
	// No sense in trying to compute a derivative without at least two float points.
	// Drop this Vector element.
	if len(samples.Floats) < 2 {
		// Annotate mix of float and histogram.
		if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
			return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
		}
		return enh.Out, nil
	}
	// We pass in an arbitrary timestamp that is near the values in use
	// to avoid floating point accuracy issues, see
	// https://github.com/prometheus/prometheus/issues/2674
	slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
	// Histograms in the range did not contribute to the regression; say so.
	if len(samples.Histograms) > 0 {
		return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
	}
	return append(enh.Out, Sample{F: slope}), nil
}
// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) ===
// Extrapolates the linear regression of the range `duration` seconds past the
// evaluation timestamp. Histogram samples are ignored (with an info
// annotation when mixed with floats).
func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(vectorVals) == 0 || len(vectorVals[0]) == 0 || len(matrixVal) == 0 {
		return enh.Out, nil
	}
	samples := matrixVal[0]
	duration := vectorVals[0][0].F // Seconds into the future to predict.
	// No sense in trying to predict anything without at least two float points.
	// Drop this Vector element.
	if len(samples.Floats) < 2 {
		// Annotate mix of float and histogram.
		if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
			return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
		}
		return enh.Out, nil
	}
	// Intercept is taken at the evaluation timestamp, so the prediction is
	// simply slope*duration + intercept.
	slope, intercept := linearRegression(samples.Floats, enh.Ts)
	if len(samples.Histograms) > 0 {
		return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
	}
	return append(enh.Out, Sample{F: slope*duration + intercept}), nil
}
// simpleHistogramFunc applies f to every native-histogram sample in the first
// vector argument and appends the resulting float samples to enh.Out.
// Pure float samples are skipped.
func simpleHistogramFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector {
	for _, sample := range vectorVals[0] {
		if sample.H == nil {
			// Not a histogram sample; ignore.
			continue
		}
		metric := sample.Metric
		if !enh.enableDelayedNameRemoval {
			metric = metric.DropReserved(func(n string) bool { return n == labels.MetricName })
		}
		enh.Out = append(enh.Out, Sample{
			Metric:   metric,
			F:        f(sample.H),
			DropName: true,
		})
	}
	return enh.Out
}
// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramCount(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	count := func(h *histogram.FloatHistogram) float64 { return h.Count }
	return simpleHistogramFunc(vectorVals, enh, count), nil
}
// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramSum(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	sum := func(h *histogram.FloatHistogram) float64 { return h.Sum }
	return simpleHistogramFunc(vectorVals, enh, sum), nil
}
// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramAvg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	avg := func(h *histogram.FloatHistogram) float64 { return h.Sum / h.Count }
	return simpleHistogramFunc(vectorVals, enh, avg), nil
}
// histogramVariance computes a bucket-based variance estimate for each native
// histogram in the input vector. If varianceToResult is non-nil, it is
// applied to the variance before the value is emitted (e.g. math.Sqrt to turn
// variance into standard deviation).
func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
	return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 {
		mean := h.Sum / h.Count
		var variance, cVariance float64 // Kahan-compensated sum of squared deviations.
		it := h.AllBucketIterator()
		for it.Next() {
			bucket := it.At()
			if bucket.Count == 0 {
				continue
			}
			// val is the representative value assumed for all observations
			// that fell into this bucket.
			var val float64
			switch {
			case h.UsesCustomBuckets():
				// Use arithmetic mean in case of custom buckets.
				val = (bucket.Upper + bucket.Lower) / 2.0
			case bucket.Lower <= 0 && bucket.Upper >= 0:
				// Use zero (effectively the arithmetic mean) in the zero bucket of a standard exponential histogram.
				val = 0
			default:
				// Use geometric mean in case of standard exponential buckets.
				val = math.Sqrt(bucket.Upper * bucket.Lower)
				if bucket.Upper < 0 {
					val = -val
				}
			}
			delta := val - mean
			variance, cVariance = kahansum.Inc(bucket.Count*delta*delta, variance, cVariance)
		}
		variance += cVariance
		variance /= h.Count
		if varianceToResult != nil {
			variance = varianceToResult(variance)
		}
		return variance
	}), nil
}
// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// Standard deviation is the square root of the variance.
func funcHistogramStdDev(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out, annos := histogramVariance(vectorVals, enh, math.Sqrt)
	return out, annos
}
// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
// A nil transform leaves the raw variance untouched.
func funcHistogramStdVar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	out, annos := histogramVariance(vectorVals, enh, nil)
	return out, annos
}
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Guard against missing arguments or empty scalar vectors.
	if len(vectorVals) < 3 || len(vectorVals[0]) == 0 || len(vectorVals[1]) == 0 {
		return enh.Out, nil
	}
	lower := vectorVals[0][0].F
	upper := vectorVals[1][0].F
	inVec := vectorVals[2]
	// resetHistograms populates enh.nativeHistogramSamples and
	// enh.signatureToMetricWithBuckets from inVec (presumably also detecting
	// native/classic conflicts — see the conflict check below).
	annos := enh.resetHistograms(inVec, args[2])
	// Deal with the native histograms.
	for _, sample := range enh.nativeHistogramSamples {
		if sample.H == nil {
			// Native histogram conflicts with classic histogram at the same timestamp, ignore.
			continue
		}
		if !enh.enableDelayedNameRemoval {
			sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
		}
		hf, hfAnnos := HistogramFraction(lower, upper, sample.H, getMetricName(sample.Metric), args[0].PositionRange())
		annos.Merge(hfAnnos)
		enh.Out = append(enh.Out, Sample{
			Metric:   sample.Metric,
			F:        hf,
			DropName: true,
		})
	}
	// Deal with classic histograms that have already been filtered for conflicting native histograms.
	for _, mb := range enh.signatureToMetricWithBuckets {
		if len(mb.buckets) == 0 {
			continue
		}
		if !enh.enableDelayedNameRemoval {
			mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
		}
		enh.Out = append(enh.Out, Sample{
			Metric:   mb.metric,
			F:        BucketFraction(lower, upper, mb.buckets),
			DropName: true,
		})
	}
	return enh.Out, annos
}
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	// Guard against missing arguments or an empty scalar vector.
	if len(vectorVals) < 2 || len(vectorVals[0]) == 0 {
		return enh.Out, nil
	}
	q := vectorVals[0][0].F
	inVec := vectorVals[1]
	var annos annotations.Annotations
	// An out-of-range quantile still evaluates, but is flagged with a warning.
	if math.IsNaN(q) || q < 0 || q > 1 {
		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
	}
	annos.Merge(enh.resetHistograms(inVec, args[1]))
	// Deal with the native histograms.
	for _, sample := range enh.nativeHistogramSamples {
		if sample.H == nil {
			// Native histogram conflicts with classic histogram at the same timestamp, ignore.
			continue
		}
		if !enh.enableDelayedNameRemoval {
			sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
		}
		hq, hqAnnos := HistogramQuantile(q, sample.H, getMetricName(sample.Metric), args[0].PositionRange())
		annos.Merge(hqAnnos)
		enh.Out = append(enh.Out, Sample{
			Metric:   sample.Metric,
			F:        hq,
			DropName: true,
		})
	}
	// Deal with classic histograms that have already been filtered for conflicting native histograms.
	for _, mb := range enh.signatureToMetricWithBuckets {
		if len(mb.buckets) > 0 {
			quantile, forcedMonotonicity, _, minBucket, maxBucket, maxDiff := BucketQuantile(q, mb.buckets)
			if forcedMonotonicity {
				// Non-monotonic buckets were forced monotonic; surface this as
				// an info annotation.
				metricName := ""
				if enh.enableDelayedNameRemoval {
					metricName = getMetricName(mb.metric)
				}
				annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(metricName, args[1].PositionRange(), enh.Ts, minBucket, maxBucket, maxDiff))
			}
			if !enh.enableDelayedNameRemoval {
				mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
			}
			enh.Out = append(enh.Out, Sample{
				Metric:   mb.metric,
				F:        quantile,
				DropName: true,
			})
		}
	}
	return enh.Out, annos
}
// pickFirstSampleIndex returns the index of the last sample before
// or at the range start, or 0 if none exist before the range start.
// If the vector selector is not anchored, it always returns 0, true.
// The second return value is false if there are no samples in range (for anchored selectors).
func pickFirstSampleIndex(floats []FPoint, args parser.Expressions, enh *EvalNodeHelper) (int, bool) {
	// args[0] is assumed to be a matrix selector over a plain vector selector
	// for this helper's callers (resets/changes) — the type assertions panic
	// otherwise.
	ms := args[0].(*parser.MatrixSelector)
	vs := ms.VectorSelector.(*parser.VectorSelector)
	if !vs.Anchored {
		return 0, true
	}
	rangeStart := enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
	// No samples at all, or even the newest sample is at/before the range
	// start: nothing usable in range.
	if len(floats) == 0 || floats[len(floats)-1].T <= rangeStart {
		return 0, false
	}
	// Find the first sample strictly after rangeStart, then step one back to
	// include the sample at or before the boundary, clamping at index 0.
	return max(0, sort.Search(len(floats)-1, func(i int) bool { return floats[i].T > rangeStart })-1), true
}
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Counts counter resets in the range, merging float and histogram samples in
// timestamp order.
func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	floats := matrixVal[0].Floats
	histograms := matrixVal[0].Histograms
	resets := 0
	if len(floats) == 0 && len(histograms) == 0 {
		return enh.Out, nil
	}
	var prevSample, curSample Sample
	// For anchored selectors, start at the last float sample at or before the
	// range start; found is false if the range contains no usable samples.
	firstSampleIndex, found := pickFirstSampleIndex(floats, args, enh)
	if !found {
		return enh.Out, nil
	}
	// Merge-iterate floats and histograms by ascending timestamp.
	for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
		switch {
		// Process a float sample if no histogram sample remains or its timestamp is earlier.
		// Process a histogram sample if no float sample remains or its timestamp is earlier.
		case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T:
			curSample.F = floats[iFloat].F
			curSample.H = nil
			iFloat++
		case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T:
			curSample.H = histograms[iHistogram].H
			iHistogram++
		}
		// Skip the comparison for the first sample, just initialize prevSample.
		if iFloat+iHistogram == 1+firstSampleIndex {
			prevSample = curSample
			continue
		}
		switch {
		case prevSample.H == nil && curSample.H == nil:
			// Float followed by float: any decrease counts as a reset.
			if curSample.F < prevSample.F {
				resets++
			}
		case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
			// A change of sample type (float <-> histogram) counts as a reset.
			resets++
		case prevSample.H != nil && curSample.H != nil:
			if curSample.H.DetectReset(prevSample.H) {
				resets++
			}
		}
		prevSample = curSample
	}
	return append(enh.Out, Sample{F: float64(resets)}), nil
}
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
// Counts value changes in the range, merging float and histogram samples in
// timestamp order. Structured like funcResets, but compares for inequality
// rather than for counter resets.
func funcChanges(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	if len(matrixVal) == 0 {
		return enh.Out, nil
	}
	floats := matrixVal[0].Floats
	histograms := matrixVal[0].Histograms
	changes := 0
	if len(floats) == 0 && len(histograms) == 0 {
		return enh.Out, nil
	}
	var prevSample, curSample Sample
	// For anchored selectors, start at the last float sample at or before the
	// range start; found is false if the range contains no usable samples.
	firstSampleIndex, found := pickFirstSampleIndex(floats, args, enh)
	if !found {
		return enh.Out, nil
	}
	// Merge-iterate floats and histograms by ascending timestamp.
	for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
		switch {
		// Process a float sample if no histogram sample remains or its timestamp is earlier.
		// Process a histogram sample if no float sample remains or its timestamp is earlier.
		case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T:
			curSample.F = floats[iFloat].F
			curSample.H = nil
			iFloat++
		case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T:
			curSample.H = histograms[iHistogram].H
			iHistogram++
		}
		// Skip the comparison for the first sample, just initialize prevSample.
		if iFloat+iHistogram == 1+firstSampleIndex {
			prevSample = curSample
			continue
		}
		switch {
		case prevSample.H == nil && curSample.H == nil:
			// NaN -> NaN is deliberately NOT counted as a change.
			if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) {
				changes++
			}
		case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
			// A change of sample type (float <-> histogram) counts as a change.
			changes++
		case prevSample.H != nil && curSample.H != nil:
			if !curSample.H.Equals(prevSample.H) {
				changes++
			}
		}
		prevSample = curSample
	}
	return append(enh.Out, Sample{F: float64(changes)}), nil
}
// label_replace function operates only on series; does not look at timestamps or values.
// Arguments: args[0] input vector expression, args[1] destination label,
// args[2] replacement template, args[3] source label, args[4] regex.
func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
	var (
		dst      = stringFromArg(args[1])
		repl     = stringFromArg(args[2])
		src      = stringFromArg(args[3])
		regexStr = stringFromArg(args[4])
	)
	// Fully anchor the regex; (?s) lets "." match newlines as well.
	regex, err := regexp.Compile("^(?s:" + regexStr + ")$")
	if err != nil {
		panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
	}
	if !model.UTF8Validation.IsValidLabelName(dst) {
		panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
	}
	val, ws := ev.eval(ctx, args[0])
	matrix := val.(Matrix)
	lb := labels.NewBuilder(labels.EmptyLabels())
	for i, el := range matrix {
		srcVal := el.Metric.Get(src)
		indexes := regex.FindStringSubmatchIndex(srcVal)
		if indexes != nil { // Only replace when regexp matches.
			// Expand capture-group references ($1 etc.) in the replacement.
			res := regex.ExpandString([]byte{}, repl, srcVal, indexes)
			lb.Reset(el.Metric)
			lb.Set(dst, string(res))
			matrix[i].Metric = lb.Labels()
			// Writing the metric name means it must not be dropped later on.
			if dst == model.MetricNameLabel {
				matrix[i].DropName = false
			} else {
				matrix[i].DropName = el.DropName
			}
		}
	}
	// Rewriting labels may make previously distinct series collide.
	return ev.mergeSeriesWithSameLabelset(matrix), ws
}
// === Vector(s Scalar) (Vector, Annotations) ===
// Wraps a scalar into a single label-less vector sample.
func funcVector(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	sample := Sample{
		Metric: labels.Labels{},
		F:      vectorVals[0][0].F,
	}
	return append(enh.Out, sample), nil
}
// label_join function operates only on series; does not look at timestamps or values.
// Arguments: args[0] input vector expression, args[1] destination label,
// args[2] separator, args[3:] source labels.
//
// Fix: the inner loop previously reused `i` as its index variable, shadowing
// the outer matrix index `i`; the inner index is now `j` for clarity.
func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
	var (
		dst       = stringFromArg(args[1])
		sep       = stringFromArg(args[2])
		srcLabels = make([]string, len(args)-3)
	)
	for i := 3; i < len(args); i++ {
		src := stringFromArg(args[i])
		if !model.UTF8Validation.IsValidLabelName(src) {
			panic(fmt.Errorf("invalid source label name in label_join(): %s", src))
		}
		srcLabels[i-3] = src
	}
	if !model.UTF8Validation.IsValidLabelName(dst) {
		panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
	}
	val, ws := ev.eval(ctx, args[0])
	matrix := val.(Matrix)
	srcVals := make([]string, len(srcLabels))
	lb := labels.NewBuilder(labels.EmptyLabels())
	for i, el := range matrix {
		// Collect the source label values in order, then join them.
		for j, src := range srcLabels {
			srcVals[j] = el.Metric.Get(src)
		}
		strval := strings.Join(srcVals, sep)
		lb.Reset(el.Metric)
		lb.Set(dst, strval)
		matrix[i].Metric = lb.Labels()
		// Writing the metric name means it must not be dropped later on.
		if dst == model.MetricNameLabel {
			matrix[i].DropName = false
		} else {
			matrix[i].DropName = el.DropName
		}
	}
	// Rewriting labels may make previously distinct series collide.
	return ev.mergeSeriesWithSameLabelset(matrix), ws
}
// Common code for date related functions: applies f to each input sample's
// value interpreted as a Unix timestamp (UTC). With no vector argument, f is
// evaluated at the query timestamp instead.
func dateWrapper(vectorVals []Vector, enh *EvalNodeHelper, f func(time.Time) float64) Vector {
	if len(vectorVals) == 0 {
		evalTime := time.Unix(enh.Ts/1000, 0).UTC()
		return append(enh.Out, Sample{
			Metric: labels.Labels{},
			F:      f(evalTime),
		})
	}
	for _, sample := range vectorVals[0] {
		if sample.H != nil {
			// Ignore histogram sample.
			continue
		}
		metric := sample.Metric
		if !enh.enableDelayedNameRemoval {
			metric = metric.DropReserved(schema.IsMetadataLabel)
		}
		enh.Out = append(enh.Out, Sample{
			Metric:   metric,
			F:        f(time.Unix(int64(sample.F), 0).UTC()),
			DropName: true,
		})
	}
	return enh.Out
}
// === days_in_month(v Vector) Scalar ===
func funcDaysInMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	daysInMonth := func(t time.Time) float64 {
		// Day 32 normalizes into the next month; 32 minus the resulting
		// day-of-month is the number of days in t's month.
		return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
	}
	return dateWrapper(vectorVals, enh, daysInMonth), nil
}
// === day_of_month(v Vector) Scalar ===
func funcDayOfMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	dayOfMonth := func(t time.Time) float64 { return float64(t.Day()) }
	return dateWrapper(vectorVals, enh, dayOfMonth), nil
}
// === day_of_week(v Vector) Scalar ===
// Sunday is 0, per Go's time.Weekday numbering.
func funcDayOfWeek(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	dayOfWeek := func(t time.Time) float64 { return float64(t.Weekday()) }
	return dateWrapper(vectorVals, enh, dayOfWeek), nil
}
// === day_of_year(v Vector) Scalar ===
func funcDayOfYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	dayOfYear := func(t time.Time) float64 { return float64(t.YearDay()) }
	return dateWrapper(vectorVals, enh, dayOfYear), nil
}
// === hour(v Vector) Scalar ===
func funcHour(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	hour := func(t time.Time) float64 { return float64(t.Hour()) }
	return dateWrapper(vectorVals, enh, hour), nil
}
// === minute(v Vector) Scalar ===
func funcMinute(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	minute := func(t time.Time) float64 { return float64(t.Minute()) }
	return dateWrapper(vectorVals, enh, minute), nil
}
// === month(v Vector) Scalar ===
// January is 1, per Go's time.Month numbering.
func funcMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	month := func(t time.Time) float64 { return float64(t.Month()) }
	return dateWrapper(vectorVals, enh, month), nil
}
// === year(v Vector) Scalar ===
func funcYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	year := func(t time.Time) float64 { return float64(t.Year()) }
	return dateWrapper(vectorVals, enh, year), nil
}
// FunctionCalls is a list of all functions supported by PromQL, including their types.
// Entries mapped to nil are special-cased by the evaluator and are never
// invoked through this table.
var FunctionCalls = map[string]FunctionCall{
	"abs":                          funcAbs,
	"absent":                       funcAbsent,
	"absent_over_time":             funcAbsentOverTime,
	"acos":                         funcAcos,
	"acosh":                        funcAcosh,
	"asin":                         funcAsin,
	"asinh":                        funcAsinh,
	"atan":                         funcAtan,
	"atanh":                        funcAtanh,
	"avg_over_time":                funcAvgOverTime,
	"ceil":                         funcCeil,
	"changes":                      funcChanges,
	"clamp":                        funcClamp,
	"clamp_max":                    funcClampMax,
	"clamp_min":                    funcClampMin,
	"cos":                          funcCos,
	"cosh":                         funcCosh,
	"count_over_time":              funcCountOverTime,
	"days_in_month":                funcDaysInMonth,
	"day_of_month":                 funcDayOfMonth,
	"day_of_week":                  funcDayOfWeek,
	"day_of_year":                  funcDayOfYear,
	"deg":                          funcDeg,
	"delta":                        funcDelta,
	"deriv":                        funcDeriv,
	"exp":                          funcExp,
	"first_over_time":              funcFirstOverTime,
	"floor":                        funcFloor,
	"histogram_avg":                funcHistogramAvg,
	"histogram_count":              funcHistogramCount,
	"histogram_fraction":           funcHistogramFraction,
	"histogram_quantile":           funcHistogramQuantile,
	"histogram_sum":                funcHistogramSum,
	"histogram_stddev":             funcHistogramStdDev,
	"histogram_stdvar":             funcHistogramStdVar,
	"double_exponential_smoothing": funcDoubleExponentialSmoothing,
	"hour":                         funcHour,
	"idelta":                       funcIdelta,
	"increase":                     funcIncrease,
	"info":                         nil, // Evaluated specially, not called via this map.
	"irate":                        funcIrate,
	"label_replace":                nil, // evalLabelReplace not called via this map.
	"label_join":                   nil, // evalLabelJoin not called via this map.
	"ln":                           funcLn,
	"log10":                        funcLog10,
	"log2":                         funcLog2,
	"last_over_time":               funcLastOverTime,
	"mad_over_time":                funcMadOverTime,
	"max_over_time":                funcMaxOverTime,
	"min_over_time":                funcMinOverTime,
	"ts_of_first_over_time":        funcTsOfFirstOverTime,
	"ts_of_last_over_time":         funcTsOfLastOverTime,
	"ts_of_max_over_time":          funcTsOfMaxOverTime,
	"ts_of_min_over_time":          funcTsOfMinOverTime,
	"minute":                       funcMinute,
	"month":                        funcMonth,
	"pi":                           funcPi,
	"predict_linear":               funcPredictLinear,
	"present_over_time":            funcPresentOverTime,
	"quantile_over_time":           funcQuantileOverTime,
	"rad":                          funcRad,
	"rate":                         funcRate,
	"resets":                       funcResets,
	"round":                        funcRound,
	"scalar":                       funcScalar,
	"sgn":                          funcSgn,
	"sin":                          funcSin,
	"sinh":                         funcSinh,
	"sort":                         funcSort,
	"sort_desc":                    funcSortDesc,
	"sort_by_label":                funcSortByLabel,
	"sort_by_label_desc":           funcSortByLabelDesc,
	"sqrt":                         funcSqrt,
	"stddev_over_time":             funcStddevOverTime,
	"stdvar_over_time":             funcStdvarOverTime,
	"sum_over_time":                funcSumOverTime,
	"tan":                          funcTan,
	"tanh":                         funcTanh,
	"time":                         funcTime,
	"timestamp":                    funcTimestamp,
	"vector":                       funcVector,
	"year":                         funcYear,
}
// AtModifierUnsafeFunctions are the functions whose result
// can vary if evaluation time is changed when the arguments are
// step invariant. It also includes functions that use the timestamps
// of the passed instant vector argument to calculate a result since
// that can also change with change in eval time.
var AtModifierUnsafeFunctions = map[string]struct{}{
	// Step invariant functions: the result depends on the evaluation
	// timestamp itself (directly or via the default vector()/time() input).
	"days_in_month": {}, "day_of_month": {}, "day_of_week": {}, "day_of_year": {},
	"hour": {}, "minute": {}, "month": {}, "year": {},
	"predict_linear": {}, "time": {},
	// Uses timestamp of the argument for the result,
	// hence unsafe to use with @ modifier.
	"timestamp": {},
}
// AnchoredSafeFunctions are the functions that can be used with the anchored
// modifier. Anchored modifier returns matrices with samples outside of the
// boundaries, so not every function can be used with it.
// These functions compare or extrapolate between samples rather than
// aggregating every sample, so an extra sample before the range start is safe.
var AnchoredSafeFunctions = map[string]struct{}{
	"resets":   {},
	"changes":  {},
	"rate":     {},
	"increase": {},
	"delta":    {},
}
// SmoothedSafeFunctions are the functions that can be used with the smoothed
// modifier. Smoothed modifier returns matrices with samples outside of the
// boundaries, so not every function can be used with it.
// Note this is a strict subset of AnchoredSafeFunctions.
var SmoothedSafeFunctions = map[string]struct{}{
	"rate":     {},
	"increase": {},
	"delta":    {},
}
// vectorByValueHeap orders a Vector by ascending float value, with NaN
// considered smaller than everything else. It satisfies both sort.Interface
// and heap.Interface.
type vectorByValueHeap Vector

func (h vectorByValueHeap) Len() int {
	return len(h)
}

func (h vectorByValueHeap) Less(i, j int) bool {
	// NaN sorts first, regardless of the other operand.
	if math.IsNaN(h[i].F) {
		return true
	}
	return h[i].F < h[j].F
}

func (h vectorByValueHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *vectorByValueHeap) Push(x any) {
	*h = append(*h, *(x.(*Sample)))
}

func (h *vectorByValueHeap) Pop() any {
	old := *h
	last := len(old) - 1
	popped := old[last]
	*h = old[:last]
	return popped
}
// vectorByReverseValueHeap orders a Vector by descending float value, with
// NaN considered smaller than everything else. It satisfies both
// sort.Interface and heap.Interface.
type vectorByReverseValueHeap Vector

func (h vectorByReverseValueHeap) Len() int {
	return len(h)
}

func (h vectorByReverseValueHeap) Less(i, j int) bool {
	// NaN sorts first, regardless of the other operand.
	if math.IsNaN(h[i].F) {
		return true
	}
	return h[i].F > h[j].F
}

func (h vectorByReverseValueHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *vectorByReverseValueHeap) Push(x any) {
	*h = append(*h, *(x.(*Sample)))
}

func (h *vectorByReverseValueHeap) Pop() any {
	old := *h
	last := len(old) - 1
	popped := old[last]
	*h = old[:last]
	return popped
}
// createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched
// in a given expression. It is used in the absent functions.
func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
	var matchers []*labels.Matcher
	switch sel := expr.(type) {
	case *parser.VectorSelector:
		matchers = sel.LabelMatchers
	case *parser.MatrixSelector:
		matchers = sel.VectorSelector.(*parser.VectorSelector).LabelMatchers
	default:
		return labels.EmptyLabels()
	}
	builder := labels.NewBuilder(labels.EmptyLabels())
	// The 'seen' map implements backwards-compatibility for historic behaviour:
	// e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output.
	// Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`.
	seen := make(map[string]bool, len(matchers))
	for _, m := range matchers {
		if m.Name == labels.MetricName {
			continue
		}
		// Anything that is not a first-time exact match disqualifies the label.
		if m.Type != labels.MatchEqual || seen[m.Name] {
			builder.Del(m.Name)
			continue
		}
		builder.Set(m.Name, m.Value)
		seen[m.Name] = true
	}
	return builder.Labels()
}
// stringFromArg extracts the value of a string-literal argument; it panics if
// the expression is not a *parser.StringLiteral.
func stringFromArg(e parser.Expr) string {
	lit := e.(*parser.StringLiteral)
	return lit.Val
}
// stringSliceFromArgs extracts the values of a list of string-literal
// arguments, in order.
func stringSliceFromArgs(args parser.Expressions) []string {
	out := make([]string, 0, len(args))
	for _, arg := range args {
		out = append(out, stringFromArg(arg))
	}
	return out
}
func getMetricName(metric labels.Labels) string {
return metric.Get(model.MetricNameLabel)
} | go | github | https://github.com/prometheus/prometheus | promql/functions.go |