code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.config;
import org.springframework.beans.factory.ObjectFactory;
/**
 * Shared test types for this package. The class itself is an empty,
 * non-instantiable anchor; the package-private types below it provide
 * reusable test fixtures.
 *
 * @author Chris Beams
 */
final class TestTypes {}
/**
 * Minimal {@link Scope} stub for tests: object retrieval and removal always
 * fail, and destruction callbacks are silently discarded.
 *
 * @author Juergen Hoeller
 */
class NoOpScope implements Scope {
// Lookup is not supported by this stub; tests using it never resolve beans.
@Override
public Object get(String name, ObjectFactory<?> objectFactory) {
throw new UnsupportedOperationException();
}
// Removal is likewise unsupported.
@Override
public Object remove(String name) {
throw new UnsupportedOperationException();
}
// Intentionally a no-op: callbacks registered here are never invoked.
@Override
public void registerDestructionCallback(String name, Runnable callback) {
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/config/TestTypes.java |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adagrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(XLATestCase):
  """Tests that the XLA-compiled Adagrad optimizer matches known-good values."""

  def _runBasicTest(self, make_learning_rate):
    """Runs three Adagrad steps on two variables and validates the results.

    Args:
      make_learning_rate: zero-argument callable returning the learning rate
        for AdagradOptimizer. A callable (rather than a plain value) is used
        so that a Tensor learning rate is constructed *inside* the test
        session/scope, preserving the graph placement of the original
        inline tests.
    """
    for dtype in self.float_types:
      with self.test_session(), self.test_scope():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(
            make_learning_rate(), initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of adagrad.
        for _ in range(3):
          ada_update.run()
        # Validate updated params against precomputed reference values.
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())

  def testBasic(self):
    """Adagrad with a plain Python float learning rate."""
    self._runBasicTest(lambda: 3.0)

  def testTensorLearningRate(self):
    """Adagrad with a scalar Tensor learning rate must match the float case."""
    self._runBasicTest(lambda: constant_op.constant(3.0))

  def testSharing(self):
    """Applying one optimizer twice must share a single accumulator slot."""
    for dtype in self.float_types:
      with self.test_session(), self.test_scope():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(3.0)
        # Apply the optimizer twice. Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())
# Standard TensorFlow test entry point: run all tests when executed directly.
if __name__ == "__main__":
  test.main() | unknown | codeparrot/codeparrot-clean | |
import os
import subprocess
import sys
# Build the game environment from source; make's stdout is captured so we can
# scan it for the "Error 1" marker make emits on failure.
makeenvout = subprocess.Popen('cd ../../environment; make clean; make all', stdout=subprocess.PIPE, shell = True).stdout.read().decode('utf-8')
if "Error 1" in makeenvout:
    print('Environment build failed. Make output:\n#######################################################')
    print(makeenvout + '\n#######################################################')
    sys.exit(1)
else:
    print('Environment build succeeded.')
# Assume all is well.
isGood = True
# Build starter packages (where necessary)
# Compiler diagnostics arrive on stderr, hence stderr=PIPE for these builds.
makecppout = subprocess.Popen('cd ../../airesources/C++; g++ MyBot.cpp -std=c++11 -o MyBot', stderr=subprocess.PIPE, shell = True).stderr.read().decode('utf-8')
if "error" in makecppout:
    print('C++ starter package build failed. Build output:\n#######################################################')
    print(makecppout + '\n#######################################################')
    isGood = False
else:
    print('C++ starter package build succeeded.')
makejavaout = subprocess.Popen('cd ../../airesources/Java; javac MyBot.java', stderr=subprocess.PIPE, shell = True).stderr.read().decode('utf-8')
if "error" in makejavaout:
    print('Java starter package build failed. Build output:\n#######################################################')
    print(makejavaout + '\n#######################################################')
    isGood = False
else:
    print('Java starter package build succeeded.')
makerustout = subprocess.Popen('cd ../../airesources/Rust; cargo build --release', stderr=subprocess.PIPE, shell = True).stderr.read().decode('utf-8')
if "error" in makerustout:
    print('Rust starter package build failed. Build output:\n#######################################################')
    print(makerustout + '\n#######################################################')
    isGood = False
else:
    print('Rust starter package build succeeded.')
# Do scala eventually.
# Ensures that the environment can run a basic game where a bot wins. Confirm that the bot expected to win does indeed win.
# NOTE(review): these checks parse fixed line/column positions from the tail
# of the environment's output, so they are tightly coupled to its format.
genlines = subprocess.Popen('../../environment/halite -d "10 10" -q "python3 ModBot.py" "python3 ModBot.py" -s 1001', stdout=subprocess.PIPE, shell = True).stdout.read().decode('utf-8').split('\n')
if genlines[-4].split()[1] != "1" or genlines[-3].split()[1] != "2" or genlines[-2] != " " or genlines[-1] != " ":
    print('General environment test failed. Environment output:\n#######################################################')
    print('\n'.join(genlines) + '\n#######################################################')
    isGood = False
else:
    print('General environment test succeeded.')
# Ensures that the environment can run a basic game where a bot wins. Confirm that the bot expected to win does indeed win.
splines = subprocess.Popen('../../environment/halite -d "10 10" -q "../../airesources/C++/MyBot" "java -cp ../../airesources/Java MyBot" "python3 ../../airesources/Python/MyBot.py" "../../airesources/Rust/target/release/MyBot" -s 1000', stdout=subprocess.PIPE, shell = True).stdout.read().decode('utf-8').split('\n')
if splines[-2] != " " or splines[-1] != " ":
    print('Starter package test failed. Environment output:\n#######################################################')
    print('\n'.join(splines) + '\n#######################################################')
    isGood = False
else:
    print('Starter package test succeeded.')
# Dump any bot logs to aid debugging of the starter-package run.
splines = subprocess.Popen('cat *.log', stdout=subprocess.PIPE, shell = True).stdout.read().decode('utf-8').split('\n')
print('\n'.join(splines))
# Ensures that tie evaluation is correct. Confirm that the bot expected to win does indeed win.
tielines = subprocess.Popen('../../environment/halite -d "10 10" -q "python3 ModBot.py" "python3 ModBot.py" -s 998', stdout=subprocess.PIPE, shell = True).stdout.read().decode('utf-8').split('\n')
if tielines[-4].split()[1] != "2" or tielines[-3].split()[1] != "1" or tielines[-2] != " " or tielines[-1] != " ":
    print('Tie evaluation test failed. Environment output:\n#######################################################')
    print('\n'.join(tielines) + '\n#######################################################')
    isGood = False
else:
    print('Tie evaluation test succeeded.')
# Ensures that all timeouts work well.
timelines = subprocess.Popen('../../environment/halite -d "20 20" -q "python3 FailInitBot.py" "python3 TimeoutInitBot.py" "python3 Fail10Bot.py" "python3 Timeout10Bot.py" "python3 ModBot.py" -s 998', stdout=subprocess.PIPE, shell = True).stdout.read().decode('utf-8').split('\n')
if timelines[-7].split()[1] != "5" or timelines[-6].split()[1] != "4" or timelines[-5].split()[1] != "3" or timelines[-4].split()[1] != "2" or timelines[-3].split()[1] != "1" or timelines[-2] != "1 2 3 4 ":
    print('Timeout evaluation test failed. Environment output:\n#######################################################')
    print('\n'.join(timelines) + '\n#######################################################')
    isGood = False
else:
    print('Timeout evaluation test succeeded.')
# Output (in human form) the result of the tests.
if(isGood):
    print('All environment tests succeeded.')
else:
    print('Environment tests failed.')
sys.exit(1 - int(isGood)) | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
from cherrymusicserver import log
# Switch logging into test mode before the server package is imported —
# presumably so import-time log output is captured/silenced for tests; see
# cherrymusicserver.log for the exact behavior.
log.setTest()
import cherrymusicserver as cherry
def setup():
    """Module-level nose fixture, run once before the tests in this module."""
    # Wire up the full service registry first, then override the database
    # connector with the in-memory MemConnector so tests never touch a real
    # on-disk database. The order matters: the override must follow setup.
    cherry.CherryMusic.setup_services()
    cherry.service.provide('dbconnector', cherry.database.sql.MemConnector)
def test_server_wont_start_without_valid_basedir():
    """Starting CherryMusic with an unset media.basedir must abort startup.

    sys.exit is replaced with a mock that raises, so the abort surfaces as
    an exception we can assert on; setup_config is patched out to keep the
    test hermetic.
    """
    class _Abort(Exception):
        pass

    invalid_config = {'media.basedir': None}  # an unset basedir defaults to None
    fake_exit = Mock(side_effect=_Abort)
    with patch('cherrymusicserver.config', invalid_config), \
            patch('cherrymusicserver.CherryMusic.setup_config') as setup_config_mock, \
            patch('sys.exit', fake_exit):
        assert_raises(_Abort, cherry.CherryMusic)
    assert setup_config_mock.called
# Allow running this test module directly via nose.
if __name__ == '__main__':
    nose.runmodule() | unknown | codeparrot/codeparrot-clean | |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package blocktoattr
import (
"log"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/zclconf/go-cty/cty"
)
// FixUpBlockAttrs wraps a raw HCL body so that attributes declared in the
// schema with list or set type may also be written in HCL block syntax,
// i.e. as repeated nested blocks whose block type is the attribute name.
//
// The wrapper is skipped entirely when the schema contains structural
// attribute types (see skipFixup); those indicate a provider that does not
// depend on the block/attribute ambiguity inherited from HCL 1.
//
// FixUpBlockAttrs itself always succeeds. Any ambiguity that cannot be
// resolved safely only surfaces later, when the returned body is decoded.
func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
	if schema == nil {
		// Should never happen, but poorly-configured test mocks have
		// produced a nil schema before, so synthesize an empty one.
		schema = &configschema.Block{}
	}
	if !skipFixup(schema) {
		return &fixupBody{
			original: body,
			schema:   schema,
			names:    ambiguousNames(schema),
		}
	}
	// No resource name or type is available here, but this message can still
	// help locate the evaluation in the logs if there were a problem.
	log.Println("[DEBUG] skipping FixUpBlockAttrs")
	return body
}
// skipFixup reports whether the fixup should be bypassed entirely: it detects
// any use of Attribute.NestedType, or attribute types that the legacy SDK
// could never have produced even when SchemaConfigModeAttr is taken into
// account.
func skipFixup(schema *configschema.Block) bool {
	for _, attr := range schema.Attributes {
		if attr.NestedType != nil {
			return true
		}
		t := attr.Type
		switch {
		case t.IsTupleType() || t.IsObjectType():
			// Tuples and objects could not be generated at all.
			return true
		case t.IsMapType() && t.ElementType().IsObjectType():
			// A map of objects was not possible either.
			return true
		case t.IsCollectionType():
			// Nested collections were not really supported, though string
			// element types could occur; conservatively allow only nested
			// collections of primitive types.
			inner := t.ElementType()
			if inner.IsCollectionType() && !inner.ElementType().IsPrimitiveType() {
				return true
			}
		}
	}
	for _, blockType := range schema.BlockTypes {
		if skipFixup(&blockType.Block) {
			return true
		}
	}
	return false
}
// fixupBody wraps an hcl.Body to apply the attribute-as-blocks fixup during
// decoding. original is the untouched body, schema is the block schema it
// will be decoded against, and names is the set of attribute names that may
// legally appear in block syntax (see ambiguousNames).
type fixupBody struct {
original hcl.Body
schema *configschema.Block
names map[string]struct{}
}
// unknownBlock is implemented by body types that can report whether they
// represent a wholly-unknown value.
type unknownBlock interface {
Unknown() bool
}
// Unknown delegates to the wrapped body when it implements unknownBlock,
// and otherwise reports false.
func (b *fixupBody) Unknown() bool {
if u, ok := b.original.(unknownBlock); ok {
return u.Unknown()
}
return false
}
// Content decodes the full content of the body. The schema given here must
// be the lower-level form of the same schema previously passed to
// FixUpBlockAttrs; any other schema yields undefined results.
func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
	effective := b.effectiveSchema(schema)
	content, diags := b.original.Content(effective)
	return b.fixupContent(content), diags
}
// PartialContent decodes the portion of the body matched by schema and wraps
// the remainder in another fixupBody, so that later decoding of the leftover
// content receives the same attribute-as-blocks treatment.
func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
	effective := b.effectiveSchema(schema)
	content, rest, diags := b.original.PartialContent(effective)
	wrapped := &fixupBody{
		original: rest,
		schema:   b.schema,
		names:    b.names,
	}
	return b.fixupContent(content), wrapped, diags
}
func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
// FixUpBlockAttrs is not intended to be used in situations where we'd use
// JustAttributes, so we just pass this through verbatim to complete our
// implementation of hcl.Body.
return b.original.JustAttributes()
}
// MissingItemRange passes through to the wrapped body unchanged.
func (b *fixupBody) MissingItemRange() hcl.Range {
return b.original.MissingItemRange()
}
// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
// content to determine whether the author has used attribute or block syntax
// for each of the ambiguous attributes where both are permitted.
//
// The resulting schema will always contain all of the same names that are
// in the given schema, but some attribute schemas may instead be replaced by
// block header schemas.
func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
return effectiveSchema(given, b.original, b.names, true)
}
// fixupContent rewrites decoded content so that block instances whose type is
// one of the ambiguous names are re-expressed as a single synthetic attribute
// per name, and all other nested blocks are re-wrapped for recursive fixup.
func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
var ret hcl.BodyContent
ret.Attributes = make(hcl.Attributes)
for name, attr := range content.Attributes {
ret.Attributes[name] = attr
}
blockAttrVals := make(map[string][]*hcl.Block)
for _, block := range content.Blocks {
if _, exists := b.names[block.Type]; exists {
// If we get here then we've found a block type whose instances need
// to be re-interpreted as a list-of-objects attribute. We'll gather
// those up and fix them up below.
blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
continue
}
// We need to now re-wrap our inner body so it will be subject to the
// same attribute-as-block fixup when recursively decoded.
retBlock := *block // shallow copy
if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
// Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
}
ret.Blocks = append(ret.Blocks, &retBlock)
}
// Now we'll install synthetic attributes for each of our fixups. We can't
// do this exactly because HCL's information model expects an attribute
// to be a single decl but we have multiple separate blocks. We'll
// approximate things, then, by using only our first block for the source
// location information. (We are guaranteed at least one by the above logic.)
for name, blocks := range blockAttrVals {
ret.Attributes[name] = &hcl.Attribute{
Name: name,
Expr: &fixupBlocksExpr{
blocks: blocks,
ety: b.schema.Attributes[name].Type.ElementType(),
},
Range: blocks[0].DefRange,
NameRange: blocks[0].TypeRange,
}
}
ret.MissingItemRange = b.MissingItemRange()
return &ret
}
// fixupBlocksExpr is the synthetic expression installed by fixupContent: it
// evaluates a set of same-typed blocks as a single list value whose element
// type is ety.
type fixupBlocksExpr struct {
blocks hcl.Blocks
ety cty.Type
}
// Value decodes each wrapped block against a schema synthesized from the
// element type and returns the results as a cty list.
func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
// In order to produce a suitable value for our expression we need to
// now decode the whole descendant block structure under each of our block
// bodies.
//
// That requires us to do something rather strange: we must construct a
// synthetic block type schema derived from the element type of the
// attribute, thus inverting our usual direction of lowering a schema
// into an implied type. Because a type is less detailed than a schema,
// the result is imprecise and in particular will just consider all
// the attributes to be optional and let the provider eventually decide
// whether to return errors if they turn out to be null when required.
schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety
spec := schema.DecoderSpec()
vals := make([]cty.Value, len(e.blocks))
var diags hcl.Diagnostics
for i, block := range e.blocks {
body := FixUpBlockAttrs(block.Body, schema)
val, blockDiags := hcldec.Decode(body, spec, ctx)
diags = append(diags, blockDiags...)
if val == cty.NilVal {
// Substitute an unknown so downstream consumers see a typed value.
val = cty.UnknownVal(e.ety)
}
vals[i] = val
}
if len(vals) == 0 {
return cty.ListValEmpty(e.ety), diags
}
// NOTE(review): cty.ListVal panics on inconsistently-typed elements;
// presumably decoding every block against the same spec guarantees
// consistency — confirm.
return cty.ListVal(vals), diags
}
// Variables returns every traversal referenced from within the wrapped
// blocks, discovered by decoding them against the schema implied by the
// element type.
func (e *fixupBlocksExpr) Variables() []hcl.Traversal {
	spec := SchemaForCtyElementType(e.ety).DecoderSpec()
	var traversals []hcl.Traversal
	for _, blk := range e.blocks {
		traversals = append(traversals, hcldec.Variables(blk.Body, spec)...)
	}
	return traversals
}
// Range returns an approximate source range for the synthetic expression.
func (e *fixupBlocksExpr) Range() hcl.Range {
// This is not really an appropriate range for the expression but it's
// the best we can do from here.
return e.blocks[0].DefRange
}
// StartRange returns the definition range of the first wrapped block.
func (e *fixupBlocksExpr) StartRange() hcl.Range {
return e.blocks[0].DefRange
} | go | github | https://github.com/hashicorp/terraform | internal/lang/blocktoattr/fixup.go |
# Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Connects the POX messenger bus to HTTP.
Requires the "webserver" component.
NOTE: The web_transport keeps its own session IDs. Since it was first
written, though, sessions IDs have become part of every
Connection, and we could (but are not) reuse those.
"""
from SocketServer import ThreadingMixIn
from BaseHTTPServer import *
import time
import select
import random
import hashlib
import base64
import json
from pox.lib.recoco import Timer
from pox.messenger import Connection, Transport
from pox.core import core
from pox.web.webcore import *
log = core.getLogger()
class HTTPConnection (Connection):
  """A messenger Connection whose traffic is relayed over HTTP long-polling.

  Outgoing messages are queued in _messages and drained by do_GET in
  CometRequestHandler; sequence numbers guard against replayed or
  out-of-order POSTs.
  """
  def __init__ (self, transport):
    Connection.__init__(self, transport)
    # Outbound message queue, guarded by _cond.
    self._messages = []
    self._cond = threading.Condition()
    self._quitting = False
    # We're really protected from attack by the session key, we hope
    self._tx_seq = -1 #random.randint(0, 1 << 32)
    self._rx_seq = None
    #self._t = Timer(10, lambda : self.send({'hi':'again'}), recurring=True)
    # Timestamp of last client activity; used by _check_timeout.
    self._touched = time.time()
    self._send_welcome()
  def _check_timeout (self):
    # Close the session after two minutes without client contact.
    if (time.time() - self._touched) > 120:
      log.info("Session " + str(self) + " timed out")
      self._close()
  def _new_tx_seq (self):
    # Advance the transmit sequence number, wrapping at 2**31 - 1.
    self._tx_seq = (self._tx_seq + 1) & 0x7fFFffFF
    return self._tx_seq
  def _check_rx_seq (self, seq):
    # Accept only the expected next receive sequence number; the first seq
    # seen initializes the expectation.
    seq = int(seq)
    if self._rx_seq is None: self._rx_seq = seq
    if seq != self._rx_seq: return False
    self._rx_seq = (self._rx_seq + 1) & 0x7fFFffFF
    return True
  def _close (self):
    super(HTTPConnection, self)._close()
    #TODO: track request sockets and cancel them?
    # Signals any blocked do_GET pollers to finish up.
    self._quitting = True
  def send_raw (self, data):
    # Queue raw data for the next poll and wake any waiting do_GET.
    self._cond.acquire()
    self._messages.append(data)
    self._cond.notify()
    self._cond.release()
  def _do_rx_message (self, items):
    # Dispatch each received item through the messenger machinery.
    for item in items:
      self._rx_message(item)
class HTTPTransport (Transport):
  """Messenger Transport that tracks HTTPConnection sessions by session id
  and periodically expires idle ones.
  """
  def __init__ (self, nexus = None):
    Transport.__init__(self, nexus)
    # Maps session_id -> HTTPConnection.
    self._connections = {}
    #self._t = Timer(5, self._check_timeouts, recurring=True)
    # Sweep for dead sessions every two minutes.
    self._t = Timer(60*2, self._check_timeouts, recurring=True)
  def _check_timeouts (self):
    # Let each session decide whether it has timed out.
    for c in self._connections.values():
      c._check_timeout()
  def _forget (self, connection):
    # From MessengerTransport
    if connection._session_id in self._connections:
      del self._connections[connection._session_id]
    else:
      #print "Failed to forget", connection
      pass
  def create_session (self):
    # Create and register a brand-new long-poll session.
    ses = HTTPConnection(self)
    self._connections[ses._session_id] = ses
    return ses
  def get_session (self, key):
    # Look up an existing session; None if unknown/expired.
    return self._connections.get(key, None)
class CometRequestHandler (SplitRequestHandler):
  """HTTP handler implementing comet-style long polling for the messenger.

  POST delivers client->server messages; GET blocks until server->client
  messages are available (or the session quits) and returns them as JSON.
  """
  protocol_version = 'HTTP/1.1'
#  def __init__ (self, *args, **kw):
#    super(CometRequestHandler, self).__init__(*args, **kw)
  def _init (self):
    # args are provided by the webcore handler registration (see launch()).
    self.transport = self.args['transport']
    self.auth_function = self.args.get('auth', None)
  def _doAuth (self):
    # HTTP Basic authentication, only enforced when an auth function was
    # configured.
    if self.auth_function:
      # NOTE(review): the whole header is lowercased before the credentials
      # are base64-decoded, which destroys the case of the username and
      # password — confirm whether that is intended.
      auth = self.headers.get("Authorization", "").strip().lower()
      success = False
      if auth.startswith("basic "):
        try:
          auth = base64.decodestring(auth[6:].strip()).split(':', 1)
          success = self.auth_function(auth[0], auth[1])
        except:
          pass
      if success is not True:
        self.send_response(401, "Authorization Required")
        self.send_header("WWW-Authenticate",  'Basic realm="POX"')
        self.end_headers()
        return
  def _getSession (self):
    # The session key may arrive in a header or as the last path component;
    # the special key "new" creates a fresh session.
    session_key = self.headers.get("X-POX-Messenger-Session-Key")
    if session_key is None:
      session_key = self.path.split('/')[-1]
    session_key = session_key.strip()
    if len(session_key) == 0:
      #TODO: return some bad response and log
      return None
    if session_key == "new":
      hmh = self.transport.create_session()
    else:
      hmh = self.transport.get_session(session_key)
    #print session_key, hmh.session_key
    return hmh
  def _enter (self):
    # Common prologue for GET/POST: authenticate, resolve the session, and
    # refresh its idle timer.
    self._doAuth()
    hmh = self._getSession()
    if hmh is None:
      #TODO: return some bad response and log
      pass
    else:
      hmh._touched = time.time()
    return hmh
  def do_POST (self):
    """Receive a JSON batch from the client and hand it to the session."""
    hmh = self._enter()
    if hmh is None: return None
    l = self.headers.get("Content-Length", "")
    if l == "":
      # NOTE(review): reading without a length on a keep-alive connection
      # may block until the client closes — confirm this path is exercised.
      data = json.loads(self.rfile.read())
    else:
      data = json.loads(self.rfile.read(int(l)))
    payload = data['data']
    # We send null payload for timeout poking and initial setup
    if 'seq' in data:
      if not hmh._check_rx_seq(data['seq']):
        # Bad seq!
        data = '{"seq":-1,"ses":"%s"}' % (hmh._session_id,)
        self.send_response(400, "Bad sequence number")
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", len(data))
        self.send_header("X-POX-Messenger-Sequence-Number", "-1")
        if self.auth_function: self.send_header("WWW-Authenticate",
                                                'Basic realm="POX"')
        self.end_headers()
        self.wfile.write(data)
        hmh._close()
        return
      if payload is not None:
        # Dispatch on the cooperative core thread rather than this one.
        core.callLater(hmh._do_rx_message, payload)
    try:
      data = '{"seq":-1,"ses":"%s"}' % (hmh._session_id,)
      self.send_response(200, "OK")
      self.send_header("Content-Type", "application/json")
      self.send_header("Content-Length", len(data))
      self.send_header("X-POX-Messenger-Sequence-Number", "-1")
      if self.auth_function: self.send_header("WWW-Authenticate",
                                              'Basic realm="POX"')
      self.end_headers()
      self.wfile.write(data)
    except:
      import traceback
      traceback.print_exc()
      pass
    return
  def do_GET (self):
    """Long-poll: block until messages are queued, then return up to 20."""
    hmh = self._enter()
    if hmh is None: return None
    hmh._cond.acquire()
    if len(hmh._messages) == 0:
      # Wait for messages
      while True:
        # Every couple seconds check if the socket is dead
        hmh._cond.wait(2)
        if len(hmh._messages): break
        if hmh._quitting: break
        r,w,x = select.select([self.wfile],[],[self.wfile], 0)
        if len(r) or len(x):
          # Other side disconnected?
          hmh._cond.release()
          return
    # Okay...
    if hmh._quitting:
      #NOTE: we don't drain the messages first, but maybe we should?
      try:
        data = '{"seq":-1,"ses":"%s"}' % (hmh._session_id,)
        self.send_response(200, "OK")
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", len(data))
        self.send_header("X-POX-Messenger-Sequence-Number", "-1")
        if self.auth_function: self.send_header("WWW-Authenticate",
                                                'Basic realm="POX"')
        self.end_headers()
        self.wfile.write(data)
      except:
        pass
      hmh._cond.release()
      return
    # Batch at most 20 queued messages into one JSON array response.
    num_messages = min(20, len(hmh._messages))
    data = hmh._messages[:num_messages]
    old_seq = hmh._tx_seq
    seq = hmh._new_tx_seq()
    data = '{"seq":%i,"ses":"%s","data":[%s]}' % (seq, hmh._session_id,
                                                  ','.join(data))
    try:
      self.send_response(200, "OK")
      self.send_header("Content-Type", "application/json")
      self.send_header("Content-Length", len(data))
      self.send_header("X-POX-Messenger-Sequence-Number", str(seq))
      if self.auth_function: self.send_header("WWW-Authenticate",
                                              'Basic realm="POX"')
      self.end_headers()
      self.wfile.write(data)
      # Only drop the messages once the write definitely succeeded.
      del hmh._messages[:num_messages]
    except:
      # Send failed; roll back the sequence number so the client can retry.
      hmh._tx_seq = old_seq
    hmh._cond.release()
def launch (username='', password=''):
  """POX component entry point: register the HTTP messenger transport.

  If both username and password are non-empty, HTTP Basic auth is required
  for the /_webmsg/ endpoint. Registration is deferred until the WebServer
  and MessengerNexus components are up.
  """
  def _launch ():
    transport = core.registerNew(HTTPTransport)
    # Set up config info
    config = {"transport":transport}
    if len(username) and len(password):
      config['auth'] = lambda u, p: (u == username) and (p == password)
    core.WebServer.set_handler("/_webmsg/",CometRequestHandler,config,True)
  core.call_when_ready(_launch, ["WebServer","MessengerNexus"],
                       name = "webmessenger") | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/python
# $Id:$
import ctypes
import math
import sys
import threading
import time
import lib_dsound as lib
from pyglet.media import MediaException, MediaThread, AbstractAudioDriver, \
AbstractAudioPlayer, MediaEvent
from pyglet.window.win32 import _user32, _kernel32
import pyglet
_debug = pyglet.options['debug_media']
class DirectSoundException(MediaException):
    """Raised for errors specific to the DirectSound audio driver."""
    pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
class DirectSoundWorker(MediaThread):
    """Background thread that keeps DirectSound buffers filled.

    Each pass picks the registered player reporting the most writable space
    and asks it to refill; all player-set access happens under the condition
    lock inherited from MediaThread.
    """
    # Minimum writable byte count before a refill is considered worthwhile.
    _min_write_size = 9600
    # Time to wait if there are players, but they're all full.
    _nap_time = 0.05
    # Time to wait if there are no players.
    _sleep_time = None
    def __init__(self):
        super(DirectSoundWorker, self).__init__()
        # Players currently being serviced by this worker.
        self.players = set()
    def run(self):
        """Service loop: refill the neediest player until stopped."""
        while True:
            # This is a big lock, but ensures a player is not deleted while
            # we're processing it -- this saves on extra checks in the
            # player's methods that would otherwise have to check that it's
            # still alive.
            if _debug:
                print 'DirectSoundWorker run attempt acquire'
            self.condition.acquire()
            if _debug:
                print 'DirectSoundWorker run acquire'
            if self.stopped:
                self.condition.release()
                break
            sleep_time = -1
            if self.players:
                # Find the player with the most free space to write into.
                player = None
                write_size = 0
                for p in self.players:
                    s = p.get_write_size()
                    if s > write_size:
                        player = p
                        write_size = s
                if write_size > self._min_write_size:
                    player.refill(write_size)
                else:
                    sleep_time = self._nap_time
            else:
                sleep_time = self._sleep_time
            self.condition.release()
            if _debug:
                print 'DirectSoundWorker run release'
            if sleep_time != -1:
                self.sleep(sleep_time)
        if _debug:
            print 'DirectSoundWorker exiting'
    def add(self, player):
        # Register a player and wake the service loop (thread-safe).
        if _debug:
            print 'DirectSoundWorker add', player
        self.condition.acquire()
        self.players.add(player)
        self.condition.notify()
        self.condition.release()
        if _debug:
            print 'return DirectSoundWorker add', player
    def remove(self, player):
        # Unregister a player; removing an absent player is a no-op.
        if _debug:
            print 'DirectSoundWorker remove', player
        self.condition.acquire()
        try:
            self.players.remove(player)
        except KeyError:
            pass
        self.condition.notify()
        self.condition.release()
        if _debug:
            print 'return DirectSoundWorker remove', player
class DirectSoundAudioPlayer(AbstractAudioPlayer):
# How many bytes the ring buffer should be
_buffer_size = 44800 * 1
# Need to cache these because pyglet API allows update separately, but
# DSound requires both to be set at once.
_cone_inner_angle = 360
_cone_outer_angle = 360
def __init__(self, source_group, player):
super(DirectSoundAudioPlayer, self).__init__(source_group, player)
# Locking strategy:
# All DirectSound calls should be locked. All instance vars relating
# to buffering/filling/time/events should be locked (used by both
# application and worker thread). Other instance vars (consts and
# 3d vars) do not need to be locked.
self._lock = threading.RLock()
# Desired play state (may be actually paused due to underrun -- not
# implemented yet).
self._playing = False
# Up to one audio data may be buffered if too much data was received
# from the source that could not be written immediately into the
# buffer. See refill().
self._next_audio_data = None
# Theoretical write and play cursors for an infinite buffer. play
# cursor is always <= write cursor (when equal, underrun is
# happening).
self._write_cursor = 0
self._play_cursor = 0
# Cursor position of end of data. Silence is written after
# eos for one buffer size.
self._eos_cursor = None
# Indexes into DSound circular buffer. Complications ensue wrt each
# other to avoid writing over the play cursor. See get_write_size and
# write().
self._play_cursor_ring = 0
self._write_cursor_ring = 0
# List of (play_cursor, MediaEvent), in sort order
self._events = []
# List of (cursor, timestamp), in sort order (cursor gives expiry
# place of the timestamp)
self._timestamps = []
audio_format = source_group.audio_format
wfx = lib.WAVEFORMATEX()
wfx.wFormatTag = lib.WAVE_FORMAT_PCM
wfx.nChannels = audio_format.channels
wfx.nSamplesPerSec = audio_format.sample_rate
wfx.wBitsPerSample = audio_format.sample_size
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign
dsbdesc = lib.DSBUFFERDESC()
dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
lib.DSBCAPS_GETCURRENTPOSITION2 |
lib.DSBCAPS_CTRLFREQUENCY |
lib.DSBCAPS_CTRLVOLUME)
if audio_format.channels == 1:
dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
dsbdesc.dwBufferBytes = self._buffer_size
dsbdesc.lpwfxFormat = ctypes.pointer(wfx)
# DSound buffer
self._buffer = lib.IDirectSoundBuffer()
driver._dsound.CreateSoundBuffer(dsbdesc,
ctypes.byref(self._buffer),
None)
if audio_format.channels == 1:
self._buffer3d = lib.IDirectSound3DBuffer()
self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
ctypes.byref(self._buffer3d))
else:
self._buffer3d = None
self._buffer.SetCurrentPosition(0)
self.refill(self._buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if driver and driver.worker:
driver.worker.remove(self)
self.lock()
self._buffer.Stop()
self._buffer.Release()
self._buffer = None
if self._buffer3d:
self._buffer3d.Release()
self._buffer3d = None
self.unlock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def play(self):
if _debug:
print 'DirectSound play'
driver.worker.add(self)
self.lock()
if not self._playing:
self._playing = True
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self.unlock()
if _debug:
print 'return DirectSound play'
def stop(self):
if _debug:
print 'DirectSound stop'
driver.worker.remove(self)
self.lock()
if self._playing:
self._playing = False
self._buffer.Stop()
self.unlock()
if _debug:
print 'return DirectSound stop'
def clear(self):
if _debug:
print 'DirectSound clear'
self.lock()
self._buffer.SetCurrentPosition(0)
self._play_cursor_ring = self._write_cursor_ring = 0
self._play_cursor = self._write_cursor
self._eos_cursor = None
self._next_audio_data = None
del self._events[:]
del self._timestamps[:]
self.unlock()
def refill(self, write_size):
self.lock()
while write_size > 0:
if _debug:
print 'refill, write_size =', write_size
# Get next audio packet (or remains of last one)
if self._next_audio_data:
audio_data = self._next_audio_data
self._next_audio_data = None
else:
audio_data = self.source_group.get_audio_data(write_size)
# Write it, or silence if there are no more packets
if audio_data:
# Add events
for event in audio_data.events:
event_cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((event_cursor, event))
# Add timestamp (at end of this data packet)
ts_cursor = self._write_cursor + audio_data.length
self._timestamps.append(
(ts_cursor, audio_data.timestamp + audio_data.duration))
# Write data
if _debug:
print 'write', audio_data.length
length = min(write_size, audio_data.length)
self.write(audio_data, length)
if audio_data.length:
self._next_audio_data = audio_data
write_size -= length
else:
# Write silence
if self._eos_cursor is None:
self._eos_cursor = self._write_cursor
self._events.append(
(self._eos_cursor, MediaEvent(0, 'on_eos')))
self._events.append(
(self._eos_cursor, MediaEvent(0, 'on_source_group_eos')))
self._events.sort()
if self._write_cursor > self._eos_cursor + self._buffer_size:
self.stop()
else:
self.write(None, write_size)
write_size = 0
self.unlock()
def update_play_cursor(self):
self.lock()
play_cursor_ring = lib.DWORD()
self._buffer.GetCurrentPosition(play_cursor_ring, None)
if play_cursor_ring.value < self._play_cursor_ring:
# Wrapped around
self._play_cursor += self._buffer_size - self._play_cursor_ring
self._play_cursor_ring = 0
self._play_cursor += play_cursor_ring.value - self._play_cursor_ring
self._play_cursor_ring = play_cursor_ring.value
# Dispatch pending events
pending_events = []
while self._events and self._events[0][0] <= self._play_cursor:
_, event = self._events.pop(0)
pending_events.append(event)
if _debug:
print 'Dispatching pending events:', pending_events
print 'Remaining events:', self._events
# Remove expired timestamps
while self._timestamps and self._timestamps[0][0] < self._play_cursor:
del self._timestamps[0]
self.unlock()
for event in pending_events:
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
self.update_play_cursor()
self.lock()
play_cursor = self._play_cursor
write_cursor = self._write_cursor
self.unlock()
return self._buffer_size - max(write_cursor - play_cursor, 0)
def write(self, audio_data, length):
# Pass audio_data=None to write silence
if length == 0:
return 0
self.lock()
p1 = ctypes.c_void_p()
l1 = lib.DWORD()
p2 = ctypes.c_void_p()
l2 = lib.DWORD()
assert 0 < length <= self._buffer_size
self._buffer.Lock(self._write_cursor_ring, length,
ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
assert length == l1.value + l2.value
if audio_data:
ctypes.memmove(p1, audio_data.data, l1.value)
audio_data.consume(l1.value, self.source_group.audio_format)
if l2.value:
ctypes.memmove(p2, audio_data.data, l2.value)
audio_data.consume(l2.value, self.source_group.audio_format)
else:
ctypes.memset(p1, 0, l1.value)
if l2.value:
ctypes.memset(p2, 0, l2.value)
self._buffer.Unlock(p1, l1, p2, l2)
self._write_cursor += length
self._write_cursor_ring += length
self._write_cursor_ring %= self._buffer_size
self.unlock()
def get_time(self):
self.lock()
if self._timestamps:
cursor, ts = self._timestamps[0]
result = ts + (self._play_cursor - cursor) / \
float(self.source_group.audio_format.bytes_per_second)
else:
result = None
self.unlock()
return result
def set_volume(self, volume):
volume = _db(volume)
self.lock()
self._buffer.SetVolume(volume)
self.unlock()
def set_position(self, position):
if self._buffer3d:
x, y, z = position
self.lock()
self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_min_distance(self, min_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_max_distance(self, max_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_pitch(self, pitch):
frequency = int(pitch * self.source_group.audio_format.sample_rate)
self.lock()
self._buffer.SetFrequency(frequency)
self.unlock()
def set_cone_orientation(self, cone_orientation):
if self._buffer3d:
x, y, z = cone_orientation
self.lock()
self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
if self._buffer3d:
self._cone_inner_angle = int(cone_inner_angle)
self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
if self._buffer3d:
self._cone_outer_angle = int(cone_outer_angle)
self._set_cone_angles()
def _set_cone_angles(self):
inner = min(self._cone_inner_angle, self._cone_outer_angle)
outer = max(self._cone_inner_angle, self._cone_outer_angle)
self.lock()
self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
if self._buffer3d:
volume = _db(cone_outer_gain)
self.lock()
self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
self.unlock()
class DirectSoundDriver(AbstractAudioDriver):
def __init__(self):
self._dsound = lib.IDirectSound()
lib.DirectSoundCreate(None, ctypes.byref(self._dsound), None)
# A trick used by mplayer.. use desktop as window handle since it
# would be complex to use pyglet window handles (and what to do when
# application is audio only?).
hwnd = _user32.GetDesktopWindow()
self._dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)
# Create primary buffer with 3D and volume capabilities
self._buffer = lib.IDirectSoundBuffer()
dsbd = lib.DSBUFFERDESC()
dsbd.dwSize = ctypes.sizeof(dsbd)
dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
lib.DSBCAPS_CTRLVOLUME |
lib.DSBCAPS_PRIMARYBUFFER)
self._dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)
# Create listener
self._listener = lib.IDirectSound3DListener()
self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
ctypes.byref(self._listener))
# Create worker thread
self.worker = DirectSoundWorker()
self.worker.start()
def __del__(self):
try:
if self._buffer:
self.delete()
except:
pass
def create_audio_player(self, source_group, player):
return DirectSoundAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
self._buffer.Release()
self._buffer = None
self._listener.Release()
self._listener = None
# Listener API
def _set_volume(self, volume):
self._volume = volume
self._buffer.SetVolume(_db(volume))
def _set_position(self, position):
self._position = position
x, y, z = position
self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
self._set_orientation()
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
self._set_orientation()
def _set_orientation(self):
x, y, z = self._forward_orientation
ux, uy, uz = self._up_orientation
self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
def create_audio_driver():
global driver
driver = DirectSoundDriver()
return driver
# Global driver needed for access to worker thread and _dsound
driver = None | unknown | codeparrot/codeparrot-clean | ||
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Use this class to fork off a thread to recieve event callbacks from the bitbake
server and queue them for the UI to process. This process must be used to avoid
client/server deadlocks.
"""
import socket, threading, pickle, collections
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
class BBUIEventQueue:
def __init__(self, BBServer, clientinfo=("localhost, 0")):
self.eventQueue = []
self.eventQueueLock = threading.Lock()
self.eventQueueNotify = threading.Event()
self.BBServer = BBServer
self.clientinfo = clientinfo
server = UIXMLRPCServer(self.clientinfo)
self.host, self.port = server.socket.getsockname()
server.register_function( self.system_quit, "event.quit" )
server.register_function( self.send_event, "event.sendpickle" )
server.socket.settimeout(1)
self.EventHandle = None
# the event handler registration may fail here due to cooker being in invalid state
# this is a transient situation, and we should retry a couple of times before
# giving up
for count_tries in range(5):
ret = self.BBServer.registerEventHandler(self.host, self.port)
if isinstance(ret, collections.Iterable):
self.EventHandle, error = ret
else:
self.EventHandle = ret
error = ""
if self.EventHandle != None:
break
errmsg = "Could not register UI event handler. Error: %s, host %s, "\
"port %d" % (error, self.host, self.port)
bb.warn("%s, retry" % errmsg)
import time
time.sleep(1)
else:
raise Exception(errmsg)
self.server = server
self.t = threading.Thread()
self.t.setDaemon(True)
self.t.run = self.startCallbackHandler
self.t.start()
def getEvent(self):
self.eventQueueLock.acquire()
if len(self.eventQueue) == 0:
self.eventQueueLock.release()
return None
item = self.eventQueue.pop(0)
if len(self.eventQueue) == 0:
self.eventQueueNotify.clear()
self.eventQueueLock.release()
return item
def waitEvent(self, delay):
self.eventQueueNotify.wait(delay)
return self.getEvent()
def queue_event(self, event):
self.eventQueueLock.acquire()
self.eventQueue.append(event)
self.eventQueueNotify.set()
self.eventQueueLock.release()
def send_event(self, event):
self.queue_event(pickle.loads(event))
def startCallbackHandler(self):
self.server.timeout = 1
bb.utils.set_process_name("UIEventQueue")
while not self.server.quit:
try:
self.server.handle_request()
except Exception as e:
import traceback
logger.error("BBUIEventQueue.startCallbackHandler: Exception while trying to handle request: %s\n%s" % (e, traceback.format_exc()))
self.server.server_close()
def system_quit( self ):
"""
Shut down the callback thread
"""
try:
self.BBServer.unregisterEventHandler(self.EventHandle)
except:
pass
self.server.quit = True
class UIXMLRPCServer (SimpleXMLRPCServer):
def __init__( self, interface ):
self.quit = False
SimpleXMLRPCServer.__init__( self,
interface,
requestHandler=SimpleXMLRPCRequestHandler,
logRequests=False, allow_none=True, use_builtin_types=True)
def get_request(self):
while not self.quit:
try:
sock, addr = self.socket.accept()
sock.settimeout(1)
return (sock, addr)
except socket.timeout:
pass
return (None, None)
def close_request(self, request):
if request is None:
return
SimpleXMLRPCServer.close_request(self, request)
def process_request(self, request, client_address):
if request is None:
return
SimpleXMLRPCServer.process_request(self, request, client_address) | unknown | codeparrot/codeparrot-clean | ||
import { NextRequest, NextResponse } from "next/server";
import { revalidatePath, revalidateTag } from "next/cache";
export async function PUT(request: NextRequest) {
const requestBody = await request.text();
const { paths, tags } = requestBody
? JSON.parse(requestBody)
: { paths: [], tags: [] };
let revalidated = false;
if (
request.headers.get("X-Headless-Secret-Key") !== process.env.HEADLESS_SECRET
) {
return NextResponse.json({ message: "Invalid secret" }, { status: 401 });
}
try {
if (paths && Array.isArray(paths) && paths.length > 0) {
Promise.all(paths.map((path) => revalidatePath(path)));
console.log("Revalidated paths:", paths);
revalidated = true;
}
if (tags && Array.isArray(tags) && tags.length > 0) {
Promise.all(tags.map((tag) => revalidateTag(tag)));
console.log("Revalidated tags:", tags);
revalidated = true;
}
return NextResponse.json({
revalidated,
now: Date.now(),
paths,
tags: tags,
});
} catch (error) {
return NextResponse.json(
{ message: "Error revalidating paths or tags" },
{ status: 500 },
);
}
} | typescript | github | https://github.com/vercel/next.js | examples/cms-wordpress/src/app/api/revalidate/route.ts |
import requests
from flask import current_app, request, jsonify
from flask_cors import cross_origin
from alerta.app.exceptions import ApiError, NoCustomerMatch
from alerta.app.models.customer import Customer
from alerta.app.models.token import Jwt
from alerta.app.auth.utils import create_token
from . import auth
@auth.route('/auth/google', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def google():
access_token_url = 'https://accounts.google.com/o/oauth2/token'
people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
payload = {
'client_id': request.json['clientId'],
'client_secret': current_app.config['OAUTH2_CLIENT_SECRET'],
'redirect_uri': request.json['redirectUri'],
'grant_type': 'authorization_code',
'code': request.json['code'],
}
r = requests.post(access_token_url, data=payload)
token = r.json()
id_token = Jwt.parse(
token['id_token'],
key='',
verify=False,
algorithm='RS256'
)
domain = id_token.email.split('@')[1]
if current_app.config['AUTH_REQUIRED'] and not ('*' in current_app.config['ALLOWED_EMAIL_DOMAINS']
or domain in current_app.config['ALLOWED_EMAIL_DOMAINS']):
raise ApiError("User %s is not authorized" % id_token.email, 403)
# Get Google+ profile for Full name
headers = {'Authorization': 'Bearer ' + token['access_token']}
r = requests.get(people_api_url, headers=headers)
profile = r.json()
if not profile:
raise ApiError("Google+ API is not enabled for this Client ID", 400)
if current_app.config['CUSTOMER_VIEWS']:
try:
customer = Customer.lookup(id_token.email, groups=[domain])
except NoCustomerMatch as e:
raise ApiError(str(e), 403)
else:
customer = None
token = create_token(id_token.subject, profile['name'], id_token.email, provider='google', customer=customer,
orgs=[domain], email=id_token.email, email_verified=id_token.email_verified)
return jsonify(token=token.tokenize) | unknown | codeparrot/codeparrot-clean | ||
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Welcome to Tauri!</title>
</head>
<body>
<h1>Welcome to Tauri!</h1>
</body>
</html> | html | github | https://github.com/tauri-apps/tauri | examples/multiwebview/index.html |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.client.Agent} and related new client APIs.
"""
import cookielib
import zlib
from StringIO import StringIO
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.web import client, error, http_headers
from twisted.web._newclient import RequestNotSent, RequestTransmissionFailed
from twisted.web._newclient import ResponseNeverReceived, ResponseFailed
from twisted.web._newclient import PotentialDataLoss
from twisted.internet import defer, task
from twisted.python.failure import Failure
from twisted.python.components import proxyForInterface
from twisted.test.proto_helpers import StringTransport, MemoryReactorClock
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionRefusedError, ConnectionDone
from twisted.internet.error import ConnectionLost
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.defer import Deferred, succeed, CancelledError
from twisted.internet.endpoints import TCP4ClientEndpoint, SSL4ClientEndpoint
from twisted.web.client import (FileBodyProducer, Request, HTTPConnectionPool,
ResponseDone, _HTTP11ClientFactory)
from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse
from twisted.web.http_headers import Headers
from twisted.web._newclient import HTTP11ClientProtocol, Response
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from zope.interface.declarations import implementer
from twisted.web.iweb import IPolicyForHTTPS
from twisted.python.deprecate import getDeprecationWarningString
from twisted.python.versions import Version
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.error import SchemeNotSupported
try:
from twisted.internet import ssl
from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
except ImportError:
ssl = None
else:
from twisted.internet._sslverify import ClientTLSOptions
class StubHTTPProtocol(Protocol):
"""
A protocol like L{HTTP11ClientProtocol} but which does not actually know
HTTP/1.1 and only collects requests in a list.
@ivar requests: A C{list} of two-tuples. Each time a request is made, a
tuple consisting of the request and the L{Deferred} returned from the
request method is appended to this list.
"""
def __init__(self):
self.requests = []
self.state = 'QUIESCENT'
def request(self, request):
"""
Capture the given request for later inspection.
@return: A L{Deferred} which this code will never fire.
"""
result = Deferred()
self.requests.append((request, result))
return result
class FileConsumer(object):
def __init__(self, outputFile):
self.outputFile = outputFile
def write(self, bytes):
self.outputFile.write(bytes)
class FileBodyProducerTests(TestCase):
"""
Tests for the L{FileBodyProducer} which reads bytes from a file and writes
them to an L{IConsumer}.
"""
def _termination(self):
"""
This method can be used as the C{terminationPredicateFactory} for a
L{Cooperator}. It returns a predicate which immediately returns
C{False}, indicating that no more work should be done this iteration.
This has the result of only allowing one iteration of a cooperative
task to be run per L{Cooperator} iteration.
"""
return lambda: True
def setUp(self):
"""
Create a L{Cooperator} hooked up to an easily controlled, deterministic
scheduler to use with L{FileBodyProducer}.
"""
self._scheduled = []
self.cooperator = task.Cooperator(
self._termination, self._scheduled.append)
def test_interface(self):
"""
L{FileBodyProducer} instances provide L{IBodyProducer}.
"""
self.assertTrue(verifyObject(
IBodyProducer, FileBodyProducer(StringIO(""))))
def test_unknownLength(self):
"""
If the L{FileBodyProducer} is constructed with a file-like object
without either a C{seek} or C{tell} method, its C{length} attribute is
set to C{UNKNOWN_LENGTH}.
"""
class HasSeek(object):
def seek(self, offset, whence):
pass
class HasTell(object):
def tell(self):
pass
producer = FileBodyProducer(HasSeek())
self.assertEqual(UNKNOWN_LENGTH, producer.length)
producer = FileBodyProducer(HasTell())
self.assertEqual(UNKNOWN_LENGTH, producer.length)
def test_knownLength(self):
"""
If the L{FileBodyProducer} is constructed with a file-like object with
both C{seek} and C{tell} methods, its C{length} attribute is set to the
size of the file as determined by those methods.
"""
inputBytes = "here are some bytes"
inputFile = StringIO(inputBytes)
inputFile.seek(5)
producer = FileBodyProducer(inputFile)
self.assertEqual(len(inputBytes) - 5, producer.length)
self.assertEqual(inputFile.tell(), 5)
def test_defaultCooperator(self):
"""
If no L{Cooperator} instance is passed to L{FileBodyProducer}, the
global cooperator is used.
"""
producer = FileBodyProducer(StringIO(""))
self.assertEqual(task.cooperate, producer._cooperate)
def test_startProducing(self):
"""
L{FileBodyProducer.startProducing} starts writing bytes from the input
file to the given L{IConsumer} and returns a L{Deferred} which fires
when they have all been written.
"""
expectedResult = "hello, world"
readSize = 3
output = StringIO()
consumer = FileConsumer(output)
producer = FileBodyProducer(
StringIO(expectedResult), self.cooperator, readSize)
complete = producer.startProducing(consumer)
for i in range(len(expectedResult) // readSize + 1):
self._scheduled.pop(0)()
self.assertEqual([], self._scheduled)
self.assertEqual(expectedResult, output.getvalue())
self.assertEqual(None, self.successResultOf(complete))
def test_inputClosedAtEOF(self):
"""
When L{FileBodyProducer} reaches end-of-file on the input file given to
it, the input file is closed.
"""
readSize = 4
inputBytes = "some friendly bytes"
inputFile = StringIO(inputBytes)
producer = FileBodyProducer(inputFile, self.cooperator, readSize)
consumer = FileConsumer(StringIO())
producer.startProducing(consumer)
for i in range(len(inputBytes) // readSize + 2):
self._scheduled.pop(0)()
self.assertTrue(inputFile.closed)
def test_failedReadWhileProducing(self):
"""
If a read from the input file fails while producing bytes to the
consumer, the L{Deferred} returned by
L{FileBodyProducer.startProducing} fires with a L{Failure} wrapping
that exception.
"""
class BrokenFile(object):
def read(self, count):
raise IOError("Simulated bad thing")
producer = FileBodyProducer(BrokenFile(), self.cooperator)
complete = producer.startProducing(FileConsumer(StringIO()))
self._scheduled.pop(0)()
self.failureResultOf(complete).trap(IOError)
def test_stopProducing(self):
"""
L{FileBodyProducer.stopProducing} stops the underlying L{IPullProducer}
and the cooperative task responsible for calling C{resumeProducing} and
closes the input file but does not cause the L{Deferred} returned by
C{startProducing} to fire.
"""
expectedResult = "hello, world"
readSize = 3
output = StringIO()
consumer = FileConsumer(output)
inputFile = StringIO(expectedResult)
producer = FileBodyProducer(
inputFile, self.cooperator, readSize)
complete = producer.startProducing(consumer)
producer.stopProducing()
self.assertTrue(inputFile.closed)
self._scheduled.pop(0)()
self.assertEqual("", output.getvalue())
self.assertNoResult(complete)
def test_pauseProducing(self):
"""
L{FileBodyProducer.pauseProducing} temporarily suspends writing bytes
from the input file to the given L{IConsumer}.
"""
expectedResult = "hello, world"
readSize = 5
output = StringIO()
consumer = FileConsumer(output)
producer = FileBodyProducer(
StringIO(expectedResult), self.cooperator, readSize)
complete = producer.startProducing(consumer)
self._scheduled.pop(0)()
self.assertEqual(output.getvalue(), expectedResult[:5])
producer.pauseProducing()
# Sort of depends on an implementation detail of Cooperator: even
# though the only task is paused, there's still a scheduled call. If
# this were to go away because Cooperator became smart enough to cancel
# this call in this case, that would be fine.
self._scheduled.pop(0)()
# Since the producer is paused, no new data should be here.
self.assertEqual(output.getvalue(), expectedResult[:5])
self.assertEqual([], self._scheduled)
self.assertNoResult(complete)
def test_resumeProducing(self):
"""
L{FileBodyProducer.resumeProducing} re-commences writing bytes from the
input file to the given L{IConsumer} after it was previously paused
with L{FileBodyProducer.pauseProducing}.
"""
expectedResult = "hello, world"
readSize = 5
output = StringIO()
consumer = FileConsumer(output)
producer = FileBodyProducer(
StringIO(expectedResult), self.cooperator, readSize)
producer.startProducing(consumer)
self._scheduled.pop(0)()
self.assertEqual(expectedResult[:readSize], output.getvalue())
producer.pauseProducing()
producer.resumeProducing()
self._scheduled.pop(0)()
self.assertEqual(expectedResult[:readSize * 2], output.getvalue())
class FakeReactorAndConnectMixin:
"""
A test mixin providing a testable C{Reactor} class and a dummy C{connect}
method which allows instances to pretend to be endpoints.
"""
Reactor = MemoryReactorClock
@implementer(IPolicyForHTTPS)
class StubPolicy(object):
"""
A stub policy for HTTPS URIs which allows HTTPS tests to run even if
pyOpenSSL isn't installed.
"""
def creatorForNetloc(self, hostname, port):
"""
Don't actually do anything.
@param hostname: ignored
@param port: ignored
"""
class StubEndpoint(object):
"""
Endpoint that wraps existing endpoint, substitutes StubHTTPProtocol, and
resulting protocol instances are attached to the given test case.
"""
def __init__(self, endpoint, testCase):
self.endpoint = endpoint
self.testCase = testCase
self.factory = _HTTP11ClientFactory(lambda p: None)
self.protocol = StubHTTPProtocol()
self.factory.buildProtocol = lambda addr: self.protocol
def connect(self, ignoredFactory):
self.testCase.protocol = self.protocol
self.endpoint.connect(self.factory)
return succeed(self.protocol)
def buildAgentForWrapperTest(self, reactor):
"""
Return an Agent suitable for use in tests that wrap the Agent and want
both a fake reactor and StubHTTPProtocol.
"""
agent = client.Agent(reactor, self.StubPolicy())
_oldGetEndpoint = agent._getEndpoint
agent._getEndpoint = lambda *args: (
self.StubEndpoint(_oldGetEndpoint(*args), self))
return agent
def connect(self, factory):
"""
Fake implementation of an endpoint which synchronously
succeeds with an instance of L{StubHTTPProtocol} for ease of
testing.
"""
protocol = StubHTTPProtocol()
protocol.makeConnection(None)
self.protocol = protocol
return succeed(protocol)
class DummyEndpoint(object):
"""
An endpoint that uses a fake transport.
"""
def connect(self, factory):
protocol = factory.buildProtocol(None)
protocol.makeConnection(StringTransport())
return succeed(protocol)
class BadEndpoint(object):
"""
An endpoint that shouldn't be called.
"""
def connect(self, factory):
raise RuntimeError("This endpoint should not have been used.")
class DummyFactory(Factory):
"""
Create C{StubHTTPProtocol} instances.
"""
def __init__(self, quiescentCallback):
pass
protocol = StubHTTPProtocol
class HTTPConnectionPoolTests(TestCase, FakeReactorAndConnectMixin):
"""
Tests for the L{HTTPConnectionPool} class.
"""
def setUp(self):
self.fakeReactor = self.Reactor()
self.pool = HTTPConnectionPool(self.fakeReactor)
self.pool._factory = DummyFactory
# The retry code path is tested in HTTPConnectionPoolRetryTests:
self.pool.retryAutomatically = False
def test_getReturnsNewIfCacheEmpty(self):
"""
If there are no cached connections,
L{HTTPConnectionPool.getConnection} returns a new connection.
"""
self.assertEqual(self.pool._connections, {})
def gotConnection(conn):
self.assertIsInstance(conn, StubHTTPProtocol)
# The new connection is not stored in the pool:
self.assertNotIn(conn, self.pool._connections.values())
unknownKey = 12245
d = self.pool.getConnection(unknownKey, DummyEndpoint())
return d.addCallback(gotConnection)
def test_putStartsTimeout(self):
"""
If a connection is put back to the pool, a 240-sec timeout is started.
When the timeout hits, the connection is closed and removed from the
pool.
"""
# We start out with one cached connection:
protocol = StubHTTPProtocol()
protocol.makeConnection(StringTransport())
self.pool._putConnection(("http", "example.com", 80), protocol)
# Connection is in pool, still not closed:
self.assertEqual(protocol.transport.disconnecting, False)
self.assertIn(protocol,
self.pool._connections[("http", "example.com", 80)])
# Advance 239 seconds, still not closed:
self.fakeReactor.advance(239)
self.assertEqual(protocol.transport.disconnecting, False)
self.assertIn(protocol,
self.pool._connections[("http", "example.com", 80)])
self.assertIn(protocol, self.pool._timeouts)
# Advance past 240 seconds, connection will be closed:
self.fakeReactor.advance(1.1)
self.assertEqual(protocol.transport.disconnecting, True)
self.assertNotIn(protocol,
self.pool._connections[("http", "example.com", 80)])
self.assertNotIn(protocol, self.pool._timeouts)
def test_putExceedsMaxPersistent(self):
"""
If an idle connection is put back in the cache and the max number of
persistent connections has been exceeded, one of the connections is
closed and removed from the cache.
"""
pool = self.pool
# We start out with two cached connection, the max:
origCached = [StubHTTPProtocol(), StubHTTPProtocol()]
for p in origCached:
p.makeConnection(StringTransport())
pool._putConnection(("http", "example.com", 80), p)
self.assertEqual(pool._connections[("http", "example.com", 80)],
origCached)
timeouts = pool._timeouts.copy()
# Now we add another one:
newProtocol = StubHTTPProtocol()
newProtocol.makeConnection(StringTransport())
pool._putConnection(("http", "example.com", 80), newProtocol)
# The oldest cached connections will be removed and disconnected:
newCached = pool._connections[("http", "example.com", 80)]
self.assertEqual(len(newCached), 2)
self.assertEqual(newCached, [origCached[1], newProtocol])
self.assertEqual([p.transport.disconnecting for p in newCached],
[False, False])
self.assertEqual(origCached[0].transport.disconnecting, True)
self.assertTrue(timeouts[origCached[0]].cancelled)
self.assertNotIn(origCached[0], pool._timeouts)
def test_maxPersistentPerHost(self):
"""
C{maxPersistentPerHost} is enforced per C{(scheme, host, port)}:
different keys have different max connections.
"""
def addProtocol(scheme, host, port):
p = StubHTTPProtocol()
p.makeConnection(StringTransport())
self.pool._putConnection((scheme, host, port), p)
return p
persistent = []
persistent.append(addProtocol("http", "example.com", 80))
persistent.append(addProtocol("http", "example.com", 80))
addProtocol("https", "example.com", 443)
addProtocol("http", "www2.example.com", 80)
self.assertEqual(
self.pool._connections[("http", "example.com", 80)], persistent)
self.assertEqual(
len(self.pool._connections[("https", "example.com", 443)]), 1)
self.assertEqual(
len(self.pool._connections[("http", "www2.example.com", 80)]), 1)
    def test_getCachedConnection(self):
        """
        Getting an address which has a cached connection returns the cached
        connection, removes it from the cache and cancels its timeout.
        """
        # We start out with one cached connection:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        self.pool._putConnection(("http", "example.com", 80), protocol)
        def gotConnection(conn):
            # We got the cached connection:
            self.assertIdentical(protocol, conn)
            self.assertNotIn(
                conn, self.pool._connections[("http", "example.com", 80)])
            # And the timeout was cancelled: advancing past the idle timeout
            # no longer disconnects the retrieved connection.
            self.fakeReactor.advance(241)
            self.assertEqual(conn.transport.disconnecting, False)
            self.assertNotIn(conn, self.pool._timeouts)
        # NOTE(review): BadEndpoint presumably fails if its connect() is
        # used, proving a cache hit opens no new connection -- confirm
        # against its definition earlier in this file.
        return self.pool.getConnection(("http", "example.com", 80),
                                       BadEndpoint(),
                                       ).addCallback(gotConnection)
    def test_newConnection(self):
        """
        The pool's C{_newConnection} method constructs a new connection.
        """
        # We start out with one cached connection:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        # An arbitrary key; _newConnection treats the key opaquely:
        key = 12245
        self.pool._putConnection(key, protocol)
        def gotConnection(newConnection):
            # We got a new connection:
            self.assertNotIdentical(protocol, newConnection)
            # And the old connection is still there:
            self.assertIn(protocol, self.pool._connections[key])
            # While the new connection is not cached anywhere:
            self.assertNotIn(newConnection, self.pool._connections.values())
        d = self.pool._newConnection(key, DummyEndpoint())
        return d.addCallback(gotConnection)
    def test_getSkipsDisconnected(self):
        """
        When getting connections out of the cache, disconnected connections
        are removed and not returned.
        """
        pool = self.pool
        key = ("http", "example.com", 80)
        # We start out with two cached connections, the max:
        origCached = [StubHTTPProtocol(), StubHTTPProtocol()]
        for p in origCached:
            p.makeConnection(StringTransport())
            pool._putConnection(key, p)
        self.assertEqual(pool._connections[key], origCached)
        # We close the first one:
        origCached[0].state = "DISCONNECTED"
        # Now, when we retrieve connections we should get the *second* one:
        result = []
        self.pool.getConnection(key,
                                BadEndpoint()).addCallback(result.append)
        self.assertIdentical(result[0], origCached[1])
        # And both the disconnected and the returned connections should be
        # out of the cache:
        self.assertEqual(pool._connections[key], [])
        self.assertEqual(pool._timeouts, {})
def test_putNotQuiescent(self):
"""
If a non-quiescent connection is put back in the cache, an error is
logged.
"""
protocol = StubHTTPProtocol()
# By default state is QUIESCENT
self.assertEqual(protocol.state, "QUIESCENT")
protocol.state = "NOTQUIESCENT"
self.pool._putConnection(("http", "example.com", 80), protocol)
exc, = self.flushLoggedErrors(RuntimeError)
self.assertEqual(
exc.value.args[0],
"BUG: Non-quiescent protocol added to connection pool.")
self.assertIdentical(None, self.pool._connections.get(
("http", "example.com", 80)))
    def test_getUsesQuiescentCallback(self):
        """
        When L{HTTPConnectionPool.getConnection} connects, it returns a
        C{Deferred} that fires with an instance of L{HTTP11ClientProtocol}
        that has the correct quiescent callback attached. When this callback
        is called the protocol is returned to the cache correctly, using the
        right key.
        """
        class StringEndpoint(object):
            # Synchronously "connects" the factory's protocol to an
            # in-memory transport.
            def connect(self, factory):
                p = factory.buildProtocol(None)
                p.makeConnection(StringTransport())
                return succeed(p)
        pool = HTTPConnectionPool(self.fakeReactor, True)
        pool.retryAutomatically = False
        result = []
        key = "a key"
        pool.getConnection(
            key, StringEndpoint()).addCallback(
            result.append)
        protocol = result[0]
        self.assertIsInstance(protocol, HTTP11ClientProtocol)
        # Now that we have a protocol instance, let's try to put it back in
        # the pool, simulating what happens when a response completes:
        protocol._state = "QUIESCENT"
        protocol._quiescentCallback(protocol)
        # If we try to retrieve a connection to the same destination again,
        # we should get the same protocol, because it should've been added
        # back to the pool:
        result2 = []
        pool.getConnection(
            key, StringEndpoint()).addCallback(
            result2.append)
        self.assertIdentical(result2[0], protocol)
    def test_closeCachedConnections(self):
        """
        L{HTTPConnectionPool.closeCachedConnections} closes all cached
        connections and removes them from the cache. It returns a Deferred
        that fires when they have all lost their connections.
        """
        persistent = []
        def addProtocol(scheme, host, port):
            # Use a real HTTP11ClientProtocol (not a stub) so that the
            # connectionLost calls below behave like production code.
            p = HTTP11ClientProtocol()
            p.makeConnection(StringTransport())
            self.pool._putConnection((scheme, host, port), p)
            persistent.append(p)
        addProtocol("http", "example.com", 80)
        addProtocol("http", "www2.example.com", 80)
        doneDeferred = self.pool.closeCachedConnections()
        # Connections have begun disconnecting:
        for p in persistent:
            self.assertEqual(p.transport.disconnecting, True)
        self.assertEqual(self.pool._connections, {})
        # All timeouts were cancelled and removed:
        for dc in self.fakeReactor.getDelayedCalls():
            self.assertEqual(dc.cancelled, True)
        self.assertEqual(self.pool._timeouts, {})
        # Returned Deferred fires only when all connections have been closed:
        result = []
        doneDeferred.addCallback(result.append)
        self.assertEqual(result, [])
        persistent[0].connectionLost(Failure(ConnectionDone()))
        self.assertEqual(result, [])
        persistent[1].connectionLost(Failure(ConnectionDone()))
        self.assertEqual(result, [None])
def test_cancelGetConnectionCancelsEndpointConnect(self):
"""
Cancelling the C{Deferred} returned from
L{HTTPConnectionPool.getConnection} cancels the C{Deferred} returned
by opening a new connection with the given endpoint.
"""
self.assertEqual(self.pool._connections, {})
connectionResult = Deferred()
class Endpoint:
def connect(self, factory):
return connectionResult
d = self.pool.getConnection(12345, Endpoint())
d.cancel()
self.assertEqual(self.failureResultOf(connectionResult).type,
CancelledError)
class AgentTestsMixin(object):
    """
    Tests shared by every L{IAgent} implementation.
    """
    def test_interface(self):
        """
        The agent object provides L{IAgent}.
        """
        agent = self.makeAgent()
        self.assertTrue(verifyObject(IAgent, agent))
class AgentTests(TestCase, FakeReactorAndConnectMixin, AgentTestsMixin):
    """
    Tests for the new HTTP client API provided by L{Agent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.Agent} instance
        """
        return client.Agent(self.reactor)
    def setUp(self):
        """
        Create an L{Agent} wrapped around a fake reactor.
        """
        self.reactor = self.Reactor()
        self.agent = self.makeAgent()
    def test_defaultPool(self):
        """
        If no pool is passed in, the L{Agent} creates a non-persistent pool.
        """
        agent = client.Agent(self.reactor)
        self.assertIsInstance(agent._pool, HTTPConnectionPool)
        self.assertEqual(agent._pool.persistent, False)
        self.assertIdentical(agent._reactor, agent._pool._reactor)
    def test_persistent(self):
        """
        If C{persistent} is set to C{True} on the L{HTTPConnectionPool} (the
        default), C{Request}s are created with their C{persistent} flag set to
        C{True}.
        """
        pool = HTTPConnectionPool(self.reactor)
        agent = client.Agent(self.reactor, pool=pool)
        # `self` doubles as the endpoint here; FakeReactorAndConnectMixin
        # presumably provides a connect() that records the protocol as
        # self.protocol -- TODO confirm against the mixin definition.
        agent._getEndpoint = lambda *args: self
        agent.request("GET", "http://127.0.0.1")
        self.assertEqual(self.protocol.requests[0][0].persistent, True)
    def test_nonPersistent(self):
        """
        If C{persistent} is set to C{False} when creating the
        L{HTTPConnectionPool}, C{Request}s are created with their
        C{persistent} flag set to C{False}.
        Elsewhere in the tests for the underlying HTTP code we ensure that
        this will result in the disconnection of the HTTP protocol once the
        request is done, so that the connection will not be returned to the
        pool.
        """
        pool = HTTPConnectionPool(self.reactor, persistent=False)
        agent = client.Agent(self.reactor, pool=pool)
        agent._getEndpoint = lambda *args: self
        agent.request("GET", "http://127.0.0.1")
        self.assertEqual(self.protocol.requests[0][0].persistent, False)
    def test_connectUsesConnectionPool(self):
        """
        When a connection is made by the Agent, it uses its pool's
        C{getConnection} method to do so, with the endpoint returned by
        C{self._getEndpoint}. The key used is C{(scheme, host, port)}.
        """
        endpoint = DummyEndpoint()
        class MyAgent(client.Agent):
            # `this` is the agent instance; the closed-over `self` is the
            # test case, used for in-line assertions.
            def _getEndpoint(this, scheme, host, port):
                self.assertEqual((scheme, host, port),
                                 ("http", "foo", 80))
                return endpoint
        class DummyPool(object):
            connected = False
            persistent = False
            def getConnection(this, key, ep):
                this.connected = True
                self.assertEqual(ep, endpoint)
                # This is the key the default Agent uses, others will have
                # different keys:
                self.assertEqual(key, ("http", "foo", 80))
                return defer.succeed(StubHTTPProtocol())
        pool = DummyPool()
        agent = MyAgent(self.reactor, pool=pool)
        self.assertIdentical(pool, agent._pool)
        headers = http_headers.Headers()
        headers.addRawHeader("host", "foo")
        bodyProducer = object()
        agent.request('GET', 'http://foo/',
                      bodyProducer=bodyProducer, headers=headers)
        self.assertEqual(agent._pool.connected, True)
    def test_unsupportedScheme(self):
        """
        L{Agent.request} returns a L{Deferred} which fails with
        L{SchemeNotSupported} if the scheme of the URI passed to it is not
        C{'http'}.
        """
        return self.assertFailure(
            self.agent.request('GET', 'mailto:alice@example.com'),
            SchemeNotSupported)
    def test_connectionFailed(self):
        """
        The L{Deferred} returned by L{Agent.request} fires with a L{Failure} if
        the TCP connection attempt fails.
        """
        result = self.agent.request('GET', 'http://foo/')
        # Cause the connection to be refused
        host, port, factory = self.reactor.tcpClients.pop()[:3]
        factory.clientConnectionFailed(None, Failure(ConnectionRefusedError()))
        return self.assertFailure(result, ConnectionRefusedError)
    def test_connectHTTP(self):
        """
        L{Agent._getEndpoint} return a C{TCP4ClientEndpoint} when passed a
        scheme of C{'http'}.
        """
        expectedHost = 'example.com'
        expectedPort = 1234
        endpoint = self.agent._getEndpoint('http', expectedHost, expectedPort)
        self.assertEqual(endpoint._host, expectedHost)
        self.assertEqual(endpoint._port, expectedPort)
        self.assertIsInstance(endpoint, TCP4ClientEndpoint)
    def test_connectHTTPSCustomContextFactory(self):
        """
        If a context factory is passed to L{Agent.__init__} it will be used to
        determine the SSL parameters for HTTPS requests. When an HTTPS request
        is made, the hostname and port number of the request URL will be passed
        to the context factory's C{getContext} method. The resulting context
        object will be used to establish the SSL connection.
        """
        expectedHost = 'example.org'
        expectedPort = 20443
        expectedContext = object()
        contextArgs = []
        class StubWebContextFactory(object):
            def getContext(self, hostname, port):
                contextArgs.append((hostname, port))
                return expectedContext
        agent = client.Agent(self.reactor, StubWebContextFactory())
        endpoint = agent._getEndpoint('https', expectedHost, expectedPort)
        contextFactory = endpoint._sslContextFactory
        context = contextFactory.getContext()
        self.assertEqual(context, expectedContext)
        self.assertEqual(contextArgs, [(expectedHost, expectedPort)])
    def test_hostProvided(self):
        """
        If C{None} is passed to L{Agent.request} for the C{headers} parameter,
        a L{Headers} instance is created for the request and a I{Host} header
        added to it.
        """
        self.agent._getEndpoint = lambda *args: self
        self.agent.request(
            'GET', 'http://example.com/foo?bar')
        req, res = self.protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('host'), ['example.com'])
    def test_hostOverride(self):
        """
        If the headers passed to L{Agent.request} includes a value for the
        I{Host} header, that value takes precedence over the one which would
        otherwise be automatically provided.
        """
        headers = http_headers.Headers({'foo': ['bar'], 'host': ['quux']})
        self.agent._getEndpoint = lambda *args: self
        self.agent.request(
            'GET', 'http://example.com/foo?bar', headers)
        req, res = self.protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('host'), ['quux'])
    def test_headersUnmodified(self):
        """
        If a I{Host} header must be added to the request, the L{Headers}
        instance passed to L{Agent.request} is not modified.
        """
        headers = http_headers.Headers()
        self.agent._getEndpoint = lambda *args: self
        self.agent.request(
            'GET', 'http://example.com/foo', headers)
        protocol = self.protocol
        # The request should have been issued.
        self.assertEqual(len(protocol.requests), 1)
        # And the headers object passed in should not have changed.
        self.assertEqual(headers, http_headers.Headers())
    def test_hostValueStandardHTTP(self):
        """
        When passed a scheme of C{'http'} and a port of C{80},
        L{Agent._computeHostValue} returns a string giving just
        the host name passed to it.
        """
        self.assertEqual(
            self.agent._computeHostValue('http', 'example.com', 80),
            'example.com')
    def test_hostValueNonStandardHTTP(self):
        """
        When passed a scheme of C{'http'} and a port other than C{80},
        L{Agent._computeHostValue} returns a string giving the
        host passed to it joined together with the port number by C{":"}.
        """
        self.assertEqual(
            self.agent._computeHostValue('http', 'example.com', 54321),
            'example.com:54321')
    def test_hostValueStandardHTTPS(self):
        """
        When passed a scheme of C{'https'} and a port of C{443},
        L{Agent._computeHostValue} returns a string giving just
        the host name passed to it.
        """
        self.assertEqual(
            self.agent._computeHostValue('https', 'example.com', 443),
            'example.com')
    def test_hostValueNonStandardHTTPS(self):
        """
        When passed a scheme of C{'https'} and a port other than C{443},
        L{Agent._computeHostValue} returns a string giving the
        host passed to it joined together with the port number by C{":"}.
        """
        self.assertEqual(
            self.agent._computeHostValue('https', 'example.com', 54321),
            'example.com:54321')
    def test_request(self):
        """
        L{Agent.request} establishes a new connection to the host indicated by
        the host part of the URI passed to it and issues a request using the
        method, the path portion of the URI, the headers, and the body producer
        passed to it. It returns a L{Deferred} which fires with an
        L{IResponse} from the server.
        """
        self.agent._getEndpoint = lambda *args: self
        headers = http_headers.Headers({'foo': ['bar']})
        # Just going to check the body for identity, so it doesn't need to be
        # real.
        body = object()
        self.agent.request(
            'GET', 'http://example.com:1234/foo?bar', headers, body)
        protocol = self.protocol
        # The request should be issued.
        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        self.assertIsInstance(req, Request)
        self.assertEqual(req.method, 'GET')
        self.assertEqual(req.uri, '/foo?bar')
        self.assertEqual(
            req.headers,
            http_headers.Headers({'foo': ['bar'],
                                  'host': ['example.com:1234']}))
        self.assertIdentical(req.bodyProducer, body)
    def test_connectTimeout(self):
        """
        L{Agent} takes a C{connectTimeout} argument which is forwarded to the
        following C{connectTCP} call.
        """
        agent = client.Agent(self.reactor, connectTimeout=5)
        agent.request('GET', 'http://foo/')
        timeout = self.reactor.tcpClients.pop()[3]
        self.assertEqual(5, timeout)
    def test_connectSSLTimeout(self):
        """
        L{Agent} takes a C{connectTimeout} argument which is forwarded to the
        following C{connectSSL} call.
        """
        agent = client.Agent(self.reactor, self.StubPolicy(), connectTimeout=5)
        agent.request('GET', 'https://foo/')
        timeout = self.reactor.sslClients.pop()[4]
        self.assertEqual(5, timeout)
    def test_bindAddress(self):
        """
        L{Agent} takes a C{bindAddress} argument which is forwarded to the
        following C{connectTCP} call.
        """
        agent = client.Agent(self.reactor, bindAddress='192.168.0.1')
        agent.request('GET', 'http://foo/')
        address = self.reactor.tcpClients.pop()[4]
        self.assertEqual('192.168.0.1', address)
    def test_bindAddressSSL(self):
        """
        L{Agent} takes a C{bindAddress} argument which is forwarded to the
        following C{connectSSL} call.
        """
        agent = client.Agent(self.reactor, self.StubPolicy(),
                             bindAddress='192.168.0.1')
        agent.request('GET', 'https://foo/')
        address = self.reactor.sslClients.pop()[5]
        self.assertEqual('192.168.0.1', address)
    def test_responseIncludesRequest(self):
        """
        L{Response}s returned by L{Agent.request} have a reference to the
        L{Request} that was originally issued.
        """
        uri = b'http://example.com/'
        agent = self.buildAgentForWrapperTest(self.reactor)
        d = agent.request('GET', uri)
        # The request should be issued.
        self.assertEqual(len(self.protocol.requests), 1)
        req, res = self.protocol.requests.pop()
        self.assertIsInstance(req, Request)
        resp = client.Response._construct(
            ('HTTP', 1, 1),
            200,
            'OK',
            client.Headers({}),
            None,
            req)
        res.callback(resp)
        response = self.successResultOf(d)
        self.assertEqual(
            (response.request.method, response.request.absoluteURI,
             response.request.headers),
            (req.method, req.absoluteURI, req.headers))
    def test_requestAbsoluteURI(self):
        """
        L{Request.absoluteURI} is the absolute URI of the request.
        """
        uri = b'http://example.com/foo;1234?bar#frag'
        agent = self.buildAgentForWrapperTest(self.reactor)
        agent.request(b'GET', uri)
        # The request should be issued.
        self.assertEqual(len(self.protocol.requests), 1)
        req, res = self.protocol.requests.pop()
        self.assertIsInstance(req, Request)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(req.absoluteURI, uri)
    def test_requestMissingAbsoluteURI(self):
        """
        L{Request.absoluteURI} is C{None} if L{Request._parsedURI} is C{None}.
        """
        request = client.Request(b'FOO', b'/', client.Headers(), None)
        self.assertIdentical(request.absoluteURI, None)
class AgentHTTPSTests(TestCase, FakeReactorAndConnectMixin):
    """
    Tests for the new HTTP client API that depends on SSL.
    """
    if ssl is None:
        skip = "SSL not present, cannot run SSL tests"
    def makeEndpoint(self, host='example.com', port=443):
        """
        Create an L{Agent} with an https scheme and return its endpoint
        created according to the arguments.
        @param host: The host for the endpoint.
        @type host: L{bytes}
        @param port: The port for the endpoint.
        @type port: L{int}
        @return: An endpoint of an L{Agent} constructed according to args.
        @rtype: L{SSL4ClientEndpoint}
        """
        return client.Agent(self.Reactor())._getEndpoint(b'https', host, port)
    def test_endpointType(self):
        """
        L{Agent._getEndpoint} return a L{SSL4ClientEndpoint} when passed a
        scheme of C{'https'}.
        """
        self.assertIsInstance(self.makeEndpoint(), SSL4ClientEndpoint)
    def test_hostArgumentIsRespected(self):
        """
        If a host is passed, the endpoint respects it.
        """
        expectedHost = 'example.com'
        endpoint = self.makeEndpoint(host=expectedHost)
        self.assertEqual(endpoint._host, expectedHost)
    def test_portArgumentIsRespected(self):
        """
        If a port is passed, the endpoint respects it.
        """
        expectedPort = 4321
        endpoint = self.makeEndpoint(port=expectedPort)
        self.assertEqual(endpoint._port, expectedPort)
    def test_contextFactoryType(self):
        """
        L{Agent} wraps its connection creator creator and uses modern TLS APIs.
        """
        endpoint = self.makeEndpoint()
        contextFactory = endpoint._sslContextFactory
        self.assertIsInstance(contextFactory, ClientTLSOptions)
        self.assertEqual(contextFactory._hostname, u"example.com")
    def test_connectHTTPSCustomConnectionCreator(self):
        """
        If a custom L{WebClientConnectionCreator}-like object is passed to
        L{Agent.__init__} it will be used to determine the SSL parameters for
        HTTPS requests. When an HTTPS request is made, the hostname and port
        number of the request URL will be passed to the connection creator's
        C{creatorForNetloc} method. The resulting context object will be used
        to establish the SSL connection.
        """
        expectedHost = 'example.org'
        expectedPort = 20443
        class JustEnoughConnection(object):
            # Minimal stand-in for an OpenSSL connection object; records
            # which lifecycle methods the TLS machinery invokes.
            handshakeStarted = False
            connectState = False
            def do_handshake(self):
                """
                The handshake started. Record that fact.
                """
                self.handshakeStarted = True
            def set_connect_state(self):
                """
                The connection started. Record that fact.
                """
                self.connectState = True
        contextArgs = []
        @implementer(IOpenSSLClientConnectionCreator)
        class JustEnoughCreator(object):
            def __init__(self, hostname, port):
                self.hostname = hostname
                self.port = port
            def clientConnectionForTLS(self, tlsProtocol):
                """
                Implement L{IOpenSSLClientConnectionCreator}.
                @param tlsProtocol: The TLS protocol.
                @type tlsProtocol: L{TLSMemoryBIOProtocol}
                @return: C{expectedConnection}
                """
                contextArgs.append((tlsProtocol, self.hostname, self.port))
                return expectedConnection
        expectedConnection = JustEnoughConnection()
        @implementer(IPolicyForHTTPS)
        class StubBrowserLikePolicyForHTTPS(object):
            def creatorForNetloc(self, hostname, port):
                """
                Emulate L{BrowserLikePolicyForHTTPS}.
                @param hostname: The hostname to verify.
                @type hostname: L{unicode}
                @param port: The port number.
                @type port: L{int}
                @return: a stub L{IOpenSSLClientConnectionCreator}
                @rtype: L{JustEnoughCreator}
                """
                return JustEnoughCreator(hostname, port)
        expectedCreatorCreator = StubBrowserLikePolicyForHTTPS()
        reactor = self.Reactor()
        agent = client.Agent(reactor, expectedCreatorCreator)
        endpoint = agent._getEndpoint('https', expectedHost, expectedPort)
        endpoint.connect(Factory.forProtocol(Protocol))
        # Drive the TLS handshake by hand over an in-memory transport,
        # using the factory and context factory the endpoint handed to the
        # fake reactor's connectSSL:
        passedFactory = reactor.sslClients[-1][2]
        passedContextFactory = reactor.sslClients[-1][3]
        tlsFactory = TLSMemoryBIOFactory(
            passedContextFactory, True, passedFactory
        )
        tlsProtocol = tlsFactory.buildProtocol(None)
        tlsProtocol.makeConnection(StringTransport())
        tls = contextArgs[0][0]
        self.assertIsInstance(tls, TLSMemoryBIOProtocol)
        self.assertEqual(contextArgs[0][1:], (expectedHost, expectedPort))
        self.assertTrue(expectedConnection.handshakeStarted)
        self.assertTrue(expectedConnection.connectState)
    def test_deprecatedDuckPolicy(self):
        """
        Passing something that duck-types I{like} a L{web client context
        factory <twisted.web.client.WebClientContextFactory>} - something that
        does not provide L{IPolicyForHTTPS} - to L{Agent} emits a
        L{DeprecationWarning} even if you don't actually C{import
        WebClientContextFactory} to do it.
        """
        def warnMe():
            client.Agent(MemoryReactorClock(),
                         "does-not-provide-IPolicyForHTTPS")
        warnMe()
        warnings = self.flushWarnings([warnMe])
        self.assertEqual(len(warnings), 1)
        [warning] = warnings
        self.assertEqual(warning['category'], DeprecationWarning)
        self.assertEqual(
            warning['message'],
            "'does-not-provide-IPolicyForHTTPS' was passed as the HTTPS "
            "policy for an Agent, but it does not provide IPolicyForHTTPS. "
            "Since Twisted 14.0, you must pass a provider of IPolicyForHTTPS."
        )
class WebClientContextFactoryTests(TestCase):
    """
    Tests for the context factory wrapper for web clients
    L{twisted.web.client.WebClientContextFactory}.
    """
    def setUp(self):
        """
        Get WebClientContextFactory while quashing its deprecation warning.
        """
        # Imported here (not at module scope) so the DeprecationWarning is
        # emitted inside this method and can be flushed against it below.
        from twisted.web.client import WebClientContextFactory
        self.warned = self.flushWarnings([WebClientContextFactoryTests.setUp])
        self.webClientContextFactory = WebClientContextFactory
    def test_deprecated(self):
        """
        L{twisted.web.client.WebClientContextFactory} is deprecated. Importing
        it displays a warning.
        """
        self.assertEqual(len(self.warned), 1)
        [warning] = self.warned
        self.assertEqual(warning['category'], DeprecationWarning)
        self.assertEqual(
            warning['message'],
            getDeprecationWarningString(
                self.webClientContextFactory, Version("Twisted", 14, 0, 0),
                replacement=BrowserLikePolicyForHTTPS,
            )
            # See https://twistedmatrix.com/trac/ticket/7242
            .replace(";", ":")
        )
    def test_missingSSL(self):
        """
        If C{getContext} is called and SSL is not available, raise
        L{NotImplementedError}.
        """
        self.assertRaises(
            NotImplementedError,
            self.webClientContextFactory().getContext,
            'example.com', 443,
        )
    def test_returnsContext(self):
        """
        If SSL is present, C{getContext} returns a L{SSL.Context}.
        """
        ctx = self.webClientContextFactory().getContext('example.com', 443)
        self.assertIsInstance(ctx, ssl.SSL.Context)
    def test_setsTrustRootOnContextToDefaultTrustRoot(self):
        """
        The L{CertificateOptions} has C{trustRoot} set to the default trust
        roots.
        """
        ctx = self.webClientContextFactory()
        certificateOptions = ctx._getCertificateOptions('example.com', 443)
        self.assertIsInstance(
            certificateOptions.trustRoot, ssl.OpenSSLDefaultPaths)
    # Skip whichever tests cannot run in this environment: the SSL-using
    # tests when pyOpenSSL is absent, the missing-SSL test when it is there.
    if ssl is None:
        test_returnsContext.skip = "SSL not present, cannot run SSL tests."
        test_setsTrustRootOnContextToDefaultTrustRoot.skip = (
            "SSL not present, cannot run SSL tests.")
    else:
        test_missingSSL.skip = "SSL present."
class HTTPConnectionPoolRetryTests(TestCase, FakeReactorAndConnectMixin):
    """
    L{client.HTTPConnectionPool}, by using
    L{client._RetryingHTTP11ClientProtocol}, supports retrying requests done
    against previously cached connections.
    """
    def test_onlyRetryIdempotentMethods(self):
        """
        Only GET, HEAD, OPTIONS, TRACE, DELETE methods cause a retry.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry("GET", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry("HEAD", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "OPTIONS", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "TRACE", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "DELETE", RequestNotSent(), None))
        self.assertFalse(connection._shouldRetry(
            "POST", RequestNotSent(), None))
        self.assertFalse(connection._shouldRetry(
            "MYMETHOD", RequestNotSent(), None))
        # This will be covered by a different ticket, since we need support
        # for resettable body producers:
        # self.assertTrue(connection._doRetry("PUT", RequestNotSent(), None))
    def test_onlyRetryIfNoResponseReceived(self):
        """
        Only L{RequestNotSent}, L{RequestTransmissionFailed} and
        L{ResponseNeverReceived} exceptions cause a retry.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry("GET", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "GET", RequestTransmissionFailed([]), None))
        self.assertTrue(connection._shouldRetry(
            "GET", ResponseNeverReceived([]),None))
        self.assertFalse(connection._shouldRetry(
            "GET", ResponseFailed([]), None))
        self.assertFalse(connection._shouldRetry(
            "GET", ConnectionRefusedError(), None))
    def test_dontRetryIfFailedDueToCancel(self):
        """
        If a request failed due to the operation being cancelled,
        C{_shouldRetry} returns C{False} to indicate the request should not be
        retried.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        exception = ResponseNeverReceived([Failure(defer.CancelledError())])
        self.assertFalse(connection._shouldRetry(
            "GET", exception, None))
    def test_retryIfFailedDueToNonCancelException(self):
        """
        If a request failed with L{ResponseNeverReceived} due to some
        arbitrary exception, C{_shouldRetry} returns C{True} to indicate the
        request should be retried.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry(
            "GET", ResponseNeverReceived([Failure(Exception())]), None))
    def test_wrappedOnPersistentReturned(self):
        """
        If L{client.HTTPConnectionPool.getConnection} returns a previously
        cached connection, it will get wrapped in a
        L{client._RetryingHTTP11ClientProtocol}.
        """
        pool = client.HTTPConnectionPool(Clock())
        # Add a connection to the cache:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        pool._putConnection(123, protocol)
        # Retrieve it, it should come back wrapped in a
        # _RetryingHTTP11ClientProtocol:
        d = pool.getConnection(123, DummyEndpoint())
        def gotConnection(connection):
            self.assertIsInstance(connection,
                                  client._RetryingHTTP11ClientProtocol)
            self.assertIdentical(connection._clientProtocol, protocol)
        return d.addCallback(gotConnection)
    def test_notWrappedOnNewReturned(self):
        """
        If L{client.HTTPConnectionPool.getConnection} returns a new
        connection, it will be returned as is.
        """
        pool = client.HTTPConnectionPool(None)
        d = pool.getConnection(123, DummyEndpoint())
        def gotConnection(connection):
            # Don't want to use isinstance since potentially the wrapper might
            # subclass it at some point:
            self.assertIdentical(connection.__class__, HTTP11ClientProtocol)
        return d.addCallback(gotConnection)
    def retryAttempt(self, willWeRetry):
        """
        Fail a first request, possibly retrying depending on argument.
        @param willWeRetry: the value the stubbed C{_shouldRetry} returns.
        @return: a C{(deferred, protocols)} pair: the request's Deferred and
            the list of stub protocols created so far (one per attempt).
        """
        protocols = []
        def newProtocol():
            protocol = StubHTTPProtocol()
            protocols.append(protocol)
            return defer.succeed(protocol)
        bodyProducer = object()
        request = client.Request("FOO", "/", client.Headers(), bodyProducer,
                                 persistent=True)
        newProtocol()
        protocol = protocols[0]
        retrier = client._RetryingHTTP11ClientProtocol(protocol, newProtocol)
        def _shouldRetry(m, e, bp):
            self.assertEqual(m, "FOO")
            self.assertIdentical(bp, bodyProducer)
            self.assertIsInstance(e, (RequestNotSent, ResponseNeverReceived))
            return willWeRetry
        retrier._shouldRetry = _shouldRetry
        d = retrier.request(request)
        # So far, one request made:
        self.assertEqual(len(protocols), 1)
        self.assertEqual(len(protocols[0].requests), 1)
        # Fail the first request:
        protocol.requests[0][1].errback(RequestNotSent())
        return d, protocols
    def test_retryIfShouldRetryReturnsTrue(self):
        """
        L{client._RetryingHTTP11ClientProtocol} retries when
        L{client._RetryingHTTP11ClientProtocol._shouldRetry} returns C{True}.
        """
        d, protocols = self.retryAttempt(True)
        # We retried!
        self.assertEqual(len(protocols), 2)
        response = object()
        protocols[1].requests[0][1].callback(response)
        return d.addCallback(self.assertIdentical, response)
    def test_dontRetryIfShouldRetryReturnsFalse(self):
        """
        L{client._RetryingHTTP11ClientProtocol} does not retry when
        L{client._RetryingHTTP11ClientProtocol._shouldRetry} returns C{False}.
        """
        d, protocols = self.retryAttempt(False)
        # We did not retry:
        self.assertEqual(len(protocols), 1)
        return self.assertFailure(d, RequestNotSent)
    def test_onlyRetryWithoutBody(self):
        """
        L{_RetryingHTTP11ClientProtocol} only retries queries that don't have
        a body.
        This is an implementation restriction; if the restriction is fixed,
        this test should be removed and PUT added to list of methods that
        support retries.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry("GET", RequestNotSent(), None))
        self.assertFalse(connection._shouldRetry("GET", RequestNotSent(), object()))
    def test_onlyRetryOnce(self):
        """
        If a L{client._RetryingHTTP11ClientProtocol} fails more than once on
        an idempotent query before a response is received, it will not retry.
        """
        d, protocols = self.retryAttempt(True)
        self.assertEqual(len(protocols), 2)
        # Fail the second request too:
        protocols[1].requests[0][1].errback(ResponseNeverReceived([]))
        # We didn't retry again:
        self.assertEqual(len(protocols), 2)
        return self.assertFailure(d, ResponseNeverReceived)
    def test_dontRetryIfRetryAutomaticallyFalse(self):
        """
        If L{HTTPConnectionPool.retryAutomatically} is set to C{False}, don't
        wrap connections with retrying logic.
        """
        pool = client.HTTPConnectionPool(Clock())
        pool.retryAutomatically = False
        # Add a connection to the cache:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        pool._putConnection(123, protocol)
        # Retrieve it, it should come back unwrapped:
        d = pool.getConnection(123, DummyEndpoint())
        def gotConnection(connection):
            self.assertIdentical(connection, protocol)
        return d.addCallback(gotConnection)
    def test_retryWithNewConnection(self):
        """
        L{client.HTTPConnectionPool} creates
        L{client._RetryingHTTP11ClientProtocol} with a new connection factory
        method that creates a new connection using the same key and endpoint
        as the wrapped connection.
        """
        pool = client.HTTPConnectionPool(Clock())
        key = 123
        endpoint = DummyEndpoint()
        newConnections = []
        # Override the pool's _newConnection:
        def newConnection(k, e):
            newConnections.append((k, e))
        pool._newConnection = newConnection
        # Add a connection to the cache:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        pool._putConnection(key, protocol)
        # Retrieve it, it should come back wrapped in a
        # _RetryingHTTP11ClientProtocol:
        d = pool.getConnection(key, endpoint)
        def gotConnection(connection):
            self.assertIsInstance(connection,
                                  client._RetryingHTTP11ClientProtocol)
            self.assertIdentical(connection._clientProtocol, protocol)
            # Verify that the _newConnection method on retrying connection
            # calls _newConnection on the pool:
            self.assertEqual(newConnections, [])
            connection._newConnection()
            self.assertEqual(len(newConnections), 1)
            self.assertEqual(newConnections[0][0], key)
            self.assertIdentical(newConnections[0][1], endpoint)
        return d.addCallback(gotConnection)
class CookieTestsMixin(object):
    """
    Mixin for unit tests dealing with cookies.
    """
    def addCookies(self, cookieJar, uri, cookies):
        """
        Add a cookie to a cookie jar.

        @param cookieJar: the C{cookielib.CookieJar} to populate.
        @param uri: the request URI the cookies are associated with.
        @param cookies: a list of C{Set-Cookie} header values.

        @return: a two-tuple of the fake urllib2 request and response that
            were used to populate the jar.
        """
        fakeResponse = client._FakeUrllib2Response(
            client.Response(
                ('HTTP', 1, 1),
                200,
                'OK',
                client.Headers({'Set-Cookie': cookies}),
                None))
        fakeRequest = client._FakeUrllib2Request(uri)

        cookieJar.extract_cookies(fakeResponse, fakeRequest)

        return fakeRequest, fakeResponse
class CookieJarTests(TestCase, CookieTestsMixin):
    """
    Tests for L{twisted.web.client._FakeUrllib2Response} and
    L{twisted.web.client._FakeUrllib2Request}'s interactions with
    C{cookielib.CookieJar} instances.
    """
    def makeCookieJar(self):
        """
        @return: a C{cookielib.CookieJar} with some sample cookies, along
            with the (request, response) pair used to populate it
        """
        cookieJar = cookielib.CookieJar()
        reqres = self.addCookies(
            cookieJar,
            'http://example.com:1234/foo?bar',
            ['foo=1; cow=moo; Path=/foo; Comment=hello',
             'bar=2; Comment=goodbye'])
        return cookieJar, reqres

    def test_extractCookies(self):
        """
        L{cookielib.CookieJar.extract_cookies} extracts cookie information from
        fake urllib2 response instances.
        """
        jar = self.makeCookieJar()[0]
        cookies = dict([(c.name, c) for c in jar])

        # 'foo' carried explicit Path, Comment and a non-standard attribute:
        cookie = cookies['foo']
        self.assertEqual(cookie.version, 0)
        self.assertEqual(cookie.name, 'foo')
        self.assertEqual(cookie.value, '1')
        self.assertEqual(cookie.path, '/foo')
        self.assertEqual(cookie.comment, 'hello')
        self.assertEqual(cookie.get_nonstandard_attr('cow'), 'moo')

        # 'bar' had no Path, so it defaults to '/':
        cookie = cookies['bar']
        self.assertEqual(cookie.version, 0)
        self.assertEqual(cookie.name, 'bar')
        self.assertEqual(cookie.value, '2')
        self.assertEqual(cookie.path, '/')
        self.assertEqual(cookie.comment, 'goodbye')
        self.assertIdentical(cookie.get_nonstandard_attr('cow'), None)

    def test_sendCookie(self):
        """
        L{cookielib.CookieJar.add_cookie_header} adds a cookie header to a fake
        urllib2 request instance.
        """
        jar, (request, response) = self.makeCookieJar()

        self.assertIdentical(
            request.get_header('Cookie', None),
            None)

        jar.add_cookie_header(request)
        self.assertEqual(
            request.get_header('Cookie', None),
            'foo=1; bar=2')
class CookieAgentTests(TestCase, CookieTestsMixin, FakeReactorAndConnectMixin,
                       AgentTestsMixin):
    """
    Tests for L{twisted.web.client.CookieAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.CookieAgent}
        """
        return client.CookieAgent(
            self.buildAgentForWrapperTest(self.reactor),
            cookielib.CookieJar())

    def setUp(self):
        self.reactor = self.Reactor()

    def test_emptyCookieJarRequest(self):
        """
        L{CookieAgent.request} does not insert any C{'Cookie'} header into the
        L{Request} object if there is no cookie in the cookie jar for the URI
        being requested. Cookies are extracted from the response and stored in
        the cookie jar.
        """
        cookieJar = cookielib.CookieJar()
        self.assertEqual(list(cookieJar), [])

        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        d = cookieAgent.request(
            'GET', 'http://example.com:1234/foo?bar')

        def _checkCookie(ignored):
            # The Set-Cookie in the response below must have ended up in the
            # jar by the time the request Deferred fires:
            cookies = list(cookieJar)
            self.assertEqual(len(cookies), 1)
            self.assertEqual(cookies[0].name, 'foo')
            self.assertEqual(cookies[0].value, '1')

        d.addCallback(_checkCookie)

        req, res = self.protocol.requests.pop()
        self.assertIdentical(req.headers.getRawHeaders('cookie'), None)

        resp = client.Response(
            ('HTTP', 1, 1),
            200,
            'OK',
            client.Headers({'Set-Cookie': ['foo=1',]}),
            None)
        res.callback(resp)

        return d

    def test_requestWithCookie(self):
        """
        L{CookieAgent.request} inserts a C{'Cookie'} header into the L{Request}
        object when there is a cookie matching the request URI in the cookie
        jar.
        """
        uri = 'http://example.com:1234/foo?bar'
        cookie = 'foo=1'

        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)

        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)

        req, res = self.protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('cookie'), [cookie])

    def test_secureCookie(self):
        """
        L{CookieAgent} is able to handle secure cookies, ie cookies which
        should only be handled over https.
        """
        uri = 'https://example.com:1234/foo?bar'
        cookie = 'foo=1;secure'

        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)

        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)

        req, res = self.protocol.requests.pop()
        # The 'secure' attribute is consumed by the jar; only the name=value
        # pair is sent on the wire:
        self.assertEqual(req.headers.getRawHeaders('cookie'), ['foo=1'])

    def test_secureCookieOnInsecureConnection(self):
        """
        If a cookie is setup as secure, it won't be sent with the request if
        it's not over HTTPS.
        """
        uri = 'http://example.com/foo?bar'
        cookie = 'foo=1;secure'

        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)

        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)

        req, res = self.protocol.requests.pop()
        self.assertIdentical(None, req.headers.getRawHeaders('cookie'))

    def test_portCookie(self):
        """
        L{CookieAgent} supports cookies which enforces the port number they
        need to be transferred upon.
        """
        uri = 'https://example.com:1234/foo?bar'
        cookie = 'foo=1;port=1234'

        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)

        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)

        req, res = self.protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('cookie'), ['foo=1'])

    def test_portCookieOnWrongPort(self):
        """
        When creating a cookie with a port directive, it won't be added to the
        L{cookielib.CookieJar} if the URI is on a different port.
        """
        uri = 'https://example.com:4567/foo?bar'
        cookie = 'foo=1;port=1234'

        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 0)
class Decoder1(proxyForInterface(IResponse)):
    """
    A test decoder to be used by L{client.ContentDecoderAgent} tests.

    Proxies L{IResponse} without altering the body; only its identity (and
    class) matters to the tests.
    """
class Decoder2(Decoder1):
    """
    A test decoder to be used by L{client.ContentDecoderAgent} tests.

    Distinct from L{Decoder1} only by class, so tests can tell which decoder
    wrapped a response.
    """
class ContentDecoderAgentTests(TestCase, FakeReactorAndConnectMixin,
                               AgentTestsMixin):
    """
    Tests for L{client.ContentDecoderAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.ContentDecoderAgent}
        """
        return client.ContentDecoderAgent(self.agent, [])

    def setUp(self):
        """
        Create an L{Agent} wrapped around a fake reactor.
        """
        self.reactor = self.Reactor()
        self.agent = self.buildAgentForWrapperTest(self.reactor)

    def test_acceptHeaders(self):
        """
        L{client.ContentDecoderAgent} sets the I{Accept-Encoding} header to the
        names of the available decoder objects.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])

        agent.request('GET', 'http://example.com/foo')

        protocol = self.protocol

        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('accept-encoding'),
                         ['decoder1,decoder2'])

    def test_existingHeaders(self):
        """
        If there are existing I{Accept-Encoding} fields,
        L{client.ContentDecoderAgent} creates a new field for the decoders it
        knows about.
        """
        headers = http_headers.Headers({'foo': ['bar'],
                                        'accept-encoding': ['fizz']})
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        agent.request('GET', 'http://example.com/foo', headers=headers)

        protocol = self.protocol

        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        self.assertEqual(
            list(req.headers.getAllRawHeaders()),
            [('Host', ['example.com']),
             ('Foo', ['bar']),
             ('Accept-Encoding', ['fizz', 'decoder1,decoder2'])])

    def test_plainEncodingResponse(self):
        """
        If the response is not encoded despite the request I{Accept-Encoding}
        headers, L{client.ContentDecoderAgent} simply forwards the response.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        deferred = agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()
        response = Response(('HTTP', 1, 1), 200, 'OK', http_headers.Headers(),
                            None)
        res.callback(response)

        # The exact same (unwrapped) response object should come back:
        return deferred.addCallback(self.assertIdentical, response)

    def test_unsupportedEncoding(self):
        """
        If an encoding unknown to the L{client.ContentDecoderAgent} is found,
        the response is unchanged.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        deferred = agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding': ['fizz']})
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, None)
        res.callback(response)

        return deferred.addCallback(self.assertIdentical, response)

    def test_unknownEncoding(self):
        """
        When L{client.ContentDecoderAgent} encounters a decoder it doesn't know
        about, it stops decoding even if another encoding is known afterwards.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        deferred = agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding':
                                        ['decoder1,fizz,decoder2']})
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, None)
        res.callback(response)

        def check(result):
            # Only the trailing known encoding ('decoder2') was stripped;
            # 'decoder1,fizz' remains in Content-Encoding:
            self.assertNotIdentical(response, result)
            self.assertIsInstance(result, Decoder2)
            self.assertEqual(['decoder1,fizz'],
                             result.headers.getRawHeaders('content-encoding'))

        return deferred.addCallback(check)
class SimpleAgentProtocol(Protocol):
    """
    A L{Protocol} to be used with an L{client.Agent} to receive data.

    @ivar finished: L{Deferred} firing when C{connectionLost} is called.

    @ivar made: L{Deferred} firing when C{connectionMade} is called.

    @ivar received: C{list} of received data.
    """

    def __init__(self):
        # Fired with None on connectionMade / connectionLost respectively:
        self.made = Deferred()
        self.finished = Deferred()
        # Accumulates every chunk passed to dataReceived, in order:
        self.received = []

    def connectionMade(self):
        """Signal that the connection is established."""
        self.made.callback(None)

    def connectionLost(self, reason):
        """Signal that the connection is gone; C{reason} is ignored."""
        self.finished.callback(None)

    def dataReceived(self, data):
        """Record a chunk of delivered body data."""
        self.received.append(data)
class ContentDecoderAgentWithGzipTests(TestCase,
                                       FakeReactorAndConnectMixin):
    """
    Tests for L{client.ContentDecoderAgent} configured with
    L{client.GzipDecoder}, exercising real gzip decompression.
    """

    def setUp(self):
        """
        Create an L{Agent} wrapped around a fake reactor.
        """
        self.reactor = self.Reactor()
        agent = self.buildAgentForWrapperTest(self.reactor)
        self.agent = client.ContentDecoderAgent(
            agent, [("gzip", client.GzipDecoder)])

    def test_gzipEncodingResponse(self):
        """
        If the response has a C{gzip} I{Content-Encoding} header,
        L{GzipDecoder} wraps the response to return uncompressed data to the
        user.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        response.length = 12
        res.callback(response)

        # wbits of 16 + MAX_WBITS makes zlib emit a gzip-format stream:
        compressor = zlib.compressobj(2, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
        data = (compressor.compress('x' * 6) + compressor.compress('y' * 4) +
                compressor.flush())

        def checkResponse(result):
            self.assertNotIdentical(result, response)
            self.assertEqual(result.version, ('HTTP', 1, 1))
            self.assertEqual(result.code, 200)
            self.assertEqual(result.phrase, 'OK')
            # Content-Encoding was consumed by the decoder:
            self.assertEqual(list(result.headers.getAllRawHeaders()),
                             [('Foo', ['bar'])])
            # The decompressed length can't be known up front:
            self.assertEqual(result.length, UNKNOWN_LENGTH)
            self.assertRaises(AttributeError, getattr, result, 'unknown')

            # Deliver the compressed body in two chunks, split mid-stream:
            response._bodyDataReceived(data[:5])
            response._bodyDataReceived(data[5:])
            response._bodyDataFinished()

            protocol = SimpleAgentProtocol()
            result.deliverBody(protocol)

            self.assertEqual(protocol.received, ['x' * 6 + 'y' * 4])
            return defer.gatherResults([protocol.made, protocol.finished])

        deferred.addCallback(checkResponse)

        return deferred

    def test_brokenContent(self):
        """
        If the data received by the L{GzipDecoder} isn't valid gzip-compressed
        data, the call to C{deliverBody} fails with a C{zlib.error}.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        response.length = 12
        res.callback(response)

        data = "not gzipped content"

        def checkResponse(result):
            response._bodyDataReceived(data)

            result.deliverBody(Protocol())

        deferred.addCallback(checkResponse)
        self.assertFailure(deferred, client.ResponseFailed)

        def checkFailure(error):
            error.reasons[0].trap(zlib.error)
            self.assertIsInstance(error.response, Response)

        return deferred.addCallback(checkFailure)

    def test_flushData(self):
        """
        When the connection with the server is lost, the gzip protocol calls
        C{flush} on the zlib decompressor object to get uncompressed data which
        may have been buffered.
        """
        class decompressobj(object):
            """
            Fake zlib decompressor yielding distinguishable markers so the
            test can see both decompress() and flush() output.
            """

            def __init__(self, wbits):
                pass

            def decompress(self, data):
                return 'x'

            def flush(self):
                return 'y'

        # Monkeypatch zlib for the duration of this test only:
        oldDecompressObj = zlib.decompressobj
        zlib.decompressobj = decompressobj
        self.addCleanup(setattr, zlib, 'decompressobj', oldDecompressObj)

        deferred = self.agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers({'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        res.callback(response)

        def checkResponse(result):
            response._bodyDataReceived('data')
            response._bodyDataFinished()

            protocol = SimpleAgentProtocol()
            result.deliverBody(protocol)

            # 'x' from decompress(), 'y' from the connectionLost flush():
            self.assertEqual(protocol.received, ['x', 'y'])
            return defer.gatherResults([protocol.made, protocol.finished])

        deferred.addCallback(checkResponse)

        return deferred

    def test_flushError(self):
        """
        If the C{flush} call in C{connectionLost} fails, the C{zlib.error}
        exception is caught and turned into a L{ResponseFailed}.
        """
        class decompressobj(object):
            """
            Fake zlib decompressor whose flush() always raises.
            """

            def __init__(self, wbits):
                pass

            def decompress(self, data):
                return 'x'

            def flush(self):
                raise zlib.error()

        oldDecompressObj = zlib.decompressobj
        zlib.decompressobj = decompressobj
        self.addCleanup(setattr, zlib, 'decompressobj', oldDecompressObj)

        deferred = self.agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers({'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        res.callback(response)

        def checkResponse(result):
            response._bodyDataReceived('data')
            response._bodyDataFinished()

            protocol = SimpleAgentProtocol()
            result.deliverBody(protocol)

            self.assertEqual(protocol.received, ['x', 'y'])
            return defer.gatherResults([protocol.made, protocol.finished])

        deferred.addCallback(checkResponse)
        self.assertFailure(deferred, client.ResponseFailed)

        def checkFailure(error):
            # reasons[1] because reasons[0] is the assertion failure from
            # checkResponse; the flush error is wrapped second:
            error.reasons[1].trap(zlib.error)
            self.assertIsInstance(error.response, Response)

        return deferred.addCallback(checkFailure)
class ProxyAgentTests(TestCase, FakeReactorAndConnectMixin, AgentTestsMixin):
    """
    Tests for L{client.ProxyAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.ProxyAgent}
        """
        return client.ProxyAgent(
            TCP4ClientEndpoint(self.reactor, "127.0.0.1", 1234),
            self.reactor)

    def setUp(self):
        self.reactor = self.Reactor()
        self.agent = client.ProxyAgent(
            TCP4ClientEndpoint(self.reactor, "bar", 5678), self.reactor)
        oldEndpoint = self.agent._proxyEndpoint
        self.agent._proxyEndpoint = self.StubEndpoint(oldEndpoint, self)

    def test_proxyRequest(self):
        """
        L{client.ProxyAgent} issues an HTTP request against the proxy, with the
        full URI as path, when C{request} is called.
        """
        headers = http_headers.Headers({'foo': ['bar']})
        # Just going to check the body for identity, so it doesn't need to be
        # real.
        body = object()
        self.agent.request(
            'GET', 'http://example.com:1234/foo?bar', headers, body)

        # The TCP connection goes to the proxy ("bar":5678), not to
        # example.com:
        host, port, factory = self.reactor.tcpClients.pop()[:3]
        self.assertEqual(host, "bar")
        self.assertEqual(port, 5678)

        self.assertIsInstance(factory._wrappedFactory,
                              client._HTTP11ClientFactory)

        protocol = self.protocol

        # The request should be issued.
        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        self.assertIsInstance(req, Request)
        self.assertEqual(req.method, 'GET')
        # Absolute URI as the request path, per proxy semantics:
        self.assertEqual(req.uri, 'http://example.com:1234/foo?bar')
        self.assertEqual(
            req.headers,
            http_headers.Headers({'foo': ['bar'],
                                  'host': ['example.com:1234']}))
        self.assertIdentical(req.bodyProducer, body)

    def test_nonPersistent(self):
        """
        C{ProxyAgent} connections are not persistent by default.
        """
        self.assertEqual(self.agent._pool.persistent, False)

    def test_connectUsesConnectionPool(self):
        """
        When a connection is made by the C{ProxyAgent}, it uses its pool's
        C{getConnection} method to do so, with the endpoint it was constructed
        with and a key of C{("http-proxy", endpoint)}.
        """
        endpoint = DummyEndpoint()

        class DummyPool(object):
            connected = False
            persistent = False

            def getConnection(this, key, ep):
                this.connected = True
                self.assertIdentical(ep, endpoint)
                # The key is *not* tied to the final destination, but only to
                # the address of the proxy, since that's where *we* are
                # connecting:
                self.assertEqual(key, ("http-proxy", endpoint))
                return defer.succeed(StubHTTPProtocol())

        pool = DummyPool()
        agent = client.ProxyAgent(endpoint, self.reactor, pool=pool)
        self.assertIdentical(pool, agent._pool)

        agent.request('GET', 'http://foo/')
        self.assertEqual(agent._pool.connected, True)
class _RedirectAgentTestsMixin(object):
    """
    Test cases mixin for L{RedirectAgentTests} and
    L{BrowserLikeRedirectAgentTests}.
    """
    def test_noRedirect(self):
        """
        L{client.RedirectAgent} behaves like L{client.Agent} if the response
        doesn't contain a redirect.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, None)
        res.callback(response)

        self.assertEqual(0, len(self.protocol.requests))
        result = self.successResultOf(deferred)
        self.assertIdentical(response, result)
        self.assertIdentical(result.previousResponse, None)

    def _testRedirectDefault(self, code):
        """
        When getting a redirect, L{client.RedirectAgent} follows the URL
        specified in the L{Location} header field and makes a new request.

        @param code: HTTP status code.
        """
        self.agent.request('GET', 'http://example.com/foo')

        host, port = self.reactor.tcpClients.pop()[:2]
        self.assertEqual("example.com", host)
        self.assertEqual(80, port)

        req, res = self.protocol.requests.pop()

        # Redirect to an https URL to check the scheme switch too:
        headers = http_headers.Headers(
            {'location': ['https://example.com/bar']})
        response = Response(('HTTP', 1, 1), code, 'OK', headers, None)
        res.callback(response)

        req2, res2 = self.protocol.requests.pop()
        self.assertEqual('GET', req2.method)
        self.assertEqual('/bar', req2.uri)

        host, port = self.reactor.sslClients.pop()[:2]
        self.assertEqual("example.com", host)
        self.assertEqual(443, port)

    def test_redirect301(self):
        """
        L{client.RedirectAgent} follows redirects on status code 301.
        """
        self._testRedirectDefault(301)

    def test_redirect302(self):
        """
        L{client.RedirectAgent} follows redirects on status code 302.
        """
        self._testRedirectDefault(302)

    def test_redirect307(self):
        """
        L{client.RedirectAgent} follows redirects on status code 307.
        """
        self._testRedirectDefault(307)

    def _testRedirectToGet(self, code, method):
        """
        L{client.RedirectAgent} changes the method to I{GET} when getting
        a redirect on a non-I{GET} request.

        @param code: HTTP status code.

        @param method: HTTP request method.
        """
        self.agent.request(method, 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers(
            {'location': ['http://example.com/bar']})
        response = Response(('HTTP', 1, 1), code, 'OK', headers, None)
        res.callback(response)

        req2, res2 = self.protocol.requests.pop()
        self.assertEqual('GET', req2.method)
        self.assertEqual('/bar', req2.uri)

    def test_redirect303(self):
        """
        L{client.RedirectAgent} changes the method to I{GET} when getting a 303
        redirect on a I{POST} request.
        """
        self._testRedirectToGet(303, 'POST')

    def test_noLocationField(self):
        """
        If no L{Location} header field is found when getting a redirect,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping a
        L{error.RedirectWithNoLocation} exception.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        # A 301 response with no Location header at all:
        headers = http_headers.Headers()
        response = Response(('HTTP', 1, 1), 301, 'OK', headers, None)
        res.callback(response)

        fail = self.failureResultOf(deferred, client.ResponseFailed)

        fail.value.reasons[0].trap(error.RedirectWithNoLocation)
        self.assertEqual('http://example.com/foo',
                         fail.value.reasons[0].value.uri)
        self.assertEqual(301, fail.value.response.code)

    def _testPageRedirectFailure(self, code, method):
        """
        When getting a redirect on an unsupported request method,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.

        @param code: HTTP status code.

        @param method: HTTP request method.
        """
        deferred = self.agent.request(method, 'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers()
        response = Response(('HTTP', 1, 1), code, 'OK', headers, None)
        res.callback(response)

        fail = self.failureResultOf(deferred, client.ResponseFailed)

        fail.value.reasons[0].trap(error.PageRedirect)
        self.assertEqual('http://example.com/foo',
                         fail.value.reasons[0].value.location)
        self.assertEqual(code, fail.value.response.code)

    def test_307OnPost(self):
        """
        When getting a 307 redirect on a I{POST} request,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.
        """
        self._testPageRedirectFailure(307, 'POST')

    def test_redirectLimit(self):
        """
        If the limit of redirects specified to L{client.RedirectAgent} is
        reached, the deferred fires with L{ResponseFailed} error wrapping
        a L{InfiniteRedirection} exception.
        """
        agent = self.buildAgentForWrapperTest(self.reactor)
        # Allow only a single redirect before giving up:
        redirectAgent = client.RedirectAgent(agent, 1)

        deferred = redirectAgent.request(b'GET', b'http://example.com/foo')

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers(
            {b'location': [b'http://example.com/bar']})
        response = Response((b'HTTP', 1, 1), 302, b'OK', headers, None)
        res.callback(response)

        req2, res2 = self.protocol.requests.pop()
        # Second consecutive redirect exceeds the limit of 1:
        response2 = Response((b'HTTP', 1, 1), 302, b'OK', headers, None)
        res2.callback(response2)

        fail = self.failureResultOf(deferred, client.ResponseFailed)

        fail.value.reasons[0].trap(error.InfiniteRedirection)
        self.assertEqual('http://example.com/foo',
                         fail.value.reasons[0].value.location)
        self.assertEqual(302, fail.value.response.code)

    def _testRedirectURI(self, uri, location, finalURI):
        """
        When L{client.RedirectAgent} encounters a relative redirect I{URI}, it
        is resolved against the request I{URI} before following the redirect.

        @param uri: Request URI.

        @param location: I{Location} header redirect URI.

        @param finalURI: Expected final URI.
        """
        self.agent.request('GET', uri)

        req, res = self.protocol.requests.pop()

        headers = http_headers.Headers(
            {'location': [location]})
        response = Response(('HTTP', 1, 1), 302, 'OK', headers, None)
        res.callback(response)

        req2, res2 = self.protocol.requests.pop()
        self.assertEqual('GET', req2.method)
        self.assertEqual(finalURI, req2.absoluteURI)

    def test_relativeURI(self):
        """
        L{client.RedirectAgent} resolves and follows relative I{URI}s in
        redirects, preserving query strings.
        """
        self._testRedirectURI(
            'http://example.com/foo/bar', 'baz',
            'http://example.com/foo/baz')
        self._testRedirectURI(
            'http://example.com/foo/bar', '/baz',
            'http://example.com/baz')
        self._testRedirectURI(
            'http://example.com/foo/bar', '/baz?a',
            'http://example.com/baz?a')

    def test_relativeURIPreserveFragments(self):
        """
        L{client.RedirectAgent} resolves and follows relative I{URI}s in
        redirects, preserving fragments in way that complies with the HTTP 1.1
        bis draft.

        @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-22#section-7.1.2}
        """
        self._testRedirectURI(
            'http://example.com/foo/bar#frag', '/baz?a',
            'http://example.com/baz?a#frag')
        self._testRedirectURI(
            'http://example.com/foo/bar', '/baz?a#frag2',
            'http://example.com/baz?a#frag2')

    def test_relativeURISchemeRelative(self):
        """
        L{client.RedirectAgent} resolves and follows scheme relative I{URI}s in
        redirects, replacing the hostname and port when required.
        """
        self._testRedirectURI(
            'http://example.com/foo/bar', '//foo.com/baz',
            'http://foo.com/baz')
        self._testRedirectURI(
            'http://example.com/foo/bar', '//foo.com:81/baz',
            'http://foo.com:81/baz')

    def test_responseHistory(self):
        """
        L{Response.previousResponse} references the previous L{Response} from
        a redirect, or C{None} if there was no previous response.
        """
        agent = self.buildAgentForWrapperTest(self.reactor)
        redirectAgent = client.RedirectAgent(agent)

        deferred = redirectAgent.request(b'GET', b'http://example.com/foo')

        redirectReq, redirectRes = self.protocol.requests.pop()

        headers = http_headers.Headers(
            {b'location': [b'http://example.com/bar']})
        redirectResponse = Response((b'HTTP', 1, 1), 302, b'OK', headers, None)
        redirectRes.callback(redirectResponse)

        req, res = self.protocol.requests.pop()

        response = Response((b'HTTP', 1, 1), 200, b'OK', headers, None)
        res.callback(response)

        finalResponse = self.successResultOf(deferred)
        self.assertIdentical(finalResponse.previousResponse, redirectResponse)
        self.assertIdentical(redirectResponse.previousResponse, None)
class RedirectAgentTests(TestCase, FakeReactorAndConnectMixin,
                         _RedirectAgentTestsMixin, AgentTestsMixin):
    """
    Tests for L{client.RedirectAgent}.

    The strict agent refuses to change methods: 301/302 on POST fail rather
    than being converted to GET (contrast with
    L{client.BrowserLikeRedirectAgent}).
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.RedirectAgent}
        """
        return client.RedirectAgent(
            self.buildAgentForWrapperTest(self.reactor))

    def setUp(self):
        self.reactor = self.Reactor()
        self.agent = self.makeAgent()

    def test_301OnPost(self):
        """
        When getting a 301 redirect on a I{POST} request,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.
        """
        self._testPageRedirectFailure(301, 'POST')

    def test_302OnPost(self):
        """
        When getting a 302 redirect on a I{POST} request,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.
        """
        self._testPageRedirectFailure(302, 'POST')
class BrowserLikeRedirectAgentTests(TestCase,
                                    FakeReactorAndConnectMixin,
                                    _RedirectAgentTestsMixin,
                                    AgentTestsMixin):
    """
    Tests for L{client.BrowserLikeRedirectAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.BrowserLikeRedirectAgent}
        """
        return client.BrowserLikeRedirectAgent(
            self.buildAgentForWrapperTest(self.reactor))

    def setUp(self):
        self.reactor = self.Reactor()
        self.agent = self.makeAgent()

    def test_redirectToGet301(self):
        """
        L{client.BrowserLikeRedirectAgent} changes the method to I{GET} when
        getting a 301 redirect on a I{POST} request.
        """
        self._testRedirectToGet(301, 'POST')

    def test_redirectToGet302(self):
        """
        L{client.BrowserLikeRedirectAgent} changes the method to I{GET} when
        getting a 302 redirect on a I{POST} request.
        """
        self._testRedirectToGet(302, 'POST')
class DummyResponse(object):
    """
    Fake L{IResponse} for testing readBody that just captures the protocol
    passed to deliverBody.

    @ivar protocol: After C{deliverBody} is called, the protocol it was called
        with.
    """
    code = 200
    phrase = "OK"

    def __init__(self, headers=None):
        """
        @param headers: The headers for this response.  If C{None}, an empty
            L{Headers} instance will be used.

        @type headers: L{Headers}
        """
        self.headers = Headers() if headers is None else headers

    def deliverBody(self, protocol):
        """
        Just record the given protocol without actually delivering anything to
        it.
        """
        self.protocol = protocol
class ReadBodyTests(TestCase):
    """
    Tests for L{client.readBody}
    """
    def test_success(self):
        """
        L{client.readBody} returns a L{Deferred} which fires with the complete
        body of the L{IResponse} provider passed to it.
        """
        response = DummyResponse()
        d = client.readBody(response)
        response.protocol.dataReceived("first")
        response.protocol.dataReceived("second")
        # ResponseDone means the whole body definitely arrived:
        response.protocol.connectionLost(Failure(ResponseDone()))
        self.assertEqual(self.successResultOf(d), "firstsecond")

    def test_withPotentialDataLoss(self):
        """
        If the full body of the L{IResponse} passed to L{client.readBody} is
        not definitely received, the L{Deferred} returned by
        L{client.readBody} fires with a L{Failure} wrapping
        L{client.PartialDownloadError} with the content that was received.
        """
        response = DummyResponse()
        d = client.readBody(response)
        response.protocol.dataReceived("first")
        response.protocol.dataReceived("second")
        response.protocol.connectionLost(Failure(PotentialDataLoss()))
        failure = self.failureResultOf(d)
        failure.trap(client.PartialDownloadError)
        # The error carries the response status/phrase and the partial body:
        self.assertEqual({
            "status": failure.value.status,
            "message": failure.value.message,
            "body": failure.value.response,
        }, {
            "status": 200,
            "message": "OK",
            "body": "firstsecond",
        })

    def test_otherErrors(self):
        """
        If there is an exception other than L{client.PotentialDataLoss} while
        L{client.readBody} is collecting the response body, the L{Deferred}
        returned by L{client.readBody} fires with that exception.
        """
        response = DummyResponse()
        d = client.readBody(response)
        response.protocol.dataReceived("first")
        response.protocol.connectionLost(
            Failure(ConnectionLost("mystery problem")))
        reason = self.failureResultOf(d)
        reason.trap(ConnectionLost)
        self.assertEqual(reason.value.args, ("mystery problem",))
import random
import uuid
from datetime import date
from freezegun import freeze_time
from tests.conftest import normalize_spaces
def _get_example_performance_data():
return {
"total_notifications": 1_789_000_000,
"email_notifications": 1_123_000_000,
"sms_notifications": 987_654_321,
"letter_notifications": 1_234_567,
"live_service_count": random.randrange(1, 1000),
"notifications_by_type": [
{
"date": "2021-02-21",
"emails": 1_234_567, "sms": 123_456, "letters": 123,
},
{
"date": "2021-02-22",
"emails": 1, "sms": 2, "letters": 3,
},
{
"date": "2021-02-23",
"emails": 1, "sms": 2, "letters": 3,
},
{
"date": "2021-02-24",
"emails": 1, "sms": 2, "letters": 3,
},
{
"date": "2021-02-25",
"emails": 1, "sms": 2, "letters": 3,
},
{
"date": "2021-02-26",
"emails": 1, "sms": 2, "letters": 3,
},
{
"date": "2021-02-27",
"emails": 1, "sms": 2, "letters": 3,
},
],
"processing_time": [
{
"date": "2021-02-21",
"percentage_under_10_seconds": 99.2
},
{
"date": "2021-02-22",
"percentage_under_10_seconds": 95.3
},
{
"date": "2021-02-23",
"percentage_under_10_seconds": 95.6
},
{
"date": "2021-02-24",
"percentage_under_10_seconds": 96.7
},
{
"date": "2021-02-25",
"percentage_under_10_seconds": 95.7
},
{
"date": "2021-02-26",
"percentage_under_10_seconds": 96.5
},
{
"date": "2021-02-27",
"percentage_under_10_seconds": 98.6
},
],
"services_using_notify": [
{
"organisation_id": uuid.uuid4(),
"organisation_name": "Department of Examples and Patterns",
"service_id": uuid.uuid4(),
"service_name": "Example service"
},
{
"organisation_id": uuid.uuid4(),
"organisation_name": "Department of Examples and Patterns",
"service_id": uuid.uuid4(),
"service_name": "Example service 2"
},
{
"organisation_id": uuid.uuid4(),
"organisation_name": "Department of One Service",
"service_id": uuid.uuid4(),
"service_name": "Example service 3"
},
{
# On production there should be no live services without an
# organisation, but this isn’t always true in people’s local
# environments
"organisation_id": None,
"organisation_name": None,
"service_id": uuid.uuid4(),
"service_name": "Example service 4"
},
],
}
@freeze_time('2021-01-01')
def test_should_render_performance_page(
    mocker,
    client_request,
    mock_get_service_and_organisation_counts,
):
    # Stub the dashboard-stats API client so the page renders the fixture
    # data from _get_example_performance_data().
    mock_get_performance_data = mocker.patch(
        'app.performance_dashboard_api_client.get_performance_dashboard_stats',
        return_value=_get_example_performance_data(),
    )
    page = client_request.get('main.performance')
    # The page asks for the 7 days up to "today" (frozen at 2021-01-01).
    mock_get_performance_data.assert_called_once_with(
        start_date=date(2020, 12, 25),
        end_date=date(2021, 1, 1),
    )
    # Whole-page snapshot assertion: totals are rounded/humanised, daily
    # tables are rendered newest-first, and the two services that share
    # the name "Department of Examples and Patterns" are grouped into a
    # single organisation row — presumably grouped by name, since the
    # fixture gives them different organisation_ids (TODO confirm).
    assert normalize_spaces(page.select_one('main').text) == (
        'Performance data '
        ''
        'Messages sent since May 2016 '
        '1.8 billion total '
        '1.1 billion emails '
        '987.7 million text messages '
        '1.2 million letters '
        ''
        'Messages sent since May 2016 '
        'Date Emails Text messages Letters '
        '27 February 2021 1 2 3 '
        '26 February 2021 1 2 3 '
        '25 February 2021 1 2 3 '
        '24 February 2021 1 2 3 '
        '23 February 2021 1 2 3 '
        '22 February 2021 1 2 3 '
        '21 February 2021 1,234,567 123,456 123 '
        'Only showing the last 7 days '
        ''
        'Messages sent within 10 seconds '
        '96.8% on average '
        'Messages sent within 10 seconds '
        'Date Percentage '
        '27 February 2021 98.6% '
        '26 February 2021 96.5% '
        '25 February 2021 95.7% '
        '24 February 2021 96.7% '
        '23 February 2021 95.6% '
        '22 February 2021 95.3% '
        '21 February 2021 99.2% '
        'Only showing the last 7 days '
        ''
        'Organisations using Notify '
        'There are 111 organisations and 9,999 services using Notify. '
        'Organisations using Notify '
        'Organisation Number of live services '
        'Department of Examples and Patterns 2 '
        'Department of One Service 1 '
        'No organisation 1'
    )
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# This module contains a collection of generalized utility widgets
from upload_wdg import *
from sobject_group_wdg import *
from calendar_wdg import *
from sobject_calendar_wdg import *
from data_export_wdg import *
from misc_input_wdg import *
from button_wdg import *
from button_new_wdg import *
from gear_menu_wdg import *
from chooser_wdg import *
from smart_select_wdg import *
from proxy_wdg import *
from checkin_wdg import *
from discussion_wdg import *
from text_wdg import *
from file_browser_wdg import *
from format_value_wdg import *
from embed_wdg import *
from swap_display_wdg import *
from reset_password_wdg import *
from title_wdg import *
from ckeditor_wdg import *
from video_wdg import *
#from color_input_wdg import *
#from preview_change_wdg import * | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.libraries import client
def resolve(url):
    """Scrape the hoster page and return the direct media URL.

    Fetches the page with a mobile user agent, pulls the first <video>
    element and returns the src of its first typed <source> child.
    Returns None on any failure (best-effort, matching resolver style).
    """
    try:
        html = client.request(url, mobile=True)
        video_tag = client.parseDOM(html, 'video')[0]
        return client.parseDOM(video_tag, 'source', ret='src', attrs={'type': '.+?'})[0]
    except:
        return
/*
 * NOTE(review): this looks like a semantic-patch (coccinelle-style) test
 * fixture exercising a FREE_AND_NULL transformation rather than
 * production code — confirm before "fixing" it.  As ordinary C it is
 * suspect: free(*v) passes an int where free() expects a pointer,
 * *v = NULL assigns a pointer constant to an int, and the function is
 * declared int but returns no value.
 */
int use_FREE_AND_NULL(int *v)
{
	free(*v);
	*v = NULL;
}
/*
 * NOTE(review): the guard is redundant in standard C — free(NULL) is a
 * no-op — which appears to be exactly the pattern this fixture exists
 * to demonstrate (the name says "need no if").  Declared int but
 * returns nothing; presumably deliberate test input — confirm.
 */
int need_no_if(int *v)
{
	if (v)
		free(v);
}
import mock
import pytest
from praw.models import Comment, Submission, Subreddit
from ... import IntegrationTest
class TestMultireddit(IntegrationTest):
    """Integration tests for multireddit add/copy/create/delete/update.

    All HTTP traffic is replayed from pre-recorded cassettes, so every
    network call is wrapped in the ``use_cassette`` block whose name
    matches the test.  ``time.sleep`` is patched out so PRAW's
    rate-limit backoff does not slow the replay down.
    """

    @mock.patch("time.sleep", return_value=None)
    def test_add(self, _):
        self.reddit.read_only = False  # write calls need an authenticated session
        with self.recorder.use_cassette("TestMultireddit.test_add"):
            multi = self.reddit.user.multireddits()[0]
            multi.add("redditdev")
            assert "redditdev" in multi.subreddits

    @mock.patch("time.sleep", return_value=None)
    def test_copy(self, _):
        self.reddit.read_only = False
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultireddit.test_copy"):
            new = multi.copy()
            # The copy keeps the source's name but lives under our user.
            assert new.name == multi.name
            assert new.display_name == multi.display_name
            assert pytest.placeholders.username in new.path

    @mock.patch("time.sleep", return_value=None)
    def test_copy__with_display_name(self, _):
        self.reddit.read_only = False
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        # Awkward display name: reddit slugifies it into the URL name
        # (and apparently truncates it — note only 5 of 10 repeats survive).
        name = "A--B\n" * 10
        with self.recorder.use_cassette(
            "TestMultireddit.test_copy__with_display_name"
        ):
            new = multi.copy(display_name=name)
            assert new.name == "a_b_a_b_a_b_a_b_a_b"
            assert new.display_name == name
            assert pytest.placeholders.username in new.path

    @mock.patch("time.sleep", return_value=None)
    def test_create(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestMultireddit.test_create"):
            multireddit = self.reddit.multireddit.create(
                "PRAW create test", subreddits=["redditdev"]
            )
            assert multireddit.display_name == "PRAW create test"
            assert multireddit.name == "praw_create_test"

    @mock.patch("time.sleep", return_value=None)
    def test_delete(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestMultireddit.test_delete"):
            multi = self.reddit.user.multireddits()[0]
            multi.delete()

    @mock.patch("time.sleep", return_value=None)
    def test_remove(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette("TestMultireddit.test_remove"):
            multi = self.reddit.user.multireddits()[0]
            multi.remove("redditdev")
            assert "redditdev" not in multi.subreddits

    def test_subreddits(self):
        # Read-only: no sleep patch needed since no write rate limits apply.
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultireddit.test_subreddits"):
            assert multi.subreddits
            assert all(isinstance(x, Subreddit) for x in multi.subreddits)

    @mock.patch("time.sleep", return_value=None)
    def test_update(self, _):
        self.reddit.read_only = False
        subreddits = ["pokemongo", "pokemongodev"]
        with self.recorder.use_cassette("TestMultireddit.test_update"):
            multi = self.reddit.user.multireddits()[0]
            prev_path = multi.path
            multi.update(
                display_name="Updated display name", subreddits=subreddits
            )
            # Updating the display name must not change the URL path.
            assert multi.display_name == "Updated display name"
            assert multi.path == prev_path
            assert multi.subreddits == subreddits
class TestMultiredditListings(IntegrationTest):
    """Integration tests for multireddit listing endpoints.

    Each listing is expected to return a full page (100 items) from the
    recorded cassette, except the "rising" listings, which only need to
    be non-empty because their content is time-dependent.
    """

    def test_comments(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette(
            "TestMultiredditListings.test_comments"
        ):
            comments = list(multi.comments())
            assert len(comments) == 100

    def test_controversial(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette(
            "TestMultiredditListings.test_controversial"
        ):
            submissions = list(multi.controversial())
            assert len(submissions) == 100

    def test_gilded(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditListings.test_gilded"):
            submissions = list(multi.gilded())
            assert len(submissions) == 100

    def test_hot(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditListings.test_hot"):
            submissions = list(multi.hot())
            assert len(submissions) == 100

    def test_new(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditListings.test_new"):
            submissions = list(multi.new())
            assert len(submissions) == 100

    @mock.patch("time.sleep", return_value=None)
    def test_new__self_multi(self, _):
        # Same listing but on one of the authenticated user's own multis.
        self.reddit.read_only = False
        with self.recorder.use_cassette(
            "TestMultiredditListings.test_new__self_multi"
        ):
            multi = self.reddit.user.multireddits()[0]
            submissions = list(multi.new())
            assert len(submissions) == 100

    def test_random_rising(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette(
            "TestMultiredditListings.test_random_rising"
        ):
            submissions = list(multi.random_rising())
            assert len(submissions) > 0

    def test_rising(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditListings.test_rising"):
            submissions = list(multi.rising())
            assert len(submissions) > 0

    def test_top(self):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditListings.test_top"):
            submissions = list(multi.top())
            assert len(submissions) == 100
class TestMultiredditStreams(IntegrationTest):
    """Integration tests for multireddit comment/submission streams.

    The ``pause_after`` variants drive the stream generator by hand:
    with ``pause_after`` set the generator yields ``None`` when a fetch
    produces no (or a bounded number of) new items, so the loops below
    count items and pauses separately.  Expected counts are pinned to
    the recorded cassettes.
    """

    @mock.patch("time.sleep", return_value=None)
    def test_comments(self, _):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditStreams.comments"):
            generator = multi.stream.comments()
            # 110 > one full page, so the stream must fetch more than once.
            for i in range(110):
                assert isinstance(next(generator), Comment)

    @mock.patch("time.sleep", return_value=None)
    def test_comments__with_pause(self, _):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette(
            "TestMultiredditStreams.comments__with_pause"
        ):
            comment_stream = multi.stream.comments(pause_after=0)
            comment_count = 1
            pause_count = 1
            comment = next(comment_stream)
            # First drain the comments, then count the pause markers
            # (None yields) until a comment appears again.
            while comment is not None:
                comment_count += 1
                comment = next(comment_stream)
            while comment is None:
                pause_count += 1
                comment = next(comment_stream)
            assert comment_count == 102
            assert pause_count == 4

    @mock.patch("time.sleep", return_value=None)
    def test_submissions(self, _):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        with self.recorder.use_cassette("TestMultiredditStreams.submissions"):
            generator = multi.stream.submissions()
            for i in range(102):
                assert isinstance(next(generator), Submission)

    @mock.patch("time.sleep", return_value=None)
    def test_submissions__with_pause(self, _):
        multi = self.reddit.multireddit("kjoneslol", "sfwpornnetwork")
        # NOTE: reuses the plain "submissions" cassette deliberately.
        with self.recorder.use_cassette("TestMultiredditStreams.submissions"):
            generator = multi.stream.submissions(pause_after=-1)
            submission = next(generator)
            submission_count = 0
            while submission is not None:
                submission_count += 1
                submission = next(generator)
            assert submission_count == 100
# This file implements polynomial regression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.metrics import mean_squared_error
# Set the number of samples (also the plotting-grid resolution) and a
# fixed RNG seed so every run produces the same data and figures.
NUM_SAMPLES = 100
np.random.seed(42)
# Our `True' function
def f(x):
    """Ground-truth cubic 7*(x^3 - 1.3 x^2 + 0.5 x - 0.056), Horner form."""
    return 7 * (((x - 1.3) * x + 0.5) * x - 0.056)
# initialize sample data: x values drawn uniformly from [0, 1), paired
# with the noiseless function value f(x).
data = np.array([[x,f(x) ] for x in np.random.random(NUM_SAMPLES)])
# grid of coordinates for plotting the true function as a smooth curve
gridx = np.linspace(0, 1, NUM_SAMPLES)
gridy = np.array([f(x) for x in gridx])
datax = data[:,0]
# Observed targets: true values plus Gaussian noise with std 0.3.
normaly = data[:,1]+0.3*np.random.randn(NUM_SAMPLES)
# Plot the noisy sample points against the true curve and save to disk.
plt.scatter(datax, normaly )
plt.title("Scatter plot of synthetic data with normal errors")
plt.plot(gridx, gridy, label = "True function", color = 'Red')
plt.legend(loc = 2)
plt.savefig("poly_scatter_normal.png")
plt.cla()
# Feature flag: set False to skip the degree-sweep experiment below.
gen_poly = True
# Run polynomial regression repeatedly for increasing degrees.
# For each degree 1..7, fit on growing prefixes of the data (first i
# points) and save one figure per (degree, i) — effectively animation
# frames showing how the fit evolves as data accumulates.
if gen_poly:
    lm = LinearRegression()
    for deg in range(1, 8):
        poly = PolynomialFeatures(degree = deg)
        newdatax = poly.fit_transform(datax.reshape(NUM_SAMPLES,1))
        for i in range(1, NUM_SAMPLES+1):
            # Fit on the first i samples only; evaluate MSE on all samples.
            lm.fit(newdatax[:i], normaly[:i].reshape(i, 1))
            predictions = lm.predict(newdatax)
            mse = mean_squared_error(predictions, normaly.reshape(NUM_SAMPLES,1))
            # Plot everything: data, true curve, and the current fit.
            plt.ylim(-0.75, 1.25)
            plt.scatter(datax, normaly)
            plt.title("Degree {} polynomial regression on {} points with normal error".format(deg, i))
            plt.plot(gridx, gridy, label = "True function", color = 'Red')
            gridpred = lm.predict(poly.fit_transform(gridx.reshape(NUM_SAMPLES, 1)))
            plt.plot(gridx.flatten(), gridpred.flatten(), label = "Polynomial regressor curve MSE = {:0.4f}".format(mse), color = 'Green')
            plt.legend(loc = 2)
            plt.savefig("polyreg_normal_{:02d}{:03d}.png".format(deg,i))
            plt.cla()
# Run degree 10 polynomial regression repeatedly using a random sample
# of 30 points each time, to visualise the variance of a high-degree
# fit across resamples (note: np.random.choice samples WITH replacement
# here, so a draw can contain duplicate points).
gen_var = True
if gen_var:
    lm = LinearRegression()
    poly = PolynomialFeatures(degree = 10)
    newdatax = poly.fit_transform(datax.reshape(NUM_SAMPLES,1))
    for i in range(30):
        samp = np.random.choice(range(NUM_SAMPLES), 30)
        lm.fit(newdatax[samp], normaly[samp].reshape(30, 1))
        predictions = lm.predict(newdatax)
        mse = mean_squared_error(predictions, normaly.reshape(NUM_SAMPLES,1))
        # Plot everything: data, true curve, and this resample's fit.
        plt.ylim(-0.75, 1.25)
        plt.scatter(datax, normaly)
        plt.title("Degree {} polynomial regression on 30 random points with normal error".format(10))
        plt.plot(gridx, gridy, label = "True function", color = 'Red')
        gridpred = lm.predict(poly.fit_transform(gridx.reshape(NUM_SAMPLES, 1)))
        plt.plot(gridx.flatten(), gridpred.flatten(), label = "Polynomial regressor curve MSE = {:0.4f}".format(mse), color = 'Green')
        plt.legend(loc = 2)
        plt.savefig("polyreg_var_{:03d}.png".format(i))
        plt.cla()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: interface still in preview,
# maintained by the community (not the core team).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_cluster_ha
short_description: Manage High Availability (HA) on VMware vSphere clusters
description:
- Manages HA configuration on VMware vSphere clusters.
- All values and VMware object names are case sensitive.
version_added: '2.9'
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
requirements:
- Tested on ESXi 5.5 and 6.5.
- PyVmomi installed.
options:
cluster_name:
description:
- The name of the cluster to be managed.
type: str
required: yes
datacenter:
description:
- The name of the datacenter.
type: str
required: yes
aliases: [ datacenter_name ]
enable_ha:
description:
- Whether to enable HA.
type: bool
default: 'no'
ha_host_monitoring:
description:
- Whether HA restarts virtual machines after a host fails.
- If set to C(enabled), HA restarts virtual machines after a host fails.
- If set to C(disabled), HA does not restart virtual machines after a host fails.
- If C(enable_ha) is set to C(no), then this value is ignored.
type: str
choices: [ 'enabled', 'disabled' ]
default: 'enabled'
ha_vm_monitoring:
description:
- State of virtual machine health monitoring service.
- If set to C(vmAndAppMonitoring), HA response to both virtual machine and application heartbeat failure.
- If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
- If set to C(vmMonitoringOnly), HA response to virtual machine heartbeat failure.
- If C(enable_ha) is set to C(no), then this value is ignored.
type: str
choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
default: 'vmMonitoringDisabled'
host_isolation_response:
description:
- Indicates whether or VMs should be powered off if a host determines that it is isolated from the rest of the compute resource.
- If set to C(none), do not power off VMs in the event of a host network isolation.
- If set to C(powerOff), power off VMs in the event of a host network isolation.
- If set to C(shutdown), shut down VMs guest operating system in the event of a host network isolation.
type: str
choices: ['none', 'powerOff', 'shutdown']
default: 'none'
slot_based_admission_control:
description:
- Configure slot based admission control policy.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_level:
description:
- Number of host failures that should be tolerated.
type: int
required: true
type: dict
reservation_based_admission_control:
description:
- Configure reservation based admission control policy.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_level:
description:
- Number of host failures that should be tolerated.
type: int
required: true
auto_compute_percentages:
description:
- By default, C(failover_level) is used to calculate C(cpu_failover_resources_percent) and C(memory_failover_resources_percent).
To override the percentage values, set this field to false.
type: bool
default: true
cpu_failover_resources_percent:
description:
- Percentage of CPU resources in the cluster to reserve for failover.
Ignored if C(auto_compute_percentages) is not set to false.
type: int
default: 50
memory_failover_resources_percent:
description:
- Percentage of memory resources in the cluster to reserve for failover.
Ignored if C(auto_compute_percentages) is not set to false.
type: int
default: 50
type: dict
failover_host_admission_control:
description:
- Configure dedicated failover hosts.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_hosts:
description:
- List of dedicated failover hosts.
type: list
required: true
type: dict
ha_vm_failure_interval:
description:
- The number of seconds after which virtual machine is declared as failed
if no heartbeat has been received.
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
type: int
default: 30
ha_vm_min_up_time:
description:
- The number of seconds for the virtual machine's heartbeats to stabilize after
the virtual machine has been powered on.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
type: int
default: 120
ha_vm_max_failures:
description:
- Maximum number of failures and automated resets allowed during the time
that C(ha_vm_max_failure_window) specifies.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
type: int
default: 3
ha_vm_max_failure_window:
description:
- The number of seconds for the window during which up to C(ha_vm_max_failures) resets
can occur before automated responses stop.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
- Default specifies no failure window.
type: int
default: -1
ha_restart_priority:
description:
- Priority HA gives to a virtual machine if sufficient capacity is not available
to power on all failed virtual machines.
- Valid only if I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- If set to C(disabled), then HA is disabled for this virtual machine.
- If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
- If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
- If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
type: str
default: 'medium'
choices: [ 'disabled', 'high', 'low', 'medium' ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r"""
- name: Enable HA without admission control
vmware_cluster_ha:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: datacenter
cluster_name: cluster
enable_ha: yes
delegate_to: localhost
- name: Enable HA and VM monitoring without admission control
vmware_cluster_ha:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter_name: DC0
cluster_name: "{{ cluster_name }}"
enable_ha: True
ha_vm_monitoring: vmMonitoringOnly
delegate_to: localhost
- name: Enable HA with admission control reserving 50% of resources for HA
vmware_cluster_ha:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: datacenter
cluster_name: cluster
enable_ha: yes
reservation_based_admission_control:
auto_compute_percentages: False
failover_level: 1
cpu_failover_resources_percent: 50
memory_failover_resources_percent: 50
delegate_to: localhost
"""
RETURN = r"""#
"""
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
vmware_argument_spec, wait_for_task)
from ansible.module_utils._text import to_native
class VMwareCluster(PyVmomi):
    """Applies the requested HA configuration to a vSphere cluster.

    Resolves the datacenter and cluster on construction (failing the
    module fast if either is missing); ``configure_ha()`` then diffs the
    live dasConfig against the module parameters and only issues a
    reconfigure task when something actually differs (idempotence).
    """

    def __init__(self, module):
        super(VMwareCluster, self).__init__(module)
        self.cluster_name = module.params['cluster_name']
        self.datacenter_name = module.params['datacenter']
        self.enable_ha = module.params['enable_ha']
        self.datacenter = None
        self.cluster = None
        # Map the module's string choice onto the corresponding vim enum value.
        self.host_isolation_response = getattr(vim.cluster.DasVmSettings.IsolationResponse, self.params.get('host_isolation_response'))

        # Admission control is implied by supplying any one of the three
        # mutually-exclusive policy dicts while HA is enabled.
        if self.enable_ha and (
                self.params.get('slot_based_admission_control') or
                self.params.get('reservation_based_admission_control') or
                self.params.get('failover_host_admission_control')):
            self.ha_admission_control = True
        else:
            self.ha_admission_control = False

        self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
        if self.datacenter is None:
            self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)

        self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)
        if self.cluster is None:
            self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)

    def get_failover_hosts(self):
        """
        Get failover hosts for failover_host_admission_control policy
        Returns: List of ESXi hosts sorted by name

        Fails the module if any requested host is not a member of the
        cluster.  The sort keeps the list comparable against the live
        policy's (also sorted) host list in check_ha_config_diff().
        """
        policy = self.params.get('failover_host_admission_control')
        hosts = []
        all_hosts = dict((h.name, h) for h in self.get_all_hosts_by_cluster(self.cluster_name))
        for host in policy.get('failover_hosts'):
            if host in all_hosts:
                hosts.append(all_hosts.get(host))
            else:
                self.module.fail_json(msg="Host %s is not a member of cluster %s." % (host, self.cluster_name))
        hosts.sort(key=lambda h: h.name)
        return hosts

    def check_ha_config_diff(self):
        """
        Check HA configuration diff
        Returns: True if there is diff, else False
        """
        das_config = self.cluster.configurationEx.dasConfig
        if das_config.enabled != self.enable_ha:
            return True

        # Only compare the detailed settings when HA is being enabled;
        # when disabling, the enabled flag alone decides.
        if self.enable_ha and (
                das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or
                das_config.hostMonitoring != self.params.get('ha_host_monitoring') or
                das_config.admissionControlEnabled != self.ha_admission_control or
                das_config.defaultVmSettings.restartPriority != self.params.get('ha_restart_priority') or
                das_config.defaultVmSettings.isolationResponse != self.host_isolation_response or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring != self.params.get('ha_vm_monitoring') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval != self.params.get('ha_vm_failure_interval') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime != self.params.get('ha_vm_min_up_time') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures != self.params.get('ha_vm_max_failures') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow != self.params.get('ha_vm_max_failure_window')):
            return True

        if self.ha_admission_control:
            # A policy of the wrong vim type always counts as a diff.
            if self.params.get('slot_based_admission_control'):
                policy = self.params.get('slot_based_admission_control')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverLevelAdmissionControlPolicy) or \
                        das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
                    return True
            elif self.params.get('reservation_based_admission_control'):
                policy = self.params.get('reservation_based_admission_control')
                auto_compute_percentages = policy.get('auto_compute_percentages')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverResourcesAdmissionControlPolicy) or \
                        das_config.admissionControlPolicy.autoComputePercentages != auto_compute_percentages or \
                        das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
                    return True
                # Explicit percentages only matter when auto-compute is off.
                if not auto_compute_percentages:
                    if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get('cpu_failover_resources_percent') or \
                            das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get('memory_failover_resources_percent'):
                        return True
            elif self.params.get('failover_host_admission_control'):
                policy = self.params.get('failover_host_admission_control')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverHostAdmissionControlPolicy):
                    return True
                # Sort the live list so the comparison is order-independent
                # (get_failover_hosts() returns a name-sorted list).
                das_config.admissionControlPolicy.failoverHosts.sort(key=lambda h: h.name)
                if das_config.admissionControlPolicy.failoverHosts != self.get_failover_hosts():
                    return True

        return False

    def configure_ha(self):
        """
        Manage HA Configuration

        Builds a ClusterConfigSpecEx mirroring the module parameters and
        submits it as a reconfigure task.  In check mode only reports
        whether a change would occur.  Exits the module in all paths.
        """
        changed, result = False, None
        if self.check_ha_config_diff():
            if not self.module.check_mode:
                cluster_config_spec = vim.cluster.ConfigSpecEx()
                cluster_config_spec.dasConfig = vim.cluster.DasConfigInfo()
                cluster_config_spec.dasConfig.enabled = self.enable_ha

                if self.enable_ha:
                    # Per-VM tools monitoring defaults for the whole cluster.
                    vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
                    vm_tool_spec.enabled = True
                    vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring')
                    vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
                    vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
                    vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
                    vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')

                    das_vm_config = vim.cluster.DasVmSettings()
                    das_vm_config.restartPriority = self.params.get('ha_restart_priority')
                    das_vm_config.isolationResponse = self.host_isolation_response
                    das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
                    cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config

                cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control

                if self.ha_admission_control:
                    # Build exactly one of the three policy objects.
                    if self.params.get('slot_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
                        policy = self.params.get('slot_based_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                    elif self.params.get('reservation_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
                        policy = self.params.get('reservation_based_admission_control')
                        auto_compute_percentages = policy.get('auto_compute_percentages')
                        cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                        if not auto_compute_percentages:
                            cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \
                                policy.get('cpu_failover_resources_percent')
                            cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \
                                policy.get('memory_failover_resources_percent')
                    elif self.params.get('failover_host_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy()
                        policy = self.params.get('failover_host_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts()

                cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring')
                cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring')

                try:
                    # modify=True merges this spec into the existing config.
                    task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                    changed, result = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                              " due to generic exception %s" % to_native(generic_exc))
            else:
                # Check mode: a diff exists, so report "would change".
                changed = True

        self.module.exit_json(changed=changed, result=result)
def main():
    """Module entry point: declare the argument spec and run the worker."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        cluster_name=dict(type='str', required=True),
        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
        # HA
        enable_ha=dict(type='bool', default=False),
        ha_host_monitoring=dict(type='str',
                                default='enabled',
                                choices=['enabled', 'disabled']),
        host_isolation_response=dict(type='str',
                                     default='none',
                                     choices=['none', 'powerOff', 'shutdown']),
        # HA VM Monitoring related parameters
        ha_vm_monitoring=dict(type='str',
                              choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
                              default='vmMonitoringDisabled'),
        ha_vm_failure_interval=dict(type='int', default=30),
        ha_vm_min_up_time=dict(type='int', default=120),
        ha_vm_max_failures=dict(type='int', default=3),
        ha_vm_max_failure_window=dict(type='int', default=-1),
        ha_restart_priority=dict(type='str',
                                 choices=['high', 'low', 'medium', 'disabled'],
                                 default='medium'),
        # HA Admission Control related parameters (exactly one policy may
        # be supplied — enforced via mutually_exclusive below).
        slot_based_admission_control=dict(type='dict', options=dict(
            failover_level=dict(type='int', required=True),
        )),
        reservation_based_admission_control=dict(type='dict', options=dict(
            auto_compute_percentages=dict(type='bool', default=True),
            failover_level=dict(type='int', required=True),
            cpu_failover_resources_percent=dict(type='int', default=50),
            memory_failover_resources_percent=dict(type='int', default=50),
        )),
        failover_host_admission_control=dict(type='dict', options=dict(
            failover_hosts=dict(type='list', elements='str', required=True),
        )),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['slot_based_admission_control', 'reservation_based_admission_control', 'failover_host_admission_control']
        ]
    )

    vmware_cluster_ha = VMwareCluster(module)
    vmware_cluster_ha.configure_ha()
from __future__ import unicode_literals
from .common import InfoExtractor
class ThisAmericanLifeIE(InfoExtractor):
    """Extractor for This American Life radio-archive episodes.

    The audio stream URL is constructed directly from the numeric
    episode id; the archive page is only fetched for metadata
    (title/description/thumbnail).
    """
    _VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one',
        'md5': '8f7d2da8926298fdfca2ee37764c11ce',
        'info_dict': {
            'id': '487',
            'ext': 'm4a',
            'title': '487: Harper High School, Part One',
            'description': 'md5:ee40bdf3fb96174a9027f76dbecea655',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.thisamericanlife.org/play_full.php?play=487',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Always fetch the radio-archives page (even for play_full.php
        # URLs) since it carries the twitter:title/description meta tags.
        webpage = self._download_webpage(
            'http://www.thisamericanlife.org/radio-archives/episode/%s' % video_id, video_id)

        return {
            'id': video_id,
            # HLS playlist URL is derived from the episode id alone.
            'url': 'http://stream.thisamericanlife.org/{0}/stream/{0}_64k.m3u8'.format(video_id),
            'protocol': 'm3u8_native',
            'ext': 'm4a',
            'acodec': 'aac',
            'vcodec': 'none',
            'abr': 64,
            'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True),
            'description': self._html_search_meta(r'description', webpage, 'description'),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
'''
IDE: Eclipse (PyDev)
Python version: 2.7
Operating system: Windows 8.1
@author: Emil Carlsson
@copyright: 2015 Emil Carlsson
@license: This program is distributed under the terms of the GNU General Public License
'''
from View import GlobalFunc
from View.Board import Board
class GameView(object):
    """View layer for an active game session (Tkinter).

    Holds the root widget and the game controller, and forwards all
    rendering and player-messaging work to a Board instance.
    """

    __root = None
    __controller = None
    __board = None

    def __init__(self, root, gameController, *args, **kwargs):
        self.__root = root
        self.__controller = gameController

    def StartNewGame(self, player=None, opponent=None):
        """Clear the window and build a fresh board for the given players."""
        GlobalFunc.RemoveAllChildren(self.__root)
        self.__board = Board(self.__root, self.__controller, player, opponent)

    def RefreshBoard(self, playerOne, playerTwo):
        """Redraw the board to reflect both players' current state."""
        self.__board.RefreshBoard(playerOne, playerTwo)

    def RemoveFrame(self, frame):
        """Destroy an arbitrary child frame."""
        frame.destroy()

    def ResetInformation(self):
        """Clear the information/message area of the board."""
        self.__board.ResetInformation()

    def AppendMessage(self, message):
        """Append a free-form message to the information area."""
        self.__board.AppendInformation(message)

    # --- fixed status messages shown to the player -----------------------

    def OutOfMoves(self):
        self.__board.AddInformation("You are out of moves.\nPlease finnish your turn.")

    def MaxHandSize(self):
        self.__board.AddInformation("Maximum hand size reached.\nPlease play a card if possible.")

    def MaxVisibleHandSize(self):
        self.__board.AddInformation("Maximum amount of visible cards reached.")

    def PlayerLost(self):
        self.__board.AddInformation("You lost!\nGame Over!")

    def PlayerWon(self):
        self.__board.AddInformation("You won!\nGame Over!")

    def OutOfCards(self):
        self.__board.AddInformation("You are out of cards in your deck.")

    def CardNotInHand(self):
        self.__board.AddInformation("Card not on your hand.")

    # TODO(review): reported as not showing in the UI — investigate.
    def WaitingForOpponent(self):
        self.__board.AddInformation("Waiting for opponent...")
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kruntime "k8s.io/apimachinery/pkg/runtime"
serviceconfigv1alpha1 "k8s.io/cloud-provider/controllers/service/config/v1alpha1"
cmconfigv1alpha1 "k8s.io/controller-manager/config/v1alpha1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
csrsigningconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/certificates/signer/config/v1alpha1"
cronjobconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/cronjob/config/v1alpha1"
daemonconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/daemon/config/v1alpha1"
deploymentconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/deployment/config/v1alpha1"
devicetaintevictionconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/devicetainteviction/config/v1alpha1"
endpointconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpoint/config/v1alpha1"
endpointsliceconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpointslice/config/v1alpha1"
endpointslicemirroringconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config/v1alpha1"
garbagecollectorconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/garbagecollector/config/v1alpha1"
jobconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/job/config/v1alpha1"
namespaceconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/namespace/config/v1alpha1"
nodeipamconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/nodeipam/config/v1alpha1"
nodelifecycleconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/nodelifecycle/config/v1alpha1"
poautosclerconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/podautoscaler/config/v1alpha1"
podgcconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/podgc/config/v1alpha1"
replicasetconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/replicaset/config/v1alpha1"
replicationconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/replication/config/v1alpha1"
resourceclaimconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/resourceclaim/config/v1alpha1"
resourcequotaconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/resourcequota/config/v1alpha1"
serviceaccountconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/serviceaccount/config/v1alpha1"
statefulsetconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/statefulset/config/v1alpha1"
ttlafterfinishedconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/ttlafterfinished/config/v1alpha1"
validatingadmissionpolicystatusv1alpha1 "k8s.io/kubernetes/pkg/controller/validatingadmissionpolicystatus/config/v1alpha1"
attachdetachconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config/v1alpha1"
ephemeralvolumeconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/ephemeral/config/v1alpha1"
persistentvolumeconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config/v1alpha1"
)
// addDefaultingFuncs registers the generated defaulting functions for this
// API group with the given scheme.
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
	return RegisterDefaults(scheme)
}
// SetDefaults_KubeControllerManagerConfiguration applies recommended defaults
// to a KubeControllerManagerConfiguration: kube-controller-manager-specific
// client-connection settings first, then the recommended defaults for the
// generic configuration and for every per-controller configuration block.
func SetDefaults_KubeControllerManagerConfiguration(obj *kubectrlmgrconfigv1alpha1.KubeControllerManagerConfiguration) {
	// These defaults override the recommended defaults from the componentbaseconfigv1alpha1 package that are applied automatically
	// These client-connection defaults are specific to the kube-controller-manager
	if obj.Generic.ClientConnection.QPS == 0.0 {
		obj.Generic.ClientConnection.QPS = 20.0
	}
	if obj.Generic.ClientConnection.Burst == 0 {
		obj.Generic.ClientConnection.Burst = 30
	}

	// Use the default RecommendedDefaultGenericControllerManagerConfiguration options
	cmconfigv1alpha1.RecommendedDefaultGenericControllerManagerConfiguration(&obj.Generic)
	// Use the default RecommendedDefaultAttachDetachControllerConfiguration options
	attachdetachconfigv1alpha1.RecommendedDefaultAttachDetachControllerConfiguration(&obj.AttachDetachController)
	// Use the default RecommendedDefaultCSRSigningControllerConfiguration options
	csrsigningconfigv1alpha1.RecommendedDefaultCSRSigningControllerConfiguration(&obj.CSRSigningController)
	// Use the default RecommendedDefaultDaemonSetControllerConfiguration options
	daemonconfigv1alpha1.RecommendedDefaultDaemonSetControllerConfiguration(&obj.DaemonSetController)
	// Use the default RecommendedDefaultDeploymentControllerConfiguration options
	deploymentconfigv1alpha1.RecommendedDefaultDeploymentControllerConfiguration(&obj.DeploymentController)
	// Use the default RecommendedDefaultDeviceTaintEvictionControllerConfiguration options
	devicetaintevictionconfigv1alpha1.RecommendedDefaultDeviceTaintEvictionControllerConfiguration(&obj.DeviceTaintEvictionController)
	// Use the default RecommendedDefaultResourceClaimControllerConfiguration options
	resourceclaimconfigv1alpha1.RecommendedDefaultResourceClaimControllerConfiguration(&obj.ResourceClaimController)
	// Use the default RecommendedDefaultStatefulSetControllerConfiguration options
	statefulsetconfigv1alpha1.RecommendedDefaultStatefulSetControllerConfiguration(&obj.StatefulSetController)
	// Use the default RecommendedDefaultEndpointControllerConfiguration options
	endpointconfigv1alpha1.RecommendedDefaultEndpointControllerConfiguration(&obj.EndpointController)
	// Use the default RecommendedDefaultEndpointSliceControllerConfiguration options
	endpointsliceconfigv1alpha1.RecommendedDefaultEndpointSliceControllerConfiguration(&obj.EndpointSliceController)
	// Use the default RecommendedDefaultEndpointSliceMirroringControllerConfiguration options
	endpointslicemirroringconfigv1alpha1.RecommendedDefaultEndpointSliceMirroringControllerConfiguration(&obj.EndpointSliceMirroringController)
	// Use the default RecommendedDefaultEphemeralVolumeControllerConfiguration options
	ephemeralvolumeconfigv1alpha1.RecommendedDefaultEphemeralVolumeControllerConfiguration(&obj.EphemeralVolumeController)
	// Use the default RecommendedDefaultGarbageCollectorControllerConfiguration options
	garbagecollectorconfigv1alpha1.RecommendedDefaultGarbageCollectorControllerConfiguration(&obj.GarbageCollectorController)
	// Use the default RecommendedDefaultJobControllerConfiguration options
	jobconfigv1alpha1.RecommendedDefaultJobControllerConfiguration(&obj.JobController)
	// Use the default RecommendedDefaultCronJobControllerConfiguration options
	cronjobconfigv1alpha1.RecommendedDefaultCronJobControllerConfiguration(&obj.CronJobController)
	// Use the default RecommendedDefaultNamespaceControllerConfiguration options
	namespaceconfigv1alpha1.RecommendedDefaultNamespaceControllerConfiguration(&obj.NamespaceController)
	// Use the default RecommendedDefaultNodeIPAMControllerConfiguration options
	nodeipamconfigv1alpha1.RecommendedDefaultNodeIPAMControllerConfiguration(&obj.NodeIPAMController)
	// Use the default RecommendedDefaultHPAControllerConfiguration options
	poautosclerconfigv1alpha1.RecommendedDefaultHPAControllerConfiguration(&obj.HPAController)
	// Use the default RecommendedDefaultNodeLifecycleControllerConfiguration options
	nodelifecycleconfigv1alpha1.RecommendedDefaultNodeLifecycleControllerConfiguration(&obj.NodeLifecycleController)
	// Use the default RecommendedDefaultPodGCControllerConfiguration options
	podgcconfigv1alpha1.RecommendedDefaultPodGCControllerConfiguration(&obj.PodGCController)
	// Use the default RecommendedDefaultReplicaSetControllerConfiguration options
	replicasetconfigv1alpha1.RecommendedDefaultReplicaSetControllerConfiguration(&obj.ReplicaSetController)
	// Use the default RecommendedDefaultReplicationControllerConfiguration options
	replicationconfigv1alpha1.RecommendedDefaultReplicationControllerConfiguration(&obj.ReplicationController)
	// Use the default RecommendedDefaultResourceQuotaControllerConfiguration options
	resourcequotaconfigv1alpha1.RecommendedDefaultResourceQuotaControllerConfiguration(&obj.ResourceQuotaController)
	// Use the default RecommendedDefaultServiceControllerConfiguration options
	serviceconfigv1alpha1.RecommendedDefaultServiceControllerConfiguration(&obj.ServiceController)
	// Use the default RecommendedDefaultLegacySATokenCleanerConfiguration options
	serviceaccountconfigv1alpha1.RecommendedDefaultLegacySATokenCleanerConfiguration(&obj.LegacySATokenCleaner)
	// Use the default RecommendedDefaultSAControllerConfiguration options
	serviceaccountconfigv1alpha1.RecommendedDefaultSAControllerConfiguration(&obj.SAController)
	// Use the default RecommendedDefaultTTLAfterFinishedControllerConfiguration options
	ttlafterfinishedconfigv1alpha1.RecommendedDefaultTTLAfterFinishedControllerConfiguration(&obj.TTLAfterFinishedController)
	// Use the default RecommendedDefaultPersistentVolumeBinderControllerConfiguration options
	persistentvolumeconfigv1alpha1.RecommendedDefaultPersistentVolumeBinderControllerConfiguration(&obj.PersistentVolumeBinderController)
	// Use the default RecommendedDefaultValidatingAdmissionPolicyStatusControllerConfiguration options
	validatingadmissionpolicystatusv1alpha1.RecommendedDefaultValidatingAdmissionPolicyStatusControllerConfiguration(&obj.ValidatingAdmissionPolicyStatusController)
}
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.webmvcview.mvcviewjspresolver;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.ViewResolverRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
// tag::snippet[]
@Configuration
public class WebConfiguration implements WebMvcConfigurer {
@Override
public void configureViewResolvers(ViewResolverRegistry registry) {
registry.jsp();
}
}
// end::snippet[] | java | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/java/org/springframework/docs/web/webmvcview/mvcviewjspresolver/WebConfiguration.java |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from fpuf.apps.ufs.models import Familia, Cicle, MP, UF, ResultatAprenentatge,\
Contingut
import random
import string
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Management command that seeds the database with sample curriculum data.

    Fix: the previous ``args``/``help`` strings were copied verbatim from the
    Django polls tutorial ("Closes the specified poll for voting") and did not
    describe this command at all; they are corrected here. The command takes
    no arguments.
    """
    args = ''
    help = u'Populates the database with sample families, cicles, MPs and UFs'

    def handle(self, *args, **options):
        # Delegate the actual data creation, then report completion.
        crea_dades_exemple()
        self.stdout.write(u"Procés finalitzat")
def crea_dades_exemple():
    """Populate the database with sample users, families, cicles, MPs and UFs.

    Creates a handful of test users, the Informàtica/Administratiu/FOL
    families, one Cicle per curriculum below, and for each curriculum its
    MPs and numbered UFs. Finally, marks UF1-UF3 of MP01-MP04 as mutually
    equivalent across the ASIX/DAW/SMX cicles.

    Fix: the two ``print buscant`` Python 2 print *statements* near the end
    were inconsistent with the ``print(...)`` calls used everywhere else in
    this function (and are a syntax error under Python 3); they are now
    function calls.
    """
    # Sample users, all with password 'pepe' and a seeded profile.
    usuaris = ['pepe', 'u1', 'u2', 'u3', 'u4']
    for nom in usuaris:
        u = User.objects.create(username=nom)
        u.set_password('pepe')
        u.save()
        u.perfil.reputacio_c = 200
        u.perfil.favorit_c = 5
        u.perfil.save()
    print(u"Creant famílies")
    f_informatica = Familia.objects.create(nom=u"Informàtica")
    f_administratiu = Familia.objects.create(nom=u"Administratiu")
    f_fol = Familia.objects.create(nom=u"Àrea Fol")
    print(u"Creant cicles")
    c_daw = Cicle.objects.create(codi=u"DAW", familia=f_informatica, nom=u"Desenvolupament d'aplicacions web")
    c_dam = Cicle.objects.create(codi=u"DAM", familia=f_informatica, nom=u"Desenvolupament d’aplicacions multiplataforma")
    c_asix = Cicle.objects.create(codi=u"ASIX", familia=f_informatica, nom=u"Administració de sistemes microinformàtics i xarxes")
    c_smx = Cicle.objects.create(codi=u"SMX", familia=f_informatica, nom=u"Sistemes microinformàtics i xarxes")
    #c_ga = Cicle.objects.create( codi = u"GA", familia = f_administratiu, nom = u"Gestió administrativa" )
    c_fol = Cicle.objects.create(codi=u"FOL", familia=f_fol, nom=u"Àrea Fol")

    # Each curriculum is a tuple of (MP number, MP name, tuple of UF names).
    print(u"Definint ufs daw")
    ufs_daw = (
        ('01', u"Sistemes informàtics",
            (
                u"Instal·lació, configuració i explotació del sistema informàtic",
                u"Gestió de la informació i de recursos en una xarxa",
                u"Implantació de programari específic",
            ),
        ),
        ('02', u"Bases de dades",
            (
                u'Introducció a les bases de dades',
                u'Llenguatges SQL: DML i DDL',
                u'Llenguatges SQL: DCL i extensió procedimental',
                u'Bases de dades objectes-relacionals',
            ),
        ),
        ('03', u"Programació",
            (
                u'Programació estructurada',
                u'Disseny modular',
                u'Fonaments de gestió de fitxers',
                u'Programació orientada a objectes. Fonaments',
                u'POO. Llibreries de classes fonamentals',
                u'POO. Introducció a la persistència en BD',
            ),
        ),
        ('04', u"Llenguatge de marques i sistemes de gestió d’informació",
            (
                u"Programació amb XML",
                u"Àmbits d’aplicació de l’XML",
                u"Sistemes de gestió d’informació empresarial",
            ),
        ),
        ('05', u"Entorns de desenvolupament",
            (
                u"Desenvolupament de programari",
                u"UF2. Optimització de programari",
                u"UF3. Introducció al disseny orientat a objectes",
            ),
        ),
        ('06', u"Desenvolupament web en entorn client",
            (
                u"Sintaxi del llenguatge. Objectes predefinits del llenguatge.",
                u"Estructures definides pel programador. Objectes.",
                u"Esdeveniments. Manegament de formularis. Model d'objectes del document.",
                u"Comunicació asíncrona client-servidor.",
            ),
        ),
        ('07', u"Desenvolupament web en entorn servidor",
            (
                u"Desenvolupament web en entorn servidor.",
                u"Generació dinàmica de pagines web.",
                u"Tècniques d’accés a dades.",
                u"Serveis web. Pàgines dinàmiques interactives. Webs Híbrids.",
            ),
        ),
        ('08', u"Desplegament d’aplicacions web",
            (
                u"Servidors web i de transferència de fitxers.",
                u"Servidors d’aplicacions web.",
                u"Desplegament d’aplicacions web.",
                u"Control de versions i documentació",
            ),
        ),
        ('09', u"Disseny d’interfícies web",
            (
                u"Disseny de la interfície. Estils.",
                u"Elements multimèdia: creació i integració.",
                u"Accessibilitat i usabilitat.",
            ),
        ),
        ('12', u"Projecte de DAW",
            (
                u"Projecte de desenvolupament d’aplicacions web",
            ),
        ),
    )
    print(u"Definint ufs dam")
    ufs_dam = (
        ('01', u"Sistemes informàtics",
            (
                u"Instal·lació, configuració i explotació del sistema informàtic",
                u"Gestió de la informació i de recursos en una xarxa",
                u"Implantació de programari específic",
            ),
        ),
        ('02', u"Bases de dades",
            (
                u'Introducció a les bases de dades',
                u'Llenguatges SQL: DML i DDL',
                u'Llenguatges SQL: DCL i extensió procedimental',
                u'Bases de dades objectes-relacionals',
            ),
        ),
        ('03', u"Programació",
            (
                u'Programació estructurada',
                u'Disseny modular',
                u'Fonaments de gestió de fitxers',
                u'Programació orientada a objectes. Fonaments',
                u'POO. Llibreries de classes fonamentals',
                u'POO. Introducció a la persistència en BD',
            ),
        ),
        ('04', u"Llenguatge de marques i sistemes de gestió d’informació",
            (
                u"Programació amb XML",
                u"Àmbits d’aplicació de l’XML",
                u"Sistemes de gestió d’informació empresarial",
            ),
        ),
        ('05', u"Entorns de desenvolupament",
            (
                u"Desenvolupament de programari",
                u"Optimització de programari",
                u"Introducció al disseny orientat a objectes",
            ),
        ),
        ('06', u"Accés a dades",
            (
                u"Persistència en fitxers",
                u"Persistència en BDR-BDOR-BDOO",
                u"Persistència en BD natives XML",
                u"Components d’accés a dades",
            ),
        ),
        ('07', u"Desenvolupament d’interfícies",
            (
                u"Disseny i implementació d’interfícies",
                u"Preparació i distribució d’aplicacions",
            ),
        ),
        ('08', u"Programació multimèdia i dispositius mòbils",
            (
                u"Desenvolupament d’aplicacions per dispositius mòbils",
                u"Programació multimèdia",
                u"Desenvolupament de jocs per dispositius mòbils",
            ),
        ),
        ('09', u"Programació de serveis i processos",
            (
                u"Seguretat i criptografia",
                u"Processos i fils",
                u"Sòcols i serveis",
            ),
        ),
        ('10', u"Sistemes de gestió empresarial",
            (
                u"Sistemes ERP-CRM. Implantació",
                u"Sistemes ERP-CRM. Explotació i adequació",
            ),
        ),
        ('13', u"Projecte de desenvolupament d’aplicacions multiplataforma",
            (
                u"Projecte de desenvolupament d’aplicacions multiplataforma",
            ),
        ),
    )
    print(u"Definint ufs asix")
    ufs_asix = (
        ('01', u"Sistemes informàtics",
            (
                u"Instal·lació, configuració i explotació del sistema informàtic",
                u"Gestió de la informació i de recursos en una xarxa",
                u"Implantació de programari específic",
                u"Seguretat, rendiment i recursos.",
            ),
        ),
        ('02', u"Bases de dades",
            (
                u'Introducció a les bases de dades',
                u'Llenguatges SQL: DML i DDL',
                u'Assegurament de la informació.',
            ),
        ),
        ('03', u"Programació",
            (
                u'Programació estructurada',
                u'Disseny modular',
                u'Fonaments de gestió de fitxers',
            ),
        ),
        ('04', u"Llenguatge de marques i sistemes de gestió d’informació",
            (
                u"Programació amb XML",
                u"Àmbits d’aplicació de l’XML",
                u"Sistemes de gestió d’informació empresarial",
            ),
        ),
        ('05', u"Fonaments de maquinari",
            (
                u"Arquitectura de sistemes.",
                u"Instal·lació, configuració i recuperació de programari.",
                u"Implantació i manteniment de CPD",
            ),
        ),
        ('06', u"Administració de sistemes operatius",
            (
                u"Administració avançada de sistemes operatius.",
                u"Automatització de tasques i llenguatges de guions.",
            ),
        ),
        ('07', u"administració de dispositius de xarxa.",
            (
                u"Introducció a les xarxes.",
                u"administració de dispositius de xarxa.",
                u"administració de dispositius de xarxa.",
            ),
        ),
        ('08', u"serveis de xarxa i Internet",
            (
                u"serveis de noms i configuració automàtica",
                u"serveis web i de transferència de fitxers.",
                u"correu electrònic i missatgeria.",
                u"serveis d’àudio i vídeo. ",
            ),
        ),
        ('09', u"implantació d’aplicacions web",
            (
                u"llenguatges de guions de servidor.",
                u"llenguatges de guions de continguts.",
            ),
        ),
        ('10', u"administració de sistemes gestors de bases de dades",
            (
                u"llenguatges SQL: DCL i extensió procedimental.",
                u"instal·lació i ajustament de SGBD corporatiu.",
            ),
        ),
        ('11', u"seguretat i alta disponibilitat",
            (
                u"seguretat física, lògica i legislació.",
                u"seguretat activa i accés remot.",
                u"tallafocs i servidors intermediaris.",
                u"alta disponibilitat.",
            ),
        ),
        ('14', u"projecte d’administració de sistemes informàtics en xarxa",
            (
                u"projecte d’administració de sistemes informàtics en xarxa.",
            ),
        ),
    )
    print(u"Definint ufs smx")
    ufs_smx = (
        ('01', u"muntatge i manteniment d’equips",
            (
                u"electricitat a l’ordinador. ",
                u"components d’un equip microinformàtic. ",
                u"muntatge d’un equip microinformàtic.",
                u"noves tendències de muntatge.",
                u"manteniment d’equips microinformàtics.",
                u"instal·lació de programari.",
            ),
        ),
        ('02', u"sistemes operatius monolloc",
            (
                u"introducció als sistemes operatius.",
                u"sistemes operatius propietaris.",
                u"sistemes operatius lliures.",
            ),
        ),
        ('03', u"aplicacions ofimàtiques",
            (
                u"aplicacions ofimàtiques i atenció a l’usuari.",
                u"el correu i l’agenda electrònica.",
                u" processadors de text.",
                u"fulls de càlcul.",
                u"bases de dades.",
                u"imatge i vídeo",
            ),
        ),
        ('04', u"sistemes operatius en xarxa",
            (
                u"sistemes operatius propietaris en xarxa.",
                u"sistemes operatius lliures en xarxa.",
                u"compartició de recursos i seguretat. ",
                u"integració de sistemes operatius. ",
            ),
        ),
        ('05', u"xarxes locals",
            (
                u"Introducció a les xarxes locals.",
                u"configuració de commutadors i encaminadors.",
                u"resolució d’incidències en xarxes locals. ",
            ),
        ),
        ('06', u"seguretat informàtica",
            (
                u"seguretat passiva. ",
                u"còpies de seguretat.",
                u"legislació de seguretat i protecció de dades.",
                u"seguretat activa.",
                u"tallafocs i monitoratge de xarxes.",
            ),
        ),
        ('07', u"serveis de xarxa",
            (
                u"configuració de la xarxa (DNS i DHCP).",
                u"correu electrònic i transmissió d’arxius.",
                u"servidor web i servidor intermediari o proxy.",
                u"accés a sistemes remots.",
            ),
        ),
        ('08', u"aplicacions web",
            (
                u"ofimàtica i eines web.",
                u"gestors d’arxius web.",
                u"gestors de continguts.",
                u"portals web d’aprenentatge.",
                u"fonaments d’HTML i fulls d’estils",
            ),
        ),
        ('11', u"anglès tècnic.",
            (
                u"anglès tècnic.",
            ),
        ),
        ('12', u"síntesi",
            (
                u"síntesi",
            )
        ),
    )
    print(u"Definint ufs fol")
    ufs_fol = (
        ('10', u"Formació i Orientació Laboral",
            (u"Incorporació al treball",
             u"Prevenció de riscos laborals"),
        ),
        ('11', u"Empresa i Iniciativa Emprenedora",
            (u"Empresa i iniciativa emprenedora",
             ),
        ),
    )

    # Create each curriculum: one MP per entry, UFs numbered from 1.
    cicles = [(c_daw, ufs_daw), (c_asix, ufs_asix), (c_dam, ufs_dam), (c_fol, ufs_fol), (c_smx, ufs_smx)]
    for cicle, curriculum in cicles:
        print(u"Creant currículum del cicle {0}".format(cicle.codi))
        if not curriculum:
            # Fallback: generate placeholder MPs when no curriculum is given.
            curriculum = [('0{0}'.format(n), u"MP0{0} del cicle {1}".format(n, cicle.codi), None,) for n in range(1, 5)]
        for numero_mp, nom_mp, ufs in curriculum:
            print(u" Creant MP {0}".format(numero_mp))
            mp = MP.objects.create(numero=numero_mp, cicle=cicle, nom=nom_mp)
            numero_uf = 0
            for nom_uf in ufs:
                numero_uf += 1
                print(u" Creant UF {0} {1}".format(numero_uf, nom_uf))
                uf = UF.objects.create(numero=numero_uf, nom=nom_uf, mp=mp)
                # l1 = range( 1, random.randint( 3,10 ) )
                # for n1 in l1:
                #     l2 = list( string.ascii_lowercase[:random.randint( 3, 7 ) ] )
                #     for n2 in l2:
                #         print( u"      Creant RA {0}".format( '{0}.{1}'.format( n1,n2) ) )
                #         ResultatAprenentatge.objects.create( codi = '{0}.{1}'.format( n1,n2), nom = u"Aleatori {0}-{1}".format( n1,n2 ), uf = uf )
                # l3 = range( 1, random.randint( 3,10 ) )
                # for n1 in l3:
                #     for n2 in range(1, random.randint( 3,7 ) ):
                #         print( u"      Creant C {0}".format( '{0}.{1}'.format( n1,n2) ) )
                #         Contingut.objects.create( codi = '{0}.{1}'.format( n1,n2), nom = u"Aleatori {0}-{1}".format( n1,n2 ), uf = uf )

    # Mark UF1-UF3 of MP01-MP04 as equivalent across ASIX/DAW/SMX.
    print(u"Fent equivalències:")
    cicles = ('ASIX', 'DAW', 'SMX')
    mps = ('MP01', 'MP02', 'MP03', 'MP04',)
    for mp in mps:
        for cicle in cicles:
            for n_uf in range(1, 4):
                buscant = "{cicle}-{mp}-UF{uf}".format(cicle=cicle, mp=mp, uf=n_uf)
                print(buscant)
                uf = UF.objects.get(codi=buscant)
                for cicle_equiv in cicles:
                    if cicle_equiv != cicle:
                        buscant = "{cicle}-{mp}-UF{uf}".format(cicle=cicle_equiv, mp=mp, uf=n_uf)
                        print(buscant)
                        uf_equiv = UF.objects.get(codi=buscant)
                        uf.equivalents.add(uf_equiv)
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput1.png}
# 0
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput2.png}
# 1
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput3.png}
# 2
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput4.png}
# 3
from __future__ import print_function
import itk
from sys import argv, stderr, exit
# Enable ITK's textual progress reporting for all filters created below.
itk.auto_progress(2)

# Command-line validation from the original example is kept for reference but
# disabled, since this variant builds its inputs procedurally.
# if( len(argv) < 3 ):
#     print("""Missing Parameters
# Usage: ResampleImageFilter.py inputImageFile outputImageFile
# [exampleAction={0,1,2,3}]""", file=stderr)
#     exit(1)

# Work in 2D: spatial objects, an internal float image, and an 8-bit output.
dim = 2
SOType = itk.SpatialObject[dim]
InternalImageType = itk.Image[itk.F, dim]
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, dim]

# Ellipse (radii 10 x 5) offset to (20, 20) in parent space.
ellipse = itk.EllipseSpatialObject[dim].New(Radius=[10, 5])
ellipse.GetObjectToParentTransform().SetOffset([20, 20])
ellipse.ComputeObjectToWorldTransform()

# Axis-aligned box of size 20 offset to (20, 40).
box = itk.BoxSpatialObject[dim].New(Size=20)
box.GetObjectToParentTransform().SetOffset([20, 40])
box.ComputeObjectToWorldTransform()

# Gaussian blob, scaled by 10 and offset to (60, 60).
gaussian = itk.GaussianSpatialObject[dim].New(Radius=100)
gaussian.GetObjectToParentTransform().SetOffset([60, 60])
gaussian.GetObjectToParentTransform().SetScale(10)
gaussian.ComputeObjectToWorldTransform()

# Group the three shapes so they can be rasterized together.
group = itk.GroupSpatialObject[dim].New()
group.AddSpatialObject(ellipse)
group.AddSpatialObject(box)
group.AddSpatialObject(gaussian)

# Rasterize the group into a 100x100 float image.
# NOTE(review): 'filter' shadows the Python built-in of the same name;
# renaming it would be cleaner, but is out of scope for a comment-only pass.
filter = itk.SpatialObjectToImageFilter[SOType, InternalImageType].New(
    group, Size=[100, 100], UseObjectValue=True)
filter.Update()  # required ?!

# Rescale the float image to the full unsigned-char range for writing.
rescale = itk.RescaleIntensityImageFilter[
    InternalImageType,
    OutputImageType].New(
        filter,
        OutputMinimum=itk.NumericTraits[OutputPixelType].NonpositiveMin(),
        OutputMaximum=itk.NumericTraits[OutputPixelType].max())

# Write the result to the path given as the first command-line argument.
itk.write(rescale, argv[1])
# -*- coding: utf-8 -*-
import datetime
import time
from django.core import mail
from nose.tools import eq_
import amo
import amo.tests
from amo.tests import addon_factory
from addons.models import Addon
from versions.models import Version, version_uploaded, ApplicationsVersions
from files.models import File
from applications.models import AppVersion
from editors.models import (EditorSubscription, RereviewQueueTheme,
ReviewerScore, send_notifications,
ViewFastTrackQueue, ViewFullReviewQueue,
ViewPendingQueue, ViewPreliminaryQueue,
ViewUnlistedFullReviewQueue,
ViewUnlistedPendingQueue,
ViewUnlistedPreliminaryQueue)
from users.models import UserProfile
def create_addon_file(name, version_str, addon_status, file_status,
                      platform=amo.PLATFORM_ALL, application=amo.FIREFOX,
                      admin_review=False, addon_type=amo.ADDON_EXTENSION,
                      created=None, file_kw=None, version_kw=None,
                      listed=True):
    """Create (or reuse by name) an add-on with one version and one file.

    Returns a dict with the 'addon', 'version' and 'file' objects.
    """
    file_kw = {} if file_kw is None else file_kw
    version_kw = {} if version_kw is None else version_kw

    app_version, _ = AppVersion.objects.get_or_create(
        application=application.id, version='1.0')
    addon, _ = Addon.with_unlisted.get_or_create(
        name__localized_string=name,
        defaults={'type': addon_type, 'name': name, 'is_listed': listed})
    if admin_review:
        addon.update(admin_review=True)

    version, version_created = Version.objects.get_or_create(
        addon=addon, version=version_str, defaults=version_kw)
    if not version_created:
        version.update(**version_kw)
    ApplicationsVersions.objects.get_or_create(
        version=version, application=application.id,
        min=app_version, max=app_version)

    file_ = File.objects.create(version=version, filename=u"%s.xpi" % name,
                                platform=platform.id, status=file_status,
                                **file_kw)
    if created:
        version.update(created=created)
        file_.update(created=created)

    # Update status *after* we are done creating/modifying the version and
    # files — re-saving those afterwards could alter the add-on status.
    Addon.with_unlisted.get(pk=addon.id).update(status=addon_status)
    return {'addon': addon, 'version': version, 'file': file_}
def create_search_ext(name, version_str, addon_status, file_status,
                      listed=True):
    """Create (or reuse by name) a search-engine add-on with one version/file."""
    addon, _ = Addon.with_unlisted.get_or_create(
        name__localized_string=name,
        defaults={'type': amo.ADDON_SEARCH, 'name': name, 'is_listed': listed})
    version, _ = Version.objects.get_or_create(addon=addon,
                                               version=version_str)
    File.objects.create(version=version, filename=u"%s.xpi" % name,
                        platform=amo.PLATFORM_ALL.id, status=file_status)
    # Update status *after* there are files attached.
    Addon.with_unlisted.get(pk=addon.id).update(status=addon_status)
    return addon
class TestQueue(amo.tests.TestCase):
    """Tests common attributes and coercions that each view must support.

    Abstract base class: concrete subclasses set ``Queue`` to the database
    view under test and provide ``new_file()`` / ``new_search_ext()``
    factories that create add-ons in the matching review state.
    """
    __test__ = False  # this is an abstract test case
    listed = True  # Are we testing listed or unlisted queues?

    def test_latest_version(self):
        # The queue row must surface the most recently created version.
        self.new_file(version=u'0.1', created=self.days_ago(2))
        self.new_file(version=u'0.2', created=self.days_ago(1))
        self.new_file(version=u'0.3')
        row = self.Queue.objects.get()
        eq_(row.latest_version, '0.3')

    def test_file_platforms(self):
        # Platforms are aggregated across the latest version's files,
        # de-duplicated against older versions.
        # Here's a dupe platform in another version:
        self.new_file(version=u'0.1', platform=amo.PLATFORM_MAC,
                      created=self.days_ago(1))
        self.new_file(version=u'0.2', platform=amo.PLATFORM_LINUX)
        self.new_file(version=u'0.2', platform=amo.PLATFORM_MAC)
        row = self.Queue.objects.get()
        eq_(sorted(row.file_platform_ids),
            [amo.PLATFORM_LINUX.id, amo.PLATFORM_MAC.id])

    def test_file_applications(self):
        # Application ids are aggregated and de-duplicated as well.
        self.new_file(version=u'0.1', application=amo.FIREFOX)
        self.new_file(version=u'0.1', application=amo.THUNDERBIRD)
        # Duplicate:
        self.new_file(version=u'0.1', application=amo.FIREFOX)
        row = self.Queue.objects.get()
        eq_(sorted(row.application_ids),
            [amo.FIREFOX.id, amo.THUNDERBIRD.id])

    def test_addons_disabled_by_user_are_hidden(self):
        f = self.new_file(version=u'0.1')
        f['addon'].update(disabled_by_user=True)
        eq_(list(self.Queue.objects.all()), [])

    def test_addons_disabled_by_admin_are_hidden(self):
        f = self.new_file(version=u'0.1')
        f['addon'].update(status=amo.STATUS_DISABLED)
        eq_(list(self.Queue.objects.all()), [])

    def test_reviewed_files_are_hidden(self):
        # Fully-reviewed add-ons must not show up in a review queue.
        self.new_file(name='Unreviewed', version=u'0.1')
        create_addon_file('Already Reviewed', '0.1',
                          amo.STATUS_PUBLIC, amo.STATUS_NULL)
        eq_(sorted(q.addon_name for q in self.Queue.objects.all()),
            ['Unreviewed'])

    def test_search_extensions(self):
        # Search extensions have no target applications but default to
        # the "all platforms" file platform.
        self.new_search_ext('Search Tool', '0.1')
        row = self.Queue.objects.get()
        eq_(row.addon_name, u'Search Tool')
        eq_(row.application_ids, [])
        eq_(row.file_platform_ids, [amo.PLATFORM_ALL.id])

    def test_count_all(self):
        # One queue row per add-on, regardless of how many versions exist.
        self.new_file(name='Addon 1', version=u'0.1')
        self.new_file(name='Addon 1', version=u'0.2')
        self.new_file(name='Addon 2', version=u'0.1')
        self.new_file(name='Addon 2', version=u'0.2')
        eq_(self.Queue.objects.all().count(), 2)
class TestPendingQueue(TestQueue):
    """Concrete TestQueue for the pending-review queue view."""
    __test__ = True
    Queue = ViewPendingQueue

    def new_file(self, name=u'Pending', version=u'1.0', **kw):
        # Create the addon and everything related. Note that we are cheating,
        # the addon status might not correspond to the files attached. This is
        # important not to re-save() attached versions and files afterwards,
        # because that might alter the addon status.
        return create_addon_file(name, version,
                                 amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED,
                                 listed=self.listed, **kw)

    def new_search_ext(self, name, version, **kw):
        return create_search_ext(name, version,
                                 amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED,
                                 listed=self.listed, **kw)

    def test_waiting_time(self):
        # A just-created version has waited zero whole days.
        self.new_file(name='Addon 1', version=u'0.1')
        Version.objects.update(created=datetime.datetime.utcnow())
        row = self.Queue.objects.all()[0]
        eq_(row.waiting_time_days, 0)
        # Time zone will be off, hard to test this.
        assert row.waiting_time_hours is not None

    # These apply to all queues, except that all add-ons in the Fast
    # Track queue are Jetpack

    def test_flags_admin_review(self):
        f = self.new_file(version=u'0.1')
        f['addon'].update(admin_review=True)
        q = self.Queue.objects.get()
        eq_(q.flags, [('admin-review', 'Admin Review')])

    def test_flags_info_request(self):
        self.new_file(version=u'0.1', version_kw={'has_info_request': True})
        q = self.Queue.objects.get()
        eq_(q.flags, [('info', 'More Information Requested')])

    def test_flags_editor_comment(self):
        self.new_file(version=u'0.1', version_kw={'has_editor_comment': True})
        q = self.Queue.objects.get()
        eq_(q.flags, [('editor', 'Contains Editor Comment')])

    def test_flags_jetpack_and_restartless(self):
        # A Jetpack add-on is always restartless; only the Jetpack flag shows.
        self.new_file(version=u'0.1', file_kw={'jetpack_version': '1.8',
                                               'no_restart': True})
        q = self.Queue.objects.get()
        eq_(q.flags, [('jetpack', 'Jetpack Add-on')])

    def test_flags_restartless(self):
        self.new_file(version=u'0.1', file_kw={'no_restart': True})
        q = self.Queue.objects.get()
        eq_(q.flags, [('restartless', 'Restartless Add-on')])

    def test_flags_sources_provided(self):
        f = self.new_file(version=u'0.1')
        f['addon'].versions.update(source='/some/source/file')
        q = self.Queue.objects.get()
        eq_(q.flags, [('sources-provided', 'Sources provided')])

    def test_no_flags(self):
        self.new_file(version=u'0.1')
        q = self.Queue.objects.get()
        eq_(q.flags, [])
class TestFullReviewQueue(TestQueue):
__test__ = True
Queue = ViewFullReviewQueue
def new_file(self, name=u'Nominated', version=u'1.0', **kw):
return create_addon_file(name, version,
amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED,
listed=self.listed, **kw)
def new_search_ext(self, name, version, **kw):
return create_search_ext(name, version,
amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED,
listed=self.listed, **kw)
def test_lite_review_addons_also_shows_up(self):
create_addon_file('Full', '0.1',
amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED,
listed=self.listed)
create_addon_file('Lite', '0.1',
amo.STATUS_LITE_AND_NOMINATED,
amo.STATUS_UNREVIEWED, listed=self.listed)
eq_(sorted(q.addon_name for q in self.Queue.objects.all()),
['Full', 'Lite'])
def test_any_nominated_file_shows_up(self):
create_addon_file('Null', '0.1',
amo.STATUS_NOMINATED, amo.STATUS_NULL,
listed=self.listed)
eq_(sorted(q.addon_name for q in self.Queue.objects.all()), ['Null'])
def test_waiting_time(self):
self.new_file(name='Addon 1', version=u'0.1')
Version.objects.update(nomination=datetime.datetime.utcnow())
row = self.Queue.objects.all()[0]
eq_(row.waiting_time_days, 0)
# Time zone will be off, hard to test this.
assert row.waiting_time_hours is not None
class TestPreliminaryQueue(TestQueue):
__test__ = True
Queue = ViewPreliminaryQueue
def new_file(self, name=u'Preliminary', version=u'1.0', **kw):
return create_addon_file(name, version,
amo.STATUS_LITE, amo.STATUS_UNREVIEWED,
listed=self.listed, **kw)
def new_search_ext(self, name, version, **kw):
return create_search_ext(name, version,
amo.STATUS_LITE, amo.STATUS_UNREVIEWED,
listed=self.listed, **kw)
def test_unreviewed_addons_are_in_q(self):
create_addon_file('Lite', '0.1',
amo.STATUS_LITE, amo.STATUS_UNREVIEWED,
listed=self.listed)
create_addon_file('Unreviewed', '0.1',
amo.STATUS_UNREVIEWED, amo.STATUS_UNREVIEWED,
listed=self.listed)
eq_(sorted(q.addon_name for q in self.Queue.objects.all()),
['Lite', 'Unreviewed'])
def test_waiting_time(self):
self.new_file(name='Addon 1', version=u'0.1')
Version.objects.update(created=datetime.datetime.utcnow())
row = self.Queue.objects.all()[0]
eq_(row.waiting_time_days, 0)
# Time zone might be off due to your MySQL install, hard to test this.
assert row.waiting_time_min is not None
assert row.waiting_time_hours is not None
class TestFastTrackQueue(TestQueue):
__test__ = True
Queue = ViewFastTrackQueue
def query(self):
return sorted(list(q.addon_name for q in self.Queue.objects.all()))
def new_file(self, name=u'FastTrack', version=u'1.0', file_params=None,
**kw):
res = create_addon_file(name, version,
amo.STATUS_LITE, amo.STATUS_UNREVIEWED, **kw)
file_ = res['file']
params = dict(no_restart=True, requires_chrome=False,
jetpack_version='1.1')
if not file_params:
file_params = {}
params.update(file_params)
for k, v in params.items():
setattr(file_, k, v)
file_.save()
return res
def new_search_ext(self, name, version, **kw):
addon = create_search_ext(name, version,
amo.STATUS_LITE, amo.STATUS_UNREVIEWED,
**kw)
file_ = addon.versions.get().files.get()
file_.no_restart = True
file_.jetpack_version = '1.1'
file_.requires_chrome = False
file_.save()
return addon
def test_include_jetpacks(self):
self.new_file(name='jetpack')
eq_(self.query(), ['jetpack'])
def test_ignore_non_jetpacks(self):
self.new_file(file_params=dict(no_restart=False))
eq_(self.query(), [])
def test_ignore_non_sdk_bootstrapped_addons(self):
self.new_file(file_params=dict(jetpack_version=None))
eq_(self.query(), [])
def test_ignore_sneaky_jetpacks(self):
self.new_file(file_params=dict(requires_chrome=True))
eq_(self.query(), [])
def test_include_full_review(self):
ad = self.new_file(name='full')['addon']
ad.status = amo.STATUS_NOMINATED
ad.save()
eq_(self.query(), ['full'])
class TestUnlistedPendingQueue(TestPendingQueue):
Queue = ViewUnlistedPendingQueue
listed = False
class TestUnlistedFullReviewQueue(TestFullReviewQueue):
Queue = ViewUnlistedFullReviewQueue
listed = False
class TestUnlistedPreliminaryQueue(TestPreliminaryQueue):
Queue = ViewUnlistedPreliminaryQueue
listed = False
class TestEditorSubscription(amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestEditorSubscription, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.version = self.addon.current_version
self.user_one = UserProfile.objects.get(pk=55021)
self.user_two = UserProfile.objects.get(pk=999)
for user in [self.user_one, self.user_two]:
EditorSubscription.objects.create(addon=self.addon, user=user)
def test_email(self):
es = EditorSubscription.objects.get(user=self.user_one)
es.send_notification(self.version)
eq_(len(mail.outbox), 1)
eq_(mail.outbox[0].to, [u'del@icio.us'])
eq_(mail.outbox[0].subject,
'Mozilla Add-ons: Delicious Bookmarks Updated')
def test_notifications(self):
send_notifications(sender=self.version)
eq_(len(mail.outbox), 2)
emails = sorted([o.to for o in mail.outbox])
eq_(emails, [[u'del@icio.us'], [u'regular@mozilla.com']])
def test_notifications_clean(self):
send_notifications(Version, self.version)
eq_(EditorSubscription.objects.count(), 0)
mail.outbox = []
send_notifications(Version, self.version)
eq_(len(mail.outbox), 0)
def test_notifications_beta(self):
self.version.all_files[0].update(status=amo.STATUS_BETA)
version_uploaded.send(sender=self.version)
eq_(len(mail.outbox), 0)
def test_signal_edit(self):
self.version.save()
eq_(len(mail.outbox), 0)
def test_signal_create(self):
v = Version.objects.create(addon=self.addon)
version_uploaded.send(sender=v)
eq_(len(mail.outbox), 2)
eq_(mail.outbox[0].subject,
'Mozilla Add-ons: Delicious Bookmarks Updated')
def test_signal_create_twice(self):
v = Version.objects.create(addon=self.addon)
version_uploaded.send(sender=v)
mail.outbox = []
v = Version.objects.create(addon=self.addon)
version_uploaded.send(sender=v)
eq_(len(mail.outbox), 0)
class TestReviewerScore(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestReviewerScore, self).setUp()
self.addon = amo.tests.addon_factory(status=amo.STATUS_NOMINATED)
self.user = UserProfile.objects.get(email='editor@mozilla.com')
def _give_points(self, user=None, addon=None, status=None):
user = user or self.user
addon = addon or self.addon
ReviewerScore.award_points(user, addon, status or addon.status)
def check_event(self, type, status, event, **kwargs):
self.addon.type = type
eq_(ReviewerScore.get_event(self.addon, status, **kwargs), event, (
'Score event for type:%s and status:%s was not %s' % (
type, status, event)))
def test_events_addons(self):
types = {
amo.ADDON_ANY: None,
amo.ADDON_EXTENSION: 'ADDON',
amo.ADDON_THEME: 'THEME',
amo.ADDON_DICT: 'DICT',
amo.ADDON_SEARCH: 'SEARCH',
amo.ADDON_LPAPP: 'LP',
amo.ADDON_LPADDON: 'LP',
amo.ADDON_PLUGIN: 'ADDON',
amo.ADDON_API: 'ADDON',
amo.ADDON_PERSONA: 'PERSONA',
}
statuses = {
amo.STATUS_NULL: None,
amo.STATUS_UNREVIEWED: 'PRELIM',
amo.STATUS_PENDING: None,
amo.STATUS_NOMINATED: 'FULL',
amo.STATUS_PUBLIC: 'UPDATE',
amo.STATUS_DISABLED: None,
amo.STATUS_BETA: None,
amo.STATUS_LITE: 'PRELIM',
amo.STATUS_LITE_AND_NOMINATED: 'FULL',
amo.STATUS_PURGATORY: None,
amo.STATUS_DELETED: None,
amo.STATUS_REJECTED: None,
amo.STATUS_REVIEW_PENDING: None,
amo.STATUS_BLOCKED: None,
}
for tk, tv in types.items():
for sk, sv in statuses.items():
try:
event = getattr(amo, 'REVIEWED_%s_%s' % (tv, sv))
except AttributeError:
try:
event = getattr(amo, 'REVIEWED_%s' % tv)
except AttributeError:
event = None
self.check_event(tk, sk, event)
def test_award_points(self):
self._give_points()
eq_(ReviewerScore.objects.all()[0].score,
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_award_moderation_points(self):
ReviewerScore.award_moderation_points(self.user, self.addon, 1)
score = ReviewerScore.objects.all()[0]
eq_(score.score, amo.REVIEWED_SCORES.get(amo.REVIEWED_ADDON_REVIEW))
eq_(score.note_key, amo.REVIEWED_ADDON_REVIEW)
def test_get_total(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
eq_(ReviewerScore.get_total(self.user),
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL] +
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
eq_(ReviewerScore.get_total(user2),
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_get_recent(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
self._give_points()
time.sleep(1) # Wait 1 sec so ordering by created is checked.
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2)
scores = ReviewerScore.get_recent(self.user)
eq_(len(scores), 2)
eq_(scores[0].score, amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
eq_(scores[1].score, amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_get_leaderboards(self):
user2 = UserProfile.objects.get(email='regular@mozilla.com')
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
leaders = ReviewerScore.get_leaderboards(self.user)
eq_(leaders['user_rank'], 1)
eq_(leaders['leader_near'], [])
eq_(leaders['leader_top'][0]['rank'], 1)
eq_(leaders['leader_top'][0]['user_id'], self.user.id)
eq_(leaders['leader_top'][0]['total'],
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL] +
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
eq_(leaders['leader_top'][1]['rank'], 2)
eq_(leaders['leader_top'][1]['user_id'], user2.id)
eq_(leaders['leader_top'][1]['total'],
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
self._give_points(
user=user2, addon=amo.tests.addon_factory(type=amo.ADDON_PERSONA))
leaders = ReviewerScore.get_leaderboards(
self.user, addon_type=amo.ADDON_PERSONA)
eq_(len(leaders['leader_top']), 1)
eq_(leaders['leader_top'][0]['user_id'], user2.id)
def test_no_admins_or_staff_in_leaderboards(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
leaders = ReviewerScore.get_leaderboards(self.user)
eq_(leaders['user_rank'], 1)
eq_(leaders['leader_near'], [])
eq_(leaders['leader_top'][0]['user_id'], self.user.id)
eq_(len(leaders['leader_top']), 1) # Only the editor is here.
assert user2.id not in [l['user_id'] for l in leaders['leader_top']], (
'Unexpected admin user found in leaderboards.')
def test_get_leaderboards_last(self):
users = []
for i in range(6):
users.append(UserProfile.objects.create(username='user-%s' % i))
last_user = users.pop(len(users) - 1)
for u in users:
self._give_points(user=u)
# Last user gets lower points by reviewing a persona.
addon = self.addon
addon.type = amo.ADDON_PERSONA
self._give_points(user=last_user, addon=addon)
leaders = ReviewerScore.get_leaderboards(last_user)
eq_(leaders['user_rank'], 6)
eq_(len(leaders['leader_top']), 3)
eq_(len(leaders['leader_near']), 2)
def test_all_users_by_score(self):
user2 = UserProfile.objects.get(email='regular@mozilla.com')
amo.REVIEWED_LEVELS[0]['points'] = 180
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
users = ReviewerScore.all_users_by_score()
eq_(len(users), 2)
# First user.
eq_(users[0]['total'], 180)
eq_(users[0]['user_id'], self.user.id)
eq_(users[0]['level'], amo.REVIEWED_LEVELS[0]['name'])
# Second user.
eq_(users[1]['total'], 120)
eq_(users[1]['user_id'], user2.id)
eq_(users[1]['level'], '')
def test_caching(self):
self._give_points()
with self.assertNumQueries(1):
ReviewerScore.get_total(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_total(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_recent(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_recent(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_leaderboards(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_leaderboards(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_breakdown(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_breakdown(self.user)
# New points invalidates all caches.
self._give_points()
with self.assertNumQueries(1):
ReviewerScore.get_total(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_recent(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_leaderboards(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_breakdown(self.user)
class TestRereviewQueueTheme(amo.tests.TestCase):
def test_manager_soft_delete_addons(self):
"""Test manager excludes soft delete add-ons."""
# Normal RQT object.
RereviewQueueTheme.objects.create(
theme=addon_factory(type=amo.ADDON_PERSONA).persona, header='',
footer='')
# Deleted add-on RQT object.
addon = addon_factory(type=amo.ADDON_PERSONA)
RereviewQueueTheme.objects.create(
theme=addon.persona, header='', footer='')
addon.delete()
eq_(RereviewQueueTheme.objects.count(), 1)
eq_(RereviewQueueTheme.unfiltered.count(), 2)
def test_footer_path_without_footer(self):
rqt = RereviewQueueTheme.objects.create(
theme=addon_factory(type=amo.ADDON_PERSONA).persona, header='',
footer='')
assert rqt.footer_path == ''
def test_footer_url_without_footer(self):
rqt = RereviewQueueTheme.objects.create(
theme=addon_factory(type=amo.ADDON_PERSONA).persona, header='',
footer='')
assert rqt.footer_url == ''
def test_filter_for_many_to_many(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
addon = addon_factory(type=amo.ADDON_PERSONA)
rqt = RereviewQueueTheme.objects.create(theme=addon.persona)
assert addon.persona.rereviewqueuetheme_set.get() == rqt
# Delete the addon: it shouldn't be listed anymore.
addon.update(status=amo.STATUS_DELETED)
assert addon.persona.rereviewqueuetheme_set.all().count() == 0 | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package rpc
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc/rpcbase"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
)
func (r RemoteOffset) measuredAt() time.Time {
return timeutil.Unix(0, r.MeasuredAt)
}
var _ redact.SafeFormatter = RemoteOffset{}
// SafeFormat implements the redact.SafeFormatter interface.
func (r RemoteOffset) SafeFormat(w redact.SafePrinter, _ rune) {
w.Printf("off=%s, err=%s, at=%s",
time.Duration(r.Offset), time.Duration(r.Uncertainty), r.measuredAt())
}
// String formats the RemoteOffset for human readability.
func (r RemoteOffset) String() string {
return redact.StringWithoutMarkers(r)
}
// A HeartbeatService exposes a method to echo its request params. It doubles
// as a way to measure the offset of the server from other nodes. It uses the
// clock to return the server time every heartbeat. It also keeps track of
// remote clocks sent to it by storing them in the remoteClockMonitor.
type HeartbeatService struct {
clock hlc.WallClock
// A pointer to the RemoteClockMonitor configured in the RPC Context,
// shared by rpc clients, to keep track of remote clock measurements.
remoteClockMonitor *RemoteClockMonitor
clusterID *base.ClusterIDContainer
nodeID *base.NodeIDContainer
version clusterversion.Handle
clusterName string
disableClusterNameVerification bool
onHandlePing func(context.Context, *PingRequest, *PingResponse) error // see ContextOptions.OnIncomingPing
// TestingAllowNamedRPCToAnonymousServer, when defined (in tests),
// disables errors in case a heartbeat requests a specific node ID but
// the remote node doesn't have a node ID yet. This testing knob is
// currently used by the multiTestContext which does not suitably
// populate separate node IDs for each heartbeat service.
testingAllowNamedRPCToAnonymousServer bool
}
func checkClusterName(clusterName string, peerName string) error {
if clusterName != peerName {
var err error
if clusterName == "" {
err = errors.Errorf("peer node expects cluster name %q, use --cluster-name to configure", peerName)
} else if peerName == "" {
err = errors.New("peer node does not have a cluster name configured, cannot use --cluster-name")
} else {
err = errors.Errorf(
"local cluster name %q does not match peer cluster name %q", clusterName, peerName)
}
log.Ops.Shoutf(context.Background(), severity.ERROR, "%v", err)
return err
}
return nil
}
func checkVersion(
ctx context.Context, version clusterversion.Handle, peerVersion roachpb.Version,
) error {
activeVersion := version.ActiveVersionOrEmpty(ctx)
if activeVersion == (clusterversion.ClusterVersion{}) {
// Cluster version has not yet been determined.
return nil
}
if peerVersion == (roachpb.Version{}) {
return errors.Errorf(
"cluster requires at least version %s, but peer did not provide a version", activeVersion)
}
// KV nodes which are part of the system tenant *must* carry at least the
// version currently active in the cluster. Great care is taken to ensure
// that all nodes are broadcasting the new version before updating the active
// version. However, secondary tenants are allowed to lag the currently
// active cluster version. They are permitted to broadcast any version which
// is supported by this binary.
minVersion := activeVersion.Version
if tenantID, isTenant := roachpb.ClientTenantFromContext(ctx); isTenant &&
!roachpb.IsSystemTenantID(tenantID.ToUint64()) {
minVersion = version.MinSupportedVersion()
}
if peerVersion.Less(minVersion) {
return errors.Errorf(
"cluster requires at least version %s, but peer has version %s",
minVersion, peerVersion)
}
return nil
}
// Ping echos the contents of the request to the response, and returns the
// server's current clock value, allowing the requester to measure its clock.
// The requester should also estimate its offset from this server along
// with the requester's address.
func (hs *HeartbeatService) Ping(ctx context.Context, request *PingRequest) (*PingResponse, error) {
if log.ExpensiveLogEnabled(ctx, 2) {
log.Dev.Infof(ctx, "received heartbeat: %+v vs local cluster %+v node %+v", request, hs.clusterID, hs.nodeID)
}
// Check that cluster IDs match.
clusterID := hs.clusterID.Get()
if request.ClusterID != nil && *request.ClusterID != uuid.Nil && clusterID != uuid.Nil {
// There is a cluster ID on both sides. Use that to verify the connection.
//
// Note: we could be checking the cluster name here too, however
// for UX reason it is better to check it on the other side (the side
// initiating the connection), so that the user of a newly started
// node gets a chance to see a cluster name mismatch as an error message
// on their side.
if *request.ClusterID != clusterID {
return nil, errors.Errorf(
"client cluster ID %q doesn't match server cluster ID %q", request.ClusterID, clusterID)
}
}
// Check that node IDs match.
var nodeID roachpb.NodeID
if hs.nodeID != nil {
nodeID = hs.nodeID.Get()
}
if request.TargetNodeID != 0 && (!hs.testingAllowNamedRPCToAnonymousServer || nodeID != 0) && request.TargetNodeID != nodeID {
// If nodeID != 0, the situation is clear (we are checking that
// the other side is talking to the right node).
//
// If nodeID == 0 this means that this node (serving the
// heartbeat) doesn't have a node ID yet. Then we can't serve
// connections for other nodes that want a specific node ID,
// however we can still serve connections that don't need a node
// ID, e.g. during initial gossip.
return nil, errors.Errorf(
"client requested node ID %d doesn't match server node ID %d", request.TargetNodeID, nodeID)
}
// Check version compatibility.
if err := checkVersion(ctx, hs.version, request.ServerVersion); err != nil {
return nil, errors.Wrap(err, "version compatibility check failed on ping request")
}
serverOffset := request.Offset
// The server offset should be the opposite of the client offset.
serverOffset.Offset = -serverOffset.Offset
// In this case, we won't be recording the RTT (note the 0 RTT). Therefore, the RPC class is gonna be ignored.
hs.remoteClockMonitor.UpdateOffset(ctx, request.OriginNodeID, serverOffset, 0, /* roundTripLatency */
rpcbase.DefaultClass)
response := PingResponse{
Pong: request.Ping,
ServerTime: hs.clock.Now().UnixNano(),
ServerVersion: hs.version.LatestVersion(),
ClusterName: hs.clusterName,
DisableClusterNameVerification: hs.disableClusterNameVerification,
}
if fn := hs.onHandlePing; fn != nil {
if err := fn(ctx, request, &response); err != nil {
log.Dev.Infof(ctx, "failing ping request from node n%d", request.OriginNodeID)
return nil, err
}
}
return &response, nil
} | go | github | https://github.com/cockroachdb/cockroach | pkg/rpc/heartbeat.go |
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.resolver;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.resolver.AbstractResolveCandidatesByFileTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleResolveCandidatesByFileTestGenerated extends AbstractResolveCandidatesByFileTest {
    /**
     * Supplies the Analysis API configuration for this generated suite:
     * FIR frontend, plain source module, normal (non-dependent) analysis
     * session, IDE analysis mode — matching the class-name prefix
     * "FirIdeNormalAnalysisSourceModule".
     */
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }
    // Guard test: fails if any .kt file under allByPsi lacks a corresponding
    // generated test method (i.e. the suite needs regeneration).
    @Test
    public void testAllFilesPresentInAllByPsi() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }
    // Generated test: delegates to runTest(...) for annotatedClasses.kt.
    @Test
    @TestMetadata("annotatedClasses.kt")
    public void testAnnotatedClasses() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/annotatedClasses.kt");
    }
    // Generated test: delegates to runTest(...) for annotatedExpressions.kt.
    @Test
    @TestMetadata("annotatedExpressions.kt")
    public void testAnnotatedExpressions() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/annotatedExpressions.kt");
    }
    // Generated test: delegates to runTest(...) for annotatedFunction.kt.
    @Test
    @TestMetadata("annotatedFunction.kt")
    public void testAnnotatedFunction() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/annotatedFunction.kt");
    }
    // Generated test: delegates to runTest(...) for annotatedProperty.kt.
    @Test
    @TestMetadata("annotatedProperty.kt")
    public void testAnnotatedProperty() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/annotatedProperty.kt");
    }
    // Generated test: delegates to runTest(...) for annotations.kt.
    @Test
    @TestMetadata("annotations.kt")
    public void testAnnotations() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/annotations.kt");
    }
    // Generated test: delegates to runTest(...) for annotationsWithoutCall.kt.
    @Test
    @TestMetadata("annotationsWithoutCall.kt")
    public void testAnnotationsWithoutCall() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/annotationsWithoutCall.kt");
    }
    // Generated test: delegates to runTest(...) for arrayAccess.kt.
    @Test
    @TestMetadata("arrayAccess.kt")
    public void testArrayAccess() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/arrayAccess.kt");
    }
    // Generated test: delegates to runTest(...) for callChain.kt.
    @Test
    @TestMetadata("callChain.kt")
    public void testCallChain() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/callChain.kt");
    }
    // Generated test: delegates to runTest(...) for callableReference.kt.
    @Test
    @TestMetadata("callableReference.kt")
    public void testCallableReference() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/callableReference.kt");
    }
    // Generated test: delegates to runTest(...) for compare.kt.
    @Test
    @TestMetadata("compare.kt")
    public void testCompare() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/compare.kt");
    }
    // Generated test: delegates to runTest(...) for constructors.kt.
    @Test
    @TestMetadata("constructors.kt")
    public void testConstructors() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/constructors.kt");
    }
    // Generated test: delegates to runTest(...) for contains.kt.
    @Test
    @TestMetadata("contains.kt")
    public void testContains() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contains.kt");
    }
    // Generated test: delegates to runTest(...) for contextParameters.kt.
    @Test
    @TestMetadata("contextParameters.kt")
    public void testContextParameters() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextParameters.kt");
    }
    // Generated test: delegates to runTest(...) for delegatedFieldNestedName.kt.
    @Test
    @TestMetadata("delegatedFieldNestedName.kt")
    public void testDelegatedFieldNestedName() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/delegatedFieldNestedName.kt");
    }
    // Generated test: delegates to runTest(...) for delegatedProperty.kt.
    @Test
    @TestMetadata("delegatedProperty.kt")
    public void testDelegatedProperty() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/delegatedProperty.kt");
    }
    // Generated test: delegates to runTest(...) for elvis.kt.
    @Test
    @TestMetadata("elvis.kt")
    public void testElvis() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/elvis.kt");
    }
    // Generated test: delegates to runTest(...) for enum.kt.
    @Test
    @TestMetadata("enum.kt")
    public void testEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/enum.kt");
    }
    // Generated test: delegates to runTest(...) for equals.kt.
    @Test
    @TestMetadata("equals.kt")
    public void testEquals() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/equals.kt");
    }
    // Generated test: delegates to runTest(...) for explicitContextArgumentErrorExpression.kt.
    @Test
    @TestMetadata("explicitContextArgumentErrorExpression.kt")
    public void testExplicitContextArgumentErrorExpression() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/explicitContextArgumentErrorExpression.kt");
    }
    // Generated test: delegates to runTest(...) for explicitContextArguments.kt.
    @Test
    @TestMetadata("explicitContextArguments.kt")
    public void testExplicitContextArguments() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/explicitContextArguments.kt");
    }
    // Generated test: delegates to runTest(...) for forWithIterator.kt.
    @Test
    @TestMetadata("forWithIterator.kt")
    public void testForWithIterator() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/forWithIterator.kt");
    }
    // Generated test: delegates to runTest(...) for forWithRange.kt.
    @Test
    @TestMetadata("forWithRange.kt")
    public void testForWithRange() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/forWithRange.kt");
    }
    // Generated test: delegates to runTest(...) for infixFunction.kt.
    @Test
    @TestMetadata("infixFunction.kt")
    public void testInfixFunction() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/infixFunction.kt");
    }
    // Generated test: delegates to runTest(...) for innerClass.kt.
    @Test
    @TestMetadata("innerClass.kt")
    public void testInnerClass() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/innerClass.kt");
    }
    // Generated test: delegates to runTest(...) for innerClassThis.kt.
    @Test
    @TestMetadata("innerClassThis.kt")
    public void testInnerClassThis() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/innerClassThis.kt");
    }
    // Generated test: delegates to runTest(...) for invoke.kt.
    @Test
    @TestMetadata("invoke.kt")
    public void testInvoke() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/invoke.kt");
    }
    // Generated test: delegates to runTest(...) for labels.kt.
    @Test
    @TestMetadata("labels.kt")
    public void testLabels() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/labels.kt");
    }
    // Generated test: delegates to runTest(...) for lambdaParameters.kt.
    @Test
    @TestMetadata("lambdaParameters.kt")
    public void testLambdaParameters() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/lambdaParameters.kt");
    }
    // Generated test: delegates to runTest(...) for multiEquals.kt.
    @Test
    @TestMetadata("multiEquals.kt")
    public void testMultiEquals() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/multiEquals.kt");
    }
    // Generated test: delegates to runTest(...) for nestedClass.kt.
    @Test
    @TestMetadata("nestedClass.kt")
    public void testNestedClass() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/nestedClass.kt");
    }
    // Generated test: delegates to runTest(...) for nestedClassThis.kt.
    @Test
    @TestMetadata("nestedClassThis.kt")
    public void testNestedClassThis() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/nestedClassThis.kt");
    }
    // Generated test: delegates to runTest(...) for operatorsWithContextParameters.kt.
    @Test
    @TestMetadata("operatorsWithContextParameters.kt")
    public void testOperatorsWithContextParameters() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/operatorsWithContextParameters.kt");
    }
    // Generated test: delegates to runTest(...) for parenthesisedAnnotationCallArguments.kt.
    @Test
    @TestMetadata("parenthesisedAnnotationCallArguments.kt")
    public void testParenthesisedAnnotationCallArguments() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/parenthesisedAnnotationCallArguments.kt");
    }
    // Generated test: delegates to runTest(...) for parenthesisedCallArguments.kt.
    @Test
    @TestMetadata("parenthesisedCallArguments.kt")
    public void testParenthesisedCallArguments() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/parenthesisedCallArguments.kt");
    }
    // Generated test: delegates to runTest(...) for parenthesisedDelegatedConstructorCallArguments.kt.
    @Test
    @TestMetadata("parenthesisedDelegatedConstructorCallArguments.kt")
    public void testParenthesisedDelegatedConstructorCallArguments() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/parenthesisedDelegatedConstructorCallArguments.kt");
    }
    // Generated test: delegates to runTest(...) for providedDelegate.kt.
    @Test
    @TestMetadata("providedDelegate.kt")
    public void testProvidedDelegate() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/providedDelegate.kt");
    }
    // Generated test: delegates to runTest(...) for return.kt.
    @Test
    @TestMetadata("return.kt")
    public void testReturn() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/return.kt");
    }
    // Generated test: delegates to runTest(...) for staticImports.kt.
    @Test
    @TestMetadata("staticImports.kt")
    public void testStaticImports() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/staticImports.kt");
    }
    // Generated test: delegates to runTest(...) for stringConcatenation.kt.
    @Test
    @TestMetadata("stringConcatenation.kt")
    public void testStringConcatenation() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/stringConcatenation.kt");
    }
    // Generated test: delegates to runTest(...) for syntheticProperty.kt.
    @Test
    @TestMetadata("syntheticProperty.kt")
    public void testSyntheticProperty() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/syntheticProperty.kt");
    }
    // Generated test: delegates to runTest(...) for this.kt.
    @Test
    @TestMetadata("this.kt")
    public void testThis() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/this.kt");
    }
    // Generated test: delegates to runTest(...) for typeParameters.kt.
    @Test
    @TestMetadata("typeParameters.kt")
    public void testTypeParameters() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/typeParameters.kt");
    }
@Test
@TestMetadata("types.kt")
public void testTypes() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/types.kt");
}
@Test
@TestMetadata("unaryOperators.kt")
public void testUnaryOperators() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/unaryOperators.kt");
}
  /**
   * Tests driven by the test data under
   * {@code analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution}.
   *
   * <p>NOTE(review): the uniform {@code @TestMetadata}/{@code runTest} structure strongly
   * suggests this suite is generated; prefer regenerating over manual edits.
   */
  @Nested
  @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution")
  @TestDataPath("$PROJECT_ROOT")
  public class ContextSensitiveResolution {
    @Test
    public void testAllFilesPresentInContextSensitiveResolution() {
      // Presumably fails when a .kt test-data file exists without a corresponding
      // generated test method — TODO confirm KtTestUtil semantics.
      KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/ambiguity")
    @TestDataPath("$PROJECT_ROOT")
    public class Ambiguity {
      @Test
      public void testAllFilesPresentInAmbiguity() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/ambiguity"), Pattern.compile("^(.+)\\.kt$"), null, true);
      }
      @Test
      @TestMetadata("companion.kt")
      public void testCompanion() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/ambiguity/companion.kt");
      }
      @Test
      @TestMetadata("local.kt")
      public void testLocal() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/ambiguity/local.kt");
      }
      @Test
      @TestMetadata("samePackage.kt")
      public void testSamePackage() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/ambiguity/samePackage.kt");
      }
      @Test
      @TestMetadata("starImport.kt")
      public void testStarImport() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/ambiguity/starImport.kt");
      }
    }
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition")
    @TestDataPath("$PROJECT_ROOT")
    public class CallArgumentPosition {
      @Test
      public void testAllFilesPresentInCallArgumentPosition() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition"), Pattern.compile("^(.+)\\.kt$"), null, true);
      }
      @Test
      @TestMetadata("ambiguouslyImported.kt")
      public void testAmbiguouslyImported() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/ambiguouslyImported.kt");
      }
      @Test
      @TestMetadata("ambiguouslyImportedInvisible.kt")
      public void testAmbiguouslyImportedInvisible() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/ambiguouslyImportedInvisible.kt");
      }
      @Test
      @TestMetadata("annotationArguments.kt")
      public void testAnnotationArguments() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/annotationArguments.kt");
      }
      @Test
      @TestMetadata("anonymousFun.kt")
      public void testAnonymousFun() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/anonymousFun.kt");
      }
      @Test
      @TestMetadata("bound.kt")
      public void testBound() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/bound.kt");
      }
      @Test
      @TestMetadata("contextParameters.kt")
      public void testContextParameters() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/contextParameters.kt");
      }
      @Test
      @TestMetadata("defaultArg.kt")
      public void testDefaultArg() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/defaultArg.kt");
      }
      @Test
      @TestMetadata("expectedType.kt")
      public void testExpectedType() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/expectedType.kt");
      }
      @Test
      @TestMetadata("expectedTypeEnum.kt")
      public void testExpectedTypeEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/expectedTypeEnum.kt");
      }
      @Test
      @TestMetadata("generic.kt")
      public void testGeneric() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/generic.kt");
      }
      @Test
      @TestMetadata("infixFun.kt")
      public void testInfixFun() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/infixFun.kt");
      }
      @Test
      @TestMetadata("invisibleImported.kt")
      public void testInvisibleImported() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/invisibleImported.kt");
      }
      @Test
      @TestMetadata("lambda.kt")
      public void testLambda() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/lambda.kt");
      }
      @Test
      @TestMetadata("lambdaEnum.kt")
      public void testLambdaEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/lambdaEnum.kt");
      }
      @Test
      @TestMetadata("lambdaReceiver.kt")
      public void testLambdaReceiver() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/lambdaReceiver.kt");
      }
      @Test
      @TestMetadata("lambdaReceiverParametrized.kt")
      public void testLambdaReceiverParametrized() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/lambdaReceiverParametrized.kt");
      }
      @Test
      @TestMetadata("multipleOverloads.kt")
      public void testMultipleOverloads() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/multipleOverloads.kt");
      }
      @Test
      @TestMetadata("namedArg.kt")
      public void testNamedArg() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/namedArg.kt");
      }
      @Test
      @TestMetadata("noContextSensitiveResolutionShouldHappen.kt")
      public void testNoContextSensitiveResolutionShouldHappen() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/noContextSensitiveResolutionShouldHappen.kt");
      }
      @Test
      @TestMetadata("nonEnums.kt")
      public void testNonEnums() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/nonEnums.kt");
      }
      @Test
      @TestMetadata("operatorOverload.kt")
      public void testOperatorOverload() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/operatorOverload.kt");
      }
      @Test
      @TestMetadata("overload.kt")
      public void testOverload() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/overload.kt");
      }
      @Test
      @TestMetadata("pcla.kt")
      public void testPcla() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/pcla.kt");
      }
      @Test
      @TestMetadata("regularArg.kt")
      public void testRegularArg() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/regularArg.kt");
      }
      @Test
      @TestMetadata("regularArgEnum.kt")
      public void testRegularArgEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/regularArgEnum.kt");
      }
      @Test
      @TestMetadata("simple.kt")
      public void testSimple() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/simple.kt");
      }
      @Test
      @TestMetadata("simpleDisabledFeature.kt")
      public void testSimpleDisabledFeature() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/simpleDisabledFeature.kt");
      }
      @Test
      @TestMetadata("simpleGeneric.kt")
      public void testSimpleGeneric() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/simpleGeneric.kt");
      }
      @Test
      @TestMetadata("simpleVarargs.kt")
      public void testSimpleVarargs() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/simpleVarargs.kt");
      }
      @Test
      @TestMetadata("varargs.kt")
      public void testVarargs() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/callArgumentPosition/varargs.kt");
      }
    }
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors")
    @TestDataPath("$PROJECT_ROOT")
    public class NestedInheritors {
      @Test
      public void testAllFilesPresentInNestedInheritors() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors"), Pattern.compile("^(.+)\\.kt$"), null, true);
      }
      @Test
      @TestMetadata("equality.kt")
      public void testEquality() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors/equality.kt");
      }
      @Test
      @TestMetadata("guard.kt")
      public void testGuard() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors/guard.kt");
      }
      @Test
      @TestMetadata("inWhen.kt")
      public void testInWhen() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors/inWhen.kt");
      }
      @Test
      @TestMetadata("negatedIs.kt")
      public void testNegatedIs() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors/negatedIs.kt");
      }
      @Test
      @TestMetadata("property.kt")
      public void testProperty() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/nestedInheritors/property.kt");
      }
    }
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions")
    @TestDataPath("$PROJECT_ROOT")
    public class OtherExpectedTypePositions {
      @Test
      public void testAllFilesPresentInOtherExpectedTypePositions() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions"), Pattern.compile("^(.+)\\.kt$"), null, true);
      }
      @Test
      @TestMetadata("basicExampleWithEnumAndWhens.kt")
      public void testBasicExampleWithEnumAndWhens() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/basicExampleWithEnumAndWhens.kt");
      }
      @Test
      @TestMetadata("differentTrivialExpectedTypeMismatch.kt")
      public void testDifferentTrivialExpectedTypeMismatch() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/differentTrivialExpectedTypeMismatch.kt");
      }
      @Test
      @TestMetadata("differentTrivialExpectedTypePositions.kt")
      public void testDifferentTrivialExpectedTypePositions() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/differentTrivialExpectedTypePositions.kt");
      }
      @Test
      @TestMetadata("elvisOperator.kt")
      public void testElvisOperator() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/elvisOperator.kt");
      }
      @Test
      @TestMetadata("elvisOperatorEnum.kt")
      public void testElvisOperatorEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/elvisOperatorEnum.kt");
      }
      @Test
      @TestMetadata("equalityOperator.kt")
      public void testEqualityOperator() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/equalityOperator.kt");
      }
      @Test
      @TestMetadata("equalityOperatorOnBoundedEnumTypeParameter.kt")
      public void testEqualityOperatorOnBoundedEnumTypeParameter() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/equalityOperatorOnBoundedEnumTypeParameter.kt");
      }
      @Test
      @TestMetadata("equalityOperatorOnBoundedSealedTypeParameter.kt")
      public void testEqualityOperatorOnBoundedSealedTypeParameter() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/equalityOperatorOnBoundedSealedTypeParameter.kt");
      }
      @Test
      @TestMetadata("explicitReturnTypes.kt")
      public void testExplicitReturnTypes() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/explicitReturnTypes.kt");
      }
      @Test
      @TestMetadata("extensions.kt")
      public void testExtensions() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/extensions.kt");
      }
      @Test
      @TestMetadata("functionalTypes.kt")
      public void testFunctionalTypes() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/functionalTypes.kt");
      }
      @Test
      @TestMetadata("lambdasReturnStatements.kt")
      public void testLambdasReturnStatements() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/lambdasReturnStatements.kt");
      }
      @Test
      @TestMetadata("lambdasReturnStatementsWithResolvedQualifiers.kt")
      public void testLambdasReturnStatementsWithResolvedQualifiers() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/lambdasReturnStatementsWithResolvedQualifiers.kt");
      }
      @Test
      @TestMetadata("notNullAssertion.kt")
      public void testNotNullAssertion() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/notNullAssertion.kt");
      }
      @Test
      @TestMetadata("notNullAssertionEnum.kt")
      public void testNotNullAssertionEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/notNullAssertionEnum.kt");
      }
      @Test
      @TestMetadata("propInitializers.kt")
      public void testPropInitializers() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/propInitializers.kt");
      }
      @Test
      @TestMetadata("sameNamedEnumEntry.kt")
      public void testSameNamedEnumEntry() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/sameNamedEnumEntry.kt");
      }
      @Test
      @TestMetadata("simple.kt")
      public void testSimple() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/simple.kt");
      }
      @Test
      @TestMetadata("simpleDisabledFeature.kt")
      public void testSimpleDisabledFeature() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/simpleDisabledFeature.kt");
      }
      @Test
      @TestMetadata("tryCatchStatements.kt")
      public void testTryCatchStatements() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/tryCatchStatements.kt");
      }
      @Test
      @TestMetadata("tryCatchStatementsEnum.kt")
      public void testTryCatchStatementsEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/tryCatchStatementsEnum.kt");
      }
      @Test
      @TestMetadata("whenIfLastStatement.kt")
      public void testWhenIfLastStatement() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/whenIfLastStatement.kt");
      }
      @Test
      @TestMetadata("whenIfLastStatementEnum.kt")
      public void testWhenIfLastStatementEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/otherExpectedTypePositions/whenIfLastStatementEnum.kt");
      }
    }
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition")
    @TestDataPath("$PROJECT_ROOT")
    public class TypePosition {
      @Test
      public void testAllFilesPresentInTypePosition() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition"), Pattern.compile("^(.+)\\.kt$"), null, true);
      }
      @Test
      @TestMetadata("ambiguouslyImported.kt")
      public void testAmbiguouslyImported() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/ambiguouslyImported.kt");
      }
      @Test
      @TestMetadata("ambiguouslyImportedInvisible.kt")
      public void testAmbiguouslyImportedInvisible() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/ambiguouslyImportedInvisible.kt");
      }
      @Test
      @TestMetadata("either.kt")
      public void testEither() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/either.kt");
      }
      @Test
      @TestMetadata("eitherInDifferentPositions.kt")
      public void testEitherInDifferentPositions() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/eitherInDifferentPositions.kt");
      }
      @Test
      @TestMetadata("innerClassInGeneric.kt")
      public void testInnerClassInGeneric() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/innerClassInGeneric.kt");
      }
      @Test
      @TestMetadata("invisibleImported.kt")
      public void testInvisibleImported() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/invisibleImported.kt");
      }
      @Test
      @TestMetadata("isInIf.kt")
      public void testIsInIf() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/isInIf.kt");
      }
      @Test
      @TestMetadata("isInWhen.kt")
      public void testIsInWhen() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/isInWhen.kt");
      }
      @Test
      @TestMetadata("javaInterop.kt")
      public void testJavaInterop() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/javaInterop.kt");
      }
      @Test
      @TestMetadata("nonSealed.kt")
      public void testNonSealed() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/nonSealed.kt");
      }
      @Test
      @TestMetadata("nonTrivialTypes.kt")
      public void testNonTrivialTypes() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/nonTrivialTypes.kt");
      }
      @Test
      @TestMetadata("regularlyResolved.kt")
      public void testRegularlyResolved() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/regularlyResolved.kt");
      }
      @Test
      @TestMetadata("regularlyResolvedNoGenericArgument.kt")
      public void testRegularlyResolvedNoGenericArgument() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/regularlyResolvedNoGenericArgument.kt");
      }
      @Test
      @TestMetadata("sealedWithNonSealedSubclass.kt")
      public void testSealedWithNonSealedSubclass() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/sealedWithNonSealedSubclass.kt");
      }
      @Test
      @TestMetadata("simple.kt")
      public void testSimple() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/simple.kt");
      }
      @Test
      @TestMetadata("simpleDisabledFeature.kt")
      public void testSimpleDisabledFeature() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/simpleDisabledFeature.kt");
      }
      @Test
      @TestMetadata("singleDefiniteExpectedType.kt")
      public void testSingleDefiniteExpectedType() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/singleDefiniteExpectedType.kt");
      }
      @Test
      @TestMetadata("typeCast.kt")
      public void testTypeCast() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/typeCast.kt");
      }
      @Test
      @TestMetadata("typeParameter.kt")
      public void testTypeParameter() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/typeParameter.kt");
      }
      @Test
      @TestMetadata("typeParametersComplex.kt")
      public void testTypeParametersComplex() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/typeParametersComplex.kt");
      }
      @Test
      @TestMetadata("unsupportedTypePosition.kt")
      public void testUnsupportedTypePosition() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/typePosition/unsupportedTypePosition.kt");
      }
    }
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum")
    @TestDataPath("$PROJECT_ROOT")
    public class UnqualifiedEnum {
      @Test
      public void testAllFilesPresentInUnqualifiedEnum() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum"), Pattern.compile("^(.+)\\.kt$"), null, true);
      }
      @Test
      @TestMetadata("correctJava.kt")
      public void testCorrectJava() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/correctJava.kt");
      }
      @Test
      @TestMetadata("enumWithTheSameNameAsEntry.kt")
      public void testEnumWithTheSameNameAsEntry() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/enumWithTheSameNameAsEntry.kt");
      }
      @Test
      @TestMetadata("incorrectJava.kt")
      public void testIncorrectJava() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/incorrectJava.kt");
      }
      @Test
      @TestMetadata("nested.kt")
      public void testNested() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/nested.kt");
      }
      @Test
      @TestMetadata("notInsideBranches.kt")
      public void testNotInsideBranches() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/notInsideBranches.kt");
      }
      @Test
      @TestMetadata("priority.kt")
      public void testPriority() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/priority.kt");
      }
      @Test
      @TestMetadata("typeAlias.kt")
      public void testTypeAlias() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/typeAlias.kt");
      }
      @Test
      @TestMetadata("unqualifiedEnum.kt")
      public void testUnqualifiedEnum() {
        runTest("analysis/analysis-api/testData/components/resolver/allByPsi/contextSensitiveResolution/unqualifiedEnum/unqualifiedEnum.kt");
      }
    }
  }
  /**
   * Tests driven by the test data under
   * {@code analysis/analysis-api/testData/components/resolver/allByPsi/destructuring}.
   *
   * <p>NOTE(review): appears generated (uniform {@code @TestMetadata}/{@code runTest}
   * structure); prefer regenerating over manual edits.
   */
  @Nested
  @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring")
  @TestDataPath("$PROJECT_ROOT")
  public class Destructuring {
    @Test
    public void testAllFilesPresentInDestructuring() {
      KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }
    @Test
    @TestMetadata("nameBasedDestructuringFullForm.kt")
    public void testNameBasedDestructuringFullForm() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/nameBasedDestructuringFullForm.kt");
    }
    @Test
    @TestMetadata("nameBasedDestructuringFullFormErrors.kt")
    public void testNameBasedDestructuringFullFormErrors() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/nameBasedDestructuringFullFormErrors.kt");
    }
    @Test
    @TestMetadata("nameBasedDestructuringShortFormAfter.kt")
    public void testNameBasedDestructuringShortFormAfter() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/nameBasedDestructuringShortFormAfter.kt");
    }
    @Test
    @TestMetadata("nameBasedDestructuringShortFormErrorsAfter.kt")
    public void testNameBasedDestructuringShortFormErrorsAfter() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/nameBasedDestructuringShortFormErrorsAfter.kt");
    }
    @Test
    @TestMetadata("positionalDestructuringFullForm.kt")
    public void testPositionalDestructuringFullForm() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/positionalDestructuringFullForm.kt");
    }
    @Test
    @TestMetadata("positionalDestructuringFullFormErrors.kt")
    public void testPositionalDestructuringFullFormErrors() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/positionalDestructuringFullFormErrors.kt");
    }
    @Test
    @TestMetadata("positionalDestructuringShortForm.kt")
    public void testPositionalDestructuringShortForm() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/positionalDestructuringShortForm.kt");
    }
    @Test
    @TestMetadata("positionalDestructuringShortFormErrors.kt")
    public void testPositionalDestructuringShortFormErrors() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/destructuring/positionalDestructuringShortFormErrors.kt");
    }
  }
  /**
   * Tests driven by the test data under
   * {@code analysis/analysis-api/testData/components/resolver/allByPsi/imports}.
   *
   * <p>NOTE(review): appears generated (uniform {@code @TestMetadata}/{@code runTest}
   * structure); prefer regenerating over manual edits.
   */
  @Nested
  @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/imports")
  @TestDataPath("$PROJECT_ROOT")
  public class Imports {
    @Test
    public void testAllFilesPresentInImports() {
      KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/imports"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }
    @Test
    @TestMetadata("enumEntry.kt")
    public void testEnumEntry() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/enumEntry.kt");
    }
    @Test
    @TestMetadata("javaClass.kt")
    public void testJavaClass() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/javaClass.kt");
    }
    @Test
    @TestMetadata("javaClassWithBaseClass.kt")
    public void testJavaClassWithBaseClass() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/javaClassWithBaseClass.kt");
    }
    @Test
    @TestMetadata("javaClass_rootPackage.kt")
    public void testJavaClass_rootPackage() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/javaClass_rootPackage.kt");
    }
    @Test
    @TestMetadata("simple.kt")
    public void testSimple() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/simple.kt");
    }
    @Test
    @TestMetadata("star.kt")
    public void testStar() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/star.kt");
    }
    @Test
    @TestMetadata("topLevelObject.kt")
    public void testTopLevelObject() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/topLevelObject.kt");
    }
    @Test
    @TestMetadata("topLevelObjectWithBaseClass.kt")
    public void testTopLevelObjectWithBaseClass() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/topLevelObjectWithBaseClass.kt");
    }
    @Test
    @TestMetadata("topLevelObject_rootPackage.kt")
    public void testTopLevelObject_rootPackage() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/imports/topLevelObject_rootPackage.kt");
    }
  }
  /**
   * Tests driven by the test data under
   * {@code analysis/analysis-api/testData/components/resolver/allByPsi/typeAlias}.
   *
   * <p>NOTE(review): appears generated (uniform {@code @TestMetadata}/{@code runTest}
   * structure); prefer regenerating over manual edits.
   */
  @Nested
  @TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/typeAlias")
  @TestDataPath("$PROJECT_ROOT")
  public class TypeAlias {
    @Test
    public void testAllFilesPresentInTypeAlias() {
      KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/typeAlias"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }
    @Test
    @TestMetadata("TypeAliasedCompanionObjectAsQualifier.kt")
    public void testTypeAliasedCompanionObjectAsQualifier() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/typeAlias/TypeAliasedCompanionObjectAsQualifier.kt");
    }
    @Test
    @TestMetadata("TypeAliasedObjectAsQualifier.kt")
    public void testTypeAliasedObjectAsQualifier() {
      runTest("analysis/analysis-api/testData/components/resolver/allByPsi/typeAlias/TypeAliasedObjectAsQualifier.kt");
    }
  }
@Nested
@TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors")
@TestDataPath("$PROJECT_ROOT")
public class WithErrors {
@Test
public void testAllFilesPresentInWithErrors() {
KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors"), Pattern.compile("^(.+)\\.kt$"), null, true);
}
@Test
@TestMetadata("assignOperatorAmbiguity.kt")
public void testAssignOperatorAmbiguity() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/assignOperatorAmbiguity.kt");
}
@Nested
@TestMetadata("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier")
@TestDataPath("$PROJECT_ROOT")
public class PartiallyUnresolvedTypeQualifier {
@Test
public void testAllFilesPresentInPartiallyUnresolvedTypeQualifier() {
KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier"), Pattern.compile("^(.+)\\.kt$"), null, true);
}
@Test
@TestMetadata("ClassNameBeforeOneUnsresolvedClass.kt")
public void testClassNameBeforeOneUnsresolvedClass() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeOneUnsresolvedClass.kt");
}
@Test
@TestMetadata("ClassNameBeforeOneUnsresolvedClassWithDot.kt")
public void testClassNameBeforeOneUnsresolvedClassWithDot() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeOneUnsresolvedClassWithDot.kt");
}
@Test
@TestMetadata("ClassNameBeforeOneUnsresolvedClassWithTwoResolved.kt")
public void testClassNameBeforeOneUnsresolvedClassWithTwoResolved() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeOneUnsresolvedClassWithTwoResolved.kt");
}
@Test
@TestMetadata("ClassNameBeforeOneUnsresolvedClassWithTwoResolvedWithDot.kt")
public void testClassNameBeforeOneUnsresolvedClassWithTwoResolvedWithDot() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeOneUnsresolvedClassWithTwoResolvedWithDot.kt");
}
@Test
@TestMetadata("ClassNameBeforeTwoUnsresolvedClasses.kt")
public void testClassNameBeforeTwoUnsresolvedClasses() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeTwoUnsresolvedClasses.kt");
}
@Test
@TestMetadata("ClassNameBeforeTwoUnsresolvedClassesTwoResolved.kt")
public void testClassNameBeforeTwoUnsresolvedClassesTwoResolved() {
runTest("analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeTwoUnsresolvedClassesTwoResolved.kt");
}
@Test
@TestMetadata("ClassNameBeforeTwoUnsresolvedClassesWithDot.kt")
public void testClassNameBeforeTwoUnsresolvedClassesWithDot() {
    // Generated case: hand the test-data file to the shared runner.
    String testDataPath =
            "analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/ClassNameBeforeTwoUnsresolvedClassesWithDot.kt";
    runTest(testDataPath);
}
@Test
@TestMetadata("GenericClassNameBeforeOneUnresolvedClass.kt")
public void testGenericClassNameBeforeOneUnresolvedClass() {
    // Generated case: hand the test-data file to the shared runner.
    String testDataPath =
            "analysis/analysis-api/testData/components/resolver/allByPsi/withErrors/partiallyUnresolvedTypeQualifier/GenericClassNameBeforeOneUnresolvedClass.kt";
    runTest(testDataPath);
}
}
}
} | java | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-fir/tests-gen/org/jetbrains/kotlin/analysis/api/fir/test/cases/generated/cases/components/resolver/FirIdeNormalAnalysisSourceModuleResolveCandidatesByFileTestGenerated.java |
# Copyright 2021 The Cockroach Authors.
#
# Use of this software is governed by the CockroachDB Software License
# included in the /LICENSE file.
# Common logic used by the nightly roachtest scripts (Bazel and non-Bazel).
# Set up Google credentials. Note that we need this for all clouds since we upload
# perf artifacts to Google Storage at the end.
# CI passes a service-account key via $GOOGLE_EPHEMERAL_CREDENTIALS; activate
# it for both the gcloud CLI and the Go GCP client libraries.
if [[ "$GOOGLE_EPHEMERAL_CREDENTIALS" ]]; then
  echo "$GOOGLE_EPHEMERAL_CREDENTIALS" > creds.json
  gcloud auth activate-service-account --key-file=creds.json
  export ROACHPROD_USER=teamcity
  # Set GOOGLE_APPLICATION_CREDENTIALS so that gcp go libraries can find it.
  export GOOGLE_APPLICATION_CREDENTIALS="$(pwd)/creds.json"
else
  echo 'warning: GOOGLE_EPHEMERAL_CREDENTIALS not set' >&2
  echo "Assuming that you've run \`gcloud auth login\` from inside the builder." >&2
fi
# defines get_host_arch
# NOTE(review): $root is unquoted; this is fine only while the checkout path
# contains no whitespace — confirm that invariant holds for all agents.
source $root/build/teamcity/util/roachtest_arch_util.sh
# Early bind the stats dir. Roachtest invocations can take ages, and we want the
# date at the time of the start of the run (which identifies the version of the
# code run best).
stats_dir="$(date +"%Y%m%d")-${TC_BUILD_ID}"
stats_file_name="stats.json"
# Provide a default value for EXPORT_OPENMETRICS if it is not set
EXPORT_OPENMETRICS="${EXPORT_OPENMETRICS:-false}"
if [[ "${EXPORT_OPENMETRICS}" == "true" ]]; then
  # OpenMetrics runs emit *stats.om files; the glob (quoted here, expanded by
  # `find` in upload_stats) picks up aggregated and per-test stats alike.
  stats_file_name="*stats.om"
fi
# NOTE(review): COMMIT_SHA is not referenced below in this snippet — confirm
# it is consumed later in the file or by a script that sources this one.
COMMIT_SHA=$(git rev-parse --short HEAD)
# Set up a function we'll invoke at the end.
#
# upload_stats: on release branches, copies every collected stats file found
# under ${artifacts} to GCS, preserving each test's directory layout beneath
# the date/build-id prefix computed above. No-op on non-release branches.
# Relies on globals: CLOUD, EXPORT_OPENMETRICS, FIPS_ENABLED, artifacts,
# stats_dir, stats_file_name, and optionally ROACHTEST_BUCKET.
function upload_stats {
  if tc_release_branch; then
    # Default bucket is per-cloud; OpenMetrics exports go to a dedicated
    # roachperf bucket instead. ROACHTEST_BUCKET overrides either default.
    bucket="${ROACHTEST_BUCKET:-cockroach-nightly-${CLOUD}}"
    if [[ "${EXPORT_OPENMETRICS}" == "true" ]]; then
      bucket="${ROACHTEST_BUCKET:-crl-artifacts-roachperf-openmetrics/${CLOUD}}"
    fi
    if [[ "${CLOUD}" == "gce" && "${EXPORT_OPENMETRICS}" == "false" ]]; then
      # GCE, having been there first, gets an exemption.
      bucket="cockroach-nightly"
    fi
    branch=$(tc_build_branch)
    remote_artifacts_dir="artifacts-${branch}"
    if [[ "${branch}" == "master" ]]; then
      # The master branch is special, as roachperf hard-codes
      # the location.
      remote_artifacts_dir="artifacts"
    fi
    # TODO: FIPS_ENABLED is deprecated, use roachtest --metamorphic-fips-probability, instead.
    # In FIPS-mode, keep artifacts separate by using the 'fips' suffix.
    if [[ ${FIPS_ENABLED:-0} == 1 ]]; then
      remote_artifacts_dir="${remote_artifacts_dir}-fips"
    fi
    # The ${stats_file_name} files need some path translation:
    #     ${artifacts}/path/to/test/${stats_file_name}
    # to
    #     gs://${bucket}/${remote_artifacts_dir}[-<cpu_arch>]/${stats_dir}/path/to/test/${stats_file_name}
    #
    # `find` emits each match as ./path/to/test/${stats_file_name}; the sed
    # strips the leading `./` so gsutil does not create a literal `.` folder
    # under ${stats_dir}.
    (cd "${artifacts}" && \
      while IFS= read -r f; do
      if [[ -n "${f}" ]]; then
        artifacts_dir="${remote_artifacts_dir}"
        # If 'cpu_arch=xxx' is encoded in the path, use it as suffix to separate artifacts by cpu_arch.
        if [[ "${f}" == *"/cpu_arch=arm64/"* ]]; then
          artifacts_dir="${artifacts_dir}-arm64"
        elif [[ "${f}" == *"/cpu_arch=fips/"* ]]; then
          artifacts_dir="${artifacts_dir}-fips"
        elif [[ "${f}" == *"/cpu_arch=s390x/"* ]]; then
          artifacts_dir="${artifacts_dir}-s390x"
        fi
        gsutil cp "${f}" "gs://${bucket}/${artifacts_dir}/${stats_dir}/${f}"
      fi
      done <<< "$(find . -name "${stats_file_name}" | sed 's/^\.\///')")
  fi
}
set -x
# Uploads roachprod and roachtest binaries to GCS.
#
# Release branches only (no-op otherwise). Each binary is stored under a
# branch/os/arch-keyed path suffixed with the commit SHA, and latest_sha is
# refreshed so consumers can locate the newest build.
# Relies on globals: BUILD_VCS_NUMBER; and helpers tc_release_branch,
# tc_build_branch, get_host_arch.
function upload_binaries {
  if tc_release_branch; then
    bucket="cockroach-nightly"
    branch=$(tc_build_branch)
    arch=$(get_host_arch)
    os=linux
    sha=${BUILD_VCS_NUMBER}
    # Quote the destination URLs: the branch name (and, defensively, the
    # other components) must not be subject to word splitting or globbing.
    gsutil cp bin/roachprod "gs://${bucket}/binaries/${branch}/${os}/${arch}/roachprod.${sha}"
    gsutil cp bin/roachtest "gs://${bucket}/binaries/${branch}/${os}/${arch}/roachtest.${sha}"
    # N.B. both binaries are built from the same SHA, so one blob will suffice.
    echo "${sha}" | gsutil cp - "gs://${bucket}/binaries/${branch}/${os}/${arch}/latest_sha"
  fi
}
# Invoked from the EXIT trap below: push the binaries first, then any
# collected stats files.
upload_all() {
  upload_binaries
  upload_stats
}
# Upload any ${stats_file_name} we can find, and some binaries, no matter what happens.
trap upload_all EXIT
# Set up the parameters for the roachtest invocation. The `${VAR-default}`
# form keeps a caller-supplied value, including an explicit empty string,
# and only falls back to the default when the variable is unset.
PARALLELISM="${PARALLELISM-16}"
CPUQUOTA="${CPUQUOTA-1024}"
TESTS="${TESTS-}"
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class FirewallRule(GCPResource):
    '''Deployment-manager representation of a GCP firewall rule.'''

    resource_type = "compute.v1.firewall"

    # pylint: disable=too-many-arguments
    def __init__(self,
                 rname,
                 project,
                 zone,
                 desc,
                 network,
                 allowed,
                 target_tags,
                 source_ranges=None,
                 source_tags=None,
                ):
        '''Build a firewall-rule resource.

        Args:
            rname: resource name.
            project: GCP project the rule belongs to.
            zone: zone passed through to GCPResource.
            desc: human-readable description of the rule.
            network: name of the network resource in this deployment; the
                rule references it via a $(ref.<name>.selfLink) expression.
            allowed: allowed protocol/port entries.
            target_tags: instance tags the rule applies to.
            source_ranges: optional list of source CIDR ranges.
            source_tags: optional list of source instance tags.
        '''
        super(FirewallRule, self).__init__(rname,
                                           FirewallRule.resource_type,
                                           project,
                                           zone)
        self._desc = desc
        self._allowed = allowed
        # Reference the network by deployment-manager ref expression.
        self._network = '$(ref.%s.selfLink)' % network
        self._target_tags = target_tags
        # Fall back to empty lists; `or []` mirrors the original truthiness
        # checks (a falsy argument yields a fresh empty list).
        self._source_ranges = source_ranges or []
        self._source_tags = source_tags or []

    @property
    def description(self):
        '''Human-readable description of the rule.'''
        return self._desc

    @property
    def network(self):
        '''$(ref...) expression pointing at the rule's network.'''
        return self._network

    @property
    def allowed(self):
        '''Allowed protocol/port entries.'''
        return self._allowed

    @property
    def target_tags(self):
        '''Tags selecting the instances this rule applies to.'''
        return self._target_tags

    @property
    def source_tags(self):
        '''Source instance tags (possibly empty).'''
        return self._source_tags

    @property
    def source_ranges(self):
        '''Source CIDR ranges (possibly empty).'''
        return self._source_ranges

    def to_resource(self):
        """Return the deployment-manager resource dict for this rule."""
        properties = {'description': self.description,
                      'network': self.network,
                      'sourceRanges': self.source_ranges,
                      'sourceTags': self.source_tags,
                      'allowed': self.allowed,
                      'targetTags': self.target_tags,
                     }
        return {'name': self.name,
                'type': FirewallRule.resource_type,
                'properties': properties,
               }
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feature_extractor."""
import json
import os
import feature_extractor
import numpy
from PIL import Image
from six.moves import cPickle
from tensorflow.python.platform import googletest
def _FilePath(filename):
return os.path.join('testdata', filename)
def _MeanElementWiseDifference(a, b):
"""Calculates element-wise percent difference between two numpy matrices."""
difference = numpy.abs(a - b)
denominator = numpy.maximum(numpy.abs(a), numpy.abs(b))
# We dont care if one is 0 and another is 0.01
return (difference / (0.01 + denominator)).mean()
class FeatureExtractorTest(googletest.TestCase):
  """Checks YouTube8MFeatureExtractor's PCA projection against golden data.

  Requires testdata/sports1m_frame.pkl: a pickle whose dict has keys
  'original' (raw feature vector) and 'pca' (expected projected vector).
  """

  def setUp(self):
    # A fresh extractor for each test case.
    self._extractor = feature_extractor.YouTube8MFeatureExtractor()

  def testPCAOnFeatureVector(self):
    # Open the pickle in binary mode and close it deterministically. The
    # previous code passed a text-mode handle straight to cPickle.load,
    # which leaked the handle and breaks under Python 3, where pickle
    # requires a bytes stream.
    with open(_FilePath('sports1m_frame.pkl'), 'rb') as pkl_file:
      sports_1m_test_data = cPickle.load(pkl_file)
    actual_pca = self._extractor.apply_pca(sports_1m_test_data['original'])
    expected_pca = sports_1m_test_data['pca']
    self.assertLess(_MeanElementWiseDifference(actual_pca, expected_pca), 1e-5)
# Allow running this module directly (e.g. `python feature_extractor_test.py`).
if __name__ == '__main__':
  googletest.main()
#!/bin/sh
test_description='test combined/stat/moved interaction'
# Pin the default branch name so the expected output below (which merges
# "main") is stable regardless of local git configuration.
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
# This test covers a weird 3-way interaction between "--cc -p", which will run
# the combined diff code, along with "--stat", which will be computed as a
# first-parent stat during the combined diff, and "--color-moved", which
# enables the emitted_symbols list to store the diff in memory.
# Build this history (test_commit creates <name>.t containing <name>):
#   A---B        (main)
#    \   \
#     C---M---D  (side, checked out) — M merges main into side.
test_expect_success 'set up history with a merge' '
test_commit A &&
test_commit B &&
git checkout -b side HEAD^ &&
test_commit C &&
git merge -m M main &&
test_commit D
'
# The actual regression check: run `git log` with --cc -p (combined diff),
# --stat (first-parent stat computed during the combined diff), and
# --color-moved (buffers output via the emitted_symbols list), and compare
# against the expected output. Note that merge commit M gets only a stat,
# no patch, under --cc.
test_expect_success 'log --cc -p --stat --color-moved' '
cat >expect <<-EOF &&
commit D
---
D.t | 1 +
1 file changed, 1 insertion(+)
diff --git a/D.t b/D.t
new file mode 100644
index 0000000..$(git rev-parse --short D:D.t)
--- /dev/null
+++ b/D.t
@@ -0,0 +1 @@
+D
commit M
B.t | 1 +
1 file changed, 1 insertion(+)
commit C
---
C.t | 1 +
1 file changed, 1 insertion(+)
diff --git a/C.t b/C.t
new file mode 100644
index 0000000..$(git rev-parse --short C:C.t)
--- /dev/null
+++ b/C.t
@@ -0,0 +1 @@
+C
commit B
---
B.t | 1 +
1 file changed, 1 insertion(+)
diff --git a/B.t b/B.t
new file mode 100644
index 0000000..$(git rev-parse --short B:B.t)
--- /dev/null
+++ b/B.t
@@ -0,0 +1 @@
+B
commit A
---
A.t | 1 +
1 file changed, 1 insertion(+)
diff --git a/A.t b/A.t
new file mode 100644
index 0000000..$(git rev-parse --short A:A.t)
--- /dev/null
+++ b/A.t
@@ -0,0 +1 @@
+A
EOF
git log --format="commit %s" --cc -p --stat --color-moved >actual &&
test_cmp expect actual
'
test_done
//// [tests/cases/compiler/asyncArrowInClassES5.ts] ////
//// [asyncArrowInClassES5.ts]
// https://github.com/Microsoft/TypeScript/issues/16924
// Should capture `this`
class Test {
static member = async (x: string) => { };
}
//// [asyncArrowInClassES5.js]
"use strict";
// https://github.com/Microsoft/TypeScript/issues/16924
// Should capture `this`
var _a;
class Test {
}
_a = Test;
Test.member = (x) => __awaiter(void 0, void 0, void 0, function* () { }); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/asyncArrowInClassES5(target=es2015).js |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.erasurecode.coder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.erasurecode.ECBlock;
import org.apache.hadoop.io.erasurecode.ECBlockGroup;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
/**
* An abstract erasure encoder that's to be inherited by new encoders.
*
* It implements the {@link ErasureCoder} interface.
*/
@InterfaceAudience.Private
public abstract class ErasureEncoder extends Configured
implements ErasureCoder {
private final int numDataUnits;
private final int numParityUnits;
private final ErasureCoderOptions options;
public ErasureEncoder(ErasureCoderOptions options) {
this.options = options;
this.numDataUnits = options.getNumDataUnits();
this.numParityUnits = options.getNumParityUnits();
}
@Override
public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
// We may have more than this when considering complicate cases. HADOOP-11550
return prepareEncodingStep(blockGroup);
}
@Override
public int getNumDataUnits() {
return numDataUnits;
}
@Override
public int getNumParityUnits() {
return numParityUnits;
}
@Override
public ErasureCoderOptions getOptions() {
return options;
}
protected ECBlock[] getInputBlocks(ECBlockGroup blockGroup) {
return blockGroup.getDataBlocks();
}
protected ECBlock[] getOutputBlocks(ECBlockGroup blockGroup) {
return blockGroup.getParityBlocks();
}
@Override
public boolean preferDirectBuffer() {
return false;
}
@Override
public void release() {
// Nothing to do by default
}
/**
* Perform encoding against a block group.
* @param blockGroup blockGroup.
* @return encoding step for caller to do the real work
*/
protected abstract ErasureCodingStep prepareEncodingStep(
ECBlockGroup blockGroup);
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncoder.java |
# -*- coding: utf-8 -*-
"""
Contraction map, used to expand contractions in text
@author: Eric
"""
CONTRACTION_MAP = {
"ain't": "is not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'll've": "I will have",
"I'm": "I am",
"I've": "I have",
"i'd": "i would",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it would",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
} | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Traffic control library for constraining the network configuration on a port.
The traffic controller sets up a constrained network configuration on a port.
Traffic to the constrained port is forwarded to a specified server port.
"""
import logging
import os
import re
import subprocess
# The maximum bandwidth limit.
_DEFAULT_MAX_BANDWIDTH_KBIT = 1000000
class TrafficControlError(BaseException):
"""Exception raised for errors in traffic control library.
Attributes:
msg: User defined error message.
cmd: Command for which the exception was raised.
returncode: Return code of running the command.
stdout: Output of running the command.
stderr: Error output of running the command.
"""
def __init__(self, msg, cmd=None, returncode=None, output=None,
error=None):
BaseException.__init__(self, msg)
self.msg = msg
self.cmd = cmd
self.returncode = returncode
self.output = output
self.error = error
def CheckRequirements():
"""Checks if permissions are available to run traffic control commands.
Raises:
TrafficControlError: If permissions to run traffic control commands are not
available.
"""
if os.geteuid() != 0:
_Exec(['sudo', '-n', 'tc', '-help'],
msg=('Cannot run \'tc\' command. Traffic Control must be run as root '
'or have password-less sudo access to this command.'))
_Exec(['sudo', '-n', 'iptables', '-help'],
msg=('Cannot run \'iptables\' command. Traffic Control must be run '
'as root or have password-less sudo access to this command.'))
def CreateConstrainedPort(config):
"""Creates a new constrained port.
Imposes packet level constraints such as bandwidth, latency, and packet loss
on a given port using the specified configuration dictionary. Traffic to that
port is forwarded to a specified server port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
loss: Percentage of packets to drop (integer 0-100).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
_AddRootQdisc(config['interface'])
try:
_ConfigureClass('add', config)
_AddSubQdisc(config)
_AddFilter(config['interface'], config['port'])
_AddIptableRule(config['interface'], config['port'], config['server_port'])
except TrafficControlError as e:
logging.debug('Error creating constrained port %d.\nError: %s\n'
'Deleting constrained port.', config['port'], e.error)
DeleteConstrainedPort(config)
raise e
def DeleteConstrainedPort(config):
"""Deletes an existing constrained port.
Deletes constraints set on a given port and the traffic forwarding rule from
the constrained port to a specified server port.
The original constrained network configuration used to create the constrained
port must be passed in.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
try:
# Delete filters first so it frees the class.
_DeleteFilter(config['interface'], config['port'])
finally:
try:
# Deleting the class deletes attached qdisc as well.
_ConfigureClass('del', config)
finally:
_DeleteIptableRule(config['interface'], config['port'],
config['server_port'])
def TearDown(config):
"""Deletes the root qdisc and all iptables rules.
Args:
config: Constraint configuration dictionary, format:
interface: Network interface name (string).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface')
command = ['sudo', 'tc', 'qdisc', 'del', 'dev', config['interface'], 'root']
try:
_Exec(command, msg='Could not delete root qdisc.')
finally:
_DeleteAllIpTableRules()
def _CheckArgsExist(config, *args):
"""Check that the args exist in config dictionary and are not None.
Args:
config: Any dictionary.
*args: The list of key names to check.
Raises:
TrafficControlError: If any key name does not exist in config or is None.
"""
for key in args:
if key not in config.keys() or config[key] is None:
raise TrafficControlError('Missing "%s" parameter.' % key)
def _AddRootQdisc(interface):
"""Sets up the default root qdisc.
Args:
interface: Network interface name.
Raises:
TrafficControlError: If adding the root qdisc fails for a reason other than
it already exists.
"""
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle',
'1:', 'htb']
try:
_Exec(command, msg=('Error creating root qdisc. '
'Make sure you have root access'))
except TrafficControlError as e:
# Ignore the error if root already exists.
if not 'File exists' in e.error:
raise e
def _ConfigureClass(option, config):
"""Adds or deletes a class and qdisc attached to the root.
The class specifies bandwidth, and qdisc specifies delay and packet loss. The
class ID is based on the config port.
Args:
option: Adds or deletes a class option [add|del].
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
"""
# Use constrained port as class ID so we can attach the qdisc and filter to
# it, as well as delete the class, using only the port number.
class_id = '1:%x' % config['port']
if 'bandwidth' not in config.keys() or not config['bandwidth']:
bandwidth = _DEFAULT_MAX_BANDWIDTH_KBIT
else:
bandwidth = config['bandwidth']
bandwidth = '%dkbit' % bandwidth
command = ['sudo', 'tc', 'class', option, 'dev', config['interface'],
'parent', '1:', 'classid', class_id, 'htb', 'rate', bandwidth,
'ceil', bandwidth]
_Exec(command, msg=('Error configuring class ID %s using "%s" command.' %
(class_id, option)))
def _AddSubQdisc(config):
"""Adds a qdisc attached to the class identified by the config port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
loss: Percentage of packets to drop (integer 0-100).
"""
port_hex = '%x' % config['port']
class_id = '1:%x' % config['port']
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', config['interface'], 'parent',
class_id, 'handle', port_hex + ':0', 'netem']
# Check if packet-loss is set in the configuration.
if 'loss' in config.keys() and config['loss']:
loss = '%d%%' % config['loss']
command.extend(['loss', loss])
# Check if latency is set in the configuration.
if 'latency' in config.keys() and config['latency']:
latency = '%dms' % config['latency']
command.extend(['delay', latency])
_Exec(command, msg='Could not attach qdisc to class ID %s.' % class_id)
def _AddFilter(interface, port):
"""Redirects packets coming to a specified port into the constrained class.
Args:
interface: Interface name to attach the filter to (string).
port: Port number to filter packets with (integer 1-65535).
"""
class_id = '1:%x' % port
command = ['sudo', 'tc', 'filter', 'add', 'dev', interface, 'protocol', 'ip',
'parent', '1:', 'prio', '1', 'u32', 'match', 'ip', 'sport', port,
'0xffff', 'flowid', class_id]
_Exec(command, msg='Error adding filter on port %d.' % port)
def _DeleteFilter(interface, port):
"""Deletes the filter attached to the configured port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
"""
handle_id = _GetFilterHandleId(interface, port)
command = ['sudo', 'tc', 'filter', 'del', 'dev', interface, 'protocol', 'ip',
'parent', '1:0', 'handle', handle_id, 'prio', '1', 'u32']
_Exec(command, msg='Error deleting filter on port %d.' % port)
def _GetFilterHandleId(interface, port):
"""Searches for the handle ID of the filter identified by the config port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
Returns:
The handle ID.
Raises:
TrafficControlError: If handle ID was not found.
"""
command = ['sudo', 'tc', 'filter', 'list', 'dev', interface, 'parent', '1:']
output = _Exec(command, msg='Error listing filters.')
# Search for the filter handle ID associated with class ID '1:port'.
handle_id_re = re.search(
'([0-9a-fA-F]{3}::[0-9a-fA-F]{3}).*(?=flowid 1:%x\s)' % port, output)
if handle_id_re:
return handle_id_re.group(1)
raise TrafficControlError(('Could not find filter handle ID for class ID '
'1:%x.') % port)
def _AddIptableRule(interface, port, server_port):
"""Forwards traffic from constrained port to a specified server port.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port to forward the packets to (integer 1-65535).
"""
# Preroute rules for accessing the port through external connections.
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
# Output rules for accessing the rule through localhost or 127.0.0.1
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteIptableRule(interface, port, server_port):
"""Deletes the iptable rule associated with specified port number.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port packets are forwarded to (integer 1-65535).
"""
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error deleting iptables rule for port %d.' % port)
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteAllIpTableRules():
"""Deletes all iptables rules."""
command = ['sudo', 'iptables', '-t', 'nat', '-F']
_Exec(command, msg='Error deleting all iptables rules.')
def _Exec(command, msg=None):
"""Executes a command.
Args:
command: Command list to execute.
msg: Message describing the error in case the command fails.
Returns:
The standard output from running the command.
Raises:
TrafficControlError: If command fails. Message is set by the msg parameter.
"""
cmd_list = [str(x) for x in command]
cmd = ' '.join(cmd_list)
logging.debug('Running command: %s', cmd)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
raise TrafficControlError(msg, cmd, p.returncode, output, error)
return output.strip() | unknown | codeparrot/codeparrot-clean | ||
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
         iter_lim=None, show=False, calc_var=False):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations --    solve  A*x = b

      2. Linear least squares  --    solve  A*x = b
                                     in the least-squares sense

      3. Damped least squares  --    solve  (   A    )*x = ( b )
                                            ( damp*I )     ( 0 )
                                     in the least-squares sense

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperatorLinear}
        Representation of an m-by-n matrix.  It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : (m,) ndarray
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient.
    atol, btol : float, default 1.0e-8
        Stopping tolerances. If both are 1.0e-9 (say), the final
        residual norm should be accurate to about 9 digits.  (The
        final x will usually have fewer correct digits, depending on
        cond(A) and the size of damp.)
    conlim : float
        Another stopping tolerance.  lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say).  For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive.
    iter_lim : int
        Explicit limitation on number of iterations (for safety).
    show : bool
        Display an iteration log.
    calc_var : bool
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``.  Equal to `r1norm` if
        ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'*r - damp^2*x)``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``.  This is well defined if A has full column
        rank or ``damp > 0``.  (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution.  The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem.  Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling.  If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A  b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling.  Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero.  However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large.  Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

      1. Compute a residual vector ``r0 = b - A*x0``.
      2. Use LSQR to solve the system  ``A*dx = r0``.
      3. Add the correction dx to obtain a final solution ``x = x0 + dx``.

    This requires that ``x0`` be available before and after the call
    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
    to solve A*x = b and k2 iterations to solve A*dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate.  The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A*x = b, the larger value
    btol*norm(b)/norm(r0)  should be suitable for A*dx = r0.

    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M*x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A*M(inverse)*z =
    b``, after which x can be recovered by solving M*x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR.  If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583.  LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.
    """
    A = aslinearoperator(A)
    # Work with b as a 1-D vector regardless of input shape.
    if len(b.shape) > 1:
        b = b.squeeze()
    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    var = np.zeros(n)

    # Human-readable explanations for each istop code (indexed 0..7).
    msg = ('The exact solution is x = 0 ',
        'Ax - b is small enough, given atol, btol ',
        'The least-squares solution is good enough, given atol ',
        'The estimate of cond(Abar) has exceeded conlim ',
        'Ax - b is small enough for this machine ',
        'The least-squares solution is good enough for this machine',
        'Cond(Abar) seems to be too large for this machine ',
        'The iteration limit has been reached ')

    if show:
        print(' ')
        print('LSQR Least-squares solution of Ax = b')
        str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)
        str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    # Initialize the scalar quantities used for the running estimates of
    # norm(A), cond(A), norm(x) and the residual norms.
    itn = 0
    istop = 0
    # NOTE(review): nstop is assigned but never used below — appears to be a
    # leftover from the MATLAB/Fortran original.
    nstop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1/conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0

    """
    Set up the first vectors u and v for the bidiagonalization.
    These satisfy beta*u = b, alfa*v = A'u.
    """
    # NOTE(review): __xm and __xn appear unused in the remainder of the
    # function; kept for parity with the translated original.
    __xm = np.zeros(m) # a matrix for temporary holding
    __xn = np.zeros(n) # a matrix for temporary holding
    v = np.zeros(n)
    u = b
    x = np.zeros(n)
    alfa = 0
    beta = np.linalg.norm(u)
    w = np.zeros(n)
    if beta > 0:
        u = (1/beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)
    if alfa > 0:
        v = (1/alfa) * v
        w = v.copy()

    rhobar = alfa
    phibar = beta
    bnorm = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        # b (or A'b) is zero: x = 0 is the exact solution.
        print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = ' Itn x[0] r1norm r2norm '
    head2 = ' Compatible LS Norm A Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = ' %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        """
        %     Perform the next step of the bidiagonalization to obtain the
        %     next  beta, u, alfa, v.  These satisfy the relations
        %                beta*u  =  a*v   -  alfa*u,
        %                alfa*v  =  A'*u  -  beta*v.
        """
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1/beta) * u
            # Running Frobenius-norm estimate of Abar = [[A]; [damp*I]].
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix  Abar,
        # and the norms of  rbar  and  Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        #    r1norm = ||b - Ax|| and
        #    r2norm = rnorm in current code
        #           = sqrt(r1norm^2 + damp^2*||x||^2).
        #    Estimate r1norm from
        #    r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        # Although there is cancellation, it might be accurate enough.
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        if r1sq < 0:
            r1norm = -r1norm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol or ctol. (The user may have set any or all of
        # the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps, btol = eps, conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= iter_lim-10:
            prnt = True
        # if itn%10 == 0: prnt = True
        if test3 <= 2*ctol:
            prnt = True
        if test2 <= 10*atol:
            prnt = True
        if test1 <= 10*rtol:
            prnt = True
        if istop != 0:
            prnt = True

        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = ' %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
        print(str1 + ' ' + str2)
        print(str3 + ' ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
"""HTML utilities suitable for global use."""
import re
import string
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.http import urlquote
# Configuration for urlize() function.
# NOTE: these literals are HTML entity references; an earlier extraction pass
# had entity-decoded them (e.g. '&lt;' had become '<'), which broke both
# urlize()'s punctuation matching and the bullet-list detection below.
LEADING_PUNCTUATION = ['(', '<', '&lt;']
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;']

# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;']

# Matches '&' characters that are not already part of an entity reference.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
    ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
     '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
# The comprehension variable leaks into module scope under Python 2 only.
del x # Temporary variable
def escape(html):
    """
    Returns the given HTML with ampersands, quotes and angle brackets encoded.
    """
    # The replacement targets must be HTML entity references; the previous
    # text had them entity-decoded (e.g. replace('&', '&') — a no-op — and
    # replace("'", ''') — a syntax error), so escape() escaped nothing.
    # '&' must be replaced first so later entities are not double-escaped.
    return mark_safe(force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, unicode)
# (character, JavaScript escape sequence) pairs applied by escapejs().
# Each listed character is replaced by its \uXXXX form so the result is
# safe to embed in a JavaScript string literal inside a <script> block.
_base_js_escapes = (
    ('\\', r'\u005C'),
    ('\'', r'\u0027'),
    ('"', r'\u0022'),
    ('>', r'\u003E'),
    ('<', r'\u003C'),
    ('&', r'\u0026'),
    ('=', r'\u003D'),
    ('-', r'\u002D'),
    (';', r'\u003B'),
    # U+2028/U+2029 are valid in JSON but terminate lines in JavaScript.
    (u'\u2028', r'\u2028'),
    (u'\u2029', r'\u2029')
)

# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
    tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # Convert once up front and wrap once at the end, instead of re-running
    # force_unicode()/mark_safe() on every loop iteration as the original
    # did; the returned value is identical.
    value = force_unicode(value)
    for bad, good in _js_escapes:
        value = value.replace(bad, good)
    return mark_safe(value)
escapejs = allow_lazy(escapejs, unicode)
def conditional_escape(html):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.
    """
    # Strings already marked safe are passed through untouched.
    if not isinstance(html, SafeData):
        return escape(html)
    return html
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    # Normalize all newline conventions to '\n' first.
    normalized = re.sub(r'\r\n|\r|\n', '\n', force_unicode(value))
    paragraphs = []
    # Two or more consecutive newlines separate paragraphs; single
    # newlines inside a paragraph become <br /> tags.
    for para in re.split('\n{2,}', normalized):
        if autoescape:
            para = escape(para)
        paragraphs.append(u'<p>%s</p>' % para.replace('\n', '<br />'))
    return u'\n\n'.join(paragraphs)
linebreaks = allow_lazy(linebreaks, unicode)
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    return re.sub(r'<[^>]*?>', '', force_unicode(value))
# Pass `unicode` as the lazy result class, matching every other function in
# this module; without it, allow_lazy raises a TypeError when strip_tags is
# called with a lazy translation string (Django ticket #16409).
strip_tags = allow_lazy(strip_tags, unicode)
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    # Collapse any run of whitespace sitting between a '>' and a '<'.
    html = force_unicode(value)
    return re.sub(r'>\s+<', '><', html)
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, unicode)
def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    # Removes both named (&name;) and numeric (&#123;) entity references.
    text = force_unicode(value)
    return re.sub(r'&(?:\w+|#\d+);', '', text)
strip_entities = allow_lazy(strip_entities, unicode)
def fix_ampersands(value):
    """Returns the given HTML with all unencoded ampersands encoded correctly."""
    # The replacement must be the entity reference '&amp;'; the previous text
    # had it entity-decoded to a bare '&', turning this function into an
    # identity transform.
    return unencoded_ampersands_re.sub('&amp;', force_unicode(value))
fix_ampersands = allow_lazy(fix_ampersands, unicode)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links and links ending in .org, .net or
    .com. Links can have trailing punctuation (periods, commas, close-parens)
    and leading punctuation (opening parens) and it'll still do the right
    thing.

    If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an
    ellipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.
    """
    trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
    safe_input = isinstance(text, SafeData)
    words = word_split_re.split(force_unicode(text))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = None
        # Cheap pre-filter before running the punctuation regex.
        if '.' in word or '@' in word or ':' in word:
            match = punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            # Make URL we want to point to.
            url = None
            # rel="nofollow" applies to web links only; mailto links never
            # carry it.  Use a per-word attribute so suppressing it for an
            # e-mail address does not clobber it for later URLs (the previous
            # code reassigned nofollow_attr itself, so every link after the
            # first e-mail address lost its rel="nofollow").
            link_attr = nofollow_attr
            if middle.startswith('http://') or middle.startswith('https://'):
                url = urlquote(middle, safe='/&=:;#?+*')
            elif middle.startswith('www.') or ('@' not in middle and \
                    middle and middle[0] in string.ascii_letters + string.digits and \
                    (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
                url = urlquote('http://%s' % middle, safe='/&=:;#?+*')
            elif '@' in middle and not ':' in middle and simple_email_re.match(middle):
                url = 'mailto:%s' % middle
                link_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, link_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return u''.join(words)
urlize = allow_lazy(urlize, unicode)
def clean_html(text):
    """
    Clean the given HTML.  Specifically, do the following:
        * Convert <b> and <i> to <strong> and <em>.
        * Encode all ampersands correctly.
        * Remove all "target" attributes from <a> tags.
        * Remove extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
          bottom of the text.
    """
    from django.utils.text import normalize_newlines
    text = normalize_newlines(force_unicode(text))
    # \1 preserves the optional leading '/' so both opening and closing
    # tags are converted.
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim stupid HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)
    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        # Turn each bullet paragraph into a list item and wrap the run
        # of items in a <ul>.
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return u'<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom
    # of the text.
    text = trailing_empty_content_re.sub('', text)
    return text
clean_html = allow_lazy(clean_html, unicode)
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Data namespace.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/gdata/docs/2.0/elements.html
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
# Templates for building fully qualified XML tag/attribute names in the
# namespaces used by the Google Data protocol.
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s'

# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'

# Event location kinds, event status, visibility, and transparency values.
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'

# Message label values.
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'

# Message recipient (who) rel values.
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'

# Contact phone number / address rel values.
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'

# Rating rel values.
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'

# Event "where" rel values.
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'

# Instant-messaging protocol identifiers.
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'

# Comment feed rel values.
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'

# Mail-permission values.
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'

# Postal address usage values.
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'

# Event attendee type and attendee status values.
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'
class Error(Exception):
    """Base exception for errors raised by this module."""
    pass
class MissingRequiredParameters(Error):
    """Raised when an operation lacks a required argument.

    For example, BatchFeed.add_batch_entry raises this when neither an
    entry nor an id_url_string is supplied.
    """
    pass
class LinkFinder(atom.data.LinkFinder):
    """Mixin used in Feed and Entry classes to simplify link lookups by type.

    Provides lookup methods for edit, edit-media, post, ACL and other special
    links which are common across Google Data APIs.

    The find_* methods return the link's URL (href) as a string, while the
    get_* methods return the whole link element.  find_url/get_link are
    inherited from atom.data.LinkFinder.
    """

    def find_html_link(self):
        """Finds the first link with rel of alternate and type of text/html."""
        for link in self.link:
            if link.rel == 'alternate' and link.type == 'text/html':
                return link.href
        return None

    FindHtmlLink = find_html_link

    def get_html_link(self):
        """Returns the full link element with rel alternate / type text/html, or None."""
        for a_link in self.link:
            if a_link.rel == 'alternate' and a_link.type == 'text/html':
                return a_link
        return None

    GetHtmlLink = get_html_link

    def find_post_link(self):
        """Get the URL to which new entries should be POSTed.

        The POST target URL is used to insert new entries.

        Returns:
          A str for the URL in the link with a rel matching the POST type.
        """
        return self.find_url('http://schemas.google.com/g/2005#post')

    FindPostLink = find_post_link

    def get_post_link(self):
        """Returns the link element with the POST rel, or None if absent."""
        return self.get_link('http://schemas.google.com/g/2005#post')

    GetPostLink = get_post_link

    def find_acl_link(self):
        """Returns the URL of this resource's access control list (ACL) feed."""
        return self.find_url(
            'http://schemas.google.com/acl/2007#accessControlList')

    FindAclLink = find_acl_link

    def get_acl_link(self):
        """Returns the link element pointing to the ACL feed, or None if absent."""
        return self.get_link(
            'http://schemas.google.com/acl/2007#accessControlList')

    GetAclLink = get_acl_link

    def find_feed_link(self):
        """Returns the URL of the related feed for this resource."""
        return self.find_url('http://schemas.google.com/g/2005#feed')

    FindFeedLink = find_feed_link

    def get_feed_link(self):
        """Returns the link element with the feed rel, or None if absent."""
        return self.get_link('http://schemas.google.com/g/2005#feed')

    GetFeedLink = get_feed_link

    def find_previous_link(self):
        """Returns the URL of the previous page in a paged result set."""
        return self.find_url('previous')

    FindPreviousLink = find_previous_link

    def get_previous_link(self):
        """Returns the link element with rel 'previous', or None if absent."""
        return self.get_link('previous')

    GetPreviousLink = get_previous_link
class TotalResults(atom.core.XmlElement):
    """opensearch:totalResults for a GData feed."""
    # Fully qualified tag name in the OpenSearch namespace.
    _qname = OPENSEARCH_TEMPLATE % 'totalResults'
class StartIndex(atom.core.XmlElement):
    """The opensearch:startIndex element in GData feed."""
    # Fully qualified tag name in the OpenSearch namespace.
    _qname = OPENSEARCH_TEMPLATE % 'startIndex'
class ItemsPerPage(atom.core.XmlElement):
    """The opensearch:itemsPerPage element in GData feed."""
    # Fully qualified tag name in the OpenSearch namespace.
    _qname = OPENSEARCH_TEMPLATE % 'itemsPerPage'
class ExtendedProperty(atom.core.XmlElement):
    """The Google Data extendedProperty element.

    Used to store arbitrary key-value information specific to your
    application. The value can either be a text string stored as an XML
    attribute (.value), or an XML node (XmlBlob) as a child element.

    This element is used in the Google Calendar data API and the Google
    Contacts data API.
    """
    _qname = GDATA_TEMPLATE % 'extendedProperty'
    # XML attributes: the property's key and its (optional) string value.
    name = 'name'
    value = 'value'

    def get_xml_blob(self):
        """Returns the XML blob as an atom.core.XmlElement.

        Returns:
          An XmlElement representing the blob's XML, or None if no
          blob was set.
        """
        # The blob, when present, is the element's first (and only)
        # unrecognized child element.
        if self._other_elements:
            return self._other_elements[0]
        else:
            return None

    GetXmlBlob = get_xml_blob

    def set_xml_blob(self, blob):
        """Sets the contents of the extendedProperty to XML as a child node.

        Since the extendedProperty is only allowed one child element as an XML
        blob, setting the XML blob will erase any preexisting member elements
        in this object.

        Args:
          blob: str or atom.core.XmlElement representing the XML blob stored in
                the extendedProperty.
        """
        # Erase any existing extension_elements, clears the child nodes from the
        # extendedProperty.
        if isinstance(blob, atom.core.XmlElement):
            self._other_elements = [blob]
        else:
            # NOTE(review): str() on a non-ASCII unicode blob would raise
            # UnicodeEncodeError under Python 2 — confirm callers pass ASCII
            # text or an XmlElement.
            self._other_elements = [atom.core.parse(str(blob))]

    SetXmlBlob = set_xml_blob
class GDEntry(atom.data.Entry, LinkFinder):
    """Extends Atom Entry to provide data processing"""
    # Entity tag attribute in the GData namespace (optimistic-concurrency
    # version identifier mirrored from the HTTP ETag header).
    etag = '{http://schemas.google.com/g/2005}etag'

    def get_id(self):
        """Returns the stripped text of the atom:id element, or None if unset."""
        if self.id is not None and self.id.text is not None:
            return self.id.text.strip()
        return None

    GetId = get_id

    def is_media(self):
        """Returns True if this entry is a media entry (has an edit-media link)."""
        # find_media_edit_link is inherited — presumably from
        # atom.data.LinkFinder; it is not defined in this module.
        if self.find_media_edit_link():
            return True
        return False

    IsMedia = is_media

    def find_media_link(self):
        """Returns the URL to the media content, if the entry is a media entry.

        Otherwise returns None.
        """
        if self.is_media():
            return self.content.src
        return None

    FindMediaLink = find_media_link
class GDFeed(atom.data.Feed, LinkFinder):
    """A Feed from a GData service."""
    # Entity tag attribute in the GData namespace.
    etag = '{http://schemas.google.com/g/2005}etag'
    # OpenSearch paging metadata elements.
    total_results = TotalResults
    start_index = StartIndex
    items_per_page = ItemsPerPage
    # The feed's entries, parsed as GDEntry objects.
    entry = [GDEntry]

    def get_id(self):
        """Returns the stripped text of the atom:id element, or None if unset."""
        if self.id is not None and self.id.text is not None:
            return self.id.text.strip()
        return None

    GetId = get_id

    def get_generator(self):
        """Returns the stripped text of the atom:generator element, or None."""
        if self.generator and self.generator.text:
            return self.generator.text.strip()
        return None
class BatchId(atom.core.XmlElement):
    """Identifies a single operation in a batch request."""
    # Fully qualified tag name in the GData batch namespace.
    _qname = BATCH_TEMPLATE % 'id'
class BatchOperation(atom.core.XmlElement):
    """The CRUD operation which this batch entry represents."""
    _qname = BATCH_TEMPLATE % 'operation'
    # XML attribute holding the operation label — one of BATCH_INSERT,
    # BATCH_UPDATE, BATCH_DELETE or BATCH_QUERY.
    type = 'type'
class BatchStatus(atom.core.XmlElement):
    """The batch:status element present in a batch response entry.

    A status element contains the code (HTTP response code) and
    reason as elements. In a single request these fields would
    be part of the HTTP response, but in a batch request each
    Entry operation has a corresponding Entry in the response
    feed which includes status information.

    See http://code.google.com/apis/gdata/batch.html#Handling_Errors
    """
    _qname = BATCH_TEMPLATE % 'status'
    # XML attributes: HTTP-style status code, human-readable reason,
    # and the content type of any error body.
    code = 'code'
    reason = 'reason'
    content_type = 'content-type'
class BatchEntry(GDEntry):
    """An atom:entry for use in batch requests.

    The BatchEntry contains additional members to specify the operation to be
    performed on this entry and a batch ID so that the server can reference
    individual operations in the response feed. For more information, see:
    http://code.google.com/apis/gdata/batch.html
    """
    # Child elements: the requested operation, the client-chosen batch id,
    # and (in responses) the per-operation status.
    batch_operation = BatchOperation
    batch_id = BatchId
    batch_status = BatchStatus
class BatchInterrupted(atom.core.XmlElement):
    """The batch:interrupted element sent if batch request was interrupted.

    Only appears in a feed if some of the batch entries could not be processed.
    See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
    """
    _qname = BATCH_TEMPLATE % 'interrupted'
    # XML attributes — reason text plus per-request tallies (presumably
    # counts of succeeded/failed/parsed entries; confirm against the batch
    # protocol documentation linked above).
    reason = 'reason'
    success = 'success'
    failures = 'failures'
    parsed = 'parsed'
class BatchFeed(GDFeed):
    """A feed containing a list of batch request entries."""
    interrupted = BatchInterrupted
    entry = [BatchEntry]

    def add_batch_entry(self, entry=None, id_url_string=None,
                        batch_id_string=None, operation_string=None):
        """Logic for populating members of a BatchEntry and adding to the feed.

        If the entry is not a BatchEntry, it is converted to a BatchEntry so
        that the batch specific members will be present.

        The id_url_string can be used in place of an entry if the batch
        operation applies to a URL. For example query and delete operations
        require just the URL of an entry, no body is sent in the HTTP request.
        If an id_url_string is sent instead of an entry, a BatchEntry is
        created and added to the feed.

        This method also assigns the desired batch id to the entry so that it
        can be referenced in the server's response. If the batch_id_string is
        None, this method will assign a batch_id to be the index at which this
        entry will be in the feed's entry list.

        Args:
          entry: BatchEntry, atom.data.Entry, or another Entry flavor
              (optional) The entry which will be sent to the server as part
              of the batch request. The item must have a valid atom id so
              that the server knows which entry this request references.
          id_url_string: str (optional) The URL of the entry to be acted on.
              You can find this URL in the text member of the atom id for an
              entry. If an entry is not sent, this id will be used to
              construct a new BatchEntry which will be added to the request
              feed.
          batch_id_string: str (optional) The batch ID to be used to
              reference this batch operation in the results feed. If this
              parameter is None, the current length of the feed's entry array
              will be used as a count. Note that batch_ids should either
              always be specified or never, mixing could potentially result
              in duplicate batch ids.
          operation_string: str (optional) The desired batch operation which
              will set the batch_operation.type member of the entry. Options
              are 'insert', 'update', 'delete', and 'query'

        Raises:
          MissingRequiredParameters: Raised if neither an id_url_string nor an
              entry are provided in the request.

        Returns:
          The added entry.
        """
        if entry is None and id_url_string is None:
            raise MissingRequiredParameters('supply either an entry or URL string')
        if entry is None and id_url_string is not None:
            # Build a skeleton entry carrying only the atom id of the target.
            entry = BatchEntry(id=atom.data.Id(text=id_url_string))
        if batch_id_string is not None:
            entry.batch_id = BatchId(text=batch_id_string)
        elif entry.batch_id is None or entry.batch_id.text is None:
            # Default batch id: the entry's index in this feed.
            entry.batch_id = BatchId(text=str(len(self.entry)))
        if operation_string is not None:
            entry.batch_operation = BatchOperation(type=operation_string)
        self.entry.append(entry)
        return entry

    AddBatchEntry = add_batch_entry

    def add_insert(self, entry, batch_id_string=None):
        """Add an insert request to the operations in this batch request feed.

        If the entry doesn't yet have an operation or a batch id, these will
        be set to the insert operation and a batch_id specified as a
        parameter.

        Args:
          entry: BatchEntry The entry which will be sent in the batch feed as
              an insert request.
          batch_id_string: str (optional) The batch ID to be used to
              reference this batch operation in the results feed. If this
              parameter is None, the current length of the feed's entry array
              will be used as a count. Note that batch_ids should either
              always be specified or never, mixing could potentially result
              in duplicate batch ids.
        """
        self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                             operation_string=BATCH_INSERT)

    AddInsert = add_insert

    def add_update(self, entry, batch_id_string=None):
        """Add an update request to the list of batch operations in this feed.

        Sets the operation type of the entry to update if it is not already
        set and assigns the desired batch id to the entry so that it can be
        referenced in the server's response.

        Args:
          entry: BatchEntry The entry which will be sent to the server as an
              update (HTTP PUT) request. The item must have a valid atom id
              so that the server knows which entry to replace.
          batch_id_string: str (optional) The batch ID to be used to
              reference this batch operation in the results feed. If this
              parameter is None, the current length of the feed's entry array
              will be used as a count. See also comments for AddInsert.
        """
        self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                             operation_string=BATCH_UPDATE)

    AddUpdate = add_update

    def add_delete(self, url_string=None, entry=None, batch_id_string=None):
        """Adds a delete request to the batch request feed.

        This method takes either the url_string which is the atom id of the
        item to be deleted, or the entry itself. The atom id of the entry
        must be present so that the server knows which entry should be
        deleted.

        Args:
          url_string: str (optional) The URL of the entry to be deleted. You
              can find this URL in the text member of the atom id for an
              entry.
          entry: BatchEntry (optional) The entry to be deleted.
          batch_id_string: str (optional)

        Raises:
          MissingRequiredParameters: Raised if neither a url_string nor an
              entry are provided in the request.
        """
        self.add_batch_entry(entry=entry, id_url_string=url_string,
                             batch_id_string=batch_id_string,
                             operation_string=BATCH_DELETE)

    AddDelete = add_delete

    def add_query(self, url_string=None, entry=None, batch_id_string=None):
        """Adds a query request to the batch request feed.

        This method takes either the url_string which is the query URL
        whose results will be added to the result feed. The query URL will
        be encapsulated in a BatchEntry, and you may pass in the BatchEntry
        with a query URL instead of sending a url_string.

        Args:
          url_string: str (optional)
          entry: BatchEntry (optional)
          batch_id_string: str (optional)

        Raises:
          MissingRequiredParameters
        """
        self.add_batch_entry(entry=entry, id_url_string=url_string,
                             batch_id_string=batch_id_string,
                             operation_string=BATCH_QUERY)

    AddQuery = add_query

    def find_batch_link(self):
        # The rel of the link which identifies the batch submission endpoint.
        return self.find_url('http://schemas.google.com/g/2005#batch')

    FindBatchLink = find_batch_link
class EntryLink(atom.core.XmlElement):
    """The gd:entryLink element.

    Represents a logically nested entry. For example, a <gd:who>
    representing a contact might have a nested entry from a contact feed.
    """
    _qname = GDATA_TEMPLATE % 'entryLink'
    entry = GDEntry
    rel = 'rel'
    read_only = 'readOnly'
    href = 'href'


class FeedLink(atom.core.XmlElement):
    """The gd:feedLink element.

    Represents a logically nested feed. For example, a calendar feed might
    have a nested feed representing all comments on entries.
    """
    _qname = GDATA_TEMPLATE % 'feedLink'
    feed = GDFeed
    rel = 'rel'
    read_only = 'readOnly'
    count_hint = 'countHint'
    href = 'href'


class AdditionalName(atom.core.XmlElement):
    """The gd:additionalName element.

    Specifies additional (eg. middle) name of the person.
    Contains an attribute for the phonetic representation of the name.
    """
    _qname = GDATA_TEMPLATE % 'additionalName'
    # Yomigana (phonetic reading aid) for the name.
    yomi = 'yomi'


class Comments(atom.core.XmlElement):
    """The gd:comments element.

    Contains a comments feed for the enclosing entry (such as a calendar
    event).
    """
    _qname = GDATA_TEMPLATE % 'comments'
    rel = 'rel'
    feed_link = FeedLink


class Country(atom.core.XmlElement):
    """The gd:country element.

    Country name along with optional country code. The country code is
    given in accordance with ISO 3166-1 alpha-2:
    http://www.iso.org/iso/iso-3166-1_decoding_table

    NOTE(review): a second ``Country`` class is defined later in this module
    and shadows this name at import time — confirm which one callers expect.
    """
    _qname = GDATA_TEMPLATE % 'country'
    code = 'code'
class EmailImParent(atom.core.XmlElement):
    # Base class sharing the attributes common to gd:email and gd:im;
    # not an element itself (subclasses supply _qname).
    address = 'address'
    label = 'label'
    rel = 'rel'
    primary = 'primary'


class Email(EmailImParent):
    """The gd:email element.

    An email address associated with the containing entity (which is
    usually an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'email'
    display_name = 'displayName'


class FamilyName(atom.core.XmlElement):
    """The gd:familyName element.

    Specifies family name of the person, eg. "Smith".
    """
    _qname = GDATA_TEMPLATE % 'familyName'
    yomi = 'yomi'


class Im(EmailImParent):
    """The gd:im element.

    An instant messaging address associated with the containing entity.
    """
    _qname = GDATA_TEMPLATE % 'im'
    protocol = 'protocol'


class GivenName(atom.core.XmlElement):
    """The gd:givenName element.

    Specifies given name of the person, eg. "John".
    """
    _qname = GDATA_TEMPLATE % 'givenName'
    yomi = 'yomi'


class NamePrefix(atom.core.XmlElement):
    """The gd:namePrefix element.

    Honorific prefix, eg. 'Mr' or 'Mrs'.
    """
    _qname = GDATA_TEMPLATE % 'namePrefix'


class NameSuffix(atom.core.XmlElement):
    """The gd:nameSuffix element.

    Honorific suffix, eg. 'san' or 'III'.
    """
    _qname = GDATA_TEMPLATE % 'nameSuffix'


class FullName(atom.core.XmlElement):
    """The gd:fullName element.

    Unstructured representation of the name.
    """
    _qname = GDATA_TEMPLATE % 'fullName'


class Name(atom.core.XmlElement):
    """The gd:name element.

    Allows storing person's name in a structured way. Consists of
    given name, additional name, family name, prefix, suffix and full name.
    """
    _qname = GDATA_TEMPLATE % 'name'
    given_name = GivenName
    additional_name = AdditionalName
    family_name = FamilyName
    name_prefix = NamePrefix
    name_suffix = NameSuffix
    full_name = FullName
class OrgDepartment(atom.core.XmlElement):
    """The gd:orgDepartment element.

    Describes a department within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgDepartment'


class OrgJobDescription(atom.core.XmlElement):
    """The gd:orgJobDescription element.

    Describes a job within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgJobDescription'


class OrgName(atom.core.XmlElement):
    """The gd:orgName element.

    The name of the organization. Must appear within a gd:organization
    element.

    Contains a Yomigana attribute (Japanese reading aid) for the
    organization name.
    """
    _qname = GDATA_TEMPLATE % 'orgName'
    yomi = 'yomi'


class OrgSymbol(atom.core.XmlElement):
    """The gd:orgSymbol element.

    Provides a symbol of an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgSymbol'


class OrgTitle(atom.core.XmlElement):
    """The gd:orgTitle element.

    The title of a person within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgTitle'


class Organization(atom.core.XmlElement):
    """The gd:organization element.

    An organization, typically associated with a contact.
    """
    _qname = GDATA_TEMPLATE % 'organization'
    label = 'label'
    primary = 'primary'
    rel = 'rel'
    department = OrgDepartment
    job_description = OrgJobDescription
    name = OrgName
    symbol = OrgSymbol
    title = OrgTitle
class When(atom.core.XmlElement):
    """The gd:when element.

    Represents a period of time or an instant.
    """
    _qname = GDATA_TEMPLATE % 'when'
    end = 'endTime'
    start = 'startTime'
    value = 'valueString'


class OriginalEvent(atom.core.XmlElement):
    """The gd:originalEvent element.

    Equivalent to the Recurrence ID property specified in section 4.8.4.4
    of RFC 2445. Appears in every instance of a recurring event, to identify
    the original event.

    Contains a <gd:when> element specifying the original start time of the
    instance that has become an exception.
    """
    _qname = GDATA_TEMPLATE % 'originalEvent'
    id = 'id'
    href = 'href'
    when = When


class PhoneNumber(atom.core.XmlElement):
    """The gd:phoneNumber element.

    A phone number associated with the containing entity (which is usually
    an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'phoneNumber'
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'


class PostalAddress(atom.core.XmlElement):
    """The gd:postalAddress element."""
    _qname = GDATA_TEMPLATE % 'postalAddress'
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'


class Rating(atom.core.XmlElement):
    """The gd:rating element.

    Represents a numeric rating of the enclosing entity, such as a
    comment. Each rating supplies its own scale, although it may be
    normalized by a service; for example, some services might convert all
    ratings to a scale from 1 to 5.
    """
    _qname = GDATA_TEMPLATE % 'rating'
    average = 'average'
    max = 'max'
    min = 'min'
    num_raters = 'numRaters'
    rel = 'rel'
    value = 'value'
class Recurrence(atom.core.XmlElement):
    """The gd:recurrence element.

    Represents the dates and times when a recurring event takes place.

    The string that defines the recurrence consists of a set of properties,
    each of which is defined in the iCalendar standard (RFC 2445).
    Specifically, the string usually begins with a DTSTART property that
    indicates the starting time of the first instance of the event, and
    often a DTEND property or a DURATION property to indicate when the
    first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE
    properties, which collectively define a recurring event and its
    exceptions (but see below). (See section 4.8.5 of RFC 2445 for more
    information about these recurrence component properties.) Last comes a
    VTIMEZONE component, providing detailed timezone rules for any timezone
    ID mentioned in the preceding properties.

    Google services like Google Calendar don't generally generate EXRULE
    and EXDATE properties to represent exceptions to recurring events;
    instead, they generate <gd:recurrenceException> elements. However,
    Google services may include EXRULE and/or EXDATE properties anyway;
    for example, users can import events and exceptions into Calendar, and
    if those imported events contain EXRULE or EXDATE properties, then
    Calendar will provide those properties when it sends a <gd:recurrence>
    element.

    Note that the use of <gd:recurrenceException> means that you can't be
    sure just from examining a <gd:recurrence> element whether there are
    any exceptions to the recurrence description. To ensure that you find
    all exceptions, look for <gd:recurrenceException> elements in the feed,
    and use their <gd:originalEvent> elements to match them up with
    <gd:recurrence> elements.
    """
    _qname = GDATA_TEMPLATE % 'recurrence'


class RecurrenceException(atom.core.XmlElement):
    """The gd:recurrenceException element.

    Represents an event that's an exception to a recurring event - that is,
    an instance of a recurring event in which one or more aspects of the
    recurring event (such as attendance list, time, or location) have been
    changed.

    Contains a <gd:originalEvent> element that specifies the original
    recurring event that this event is an exception to.

    When you change an instance of a recurring event, that instance becomes
    an exception. Depending on what change you made to it, the exception
    behaves in either of two different ways when the original recurring
    event is changed:

    - If you add, change, or remove comments, attendees, or attendee
      responses, then the exception remains tied to the original event, and
      changes to the original event also change the exception.
    - If you make any other changes to the exception (such as changing the
      time or location) then the instance becomes "specialized," which means
      that it's no longer as tightly tied to the original event. If you
      change the original event, specialized exceptions don't change. But
      see below.

    For example, say you have a meeting every Tuesday and Thursday at
    2:00 p.m. If you change the attendance list for this Thursday's meeting
    (but not for the regularly scheduled meeting), then it becomes an
    exception. If you change the time for this Thursday's meeting (but not
    for the regularly scheduled meeting), then it becomes specialized.

    Regardless of whether an exception is specialized or not, if you do
    something that deletes the instance that the exception was derived from,
    then the exception is deleted. Note that changing the day or time of a
    recurring event deletes all instances, and creates new ones.

    For example, after you've specialized this Thursday's meeting, say you
    change the recurring meeting to happen on Monday, Wednesday, and Friday.
    That change deletes all of the recurring instances of the
    Tuesday/Thursday meeting, including the specialized one.

    If a particular instance of a recurring event is deleted, then that
    instance appears as a <gd:recurrenceException> containing a
    <gd:entryLink> that has its <gd:eventStatus> set to
    "http://schemas.google.com/g/2005#event.canceled". (For more
    information about canceled events, see RFC 2445.)
    """
    _qname = GDATA_TEMPLATE % 'recurrenceException'
    specialized = 'specialized'
    entry_link = EntryLink
    original_event = OriginalEvent
class Reminder(atom.core.XmlElement):
    """The gd:reminder element.

    A time interval, indicating how long before the containing entity's
    start time or due time attribute a reminder should be issued.
    Alternatively, may specify an absolute time at which a reminder should
    be issued. Also specifies a notification method, indicating what medium
    the system should use to remind the user.
    """
    _qname = GDATA_TEMPLATE % 'reminder'
    absolute_time = 'absoluteTime'
    method = 'method'
    days = 'days'
    hours = 'hours'
    minutes = 'minutes'


class Agent(atom.core.XmlElement):
    """The gd:agent element.

    The agent who actually receives the mail. Used in work addresses.
    Also for 'in care of' or 'c/o'.
    """
    _qname = GDATA_TEMPLATE % 'agent'


class HouseName(atom.core.XmlElement):
    """The gd:housename element.

    Used in places where houses or buildings have names (and not
    necessarily numbers), eg. "The Pillars".
    """
    _qname = GDATA_TEMPLATE % 'housename'


class Street(atom.core.XmlElement):
    """The gd:street element.

    Can be street, avenue, road, etc. This element also includes the
    house number and room/apartment/flat/floor number.
    """
    _qname = GDATA_TEMPLATE % 'street'


class PoBox(atom.core.XmlElement):
    """The gd:pobox element.

    Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
    but not always mutually exclusive with street.
    """
    _qname = GDATA_TEMPLATE % 'pobox'


class Neighborhood(atom.core.XmlElement):
    """The gd:neighborhood element.

    This is used to disambiguate a street address when a city contains more
    than one street with the same name, or to specify a small place whose
    mail is routed through a larger postal town. In China it could be a
    county or a minor city.
    """
    _qname = GDATA_TEMPLATE % 'neighborhood'


class City(atom.core.XmlElement):
    """The gd:city element.

    Can be city, village, town, borough, etc. This is the postal town and
    not necessarily the place of residence or place of business.
    """
    _qname = GDATA_TEMPLATE % 'city'


class Subregion(atom.core.XmlElement):
    """The gd:subregion element.

    Handles administrative districts such as U.S. or U.K. counties that are
    not used for mail addressing purposes. Subregion is not intended for
    delivery addresses.
    """
    _qname = GDATA_TEMPLATE % 'subregion'


class Region(atom.core.XmlElement):
    """The gd:region element.

    A state, province, county (in Ireland), Land (in Germany),
    departement (in France), etc.
    """
    _qname = GDATA_TEMPLATE % 'region'


class Postcode(atom.core.XmlElement):
    """The gd:postcode element.

    Postal code. Usually country-wide, but sometimes specific to the
    city (e.g. "2" in "Dublin 2, Ireland" addresses).
    """
    _qname = GDATA_TEMPLATE % 'postcode'


class Country(atom.core.XmlElement):
    """The gd:country element.

    The name or code of the country.

    NOTE(review): this redefinition shadows the earlier ``Country`` class
    (which also declared a ``code`` attribute) defined above in this module.
    """
    _qname = GDATA_TEMPLATE % 'country'


class FormattedAddress(atom.core.XmlElement):
    """The gd:formattedAddress element.

    The full, unstructured postal address.
    """
    _qname = GDATA_TEMPLATE % 'formattedAddress'


class StructuredPostalAddress(atom.core.XmlElement):
    """The gd:structuredPostalAddress element.

    Postal address split into components. It allows to store the address
    in locale independent format. The fields can be interpreted and used
    to generate formatted, locale dependent address. The following elements
    represent parts of the address: agent, house name, street, P.O. box,
    neighborhood, city, subregion, region, postal code, country. The
    subregion element is not used for postal addresses, it is provided for
    extended uses of addresses only. In order to store postal address in an
    unstructured form formatted address field is provided.
    """
    _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
    rel = 'rel'
    mail_class = 'mailClass'
    usage = 'usage'
    label = 'label'
    primary = 'primary'
    agent = Agent
    house_name = HouseName
    street = Street
    po_box = PoBox
    neighborhood = Neighborhood
    city = City
    subregion = Subregion
    region = Region
    postcode = Postcode
    country = Country
    formatted_address = FormattedAddress
class Where(atom.core.XmlElement):
    """The gd:where element.

    A place (such as an event location) associated with the containing
    entity. The type of the association is determined by the rel attribute;
    the details of the location are contained in an embedded or linked-to
    Contact entry.

    A <gd:where> element is more general than a <gd:geoPt> element. The
    former identifies a place using a text description and/or a Contact
    entry, while the latter identifies a place using a specific geographic
    location.
    """
    _qname = GDATA_TEMPLATE % 'where'
    label = 'label'
    rel = 'rel'
    value = 'valueString'
    entry_link = EntryLink


class AttendeeType(atom.core.XmlElement):
    """The gd:attendeeType element."""
    _qname = GDATA_TEMPLATE % 'attendeeType'
    value = 'value'


class AttendeeStatus(atom.core.XmlElement):
    """The gd:attendeeStatus element."""
    _qname = GDATA_TEMPLATE % 'attendeeStatus'
    value = 'value'


class Who(atom.core.XmlElement):
    """The gd:who element.

    A person associated with the containing entity. The type of the
    association is determined by the rel attribute; the details about the
    person are contained in an embedded or linked-to Contact entry.

    The <gd:who> element can be used to specify email senders and
    recipients, calendar event organizers, and so on.
    """
    _qname = GDATA_TEMPLATE % 'who'
    email = 'email'
    rel = 'rel'
    value = 'valueString'
    attendee_status = AttendeeStatus
    attendee_type = AttendeeType
    entry_link = EntryLink
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.benchmark.compute.operator;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BooleanBlock;
import org.elasticsearch.compute.data.BooleanVector;
import org.elasticsearch.compute.data.BytesRefBlock;
import org.elasticsearch.compute.data.DoubleBlock;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.LongBlock;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import java.util.concurrent.TimeUnit;
@Warmup(iterations = 5)
@Measurement(iterations = 7)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
@Fork(1)
public class BlockKeepMaskBenchmark extends BlockBenchmark {
static {
if (false == "true".equals(System.getProperty("skipSelfTest"))) {
// Smoke test all the expected values and force loading subclasses more like prod
selfTest();
}
}
static void selfTest() {
int totalPositions = 10;
for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) {
String[] params = paramString.split("/");
String dataType = params[0];
String blockKind = params[1];
BooleanVector mask = buildMask(totalPositions);
BenchmarkBlocks data = buildBenchmarkBlocks(dataType, blockKind, mask, totalPositions);
Block[] results = new Block[NUM_BLOCKS_PER_ITERATION];
run(data, mask, results);
assertCheckSums(dataType, blockKind, data, results, totalPositions);
}
}
record BenchmarkBlocks(Block[] blocks, long[] checkSums) {};
static BenchmarkBlocks buildBenchmarkBlocks(String dataType, String blockKind, BooleanVector mask, int totalPositions) {
Block[] blocks = BlockBenchmark.buildBlocks(dataType, blockKind, totalPositions);
return new BenchmarkBlocks(blocks, checksumsFor(dataType, blocks, mask));
}
static long[] checksumsFor(String dataType, Block[] blocks, BooleanVector mask) {
long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION];
switch (dataType) {
case "boolean" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
BooleanBlock block = (BooleanBlock) blocks[blockIndex];
checkSums[blockIndex] = computeBooleanCheckSum(block, mask);
}
}
case "BytesRef" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
BytesRefBlock block = (BytesRefBlock) blocks[blockIndex];
checkSums[blockIndex] = computeBytesRefCheckSum(block, mask);
}
}
case "double" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
DoubleBlock block = (DoubleBlock) blocks[blockIndex];
checkSums[blockIndex] = computeDoubleCheckSum(block, mask);
}
}
case "int" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
IntBlock block = (IntBlock) blocks[blockIndex];
checkSums[blockIndex] = computeIntCheckSum(block, mask);
}
}
case "long" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
LongBlock block = (LongBlock) blocks[blockIndex];
checkSums[blockIndex] = computeLongCheckSum(block, mask);
}
}
// TODO float
default -> throw new IllegalStateException("illegal data type [" + dataType + "]");
}
return checkSums;
}
static BooleanVector buildMask(int totalPositions) {
try (BooleanVector.FixedBuilder builder = blockFactory.newBooleanVectorFixedBuilder(totalPositions)) {
for (int p = 0; p < totalPositions; p++) {
builder.appendBoolean(p % 2 == 0);
}
return builder.build();
}
}
private static void run(BenchmarkBlocks data, BooleanVector mask, Block[] results) {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
results[blockIndex] = data.blocks[blockIndex].keepMask(mask);
}
}
private static void assertCheckSums(String dataType, String blockKind, BenchmarkBlocks data, Block[] results, int positionCount) {
long[] checkSums = checksumsFor(dataType, results, blockFactory.newConstantBooleanVector(true, positionCount));
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
if (checkSums[blockIndex] != data.checkSums[blockIndex]) {
throw new AssertionError(
"checksums do not match for block ["
+ blockIndex
+ "]["
+ dataType
+ "]["
+ blockKind
+ "]: "
+ checkSums[blockIndex]
+ " vs "
+ data.checkSums[blockIndex]
);
}
}
}
private static long computeBooleanCheckSum(BooleanBlock block, BooleanVector mask) {
long sum = 0;
for (int p = 0; p < block.getPositionCount(); p++) {
if (block.isNull(p) || mask.getBoolean(p) == false) {
continue;
}
int start = block.getFirstValueIndex(p);
int end = start + block.getValueCount(p);
for (int i = start; i < end; i++) {
sum += block.getBoolean(i) ? 1 : 0;
}
}
return sum;
}
private static long computeBytesRefCheckSum(BytesRefBlock block, BooleanVector mask) {
long sum = 0;
BytesRef scratch = new BytesRef();
for (int p = 0; p < block.getPositionCount(); p++) {
if (block.isNull(p) || mask.getBoolean(p) == false) {
continue;
}
int start = block.getFirstValueIndex(p);
int end = start + block.getValueCount(p);
for (int i = start; i < end; i++) {
BytesRef v = block.getBytesRef(i, scratch);
sum += v.length > 0 ? v.bytes[v.offset] : 0;
}
}
return sum;
}
private static long computeDoubleCheckSum(DoubleBlock block, BooleanVector mask) {
long sum = 0;
for (int p = 0; p < block.getPositionCount(); p++) {
if (block.isNull(p) || mask.getBoolean(p) == false) {
continue;
}
int start = block.getFirstValueIndex(p);
int end = start + block.getValueCount(p);
for (int i = start; i < end; i++) {
sum += (long) block.getDouble(i);
}
}
return sum;
}
private static long computeIntCheckSum(IntBlock block, BooleanVector mask) {
int sum = 0;
for (int p = 0; p < block.getPositionCount(); p++) {
if (block.isNull(p) || mask.getBoolean(p) == false) {
continue;
}
int start = block.getFirstValueIndex(p);
int end = start + block.getValueCount(p);
for (int i = start; i < end; i++) {
sum += block.getInt(i);
}
}
return sum;
}
private static long computeLongCheckSum(LongBlock block, BooleanVector mask) {
long sum = 0;
for (int p = 0; p < block.getPositionCount(); p++) {
if (block.isNull(p) || mask.getBoolean(p) == false) {
continue;
}
int start = block.getFirstValueIndex(p);
int end = start + block.getValueCount(p);
for (int i = start; i < end; i++) {
sum += block.getLong(i);
}
}
return sum;
}
/**
* Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS}
*/
@Param(
{
"boolean/array",
"boolean/array-multivalue-null",
"boolean/big-array",
"boolean/big-array-multivalue-null",
"boolean/vector",
"boolean/vector-big-array",
"boolean/vector-const",
"BytesRef/array",
"BytesRef/array-multivalue-null",
"BytesRef/vector",
"BytesRef/vector-const",
"double/array",
"double/array-multivalue-null",
"double/big-array",
"double/big-array-multivalue-null",
"double/vector",
"double/vector-big-array",
"double/vector-const",
"int/array",
"int/array-multivalue-null",
"int/big-array",
"int/big-array-multivalue-null",
"int/vector",
"int/vector-big-array",
"int/vector-const",
"long/array",
"long/array-multivalue-null",
"long/big-array",
"long/big-array-multivalue-null",
"long/vector",
"long/vector-big-array",
"long/vector-const" }
)
public String dataTypeAndBlockKind;
private BenchmarkBlocks data;
private final BooleanVector mask = buildMask(BLOCK_TOTAL_POSITIONS);
private final Block[] results = new Block[NUM_BLOCKS_PER_ITERATION];
@Setup
public void setup() {
String[] params = dataTypeAndBlockKind.split("/");
String dataType = params[0];
String blockKind = params[1];
data = buildBenchmarkBlocks(dataType, blockKind, mask, BLOCK_TOTAL_POSITIONS);
}
@Benchmark
@OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS)
public void run() {
run(data, mask, results);
}
@TearDown(Level.Iteration)
public void assertCheckSums() {
String[] params = dataTypeAndBlockKind.split("/");
String dataType = params[0];
String blockKind = params[1];
assertCheckSums(dataType, blockKind, data, results, BLOCK_TOTAL_POSITIONS);
}
} | java | github | https://github.com/elastic/elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
@pytest.mark.online
class TestInputSites(object):
    # Each test just runs one task from this YAML config and asserts the
    # input plugin produced entries.
    # NOTE(review): indentation inside this YAML string was reconstructed
    # from a whitespace-stripped copy; verify against the original file.
    config = ("""
        templates:
          global:
            headers:
              User-Agent: "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 """ +
              """(KHTML, like Gecko) Chrome/35.0.1916.114 Safari/537.36"
        tasks:
          test_sceper:
            sceper: http://sceper.ws/category/movies/movies-dvd-rip
          test_apple_trailers:
            apple_trailers:
              quality: 480p
              genres: ['Action and Adventure']
          test_apple_trailers_simple:
            apple_trailers: 720p
    """)

    @pytest.mark.skip(reason='Missing a usable urlrewriter for uploadgig?')
    def test_sceper(self, execute_task):
        task = execute_task('test_sceper')
        assert task.entries, 'no entries created / site may be down'

    def test_apple_trailers(self, execute_task, use_vcr):
        # use_vcr records/replays the HTTP traffic so this test is stable offline.
        task = execute_task('test_apple_trailers')
        assert task.entries, 'no entries created / site may be down'

    def test_apple_trailers_simple(self, execute_task):
        # Exercises the short-form config (bare quality string).
        task = execute_task('test_apple_trailers_simple')
        assert task.entries, 'no entries created / site may be down'
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curl_setup.h"
#include "urldata.h"
#include "cfilters.h"
#include "curlx/dynbuf.h"
#include "doh.h"
#include "progress.h"
#include "request.h"
#include "sendf.h"
#include "curl_trc.h"
#include "transfer.h"
#include "url.h"
#include "curlx/strparse.h"
/* Bring a SingleRequest to a known all-zero starting state. Counterpart
 * of Curl_req_free(). */
void Curl_req_init(struct SingleRequest *req)
{
  memset(req, 0, sizeof(*req));
}
/* Reset per-request state so the same SingleRequest can run a follow-up
 * request (e.g. redirect or auth retry). Clears flags and byte counters,
 * restarts the client writer chain and (re)initializes the send bufq if
 * the configured upload buffer size changed. */
CURLcode Curl_req_soft_reset(struct SingleRequest *req,
                             struct Curl_easy *data)
{
  CURLcode result;
  req->done = FALSE;
  req->upload_done = FALSE;
  req->upload_aborted = FALSE;
  req->download_done = FALSE;
  req->eos_written = FALSE;
  req->eos_read = FALSE;
  req->eos_sent = FALSE;
  req->ignorebody = FALSE;
  req->shutdown = FALSE;
  req->bytecount = 0;
  req->writebytecount = 0;
  req->header = FALSE;
  req->headerline = 0;
  req->headerbytecount = 0;
  req->allheadercount = 0;
  req->deductheadercount = 0;
  req->httpversion_sent = 0;
  req->httpversion = 0;
  req->sendbuf_hds_len = 0;
  /* set up the client writer chain for the new request */
  result = Curl_client_start(data);
  if(result)
    return result;
  if(!req->sendbuf_init) {
    /* first use: create the send buffer, soft limit lets a single large
     * chunk be buffered in full */
    Curl_bufq_init2(&req->sendbuf, data->set.upload_buffer_size, 1,
                    BUFQ_OPT_SOFT_LIMIT);
    req->sendbuf_init = TRUE;
  }
  else {
    Curl_bufq_reset(&req->sendbuf);
    /* recreate the bufq when the configured buffer size has changed */
    if(data->set.upload_buffer_size != req->sendbuf.chunk_size) {
      Curl_bufq_free(&req->sendbuf);
      Curl_bufq_init2(&req->sendbuf, data->set.upload_buffer_size, 1,
                      BUFQ_OPT_SOFT_LIMIT);
    }
  }
  return CURLE_OK;
}
/* Start a request: record its start timestamp for progress purposes,
 * then soft-reset all per-request state. */
CURLcode Curl_req_start(struct SingleRequest *req,
                        struct Curl_easy *data)
{
  req->start = *Curl_pgrs_now(data);
  return Curl_req_soft_reset(req, data);
}
/* forward declaration, defined further below */
static CURLcode req_flush(struct Curl_easy *data);

/* The request is over. Unless it was aborted, try to flush any bytes
 * still buffered for sending, then tear down client writer state and any
 * pending DoH sub-requests. */
CURLcode Curl_req_done(struct SingleRequest *req,
                       struct Curl_easy *data, bool aborted)
{
  (void)req;
  if(!aborted)
    (void)req_flush(data);  /* best effort, result deliberately ignored */
  Curl_client_reset(data);
#ifndef CURL_DISABLE_DOH
  Curl_doh_close(data);
#endif
  return CURLE_OK;
}
/* Reset the request to its pristine state between transfers: free the
 * pending redirect URL, reset client writer state, drop buffered send
 * data and zero all counters/flags. Keeps allocated buffers (sendbuf)
 * for reuse. */
void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data)
{
  struct curltime t0 = { 0, 0 };
  Curl_safefree(req->newurl);
  Curl_client_reset(data);
  if(req->sendbuf_init)
    Curl_bufq_reset(&req->sendbuf);
#ifndef CURL_DISABLE_DOH
  Curl_doh_close(data);
#endif
  /* Can no longer memset() this struct as we need to keep some state */
  req->size = -1;
  req->maxdownload = -1;
  req->bytecount = 0;
  req->writebytecount = 0;
  req->start = t0;
  req->headerbytecount = 0;
  req->allheadercount = 0;
  req->deductheadercount = 0;
  req->headerline = 0;
  req->offset = 0;
  req->httpcode = 0;
  req->keepon = 0;
  req->upgr101 = UPGR101_NONE;
  req->sendbuf_hds_len = 0;
  req->timeofdoc = 0;
  req->location = NULL;
  req->newurl = NULL;
#ifndef CURL_DISABLE_COOKIES
  req->setcookies = 0;
#endif
  req->header = FALSE;
  req->content_range = FALSE;
  req->download_done = FALSE;
  req->eos_written = FALSE;
  req->eos_read = FALSE;
  req->eos_sent = FALSE;
  req->rewind_read = FALSE;
  req->upload_done = FALSE;
  req->upload_aborted = FALSE;
  req->ignorebody = FALSE;
  req->http_bodyless = FALSE;
  req->chunk = FALSE;
  req->ignore_cl = FALSE;
  req->upload_chunky = FALSE;
  /* re-read the current easy-handle setting for body-less operation */
  req->no_body = data->set.opt_no_body;
  req->authneg = FALSE;
  req->shutdown = FALSE;
}
/* Release everything held by the request: pending redirect URL, the send
 * buffer (if it was ever initialized) and client reader/writer state. */
void Curl_req_free(struct SingleRequest *req, struct Curl_easy *data)
{
  Curl_safefree(req->newurl);
  if(req->sendbuf_init)
    Curl_bufq_free(&req->sendbuf);
  Curl_client_cleanup(data);
}
/* Send `blen` bytes from `buf` to the connection, of which the first
 * `hds_len` are header bytes (hds_len <= blen). Applies max_send_speed
 * limiting to body bytes only, flags EOS when this is the last upload
 * chunk, does CURLINFO_HEADER_OUT/DATA_OUT tracing and updates upload
 * progress. The number of bytes actually accepted is returned in
 * `*pnwritten` (may be short). */
static CURLcode xfer_send(struct Curl_easy *data,
                          const char *buf, size_t blen,
                          size_t hds_len, size_t *pnwritten)
{
  CURLcode result = CURLE_OK;
  bool eos = FALSE;
  *pnwritten = 0;
  DEBUGASSERT(hds_len <= blen);
#ifdef DEBUGBUILD
  {
    /* Allow debug builds to override this logic to force short initial
       sends */
    size_t body_len = blen - hds_len;
    if(body_len) {
      const char *p = getenv("CURL_SMALLREQSEND");
      if(p) {
        curl_off_t body_small;
        if(!curlx_str_number(&p, &body_small, body_len))
          blen = hds_len + (size_t)body_small;
      }
    }
  }
#endif
  /* Make sure this does not send more body bytes than what the max send
     speed says. The headers do not count to the max speed. */
  if(data->set.max_send_speed) {
    size_t body_bytes = blen - hds_len;
    if((curl_off_t)body_bytes > data->set.max_send_speed)
      blen = hds_len + (size_t)data->set.max_send_speed;
  }
  /* EOS when the client signaled end-of-body and this call drains the
   * send buffer (or nothing is buffered at all) */
  if(data->req.eos_read &&
     (Curl_bufq_is_empty(&data->req.sendbuf) ||
      Curl_bufq_len(&data->req.sendbuf) == blen)) {
    DEBUGF(infof(data, "sending last upload chunk of %zu bytes", blen));
    eos = TRUE;
  }
  result = Curl_xfer_send(data, buf, blen, eos, pnwritten);
  if(!result) {
    /* the EOS flag only "took" when the full chunk went out */
    if(eos && (blen == *pnwritten))
      data->req.eos_sent = TRUE;
    if(*pnwritten) {
      if(hds_len)
        Curl_debug(data, CURLINFO_HEADER_OUT, buf,
                   CURLMIN(hds_len, *pnwritten));
      if(*pnwritten > hds_len) {
        size_t body_len = *pnwritten - hds_len;
        Curl_debug(data, CURLINFO_DATA_OUT, buf + hds_len, body_len);
        /* only body bytes count towards upload progress */
        data->req.writebytecount += body_len;
        Curl_pgrs_upload_inc(data, body_len);
      }
    }
  }
  return result;
}
/* Push as much buffered request data to the connection as it accepts,
 * consuming sent bytes from the send bufq and keeping the header-byte
 * accounting (`sendbuf_hds_len`) in sync. Stops on a short write. */
static CURLcode req_send_buffer_flush(struct Curl_easy *data)
{
  CURLcode result = CURLE_OK;
  const unsigned char *buf;
  size_t blen;
  while(Curl_bufq_peek(&data->req.sendbuf, &buf, &blen)) {
    size_t nwritten, hds_len = CURLMIN(data->req.sendbuf_hds_len, blen);
    result = xfer_send(data, (const char *)buf, blen, hds_len, &nwritten);
    if(result)
      break;
    Curl_bufq_skip(&data->req.sendbuf, nwritten);
    if(hds_len) {
      /* part (or all) of the sent bytes were header bytes */
      data->req.sendbuf_hds_len -= CURLMIN(hds_len, nwritten);
    }
    /* leave if we could not send all. Maybe network blocking or
     * speed limits on transfer */
    if(nwritten < blen)
      break;
  }
  return result;
}
/* Mark the upload side of the request as finished (successfully or
 * aborted): stop the send loop, take the post-transfer timestamp, close
 * the client reader and log what happened. Finally tell the transfer
 * layer sending is closed. */
static CURLcode req_set_upload_done(struct Curl_easy *data)
{
  DEBUGASSERT(!data->req.upload_done);
  data->req.upload_done = TRUE;
  data->req.keepon &= ~KEEP_SEND; /* we are done sending */
  Curl_pgrsTime(data, TIMER_POSTRANSFER);
  Curl_creader_done(data, data->req.upload_aborted);
  if(data->req.upload_aborted) {
    /* drop anything still buffered, it will never be sent */
    Curl_bufq_reset(&data->req.sendbuf);
    if(data->req.writebytecount)
      infof(data, "abort upload after having sent %" FMT_OFF_T " bytes",
            data->req.writebytecount);
    else
      infof(data, "abort upload");
  }
  else if(data->req.writebytecount)
    infof(data, "upload completely sent off: %" FMT_OFF_T " bytes",
          data->req.writebytecount);
  else if(!data->req.download_done) {
    DEBUGASSERT(Curl_bufq_is_empty(&data->req.sendbuf));
    infof(data, Curl_creader_total_length(data) ?
          "We are completely uploaded and fine" :
          "Request completely sent off");
  }
  return Curl_xfer_send_close(data);
}
/* Drive the request's send side to completion: flush the send buffer
 * (CURLE_AGAIN while data remains unsendable), send an explicit
 * zero-length EOS chunk if needed, optionally shut down the send
 * direction, and finally mark the upload done. */
static CURLcode req_flush(struct Curl_easy *data)
{
  CURLcode result;
  if(!data || !data->conn)
    return CURLE_FAILED_INIT;
  if(!Curl_bufq_is_empty(&data->req.sendbuf)) {
    result = req_send_buffer_flush(data);
    if(result)
      return result;
    if(!Curl_bufq_is_empty(&data->req.sendbuf)) {
      /* could not drain the buffer, caller must retry later */
      DEBUGF(infof(data, "Curl_req_flush(len=%zu) -> EAGAIN",
             Curl_bufq_len(&data->req.sendbuf)));
      return CURLE_AGAIN;
    }
  }
  else if(Curl_xfer_needs_flush(data)) {
    /* nothing buffered here, but connection filters still hold data */
    DEBUGF(infof(data, "Curl_req_flush(), xfer send_pending"));
    return Curl_xfer_flush(data);
  }
  if(data->req.eos_read && !data->req.eos_sent) {
    /* all data went out but the EOS flag did not; send an empty chunk
     * purely to transport the EOS signal */
    char tmp = 0;
    size_t nwritten;
    result = xfer_send(data, &tmp, 0, 0, &nwritten);
    if(result)
      return result;
    DEBUGASSERT(data->req.eos_sent);
  }
  if(!data->req.upload_done && data->req.eos_read && data->req.eos_sent) {
    DEBUGASSERT(Curl_bufq_is_empty(&data->req.sendbuf));
    if(data->req.shutdown) {
      bool done;
      result = Curl_xfer_send_shutdown(data, &done);
      if(result && data->req.shutdown_err_ignore) {
        infof(data, "Shutdown send direction error: %d. Broken server? "
              "Proceeding as if everything is ok.", result);
        result = CURLE_OK;
        done = TRUE;
      }
      if(result)
        return result;
      if(!done)
        return CURLE_AGAIN;
    }
    return req_set_upload_done(data);
  }
  return CURLE_OK;
}
/* bufq "sip" callback: pull up to `buflen` upload bytes from the client
 * reader into `buf`, reporting the amount read via `*pnread`. Sets the
 * request's `eos_read` flag once the client signals end-of-body. */
static CURLcode add_from_client(void *reader_ctx,
                                unsigned char *buf, size_t buflen,
                                size_t *pnread)
{
  struct Curl_easy *data = reader_ctx;
  bool body_eos;
  CURLcode result = Curl_client_read(data, (char *)buf, buflen, pnread,
                                     &body_eos);
  if(result)
    return result;
  if(body_eos)
    data->req.eos_read = TRUE;
  return CURLE_OK;
}
/* Append `blen` bytes to the request's send buffer, remembering that the
 * first `hds_len` of them are header bytes. The bufq is created with a
 * soft limit, so a successful write always takes the whole chunk. */
static CURLcode req_send_buffer_add(struct Curl_easy *data,
                                    const char *buf, size_t blen,
                                    size_t hds_len)
{
  size_t nwritten;
  CURLcode result = Curl_bufq_cwrite(&data->req.sendbuf, buf, blen,
                                     &nwritten);
  if(!result) {
    /* soft-limited sendbuf must have accepted everything */
    DEBUGASSERT(nwritten == blen);
    data->req.sendbuf_hds_len += hds_len;
  }
  return result;
}
/* Send the assembled request header block `req` using the given HTTP
 * version. Bodyless requests small enough for one sendbuf chunk are sent
 * directly; anything left over (or any request with a body) goes through
 * the send buffer so blocked sends can be retried unchanged. */
CURLcode Curl_req_send(struct Curl_easy *data, struct dynbuf *req,
                       unsigned char httpversion)
{
  CURLcode result;
  const char *buf;
  size_t blen, nwritten;
  if(!data || !data->conn)
    return CURLE_FAILED_INIT;
  data->req.httpversion_sent = httpversion;
  buf = curlx_dyn_ptr(req);
  blen = curlx_dyn_len(req);
  /* if the sendbuf is empty and the request without body and
   * the length to send fits into a sendbuf chunk, we send it directly.
   * If `blen` is larger than `chunk_size`, we can not. Because we
   * might have to retry a blocked send later from sendbuf and that
   * would result in retry sends with a shrunken length. That is trouble. */
  if(Curl_bufq_is_empty(&data->req.sendbuf) &&
     !Curl_creader_total_length(data) &&
     (blen <= data->req.sendbuf.chunk_size)) {
    /* no body to read, everything we send is header bytes */
    data->req.eos_read = TRUE;
    result = xfer_send(data, buf, blen, blen, &nwritten);
    if(result)
      return result;
    buf += nwritten;
    blen -= nwritten;
    if(!blen) {
      /* complete request went out in one go */
      result = req_set_upload_done(data);
      if(result)
        return result;
    }
  }
  if(blen) {
    /* Either we have a request body, or we could not send the complete
     * request in one go. Buffer the remainder and try to add as much
     * body bytes as room is left in the buffer. Then flush. */
    result = req_send_buffer_add(data, buf, blen, blen);
    if(result)
      return result;
    return Curl_req_send_more(data);
  }
  return CURLE_OK;
}
bool Curl_req_sendbuf_empty(struct Curl_easy *data)
{
return !data->req.sendbuf_init || Curl_bufq_is_empty(&data->req.sendbuf);
}
bool Curl_req_want_send(struct Curl_easy *data)
{
/* Not done and upload not blocked and either one of
* - KEEP_SEND
* - request has buffered data to send
* - connection has pending data to send */
return !data->req.done &&
!Curl_rlimit_is_blocked(&data->progress.ul.rlimit) &&
((data->req.keepon & KEEP_SEND) ||
!Curl_req_sendbuf_empty(data) ||
Curl_xfer_needs_flush(data));
}
bool Curl_req_want_recv(struct Curl_easy *data)
{
/* Not done and download not blocked and KEEP_RECV */
return !data->req.done &&
!Curl_rlimit_is_blocked(&data->progress.dl.rlimit) &&
(data->req.keepon & KEEP_RECV);
}
bool Curl_req_done_sending(struct Curl_easy *data)
{
return data->req.upload_done && !Curl_req_want_send(data);
}
/* Make progress on sending the request: top up the send buffer from the
 * client reader (unless aborted, at EOS, paused or full), then flush to
 * the connection. A CURLE_AGAIN from flushing is not an error; sending
 * simply resumes on a later call. */
CURLcode Curl_req_send_more(struct Curl_easy *data)
{
  CURLcode result;
  /* Fill our send buffer if more from client can be read. */
  if(!data->req.upload_aborted &&
     !data->req.eos_read &&
     !Curl_xfer_send_is_paused(data) &&
     !Curl_bufq_is_full(&data->req.sendbuf)) {
    size_t nread;
    result = Curl_bufq_sipn(&data->req.sendbuf, 0,
                            add_from_client, data, &nread);
    if(result && result != CURLE_AGAIN)
      return result;
  }
  result = req_flush(data);
  if(result == CURLE_AGAIN)
    result = CURLE_OK;
  return result;
}
/* Abort the upload: discard all buffered request bytes and mark the
 * sending side as (abnormally) done. A no-op when already done. */
CURLcode Curl_req_abort_sending(struct Curl_easy *data)
{
  if(data->req.upload_done)
    return CURLE_OK;
  Curl_bufq_reset(&data->req.sendbuf);
  data->req.upload_aborted = TRUE;
  data->req.keepon &= ~KEEP_SEND;
  return req_set_upload_done(data);
}
/* Stop all request sending and receiving for this transfer. */
CURLcode Curl_req_stop_send_recv(struct Curl_easy *data)
{
  /* stop receiving and ALL sending as well, including PAUSE and HOLD.
   * We might still be paused on receive client writes though, so
   * keep those bits around. */
  CURLcode result = CURLE_OK;
  if(data->req.keepon & KEEP_SEND)
    result = Curl_req_abort_sending(data);
  /* clear the direction bits regardless of the abort outcome */
  data->req.keepon &= ~(KEEP_RECV | KEEP_SEND);
  return result;
}
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Memory(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
    """Memory configuration and use
    """

    plugin_name = 'memory'
    profiles = ('system', 'hardware', 'memory')

    def setup(self):
        # Kernel memory state: physical/virtual usage, swap, slab
        # allocator detail, page/vmalloc info, KSM and THP settings.
        self.add_copy_spec([
            "/proc/pci",
            "/proc/meminfo",
            "/proc/vmstat",
            "/proc/swaps",
            "/proc/slabinfo",
            "/proc/pagetypeinfo",
            "/proc/vmallocinfo",
            "/sys/kernel/mm/ksm",
            "/sys/kernel/mm/transparent_hugepage/enabled"
        ])
        # Plain `free` is symlinked at the archive root for quick access.
        self.add_cmd_output("free", root_symlink="free")
        self.add_cmd_output([
            "free -m",
            "swapon --bytes --show"
        ])
# vim: set et ts=4 sw=4 : | unknown | codeparrot/codeparrot-clean | ||
import gzip
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from representation import parseJsonLineWithPlace
def docs2chars(docs, char2Idx):
    """Encode tokenized documents as a 3-D int32 tensor of character ids.

    Axes: (document, token position, character position). Shorter tokens
    and documents are zero padded on the right. Characters missing from
    ``char2Idx`` map to the out-of-vocabulary id ``len(char2Idx) + 1``.

    :param docs: sequence of documents, each a sequence of tokens (strings)
    :param char2Idx: dict mapping each known character to its integer index
    :return: np.ndarray of shape (n_docs, max_tokens, max_chars), int32
    """
    if not docs:
        # max() below would raise ValueError on an empty corpus
        return np.zeros((0, 0, 0), dtype='int32')
    nSamples = len(docs)                                     # number of documents
    maxTokensSentences = max(len(doc) for doc in docs)       # max tokens per doc
    maxCharsToken = max(max(len(token) for token in doc)
                        for doc in docs)                     # max chars per token
    # Allocate int32 directly; the original built a float64 array and then
    # .astype()'d it, which made a needless full-size extra copy.
    x = np.zeros((nSamples, maxTokensSentences, maxCharsToken), dtype='int32')
    unknownIdx = len(char2Idx) + 1
    for i, doc in enumerate(docs):
        for j, token in enumerate(doc):
            tokenRepresentation = [char2Idx.get(char, unknownIdx)
                                   for char in token]
            x[i, j, :len(tokenRepresentation)] = tokenRepresentation
    return x
def batch_generator(twitterFile, classEncoder, textTokenizer, char2Idx, maxlen, unknownClass="unk", batch_size=64):
    """Endless generator over a gzipped JSON-lines Twitter dump.

    Yields ``({'inputText': padded_sequences, 'char_embedding': char_tensor},
    classes)`` batches of ``batch_size`` tweets; the final partial batch of
    each pass through the file is yielded too. Tweets whose place name is
    not known to ``classEncoder`` are labelled ``unknownClass``.

    BUG FIXES vs. the original:
    * the end-of-file batch omitted the 'char_embedding' input, breaking
      any model with a character branch on the last batch of every epoch;
    * the batch lists were only reset at the start of the *next* loop
      iteration, so a file ending exactly on a batch boundary re-yielded
      the last full batch as a duplicate "partial" batch.
    """
    def _make_batch(texts, labels):
        # Character-level representation must be built from the raw texts,
        # before they are replaced by token-id sequences.
        chars = docs2chars(texts, char2Idx=char2Idx)
        seqs = textTokenizer.texts_to_sequences(texts)
        seqs = pad_sequences(np.asarray(seqs), maxlen=maxlen)
        classes = classEncoder.transform(labels)
        return ({'inputText': seqs, 'char_embedding': chars}, classes)

    while True:
        print("Opening file '" + twitterFile + "'")
        with gzip.open(twitterFile, 'rb') as file:
            trainTexts = []; trainLabels = []
            for line in file:
                instance = parseJsonLineWithPlace(line.decode('utf-8'))
                trainTexts.append(instance.text)
                if instance.place._name not in classEncoder.classes_:
                    trainLabels.append(unknownClass)
                else:
                    trainLabels.append(instance.place._name)
                if len(trainTexts) == batch_size:
                    yield _make_batch(trainTexts, trainLabels)
                    # reset immediately so EOF handling below cannot
                    # re-yield an already emitted batch
                    trainTexts = []; trainLabels = []
        print("Reached end of generator; usualle End-of-epoch")
        # Yield the last partial batch after reaching end of file
        if len(trainTexts) > 0:
            yield _make_batch(trainTexts, trainLabels)
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
from appengine_wrappers import urlfetch
from future import Future
class _AsyncFetchDelegate(object):
  """Future delegate that resolves an in-flight urlfetch RPC on demand."""

  def __init__(self, rpc):
    self._rpc = rpc

  def Get(self):
    # Blocks until the RPC completes and returns its fetch result.
    return self._rpc.get_result()
def _MakeHeaders(username, password):
headers = { 'Cache-Control': 'max-age=0' }
if username is not None and password is not None:
headers['Authorization'] = 'Basic %s' % base64.encodestring(
'%s:%s' % (username, password))
return headers
class AppEngineUrlFetcher(object):
  """A wrapper around the App Engine urlfetch module that allows for easy
  async fetches.

  When constructed with a |base_path|, fetch URLs are resolved relative
  to it; otherwise they are used verbatim.
  """

  def __init__(self, base_path=None):
    self._base_path = base_path

  def Fetch(self, url, username=None, password=None):
    """Fetches a file synchronously.
    """
    headers = _MakeHeaders(username, password)
    if self._base_path is not None:
      return urlfetch.fetch('%s/%s' % (self._base_path, url), headers=headers)
    # BUG FIX: this branch previously hard-coded the Cache-Control header
    # and ignored |headers|, silently dropping Basic auth credentials for
    # fetchers created without a base path.
    return urlfetch.fetch(url, headers=headers)

  def FetchAsync(self, url, username=None, password=None):
    """Fetches a file asynchronously, and returns a Future with the result.
    """
    rpc = urlfetch.create_rpc()
    headers = _MakeHeaders(username, password)
    if self._base_path is not None:
      urlfetch.make_fetch_call(rpc,
                               '%s/%s' % (self._base_path, url),
                               headers=headers)
    else:
      urlfetch.make_fetch_call(rpc, url, headers=headers)
    return Future(delegate=_AsyncFetchDelegate(rpc))
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
import re
import uuid
import eventlet
import ipaddress
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import requests
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import interface
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils as volutils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)

# Driver-specific oslo.config options, registered on CONF below.
# NOTE(review): the IntOpt defaults are given as strings ('3', '120', '5');
# oslo.config appears to coerce them, but plain ints would be cleaner --
# confirm before changing.
d_opts = [
    cfg.StrOpt('datera_api_port',
               default='7717',
               help='Datera API port.'),
    cfg.StrOpt('datera_api_version',
               default='2',
               help='Datera API version.'),
    cfg.IntOpt('datera_num_replicas',
               default='3',
               deprecated_for_removal=True,
               help='Number of replicas to create of an inode.'),
    cfg.IntOpt('datera_503_timeout',
               default='120',
               help='Timeout for HTTP 503 retry messages'),
    cfg.IntOpt('datera_503_interval',
               default='5',
               help='Interval between 503 retries'),
    cfg.BoolOpt('datera_debug',
                default=False,
                help="True to set function arg and return logging"),
    cfg.BoolOpt('datera_acl_allow_all',
                default=False,
                deprecated_for_removal=True,
                help="True to set acl 'allow_all' on volumes "
                     "created"),
    cfg.BoolOpt('datera_debug_replica_count_override',
                default=False,
                help="ONLY FOR DEBUG/TESTING PURPOSES\n"
                     "True to set replica_count to 1")
]

CONF = cfg.CONF
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts)
# Seconds to wait between polls of a storage instance's state.
DEFAULT_SI_SLEEP = 10
# Name prefixes used to mark objects created/managed by this driver.
INITIATOR_GROUP_PREFIX = "IG-"
OS_PREFIX = "OS-"
UNMANAGE_PREFIX = "UNMANAGED-"

# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because of the nature of the regex
# conflicting with new-style curly braces
UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
                "[a-f0-9]{3}-?[a-f0-9]{12}")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)

# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these
# 'ai'/'ai_inst': app instance collection / single app instance
# 'si'/'si_inst': storage instance collection / single storage instance
# 'vol'/'vol_inst': volume collection / single volume
URL_TEMPLATES = {
    'ai': lambda: 'app_instances',
    'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'),
    'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'),
    'si_inst': lambda storage_name: (
        (URL_TEMPLATES['si']() + '/{}').format(
            '{}', storage_name)),
    'vol': lambda storage_name: (
        (URL_TEMPLATES['si_inst'](storage_name) + '/volumes')),
    'vol_inst': lambda storage_name, volume_name: (
        (URL_TEMPLATES['vol'](storage_name) + '/{}').format(
            '{}', volume_name))}
def _get_name(name):
    """Return ``name`` prefixed with the OpenStack namespace marker."""
    return OS_PREFIX + name
def _get_unmanaged(name):
    """Return ``name`` prefixed with the unmanaged marker."""
    return UNMANAGE_PREFIX + name
def _authenticated(func):
    """Ensure the driver is authenticated to make a request.

    In do_setup() we fetch an auth token and store it. If that expires when
    we do API request, we'll fetch a new one.
    """
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except exception.NotAuthorized:
            # Prevent recursion loop. After the self arg is the
            # resource_type arg from _issue_api_request(). If attempt to
            # login failed, we should just give up.
            if args[0] == 'login':
                raise
            # Token might've expired, get a new one, try again.
            self._login()
            return func(self, *args, **kwargs)
    return func_wrapper
@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class DateraDriver(san.SanISCSIDriver):
"""The OpenStack Datera Driver
Version history:
1.0 - Initial driver
1.1 - Look for lun-0 instead of lun-1.
2.0 - Update For Datera API v2
2.1 - Multipath, ACL and reorg
2.2 - Capabilites List, Extended Volume-Type Support
Naming convention change,
Volume Manage/Unmanage support
"""
VERSION = '2.2'
CI_WIKI_NAME = "datera-ci"
    def __init__(self, *args, **kwargs):
        super(DateraDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(d_opts)
        # SAN credentials double as the Datera REST API credentials.
        self.username = self.configuration.san_login
        self.password = self.configuration.san_password
        self.cluster_stats = {}
        self.datera_api_token = None
        # Retry budget for HTTP 503 responses: poll every `interval`
        # seconds until `datera_503_timeout` seconds are exhausted.
        self.interval = self.configuration.datera_503_interval
        self.retry_attempts = (self.configuration.datera_503_timeout /
                               self.interval)
        # Short random prefix to namespace initiators created by this
        # driver instance (see create_export()).
        self.driver_prefix = str(uuid.uuid4())[:4]
        self.datera_debug = self.configuration.datera_debug

        if self.datera_debug:
            # Trace every method call/return for debugging.
            utils.setup_tracing(['method'])
    def do_setup(self, context):
        """Validate that API credentials are configured, then log in.

        :raises InvalidInput: when san_login/san_password are missing
        """
        # If we can't authenticate through the old and new method, just fail
        # now.
        if not all([self.username, self.password]):
            msg = _("san_login and/or san_password is not set for Datera "
                    "driver in the cinder.conf. Set this information and "
                    "start the cinder-volume service again.")
            LOG.error(msg)
            # NOTE(review): cinder convention is InvalidInput(reason=msg);
            # passing positionally sets the base message instead -- confirm.
            raise exception.InvalidInput(msg)

        self._login()
    @utils.retry(exception.VolumeDriverException, retries=3)
    def _wait_for_resource(self, id, resource_type, policies):
        """Poll the backend until the resource's default volume is usable.

        Raises VolumeDriverException when the op_state is not yet
        'available'; the @utils.retry decorator turns that into up to
        three polling attempts.
        """
        result = self._issue_api_request(resource_type, 'get', id)
        if result['storage_instances'][
                policies['default_storage_name']]['volumes'][
                policies['default_volume_name']]['op_state'] == 'available':
            return
        else:
            raise exception.VolumeDriverException(
                message=_('Resource not ready.'))
    def _create_resource(self, resource, resource_type, body):
        """POST `body` to create `resource_type` and wait until it is ready.

        For app instances, also pushes any QoS policy derived from the
        volume type. Waits (via _wait_for_resource) unless the backend
        already reports the default volume as 'available'.
        """
        result = None
        try:
            result = self._issue_api_request(resource_type, 'post', body=body)
        except exception.Invalid:
            type_id = resource.get('volume_type_id', None)
            if resource_type == 'volumes' and type_id:
                LOG.error(_LE("Creation request failed. Please verify the "
                              "extra-specs set for your volume types are "
                              "entered correctly."))
            raise
        else:
            policies = self._get_policies_for_resource(resource)
            # Handle updating QOS Policies
            if resource_type == URL_TEMPLATES['ai']():
                self._update_qos(resource, policies)
            if result['storage_instances'][policies['default_storage_name']][
                    'volumes'][policies['default_volume_name']][
                    'op_state'] == 'available':
                return
            self._wait_for_resource(_get_name(resource['id']),
                                    resource_type,
                                    policies)
    def create_volume(self, volume):
        """Create a logical volume."""
        # Generate App Instance, Storage Instance and Volume
        # Volume ID will be used as the App Instance Name
        # Storage Instance and Volumes will have standard names
        policies = self._get_policies_for_resource(volume)
        num_replicas = int(policies['replica_count'])
        storage_name = policies['default_storage_name']
        volume_name = policies['default_volume_name']

        app_params = (
            {
                'create_mode': "openstack",
                'uuid': str(volume['id']),
                'name': _get_name(volume['id']),
                # New volumes start with all access denied; ACLs are added
                # at export time (see create_export()).
                'access_control_mode': 'deny_all',
                'storage_instances': {
                    storage_name: {
                        'name': storage_name,
                        'volumes': {
                            volume_name: {
                                'name': volume_name,
                                'size': volume['size'],
                                'replica_count': num_replicas,
                                'snapshot_policies': {
                                }
                            }
                        }
                    }
                }
            })
        self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params)
    def extend_volume(self, volume, new_size):
        """Resize the backend volume to `new_size` GiB.

        The app instance must be offline for the resize, so an online
        instance is detached first and re-exported afterwards.
        """
        # Offline App Instance, if necessary
        reonline = False
        app_inst = self._issue_api_request(
            URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])))
        if app_inst['admin_state'] == 'online':
            reonline = True
            self.detach_volume(None, volume, delete_initiator=False)
        # Change Volume Size
        # NOTE(review): `app_inst` is rebound here from the API response
        # dict to the app instance *name* string.
        app_inst = _get_name(volume['id'])
        data = {
            'size': new_size
        }
        policies = self._get_policies_for_resource(volume)
        self._issue_api_request(
            URL_TEMPLATES['vol_inst'](
                policies['default_storage_name'],
                policies['default_volume_name']).format(app_inst),
            method='put',
            body=data)
        # Online Volume, if it was online before
        if reonline:
            self.create_export(None, volume, None)
    def create_cloned_volume(self, volume, src_vref):
        """Clone `src_vref`'s backend volume into a new app instance.

        The backend clone preserves the source size, so the clone is
        extended afterwards when the new volume is larger.
        """
        policies = self._get_policies_for_resource(volume)
        # Absolute backend path of the source volume, used as clone_src.
        src = "/" + URL_TEMPLATES['vol_inst'](
            policies['default_storage_name'],
            policies['default_volume_name']).format(_get_name(src_vref['id']))
        data = {
            'create_mode': 'openstack',
            'name': _get_name(volume['id']),
            'uuid': str(volume['id']),
            'clone_src': src,
        }
        self._issue_api_request(URL_TEMPLATES['ai'](), 'post', body=data)

        if volume['size'] > src_vref['size']:
            self.extend_volume(volume, volume['size'])
    def delete_volume(self, volume):
        """Detach, then delete the volume's app instance.

        A missing backend object is treated as already deleted (logged,
        not raised), keeping delete idempotent.
        """
        self.detach_volume(None, volume)
        app_inst = _get_name(volume['id'])
        try:
            self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
                app_inst),
                method='delete')
        except exception.NotFound:
            msg = _LI("Tried to delete volume %s, but it was not found in the "
                      "Datera cluster. Continuing with delete.")
            LOG.info(msg, _get_name(volume['id']))
    def ensure_export(self, context, volume, connector):
        """Gets the associated account, retrieves CHAP info and updates."""
        # create_export() is safe to call repeatedly (it re-onlines the app
        # instance and uses conflict-tolerant creates), so ensure delegates.
        return self.create_export(context, volume, connector)
    def initialize_connection(self, volume, connector):
        """Online the app instance and return iSCSI connection properties.

        Returns a multipath-shaped dict (target_iqns/portals/luns lists)
        when the connector asks for multipath, else single-path info.
        Only the first storage instance's access data is used.
        """
        # Now online the app_instance (which will online all storage_instances)
        multipath = connector.get('multipath', False)
        url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        app_inst = self._issue_api_request(url, method='put', body=data)
        storage_instances = app_inst["storage_instances"]
        si_names = list(storage_instances.keys())

        portal = storage_instances[si_names[0]]['access']['ips'][0] + ':3260'
        iqn = storage_instances[si_names[0]]['access']['iqn']
        if multipath:
            # One (portal, iqn, lun) triple per exported IP; the iqn and
            # lun are identical across paths.
            portals = [p + ':3260' for p in
                       storage_instances[si_names[0]]['access']['ips']]
            iqns = [iqn for _ in
                    storage_instances[si_names[0]]['access']['ips']]
            lunids = [self._get_lunid() for _ in
                      storage_instances[si_names[0]]['access']['ips']]

            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_iqns': iqns,
                    'target_portal': portal,
                    'target_portals': portals,
                    'target_lun': self._get_lunid(),
                    'target_luns': lunids,
                    'volume_id': volume['id'],
                    'discard': False}}
        else:
            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_portal': portal,
                    'target_lun': self._get_lunid(),
                    'volume_id': volume['id'],
                    'discard': False}}
    def create_export(self, context, volume, connector):
        """Online the app instance and wire up initiator ACLs / IP pools.

        Steps: (1) set the app instance online, (2) register the
        connector's initiator and place it in a per-volume initiator
        group referenced by each storage instance's ACL (skipped when the
        'acl_allow_all' policy is set), (3) assign an access-network IP
        pool, (4) poll until the storage instance is ready.
        """
        # Online volume in case it hasn't been already
        url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        self._issue_api_request(url, method='put', body=data)
        # Check if we've already setup everything for this volume
        url = (URL_TEMPLATES['si']().format(_get_name(volume['id'])))
        storage_instances = self._issue_api_request(url)
        # Handle adding initiator to product if necessary
        # Then add initiator to ACL
        policies = self._get_policies_for_resource(volume)
        if (connector and
                connector.get('initiator') and
                not policies['acl_allow_all']):
            initiator_name = "OpenStack_{}_{}".format(
                self.driver_prefix, str(uuid.uuid4())[:4])
            initiator_group = INITIATOR_GROUP_PREFIX + volume['id']
            found = False
            initiator = connector['initiator']
            current_initiators = self._issue_api_request('initiators')
            for iqn, values in current_initiators.items():
                if initiator == iqn:
                    found = True
                    break
            # If we didn't find a matching initiator, create one
            if not found:
                data = {'id': initiator, 'name': initiator_name}
                # Try and create the initiator
                # If we get a conflict, ignore it because race conditions
                self._issue_api_request("initiators",
                                        method="post",
                                        body=data,
                                        conflict_ok=True)
            # Create initiator group with initiator in it
            initiator_path = "/initiators/{}".format(initiator)
            initiator_group_path = "/initiator_groups/{}".format(
                initiator_group)
            ig_data = {'name': initiator_group, 'members': [initiator_path]}
            self._issue_api_request("initiator_groups",
                                    method="post",
                                    body=ig_data,
                                    conflict_ok=True)
            # Create ACL with initiator group as reference for each
            # storage_instance in app_instance
            # TODO(_alastor_): We need to avoid changing the ACLs if the
            # template already specifies an ACL policy.
            for si_name in storage_instances.keys():
                acl_url = (URL_TEMPLATES['si']() + "/{}/acl_policy").format(
                    _get_name(volume['id']), si_name)
                data = {'initiator_groups': [initiator_group_path]}
                self._issue_api_request(acl_url,
                                        method="put",
                                        body=data)

        if connector and connector.get('ip'):
            try:
                # Case where volume_type has non default IP Pool info
                if policies['ip_pool'] != 'default':
                    initiator_ip_pool_path = self._issue_api_request(
                        "access_network_ip_pools/{}".format(
                            policies['ip_pool']))['path']
                # Fallback to trying reasonable IP based guess
                else:
                    initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
                        connector['ip'])

                ip_pool_url = URL_TEMPLATES['si_inst'](
                    policies['default_storage_name']).format(
                        _get_name(volume['id']))
                ip_pool_data = {'ip_pool': initiator_ip_pool_path}
                self._issue_api_request(ip_pool_url,
                                        method="put",
                                        body=ip_pool_data)
            except exception.DateraAPIException:
                # Datera product 1.0 support
                pass

        # Check to ensure we're ready for go-time
        self._si_poll(volume, policies)
    def detach_volume(self, context, volume, attachment=None):
        """Force the app instance offline and clean up its ACL state.

        A missing backend object is treated as already detached (logged,
        not raised).
        """
        url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
        data = {
            'admin_state': 'offline',
            'force': True
        }
        try:
            self._issue_api_request(url, method='put', body=data)
        except exception.NotFound:
            msg = _LI("Tried to detach volume %s, but it was not found in the "
                      "Datera cluster. Continuing with detach.")
            LOG.info(msg, volume['id'])
        # TODO(_alastor_): Make acl cleaning multi-attach aware
        self._clean_acl(volume)
def _check_for_acl(self, initiator_path):
"""Returns True if an acl is found for initiator_path """
# TODO(_alastor_) when we get a /initiators/:initiator/acl_policies
# endpoint use that instead of this monstrosity
initiator_groups = self._issue_api_request("initiator_groups")
for ig, igdata in initiator_groups.items():
if initiator_path in igdata['members']:
LOG.debug("Found initiator_group: %s for initiator: %s",
ig, initiator_path)
return True
LOG.debug("No initiator_group found for initiator: %s", initiator_path)
return False
    def _clean_acl(self, volume):
        """Remove this volume's ACL, its initiator group and, when no
        other group references it, the initiator itself.

        Missing pieces (no group, empty member list) mean nothing to
        clean and are logged at debug level only.
        """
        policies = self._get_policies_for_resource(volume)
        acl_url = (URL_TEMPLATES["si_inst"](
            policies['default_storage_name']) + "/acl_policy").format(
                _get_name(volume['id']))
        try:
            initiator_group = self._issue_api_request(acl_url)[
                'initiator_groups'][0]
            initiator_iqn_path = self._issue_api_request(
                initiator_group.lstrip("/"))["members"][0]
            # Clear out ACL and delete initiator group
            self._issue_api_request(acl_url,
                                    method="put",
                                    body={'initiator_groups': []})
            self._issue_api_request(initiator_group.lstrip("/"),
                                    method="delete")
            # Only delete the initiator when no other group still uses it.
            if not self._check_for_acl(initiator_iqn_path):
                self._issue_api_request(initiator_iqn_path.lstrip("/"),
                                        method="delete")
        except (IndexError, exception.NotFound):
            LOG.debug("Did not find any initiator groups for volume: %s",
                      volume)
def create_snapshot(self, snapshot):
policies = self._get_policies_for_resource(snapshot)
url_template = URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/snapshots'
url = url_template.format(_get_name(snapshot['volume_id']))
snap_params = {
'uuid': snapshot['id'],
}
self._issue_api_request(url, method='post', body=snap_params)
def delete_snapshot(self, snapshot):
policies = self._get_policies_for_resource(snapshot)
snap_temp = URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/snapshots'
snapu = snap_temp.format(_get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu, method='get')
try:
for ts, snap in snapshots.items():
if snap['uuid'] == snapshot['id']:
url_template = snapu + '/{}'
url = url_template.format(ts)
self._issue_api_request(url, method='delete')
break
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, _get_name(snapshot['id']))
def create_volume_from_snapshot(self, volume, snapshot):
policies = self._get_policies_for_resource(snapshot)
snap_temp = URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/snapshots'
snapu = snap_temp.format(_get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu, method='get')
for ts, snap in snapshots.items():
if snap['uuid'] == snapshot['id']:
found_ts = ts
break
else:
raise exception.NotFound
src = "/" + (snap_temp + '/{}').format(
_get_name(snapshot['volume_id']), found_ts)
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': _get_name(volume['id']),
'clone_src': src,
})
self._issue_api_request(
URL_TEMPLATES['ai'](),
method='post',
body=app_params)
def manage_existing(self, volume, existing_ref):
"""Manage an existing volume on the Datera backend
The existing_ref must be either the current name or Datera UUID of
an app_instance on the Datera backend in a colon separated list with
the storage instance name and volume name. This means only
single storage instances and single volumes are supported for
managing by cinder.
Eg.
existing_ref['source-name'] == app_inst_name:storage_inst_name:vol_name
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name = existing_ref.split(":")[0]
LOG.debug("Managing existing Datera volume %(volume)s. "
"Changing name to %(existing)s",
existing=existing_ref, volume=_get_name(volume['id']))
data = {'name': _get_name(volume['id'])}
self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
app_inst_name), method='put', body=data)
def manage_existing_get_size(self, volume, existing_ref):
"""Get the size of an unmanaged volume on the Datera backend
The existing_ref must be either the current name or Datera UUID of
an app_instance on the Datera backend in a colon separated list with
the storage instance name and volume name. This means only
single storage instances and single volumes are supported for
managing by cinder.
Eg.
existing_ref == app_inst_name:storage_inst_name:vol_name
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume on the Datera backend
"""
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name, si_name, vol_name = existing_ref.split(":")
app_inst = self._issue_api_request(
URL_TEMPLATES['ai_inst']().format(app_inst_name))
return self._get_size(volume, app_inst, si_name, vol_name)
def _get_size(self, volume, app_inst=None, si_name=None, vol_name=None):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
policies = self._get_policies_for_resource(volume)
si_name = si_name if si_name else policies['default_storage_name']
vol_name = vol_name if vol_name else policies['default_volume_name']
if not app_inst:
vol_url = URL_TEMPLATES['ai_inst']().format(
_get_name(volume['id']))
app_inst = self._issue_api_request(vol_url)
size = app_inst[
'storage_instances'][si_name]['volumes'][vol_name]['size']
return size
    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """List volumes on the backend available for management by Cinder.

        Returns a list of dictionaries, each specifying a volume in the host,
        with the following keys:
        - reference (dictionary): The reference for a volume, which can be
        passed to "manage_existing".
        - size (int): The size of the volume according to the storage
        backend, rounded up to the nearest GB.
        - safe_to_manage (boolean): Whether or not this volume is safe to
        manage according to the storage backend. For example, is the volume
        in use or invalid for any reason.
        - reason_not_safe (string): If safe_to_manage is False, the reason why.
        - cinder_id (string): If already managed, provide the Cinder ID.
        - extra_info (string): Any extra information to return to the user

        :param cinder_volumes: A list of volumes in this host that Cinder
                               currently manages, used to determine if
                               a volume is manageable or not.
        :param marker:    The last item of the previous page; we return the
                          next results after this value (after sorting)
        :param limit:     Maximum number of items to return
        :param offset:    Number of items to skip after marker
        :param sort_keys: List of keys to sort results by (valid keys are
                          'identifier' and 'size')
        :param sort_dirs: List of directions to sort by, corresponding to
                          sort_keys (valid directions are 'asc' and 'desc')
        """
        LOG.debug("Listing manageable Datera volumes")
        app_instances = self._issue_api_request(URL_TEMPLATES['ai']()).values()

        results = []

        cinder_volume_ids = [vol['id'] for vol in cinder_volumes]

        for ai in app_instances:
            ai_name = ai['name']
            reference = None
            size = None
            safe_to_manage = False
            reason_not_safe = None
            cinder_id = None
            extra_info = None
            # Names matching the OS-prefixed UUID pattern are (or were)
            # Cinder-managed volumes.
            if re.match(UUID4_RE, ai_name):
                # NOTE(review): lstrip() strips a *character set*
                # ('O', 'S', '-'), not the literal prefix; safe only while
                # UUIDs are lowercase hex -- consider an explicit
                # startswith()-based prefix strip. TODO confirm.
                cinder_id = ai_name.lstrip(OS_PREFIX)
            # Only app_instances that are not already tracked by Cinder
            # are candidates for management.
            if (not cinder_id and
                    ai_name.lstrip(OS_PREFIX) not in cinder_volume_ids):
                safe_to_manage = self._is_manageable(ai)
            if safe_to_manage:
                # Manageable implies exactly one storage instance holding
                # exactly one volume (see _is_manageable).
                si = list(ai['storage_instances'].values())[0]
                si_name = si['name']
                vol = list(si['volumes'].values())[0]
                vol_name = vol['name']
                size = vol['size']
                reference = {"source-name": "{}:{}:{}".format(
                    ai_name, si_name, vol_name)}

            results.append({
                'reference': reference,
                'size': size,
                'safe_to_manage': safe_to_manage,
                'reason_not_safe': reason_not_safe,
                'cinder_id': cinder_id,
                'extra_info': extra_info})

        page_results = volutils.paginate_entries_list(
            results, marker, limit, offset, sort_keys, sort_dirs)

        return page_results
def _is_manageable(self, app_inst):
if len(app_inst['storage_instances']) == 1:
si = list(app_inst['storage_instances'].values())[0]
if len(si['volumes']) == 1:
return True
return False
def unmanage(self, volume):
"""Unmanage a currently managed volume in Cinder
:param volume: Cinder volume to unmanage
"""
LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
volume['id'], _get_unmanaged(volume['id']))
data = {'name': _get_unmanaged(volume['id'])}
self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
_get_name(volume['id'])), method='put', body=data)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
if refresh or not self.cluster_stats:
try:
self._update_cluster_stats()
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
return self.cluster_stats
def _update_cluster_stats(self):
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request('system')
if 'uuid' not in results:
LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get('volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': int(results['total_capacity']) / units.Gi,
'free_capacity_gb': int(results['available_capacity']) / units.Gi,
'reserved_percentage': 0,
}
self.cluster_stats = stats
def _login(self):
"""Use the san_login and san_password to set token."""
body = {
'name': self.username,
'password': self.password
}
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request('login', 'put', body=body,
sensitive=True)
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
def _get_lunid(self):
return 0
    def _init_vendor_properties(self):
        """Create a dictionary of vendor unique properties.

        This method creates a dictionary of vendor unique properties
        and returns both created dictionary and vendor name.
        Returned vendor name is used to check for name of vendor
        unique properties.

        - Vendor name shouldn't include colon(:) because of the separator
          and it is automatically replaced by underscore(_).
          ex. abc:d -> abc_d
        - Vendor prefix is equal to vendor name.
          ex. abcd
        - Vendor unique properties must start with vendor prefix + ':'.
          ex. abcd:maxIOPS

        Each backend driver needs to override this method to expose
        its own properties using _set_property() like this:

        self._set_property(
            properties,
            "vendorPrefix:specific_property",
            "Title of property",
            _("Description of property"),
            "type")

        : return dictionary of vendor unique properties
        : return vendor name

        prefix: DF --> Datera Fabric
        """
        # NOTE: the repetitive _set_property calls below are intentional;
        # each description must be a literal inside _() so gettext message
        # extraction can find it -- do not refactor into a loop.
        properties = {}

        # Debug override drops the replica count to 1 so single-node test
        # clusters can create volumes.
        if self.configuration.get('datera_debug_replica_count_override'):
            replica_count = 1
        else:
            replica_count = 3
        self._set_property(
            properties,
            "DF:replica_count",
            "Datera Volume Replica Count",
            _("Specifies number of replicas for each volume. Can only be "
              "increased once volume is created"),
            "integer",
            minimum=1,
            default=replica_count)

        self._set_property(
            properties,
            "DF:acl_allow_all",
            "Datera ACL Allow All",
            _("True to set acl 'allow_all' on volumes created. Cannot be "
              "changed on volume once set"),
            "boolean",
            default=False)

        self._set_property(
            properties,
            "DF:ip_pool",
            "Datera IP Pool",
            _("Specifies IP pool to use for volume"),
            "string",
            default="default")

        # ###### QoS Settings ###### #
        self._set_property(
            properties,
            "DF:read_bandwidth_max",
            "Datera QoS Max Bandwidth Read",
            _("Max read bandwidth setting for volume qos, "
              "use 0 for unlimited"),
            "integer",
            minimum=0,
            default=0)

        self._set_property(
            properties,
            "DF:default_storage_name",
            "Datera Default Storage Instance Name",
            _("The name to use for storage instances created"),
            "string",
            default="storage-1")

        self._set_property(
            properties,
            "DF:default_volume_name",
            "Datera Default Volume Name",
            _("The name to use for volumes created"),
            "string",
            default="volume-1")

        self._set_property(
            properties,
            "DF:write_bandwidth_max",
            "Datera QoS Max Bandwidth Write",
            _("Max write bandwidth setting for volume qos, "
              "use 0 for unlimited"),
            "integer",
            minimum=0,
            default=0)

        self._set_property(
            properties,
            "DF:total_bandwidth_max",
            "Datera QoS Max Bandwidth Total",
            _("Max total bandwidth setting for volume qos, "
              "use 0 for unlimited"),
            "integer",
            minimum=0,
            default=0)

        self._set_property(
            properties,
            "DF:read_iops_max",
            "Datera QoS Max iops Read",
            _("Max read iops setting for volume qos, "
              "use 0 for unlimited"),
            "integer",
            minimum=0,
            default=0)

        self._set_property(
            properties,
            "DF:write_iops_max",
            "Datera QoS Max IOPS Write",
            _("Max write iops setting for volume qos, "
              "use 0 for unlimited"),
            "integer",
            minimum=0,
            default=0)

        self._set_property(
            properties,
            "DF:total_iops_max",
            "Datera QoS Max IOPS Total",
            _("Max total iops setting for volume qos, "
              "use 0 for unlimited"),
            "integer",
            minimum=0,
            default=0)
        # ###### End QoS Settings ###### #

        return properties, 'DF'
def _get_policies_for_resource(self, resource):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
type_id = resource.get('volume_type_id', None)
# Handle case of volume with no type. We still want the
# specified defaults from above
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
else:
volume_type = None
specs = {}
# Set defaults:
policies = {k.lstrip('DF:'): str(v['default']) for (k, v)
in self._init_vendor_properties()[0].items()}
if volume_type:
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
policies.update(qos_kvs)
# Cast everything except booleans int that can be cast
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
def _si_poll(self, volume, policies):
# Initial 4 second sleep required for some Datera versions
eventlet.sleep(DEFAULT_SI_SLEEP)
TIMEOUT = 10
retry = 0
check_url = URL_TEMPLATES['si_inst'](
policies['default_storage_name']).format(_get_name(volume['id']))
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
si = self._issue_api_request(check_url)
if si['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
def _update_qos(self, resource, policies):
url = URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(_get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
if fpolicies:
self._issue_api_request(url, 'post', body=fpolicies)
    def _get_ip_pool_for_string_ip(self, ip):
        """Takes a string ipaddress and return the ip_pool API object dict """
        # Fall back to the 'default' pool when no pool's subnet contains
        # the address.
        pool = 'default'
        ip_obj = ipaddress.ip_address(six.text_type(ip))
        ip_pools = self._issue_api_request("access_network_ip_pools")
        for ip_pool, ipdata in ip_pools.items():
            for access, adata in ipdata['network_paths'].items():
                if not adata.get('start_ip'):
                    continue
                pool_if = ipaddress.ip_interface(
                    "/".join((adata['start_ip'], str(adata['netmask']))))
                if ip_obj in pool_if.network:
                    pool = ip_pool
        # NOTE(review): there is no break after a match, so when several
        # pools contain the address the last one iterated wins -- confirm
        # this is intentional.
        return self._issue_api_request(
            "access_network_ip_pools/{}".format(pool))['path']
def _request(self, connection_string, method, payload, header, cert_data):
LOG.debug("Endpoint for Datera API call: %s", connection_string)
try:
response = getattr(requests, method)(connection_string,
data=payload, headers=header,
verify=False, cert=cert_data)
return response
except requests.exceptions.RequestException as ex:
msg = _(
'Failed to make a request to Datera cluster endpoint due '
'to the following reason: %s') % six.text_type(
ex.message)
LOG.error(msg)
raise exception.DateraAPIException(msg)
def _raise_response(self, response):
msg = _('Request to Datera cluster returned bad status:'
' %(status)s | %(reason)s') % {
'status': response.status_code,
'reason': response.reason}
LOG.error(msg)
raise exception.DateraAPIException(msg)
def _handle_bad_status(self,
response,
connection_string,
method,
payload,
header,
cert_data,
sensitive=False,
conflict_ok=False):
if not sensitive:
LOG.debug(("Datera Response URL: %s\n"
"Datera Response Payload: %s\n"
"Response Object: %s\n"),
response.url,
payload,
vars(response))
if response.status_code == 404:
raise exception.NotFound(response.json()['message'])
elif response.status_code in [403, 401]:
raise exception.NotAuthorized()
elif response.status_code == 409 and conflict_ok:
# Don't raise, because we're expecting a conflict
pass
elif response.status_code == 503:
current_retry = 0
while current_retry <= self.retry_attempts:
LOG.debug("Datera 503 response, trying request again")
eventlet.sleep(self.interval)
resp = self._request(connection_string,
method,
payload,
header,
cert_data)
if resp.ok:
return response.json()
elif resp.status_code != 503:
self._raise_response(resp)
else:
self._raise_response(response)
    @_authenticated
    def _issue_api_request(self, resource_url, method='get', body=None,
                           sensitive=False, conflict_ok=False):
        """All API requests to Datera cluster go through this method.

        :param resource_url: the url of the resource
        :param method: the request verb
        :param body: a dict with options for the action_type
        :param sensitive: if True, suppress payload/response debug logging
        :param conflict_ok: if True, an HTTP 409 response is not an error
        :returns: a dict of the response from the Datera cluster
        """
        host = self.configuration.san_ip
        port = self.configuration.datera_api_port
        api_token = self.datera_api_token
        api_version = self.configuration.datera_api_version

        payload = json.dumps(body, ensure_ascii=False)
        # NOTE(review): the encode() result is discarded, so the next line
        # is a no-op and 'payload' is still sent as text; presumably it was
        # meant to be ``payload = payload.encode('utf-8')`` -- confirm
        # before changing, as requests handles text bodies itself.
        payload.encode('utf-8')

        header = {'Content-Type': 'application/json; charset=utf-8',
                  'Datera-Driver': 'OpenStack-Cinder-{}'.format(self.VERSION)}

        protocol = 'http'
        if self.configuration.driver_use_ssl:
            protocol = 'https'

        if api_token:
            header['Auth-Token'] = api_token

        client_cert = self.configuration.driver_client_cert
        client_cert_key = self.configuration.driver_client_cert_key
        cert_data = None

        # A client certificate implies TLS regardless of driver_use_ssl.
        if client_cert:
            protocol = 'https'
            cert_data = (client_cert, client_cert_key)

        connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
                                                   api_version, resource_url)

        response = self._request(connection_string,
                                 method,
                                 payload,
                                 header,
                                 cert_data)

        data = response.json()

        # Non-2xx statuses are translated (and possibly retried) here.
        if not response.ok:
            self._handle_bad_status(response,
                                    connection_string,
                                    method,
                                    payload,
                                    header,
                                    cert_data,
                                    conflict_ok=conflict_ok)

        return data
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
class SingletonType(type):
    """Metaclass that turns each class using it into a per-class
    singleton: the first construction is cached and returned for every
    subsequent call."""

    def __call__(cls, *args, **kwargs):
        # The attribute name is mangled to _SingletonType__instance,
        # matching what the double-underscore assignment below produces.
        if not hasattr(cls, '_SingletonType__instance'):
            cls.__instance = super(SingletonType, cls).__call__(*args,
                                                                **kwargs)
        return cls.__instance
class LogUtil(object):
    """Logging helper intended as a process-wide singleton.

    Builds a logger named 'logger' with a console handler and, when a
    filename is given, a size-rotating file handler under ./log/.
    """

    # NOTE(review): __metaclass__ only takes effect on Python 2; under
    # Python 3 this class would NOT be a singleton -- confirm the target
    # interpreter.
    __metaclass__ = SingletonType
    # Cached logging.Logger instance.
    _logger = None
    # Log file name relative to ./log/, or None for console-only logging.
    _filename = None

    def __init__(self, filename=None):
        """Configure the shared 'logger' logger.

        :param filename: base name of the rotating log file; when None, no
            file handler is attached.  NOTE(review): the ./log/ directory
            must already exist -- RotatingFileHandler does not create it.
        """
        self._filename = filename
        # logger
        self._logger = logging.getLogger('logger')
        # remove default handler
        # (NOTE(review): propagate=False stops records reaching ancestor
        # handlers; it does not remove handlers already on this logger.)
        self._logger.propagate = False
        stream_handler = logging.StreamHandler()
        stream_formatter = logging.Formatter('[%(levelname)8s][%(asctime)s.%(msecs)03d] %(message)s',
                                             datefmt='%Y/%m/%d %H:%M:%S')
        stream_handler.setFormatter(stream_formatter)
        if self._filename is not None:
            # Rotate at 10 MiB, keeping up to 10 backup files.
            file_max_bytes = 10 * 1024 * 1024
            file_handler = logging.handlers.RotatingFileHandler(filename='./log/' + self._filename,
                                                                maxBytes=file_max_bytes,
                                                                backupCount=10)
            file_formatter = logging.Formatter('[%(levelname)8s][%(asctime)s.%(msecs)03d] %(message)s',
                                               datefmt='%Y/%m/%d %H:%M:%S')
            file_handler.setFormatter(file_formatter)
            self._logger.addHandler(file_handler)
        self._logger.addHandler(stream_handler)
        self._logger.setLevel(logging.DEBUG)

    def getlogger(self):
        """Return the configured logging.Logger."""
        return self._logger
# -*- coding: utf-8 -*-
# (c) 2015 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
class TestSaleOrderType(common.TransactionCase):
    """Exercise the sale.order.type customization: partner defaults,
    onchange propagation onto orders, and invoice state of pickings
    created on confirmation."""

    def setUp(self):
        """Create a sale order type (own sequence, journals, warehouse and
        policies) and attach it to a demo partner."""
        super(TestSaleOrderType, self).setUp()
        self.sale_type_model = self.env['sale.order.type']
        self.sale_order_model = self.env['sale.order']
        self.stock_picking_model = self.env['stock.picking']
        self.picking_type_out = self.env.ref('stock.picking_type_out')
        self.sale_line_model = self.env['sale.order.line']
        self.picking_model = self.env['stock.picking']
        self.partner = self.env.ref('base.res_partner_1')
        self.sequence = self.env['ir.sequence'].create({
            'name': 'Test Sales Order',
            'code': 'sale.order',
            'prefix': 'TSO',
            'padding': 3,
        })
        self.journal = self.env.ref('account.sales_journal')
        self.refund_journal = self.env.ref('account.refund_sales_journal')
        self.warehouse = self.env.ref('stock.stock_warehouse_shop0')
        self.product = self.env.ref('product.product_product_4')
        self.sale_type = self.sale_type_model.create({
            'name': 'Test Sale Order Type',
            'sequence_id': self.sequence.id,
            'journal_id': self.journal.id,
            'refund_journal_id': self.refund_journal.id,
            'warehouse_id': self.warehouse.id,
            'picking_policy': 'one',
            'order_policy': 'picking',
            'invoice_state': '2binvoiced',
        })
        # The partner's default type should flow onto new orders.
        self.partner.sale_type = self.sale_type

    def test_sale_order_onchange_partner(self):
        """Selecting the partner must propose the partner's sale type."""
        onchange_partner = self.sale_order_model.onchange_partner_id(
            self.partner.id)
        self.assertEqual(self.sale_type.id,
                         onchange_partner['value']['type_id'])

    def test_sale_order_onchange_type(self):
        """Changing the type must copy warehouse and policies onto the
        order."""
        sale_order = self.sale_order_model.new({'type_id': self.sale_type.id})
        sale_order.onchange_type_id()
        self.assertEqual(self.sale_type.warehouse_id,
                         sale_order.warehouse_id)
        self.assertEqual(self.sale_type.picking_policy,
                         sale_order.picking_policy)
        self.assertEqual(self.sale_type.order_policy, sale_order.order_policy)

    def test_sale_order_confirm(self):
        """Confirming an order must stamp the type's invoice_state on the
        generated pickings."""
        sale_order_dict = self.sale_order_model.onchange_partner_id(
            self.partner.id)['value']
        sale_order_dict['partner_id'] = self.partner.id
        sale_line_dict = {
            'product_id': self.product.id,
            'name': self.product.name,
            'product_uom_qty': 1.0,
            'price_unit': self.product.lst_price,
        }
        sale_order_dict['order_line'] = [(0, 0, sale_line_dict)]
        sale_order = self.sale_order_model.create(sale_order_dict)
        sale_order.onchange_type_id()
        sale_order.action_button_confirm()
        for picking in sale_order.picking_ids:
            self.assertEqual(self.sale_type.invoice_state,
                             picking.invoice_state)

    def test_stock_picking_create(self):
        """A picking can still be created directly (no sale order)."""
        self.picking_out = self.picking_model.create({
            'partner_id': self.partner.id,
            'picking_type_id': self.picking_type_out.id
        })
        self.assertTrue(self.picking_out.id)
import sys
import os
#For baseline and redundacy-detecion to prepare message size picture
def MessageSize(typePrefix, directory):
    """Aggregate overall message sizes across timeout/retry runs.

    Reads ``<prefix>_rty<R>_rtt<T>_overhead.data`` files from ``directory``
    (retry 0..10, timeout 0.1..1.0) and writes one row per timeout to
    ``<typePrefix>-message.data`` (columns are the retry counts).
    """
    with open("%(typePrefix)s-message.data" % vars(), 'w') as wf:
        # Bug fix: the first header line previously lacked a trailing
        # newline, so it ran straight into the '#Timeout:' line below.
        wf.write("#Suggest Filename: %(typePrefix)s-message.data\n"
                 "#Data for drawing message overall size\n" % vars())
        wf.write("#Timeout: 0 0.1 0.2 0.3 0.41 0.5 0.6 0.7 0.8 0.9\n")
        for timeout in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
            wf.write(str(timeout) + " ")
            for retry in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
                path = ("%(directory)s%(typePrefix)s_rty%(retry)s_"
                        "rtt%(timeout)s_overhead.data" % vars())
                with open(path) as datafile:
                    # Column 7 of each non-comment line holds the size.
                    for line in datafile:
                        if line[0] != "#":
                            numbers = line.split(' ')
                            wf.write(numbers[7] + " ")
            wf.write("\n")
def RecvToSendRatio(typePrefix, directory):
    """Build a CDF of per-message receive/send ratios, keyed by packet
    nonce, for each max-message-size (mms) setting.

    NOTE: Python 2 only (has_key/iteritems/print statement).
    """
    writefile = open("%(typePrefix)s-rsratio-nonce.data"%vars(), "w")
    writefile.write("#Suggest Filename: %(typePrefix)s-rsratio.data\n#Data for drawing each package in different Amount/Redundancy\n"%vars())
    writefile.write("#MPM100 ratio MPM200 ratio MPM300 ratio MPM400 ratio MPM500 ratio MPM600 ratio NoLimit ratio\n")
    writefile.write("0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
    backofftime = 2.5 # may subject to change by the data amount wanted to observe
    # msgcount[mms][nonce] -> {'s': sends, 'r': receives, 'rs': ratio}
    msgcount = {}
    ratiotemp = {}
    ratio = {}
    for mms in [100,200,300,400,500,600,-1]:
        msgcount[mms] = {}
        # Count sends: 'SI' (interest) and 'SD' (data) log lines.
        for logcontent in ['SI','SD']:
            file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
            for line in file:
                if line[0:2] == logcontent:
                    info = line.split(' ')
                    for x in info:
                        # 'Pa...:<nonce>' token carries the packet nonce.
                        if x[0:2] == "Pa":
                            nonce = x.split(':')[1]
                            if msgcount[mms].has_key(nonce):
                                msgcount[mms][nonce]["s"] += 1
                            else:
                                msgcount[mms][nonce] = {}
                                msgcount[mms][nonce]["s"] = 1
                                msgcount[mms][nonce]["r"] = 0
                                msgcount[mms][nonce]["rs"] = 0
        # Count receives: 'RI' and 'DRD' log lines.
        for logcontent in ['RI','DRD']:
            file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
            for line in file:
                if line[0:2] == logcontent or line[0:3] == logcontent:
                    info = line.split(' ')
                    for x in info:
                        if x[0:2] == "Pa":
                            nonce = x.split(':')[1]
                            if(msgcount[mms].has_key(nonce)):
                                msgcount[mms][nonce]["r"] += 1
                            else:
                                # Receive with no matching send -- debug dump.
                                print logcontent, mms, nonce
        for nonce in msgcount[mms]:
            msgcount[mms][nonce]['rs'] = float(msgcount[mms][nonce]['r']) / float(msgcount[mms][nonce]['s'])
        # Sort by ratio and turn rank into a CDF value in (0, 1].
        msg = sorted(msgcount[mms].iteritems(), key=lambda s: s[1]['rs'])
        for x in range(len(msg)):
            ratiotemp[msg[x][1]["rs"]] = float(x+1) / len(msg);
        ratio[mms] = sorted(ratiotemp.iteritems())
        ratiotemp.clear()
    # NOTE(review): ratio[600] is omitted from this max(), so rows may be
    # truncated if the 600 series is the longest -- confirm intended.
    length = max(len(ratio[100]),len(ratio[200]),len(ratio[300]),len(ratio[400]),len(ratio[500]),len(ratio[-1]))
    for j in range(length):
        for i in [100,200,300,400,500,600,-1]:
            if(len(ratio[i])<=j):
                writefile.write("null null")
            else:
                # NOTE(review): no trailing separator here (unlike the
                # hopnonce variant), so adjacent columns run together --
                # confirm the plotting script expects this.
                writefile.write(str(ratio[i][j][0])+" "+str(ratio[i][j][1]))
        writefile.write("\n")
def RecvToSendRatioHopnonce(typePrefix, directory):
    """Build a CDF of per-hop receive/send ratios (keyed by hop nonce)
    for each backoff-time setting, with mms fixed at unlimited (-1).

    NOTE: Python 2 only (has_key/iteritems/print statement).
    """
    writefile = open("%(typePrefix)s-rsratio-hopnonce.data"%vars(),"w")
    writefile.write("#Suggest Filename: %(typePrefix)s-rsratio.data\n#Data for drawing receive to send ratio\n"%vars())
    writefile.write("#backofftime: 0 0.5 1 1.5 2 2.5 3\n")
    writefile.write("0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
    #backofftime = 2.5 # may subject to change by the data amount wanted to observe
    mms = -1
    # msgcount[backofftime][nonce] -> {'s': sends, 'r': recvs, 'rs': ratio}
    msgcount = {}
    ratiotemp = {}
    ratio = {}
    for backofftime in [0,0.5,1,1.5,2,2.5,3]:
        msgcount[backofftime] = {}
        # Count sends ('SI'/'SD'), keyed on the 'Ho...:<nonce>' hop token.
        for logcontent in ['SI','SD']:
            file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
            for line in file:
                if line[0:2] == logcontent:
                    info = line.split(' ')
                    for x in info:
                        if x[0:2] == "Ho":
                            nonce = x.split(':')[1]
                            if msgcount[backofftime].has_key(nonce):
                                msgcount[backofftime][nonce]["s"] += 1
                            else:
                                msgcount[backofftime][nonce] = {}
                                msgcount[backofftime][nonce]["s"] = 1
                                msgcount[backofftime][nonce]["r"] = 0
                                msgcount[backofftime][nonce]["rs"] = 0
        # Count receives ('RI'/'DRD').
        for logcontent in ['RI','DRD']:
            file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
            for line in file:
                if line[0:2] == logcontent or line[0:3] == logcontent:
                    info = line.split(' ')
                    for x in info:
                        if x[0:2] == "Ho":
                            nonce = x.split(':')[1]
                            if(msgcount[backofftime].has_key(nonce)):
                                msgcount[backofftime][nonce]["r"] += 1
                            else:
                                # Receive with no matching send -- debug dump.
                                print logcontent, backofftime, nonce
        for nonce in msgcount[backofftime]:
            msgcount[backofftime][nonce]['rs'] = float(msgcount[backofftime][nonce]['r']) / float(msgcount[backofftime][nonce]['s'])
        # Sort by ratio and turn rank into a CDF value in (0, 1].
        msg = sorted(msgcount[backofftime].iteritems(), key=lambda s: s[1]['rs'])
        for x in range(len(msg)):
            ratiotemp[msg[x][1]["rs"]] = float(x+1) / len(msg);
        ratio[backofftime] = sorted(ratiotemp.iteritems())
        ratiotemp.clear()
    length = max(len(ratio[0]),len(ratio[0.5]),len(ratio[1]),len(ratio[1.5]),len(ratio[2]),len(ratio[2.5]),len(ratio[3]))
    for j in range(length):
        for i in [0,0.5,1,1.5,2,2.5,3]:
            if(len(ratio[i])<=j):
                # NOTE(review): unlike the data branch below, no trailing
                # space is written here -- confirm the plot tolerates it.
                writefile.write("null null")
            else:
                writefile.write(str(ratio[i][j][0])+" "+str(ratio[i][j][1]) + " ")
        writefile.write("\n")
#Get recall and latency
def RecallAndLatency(typePrefix, directory, total=50000):
    """Extract recall and latency for every (timeout, retry) run.

    Reads the last line of ``<prefix>_rty<R>_rtt<T>_0.data`` for each
    combination and writes two matrices (rows: timeout, columns: retries
    0..10): ``<typePrefix>-recall.data`` (received count divided by
    ``total``) and ``<typePrefix>-latency.data``.

    :param total: messages sent per run, used as the recall denominator
        (default 50000, previously hard-coded).
    """
    with open("./%(typePrefix)s-recall.data" % vars(), "w") as recallf, \
            open("./%(typePrefix)s-latency.data" % vars(), "w") as latencyf:
        recallf.write("#Data for recall of the %(typePrefix)s\n" % vars())
        latencyf.write("#Data for latency of the %(typePrefix)s\n" % vars())
        recallf.write("#Timeout: 0 0.1 0.2 0.3 0.41 0.5 0.6 0.7 0.8 0.9\n")
        latencyf.write("#Timeout: 0 0.1 0.2 0.3 0.41 0.5 0.6 0.7 0.8 0.9\n")
        for timeout in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
            recallf.write(str(timeout) + " ")
            latencyf.write(str(timeout) + " ")
            for retry in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
                # Final line of each run: column 0 = latency, 1 = received.
                with open("%(directory)s%(typePrefix)s_rty%(retry)s_"
                          "rtt%(timeout)s_0.data" % vars()) as datafile:
                    line = datafile.readlines()[-1].split()
                recallf.write(str(float(line[1]) / total) + " ")
                latencyf.write(line[0] + " ")
            recallf.write("\n")
            latencyf.write("\n")
    #os.system("gnuplot collision-avoidance-recall.gp")
    #os.system("gnuplot collision-avoidance-latency.gp")
def RSRHeatmap(typePrefix, directory):
    """Write a 10x10 heat map of per-node receive/send ratios, counting
    only receives at a node's immediate grid neighbours.

    Assumes the simulation is a 10x10 grid of 100 nodes (indices 0..99,
    row-major) -- TODO confirm topology. Python 2 only (xrange).
    """
    backofftime = 2
    mms = -1
    sendList = []
    recvList = []
    ratiolist = []
    for i in xrange(100):
        sendList.append([])
        recvList.append([])
        ratiolist.append(0)
    # Record the hop nonce of each send, indexed by sender node id
    # (column 1 of the log line).
    for logcontent in ['SI','SD']:
        file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
        for line in file:
            if(line[0:2] == logcontent):
                info = line.split(" ")
                hopnonce = 0
                for x in info:
                    if x[0:2] == "Ho":
                        hopnonce = int(x.split(":")[1])
                if hopnonce != 0:
                    sendList[int(info[1])].append(hopnonce)
        file.close()
    # Same for receives.
    for logcontent in ['RI','DRD']:
        file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
        for line in file:
            if(line[0:2] == logcontent or line[0:3] == logcontent):
                info = line.split(" ")
                hopnonce = 0
                for x in info:
                    if x[0:2] == "Ho":
                        hopnonce = int(x.split(":")[1])
                if hopnonce != 0:
                    recvList[int(info[1])].append(hopnonce)
        file.close()
    for i in xrange(100):
        for x in sendList[i]:
            recv = 0
            # Offsets of the 8 grid neighbours; the edge checks below skip
            # wrap-arounds at the left (i%10==0) and right (i%10==9) columns.
            for ki in [-11,-10,-9,-1,1,9,10,11]:
                if (i+ki >99 or i+ki<0):
                    continue
                elif(i %10 == 0 and (ki == -1 or ki == -11 or ki == 9)):
                    continue
                elif(i % 10 == 9 and (ki == 1 or ki == 11 or ki == -9)):
                    continue
                recv += recvList[i+ki].count(x)
            ratiolist[i] += recv
        # NOTE(review): raises ZeroDivisionError when a node never sent
        # anything -- confirm every node always sends in these runs.
        ratiolist[i] /= float(len(sendList[i]))
    writefile = open("./%(typePrefix)s-heatmap.data"%vars(), "w")
    writefile.write("#Data for receive send ratio on each ndoe of the %(typePrefix)s\n"%vars())
    for i in xrange(10):
        for j in xrange(10):
            writefile.write(str(ratiolist[i*10 + j])+"\t")
        writefile.write("\n")
    writefile.close()
    #os.system("gnuplot collision-avoidance-heatmap.gp")
def RetryTime(typePrefix):
    """Compute per-message retry counts and their CDF for retry budgets
    1..10 (timeout fixed at 0.4), writing <prefix>-retrycount.data and
    <prefix>-retrycdf.data.

    NOTE: Python 2 only (has_key/iteritems/xrange/print statement).
    """
    retrytf = open("./%(typePrefix)s-retrycount.data"%vars(),"w")
    retrytf.write("#Suggest Filename: %(typePrefix)s-retrycount.data\n#Data for drawing number of retries \n"%vars())
    retrycdff = open("./%(typePrefix)s-retrycdf.data"%vars(),"w")
    retrycdff.write("#Suggest Filename: %(typePrefix)s-retrycdf.data\n#Data for drawing CDF of retries \n"%vars())
    num = 100 # may subject to change by the simulation node number
    cdf = {}
    for retry in [1,2,3,4,5,6,7,8,9,10]:
        # m maps message key (columns 3+5 of the log) -> attempt count.
        m = {}
        retrytf.write(str(retry)+" ")
        for to in [0.4]:
            file1 = open("./%(typePrefix)s_rty%(retry)s_rtt%(to)s_SD.data"%vars())
            # Retransmissions ('_T' log) only exist when retry > 1.
            if retry != 1:
                file = open("./%(typePrefix)s_rty%(retry)s_rtt%(to)s_T.data"%vars())
                for line in file:
                    info = line.split(' ')
                    key=info[3] + info[5].strip()
                    if m.has_key(key):
                        m[key] += 1
                    else:
                        m[key] = 1
                file.close()
            # Initial sends ('S' lines in the _SD log).
            for line1 in file1:
                if line1[0]=="S":
                    info1 = line1.split(' ')
                    key=info1[3] + info1[5].strip()
                    if m.has_key(key):
                        m[key] += 1
                    else:
                        m[key] = 1
            file1.close()
            # Average attempts per message.
            c = 0.0
            for k in m:
                c += m[k]
            c /= float(len(m))
            retrytf.write(str(c))
            # _RK log lists messages that were eventually acknowledged.
            file2 = open("./%(typePrefix)s_rty%(retry)s_rtt%(to)s_RK.data"%vars())
            t={}
            for line2 in file2:
                info2 = line2.split(' ')
                key=info2[3] + info2[5].strip()
                t[key] = 1
            file2.close()
            # Messages that exhausted the budget without an ack are marked
            # with the sentinel 10000 (filtered as 'null' when writing).
            for k in m:
                if m[k]==retry:
                    if not t.has_key(k):
                        print "aa"
                        m[k]=10000
            cdft = sorted(m.iteritems(),key=lambda s: s[1])
            cot = {}
            for x in xrange(len(cdft)):
                cot[cdft[x][1]] = float(x+1) / len(cdft);
            cdf[retry]=sorted(cot.iteritems())
        retrytf.write("\n")
    retrytf.close()
    print cdf
    # NOTE(review): the second assignment overwrites the first, so only
    # series 7..10 determine the row count -- presumably this should be a
    # single max over all ten series; confirm. retrycdff is also never
    # closed explicitly.
    length = max(len(cdf[1]),len(cdf[2]),len(cdf[3]),len(cdf[4]),len(cdf[5]),len(cdf[6]))
    length = max(len(cdf[7]),len(cdf[8]),len(cdf[9]),len(cdf[10]))
    for j in xrange(length):
        for i in [1,2,3,4,5,6,7,8,9,10]:
            if(len(cdf[i])<=j):
                retrycdff.write("null null ")
            elif cdf[i][j][0]==10000:
                retrycdff.write("null null ")
            else:
                retrycdff.write(str(cdf[i][j][0])+" "+str(cdf[i][j][1])+ " ")
        retrycdff.write("\n")
#MessageSize("baseline", "F:\\Data_baseline\\")
#MessageSize("redundancy_detection", "F:\\Data_redundancy\\")
#RecvToSendRatio("collision_avoidance", "/home/theodore/pecns3/")
#RecallAndLatency("collision_avoidance", "/home/theodore/pecns3/")
#RecvToSendRatioHopnonce("collision_avoidance", "/home/theodore/pecns3/")
#ReceivedConsumer("baseline", "F:\\Data_baseline\\", 44)
#RSRHeatmap("collision_avoidance", "/home/theodore/pecns3/")
#main
if len(sys.argv) <=1:
print "This python program is the automatic processing for the data log of the ns3 simulator of the pec. \nThis particular for the collision avoidance.\n"
print "Useage: choose the function by the first parameter."
print "a. Receive send ratio of each message."
print "b. Receive send ratio of each message by hop."
print "c. Receive send ratio heatmap for each node by each message hop."
print "d. Recall and latency."
print "Put this file into the same directory of the data files. Then run \"python\" + filename + chiose to get the picture direcely."
else:
if sys.argv[1] == "a":
RecvToSendRatio("collision_avoidance", "./")
elif sys.argv[1] == "b":
RecvToSendRatioHopnonce("collision_avoidance", "./")
elif sys.argv[1] == "c":
RSRHeatmap("collision_avoidance", "./")
elif sys.argv[1] == "d":
RecallAndLatency("ack", "./")
elif sys.argv[1] == "e":
MessageSize("ack", "./")
elif sys.argv[1] == "f":
RetryTime("ack") | unknown | codeparrot/codeparrot-clean | ||
from .base import Browser, ExecutorBrowser, require_arg
from ..webdriver_server import ChromeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
SeleniumRefTestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
# Registration table read by wptrunner's product loader: maps hook names
# to the names of the functions/classes this module provides for the
# "chrome" product.
__wptrunner__ = {"product": "chrome",
                 "check_args": "check_args",
                 "browser": "ChromeBrowser",
                 "executor": {"testharness": "SeleniumTestharnessExecutor",
                              "reftest": "SeleniumRefTestExecutor",
                              "wdspec": "ChromeDriverWdspecExecutor"},
                 "browser_kwargs": "browser_kwargs",
                 "executor_kwargs": "executor_kwargs",
                 "env_extras": "env_extras",
                 "env_options": "env_options"}
def check_args(**kwargs):
    # chromedriver is required to drive Chrome; fail fast when the path
    # was not supplied on the command line.
    require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, **kwargs):
    """Select the keyword arguments used to construct ChromeBrowser."""
    selected = {"binary": kwargs["binary"],
                "webdriver_binary": kwargs["webdriver_binary"]}
    # webdriver_args is optional and may legitimately be absent.
    selected["webdriver_args"] = kwargs.get("webdriver_args")
    return selected
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Build executor keyword arguments, layering Chrome-specific
    Selenium capabilities on top of the shared defaults."""
    from selenium.webdriver import DesiredCapabilities

    result = base_executor_kwargs(test_type, server_config,
                                  cache_manager, run_info_data,
                                  **kwargs)
    result["close_after_done"] = True

    capabilities = dict(DesiredCapabilities.CHROME.items())
    chrome_options = capabilities.setdefault("chromeOptions", {})
    # Always allow popups so tests that open windows are not blocked.
    chrome_options["prefs"] = {
        "profile": {
            "default_content_setting_values": {
                "popups": 1
            }
        }
    }
    for kwarg, capability in (("binary", "binary"), ("binary_args", "args")):
        if kwargs[kwarg] is not None:
            chrome_options[capability] = kwargs[kwarg]
    if test_type == "testharness":
        chrome_options["useAutomationExtension"] = False
        chrome_options["excludeSwitches"] = ["enable-automation"]
    if test_type == "wdspec":
        # wdspec tests exercise the W3C-conformant WebDriver dialect.
        chrome_options["w3c"] = True
    result["capabilities"] = capabilities
    return result
def env_extras(**kwargs):
    """Chrome needs no extra test-environment pieces."""
    return list()
def env_options():
    """No product-specific server environment overrides for Chrome."""
    return dict()
class ChromeBrowser(Browser):
    """Chrome is backed by chromedriver, which is supplied through
    ``wptrunner.webdriver.ChromeDriverServer``.
    """

    def __init__(self, logger, binary, webdriver_binary="chromedriver",
                 webdriver_args=None):
        """Creates a new representation of Chrome. The `binary` argument gives
        the browser binary to use for testing."""
        Browser.__init__(self, logger)
        self.binary = binary
        # The browser itself is launched by chromedriver, so only the
        # driver server process is managed here.
        self.server = ChromeDriverServer(self.logger,
                                         binary=webdriver_binary,
                                         args=webdriver_args)

    def start(self, **kwargs):
        # Non-blocking start; liveness is polled via is_alive()/pid.
        self.server.start(block=False)

    def stop(self, force=False):
        self.server.stop(force=force)

    def pid(self):
        return self.server.pid

    def is_alive(self):
        # TODO(ato): This only indicates the driver is alive,
        # and doesn't say anything about whether a browser session
        # is active.
        return self.server.is_alive()

    def cleanup(self):
        self.stop()

    def executor_browser(self):
        # Tells the executor class how to reach the driver it talks to.
        return ExecutorBrowser, {"webdriver_url": self.server.url}
# Benchmark for Time#xmlschema (ISO 8601 formatting) across local/UTC
# offsets and fractional-second precision. The prelude installs a
# pure-Ruby fallback when the running Ruby lacks a native Time#xmlschema.
prelude: |
  # frozen_string_literal
  unless Time.method_defined?(:xmlschema)
    class Time
      def xmlschema(fraction_digits=0)
        fraction_digits = fraction_digits.to_i
        s = strftime("%FT%T")
        if fraction_digits > 0
          s << strftime(".%#{fraction_digits}N")
        end
        s << (utc? ? 'Z' : strftime("%:z"))
      end
    end
  end
  time = Time.now
  utc_time = Time.now.utc
  fraction_sec = Time.at(123456789.quo(9999999999)).getlocal("+09:00")
  future_time = Time.utc(10000)
# Each entry below is evaluated repeatedly by the benchmark driver.
benchmark:
  - time.xmlschema
  - utc_time.xmlschema
  - time.xmlschema(6)
  - utc_time.xmlschema(6)
  - time.xmlschema(9)
  - utc_time.xmlschema(9)
  - fraction_sec.xmlschema(10)
  - future_time.xmlschema
from six.moves.urllib.parse import urljoin
import funcy as fn
import json
import logging
import requests
import html2text
from cachecontrol import CacheControl
from django.http import StreamingHttpResponse
from oauthlib.oauth2 import WebApplicationClient #, BackendApplicationClient
from requests_oauthlib import OAuth2Session
from ..exceptions import (
UnauthenticatedException,
PermissionDenied,
APIError,
NotFound,
BadRequest,
Unprocessable,
)
log = logging.getLogger(__name__)
class OauthClient(object):
    """Minimal OAuth2 HTTP client for a JSON REST API.

    Wraps ``requests_oauthlib.OAuth2Session`` (with HTTP caching via
    CacheControl) and converts HTTP error status codes into the
    project's exception hierarchy.
    """

    def __init__(self,
                 access_token=None,
                 refresh_token=None,
                 client_id=None,
                 client_secret=None,
                 scope=None,
                 api_url=None,
                 redirect_url=None,
                 verify=False,
                 debug=False,
                 ):
        # NOTE(review): TLS verification defaults to False -- confirm this
        # is intentional outside development environments.
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.client_id = client_id
        self.client_secret = client_secret
        self.scope = scope
        self.api_url = api_url
        self.redirect_url = redirect_url
        self.verify = verify
        self.debug = debug
        self.token_url = self.make_url('/token/')

    def make_url(self, resource):
        # Resolve a resource path against the configured API base URL.
        return urljoin(self.api_url, resource)

    def get_token(self, authorization_code):
        """Exchange an authorization code for an access/refresh token."""
        return self.client.fetch_token(self.token_url,
                                       client_secret=self.client_secret,
                                       code=authorization_code,
                                       verify=self.verify)

    def get(self, resource, **kwargs):
        """GET ``resource`` and return the decoded JSON body.

        Retries once on connection errors (safe because GET is
        idempotent); raises a mapped API exception on error status.
        """
        url = self.make_url(resource)
        params = kwargs.pop('params', {})
        client = self.client
        retry_get = fn.retry(2, errors=requests.ConnectionError)(client.get)
        response = retry_get(url, params=params, verify=self.verify, **kwargs)
        self._handle_errors(response)
        data = self._get_json(response)
        self._log(url, data, params)
        return data

    def post(self, resource, **kwargs):
        """POST to ``resource``; handles the files/json incompatibility."""
        client = self.client
        if kwargs.get('files', None) is None:
            return self._non_idempotent_send(resource, client.post, **kwargs)
        else:
            # files and json cannot be sent together, but files and data can
            json = kwargs.pop('json', None)
            return self._non_idempotent_send(resource, client.post, data=json, **kwargs)

    def delete(self, resource, **kwargs):
        """DELETE ``resource`` and return the decoded JSON body."""
        client = self.client
        return self._non_idempotent_send(resource, client.delete, **kwargs)

    def patch(self, resource, **kwargs):
        """PATCH ``resource`` and return the decoded JSON body."""
        client = self.client
        return self._non_idempotent_send(resource, client.patch, **kwargs)

    def put(self, resource, **kwargs):
        """PUT ``resource`` and return the decoded JSON body."""
        client = self.client
        return self._non_idempotent_send(resource, client.put, **kwargs)

    def _non_idempotent_send(self, resource, method, **kwargs):
        # Shared implementation for verbs that must not be retried.
        url = self.make_url(resource)
        response = method(url, verify=self.verify, **kwargs)
        self._handle_errors(response)
        data = self._get_json(response)
        self._log(url, data)
        return data

    @fn.cached_property
    def base_client(self):
        # Low-level oauthlib client holding the token pair.
        return WebApplicationClient(self.client_id,
                                    access_token=self.access_token,
                                    refresh_token=self.refresh_token)

    @fn.cached_property
    def client(self):
        # Cached requests session wrapped with HTTP response caching.
        token = {
            'access_token': self.access_token,
            'refresh_token': self.refresh_token,
        }
        return CacheControl(
            OAuth2Session(client=self.base_client,
                          token=token,
                          redirect_uri=self.redirect_url,
                          )
        )

    def _get_json(self, response):
        # Decode JSON, logging the raw payload when decoding fails so the
        # offending response is captured before the exception propagates.
        try:
            return response.json()
        except Exception:
            log.exception('url: %s\ncontent:\n%s', response.url, response.content, exc_info=True)
            raise

    def _decode_if_json(self, response):
        """
        Decode a JSON response, or the raw response if it's not JSON
        """
        try:
            return self._get_json(response)
        except ValueError:
            return response.content

    def _handle_errors(self, response):
        """Map HTTP status codes onto the project's exception types."""
        code = response.status_code
        if code == 400:
            raise BadRequest(self._decode_if_json(response))
        if code == 401:
            raise UnauthenticatedException(self._decode_if_json(response))
        if code == 403:
            raise PermissionDenied(self._decode_if_json(response))
        if code == 404:
            raise NotFound(self._decode_if_json(response))
        if code == 422:
            raise Unprocessable(self._decode_if_json(response))
        if code == 500:
            if self.debug:
                # NOTE(review): _log_error itself raises APIError(reason),
                # so in debug mode the bare APIError() below is unreachable
                # -- confirm which behavior is intended.
                self._log_error(response)
            raise APIError()

    def _log_error(self, response):
        # Log the full server error page (HTML rendered to text), then
        # raise with the HTTP reason phrase.
        log.error(
            '\n\n -- API ERROR --\n%s: %s\n%s %s\n\n%s' % (
                response.status_code, response.reason, response.request.method,
                response.url, html2text.html2text(response.text)
            )
        )
        raise APIError(response.reason)

    def _log(self, url, data, params=None):
        # Debug-level trace of every successful request/response pair.
        st = 'URL: %s\n Params: %s\n Response: %s' % (url, params, json.dumps(data, indent=2))
        log.debug(st)

    def download(self, resource, filename=None, chunk_size=4096):
        """Relay a streaming response from the API to a file download."""
        url = self.make_url(resource)
        api_response = self.client.get(url, verify=self.verify)
        self._handle_errors(api_response)
        file_response = StreamingHttpResponse(api_response.iter_content(chunk_size=chunk_size),
                                              content_type=api_response.headers['content-type'])
        # Prefer an explicit filename, then the upstream header, then a
        # bare attachment disposition.
        if filename:
            file_response['Content-Disposition'] = 'attachment; filename="%s"' % filename
        elif 'Content-Disposition' in api_response.headers:
            file_response['Content-Disposition'] = api_response.headers['Content-Disposition']
        else:
            file_response['Content-Disposition'] = 'attachment'
        try:
            file_response['Content-Length'] = api_response.headers['Content-Length']
        except KeyError:
            pass  # leave them guessing
        return file_response
#!/usr/env python
from error import SaveError,LoadError,CompressedError
from zipfs import fsopen,isZip,GetFileNameInZip
import os
import pygame
class TextureConverter:
    """Loads textures and coerces them onto the game's shared 8-bit palette,
    quantizing through an external quantizer when needed."""

    def __init__(self, palette=None):
        # Reference surface whose palette every texture must match.
        if palette is not None:
            self.palette_surf = palette
        else:
            self.palette_surf = pygame.image.load('code/palette.bmp')

    def palettesMatch(self, othersurf):
        # True when the other surface already uses the canonical palette.
        return othersurf.get_palette() == self.palette_surf.get_palette()

    def quantizeImage(self, infile, outfile, dither):
        # Deferred import: the quantizer is an optional dependency; an
        # ImportError here is caught by getTexture's except clause.
        import quantizer2.quantizer
        return quantizer2.quantizer.quantize(infile, outfile, 'palette.bmp', dither)

    def fixPalette(self, surf):
        # Re-blit onto a surface carrying the canonical palette.
        newsurf = pygame.Surface(surf.get_size(), 0, surf)
        newsurf.set_palette(self.palette_surf.get_palette())
        newsurf.blit(surf, (0, 0))  # palette mapping should save us.
        return newsurf

    def getTexture(self, filename, dither=False, changedir=True):
        """Load `filename` as an 8-bit surface on the shared palette.

        A leading '-'/'+' on the filename overrides the `dither` flag
        (+ enables, - disables). Raises SaveError when the file is
        missing or quantization fails.

        NOTE(review): if pygame.image.load raises pygame.error, `texsurf`
        is never bound and the later pygame.image.save call will raise
        NameError -- confirm intended error handling.
        """
        gdither = dither
        if filename[0] in ('-', '+'):
            gdither = filename[0] == '+'
            filename = filename[1:]
        if not os.path.exists(filename):
            raise SaveError('Image %s does not exist!' % (filename))
        try:
            texsurf = pygame.image.load(fsopen(filename, 'rb'))
            if texsurf.get_bitsize() == 8 and self.palettesMatch(texsurf):
                return texsurf
        except pygame.error:
            pass
        # Try to quantize
        if changedir:
            # Quantizer expects to run from the parent directory.
            olddir = os.getcwd()
            os.chdir('..')
        try:
            qfilename = 'quant_temp_in.bmp'
            pygame.image.save(texsurf, qfilename)
            if not self.quantizeImage(qfilename, 'quant_temp.tga', gdither):
                os.unlink(qfilename)
                raise SaveError('Quantizing image failed!')
            else:
                texsurf = pygame.image.load('quant_temp.tga')
                texsurf = self.fixPalette(texsurf)
                os.unlink('quant_temp.tga')
                os.unlink(qfilename)
                if changedir:
                    os.chdir(olddir)
                return texsurf
        except ImportError:
            # Quantizer module unavailable: clean up and re-raise.
            if isZip(filename):
                os.unlink(qfilename)
            if changedir:
                os.chdir(olddir)
            raise# SaveError('Bad palette, and missing quantizer!')
module.exports = {
  images: {
    // Allow next/image to serve files hosted on the Umbraco server by
    // extracting the bare hostname from UMBRACO_SERVER_URL
    // (e.g. "https://example.com:443/media" -> "example.com").
    // NOTE(review): this throws at build time if UMBRACO_SERVER_URL is
    // unset -- confirm that is the intended failure mode.
    domains: [process.env.UMBRACO_SERVER_URL.match(/.*\/\/([^:/]*).*/)[1]],
  },
};
# Regression check: only a templated expression inside `register` should
# trigger a templating warning; registering a plain literal name must
# stay silent.
- hosts: testhost
  gather_facts: false
  tasks:
    - name: template in register warns, but no template should not
      debug: msg=unimportant
      register: thisshouldnotwarn
/*-------------------------------------------------------------------------
*
* BIG5 <--> UTF8
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "fmgr.h"
#include "mb/pg_wchar.h"
#include "../../Unicode/big5_to_utf8.map"
#include "../../Unicode/utf8_to_big5.map"
PG_MODULE_MAGIC_EXT(
.name = "utf8_and_big5",
.version = PG_VERSION
);
PG_FUNCTION_INFO_V1(big5_to_utf8);
PG_FUNCTION_INFO_V1(utf8_to_big5);
/* ----------
* conv_proc(
* INTEGER, -- source encoding id
* INTEGER, -- destination encoding id
* CSTRING, -- source string (null terminated C string)
* CSTRING, -- destination string (null terminated C string)
* INTEGER, -- source string length
* BOOL -- if true, don't throw an error if conversion fails
* ) returns INTEGER;
*
* Returns the number of bytes successfully converted.
* ----------
*/
/*
 * Convert a BIG5 string to UTF-8.
 *
 * Arguments follow the fixed encoding-conversion-procedure convention
 * documented above; returns the number of bytes successfully converted.
 */
Datum
big5_to_utf8(PG_FUNCTION_ARGS)
{
	unsigned char *source = (unsigned char *) PG_GETARG_CSTRING(2);
	unsigned char *target = (unsigned char *) PG_GETARG_CSTRING(3);
	int			source_len = PG_GETARG_INT32(4);
	bool		no_error = PG_GETARG_BOOL(5);
	int			n_done;

	CHECK_ENCODING_CONVERSION_ARGS(PG_BIG5, PG_UTF8);

	/* Map BIG5 bytes to Unicode via the static conversion tree. */
	n_done = LocalToUtf(source, source_len, target,
						&big5_to_unicode_tree,
						NULL, 0,
						NULL,
						PG_BIG5,
						no_error);

	PG_RETURN_INT32(n_done);
}
/*
 * Convert a UTF-8 string to BIG5.
 *
 * Mirror image of big5_to_utf8(); returns the number of bytes
 * successfully converted.
 */
Datum
utf8_to_big5(PG_FUNCTION_ARGS)
{
	unsigned char *source = (unsigned char *) PG_GETARG_CSTRING(2);
	unsigned char *target = (unsigned char *) PG_GETARG_CSTRING(3);
	int			source_len = PG_GETARG_INT32(4);
	bool		no_error = PG_GETARG_BOOL(5);
	int			n_done;

	CHECK_ENCODING_CONVERSION_ARGS(PG_UTF8, PG_BIG5);

	/* Map Unicode code points back to BIG5 via the reverse tree. */
	n_done = UtfToLocal(source, source_len, target,
						&big5_from_unicode_tree,
						NULL, 0,
						NULL,
						PG_BIG5,
						no_error);

	PG_RETURN_INT32(n_done);
}
# <License type="Sun Cloud BSD" version="2.2">
#
# Copyright (c) 2005-2009, Sun Microsystems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Neither the name Sun Microsystems, Inc. nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# </License>
'''Common classes for descriptors
A descriptor is a new-style class functionality which allows one to create
custom properties (actually 'property' is an implementation of a descriptor).
Descriptors can provide 3 special methods: __get__, __set__ and __delete__,
which are executed when the corresponding action (getattr, setattr, delattr)
is performed on an instance of a class using the descriptor.
Here's a sample, which implements the functionality of the builtin 'property':
class Property(object):
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self._fget = fget
self._fset = fset
self._fdel = fdel
self.__doc__ = doc
def __get__(self, obj, obj_type=None):
if not self._fget:
raise AttributeError('Can\'t read property')
return self._fget(obj)
def __set__(self, obj, value):
if not self._fset:
raise AttributeError('Can\'t set property')
self._fset(obj, value)
def __delete__(self, obj):
if not self._fdel:
            raise AttributeError('Can\'t delete property')
self._fdel(obj)
Some interesting links:
* http://users.rcn.com/python/download/Descriptor.htm
* http://gulopine.gamemusic.org/2007/nov/23/python-descriptors-part-1-of-2/
'''
class BaseDescriptor(property):
    """Base descriptor for pmtypes attributes.

    Extends the builtin ``property`` so that every assignment is first
    validated against the pmtype stored as the class-level ``PMTYPE``
    attribute, and then against an optional custom check callable.
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None, check=None):
        """Create a pmtypes descriptor.

        The first four arguments mirror the builtin ``property``;
        ``check`` is an extra validator invoked as
        ``check(instance, value)`` on every assignment.
        """
        property.__init__(self, fget=fget, fset=fset, fdel=fdel, doc=doc)
        # Reject anything truthy that cannot actually be called.
        if check and not callable(check):
            raise ValueError('check argument should be a callable')
        self._check = check

    def __set__(self, obj, value):
        # Type-level validation runs first, then the custom hook, and
        # only then is the underlying property setter invoked.
        pmtype = self.PMTYPE
        if not pmtype.check(value):
            raise ValueError('Invalid value for type %s: %r' % \
                             (pmtype.__name__, value))
        custom = self._check
        if custom and not custom(obj, value):
            raise ValueError('Invalid value for property, invalidated by custom check method')
        property.__set__(self, obj, value)
"""
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import re
import stat
import sys
import urllib
from django.utils.http import http_date
from django.utils._os import safe_join
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
    """Raised for errors encountered while running the WSGI dev server."""
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
    """Manage a collection of HTTP response headers.

    Ordered, case-insensitive-lookup wrapper around a list of
    ``(name, value)`` tuples; duplicates and insertion order are kept.

    Fixed: replaced Python 2-only syntax (``<>`` comparison, backtick
    repr) with ``!=`` and ``%r``, which behave identically and are also
    valid on Python 3.
    """

    def __init__(self, headers):
        if not isinstance(headers, list):
            raise TypeError("Headers must be a list of name/value tuples")
        self._headers = headers

    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)

    def __setitem__(self, name, val):
        """Set the value of a header, replacing any previous occurrences."""
        del self[name]
        self._headers.append((name, val))

    def __delitem__(self, name):
        """Delete all occurrences of a header, if present.

        Does *not* raise an exception if the header is missing.
        """
        name = name.lower()
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]

    def __getitem__(self, name):
        """Get the first header value for 'name'

        Return None if the header is missing instead of raising an exception.

        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined. Use getall() to get all
        the values matching a header field name.
        """
        return self.get(name)

    def has_key(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None

    __contains__ = has_key

    def get_all(self, name):
        """Return a list of all the values for the named field.

        These will be sorted in the order they appeared in the original header
        list or were added to this instance, and may contain duplicates. Any
        fields deleted and re-inserted are always appended to the header list.
        If no fields exist with the given name, returns an empty list.
        """
        name = name.lower()
        return [kv[1] for kv in self._headers if kv[0].lower() == name]

    def get(self, name, default=None):
        """Get the first header value for 'name', or return 'default'"""
        name = name.lower()
        for k, v in self._headers:
            if k.lower() == name:
                return v
        return default

    def keys(self):
        """Return a list of all the header field names.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [k for k, v in self._headers]

    def values(self):
        """Return a list of all header values, in original order."""
        return [v for k, v in self._headers]

    def items(self):
        """Get a copy of all the header field/value pairs, in order."""
        return self._headers[:]

    def __repr__(self):
        return "Headers(%r)" % (self._headers,)

    def __str__(self):
        """str() returns the formatted headers, complete with end line,
        suitable for direct HTTP transmission."""
        return '\r\n'.join(["%s: %s" % kv for kv in self._headers] + ['', ''])

    def setdefault(self, name, value):
        """Return first matching header value for 'name', or 'value'

        If there is no header named 'name', add a new header with name 'name'
        and value 'value'."""
        result = self.get(name)
        if result is None:
            self._headers.append((name, value))
            return value
        else:
            return result

    def add_header(self, _name, _value, **_params):
        """Extended header setting.

        _name is the header field to add. keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes. Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.

        Example:

        h.add_header('content-disposition', 'attachment', filename='bud.gif')

        Note that unlike the corresponding 'email.Message' method, this does
        *not* handle '(charset, language, value)' tuples: all values must be
        strings or None.
        """
        parts = []
        if _value is not None:
            parts.append(_value)
        for k, v in _params.items():
            if v is None:
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
    """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
    """
    # Only an explicit affirmative HTTPS marker selects the secure scheme.
    if environ.get("HTTPS") in ('yes', 'on', '1'):
        return 'https'
    return 'http'
# Hop-by-hop header names (RFC 2616 section 13.5.1), lowercased for the
# membership test below.
_hop_headers = dict.fromkeys((
    'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization',
    'te', 'trailers', 'transfer-encoding', 'upgrade'), 1)

def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
chunk_size = min(33554432, length)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
transmission of the file1.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
# Mark headers as sent *before* writing so re-entrant writes do not
# attempt to send them again.
self.headers_sent = True
# HTTP/0.9 clients cannot accept a status line or headers at all.
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
    """Return True when 'self.result' is wrapped by 'self.wsgi_file_wrapper'."""
    if self.wsgi_file_wrapper is None:
        # No wrapper type is configured, so nothing can match it.
        return False
    return isinstance(self.result, self.wsgi_file_wrapper)
def client_is_modern(self):
    """Return True unless the request protocol is HTTP/0.9.

    HTTP/0.9 clients cannot accept a status line or response headers,
    so the server skips sending them for such requests.
    """
    protocol = self.environ['SERVER_PROTOCOL']
    return protocol.upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log

Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
# Drop the traceback reference to break the reference cycle it
# creates with the current frame.
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
# Only substitute the error page if nothing has been sent yet; once
# headers are out there is no clean way to recover the response.
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
"""Return an iterable producing a plain traceback error response.

Passes the current sys.exc_info() to start_response so the server
knows headers are being replaced after an error.
"""
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
# First call writes via stdout, then rebinds self._write to the
# bound write method so later calls skip this indirection.
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
# Same self-rebinding micro-optimization as _write above.
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
# Input stream handed to the application as wsgi.input.
return self.stdin
def get_stderr(self):
# Error stream used for logging tracebacks.
return self.stderr
def add_cgi_vars(self):
# Merge the precomputed base environment into this request's environ.
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
# The WSGI application callable; installed via set_app().
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
# Re-raise as a WSGI-specific error so callers can distinguish
# bind failures (e.g. port already in use) from other errors.
raise WSGIServerException, e
self.setup_environ()
def setup_environ(self):
# Set up base environment
# These CGI variables are shared by every request; per-request
# values (PATH_INFO, QUERY_STRING, ...) are filled in by the
# request handler later.
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
# Return the WSGI application callable currently being served.
return self.application
def set_app(self,application):
# Install the WSGI application callable to serve.
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
"""Request handler that builds a WSGI environ and runs the app."""
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
# Build the per-request WSGI environ from the parsed HTTP request,
# starting from the server's shared base environment.
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
# Python 2 mimetools message: typeheader preserves the raw header,
# type is the parsed fallback.
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
# Copy remaining headers into HTTP_* variables, comma-joining
# repeated headers per the CGI convention.
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
# Stream used by ServerHandler for error logging.
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
sys.stderr.write("[%s] %s\n" % (self.log_date_time_string(), format % args))
class AdminMediaHandler(object):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
"""
def __init__(self, application, media_dir=None):
from django.conf import settings
self.application = application
# Default to the media bundled with django.contrib.admin.
if not media_dir:
import django
self.media_dir = \
os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
else:
self.media_dir = media_dir
self.media_url = settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ADMIN_MEDIA_PREFIX. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ADMIN_MEDIA_PREFIX.
relative_url = url[len(self.media_url):]
relative_path = urllib.url2pathname(relative_url)
# safe_join raises ValueError on path-traversal attempts.
return safe_join(self.media_dir, relative_path)
def __call__(self, environ, start_response):
import os.path
# Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
# all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
if self.media_url.startswith('http://') or self.media_url.startswith('https://') \
or not environ['PATH_INFO'].startswith(self.media_url):
return self.application(environ, start_response)
# Find the admin file and serve it up, if it exists and is readable.
try:
file_path = self.file_path(environ['PATH_INFO'])
except ValueError: # Resulting file path was not valid.
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
start_response(status, headers.items())
return output
if not os.path.exists(file_path):
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
else:
try:
fp = open(file_path, 'rb')
except IOError:
# NOTE(review): 403 FORBIDDEN would describe a permission
# failure more accurately than 401 - confirm before changing.
status = '401 UNAUTHORIZED'
headers = {'Content-type': 'text/plain'}
output = ['Permission denied: %s' % environ['PATH_INFO']]
else:
# This is a very simple implementation of conditional GET with
# the Last-Modified header. It makes media files a bit speedier
# because the files are only read off disk for the first
# request (assuming the browser/client supports conditional
# GET).
mtime = http_date(os.stat(file_path)[stat.ST_MTIME])
headers = {'Last-Modified': mtime}
if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
status = '304 NOT MODIFIED'
output = []
else:
status = '200 OK'
mime_type = mimetypes.guess_type(file_path)[0]
if mime_type:
headers['Content-Type'] = mime_type
output = [fp.read()]
fp.close()
start_response(status, headers.items())
return output
def run(addr, port, wsgi_handler):
"""Serve 'wsgi_handler' on (addr, port); blocks the caller forever."""
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever() | unknown | codeparrot/codeparrot-clean | ||
import sys
import zipfile
from django.contrib.contenttypes.models import ContentType
from django.db import models
from file_import.compat import AUTH_USER_MODEL
if sys.version_info >= (3,0):
unicode = str
class ImportLog(models.Model):
""" A log of all import attempts """
# Job name shown to the user.
name = models.CharField(max_length=255, verbose_name='job name')
# Owner of the import; hidden from forms.
user = models.ForeignKey(AUTH_USER_MODEL, editable=False, related_name='file_import_log')
date = models.DateTimeField(auto_now_add=True, verbose_name='date created')
# Uploaded ZIP archive containing the rows to import.
import_file = models.FileField(upload_to='import_file')
# Optional field name used to match existing rows for updates.
update_key = models.CharField(max_length=200, blank=True)
file_field = models.CharField(max_length=200, blank=True)
# Destination model the rows will be imported into.
content_type = models.ForeignKey(ContentType, verbose_name='destination model')
def __unicode__(self):
return unicode(self.name)
def clean(self):
from django.core.exceptions import ValidationError
# Reject uploads that are not ZIP archives up front.
if not self.import_file or not zipfile.is_zipfile(self.import_file):
raise ValidationError("The file selected does not appear to be a ZIP archive. Please try again.") | unknown | codeparrot/codeparrot-clean | ||
"""Unit tests for evaluators."""
import deepchem as dc
import numpy as np
import unittest
import sklearn
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.evaluate import GeneratorEvaluator
def test_multiclass_threshold_predictions():
    """Check prediction thresholding works correctly."""
    # Build a random row-stochastic matrix of class probabilities.
    probs = np.random.rand(10, 5)
    probs = probs / np.sum(probs, axis=1)[:, None]
    thresholded = dc.metrics.threshold_predictions(probs)
    # One predicted class per example, equal to the per-row argmax.
    assert thresholded.shape == (10,)
    assert np.allclose(thresholded, np.argmax(probs, axis=1))
def test_binary_threshold_predictions():
    """Check prediction thresholding works correctly."""
    # Build a random row-stochastic matrix of two-class probabilities.
    probs = np.random.rand(10, 2)
    probs = probs / np.sum(probs, axis=1)[:, None]
    thresholded = dc.metrics.threshold_predictions(probs, threshold=0.3)
    assert thresholded.shape == (10,)
    # Class 1 is predicted exactly when its probability clears 0.3.
    expected = np.where(probs[:, 1] >= 0.3, np.ones(10), np.zeros(10))
    assert np.allclose(thresholded, expected)
def test_evaluator_dc_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
# dc.metrics.Metric instances key results by metric name.
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
# Raw metric callables get auto-generated "metric-N" keys.
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_sklearn_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
rf = sklearn.ensemble.RandomForestClassifier(50)
model = dc.models.SklearnModel(rf)
model.fit(dataset)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_evaluate_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
multitask_scores = model.evaluate(
dataset, dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_multitask_evaluator():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores, all_task_scores = evaluator.compute_model_performance(
metric, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert isinstance(all_task_scores, dict)
# NOTE(review): repeats the multitask_scores length check; it likely
# meant to assert on all_task_scores (e.g. len == n_tasks) - confirm.
assert len(multitask_scores) == 1
def test_model_evaluate_dc_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = model.evaluate(dataset, metric, [])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_multitask_model_evaluate_sklearn():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores, all_task_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['metric-1'] > 0
assert isinstance(all_task_scores, dict)
# NOTE(review): repeats the multitask_scores length check; it likely
# meant to assert on all_task_scores - confirm.
assert len(multitask_scores) == 1
def test_multitask_model_evaluate():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
multitask_scores, all_task_scores = model.evaluate(
dataset, dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] > 0
assert isinstance(all_task_scores, dict)
def test_evaluator_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric1 = dc.metrics.Metric(dc.metrics.mae_score, n_tasks=2)
metric2 = dc.metrics.Metric(dc.metrics.r2_score, n_tasks=2)
# Passing a list of metrics yields one entry per metric.
multitask_scores = evaluator.compute_model_performance([metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
def test_model_evaluate_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric1 = dc.metrics.Metric(dc.metrics.mae_score)
metric2 = dc.metrics.Metric(dc.metrics.r2_score)
multitask_scores = model.evaluate(dataset, [metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
def test_generator_evaluator_dc_metric_multitask_single_point():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
# GeneratorEvaluator consumes a batch generator instead of a dataset.
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert len(multitask_scores) == 1
def test_evaluator_sklearn_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
# Note that since no name as provided, metrics are index by order
# given.
assert multitask_scores['metric-1'] > 0
def test_generator_evaluator_dc_metric_multitask():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_model_evaluate_sklearn_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(dataset, dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
# Note that since no name as provided, metrics are index by order
# given.
assert multitask_scores['metric-1'] > 0
def test_evaluator_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
[dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
# Note that since no name as provided, metrics are index by order
# given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
def test_model_evaluate_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(
dataset, [dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
# Note that since no name as provided, metrics are index by order
# given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
def test_gc_binary_classification():
"""Test multiclass classification evaluation."""
# Featurize two tiny molecules for a graph-convolution classifier.
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_gc_binary_kappa_classification():
"""Test multiclass classification evaluation."""
# Seed for reproducibility of the random labels.
np.random.seed(1234)
smiles = ["C", "CC", "CO", "CCC", "CCCC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.kappa_score, n_classes=2)
# Cohen's kappa lies in [-1, 1].
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] <= 1
assert multitask_scores["metric-1"] >= -1
def test_gc_multiclass_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(5, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification", n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0 | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.monitoring.dashboard_v1.types import scorecard as gmd_scorecard
from google.monitoring.dashboard_v1.types import text as gmd_text
from google.monitoring.dashboard_v1.types import xychart
from google.protobuf import empty_pb2 as empty # type: ignore
# Proto-plus module descriptor: registers every message named in
# 'manifest' under the given proto package.
__protobuf__ = proto.module(
package="google.monitoring.dashboard.v1", manifest={"Widget",},
)
class Widget(proto.Message):
r"""Widget contains a single dashboard component and
configuration of how to present the component in the dashboard.
Attributes:
title (str):
Optional. The title of the widget.
xy_chart (~.xychart.XyChart):
A chart of time series data.
scorecard (~.gmd_scorecard.Scorecard):
A scorecard summarizing time series data.
text (~.gmd_text.Text):
A raw string or markdown displaying textual
content.
blank (~.empty.Empty):
A blank space.
"""
title = proto.Field(proto.STRING, number=1)
# The fields below form the 'content' oneof: at most one may be set.
xy_chart = proto.Field(
proto.MESSAGE, number=2, oneof="content", message=xychart.XyChart,
)
scorecard = proto.Field(
proto.MESSAGE, number=3, oneof="content", message=gmd_scorecard.Scorecard,
)
text = proto.Field(proto.MESSAGE, number=4, oneof="content", message=gmd_text.Text,)
blank = proto.Field(proto.MESSAGE, number=5, oneof="content", message=empty.Empty,)
__all__ = tuple(sorted(__protobuf__.manifest)) | unknown | codeparrot/codeparrot-clean | ||
import logging
import shutil
import threading
import os
import xml.etree.ElementTree as etree
from datetime import datetime
import requests
import vlc
from dateutil import parser
from dateutil.tz import tzutc
from i3pystatus import IntervalModule
from i3pystatus.core.desktop import DesktopNotification
from i3pystatus.core.util import internet, require
class State:
"""Simple enumeration of player states (values are arbitrary ints)."""
PLAYING = 1
PAUSED = 2
STOPPED = 3
class ABCRadio(IntervalModule):
"""
Streams ABC Australia radio - https://radio.abc.net.au/. Currently uses VLC to do the
actual streaming.
Requires the PyPI packages `python-vlc`, `python-dateutil` and `requests`. Also requires VLC
- https://www.videolan.org/vlc/index.html
.. rubric:: Available formatters
* `{station}` — Current station
* `{title}` — Title of current show
* `{url}` — Show's URL
* `{remaining}` — Time left for current show
* `{player_state}` — Unicode icons representing play, pause and stop
"""
settings = (
("format", "format string for when the player is inactive"),
("format_playing", "format string for when the player is playing"),
("target_stations", "list of station ids to select from. Station ids can be obtained "
"from the following XML - http://www.abc.net.au/radio/data/stations_apps_v3.xml. "
"If the list is empty, all stations will be accessible."),
)
format = "{station} {title} {player_state}"
format_playing = "{station} {title} {remaining} {player_state}"
on_leftclick = 'toggle_play'
on_upscroll = ['cycle_stations', 1]
on_downscroll = ['cycle_stations', -1]
on_doubleleftclick = 'display_notification'
interval = 1
# Destroy the player after this many seconds of inactivity
PLAYER_LIFETIME = 5
# Do not suspend the player when i3bar is hidden.
keep_alive = True
show_info = {}
player = None
station_info = None
station_id = None
stations = None
prev_title = None
prev_station = None
target_stations = []
end = None
start = None
destroy_timer = None
cycle_lock = threading.Lock()
player_icons = {
State.PAUSED: "▷",
State.PLAYING: "▶",
State.STOPPED: "◾",
}
def init(self):
# i3pystatus module start-up hook.
self.station_info = ABCStationInfo()
@require(internet)
def run(self):
# Periodic refresh: pick a station on first run, re-fetch the show
# info once the current show has ended, then render the bar text.
if self.station_id is None:
self.stations = self.station_info.get_stations()
# Select the first station in the list
self.cycle_stations(1)
if self.end and self.end <= datetime.now(tz=tzutc()):
self.update_show_info()
format_dict = self.show_info.copy()
format_dict['player_state'] = self.get_player_state()
format_dict['remaining'] = self.get_remaining()
format_template = self.format_playing if self.player else self.format
self.output = {
"full_text": format_template.format(**format_dict)
}
def update_show_info(self):
# Refresh metadata for the current station; may trigger a desktop
# notification when the show changes while listening.
log.debug("Updating: show_info - %s" % datetime.now())
# Pre-seed every formatter key with '' so .format() never raises.
self.show_info = dict.fromkeys(
('title', 'url', 'start', 'end', 'duration', 'stream', 'remaining', 'station', 'description', 'title',
'short_synopsis', 'url'), '')
self.show_info.update(self.stations[self.station_id])
self.show_info.update(self.station_info.currently_playing(self.station_id))
# Show a notification when the show changes if the user is actively listening.
should_show = self.prev_station == self.show_info['station'] and self.prev_title != self.show_info[
'title'] and self.player
if should_show:
self.display_notification()
self.prev_title = self.show_info['title']
self.prev_station = self.show_info['station']
self.end = self.show_info['end'] if self.show_info['end'] else None
self.start = self.show_info['start'] if self.show_info['start'] else None
def get_player_state(self):
# Icon for the current player state; stopped when no player exists.
if self.player:
return self.player_icons[self.player.player_state]
else:
return self.player_icons[State.STOPPED]
def get_remaining(self):
# "H:MM:SS" left in the current show, or '' when unknown or over.
if self.end and self.end > datetime.now(tz=tzutc()):
return str(self.end - datetime.now(tz=tzutc())).split(".")[0]
return ''
def cycle_stations(self, increment=1):
# Step through target_stations (or all stations) by 'increment',
# preserving the playback state across the switch.
with self.cycle_lock:
target_array = self.target_stations if len(self.target_stations) > 0 else list(self.stations.keys())
if self.station_id in target_array:
next_index = (target_array.index(self.station_id) + increment) % len(target_array)
self.station_id = target_array[next_index]
else:
self.station_id = target_array[0]
log.debug("Cycle to: {}".format(self.station_id))
if self.player:
current_state = self.player.player_state
self.player.stop()
else:
current_state = State.STOPPED
self.update_show_info()
if self.player:
self.player.load_stream(self.show_info['stream'])
self.player.set_state(current_state)
def display_notification(self):
# Desktop notification with station, title, synopsis and a cached
# station icon.
if self.show_info:
station, title, synopsis = self.show_info['station'], self.show_info['title'], self.show_info[
'short_synopsis']
title = "{} - {}".format(station, title)
def get_image():
# Download the station icon once and cache it under /tmp.
image_link = self.show_info.get('image_link', None)
if image_link:
try:
image_path = "/tmp/{}.icon".format(station)
if not os.path.isfile(image_path):
response = requests.get(image_link, stream=True)
with open(image_path, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
return image_path
except:
# Best-effort: a missing icon must not break the notification.
pass
DesktopNotification(title=title, body=synopsis, icon=get_image()).display()
log.info("Displayed notification")
def toggle_play(self):
# Click handler: pause (and schedule player teardown) or resume.
if not self.player:
self.init_player()
if self.player.is_playing():
self.player.pause()
# Free the VLC player if playback stays paused this long.
self.destroy_timer = threading.Timer(self.PLAYER_LIFETIME, self.destroy)
self.destroy_timer.start()
else:
if self.destroy_timer:
self.destroy_timer.cancel()
self.destroy_timer = None
self.player.play()
self.run()
def init_player(self):
# Lazily create the VLC worker thread and load the current stream.
if self.show_info:
self.player = VLCPlayer()
log.info("Created player: {}".format(id(self.player)))
if not self.player.stream_loaded():
log.info("Loading stream: {}".format(self.show_info['stream']))
self.player.load_stream(self.show_info['stream'])
if not self.player.is_alive():
self.player.start()
def destroy(self):
# Tear down the player thread and release its VLC resources.
log.debug("Destroying player: {}".format(id(self.player)))
if self.player:
self.player.destroy()
self.player = None
class ABCStationInfo:
"""Thin HTTP client for the ABC radio metadata APIs."""
PLAYING_URL = "https://program.abcradio.net.au/api/v1/programitems/{}/live.json?include=now"
def currently_playing(self, station_id):
# Return metadata for the show currently on air, or {} when the API
# response lacks the expected structure.
station_info = self._get(self.PLAYING_URL.format(station_id)).json()
try:
return dict(
title=station_info['now']['program']['title'],
url=station_info['now']['primary_webpage']['url'],
start=parser.parse(station_info['now']['live'][0]['start']),
end=parser.parse(station_info['now']['live'][0]['end']),
duration=station_info['now']['live'][0]['duration_seconds'],
short_synopsis=station_info['now']['short_synopsis'],
stream=sorted(station_info['now']['live'][0]['outlets'][0]['audio_streams'], key=lambda x: x['type'])[0]['url']
)
except (KeyError, IndexError):
return {}
def get_stations(self):
# Fetch the station directory XML and index it by station id,
# keeping only entries flagged for the Android app.
stations = dict()
station_xml = etree.fromstring(self._get('http://www.abc.net.au/radio/data/stations_apps_v3.xml').content)
for element in station_xml:
attrib = element.attrib
if attrib["showInAndroidApp"] == 'true':
stations[attrib['id']] = dict(
id=attrib['id'],
station=attrib['name'],
description=attrib.get('description', None),
link=attrib.get('linkUrl', None),
image_link=attrib.get('WEBimageUrl', None),
stream=attrib.get('hlsStreamUrl', None),
)
return stations
def _get(self, url):
# GET helper that raises for any non-2xx response.
result = requests.get(url=url)
if result.status_code not in range(200, 300):
result.raise_for_status()
return result
log = logging.getLogger(__name__)
class VLCPlayer(threading.Thread):
"""Worker thread that drives a libVLC media player.

Other threads request state changes via play()/pause()/stop(); the
run() loop applies them so VLC calls happen on a single thread.
"""
def __init__(self):
threading.Thread.__init__(self)
# 'idle' signals a pending state change; 'die' requests shutdown.
self.idle = threading.Event()
self.die = threading.Event()
self.instance = vlc.Instance()
self.player_state = State.STOPPED
self.player = self.instance.media_player_new()
def run(self):
# Map requested states to the corresponding libVLC calls.
states = {
State.STOPPED: self.player.stop,
State.PLAYING: self.player.play,
State.PAUSED: self.player.pause,
}
while not self.die.is_set():
self.idle.wait()
states[self.player_state]()
self.idle.clear()
def load_stream(self, url):
# Point the player at a new media URL (does not start playback).
self.player.set_media(self.instance.media_new(url))
def stream_loaded(self):
# True once a media object has been attached via load_stream().
return self.player.get_media() is not None
def play(self):
self.set_state(State.PLAYING)
def pause(self):
self.set_state(State.PAUSED)
def stop(self):
self.set_state(State.STOPPED)
def destroy(self):
# Stop the worker loop, halt playback and release VLC resources.
# NOTE(review): setting 'idle' here may wake the worker to call into
# the player around the release; confirm ordering if crashes occur.
self.die.set()
self.idle.set()
self.player.stop()
self.player.release()
def set_state(self, state):
# Record the requested state and wake the worker loop to apply it.
log.info("{} -> {}".format(self.player_state, state))
self.player_state = state
self.idle.set()
def is_playing(self):
return self.player.is_playing() | unknown | codeparrot/codeparrot-clean | ||
from boxbranding import getMachineBrand
from enigma import ePicLoad, eTimer, getDesktop, gMainDC, eSize
from Screens.Screen import Screen
from Tools.Directories import resolveFilename, pathExists, SCOPE_MEDIA, SCOPE_ACTIVE_SKIN
from Components.Pixmap import Pixmap, MovingPixmap
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.FileList import FileList
from Components.AVSwitch import AVSwitch
from Components.Sources.List import List
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSelection, ConfigText, ConfigYesNo, getConfigListEntry
import skin
def getScale():
# Framebuffer aspect scale used by ePicLoad when decoding pictures.
return AVSwitch().getFramebufferScale()
# Persistent user settings for the picture viewer / slideshow.
config.pic = ConfigSubsection()
# Border size around slideshow frames, in pixels.
config.pic.framesize = ConfigInteger(default=30, limits=(5, 99))
# Seconds each slide is shown during a slideshow.
config.pic.slidetime = ConfigInteger(default=10, limits=(1, 60))
# Picture scaling algorithm: "0" = simple, "1" = better quality.
config.pic.resize = ConfigSelection(default="1", choices = [("0", _("simple")), ("1", _("better"))])
config.pic.cache = ConfigYesNo(default=True)
# Last directory browsed; restored on next start.
config.pic.lastDir = ConfigText(default=resolveFilename(SCOPE_MEDIA))
config.pic.infoline = ConfigYesNo(default=True)
config.pic.loop = ConfigYesNo(default=True)
config.pic.bgcolor = ConfigSelection(default="#00000000", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
config.pic.textcolor = ConfigSelection(default="#0038FF48", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
class picshow(Screen):
	"""
	Main picture-player screen: a file browser over image files with a
	thumbnail preview and an info panel for the current selection.

	Keys: red/exit = close, green = thumbnail overview (Pic_Thumb),
	yellow = EXIF data (Pic_Exif), menu = setup (Pic_Setup),
	OK = descend into a directory or open full view (Pic_Full_View).
	"""
	skin = """
	<screen name="picshow" position="center,center" size="560,440" title="Picture player" >
		<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
		<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
		<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
		<widget source="label" render="Label" position="5,55" size="350,140" font="Regular;19" backgroundColor="#25062748" transparent="1" />
		<widget name="thn" position="360,40" size="180,160" alphatest="on" />
		<widget name="filelist" position="5,205" zPosition="2" size="550,230" scrollbarMode="showOnDemand" />
	</screen>"""

	def __init__(self, session):
		Screen.__init__(self, session)
		self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MenuActions"],
		{
			"cancel": self.KeyExit,
			"red": self.KeyExit,
			"green": self.KeyGreen,
			"yellow": self.KeyYellow,
			"menu": self.KeyMenu,
			"ok": self.KeyOk
		}, -1)
		self["key_red"] = StaticText(_("Close"))
		self["key_green"] = StaticText(_("Thumbnails"))
		self["key_yellow"] = StaticText("")
		self["label"] = StaticText("")
		self["thn"] = Pixmap()
		# Restore the last browsed directory; fall back to "/" if it vanished.
		currDir = config.pic.lastDir.value
		if not pathExists(currDir):
			currDir = "/"
		self.filelist = FileList(currDir, matchingPattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp|gif)")
		self["filelist"] = self.filelist
		self["filelist"].onSelectionChanged.append(self.selectionChanged)
		# Timer used to retry thumbnail generation while the decoder is busy.
		self.ThumbTimer = eTimer()
		self.ThumbTimer.callback.append(self.showThumb)
		self.picload = ePicLoad()
		# ePicLoad notifies asynchronously once picture data is ready.
		self.picload.PictureData.get().append(self.showPic)
		self.onLayoutFinish.append(self.setConf)

	def showPic(self, picInfo=""):
		# Callback from ePicLoad: paint the thumbnail and the info text.
		ptr = self.picload.getData()
		if ptr is not None:
			self["thn"].instance.setPixmap(ptr.__deref__())
			self["thn"].show()
			# picInfo is "<path>\n<details>"; show only the details part.
			text = picInfo.split('\n',1)
			self["label"].setText(text[1])
			self["key_yellow"].setText(_("Exif"))

	def showThumb(self):
		# Request a thumbnail for the selected file (files only, not dirs).
		if not self.filelist.canDescent():
			if self.filelist.getCurrentDirectory() and self.filelist.getFilename():
				# A return value of 1 triggers a retry in 500 ms
				# (presumably the decoder/cache is still busy — confirm).
				if self.picload.getThumbnail(self.filelist.getCurrentDirectory() + self.filelist.getFilename()) == 1:
					self.ThumbTimer.start(500, True)

	def selectionChanged(self):
		# Debounce thumbnail loading; clear the preview for directories.
		if not self.filelist.canDescent():
			self.ThumbTimer.start(500, True)
		else:
			self["label"].setText("")
			self["thn"].hide()
			self["key_yellow"].setText("")

	def KeyGreen(self):
		# Open the full-screen thumbnail overview at the current selection.
		self.session.openWithCallback(self.callbackView, Pic_Thumb, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())

	def KeyYellow(self):
		# Show EXIF information for the selected file.
		if not self.filelist.canDescent():
			self.session.open(Pic_Exif, self.picload.getInfo(self.filelist.getCurrentDirectory() + self.filelist.getFilename()))

	def KeyMenu(self):
		# Re-apply decoder settings after the setup screen closes.
		self.session.openWithCallback(self.setConf ,Pic_Setup)

	def KeyOk(self):
		# Enter a directory, or open the full view on a file.
		if self.filelist.canDescent():
			self.filelist.descent()
		else:
			self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())

	def setConf(self, retval=None):
		self.setTitle(_("Picture player"))
		sc = getScale()
		#0=Width 1=Height 2=Aspect 3=use_cache 4=resize_type 5=Background(#AARRGGBB)
		self.picload.setPara((self["thn"].instance.size().width(), self["thn"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), "#00000000"))

	def callbackView(self, val=0):
		# Child screens return the index of the last viewed picture.
		if val > 0:
			self.filelist.moveToIndex(val)

	def KeyExit(self):
		del self.picload
		# Persist the current directory for the next session.
		if self.filelist.getCurrentDirectory() is None:
			config.pic.lastDir.setValue("/")
		else:
			config.pic.lastDir.setValue(self.filelist.getCurrentDirectory())
		config.pic.save()
		self.close()
#------------------------------------------------------------------------------------------
class Pic_Setup(Screen, ConfigListScreen):
	"""
	Setup screen for the picture player based on ConfigListScreen.

	Edits the config.pic.* options (plus config.usage.pic_resolution);
	green/OK saves, red/cancel discards, menu closes recursively.
	"""
	def __init__(self, session):
		Screen.__init__(self, session)
		self.setTitle(_("PicturePlayer"))
		# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
		self.skinName = ["PicturePlayerSetup", "Setup"]
		self.setup_title = _("Settings")
		self.onChangedEntry = []
		self.session = session
		ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
		self["actions"] = ActionMap(["SetupActions", "MenuActions"],
			{
				"cancel": self.keyCancel,
				"save": self.keySave,
				"ok": self.keySave,
				"menu": self.closeRecursive,
			}, -2)
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self["HelpWindow"] = Pixmap()
		self["HelpWindow"].hide()
		self["footnote"] = StaticText("")
		self["description"] = StaticText("")
		self.createSetup()
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		self.setTitle(self.setup_title)

	def createSetup(self):
		# Build the list of editable entries shown in the config list.
		setup_list = [
			getConfigListEntry(_("Slide show interval (sec.)"), config.pic.slidetime),
			getConfigListEntry(_("Scaling mode"), config.pic.resize),
			getConfigListEntry(_("Cache thumbnails"), config.pic.cache),
			getConfigListEntry(_("Show info line"), config.pic.infoline),
			getConfigListEntry(_("Frame size in full view"), config.pic.framesize),
			getConfigListEntry(_("Slide picture in loop"), config.pic.loop),
			getConfigListEntry(_("Background color"), config.pic.bgcolor),
			getConfigListEntry(_("Text color"), config.pic.textcolor),
			# Fixed typo "Fulview resulution" -> "Fullview resolution".
			# NOTE: translation catalogs keyed on the old msgid must be
			# updated as well, otherwise this entry shows untranslated.
			getConfigListEntry(_("Fullview resolution"), config.usage.pic_resolution),
		]
		self["config"].list = setup_list
		self["config"].l.setList(setup_list)

	# Explicit pass-throughs kept for compatibility with skins/subclasses
	# that may hook these names.
	def keyLeft(self):
		ConfigListScreen.keyLeft(self)

	def keyRight(self):
		ConfigListScreen.keyRight(self)

	def keyCancel(self):
		# Close without saving (ConfigListScreen.keySave handles saving).
		self.close()

	# for summary:
	def changedEntry(self):
		for x in self.onChangedEntry:
			x()

	def getCurrentEntry(self):
		return self["config"].getCurrent()[0]

	def getCurrentValue(self):
		return str(self["config"].getCurrent()[1].getText())

	def createSummary(self):
		from Screens.Setup import SetupSummary
		return SetupSummary
#---------------------------------------------------------------------------
class Pic_Exif(Screen):
	"""
	Read-only two-column list screen showing EXIF data for one picture.

	`exiflist` is positional: entry 0 is the file path (displayed as its
	basename), the remaining entries line up with the `exifdesc` labels.
	"""
	skin = """
	<screen name="Pic_Exif" position="center,center" size="560,360" title="Info" >
		<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
		<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget source="menu" render="Listbox" position="5,50" size="550,310" scrollbarMode="showOnDemand" selectionDisabled="1" >
			<convert type="TemplatedMultiContent">
			{
				"template": [ MultiContentEntryText(pos = (5, 5), size = (250, 30), flags = RT_HALIGN_LEFT, text = 0), MultiContentEntryText(pos = (260, 5), size = (290, 30), flags = RT_HALIGN_LEFT, text = 1)],
				"fonts": [gFont("Regular", 20)],
				"itemHeight": 30
			}
			</convert>
		</widget>
	</screen>"""

	def __init__(self, session, exiflist):
		Screen.__init__(self, session)
		self["actions"] = ActionMap(["SetupActions", "ColorActions"],
		{
			"cancel": self.close
		}, -1)
		self["key_red"] = StaticText(_("Close"))
		exifdesc = [_("filename")+':', "EXIF-Version:", "Make:", "Camera:", "Date/Time:", "Width / Height:", "Flash used:", "Orientation:", "User Comments:", "Metering Mode:", "Exposure Program:", "Light Source:", "CompressedBitsPerPixel:", "ISO Speed Rating:", "X-Resolution:", "Y-Resolution:", "Resolution Unit:", "Brightness:", "Exposure Time:", "Exposure Bias:", "Distance:", "CCD-Width:", "ApertureFNumber:"]
		# Pair each value with its label.  Renamed the accumulator from
		# `list` (which shadowed the builtin) and use enumerate() instead
		# of range(len()).
		entries = []
		for idx, value in enumerate(exiflist):
			if idx > 0:
				entries.append((exifdesc[idx], value))
			else:
				# Entry 0 is the full path; show only the basename.
				entries.append((exifdesc[idx], value.split('/')[-1]))
		self["menu"] = List(entries)
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		self.setTitle(_("Info"))
#----------------------------------------------------------------------------------------
# Index constants for the 5-tuples stored in Pic_Thumb.filelist
# (see Pic_Thumb.__init__: (index, framePos, Page, name, full_path)).
T_INDEX = 0      # running index over picture files
T_FRAME_POS = 1  # thumbnail slot position on its page
T_PAGE = 2       # page number the picture appears on
T_NAME = 3       # file name only
T_FULL = 4       # full path to the picture
class Pic_Thumb(Screen):
	"""
	Full-screen thumbnail overview.  The skin XML is generated at runtime
	so that as many thumbnail cells as fit the current desktop resolution
	are laid out in a grid, page by page, with a moving selection frame.

	Closes with (selected index + number of skipped directory entries) so
	the caller's FileList selection can be restored.
	"""
	def __init__(self, session, piclist, lastindex, path):
		self.textcolor = config.pic.textcolor.value
		self.color = config.pic.bgcolor.value
		# Grid geometry (cell spacing/size, label sizes) is skinnable.
		self.spaceX, self.picX, self.spaceY, self.picY, textsize, thumtxt = skin.parameters.get("PicturePlayerThumb",(35, 190, 30, 200, 20, 14))
		pic_frame = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/pic_frame.png")
		self.size_w = getDesktop(0).size().width()
		self.size_h = getDesktop(0).size().height()
		# Python 2 integer division: whole cells per row / column.
		self.thumbsX = self.size_w / (self.spaceX + self.picX) # thumbnails in X
		self.thumbsY = self.size_h / (self.spaceY + self.picY) # thumbnails in Y
		self.thumbsC = self.thumbsX * self.thumbsY # all thumbnails
		# Absolute pixel position of each grid cell, plus generated skin
		# XML for one label and one pixmap widget per cell.
		self.positionlist = []
		skincontent = ""
		posX = -1
		for x in range(self.thumbsC):
			posY = x / self.thumbsX
			posX += 1
			if posX >= self.thumbsX:
				posX = 0
			absX = self.spaceX + (posX*(self.spaceX + self.picX))
			absY = self.spaceY + (posY*(self.spaceY + self.picY))
			self.positionlist.append((absX, absY))
			skincontent += "<widget source=\"label" + str(x) + "\" render=\"Label\" position=\"" + str(absX+5) + "," + str(absY+self.picY-textsize) + "\" size=\"" + str(self.picX - 10) + "," + str(textsize) \
				+ "\" font=\"Regular;" + str(thumtxt) + "\" zPosition=\"2\" transparent=\"1\" noWrap=\"1\" foregroundColor=\"" + self.textcolor + "\" />"
			skincontent += "<widget name=\"thumb" + str(x) + "\" position=\"" + str(absX+5)+ "," + str(absY+5) + "\" size=\"" + str(self.picX -10) + "," + str(self.picY - (textsize*2)) + "\" zPosition=\"2\" transparent=\"1\" alphatest=\"on\" />"
		# Screen, backgroundlabel and MovingPixmap
		self.skin = "<screen position=\"0,0\" size=\"" + str(self.size_w) + "," + str(self.size_h) + "\" flags=\"wfNoBorder\" > \
			<eLabel position=\"0,0\" zPosition=\"0\" size=\""+ str(self.size_w) + "," + str(self.size_h) + "\" backgroundColor=\"" + self.color + "\" />" \
			+ "<widget name=\"frame\" position=\"" + str(self.spaceX)+ "," + str(self.spaceY)+ "\" size=\"" + str(self.picX) + "," + str(self.picY) + "\" pixmap=\"" + pic_frame + "\" zPosition=\"1\" alphatest=\"on\" />" \
			+ skincontent + "</screen>"
		Screen.__init__(self, session)
		self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MovieSelectionActions"],
		{
			"cancel": self.Exit,
			"ok": self.KeyOk,
			"left": self.key_left,
			"right": self.key_right,
			"up": self.key_up,
			"down": self.key_down,
			"showEventInfo": self.StartExif,
		}, -1)
		self["frame"] = MovingPixmap()
		for x in range(self.thumbsC):
			self["label"+str(x)] = StaticText()
			self["thumb"+str(x)] = Pixmap()
		self.Thumbnaillist = []
		self.filelist = []
		self.currPage = -1
		self.dirlistcount = 0
		self.path = path
		# Build filelist of (index, framePos, Page, name, full_path)
		# tuples (see the T_* constants), skipping directory entries but
		# counting them so indices can be mapped back to the caller.
		index = 0
		framePos = 0
		Page = 0
		for x in piclist:
			if not x[0][1]:
				self.filelist.append((index, framePos, Page, x[0][0], path + x[0][0]))
				index += 1
				framePos += 1
				if framePos > (self.thumbsC -1):
					framePos = 0
					Page += 1
			else:
				self.dirlistcount += 1
		self.maxentry = len(self.filelist)-1
		self.index = lastindex - self.dirlistcount
		if self.index < 0:
			self.index = 0
		self.picload = ePicLoad()
		self.picload.PictureData.get().append(self.showPic)
		self.onLayoutFinish.append(self.setPicloadConf)
		# Retry timer used while the thumbnail decoder is busy.
		self.ThumbTimer = eTimer()
		self.ThumbTimer.callback.append(self.showPic)

	def setPicloadConf(self):
		# Configure the decoder for the thumbnail cell size, then paint.
		sc = getScale()
		self.picload.setPara([self["thumb0"].instance.size().width(), self["thumb0"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), self.color])
		self.paintFrame()

	def paintFrame(self):
		# Move the selection frame to the current cell; switch pages when
		# the selection crosses a page boundary.
		if self.maxentry < self.index or self.index < 0:
			return
		pos = self.positionlist[self.filelist[self.index][T_FRAME_POS]]
		self["frame"].moveTo( pos[0], pos[1], 1)
		self["frame"].startMoving()
		if self.currPage != self.filelist[self.index][T_PAGE]:
			self.currPage = self.filelist[self.index][T_PAGE]
			self.newPage()

	def newPage(self):
		self.Thumbnaillist = []
		#clear Labels and Thumbnail
		for x in range(self.thumbsC):
			self["label"+str(x)].setText("")
			self["thumb"+str(x)].hide()
		#paint Labels and fill Thumbnail-List
		for x in self.filelist:
			if x[T_PAGE] == self.currPage:
				self["label"+str(x[T_FRAME_POS])].setText("(" + str(x[T_INDEX]+1) + ") " + x[T_NAME])
				self.Thumbnaillist.append([0, x[T_FRAME_POS], x[T_FULL]])
		#paint Thumbnail start
		self.showPic()

	def showPic(self, picInfo=""):
		# Sequentially decode and paint the page's thumbnails.  Each
		# Thumbnaillist entry is [state, frame_pos, full_path] with
		# state 0 = not requested, 1 = decode running, 2 = painted.
		for x in range(len(self.Thumbnaillist)):
			if self.Thumbnaillist[x][0] == 0:
				# A return of 1 means the decoder is busy: retry later.
				if self.picload.getThumbnail(self.Thumbnaillist[x][2]) == 1:
					self.ThumbTimer.start(500, True)
				else:
					self.Thumbnaillist[x][0] = 1
				break
			elif self.Thumbnaillist[x][0] == 1:
				self.Thumbnaillist[x][0] = 2
				ptr = self.picload.getData()
				if ptr is not None:
					self["thumb" + str(self.Thumbnaillist[x][1])].instance.setPixmap(ptr.__deref__())
					self["thumb" + str(self.Thumbnaillist[x][1])].show()

	# Navigation wraps around at both ends.
	def key_left(self):
		self.index -= 1
		if self.index < 0:
			self.index = self.maxentry
		self.paintFrame()

	def key_right(self):
		self.index += 1
		if self.index > self.maxentry:
			self.index = 0
		self.paintFrame()

	def key_up(self):
		self.index -= self.thumbsX
		if self.index < 0:
			self.index =self.maxentry
		self.paintFrame()

	def key_down(self):
		self.index += self.thumbsX
		if self.index > self.maxentry:
			self.index = 0
		self.paintFrame()

	def StartExif(self):
		if self.maxentry < 0:
			return
		self.session.open(Pic_Exif, self.picload.getInfo(self.filelist[self.index][T_FULL]))

	def KeyOk(self):
		# Open the full view; remember the index to detect changes.
		if self.maxentry < 0:
			return
		self.old_index = self.index
		self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist, self.index, self.path)

	def callbackView(self, val=0):
		# Full view returns the index of the last shown picture.
		self.index = val
		if self.old_index != self.index:
			self.paintFrame()

	def Exit(self):
		del self.picload
		# Re-add skipped directory entries so the caller's FileList index fits.
		self.close(self.index + self.dirlistcount)
#---------------------------------------------------------------------------
class Pic_Full_View(Screen):
	"""
	Full-screen single-picture view with slideshow support.

	Accepts three input list formats (see __init__), optionally switches
	the framebuffer to config.usage.pic_resolution while open, and
	pre-decodes the next picture while the current one is displayed.
	Closes with (last shown index + skipped directory count).
	"""
	def __init__(self, session, filelist, index, path):
		self.textcolor = config.pic.textcolor.value
		self.bgcolor = config.pic.bgcolor.value
		space = config.pic.framesize.value
		self.size_w = size_w = getDesktop(0).size().width()
		self.size_h = size_h = getDesktop(0).size().height()
		# Temporarily switch the framebuffer resolution if configured;
		# restored in Exit().
		if config.usage.pic_resolution.value and (size_w, size_h) != eval(config.usage.pic_resolution.value):
			(size_w, size_h) = eval(config.usage.pic_resolution.value)
			gMainDC.getInstance().setResolution(size_w, size_h)
			getDesktop(0).resize(eSize(size_w, size_h))
		self.skin = "<screen position=\"0,0\" size=\"" + str(size_w) + "," + str(size_h) + "\" flags=\"wfNoBorder\" > \
			<eLabel position=\"0,0\" zPosition=\"0\" size=\""+ str(size_w) + "," + str(size_h) + "\" backgroundColor=\""+ self.bgcolor +"\" /><widget name=\"pic\" position=\"" + str(space) + "," + str(space) + "\" size=\"" + str(size_w-(space*2)) + "," + str(size_h-(space*2)) + "\" zPosition=\"1\" alphatest=\"on\" /> \
			<widget name=\"point\" position=\""+ str(space+5) + "," + str(space+2) + "\" size=\"20,20\" zPosition=\"2\" pixmap=\"skin_default/icons/record.png\" alphatest=\"on\" /> \
			<widget name=\"play_icon\" position=\""+ str(space+25) + "," + str(space+2) + "\" size=\"20,20\" zPosition=\"2\" pixmap=\"skin_default/icons/ico_mp_play.png\" alphatest=\"on\" /> \
			<widget source=\"file\" render=\"Label\" position=\""+ str(space+45) + "," + str(space) + "\" size=\""+ str(size_w-(space*2)-50) + ",25\" font=\"Regular;20\" borderWidth=\"1\" borderColor=\"#000000\" halign=\"left\" foregroundColor=\"" + self.textcolor + "\" zPosition=\"2\" noWrap=\"1\" transparent=\"1\" /></screen>"
		Screen.__init__(self, session)
		self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MovieSelectionActions"],
		{
			"cancel": self.Exit,
			"green": self.PlayPause,
			"yellow": self.PlayPause,
			"blue": self.nextPic,
			"red": self.prevPic,
			"left": self.prevPic,
			"right": self.nextPic,
			"showEventInfo": self.StartExif,
		}, -1)
		self["point"] = Pixmap()      # "decoding" indicator
		self["pic"] = Pixmap()
		self["play_icon"] = Pixmap()  # shown while the slideshow runs
		self["file"] = StaticText(_("please wait, loading picture..."))
		self.old_index = 0
		self.filelist = []
		self.lastindex = index
		# currPic buffers one decoded picture as [info text, index, pixmap ptr].
		self.currPic = []
		self.shownow = True
		self.dirlistcount = 0
		# Normalise the three possible input formats into a flat list of
		# full paths, counting skipped directory entries:
		# 3-tuple entries = original FileList, 2-tuple = scan list,
		# otherwise = Pic_Thumb tuples (T_FULL holds the path).
		for x in filelist:
			if len(filelist[0]) == 3: #orig. filelist
				if not x[0][1]:
					self.filelist.append(path + x[0][0])
				else:
					self.dirlistcount += 1
			elif len(filelist[0]) == 2: #scanlist
				if not x[0][1]:
					self.filelist.append(x[0][0])
				else:
					self.dirlistcount += 1
			else: # thumbnaillist
				self.filelist.append(x[T_FULL])
		self.maxentry = len(self.filelist)-1
		self.index = index - self.dirlistcount
		if self.index < 0:
			self.index = 0
		self.picload = ePicLoad()
		self.picload.PictureData.get().append(self.finish_decode)
		self.slideTimer = eTimer()
		self.slideTimer.callback.append(self.slidePic)
		if self.maxentry >= 0:
			self.onLayoutFinish.append(self.setPicloadConf)

	def setPicloadConf(self):
		# Configure the decoder for the display area and start decoding.
		sc = getScale()
		self.picload.setPara([self["pic"].instance.size().width(), self["pic"].instance.size().height(), sc[0], sc[1], 0, int(config.pic.resize.value), self.bgcolor])
		self["play_icon"].hide()
		if not config.pic.infoline.value:
			self["file"].setText("")
		self.start_decode()

	def ShowPicture(self):
		# Display the buffered picture, then immediately pre-decode the
		# next one so navigation is instant.
		if self.shownow and len(self.currPic):
			self.shownow = False
			if config.pic.infoline.value:
				self["file"].setText(self.currPic[0])
			else:
				self["file"].setText("")
			self.lastindex = self.currPic[1]
			self["pic"].instance.setPixmap(self.currPic[2].__deref__())
			self.currPic = []
			self.next()
			self.start_decode()

	def finish_decode(self, picInfo=""):
		# ePicLoad callback: buffer the decoded picture and its info text.
		self["point"].hide()
		ptr = self.picload.getData()
		if ptr is not None:
			text = ""
			try:
				text = picInfo.split('\n',1)
				text = "(" + str(self.index+1) + "/" + str(self.maxentry+1) + ") " + text[0].split('/')[-1]
			except:
				pass
			self.currPic = []
			self.currPic.append(text)
			self.currPic.append(self.index)
			self.currPic.append(ptr)
			self.ShowPicture()

	def start_decode(self):
		self.picload.startDecode(self.filelist[self.index])
		self["point"].show()

	# Advance/rewind the decode index with wrap-around.
	def next(self):
		self.index += 1
		if self.index > self.maxentry:
			self.index = 0

	def prev(self):
		self.index -= 1
		if self.index < 0:
			self.index = self.maxentry

	def slidePic(self):
		# Slideshow tick: stop at the end unless looping is enabled.
		print "slide to next Picture index=" + str(self.lastindex)
		if config.pic.loop.value == False and self.lastindex == self.maxentry:
			self.PlayPause()
		self.shownow = True
		self.ShowPicture()

	def PlayPause(self):
		# Toggle the slideshow timer.
		if self.slideTimer.isActive():
			self.slideTimer.stop()
			self["play_icon"].hide()
		else:
			self.slideTimer.start(config.pic.slidetime.value*1000)
			self["play_icon"].show()
			self.nextPic()

	def prevPic(self):
		# Discard the pre-decoded picture and decode the previous one.
		self.currPic = []
		self.index = self.lastindex
		self.prev()
		self.start_decode()
		self.shownow = True

	def nextPic(self):
		# The next picture is already (being) decoded; just show it.
		self.shownow = True
		self.ShowPicture()

	def StartExif(self):
		if self.maxentry < 0:
			return
		self.session.open(Pic_Exif, self.picload.getInfo(self.filelist[self.lastindex]))

	def Exit(self):
		del self.picload
		# Restore the original framebuffer resolution if it was changed.
		if config.usage.pic_resolution.value and (self.size_w, self.size_h) != eval(config.usage.pic_resolution.value):
			gMainDC.getInstance().setResolution(self.size_w, self.size_h)
			getDesktop(0).resize(eSize(self.size_w, self.size_h))
		self.close(self.lastindex + self.dirlistcount)
from typing import Optional
from ctypes import *
from vcx.common import do_call, create_cb
from vcx.error import VcxError, ErrorCode
from vcx.api.vcx_stateful import VcxStateful
import json
class Schema(VcxStateful):
    """
    Object that represents a schema written on the ledger.

    Attributes:
        source_id: user generated unique identifier
        schema_id: the ledger ID of the schema
        attrs: attribute/value pairs (the number of attributes should be less or equal than 125)
        version: version of the schema
        transaction: schema transaction that must be published to the ledger by Endorser
    """

    def __init__(self, source_id: str, name: str, version: str, attrs: list, transaction: Optional[str] = None):
        VcxStateful.__init__(self, source_id)
        self._source_id = source_id
        self._schema_id = None
        self._attrs = attrs
        self._name = name
        self._version = version
        self._transaction = transaction

    def __del__(self):
        # Release the underlying libvcx handle when garbage collected.
        self.release()
        self.logger.debug("Deleted {} obj: {}".format(Schema, self.handle))

    @property
    def schema_id(self):
        return self._schema_id

    @schema_id.setter
    def schema_id(self, x):
        self._schema_id = x

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, x):
        self._name = x

    @property
    def attrs(self):
        return self._attrs

    @attrs.setter
    def attrs(self, x):
        self._attrs = x

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, x):
        self._version = x

    @property
    def transaction(self):
        return self._transaction

    @transaction.setter
    def transaction(self, x):
        self._transaction = x

    @staticmethod
    async def create(source_id: str, name: str, version: str, attrs: list, payment_handle: int):
        """
        Creates a new schema object that is written to the ledger

        :param source_id: Institution's unique ID for the schema
        :param name: Name of schema
        :param version: Version of the schema
        :param attrs: Attributes of the schema
        :param payment_handle: NYI - payment of ledger fee is taken from wallet automatically
        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema = await Schema.create(source_id, name, version, attrs, payment_handle)
        :return: schema object, written to ledger
        """
        constructor_params = (source_id, name, version, attrs)
        # Marshal arguments for the C API; attrs is passed as a JSON string.
        c_source_id = c_char_p(source_id.encode('utf-8'))
        c_name = c_char_p(name.encode('utf-8'))
        c_version = c_char_p(version.encode('utf-8'))
        c_schema_data = c_char_p(json.dumps(attrs).encode('utf-8'))
        c_payment = c_uint32(payment_handle)
        c_params = (c_source_id, c_name, c_version, c_schema_data, c_payment)
        schema = await Schema._create("vcx_schema_create", constructor_params, c_params)
        schema.schema_id = await schema.get_schema_id()
        return schema

    @staticmethod
    async def prepare_for_endorser(source_id: str, name: str, version: str, attrs: list, endorser: str):
        """
        Create a new Schema object that will be published by Endorser later.

        :param source_id: Institution's unique ID for the schema
        :param name: Name of schema
        :param version: Version of the schema
        :param attrs: Attributes of the schema
        :param endorser: DID of the Endorser that will submit the transaction.
        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        endorser = 'V4SGRU86Z58d6TV7PBUe6f'
        schema = await Schema.prepare_for_endorser(source_id, name, version, attrs, endorser)
        :return: schema object, schema transaction that should be passed to Endorser for writing to the ledger
        """
        try:
            schema = Schema(source_id, '', '', [])
            # The ctypes callback is created once and cached on the function.
            if not hasattr(Schema.prepare_for_endorser, "cb"):
                schema.logger.debug("vcx_schema_prepare_for_endorser: Creating callback")
                Schema.prepare_for_endorser.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
            c_source_id = c_char_p(source_id.encode('utf-8'))
            c_name = c_char_p(name.encode('utf-8'))
            c_version = c_char_p(version.encode('utf-8'))
            c_schema_data = c_char_p(json.dumps(attrs).encode('utf-8'))
            c_endorser = c_char_p(endorser.encode('utf-8'))
            handle, transaction = await do_call('vcx_schema_prepare_for_endorser',
                                               c_source_id,
                                               c_name,
                                               c_version,
                                               c_schema_data,
                                               c_endorser,
                                               Schema.prepare_for_endorser.cb)
            schema.logger.debug("created schema object")
            schema.attrs = attrs
            schema.name = name
            schema.version = version
            schema.handle = handle
            schema.transaction = transaction
            return schema
        except KeyError:
            raise VcxError(ErrorCode.InvalidSchema)

    @staticmethod
    async def deserialize(data: dict):
        """
        Create the object from a previously serialized object.

        :param data: The output of the "serialize" call
        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
        data1 = await schema1.serialize()
        :return: A re-instantiated object
        """
        try:
            # Todo: Find better way to access attr_names. Potential for issues.
            schema = await Schema._deserialize("vcx_schema_deserialize",
                                               json.dumps(data),
                                               data['data']['source_id'],
                                               data['data']['name'],
                                               data['data']['version'],
                                               data['data']['data'])
            schema.schema_id = await schema.get_schema_id()
            return schema
        except KeyError:
            raise VcxError(ErrorCode.InvalidSchema)

    @staticmethod
    async def lookup(source_id: str, schema_id: str):
        """
        Create a new schema object from an existing ledger schema

        :param source_id: Institution's personal identification for the schema
        :param schema_id: Ledger schema ID for lookup
        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
        id1 = await schema.get_schema_id()
        data = await Schema.lookup(source_id, schema_id)
        assert data.attrs.sort() == ['sex', 'age', 'name', 'height'].sort()
        assert data.name == 'test-licence'
        assert data.handle > 0
        :return: schema object
        """
        try:
            schema = Schema(source_id, '', '', [])
            # The ctypes callback is created once and cached on the function.
            if not hasattr(Schema.lookup, "cb"):
                schema.logger.debug("vcx_schema_get_attributes: Creating callback")
                Schema.lookup.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
            c_source_id = c_char_p(source_id.encode('utf-8'))
            c_schema_id = c_char_p(schema_id.encode('utf-8'))
            handle, data = await do_call('vcx_schema_get_attributes',
                                         c_source_id,
                                         c_schema_id,
                                         Schema.lookup.cb)
            schema.logger.debug("created schema object")
            schema_result = json.loads(data.decode())
            schema.attrs = schema_result['data']
            schema.name = schema_result['name']
            schema.version = schema_result['version']
            schema.handle = handle
            return schema
        except KeyError:
            raise VcxError(ErrorCode.InvalidSchema)

    async def serialize(self) -> dict:
        """
        Serialize the object for storage

        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
        data1 = await schema1.serialize()
        :return: serialized object
        """
        return await self._serialize(Schema, 'vcx_schema_serialize')

    def release(self) -> None:
        """
        destroy the object and release any memory associated with it

        :return: None
        """
        self._release(Schema, 'vcx_schema_release')

    async def get_schema_id(self):
        """
        Get the ledger ID of the object

        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
        id1 = await schema.get_schema_id()
        :return: ID string
        """
        cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
        c_handle = c_uint32(self.handle)
        id = await do_call('vcx_schema_get_schema_id', c_handle, cb)
        return id.decode()

    async def get_payment_txn(self):
        """
        Get the payment transaction information generated when paying the ledger fee

        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
        txn = await schema1.get_payment_txn()
        :return: JSON object with input address and output UTXOs
        """
        if not hasattr(Schema.get_payment_txn, "cb"):
            self.logger.debug("vcx_schema_get_payment_txn: Creating callback")
            Schema.get_payment_txn.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
        c_credential_handle = c_uint32(self.handle)
        payment_txn = await do_call('vcx_schema_get_payment_txn',
                                    c_credential_handle,
                                    Schema.get_payment_txn.cb)
        return json.loads(payment_txn.decode())

    async def update_state(self) -> int:
        """
        Checks if schema is published on the Ledger and updates the state

        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema = await Schema.create(source_id, name, version, attrs, payment_handle)
        assert await schema.update_state() == PublicEntityState.Published
        :return: Current state of the schema
        """
        return await self._update_state(Schema, 'vcx_schema_update_state')

    async def get_state(self) -> int:
        """
        Get the current state of the schema object

        Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema = await Schema.create(source_id, name, version, attrs, payment_handle)
        assert await schema.get_state() == PublicEntityState.Published
        :return: Current internal state of the schema
        """
        return await self._get_state(Schema, 'vcx_schema_get_state')
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import django_sites as sites
from taiga.base.utils.urls import get_absolute_url, is_absolute_url, build_url
from taiga.base.utils.db import save_in_bulk, update_in_bulk, update_in_bulk_with_ids
def test_is_absolute_url():
    # Absolute URLs carry an explicit scheme in front of "://".
    for url in ("http://domain/path", "https://domain/path"):
        assert is_absolute_url(url)
    assert not is_absolute_url("://domain/path")
def test_get_absolute_url():
    # Absolute URLs pass through untouched; relative paths are expanded
    # against the current site's scheme and domain.
    current_site = sites.get_current()
    assert get_absolute_url("http://domain/path") == "http://domain/path"
    expected = build_url("/path", domain=current_site.domain, scheme=current_site.scheme)
    assert get_absolute_url("/path") == expected
def test_save_in_bulk():
    # save() must be invoked once per list element.
    obj = mock.Mock()
    save_in_bulk([obj, obj])
    assert obj.save.call_count == 2
def test_save_in_bulk_with_a_callback():
    # The optional callback fires once per saved instance.
    obj = mock.Mock()
    cb = mock.Mock()
    save_in_bulk([obj, obj], cb)
    assert cb.call_count == 2
def test_update_in_bulk():
    # Each values dict is applied to the matching instance before save().
    obj = mock.Mock()
    update_in_bulk([obj, obj], [{"field1": 1}, {"field2": 2}])
    assert obj.save.call_count == 2
    assert obj.field1 == 1
    assert obj.field2 == 2
def test_update_in_bulk_with_a_callback():
    # The optional callback fires once per updated instance.
    obj = mock.Mock()
    cb = mock.Mock()
    update_in_bulk([obj, obj], [{"field1": 1}, {"field2": 2}], cb)
    assert cb.call_count == 2
def test_update_in_bulk_with_ids():
    # For every id a filter(id=...).update(**values) pair must be issued.
    model = mock.Mock()
    update_in_bulk_with_ids([1, 2], [{"field1": 1}, {"field2": 2}], model)
    model.objects.filter.assert_has_calls([
        mock.call(id=1), mock.call().update(field1=1),
        mock.call(id=2), mock.call().update(field2=2),
    ])
#!/usr/bin/env python
"""
Lists all the unique parts and their colors in a .mpd file. This
is sometimes useful for determining the name of a part and/or a
color.
Hazen 04/15
"""
import os
import re
import sys
import opensdraw.lcad_lib.datFileParser as datFileParser
# Require exactly one argument: the .mpd/.ldraw file to analyze.
if (len(sys.argv) != 2):
    print("usage: <ldraw file (input)>")
    exit()
# NOTE(review): this module-level dict appears unused here — PartsFinder
# keeps its own `parts`/`sub_parts` dictionaries. Confirm before removing.
parts = {}
def arrayToString(array):
    """
    Join the elements of `array` into one string and replace every
    character that is not a letter, digit or dot with an underscore.
    The assumption is that no part in the LDraw library is going to
    have a space in the name.
    """
    sanitizer = re.compile(r'[^a-zA-Z0-9\.]')
    return sanitizer.sub('_', "".join(array))
class PartsFinder(datFileParser.Parser):
    """
    This class finds all the parts in ldraw format file and records the
    name, id and color of the parts that also exist in the ldraw parts
    library.

    Results accumulate in self.parts and self.sub_parts, keyed by
    "<ldraw id>_<ldraw color>" and mapping to the part description.
    """
    def __init__(self):
        datFileParser.Parser.__init__(self, None, None)
        # key "<id>_<color>" -> description (first line of the .dat file)
        self.sub_parts = {}
        self.parts = {}

    # Parser callbacks for geometry/commands are intentionally no-ops:
    # only file references (newFile) matter for collecting part names.
    def command(self, parsed_line):
        pass

    def endFile(self):
        pass

    def line(self, parsed_line):
        pass

    def newFile(self, parsed_line):
        # parsed_line[1] is the color code, parsed_line[14] the referenced
        # file name of an LDraw type-1 (sub-file reference) line.
        ldraw_color = parsed_line[1]
        ldraw_id = parsed_line[14]
        part_file = None
        # Try and find part in parts folder.
        try:
            part_file = datFileParser.findPartFile(ldraw_id)
        except IOError:
            # Not in the library; ignore.
            pass
        # If this part exists, figure out whether it is a part or a sub-part
        # based on the path & add to the appropriate dictionary.
        if part_file is not None:
            is_part = True
            if (os.path.split(os.path.split(part_file)[0])[1] != "parts"):
                is_part = False
            # The first line of a .dat file is its description ("0 <desc>").
            fp = open(part_file)
            description = fp.readline()[2:].strip()
            fp.close()
            part_id = ldraw_id + "_" + ldraw_color
            if is_part:
                self.parts[part_id] = description
            else:
                self.sub_parts[part_id] = description

    def optionalLine(self, parsed_line):
        pass

    def quadrilateral(self, parsed_line):
        pass

    def startFile(self, depth):
        pass

    def triangle(self, parsed_line):
        pass
# Find all the parts.
partsFinder = PartsFinder()
datFileParser.parsePartFile(partsFinder, sys.argv[1])

# Keys have the form "<file name>_<color code>". Split on the *last*
# underscore so part file names that themselves contain underscores are
# handled correctly (the color code is always the final component).
# part_id[:-4] strips the ".dat" extension.
print("Parts:")
for key in sorted(partsFinder.parts, key = partsFinder.parts.get):
    [part_id, part_color] = key.rsplit("_", 1)
    print(part_id[:-4] + ", " + part_color + ", " + partsFinder.parts[key])
print("\n")

print("Sub Parts:")
for key in sorted(partsFinder.sub_parts, key = partsFinder.sub_parts.get):
    [part_id, part_color] = key.rsplit("_", 1)
    print(part_id[:-4] + ", " + part_color + ", " + partsFinder.sub_parts[key])
print("\n")
#
# The MIT License
#
# Copyright (c) 2015 Hazen Babcock
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# | unknown | codeparrot/codeparrot-clean | ||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#include <c10/util/error.h>
#include <thread>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_nnpack_available_native.h>
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros.h>
#endif
#if !AT_NNPACK_ENABLED()
namespace at::native {

// Stub implementations compiled when ATen is built without NNPACK support.

at::Tensor _nnpack_spatial_convolution(
    const Tensor& input,
    const Tensor& weight, const std::optional<Tensor>& bias_opt,
    const IntArrayRef padding,
    const IntArrayRef stride) {
  // Always an error in this build: there is no NNPACK backend to dispatch to.
  TORCH_CHECK(false, "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
}

// NNPACK can never be used in this build.
bool _nnpack_available() {
  return false;
}

} // namespace at::native
#else
#include <nnpack.h>
#include <caffe2/utils/threadpool/pthreadpool-cpp.h>
#include <ATen/native/ConvUtils.h>
#include <ATen/Parallel.h>
#include <c10/util/irange.h>
namespace at::native {
static bool init_nnpack() {
const static nnp_status nnpack_status = nnp_initialize();
auto nnpack_successfully_initialized_ = (nnp_status_success == nnpack_status);
if (nnpack_status != nnp_status_success) {
if (nnpack_status == nnp_status_out_of_memory) {
LOG(WARNING) << "Could not initialize NNPACK! Reason: Out of memory.";
} else if (nnpack_status == nnp_status_unsupported_hardware) {
LOG(WARNING) << "Could not initialize NNPACK! Reason: Unsupported hardware.";
} else {
LOG(WARNING) << "Could not initialize NNPACK! Reason: Unknown error!";
}
}
return nnpack_successfully_initialized_;
}
// Return the threadpool NNPACK should run on. On mobile this is the shared
// caffe2 pthreadpool; elsewhere a process-wide pool is created lazily on
// first call. A null return makes NNPACK run single-threaded.
static pthreadpool_t nnpack_threadpool() {
#ifdef C10_MOBILE
  return caffe2::pthreadpool_();
#else
  static pthreadpool_t nnpack_threadpool_ = nullptr;
  static bool called_nnpack_threadpool_ = false;
  // NOTE(review): this lazy init is not mutex-guarded; concurrent first
  // calls could race on called_nnpack_threadpool_. Confirm callers
  // serialize the first invocation.
  if (!called_nnpack_threadpool_) {
    called_nnpack_threadpool_ = true;
#ifdef INTRA_OP_PARALLEL
    // Honor ATen's configured intra-op thread count when available.
    const uint32_t threads = at::get_num_threads();
#else
    const uint32_t threads = std::thread::hardware_concurrency();
#endif
    nnpack_threadpool_ = pthreadpool_create(threads);
    if (!nnpack_threadpool_) {
      LOG(WARNING) << "Failed to initialize pthreadpool! Running NNPACK in single-threaded mode.";
    }
  }
  return nnpack_threadpool_;
#endif
}
// Whether NNPACK can be used in this process: true iff nnp_initialize()
// succeeded (the result is cached inside init_nnpack).
bool _nnpack_available() {
  return init_nnpack();
}
namespace {
// Scratch buffer for NNPACK. `size` follows NNPACK's sizing protocol: the
// caller first runs with a null buffer so NNPACK writes the required size,
// then calls allocate() and re-runs with the buffer in place.
struct Workspace {
  void* buffer = nullptr;
  size_t size = 0;

  void deallocate() {
    if (buffer) {
      // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
      std::free(buffer);
      buffer = nullptr;
    }
  }

  void allocate() {
    deallocate();

    // NNPack has alignment requirements
    constexpr size_t nnpack_memory_alignment_boundary = 64;

    // Won't work on Windows, but NNPACK doesn't support Windows either
    auto res = posix_memalign(&buffer, nnpack_memory_alignment_boundary, size);
    if (res != 0) {
      // posix_memalign returns the error code directly and does NOT set
      // errno, so report `res` (the previous code mistakenly formatted
      // errno here, which is unspecified at this point).
      TORCH_CHECK(false, "posix_memalign failed:", c10::utils::str_error(res), " (", res, ")");
    }
  }

  ~Workspace() {
    deallocate();
  }
};
} // namespace
// Make thread_local for safety in cases where we have multiple threads
// running Convs at once. The buffer persists for the thread's lifetime and
// is lazily (re)allocated by _nnpack_spatial_convolution below.
static thread_local Workspace workspace;
// Forward 2-D convolution via NNPACK (CPU, float32, NCHW layout only).
//
// Batch-size-1 or strided cases use NNPACK's per-image inference API;
// otherwise the batched API is used. The thread-local `workspace` is sized
// by a first probe run and reallocated on nnp_status_insufficient_buffer.
Tensor _nnpack_spatial_convolution(
    const Tensor& input,
    const Tensor& weight, const std::optional<Tensor>& bias_opt,
    const IntArrayRef padding,
    const IntArrayRef stride) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
  const Tensor& bias = *bias_maybe_owned;

  at::Tensor output = at::empty(
      conv_output_size(input.sizes(), weight.sizes(), padding, stride),
      input.options());

  // Our input Tensor must be in the form N,C,H,W
  TORCH_CHECK(
      input.ndimension() == 4,
      "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");
  // Our weight Tensor must be in the form oC,iC,kH,kW
  TORCH_CHECK(
      weight.ndimension() == 4,
      "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");
  // Our output Tensor must be in the form N,oC,oH,oW
  TORCH_CHECK(
      output.ndimension() == 4,
      "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");

  // Some basic shape checking, not comprehensive
  TORCH_CHECK(
      input.size(1) == weight.size(1),
      "Mismatch between number of input channels in input Tensor (",
      input.size(1),
      ") and weight Tensor (",
      weight.size(1),
      ") in NNPack convolutionOutput");
  TORCH_CHECK(
      weight.size(0) == output.size(1),
      "Mismatch between number of output channels in weight Tensor (",
      weight.size(0),
      ") and output Tensor (",
      output.size(1),
      ") in NNPack convolutionOutput");
  TORCH_CHECK(
      input.size(0) == output.size(0),
      "Mismatch between batch size in input Tensor (",
      input.size(0),
      ") and output Tensor (",
      output.size(0),
      ") in NNPack convolutionOutput");

  // All Tensors must be float Tensors
  if (input.device().type() != kCPU || input.scalar_type() != kFloat ||
      weight.device().type() != kCPU || weight.scalar_type() != kFloat ||
      output.device().type() != kCPU || output.scalar_type() != kFloat ||
      (bias.defined() && (bias.device().type() != kCPU || bias.scalar_type() != kFloat))) {
    TORCH_CHECK(false, "Mismatched Tensor types in NNPack convolutionOutput");
  }

  const auto algorithm = nnp_convolution_algorithm_auto;
  const size_t input_channels = input.size(1);
  const size_t output_channels = weight.size(0);
  // NNPACK sizes are {width, height}, i.e. W comes from dim 3, H from dim 2.
  const struct nnp_size input_size = {
      .width = static_cast<size_t>(input.size(3)),
      .height = static_cast<size_t>(input.size(2)),
  };
  const struct nnp_padding input_padding = {
      .top = static_cast<size_t>(padding[0]),
      .right = static_cast<size_t>(padding[1]),
      .bottom = static_cast<size_t>(padding[0]),
      .left = static_cast<size_t>(padding[1]),
  };
  const struct nnp_size kernel_size = {
      .width = static_cast<size_t>(weight.size(3)),
      .height = static_cast<size_t>(weight.size(2)),
  };
  const struct nnp_size output_size = {
      .width = static_cast<size_t>(output.size(3)),
      .height = static_cast<size_t>(output.size(2)),
  };
  const nnp_size output_subsample = {
      .width = static_cast<std::size_t>(stride[1]),
      .height = static_cast<std::size_t>(stride[0]),
  };

  // NNPACK requires dense buffers.
  const auto input_ = input.contiguous();
  const auto weight_ = weight.contiguous();
  // If we don't have a defined bias Tensor, we need to create one filled with zeroes
  const auto bias_ = bias.defined() ? bias.contiguous() : at::zeros({weight.size(0)}, input.options());

  // Run NNPACK over the whole batch. Passing workspace.buffer == nullptr
  // makes NNPACK report the required scratch size via &workspace.size.
  const auto compute = [&](const size_t batch_size) -> nnp_status {
    if ((batch_size == 1) || (output_subsample.width != 1) || (output_subsample.height != 1)) {
      // The inference API handles one image at a time, so step through the
      // batch manually using per-image offsets.
      const size_t input_size_per_batch = input_channels * input_size.width * input_size.height;
      const size_t output_size_per_batch = output_channels * output_size.width * output_size.height;

      for (const auto batch : c10::irange(0u, batch_size)) {
        const nnp_status status = nnp_convolution_inference(
            algorithm,
            nnp_convolution_transform_strategy_compute,
            input_channels,
            output_channels,
            input_size,
            input_padding,
            kernel_size,
            output_subsample,
            input_.data_ptr<float>() + batch * input_size_per_batch,
            weight_.data_ptr<float>(),
            bias_.data_ptr<float>(),
            output.data_ptr<float>() + batch * output_size_per_batch,
            workspace.buffer,
            &workspace.size,
            nnp_activation_identity,
            nullptr,
            nnpack_threadpool(),
            nullptr );

        if (nnp_status_success != status) {
          return status;
        }
      }

      return nnp_status_success;
    }
    else {
      return nnp_convolution_output(
          algorithm,
          batch_size,
          input_channels,
          output_channels,
          input_size,
          input_padding,
          kernel_size,
          input_.data_ptr<float>(),
          weight_.data_ptr<float>(),
          bias_.data_ptr<float>(),
          output.data_ptr<float>(),
          workspace.buffer,
          &workspace.size,
          nnp_activation_identity,
          nullptr,
          nnpack_threadpool(),
          nullptr );
    }
  };

  const size_t batch_size = input.size(0);

  auto size_and_allocate_ws = [&]() {
    // Run a single pass to get the size of memory workspace buffer
    const auto status = compute(batch_size);
    TORCH_CHECK(
        status == nnp_status_success,
        "NNPACK SpatialConvolution_updateOutput failed");
    workspace.allocate();
  };

  // If no workspace created yet, allocate it
  if (workspace.buffer == nullptr) {
    size_and_allocate_ws();
  }

  // Try to run with the newly created, or existing workspace
  auto status = compute(batch_size);

  if (status == nnp_status_insufficient_buffer) {
    // Need to reallocate the workspace
    workspace.deallocate();
    size_and_allocate_ws();

    // Try one more time
    status = compute(batch_size);
  }

  TORCH_CHECK(
      status == nnp_status_success,
      "NNPACK SpatialConvolution_updateOutput failed");

  return output;
}
} // namespace at::native
#endif // AT_NNPACK_ENABLED | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/NNPACK.cpp |
# Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
from django.contrib.gis.geos.mutable_list import ListMixin
from django.utils import unittest
class UserListA(ListMixin):
    """List-like test double backed by an immutable tuple. Exercises the
    ListMixin protocol (_set_list / _get_single_external) without providing
    single-item assignment (see UserListB for that variant)."""
    _mytype = tuple

    def __init__(self, i_list, *args, **kwargs):
        self._list = self._mytype(i_list)
        super(UserListA, self).__init__(*args, **kwargs)

    def __len__(self): return len(self._list)

    def __str__(self): return str(self._list)

    def __repr__(self): return repr(self._list)

    def _set_list(self, length, items):
        # this would work:
        # self._list = self._mytype(items)
        # but then we wouldn't be testing length parameter
        itemList = ['x'] * length
        for i, v in enumerate(items):
            itemList[i] = v
        self._list = self._mytype(itemList)

    def _get_single_external(self, index):
        return self._list[index]
class UserListB(UserListA):
    # Same harness as UserListA but backed by a mutable list, which allows
    # implementing in-place single-item assignment for ListMixin.
    _mytype = list

    def _set_single(self, index, value):
        self._list[index] = value
def nextRange(length):
    """Return a fresh run of `length` consecutive integers, advancing the
    start by 100 on every call so successive ranges never overlap."""
    new_start = nextRange.start + 100
    nextRange.start = new_start
    return range(new_start, new_start + length)

nextRange.start = 0
class ListMixinTest(unittest.TestCase):
    """
    Tests base class ListMixin by comparing a list clone which is
    a ListMixin subclass with a real Python list.

    Every test mutates/queries a plain list `pl` and a clone `ul` in
    parallel and asserts they stay identical.
    """
    limit = 3
    listType = UserListA

    def lists_of_len(self, length=None):
        # Return a (plain list, clone) pair with the same contents.
        if length is None: length = self.limit
        pl = range(length)
        return pl, self.listType(pl)

    def limits_plus(self, b):
        # Indices from -limit-b to limit+b-1, covering out-of-range slices.
        return range(-self.limit - b, self.limit + b)

    def step_range(self):
        # All valid non-zero slice steps.
        return range(-1 - self.limit, 0) + range(1, 1 + self.limit)

    def test01_getslice(self):
        'Slice retrieval'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(1):
            self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
            self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
            for j in self.limits_plus(1):
                self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i,j))
                for k in self.step_range():
                    self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i,j,k))
            for k in self.step_range():
                self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i,k))
                self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i,k))
        for k in self.step_range():
            self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))

    def test02_setslice(self):
        'Slice assignment'
        def setfcn(x,i,j,k,L): x[i:j:k] = range(L)
        pl, ul = self.lists_of_len()
        for slen in range(self.limit + 1):
            ssl = nextRange(slen)
            ul[:] = ssl
            pl[:] = ssl
            self.assertEqual(pl, ul[:], 'set slice [:]')

            for i in self.limits_plus(1):
                ssl = nextRange(slen)
                ul[i:] = ssl
                pl[i:] = ssl
                self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))

                ssl = nextRange(slen)
                ul[:i] = ssl
                pl[:i] = ssl
                self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))

                for j in self.limits_plus(1):
                    ssl = nextRange(slen)
                    ul[i:j] = ssl
                    pl[i:j] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))

                # Extended slices require an exactly matching source length.
                for k in self.step_range():
                    ssl = nextRange( len(ul[i:j:k]) )
                    ul[i:j:k] = ssl
                    pl[i:j:k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))

                    sliceLen = len(ul[i:j:k])
                    self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
                    if sliceLen > 2:
                        self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)

                for k in self.step_range():
                    ssl = nextRange( len(ul[i::k]) )
                    ul[i::k] = ssl
                    pl[i::k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))

                    ssl = nextRange( len(ul[:i:k]) )
                    ul[:i:k] = ssl
                    pl[:i:k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))

            for k in self.step_range():
                ssl = nextRange(len(ul[::k]))
                ul[::k] = ssl
                pl[::k] = ssl
                self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))

    def test03_delslice(self):
        'Delete slice'
        for Len in range(self.limit):
            pl, ul = self.lists_of_len(Len)
            del pl[:]
            del ul[:]
            self.assertEqual(pl[:], ul[:], 'del slice [:]')
            for i in range(-Len - 1, Len + 1):
                pl, ul = self.lists_of_len(Len)
                del pl[i:]
                del ul[i:]
                self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
                pl, ul = self.lists_of_len(Len)
                del pl[:i]
                del ul[:i]
                self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
                for j in range(-Len - 1, Len + 1):
                    pl, ul = self.lists_of_len(Len)
                    del pl[i:j]
                    del ul[i:j]
                    self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i,j))
                    for k in range(-Len - 1,0) + range(1,Len):
                        pl, ul = self.lists_of_len(Len)
                        del pl[i:j:k]
                        del ul[i:j:k]
                        self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i,j,k))

                for k in range(-Len - 1,0) + range(1,Len):
                    pl, ul = self.lists_of_len(Len)
                    del pl[:i:k]
                    del ul[:i:k]
                    self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i,k))

                    pl, ul = self.lists_of_len(Len)
                    del pl[i::k]
                    del ul[i::k]
                    self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i,k))

            for k in range(-Len - 1,0) + range(1,Len):
                pl, ul = self.lists_of_len(Len)
                del pl[::k]
                del ul[::k]
                self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))

    def test04_get_set_del_single(self):
        'Get/set/delete single item'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(0):
            self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            pl[i] = 100
            ul[i] = 100
            self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            del pl[i]
            del ul[i]
            self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)

    def test05_out_of_range_exceptions(self):
        'Out of range exceptions'
        def setfcn(x, i): x[i] = 20
        def getfcn(x, i): return x[i]
        def delfcn(x, i): del x[i]
        pl, ul = self.lists_of_len()
        for i in (-1 - self.limit, self.limit):
            self.assertRaises(IndexError, setfcn, ul, i) # 'set index %d' % i)
            self.assertRaises(IndexError, getfcn, ul, i) # 'get index %d' % i)
            self.assertRaises(IndexError, delfcn, ul, i) # 'del index %d' % i)

    def test06_list_methods(self):
        'List methods'
        pl, ul = self.lists_of_len()
        pl.append(40)
        ul.append(40)
        self.assertEqual(pl[:], ul[:], 'append')

        pl.extend(range(50,55))
        ul.extend(range(50,55))
        self.assertEqual(pl[:], ul[:], 'extend')

        pl.reverse()
        ul.reverse()
        self.assertEqual(pl[:], ul[:], 'reverse')

        for i in self.limits_plus(1):
            pl, ul = self.lists_of_len()
            pl.insert(i,50)
            ul.insert(i,50)
            self.assertEqual(pl[:], ul[:], 'insert at %d' % i)

        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
            self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)

        pl, ul = self.lists_of_len()
        # Fixed: was ul.pop(i), which relied on the stale loop index `i`
        # (it only worked because i happened to equal the last valid index).
        self.assertEqual(pl.pop(), ul.pop(), 'popped value')
        self.assertEqual(pl[:], ul[:], 'after pop')

        pl, ul = self.lists_of_len()
        def popfcn(x, i): x.pop(i)
        self.assertRaises(IndexError, popfcn, ul, self.limit)
        self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)

        pl, ul = self.lists_of_len()
        for val in range(self.limit):
            self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)

        for val in self.limits_plus(2):
            self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)

        for val in range(self.limit):
            pl, ul = self.lists_of_len()
            pl.remove(val)
            ul.remove(val)
            self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)

        def indexfcn(x, v): return x.index(v)
        def removefcn(x, v): return x.remove(v)
        self.assertRaises(ValueError, indexfcn, ul, 40)
        self.assertRaises(ValueError, removefcn, ul, 40)

    def test07_allowed_types(self):
        'Type-restricted list'
        pl, ul = self.lists_of_len()
        ul._allowed = (int, long)
        ul[1] = 50
        ul[:2] = [60, 70, 80]
        def setfcn(x, i, v): x[i] = v
        self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
        self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), ('hello','goodbye'))

    def test08_min_length(self):
        'Length limits'
        pl, ul = self.lists_of_len()
        ul._minlength = 1
        def delfcn(x,i): del x[:i]
        def setfcn(x,i): x[:i] = []
        for i in range(self.limit - ul._minlength + 1, self.limit + 1):
            self.assertRaises(ValueError, delfcn, ul, i)
            self.assertRaises(ValueError, setfcn, ul, i)
        del ul[:ul._minlength]

        ul._maxlength = 4
        for i in range(0, ul._maxlength - len(ul)):
            ul.append(i)
        self.assertRaises(ValueError, ul.append, 10)

    def test09_iterable_check(self):
        'Error on assigning non-iterable to slice'
        pl, ul = self.lists_of_len(self.limit + 1)
        def setfcn(x, i, v): x[i] = v
        self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), 2)

    def test10_checkindex(self):
        'Index check'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(0):
            if i < 0:
                self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
            else:
                self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')

        for i in (-self.limit - 1, self.limit):
            self.assertRaises(IndexError, ul._checkindex, i)

        ul._IndexError = TypeError
        self.assertRaises(TypeError, ul._checkindex, -self.limit - 1)

    def test_11_sorting(self):
        'Sorting'
        pl, ul = self.lists_of_len()
        pl.insert(0, pl.pop())
        ul.insert(0, ul.pop())
        pl.sort()
        ul.sort()
        self.assertEqual(pl[:], ul[:], 'sort')

        mid = pl[len(pl) / 2]
        pl.sort(key=lambda x: (mid-x)**2)
        ul.sort(key=lambda x: (mid-x)**2)
        self.assertEqual(pl[:], ul[:], 'sort w/ key')

        pl.insert(0, pl.pop())
        ul.insert(0, ul.pop())
        pl.sort(reverse=True)
        ul.sort(reverse=True)
        self.assertEqual(pl[:], ul[:], 'sort w/ reverse')

        mid = pl[len(pl) / 2]
        pl.sort(key=lambda x: (mid-x)**2)
        ul.sort(key=lambda x: (mid-x)**2)
        self.assertEqual(pl[:], ul[:], 'sort w/ key')

    def test_12_arithmetic(self):
        'Arithmetic'
        pl, ul = self.lists_of_len()
        al = range(10,14)
        self.assertEqual(list(pl + al), list(ul + al), 'add')
        self.assertEqual(type(ul), type(ul + al), 'type of add result')
        self.assertEqual(list(al + pl), list(al + ul), 'radd')
        self.assertEqual(type(al), type(al + ul), 'type of radd result')
        objid = id(ul)
        pl += al
        ul += al
        self.assertEqual(pl[:], ul[:], 'in-place add')
        self.assertEqual(objid, id(ul), 'in-place add id')

        for n in (-1,0,1,3):
            pl, ul = self.lists_of_len()
            self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
            self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
            self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
            self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
            objid = id(ul)
            pl *= n
            ul *= n
            self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
            self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)

        pl, ul = self.lists_of_len()
        self.assertEqual(pl, ul, 'cmp for equal')
        self.assert_(pl >= ul, 'cmp for gte self')
        self.assert_(pl <= ul, 'cmp for lte self')
        self.assert_(ul >= pl, 'cmp for self gte')
        self.assert_(ul <= pl, 'cmp for self lte')

        self.assert_(pl + [5] > ul, 'cmp')
        self.assert_(pl + [5] >= ul, 'cmp')
        self.assert_(pl < ul + [2], 'cmp')
        self.assert_(pl <= ul + [2], 'cmp')
        self.assert_(ul + [5] > pl, 'cmp')
        self.assert_(ul + [5] >= pl, 'cmp')
        self.assert_(ul < pl + [2], 'cmp')
        self.assert_(ul <= pl + [2], 'cmp')

        pl[1] = 20
        self.assert_(pl > ul, 'cmp for gt self')
        self.assert_(ul < pl, 'cmp for self lt')
        pl[1] = -20
        self.assert_(pl < ul, 'cmp for lt self')
        # Fixed: the second assertion duplicated the previous line; check the
        # symmetric comparison instead, mirroring the pl[1] = 20 case above.
        self.assert_(ul > pl, 'cmp for self gt')
class ListMixinTestSingle(ListMixinTest):
    # Re-run the entire ListMixinTest suite against UserListB, which stores a
    # mutable list and implements single-item assignment (_set_single).
    listType = UserListB
def suite():
    """Collect the test cases from both mixin test classes into one suite."""
    s = unittest.TestSuite()
    for test_class in (ListMixinTest, ListMixinTestSingle):
        s.addTest(unittest.makeSuite(test_class))
    return s
def run(verbosity=2):
    """Run the full suite with a plain-text runner (verbosity=2 lists each test)."""
    unittest.TextTestRunner(verbosity=verbosity).run(suite())

if __name__ == '__main__':
    run()
{
"definitions": {
"rule": {
"description": "Condition used to match resource (string, RegExp or Function).",
"anyOf": [
{
"instanceof": "RegExp",
"tsType": "RegExp"
},
{
"type": "string",
"minLength": 1
},
{
"instanceof": "Function",
"tsType": "import('../../lib/ModuleFilenameHelpers').MatcherFn"
}
]
},
"rules": {
"description": "One or multiple conditions used to match resource.",
"anyOf": [
{
"type": "array",
"items": {
"description": "A rule condition.",
"oneOf": [
{
"$ref": "#/definitions/rule"
}
]
}
},
{
"$ref": "#/definitions/rule"
}
]
}
},
"title": "SourceMapDevToolPluginOptions",
"type": "object",
"additionalProperties": false,
"properties": {
"append": {
"description": "Appends the given value to the original asset. Usually the #sourceMappingURL comment. [url] is replaced with a URL to the source map file. false disables the appending.",
"anyOf": [
{
"description": "Append no SourceMap comment to the bundle, but still generate SourceMaps.",
"enum": [false, null]
},
{
"type": "string",
"minLength": 1
},
{
"instanceof": "Function",
"tsType": "import('../../lib/TemplatedPathPlugin').TemplatePathFn"
}
]
},
"columns": {
"description": "Indicates whether column mappings should be used (defaults to true).",
"type": "boolean"
},
"debugIds": {
"description": "Emit debug IDs into source and SourceMap.",
"type": "boolean"
},
"exclude": {
"description": "Exclude modules that match the given value from source map generation.",
"oneOf": [
{
"$ref": "#/definitions/rules"
}
]
},
"fallbackModuleFilenameTemplate": {
"description": "Generator string or function to create identifiers of modules for the 'sources' array in the SourceMap used only if 'moduleFilenameTemplate' would result in a conflict.",
"anyOf": [
{
"type": "string",
"minLength": 1
},
{
"description": "Custom function generating the identifier.",
"instanceof": "Function",
"tsType": "import('../../lib/ModuleFilenameHelpers').ModuleFilenameTemplateFunction"
}
]
},
"fileContext": {
"description": "Path prefix to which the [file] placeholder is relative to.",
"type": "string"
},
"filename": {
"description": "Defines the output filename of the SourceMap (will be inlined if no value is provided).",
"anyOf": [
{
"description": "Disable separate SourceMap file and inline SourceMap as DataUrl.",
"enum": [false, null]
},
{
"type": "string",
"absolutePath": false,
"minLength": 1
}
]
},
"ignoreList": {
"description": "Decide whether to ignore source files that match the specified value in the SourceMap.",
"oneOf": [
{
"$ref": "#/definitions/rules"
}
]
},
"include": {
"description": "Include source maps for module paths that match the given value.",
"oneOf": [
{
"$ref": "#/definitions/rules"
}
]
},
"module": {
"description": "Indicates whether SourceMaps from loaders should be used (defaults to true).",
"type": "boolean"
},
"moduleFilenameTemplate": {
"description": "Generator string or function to create identifiers of modules for the 'sources' array in the SourceMap.",
"anyOf": [
{
"type": "string",
"minLength": 1
},
{
"description": "Custom function generating the identifier.",
"instanceof": "Function",
"tsType": "import('../../lib/ModuleFilenameHelpers').ModuleFilenameTemplateFunction"
}
]
},
"namespace": {
"description": "Namespace prefix to allow multiple webpack roots in the devtools.",
"type": "string"
},
"noSources": {
      "description": "Omit the 'sourcesContent' array from the SourceMap.",
"type": "boolean"
},
"publicPath": {
"description": "Provide a custom public path for the SourceMapping comment.",
"type": "string"
},
"sourceRoot": {
"description": "Provide a custom value for the 'sourceRoot' property in the SourceMap.",
"type": "string"
},
"test": {
"description": "Include source maps for modules based on their extension (defaults to .js and .css).",
"oneOf": [
{
"$ref": "#/definitions/rules"
}
]
}
}
} | json | github | https://github.com/webpack/webpack | schemas/plugins/SourceMapDevToolPlugin.json |
"""This module exists for purists who believe that ``unipath.Path`` shouldn't
inherit from ``unipath.PathName``. It provides an ``FSPath`` class that
mimics ``Path``. This current implementation punts by subclassing ``Path`` and
disabling the forbidden methods/properies, but there are instructions below to
make it a totally independent class. The reason this hasn't been done yet is
it requires refactoring all methods, which is a PITA when the API is still in
flux.
I tried to make ``unipath.Path`` independent in the way described below -- I
really did -- but it just became too cumbersome to use in applications. For
instance, should ``.cwd``, ``.rel_path_to``, and ``.walk`` et al return
``FSPath`` objects or ``PathName`` objects? It depends what the user
intends to do with them. Perhaps they want to call a filesystem operation
immediately, or perhaps they want to combine it into a larger path. Because
``PathName`` can't contain ``FSPath``, it's easier to go the other way round
and make your long-term variables ``FSPath``, and access its ``.path`` attribute
(the ``PathName`` object) as needed. However, things get messy when you
have to deconstruct the path to modify it, then invoke the ``FSPath``
constructor again in order to call a filesystem operation:
top_directory = FSPath("...")
FSPath(PathName(my_directory, "subdir")).mkdir()
It gets even more verbose if one decides that the ``FSPath`` constructor should
accept only a premade ``PathName`` object rather than passing its own
arguments to the ``PathName`` constructor:
top_directory = FSPath(PathName("..."))
This module exists for demonstration and so people can compare ``Path`` vs
``FSPath`` side by side. I (Mike Orr) personally expect to use ``Path``
so that's where the bulk of my development effort will go. If the ``FSPath``
model is chosen for Python 3000's stdlib, it's possible that the API will
diverge from ``Path`` and they will end up as totally separate modules with
different maintainers.
(c) 2006 by Mike Orr <sluggoster@gmail.com>.
Permission granted to redistribute, modify, and include in commercial or
noncommercial products under the terms of the Python license (i.e., the "PSF
license agreement for Python 2.3", in section B of
http://www.python.org/download/releases/2.3.2/license/).
See unipath.py for history, credits, and documentation.
A test suite is available in unipath_test.py.
HOW TO MAKE FSPath NOT SUBCLASS unipath.Path
============================================
1) Subclass ``object`` instead of ``unipath.Path``.
2) Add the ``.__init__`` method and the other code below.
3) Add all ``unipath.Path`` methods and refactor them to pass ``self.path``
instead of ``self`` to the ``os.*`` and ``shutil`` functions.
# THIS CODE IS PART OF THE DOCSTRING!
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], Path):
if args[0].pathlib is os.path:
self.path = args[0]
        else:
            tup = (os.path.__name__, args[0])
            reason = "arg is a path for a non-%s platform: %s"
            raise TypeError(reason % tup)
else:
self.path = Path(*args)
def __hash__(self):
return self.path.__hash__()
__slots__ = ["path"]
# END CODE INCLUDED IN DOCSTRING
CHANGELOG
=========
2006-XX-XX Initial release.
"""
from unipath import PathName
from unipath import Path as _Path
class FSPath(_Path):
    """A unipath Path object that cannot access PathName methods or
    properties or string methods via inheritance.
    """
    def __init__(self, *args, **kw):
        _Path.__init__(self, *args, **kw)
        self.path = self  # For applications to access the PathName object.

    def __getattribute__(self, attr):
        """Disable access to PathName methods/properties and string methods.
        (The latter happens implicitly because Path inherits from str or
        unicode.) Attributes beginning with "__" are not disabled to avoid
        messing up Python special methods.
        """
        if not attr.startswith("__") and attr in dir(_Path):
            raise AttributeError("use a PathName object for '%s'" % attr)
        # NOTE(review): delegating to _Path.__getattr__ looks suspicious --
        # normal attribute lookup would go through __getattribute__; confirm
        # that _Path actually defines __getattr__ before relying on this.
        return _Path.__getattr__(self, attr)
import tensorflow as tf
import urllib.request
def load_image(url):
    """Download the image to be classified and return its raw bytes.

    Args:
        url: HTTP(S) URL of the image.

    Returns:
        The response body as bytes.
    """
    # Use the response as a context manager so the underlying connection is
    # closed even if read() raises (the previous version leaked it).
    with urllib.request.urlopen(url) as response:
        return response.read()
def load_labels(filename):
    """Read in labels, one label per line."""
    # rstrip drops the trailing newline (and any other trailing whitespace).
    return [line.rstrip() for line in tf.gfile.GFile(filename)]
def load_graph(filename):
    """Unpersists graph from file as default graph."""
    with tf.gfile.FastGFile(filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Merge the serialized graph into the current default graph;
        # name='' keeps the original node names (no import prefix).
        tf.import_graph_def(graph_def, name='')
def run_graph(image_data, labels, input_layer_name, output_layer_name,
              num_top_predictions):
    """Run the default graph on raw image bytes and return top predictions.

    Args:
        image_data: raw image bytes fed to the input tensor.
        labels: list of class names, indexed by the graph's class/node id.
        input_layer_name: name of the input tensor to feed.
        output_layer_name: name of the softmax output tensor.
        num_top_predictions: how many of the highest-scoring classes to return.

    Returns:
        A list of {'label': ..., 'score': ...} dicts, highest score first;
        scores are formatted as percentage strings like '97.12%'.
    """
    with tf.Session() as sess:
        # Feed the image_data as input to the graph.
        # predictions will contain a two-dimensional array, where one
        # dimension represents the input image count, and the other has
        # predictions per class
        softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
        predictions, = sess.run(softmax_tensor, {input_layer_name: image_data})

        # Sort to show labels in order of confidence
        top_k = predictions.argsort()[-num_top_predictions:][::-1]
        results = []
        for node_id in top_k:
            human_string = labels[node_id]
            score = predictions[node_id]
            results.append({'label': human_string.title(),
                            'score': "{0:.2f}%".format(score * 100)})
        return results
# Load the class labels once at import time; index i corresponds to the
# graph's class id i.
labels = load_labels('output_labels.txt')
# load graph, which is stored in the default session
load_graph('output_graph.pb')
def label_image(image_url, no_predict):
    """Fetch the image at `image_url` and classify it with the loaded graph.

    Args:
        image_url: URL of a JPEG image (fed to the DecodeJpeg/contents node).
        no_predict: number of top predictions to return (despite the name,
            this is a count, not a flag).

    Returns:
        The list of prediction dicts produced by run_graph.
    """
    # load image
    image_data = load_image(image_url)
    return run_graph(image_data, labels, 'DecodeJpeg/contents:0', 'final_result:0', no_predict)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
from item import Item, Items
from shinken.property import StringProp
from shinken.util import to_name_if_possible
from shinken.log import logger
class MacroModulation(Item):
    """A set of macro overrides that apply only inside a given timeperiod.

    Inherits the generic configuration-item machinery (property parsing,
    brok generation) from ``Item``.
    """
    id = 1  # zero is always special in database, so we do not take risk here
    my_type = 'macromodulation'

    properties = Item.properties.copy()
    properties.update({
        'macromodulation_name': StringProp(fill_brok=['full_status']),
        'modulation_period': StringProp(brok_transformation=to_name_if_possible,
                                        fill_brok=['full_status']),
    })

    running_properties = Item.running_properties.copy()

    # Properties exempt from the generic "required attribute" check in
    # is_correct(); modulation_period is validated/defaulted separately.
    _special_properties = ('modulation_period',)

    macros = {}

    def get_name(self):
        # For debugging purpose only (nice name)
        return self.macromodulation_name

    def is_active(self):
        """Return True if the modulation applies right now.

        A missing/None modulation_period means "always active" (24x7).
        """
        now = int(time.time())
        if not self.modulation_period or self.modulation_period.is_time_valid(now):
            return True
        return False

    def is_correct(self):
        """Validate the item; returns False on any configuration problem.

        Should have all required properties, or a void macro_period.
        """
        state = True
        cls = self.__class__
        # Raised all previously saw errors like unknown commands or timeperiods
        if self.configuration_errors != []:
            state = False
            for err in self.configuration_errors:
                logger.error("[item::%s] %s", self.get_name(), err)
        for prop, entry in cls.properties.items():
            if prop not in cls._special_properties:
                if not hasattr(self, prop) and entry.required:
                    logger.warning(
                        "[macromodulation::%s] %s property not set", self.get_name(), prop
                    )
                    state = False  # Bad boy...
        # Ok just put None as modulation_period, means 24x7
        if not hasattr(self, 'modulation_period'):
            self.modulation_period = None
        return state
class MacroModulations(Items):
    """Container for all MacroModulation items, indexed by name."""
    name_property = "macromodulation_name"
    inner_class = MacroModulation

    def linkify(self, timeperiods):
        # Replace the textual 'modulation_period' reference on each item
        # with the actual Timeperiod object.
        self.linkify_with_timeperiods(timeperiods, 'modulation_period')
use std::collections::hash_map::Entry;
use std::marker::PhantomData;
use std::ops::Range;
use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx};
use rustc_data_structures::fx::FxHashMap;
use rustc_index::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{Instance, Ty};
use rustc_middle::{bug, mir, ty};
use rustc_session::config::DebugInfo;
use rustc_span::{BytePos, Span, Symbol, hygiene, sym};
use super::operand::{OperandRef, OperandValue};
use super::place::{PlaceRef, PlaceValue};
use super::{FunctionCx, LocalRef, PerLocalVarDebugInfoIndexVec};
use crate::traits::*;
/// Per-function debuginfo state, generic over the backend's scope handle
/// (`S`) and source-location handle (`L`).
pub struct FunctionDebugContext<'tcx, S, L> {
    /// Maps from source code to the corresponding debug info scope.
    pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,

    /// Maps from an inlined function to its debug info declaration.
    pub inlined_function_scopes: FxHashMap<Instance<'tcx>, S>,
}
/// Whether a debuginfo variable describes a formal argument or a local.
#[derive(Copy, Clone)]
pub enum VariableKind {
    /// A formal parameter; the payload is its 1-based argument index.
    ArgumentVariable(usize /*index*/),
    LocalVariable,
}
/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
#[derive(Clone)]
pub struct PerLocalVarDebugInfo<'tcx, D> {
    /// Source-level variable name.
    pub name: Symbol,
    /// Scope and span the variable is declared in.
    pub source_info: mir::SourceInfo,

    /// `DIVariable` returned by `create_dbg_var`.
    pub dbg_var: Option<D>,

    /// Byte range in the `dbg_var` covered by this fragment,
    /// if this is a fragment of a composite `VarDebugInfo`.
    pub fragment: Option<Range<Size>>,

    /// `.place.projection` from `mir::VarDebugInfo`.
    pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
}
/// Information needed to emit a constant.
pub struct ConstDebugInfo<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    pub name: String,
    pub source_info: mir::SourceInfo,
    /// The evaluated constant; spilled to a stack slot when introduced.
    pub operand: OperandRef<'tcx, Bx::Value>,
    pub dbg_var: Bx::DIVariable,
    pub dbg_loc: Bx::DILocation,
    /// Byte range covered, if this is a fragment of a composite variable.
    pub fragment: Option<Range<Size>>,
    /// Ties the otherwise-unused `'a` lifetime parameter into the type.
    pub _phantom: PhantomData<&'a ()>,
}
/// A lowered `mir::SourceScope`: the backend scope handle plus enough file
/// information to detect cross-file spans (see `adjust_dbg_scope_for_span`).
#[derive(Clone, Copy, Debug)]
pub struct DebugScope<S, L> {
    pub dbg_scope: S,

    /// Call site location, if this scope was inlined from another function.
    pub inlined_at: Option<L>,

    // Start and end offsets of the file to which this DIScope belongs.
    // These are used to quickly determine whether some span refers to the same file.
    pub file_start_pos: BytePos,
    pub file_end_pos: BytePos,
}
impl<'tcx, S: Copy, L: Copy> DebugScope<S, L> {
    /// Returns the scope to use for a `DILocation` at `span`.
    ///
    /// `DILocation`s inherit their source file name from the parent
    /// `DIScope`, but macro expansion can place `span` in a different file
    /// than this scope's. When that happens, create a `DIScope` "extension"
    /// into the span's file; otherwise reuse the stored scope as-is.
    pub fn adjust_dbg_scope_for_span<Cx: CodegenMethods<'tcx, DIScope = S, DILocation = L>>(
        &self,
        cx: &Cx,
        span: Span,
    ) -> S {
        let lo = span.lo();
        let same_file = lo >= self.file_start_pos && lo < self.file_end_pos;
        if same_file {
            self.dbg_scope
        } else {
            let file = cx.sess().source_map().lookup_char_pos(lo).file;
            cx.extend_scope_to_file(self.dbg_scope, &file)
        }
    }
}
/// Abstracts over the two "locations" `calculate_debuginfo_offset` can walk:
/// an actual `PlaceRef` (emitting IR for derefs/indexing) or just a
/// `TyAndLayout` (pure layout computation, no IR).
trait DebugInfoOffsetLocation<'tcx, Bx> {
    fn deref(&self, bx: &mut Bx) -> Self;
    fn layout(&self) -> TyAndLayout<'tcx>;
    fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self;
    fn project_constant_index(&self, bx: &mut Bx, offset: u64) -> Self;
    fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self;
}
// Concrete-place implementation: each projection step emits real IR
// (loads for derefs, GEP-style projections for fields/indices).
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> DebugInfoOffsetLocation<'tcx, Bx>
    for PlaceRef<'tcx, Bx::Value>
{
    fn deref(&self, bx: &mut Bx) -> Self {
        // Load the pointer and treat the pointee as the new place.
        bx.load_operand(*self).deref(bx.cx())
    }

    fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self {
        PlaceRef::project_field(*self, bx, field.index())
    }

    fn project_constant_index(&self, bx: &mut Bx, offset: u64) -> Self {
        let lloffset = bx.cx().const_usize(offset);
        self.project_index(bx, lloffset)
    }

    fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self {
        self.project_downcast(bx, variant)
    }
}
// Layout-only implementation: walks types without emitting any IR, used
// when only the offsets (not a concrete place) are needed.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> DebugInfoOffsetLocation<'tcx, Bx>
    for TyAndLayout<'tcx>
{
    fn deref(&self, bx: &mut Bx) -> Self {
        bx.cx().layout_of(
            self.ty.builtin_deref(true).unwrap_or_else(|| bug!("cannot deref `{}`", self.ty)),
        )
    }

    fn layout(&self) -> TyAndLayout<'tcx> {
        *self
    }

    fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self {
        self.field(bx.cx(), field.index())
    }

    fn project_constant_index(&self, bx: &mut Bx, index: u64) -> Self {
        self.field(bx.cx(), index as usize)
    }

    fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self {
        self.for_variant(bx.cx(), variant)
    }
}
/// Result of walking a projection list: the DWARF-style offset expression
/// (a direct offset plus zero or more deref-then-offset steps) and the
/// final location reached.
struct DebugInfoOffset<T> {
    /// Offset from the `base` used to calculate the debuginfo offset.
    direct_offset: Size,
    /// Each offset in this vector indicates one level of indirection from the base or previous
    /// indirect offset plus a dereference.
    indirect_offsets: Vec<Size>,
    /// The final location debuginfo should point to.
    result: T,
}
/// Walks `projection` from `base`, accumulating the debuginfo offset
/// expression. Only projections for which `can_use_in_debuginfo()` holds
/// (derefs, fields, downcasts, constant indices from the start) are
/// supported; anything else is a compiler bug.
fn calculate_debuginfo_offset<
    'a,
    'tcx,
    Bx: BuilderMethods<'a, 'tcx>,
    L: DebugInfoOffsetLocation<'tcx, Bx>,
>(
    bx: &mut Bx,
    projection: &[mir::PlaceElem<'tcx>],
    base: L,
) -> DebugInfoOffset<L> {
    let mut direct_offset = Size::ZERO;
    // FIXME(eddyb) use smallvec here.
    let mut indirect_offsets = vec![];
    let mut place = base;
    for elem in projection {
        match *elem {
            mir::ProjectionElem::Deref => {
                // A deref starts a new (initially zero) indirect offset;
                // subsequent field offsets accumulate into it.
                indirect_offsets.push(Size::ZERO);
                place = place.deref(bx);
            }
            mir::ProjectionElem::Field(field, _) => {
                let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
                *offset += place.layout().fields.offset(field.index());
                place = place.project_field(bx, field);
            }
            mir::ProjectionElem::Downcast(_, variant) => {
                // Variant selection changes the layout but not the offset.
                place = place.downcast(bx, variant);
            }
            mir::ProjectionElem::ConstantIndex {
                offset: index,
                min_length: _,
                from_end: false,
            } => {
                let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
                let FieldsShape::Array { stride, count: _ } = place.layout().fields else {
                    bug!("ConstantIndex on non-array type {:?}", place.layout())
                };
                *offset += stride * index;
                place = place.project_constant_index(bx, index);
            }
            _ => {
                // Sanity check for `can_use_in_debuginfo`.
                assert!(!elem.can_use_in_debuginfo());
                bug!("unsupported var debuginfo projection `{:?}`", projection)
            }
        }
    }

    DebugInfoOffset { direct_offset, indirect_offsets, result: place }
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Sets the builder's span and, when a debug context exists, the
    /// corresponding `DILocation` for subsequently emitted instructions.
    pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
        bx.set_span(source_info.span);
        if let Some(dbg_loc) = self.dbg_loc(source_info) {
            bx.set_dbg_loc(dbg_loc);
        }
    }
    /// Resolves `source_info` to a backend debug location, or `None` when no
    /// debug context is available (debuginfo disabled).
    fn dbg_loc(&self, source_info: mir::SourceInfo) -> Option<Bx::DILocation> {
        let (dbg_scope, inlined_at, span) = self.adjusted_span_and_dbg_scope(source_info)?;
        Some(self.cx.dbg_loc(dbg_scope, inlined_at, span))
    }
    /// Collapses macro-expansion chains on the span, then returns it together
    /// with a scope adjusted to the span's file and any inlined-at location.
    fn adjusted_span_and_dbg_scope(
        &self,
        source_info: mir::SourceInfo,
    ) -> Option<(Bx::DIScope, Option<Bx::DILocation>, Span)> {
        let scope = &self.debug_context.as_ref()?.scopes[source_info.scope];
        let span = hygiene::walk_chain_collapsed(source_info.span, self.mir.span);
        Some((scope.adjust_dbg_scope_for_span(self.cx, span), scope.inlined_at, span))
    }
    /// Copies `operand` into a fresh stack slot so debuginfo has a stable
    /// address to refer to; returns the slot.
    fn spill_operand_to_stack(
        operand: OperandRef<'tcx, Bx::Value>,
        name: Option<String>,
        bx: &mut Bx,
    ) -> PlaceRef<'tcx, Bx::Value> {
        // "Spill" the value onto the stack, for debuginfo,
        // without forcing non-debuginfo uses of the local
        // to also load from the stack every single time.
        // FIXME(#68817) use `llvm.dbg.value` instead,
        // at least for the cases which LLVM handles correctly.
        let spill_slot = PlaceRef::alloca(bx, operand.layout);
        if let Some(name) = name {
            bx.set_var_name(spill_slot.val.llval, &(name + ".dbg.spill"));
        }
        operand.val.store(bx, spill_slot);
        spill_slot
    }
    /// Indicates that `local` is set to a new value. The `layout` and
    /// `projection` of `base` are used to calculate the debuginfo offset.
    /// Only has an effect under full debuginfo.
    pub(crate) fn debug_new_val_to_local(
        &self,
        bx: &mut Bx,
        local: mir::Local,
        base: PlaceRef<'tcx, Bx::Value>,
        projection: &[mir::PlaceElem<'tcx>],
    ) {
        let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
        if !full_debug_info {
            return;
        }

        let vars = match &self.per_local_var_debug_info {
            Some(per_local) => &per_local[local],
            None => return,
        };

        let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
            calculate_debuginfo_offset(bx, projection, base.layout);

        for var in vars.iter() {
            // Variables lacking backend metadata or a resolvable location
            // cannot be described; skip them.
            let Some(dbg_var) = var.dbg_var else {
                continue;
            };
            let Some(dbg_loc) = self.dbg_loc(var.source_info) else {
                continue;
            };
            bx.dbg_var_value(
                dbg_var,
                dbg_loc,
                base.val.llval,
                direct_offset,
                &indirect_offsets,
                &var.fragment,
            );
        }
    }
    /// Records `local`'s debuginfo value as a poison constant of its
    /// immediate backend type (used when the value becomes undefined).
    pub(crate) fn debug_poison_to_local(&self, bx: &mut Bx, local: mir::Local) {
        let ty = self.monomorphize(self.mir.local_decls[local].ty);
        let layout = bx.cx().layout_of(ty);
        let to_backend_ty = bx.cx().immediate_backend_type(layout);
        let place_ref = PlaceRef::new_sized(bx.cx().const_poison(to_backend_ty), layout);
        self.debug_new_val_to_local(bx, local, place_ref, &[]);
    }
    /// Apply debuginfo and/or name, after creating the `alloca` for a local,
    /// or initializing the local with an operand (whichever applies).
    pub(crate) fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
        let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;

        let vars = match &self.per_local_var_debug_info {
            Some(per_local) => &per_local[local],
            None => return,
        };
        let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).cloned();
        let has_proj = || vars.iter().any(|var| !var.projection.is_empty());

        // Synthesize a variable for arguments that have no `mir::VarDebugInfo`
        // of their own, so they still show up in the debugger.
        let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
            let arg_index = local.index() - 1;

            // Add debuginfo even to unnamed arguments.
            // FIXME(eddyb) is this really needed?
            if arg_index == 0 && has_proj() {
                // Hide closure environments from debuginfo.
                // FIXME(eddyb) shouldn't `ArgumentVariable` indices
                // be offset to account for the hidden environment?
                None
            } else if whole_local_var.is_some() {
                // No need to make up anything, there is a `mir::VarDebugInfo`
                // covering the whole local.
                // FIXME(eddyb) take `whole_local_var.source_info.scope` into
                // account, just in case it doesn't use `ArgumentVariable`
                // (after #67586 gets fixed).
                None
            } else {
                let name = sym::empty;
                let decl = &self.mir.local_decls[local];
                let dbg_var = if full_debug_info {
                    self.adjusted_span_and_dbg_scope(decl.source_info).map(
                        |(dbg_scope, _, span)| {
                            // FIXME(eddyb) is this `+ 1` needed at all?
                            let kind = VariableKind::ArgumentVariable(arg_index + 1);

                            let arg_ty = self.monomorphize(decl.ty);

                            self.cx.create_dbg_var(name, arg_ty, dbg_scope, kind, span)
                        },
                    )
                } else {
                    None
                };

                Some(PerLocalVarDebugInfo {
                    name,
                    source_info: decl.source_info,
                    dbg_var,
                    fragment: None,
                    projection: ty::List::empty(),
                })
            }
        } else {
            None
        };

        let local_ref = &self.locals[local];

        // Pick an IR value name: the source-level name if one exists,
        // otherwise the MIR local's debug formatting (e.g. `_3`).
        let name = if bx.sess().fewer_names() {
            None
        } else {
            Some(match whole_local_var.or_else(|| fallback_var.clone()) {
                Some(var) if var.name != sym::empty => var.name.to_string(),
                _ => format!("{local:?}"),
            })
        };

        if let Some(name) = &name {
            match local_ref {
                LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
                    bx.set_var_name(place.val.llval, name);
                }
                LocalRef::Operand(operand) => match operand.val {
                    OperandValue::Ref(PlaceValue { llval: x, .. }) | OperandValue::Immediate(x) => {
                        bx.set_var_name(x, name);
                    }
                    OperandValue::Pair(a, b) => {
                        // FIXME(eddyb) these are scalar components,
                        // maybe extract the high-level fields?
                        bx.set_var_name(a, &(name.clone() + ".0"));
                        bx.set_var_name(b, &(name.clone() + ".1"));
                    }
                    OperandValue::ZeroSized => {
                        // These never have a value to talk about
                    }
                },
                LocalRef::PendingOperand => {}
            }
        }

        if !full_debug_info || vars.is_empty() && fallback_var.is_none() {
            return;
        }

        let base = match local_ref {
            LocalRef::PendingOperand => return,

            LocalRef::Operand(operand) => {
                // Don't spill operands onto the stack in naked functions.
                // See: https://github.com/rust-lang/rust/issues/42779
                let attrs = bx.tcx().codegen_instance_attrs(self.instance.def);
                if attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
                    return;
                }

                // Don't spill `<vscale x N x i1>` for `N != 16`:
                //
                // SVE predicates are only one bit for each byte in an SVE vector (which makes
                // sense, the predicate only needs to keep track of whether a lane is
                // enabled/disabled). i.e. a `<vscale x 16 x i8>` vector has a `<vscale x 16 x i1>`
                // predicate type. `<vscale x 16 x i1>` corresponds to two bytes of storage,
                // multiplied by the `vscale`, with one bit for each of the sixteen lanes.
                //
                // For a vector with fewer elements, such as `svint32_t`/`<vscale x 4 x i32>`,
                // while only a `<vscale x 4 x i1>` predicate type would be strictly necessary,
                // relevant intrinsics still take a `svbool_t`/`<vscale x 16 x i1>` - this is
                // because a `<vscale x 4 x i1>` is only half of a byte (for `vscale=1`), and with
                // memory being byte-addressable, it's unclear how to store that.
                //
                // Due to this, LLVM ultimately decided not to support stores of `<vscale x N x i1>`
                // for `N != 16`. As for `vscale=1` and `N` fewer than sixteen, partial bytes would
                // need to be stored (except for `N=8`, but that also isn't supported). `N` can
                // never be greater than sixteen as that ends up larger than the 128-bit increment
                // size.
                //
                // Internally, with an intrinsic operating on a `svint32_t`/`<vscale x 4 x i32>`
                // (for example), the intrinsic takes the `svbool_t`/`<vscale x 16 x i1>` predicate
                // and casts it to a `svbool4_t`/`<vscale x 4 x i1>`. Therefore, it's important that
                // the `<vscale x 4 x i1>` never spills because that'll cause errors during
                // instruction selection. Spilling to the stack to create debuginfo for these
                // intermediate values must be avoided and doing so won't affect the
                // debugging experience anyway.
                if operand.layout.ty.is_scalable_vector()
                    && bx.sess().target.arch == rustc_target::spec::Arch::AArch64
                {
                    let (count, element_ty) =
                        operand.layout.ty.scalable_vector_element_count_and_type(bx.tcx());
                    // i.e. `<vscale x N x i1>` when `N != 16`
                    if element_ty.is_bool() && count != 16 {
                        return;
                    }
                }

                Self::spill_operand_to_stack(*operand, name, bx)
            }

            LocalRef::Place(place) => *place,

            // FIXME(eddyb) add debuginfo for unsized places too.
            LocalRef::UnsizedPlace(_) => return,
        };

        let vars = vars.iter().cloned().chain(fallback_var);

        for var in vars {
            self.debug_introduce_local_as_var(bx, local, base, var);
        }
    }
    /// Emits the `dbg_var_addr` describing one variable (or fragment) of
    /// `local`, pointing into `base` at the offset implied by the variable's
    /// projection.
    fn debug_introduce_local_as_var(
        &self,
        bx: &mut Bx,
        local: mir::Local,
        base: PlaceRef<'tcx, Bx::Value>,
        var: PerLocalVarDebugInfo<'tcx, Bx::DIVariable>,
    ) {
        let Some(dbg_var) = var.dbg_var else { return };
        let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };

        let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
            calculate_debuginfo_offset(bx, var.projection, base.layout);

        // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
        // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
        // not DWARF and LLVM doesn't support translating the resulting
        // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
        // Creating extra allocas on the stack makes the resulting debug info simple enough
        // that LLVM can generate correct CodeView records and thus the values appear in the
        // debugger. (#83709)
        let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
            && self.mir.local_kind(local) == mir::LocalKind::Arg
            // LLVM can handle simple things but anything more complex than just a direct
            // offset or one indirect offset of 0 is too complex for it to generate CV records
            // correctly.
            && (direct_offset != Size::ZERO || !matches!(&indirect_offsets[..], [Size::ZERO] | []));

        if should_create_individual_allocas {
            let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
                calculate_debuginfo_offset(bx, var.projection, base);

            // Create a variable which will be a pointer to the actual value
            let ptr_ty = Ty::new_mut_ptr(bx.tcx(), place.layout.ty);
            let ptr_layout = bx.layout_of(ptr_ty);
            let alloca = PlaceRef::alloca(bx, ptr_layout);
            bx.set_var_name(alloca.val.llval, &(var.name.to_string() + ".dbg.spill"));

            // Write the pointer to the variable
            bx.store_to_place(place.val.llval, alloca.val);

            // Point the debug info to `*alloca` for the current variable
            bx.dbg_var_addr(
                dbg_var,
                dbg_loc,
                alloca.val.llval,
                Size::ZERO,
                &[Size::ZERO],
                &var.fragment,
            );
        } else {
            bx.dbg_var_addr(
                dbg_var,
                dbg_loc,
                base.val.llval,
                direct_offset,
                &indirect_offsets,
                &var.fragment,
            );
        }
    }
    /// Introduces debuginfo for every MIR local, then spills and describes
    /// each named constant collected by `compute_per_local_var_debug_info`.
    pub(crate) fn debug_introduce_locals(
        &self,
        bx: &mut Bx,
        consts: Vec<ConstDebugInfo<'a, 'tcx, Bx>>,
    ) {
        if bx.sess().opts.debuginfo == DebugInfo::Full || !bx.sess().fewer_names() {
            for local in self.locals.indices() {
                self.debug_introduce_local(bx, local);
            }

            for ConstDebugInfo { name, source_info, operand, dbg_var, dbg_loc, fragment, .. } in
                consts.into_iter()
            {
                self.set_debug_loc(bx, source_info);
                let base = FunctionCx::spill_operand_to_stack(operand, Some(name), bx);
                // The spill itself should not carry a location; only the
                // dbg_var_addr below is attributed to the constant.
                bx.clear_dbg_loc();

                bx.dbg_var_addr(dbg_var, dbg_loc, base.val.llval, Size::ZERO, &[], &fragment);
            }
        }
    }
    /// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
    ///
    /// Returns `None` when neither debuginfo nor value names are wanted.
    /// Constants get collected separately for `debug_introduce_locals`.
    pub(crate) fn compute_per_local_var_debug_info(
        &self,
        bx: &mut Bx,
    ) -> Option<(
        PerLocalVarDebugInfoIndexVec<'tcx, Bx::DIVariable>,
        Vec<ConstDebugInfo<'a, 'tcx, Bx>>,
    )> {
        let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;

        let target_is_msvc = self.cx.sess().target.is_like_msvc;

        if !full_debug_info && self.cx.sess().fewer_names() {
            return None;
        }

        let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
        let mut constants = vec![];
        // Deduplicates `DIVariable`s for the same (scope, argument index),
        // which can occur after MIR inlining.
        let mut params_seen: FxHashMap<_, Bx::DIVariable> = Default::default();

        for var in &self.mir.var_debug_info {
            let dbg_scope_and_span = if full_debug_info {
                self.adjusted_span_and_dbg_scope(var.source_info)
            } else {
                None
            };

            let var_ty = if let Some(ref fragment) = var.composite {
                self.monomorphize(fragment.ty)
            } else {
                match var.value {
                    mir::VarDebugInfoContents::Place(place) => {
                        self.monomorphized_place_ty(place.as_ref())
                    }
                    mir::VarDebugInfoContents::Const(c) => self.monomorphize(c.ty()),
                }
            };

            let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
                let var_kind = if let Some(arg_index) = var.argument_index
                    && var.composite.is_none()
                    && let mir::VarDebugInfoContents::Place(place) = var.value
                    && place.projection.is_empty()
                {
                    let arg_index = arg_index as usize;
                    if target_is_msvc {
                        // ScalarPair parameters are spilled to the stack so they need to
                        // be marked as a `LocalVariable` for MSVC debuggers to visualize
                        // their data correctly. (See #81894 & #88625)
                        let var_ty_layout = self.cx.layout_of(var_ty);
                        if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr {
                            VariableKind::LocalVariable
                        } else {
                            VariableKind::ArgumentVariable(arg_index)
                        }
                    } else {
                        // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
                        // offset in closures to account for the hidden environment?
                        VariableKind::ArgumentVariable(arg_index)
                    }
                } else {
                    VariableKind::LocalVariable
                };

                if let VariableKind::ArgumentVariable(arg_index) = var_kind {
                    match params_seen.entry((dbg_scope, arg_index)) {
                        Entry::Occupied(o) => o.get().clone(),
                        Entry::Vacant(v) => v
                            .insert(
                                self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span),
                            )
                            .clone(),
                    }
                } else {
                    self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
                }
            });

            let fragment = if let Some(ref fragment) = var.composite {
                let var_layout = self.cx.layout_of(var_ty);

                let DebugInfoOffset { direct_offset, indirect_offsets, result: fragment_layout } =
                    calculate_debuginfo_offset(bx, &fragment.projection, var_layout);
                assert!(indirect_offsets.is_empty());

                if fragment_layout.size == Size::ZERO {
                    // Fragment is a ZST, so does not represent anything. Avoid generating anything
                    // as this may conflict with a fragment that covers the entire variable.
                    continue;
                } else if fragment_layout.size == var_layout.size {
                    // Fragment covers entire variable, so as far as
                    // DWARF is concerned, it's not really a fragment.
                    None
                } else {
                    Some(direct_offset..direct_offset + fragment_layout.size)
                }
            } else {
                None
            };

            match var.value {
                mir::VarDebugInfoContents::Place(place) => {
                    per_local[place.local].push(PerLocalVarDebugInfo {
                        name: var.name,
                        source_info: var.source_info,
                        dbg_var,
                        fragment,
                        projection: place.projection,
                    });
                }
                mir::VarDebugInfoContents::Const(c) => {
                    if let Some(dbg_var) = dbg_var {
                        let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };

                        let operand = self.eval_mir_constant_to_operand(bx, &c);
                        constants.push(ConstDebugInfo {
                            name: var.name.to_string(),
                            source_info: var.source_info,
                            operand,
                            dbg_var,
                            dbg_loc,
                            fragment,
                            _phantom: PhantomData,
                        });
                    }
                }
            }
        }
        Some((per_local, constants))
    }
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_codegen_ssa/src/mir/debuginfo.rs |
#!/bin/sh
#
# Exercises git's push "quarantine": objects received during a push are
# held in a temporary incoming-* object directory and only migrated into
# the real object store if the pre-receive hook accepts the update.

test_description='check quarantine of objects during push'

. ./test-lib.sh

# The destination's pre-receive hook rejects any update whose tip commit
# subject is exactly "reject"; everything else is accepted.
test_expect_success 'create picky dest repo' '
	git init --bare dest.git &&
	test_hook --setup -C dest.git pre-receive <<-\EOF
	while read old new ref; do
		test "$(git log -1 --format=%s $new)" = reject && exit 1
	done
	exit 0
	EOF
'

test_expect_success 'accepted objects work' '
	test_commit ok &&
	git push dest.git HEAD &&
	commit=$(git rev-parse HEAD) &&
	git --git-dir=dest.git cat-file commit $commit
'

test_expect_success 'rejected objects are not installed' '
	test_commit reject &&
	commit=$(git rev-parse HEAD) &&
	test_must_fail git push dest.git reject &&
	test_must_fail git --git-dir=dest.git cat-file commit $commit
'

# After the rejection above, no incoming-* quarantine directory may linger.
test_expect_success 'rejected objects are removed' '
	echo "incoming-*" >expect &&
	(cd dest.git/objects && echo incoming-*) >actual &&
	test_cmp expect actual
'

test_expect_success 'push to repo path with path separator (colon)' '
	# The interesting failure case here is when the
	# receiving end cannot access its original object directory,
	# so make it likely for us to generate a delta by having
	# a non-trivial file with multiple versions.
	test-tool genrandom foo 4096 >file.bin &&
	git add file.bin &&
	git commit -m bin &&

	if test_have_prereq MINGW
	then
		pathsep=";"
	else
		pathsep=":"
	fi &&
	git clone --bare . "xxx${pathsep}yyy.git" &&

	echo change >>file.bin &&
	git commit -am change &&

	# Note that we have to use the full path here, or it gets confused
	# with the ssh host:path syntax.
	git push "$(pwd)/xxx${pathsep}yyy.git" HEAD
'

# Refs must not be able to point at still-quarantined (possibly soon to be
# discarded) objects; fsck afterwards confirms nothing was corrupted.
test_expect_success 'updating a ref from quarantine is forbidden' '
	git init --bare update.git &&
	test_hook -C update.git pre-receive <<-\EOF &&
	read old new refname
	git update-ref refs/heads/unrelated $new
	exit 1
	EOF
	test_must_fail git push update.git HEAD &&
	git -C update.git fsck
'

test_done
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
FileExists
)
import integration_tests
class TarPluginTestCase(integration_tests.TestCase):

    def test_stage_nil_plugin(self):
        """Stage the simple-zip project and check the extracted layout.

        Also re-runs 'pull' afterwards as a regression test for
        https://bugs.launchpad.net/snapcraft/+bug/1500728.
        """
        project_dir = 'simple-zip'
        self.run_snapcraft('stage', project_dir)

        stage_root = os.path.join(project_dir, 'stage')
        for relpath in ('top-simple', os.path.join('dir-simple', 'sub')):
            self.assertThat(os.path.join(stage_root, relpath), FileExists())
        for relpath in ('dir-simple',):
            self.assertThat(os.path.join(stage_root, relpath), DirExists())

        # Regression test for
        # https://bugs.launchpad.net/snapcraft/+bug/1500728
        self.run_snapcraft('pull', project_dir)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-11 20:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the CountyCohorts model tracking
    # per-county student cohort enrollment/graduation statistics, broken
    # down by gender, ethnicity and economic status.
    # NOTE(review): do not edit field definitions here — this migration may
    # already be applied; schema fixes belong in a new migration.

    dependencies = [
        ('stats', '0002_auto_20151109_0319'),
        ('counties', '0002_county_state'),
    ]

    operations = [
        migrations.CreateModel(
            name='CountyCohorts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gender', models.CharField(blank=True, choices=[('Female', 'Female'), ('Male', 'Male')], max_length=30, verbose_name='Gender')),
                ('ethnicity', models.CharField(blank=True, choices=[('White', 'White'), ('Hispanic', 'Hispanic'), ('African American', 'African American'), ('Others', 'Others')], max_length=30, verbose_name='Ethnicity')),
                ('economic_status', models.CharField(blank=True, choices=[('Economically Disadvantaged', 'Economically Disadvantaged'), ('Not Economically Disadvantaged', 'Not Economically Disadvantaged')], max_length=30, verbose_name='Economic status')),
                ('enrolled_8th', models.IntegerField(null=True)),
                ('enrolled_9th', models.IntegerField(null=True)),
                ('enrolled_9th_percent', models.FloatField(null=True)),
                ('enrolled_10th', models.IntegerField(null=True)),
                ('enrolled_10th_percent', models.FloatField(null=True)),
                ('lessthan_10th_enrolled', models.IntegerField(null=True)),
                ('lessthan_10th_enrolled_percent', models.FloatField(null=True)),
                ('graduated', models.IntegerField(null=True)),
                ('graduated_percent', models.FloatField(null=True)),
                ('enrolled_4yr', models.IntegerField(null=True)),
                ('enrolled_4yr_percent', models.FloatField(null=True)),
                ('enrolled_2yr', models.IntegerField(null=True)),
                ('enrolled_2yr_percent', models.FloatField(null=True)),
                ('enrolled_out_of_state', models.IntegerField(null=True)),
                # NOTE(review): IntegerField, unlike the FloatField used by
                # every sibling *_percent column — looks like an oversight;
                # fix (if desired) in a follow-up migration.
                ('enrolled_out_of_state_percent', models.IntegerField(null=True)),
                ('total_enrolled', models.IntegerField(null=True)),
                ('total_enrolled_percent', models.FloatField(null=True)),
                ('enrolled_wo_record', models.IntegerField(null=True)),
                ('enrolled_wo_record_percent', models.FloatField(null=True)),
                ('total_degrees', models.IntegerField(null=True)),
                ('total_degrees_percent', models.FloatField(null=True)),
                ('bacc', models.IntegerField(null=True)),
                ('bacc_acc', models.IntegerField(null=True)),
                ('bacc_cert', models.IntegerField(null=True)),
                ('assoc', models.IntegerField(null=True)),
                # NOTE(review): 'accoc_cert' is presumably a typo of
                # 'assoc_cert'; renaming requires a new migration.
                ('accoc_cert', models.IntegerField(null=True)),
                ('cert', models.IntegerField(null=True)),
                ('county', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cohorts', to='counties.County')),
                ('year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='county_cohorts', to='stats.SchoolYear')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='countycohorts',
            unique_together=set([('county', 'year', 'economic_status', 'gender', 'ethnicity')]),
        ),
    ]
{
"columns": {
"description": "Descripció",
"key": "Clau",
"name": "Nom",
"team": "Equip",
"value": "Valor"
},
"config": {
"columns": {
"section": "Secció"
},
"title": "Configuració d'Airflow"
},
"connections": {
"add": "Afegir connexió",
"columns": {
"connectionId": "ID de connexió",
"connectionType": "Tipus de connexió",
"host": "Host",
"port": "Port"
},
"connection_one": "Connexió",
"connection_other": "Connexions",
"delete": {
"deleteConnection_one": "Eliminar 1 connexió",
"deleteConnection_other": "Eliminar {{count}} connexions",
"firstConfirmMessage_one": "Estàs a punt d'eliminar la següent connexió:",
"firstConfirmMessage_other": "Estàs a punt d'eliminar les següents connexions:",
"title": "Eliminar connexió"
},
"edit": "Editar connexió",
"form": {
"connectionIdRequired": "L'ID de connexió és obligatori",
"connectionIdRequirement": "L'ID de connexió no pot tenir únicament espais",
"connectionTypeRequired": "El tipus de connexió és obligatori",
"extraFields": "Camps addicionals",
"extraFieldsJson": "Camps addicionals JSON",
"helperText": "Falta el tipus de connexió? Assegureu-vos d'haver instal·lat el paquet de proveïdors d'Airflow corresponent.",
"helperTextForRedactedFields": "Els camps redactats ('***') romandran sense canvis si no es modifiquen.",
"selectConnectionType": "Seleccioneu el tipus de connexió",
"standardFields": "Camps estàndard"
},
"nothingFound": {
"description": "Les connexions definides a través de variables d'entorn o gestors de secrets no es mostren aquí.",
"documentationLink": "Més informació a la documentació d'Airflow.",
"learnMore": "Aquestes es resolen en temps d'execució i no són visibles a la interfície d'usuari.",
"title": "No s'ha trobat cap connexió!"
},
"searchPlaceholder": "Cercar connexions",
"test": "Provar connexió",
"testDisabled": "La funció de prova de connexió està desactivada. Si us plau, contacteu amb un administrador per activar-la.",
"testError": {
"title": "Prova de connexió fallida"
},
"testSuccess": {
"title": "Prova de connexió exitosa"
},
"typeMeta": {
"error": "No s'ha pogut recuperar la informació del tipus de connexió",
"standardFields": {
"description": "Descripció",
"host": "Host",
"login": "Inici de sessió",
"password": "Contrasenya",
"port": "Port",
"url_schema": "Esquema"
}
}
},
"deleteActions": {
"button": "Eliminar",
"modal": {
"confirmButton": "Sí, eliminar",
"secondConfirmMessage": "Aquesta acció és permanent i no es pot desfer.",
"thirdConfirmMessage": "Estàs segur que vols continuar?"
},
"selected": "Seleccionades",
"tooltip": "Eliminar connexions seleccionades"
},
"formActions": {
"save": "Desar"
},
"plugins": {
"columns": {
"source": "Font"
},
"importError_one": "Error d'importació d'extensió",
"importError_other": "Errors d'importació d'extensió",
"searchPlaceholder": "Cercar per fitxer"
},
"pools": {
"add": "Afegir pool",
"deferredSlotsIncluded": "Slots diferits inclosos",
"delete": {
"title": "Eliminar pool",
"warning": "Això eliminarà tota la metadada relacionada amb el pool i pot afectar les tasques que utilitzin aquest pool."
},
"edit": "Editar pool",
"form": {
"checkbox": "Marqueu per incloure les tasques diferides per calcular els slots oberts del pool",
"description": "Descripció",
"includeDeferred": "Incloure diferides",
"nameMaxLength": "El nom pot contenir un màxim de 256 caràcters",
"nameRequired": "El nom és obligatori",
"slots": "Slots"
},
"noPoolsFound": "No s'han trobat pools",
"pool_one": "Pool",
"pool_other": "Pools",
"searchPlaceholder": "Cercar pools",
"sort": {
"asc": "Nom (A-Z)",
"desc": "Nom (Z-A)",
"placeholder": "Ordenar per"
}
},
"providers": {
"columns": {
"packageName": "Nom del paquet",
"version": "Versió"
}
},
"variables": {
"add": "Afegir variable",
"columns": {
"isEncrypted": "Està xifrat"
},
"delete": {
"deleteVariable_one": "Eliminar 1 Variable",
"deleteVariable_other": "Eliminar {{count}} Variables",
"firstConfirmMessage_one": "Estàs a punt d'eliminar la següent variable:",
"firstConfirmMessage_other": "Estàs a punt d'eliminar les següents variables:",
"title": "Eliminar Variable",
"tooltip": "Eliminar variables seleccionades"
},
"edit": "Editar Variable",
"form": {
"invalidJson": "JSON no vàlid",
"keyMaxLength": "La clau pot contenir un màxim de 250 caràcters",
"keyRequired": "La clau és obligatòria",
"valueRequired": "El valor és obligatori"
},
"import": {
"button": "Importar",
"conflictResolution": "Seleccionar resolució de conflicte de variables",
"errorParsingJsonFile": "Error a l'analitzar el fitxer JSON: Puja un fitxer JSON que contingui variables (per exemple, {\"key\": \"value\", ...}).",
"options": {
"fail": {
"description": "Error en la importació si es detecten variables existents.",
"title": "Error"
},
"overwrite": {
"description": "Sobreescriu la variable en cas de conflicte.",
"title": "Sobreescriure"
},
"skip": {
"description": "Ignora la importació de variables que ja existeixen.",
"title": "Ignorar"
}
},
"title": "Importar Variables",
"upload": "Penjar un fitxer JSON",
"uploadPlaceholder": "Penjar un fitxer JSON que contingui variables (per exemple, {\"key\": \"value\", ...})"
},
"noRowsMessage": "No s'han trobat variables",
"searchPlaceholder": "Cercar claus",
"variable_one": "Variable",
"variable_other": "Variables"
}
} | json | github | https://github.com/apache/airflow | airflow-core/src/airflow/ui/public/i18n/locales/ca/admin.json |
"""Flexible enumeration of C types."""
from Enumeration import *
# TODO:
# - struct improvements (flexible arrays, packed &
# unpacked, alignment)
# - objective-c qualified id
# - anonymous / transparent unions
# - VLAs
# - block types
# - K&R functions
# - pass arguments of different types (test extension, transparent union)
# - varargs
###
# Actual type types
class Type:
def isBitField(self):
return False
def isPaddingBitField(self):
return False
def getTypeName(self, printer):
name = 'T%d' % len(printer.types)
typedef = self.getTypedefDef(name, printer)
printer.addDeclaration(typedef)
return name
class BuiltinType(Type):
def __init__(self, name, size, bitFieldSize=None):
self.name = name
self.size = size
self.bitFieldSize = bitFieldSize
def isBitField(self):
return self.bitFieldSize is not None
def isPaddingBitField(self):
return self.bitFieldSize is 0
def getBitFieldSize(self):
assert self.isBitField()
return self.bitFieldSize
def getTypeName(self, printer):
return self.name
def sizeof(self):
return self.size
def __str__(self):
return self.name
class EnumType(Type):
def __init__(self, index, enumerators):
self.index = index
self.enumerators = enumerators
def getEnumerators(self):
result = ''
for i, init in enumerate(self.enumerators):
if i > 0:
result = result + ', '
result = result + 'enum%dval%d' % (self.index, i)
if init:
result = result + ' = %s' % (init)
return result
def __str__(self):
return 'enum { %s }' % (self.getEnumerators())
def getTypedefDef(self, name, printer):
return 'typedef enum %s { %s } %s;'%(name, self.getEnumerators(), name)
class RecordType(Type):
def __init__(self, index, isUnion, fields):
self.index = index
self.isUnion = isUnion
self.fields = fields
self.name = None
def __str__(self):
def getField(t):
if t.isBitField():
return "%s : %d;" % (t, t.getBitFieldSize())
else:
return "%s;" % t
return '%s { %s }'%(('struct','union')[self.isUnion],
' '.join(map(getField, self.fields)))
def getTypedefDef(self, name, printer):
def getField((i, t)):
if t.isBitField():
if t.isPaddingBitField():
return '%s : 0;'%(printer.getTypeName(t),)
else:
return '%s field%d : %d;'%(printer.getTypeName(t),i,
t.getBitFieldSize())
else:
return '%s field%d;'%(printer.getTypeName(t),i)
fields = map(getField, enumerate(self.fields))
# Name the struct for more readable LLVM IR.
return 'typedef %s %s { %s } %s;'%(('struct','union')[self.isUnion],
name, ' '.join(fields), name)
class ArrayType(Type):
def __init__(self, index, isVector, elementType, size):
if isVector:
# Note that for vectors, this is the size in bytes.
assert size > 0
else:
assert size is None or size >= 0
self.index = index
self.isVector = isVector
self.elementType = elementType
self.size = size
if isVector:
eltSize = self.elementType.sizeof()
assert not (self.size % eltSize)
self.numElements = self.size // eltSize
else:
self.numElements = self.size
def __str__(self):
if self.isVector:
return 'vector (%s)[%d]'%(self.elementType,self.size)
elif self.size is not None:
return '(%s)[%d]'%(self.elementType,self.size)
else:
return '(%s)[]'%(self.elementType,)
def getTypedefDef(self, name, printer):
elementName = printer.getTypeName(self.elementType)
if self.isVector:
return 'typedef %s %s __attribute__ ((vector_size (%d)));'%(elementName,
name,
self.size)
else:
if self.size is None:
sizeStr = ''
else:
sizeStr = str(self.size)
return 'typedef %s %s[%s];'%(elementName, name, sizeStr)
class ComplexType(Type):
def __init__(self, index, elementType):
self.index = index
self.elementType = elementType
def __str__(self):
return '_Complex (%s)'%(self.elementType)
def getTypedefDef(self, name, printer):
return 'typedef _Complex %s %s;'%(printer.getTypeName(self.elementType), name)
class FunctionType(Type):
def __init__(self, index, returnType, argTypes):
self.index = index
self.returnType = returnType
self.argTypes = argTypes
def __str__(self):
if self.returnType is None:
rt = 'void'
else:
rt = str(self.returnType)
if not self.argTypes:
at = 'void'
else:
at = ', '.join(map(str, self.argTypes))
return '%s (*)(%s)'%(rt, at)
def getTypedefDef(self, name, printer):
if self.returnType is None:
rt = 'void'
else:
rt = str(self.returnType)
if not self.argTypes:
at = 'void'
else:
at = ', '.join(map(str, self.argTypes))
return 'typedef %s (*%s)(%s);'%(rt, name, at)
###
# Type enumerators
class TypeGenerator(object):
def __init__(self):
self.cache = {}
def setCardinality(self):
abstract
def get(self, N):
T = self.cache.get(N)
if T is None:
assert 0 <= N < self.cardinality
T = self.cache[N] = self.generateType(N)
return T
def generateType(self, N):
abstract
class FixedTypeGenerator(TypeGenerator):
def __init__(self, types):
TypeGenerator.__init__(self)
self.types = types
self.setCardinality()
def setCardinality(self):
self.cardinality = len(self.types)
def generateType(self, N):
return self.types[N]
# Factorial
def fact(n):
result = 1
while n > 0:
result = result * n
n = n - 1
return result
# Compute the number of combinations (n choose k)
def num_combinations(n, k):
return fact(n) / (fact(k) * fact(n - k))
# Enumerate the combinations choosing k elements from the list of values
def combinations(values, k):
# From ActiveState Recipe 190465: Generator for permutations,
# combinations, selections of a sequence
if k==0: yield []
else:
for i in xrange(len(values)-k+1):
for cc in combinations(values[i+1:],k-1):
yield [values[i]]+cc
class EnumTypeGenerator(TypeGenerator):
def __init__(self, values, minEnumerators, maxEnumerators):
TypeGenerator.__init__(self)
self.values = values
self.minEnumerators = minEnumerators
self.maxEnumerators = maxEnumerators
self.setCardinality()
def setCardinality(self):
self.cardinality = 0
for num in range(self.minEnumerators, self.maxEnumerators + 1):
self.cardinality += num_combinations(len(self.values), num)
def generateType(self, n):
# Figure out the number of enumerators in this type
numEnumerators = self.minEnumerators
valuesCovered = 0
while numEnumerators < self.maxEnumerators:
comb = num_combinations(len(self.values), numEnumerators)
if valuesCovered + comb > n:
break
numEnumerators = numEnumerators + 1
valuesCovered += comb
# Find the requested combination of enumerators and build a
# type from it.
i = 0
for enumerators in combinations(self.values, numEnumerators):
if i == n - valuesCovered:
return EnumType(n, enumerators)
i = i + 1
assert False
class ComplexTypeGenerator(TypeGenerator):
def __init__(self, typeGen):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.setCardinality()
def setCardinality(self):
self.cardinality = self.typeGen.cardinality
def generateType(self, N):
return ComplexType(N, self.typeGen.get(N))
class VectorTypeGenerator(TypeGenerator):
def __init__(self, typeGen, sizes):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.sizes = tuple(map(int,sizes))
self.setCardinality()
def setCardinality(self):
self.cardinality = len(self.sizes)*self.typeGen.cardinality
def generateType(self, N):
S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
return ArrayType(N, True, self.typeGen.get(T), self.sizes[S])
class FixedArrayTypeGenerator(TypeGenerator):
def __init__(self, typeGen, sizes):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.sizes = tuple(size)
self.setCardinality()
def setCardinality(self):
self.cardinality = len(self.sizes)*self.typeGen.cardinality
def generateType(self, N):
S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
return ArrayType(N, false, self.typeGen.get(T), self.sizes[S])
class ArrayTypeGenerator(TypeGenerator):
def __init__(self, typeGen, maxSize, useIncomplete=False, useZero=False):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.useIncomplete = useIncomplete
self.useZero = useZero
self.maxSize = int(maxSize)
self.W = useIncomplete + useZero + self.maxSize
self.setCardinality()
def setCardinality(self):
self.cardinality = self.W * self.typeGen.cardinality
def generateType(self, N):
S,T = getNthPairBounded(N, self.W, self.typeGen.cardinality)
if self.useIncomplete:
if S==0:
size = None
S = None
else:
S = S - 1
if S is not None:
if self.useZero:
size = S
else:
size = S + 1
return ArrayType(N, False, self.typeGen.get(T), size)
class RecordTypeGenerator(TypeGenerator):
def __init__(self, typeGen, useUnion, maxSize):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.useUnion = bool(useUnion)
self.maxSize = int(maxSize)
self.setCardinality()
def setCardinality(self):
M = 1 + self.useUnion
if self.maxSize is aleph0:
S = aleph0 * self.typeGen.cardinality
else:
S = 0
for i in range(self.maxSize+1):
S += M * (self.typeGen.cardinality ** i)
self.cardinality = S
def generateType(self, N):
isUnion,I = False,N
if self.useUnion:
isUnion,I = (I&1),I>>1
fields = map(self.typeGen.get,getNthTuple(I,self.maxSize,self.typeGen.cardinality))
return RecordType(N, isUnion, fields)
class FunctionTypeGenerator(TypeGenerator):
def __init__(self, typeGen, useReturn, maxSize):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.useReturn = useReturn
self.maxSize = maxSize
self.setCardinality()
def setCardinality(self):
if self.maxSize is aleph0:
S = aleph0 * self.typeGen.cardinality()
elif self.useReturn:
S = 0
for i in range(1,self.maxSize+1+1):
S += self.typeGen.cardinality ** i
else:
S = 0
for i in range(self.maxSize+1):
S += self.typeGen.cardinality ** i
self.cardinality = S
def generateType(self, N):
if self.useReturn:
# Skip the empty tuple
argIndices = getNthTuple(N+1, self.maxSize+1, self.typeGen.cardinality)
retIndex,argIndices = argIndices[0],argIndices[1:]
retTy = self.typeGen.get(retIndex)
else:
retTy = None
argIndices = getNthTuple(N, self.maxSize, self.typeGen.cardinality)
args = map(self.typeGen.get, argIndices)
return FunctionType(N, retTy, args)
class AnyTypeGenerator(TypeGenerator):
def __init__(self):
TypeGenerator.__init__(self)
self.generators = []
self.bounds = []
self.setCardinality()
self._cardinality = None
def getCardinality(self):
if self._cardinality is None:
return aleph0
else:
return self._cardinality
def setCardinality(self):
self.bounds = [g.cardinality for g in self.generators]
self._cardinality = sum(self.bounds)
cardinality = property(getCardinality, None)
def addGenerator(self, g):
self.generators.append(g)
for i in range(100):
prev = self._cardinality
self._cardinality = None
for g in self.generators:
g.setCardinality()
self.setCardinality()
if (self._cardinality is aleph0) or prev==self._cardinality:
break
else:
raise RuntimeError,"Infinite loop in setting cardinality"
def generateType(self, N):
index,M = getNthPairVariableBounds(N, self.bounds)
return self.generators[index].get(M)
def test():
fbtg = FixedTypeGenerator([BuiltinType('char', 4),
BuiltinType('char', 4, 0),
BuiltinType('int', 4, 5)])
fields1 = AnyTypeGenerator()
fields1.addGenerator( fbtg )
fields0 = AnyTypeGenerator()
fields0.addGenerator( fbtg )
# fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) )
btg = FixedTypeGenerator([BuiltinType('char', 4),
BuiltinType('int', 4)])
etg = EnumTypeGenerator([None, '-1', '1', '1u'], 0, 3)
atg = AnyTypeGenerator()
atg.addGenerator( btg )
atg.addGenerator( RecordTypeGenerator(fields0, False, 4) )
atg.addGenerator( etg )
print 'Cardinality:',atg.cardinality
for i in range(100):
if i == atg.cardinality:
try:
atg.get(i)
raise RuntimeError,"Cardinality was wrong"
except AssertionError:
break
print '%4d: %s'%(i, atg.get(i))
if __name__ == '__main__':
test() | unknown | codeparrot/codeparrot-clean | ||
"""Filename globbing utility."""
import os
import fnmatch
import re
__all__ = ["glob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not has_magic(pathname):
if os.path.exists(pathname):
return [pathname]
else:
return []
dirname, basename = os.path.split(pathname)
if has_magic(dirname):
list = glob(dirname)
else:
list = [dirname]
if not has_magic(basename):
result = []
for dirname in list:
if basename or os.path.isdir(dirname):
name = os.path.join(dirname, basename)
if os.path.exists(name):
result.append(name)
else:
result = []
for dirname in list:
sublist = glob1(dirname, basename)
for name in sublist:
result.append(os.path.join(dirname, name))
return result
def glob1(dirname, pattern):
if not dirname: dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return []
result = []
for name in names:
if name[0] != '.' or pattern[0] == '.':
if fnmatch.fnmatch(name, pattern):
result.append(name)
return result
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser engine for the grammar tables generated by pgen.
The grammar table must be loaded first.
See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.
"""
import pytoken as token
import pysymbol as symbol
class Node:
def __init__(self, type, value, context, children):
self.type = type
self.value = value
self.context = context
self.children = children
def __repr__(self):
if self.type > token.NT_OFFSET:
res = "(%s" % symbol.sym_name[self.type]
else:
res = "(%s" % token.tok_name[self.type]
if self.value:
res += ", %s" % repr(self.value)
if self.children:
res += ", [%s]" % ', '.join(map(str, self.children))
return res + ")"
def ___repr__(self):
cm = "None"
if self.children:
cm = "[%s]" % ', '.join(map(str, self.children))
return "(%s, %s, %s, %s)" % (str(self.type), repr(self.value), str(self.context), cm)
class ParseError(Exception):
"""Exception to signal the parser is stuck."""
def __init__(self, msg, type, value, context):
Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
(msg, type, value, context))
self.msg = msg
self.type = type
self.value = value
self.context = context
class Parser(object):
"""Parser engine.
The proper usage sequence is:
p = Parser(grammar, [converter]) # create instance
p.setup([start]) # prepare for parsing
<for each input token>:
if p.addtoken(...): # parse a token; may raise ParseError
break
root = p.rootnode # root of abstract syntax tree
A Parser instance may be reused by calling setup() repeatedly.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
See driver.py for how to get input tokens by tokenizing a file or
string.
Parsing is complete when addtoken() returns True; the root of the
abstract syntax tree can then be retrieved from the rootnode
instance variable. When a syntax error occurs, addtoken() raises
the ParseError exception. There is no error recovery; the parser
cannot be used after a syntax error was reported (but it can be
reinitialized by calling setup()).
"""
def __init__(self, grammar, convert=None):
"""Constructor.
The grammar argument is a grammar.Grammar instance; see the
grammar module for more information.
The parser is not ready yet for parsing; you must call the
setup() method to get it started.
The optional convert argument is a function mapping concrete
syntax tree nodes to abstract syntax tree nodes. If not
given, no conversion is done and the syntax tree produced is
the concrete syntax tree. If given, it must be a function of
two arguments, the first being the grammar (a grammar.Grammar
instance), and the second being the concrete syntax tree node
to be converted. The syntax tree is converted from the bottom
up.
A concrete syntax tree node is a (type, value, context, nodes)
tuple, where type is the node type (a token or symbol number),
value is None for symbols and a string for tokens, context is
None or an opaque value used for error reporting (typically a
(lineno, offset) pair), and nodes is a list of children for
symbols, and None for tokens.
An abstract syntax tree node may be anything; this is entirely
up to the converter function.
"""
self.grammar = grammar
self.convert = convert or (lambda grammar, node: node)
def setup(self, start=None):
"""Prepare for parsing.
This *must* be called before starting to parse.
The optional argument is an alternative start symbol; it
defaults to the grammar's start symbol.
You can use a Parser instance to parse any number of programs;
each time you call setup() the parser is reset to an initial
state determined by the (implicit or explicit) start symbol.
"""
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
newnode = Node(start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack = [stackentry]
self.rootnode = None
self.used_names = set() # Aliased to self.rootnode.used_names in pop()
def addtoken(self, type, value, context):
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabel = self.classify(type, value, context)
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
t, v = self.grammar.labels[i]
if ilabel == i:
# Look it up in the list of labels
assert t < 256
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
state = newstate
while states[state] == [(0, state)]:
self.pop()
if not self.stack:
# Done parsing!
return True
dfa, state, node = self.stack[-1]
states, first = dfa
# Done with this token
return False
elif t >= 256:
# See if it's a symbol and if we're in its first set
itsdfa = self.grammar.dfas[t]
itsstates, itsfirst = itsdfa
if ilabel in itsfirst:
# Push a symbol
self.push(t, self.grammar.dfas[t], newstate, context)
break # To continue the outer while loop
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
self.pop()
if not self.stack:
# Done parsing, but another token is input
raise ParseError("too much input",
type, value, context)
else:
# No success finding a transition
raise ParseError("bad input", type, value, context)
def classify(self, type, value, context):
"""Turn a token into a label. (Internal)"""
if type == token.NAME:
# Keep a listing of all used names
self.used_names.add(value)
# Check for reserved words
ilabel = self.grammar.keywords.get(value)
if ilabel is not None:
return ilabel
ilabel = self.grammar.tokens.get(type)
if ilabel is None:
raise ParseError("bad token", type, value, context)
return ilabel
def shift(self, type, value, newstate, context):
"""Shift a token. (Internal)"""
dfa, state, node = self.stack[-1]
newnode = Node(type, value, context, None)
newnode = self.convert(self.grammar, newnode)
if newnode is not None:
node.children.append(newnode)
self.stack[-1] = (dfa, newstate, node)
def push(self, type, newdfa, newstate, context):
"""Push a nonterminal. (Internal)"""
dfa, state, node = self.stack[-1]
newnode = Node(type, None, context, [])
self.stack[-1] = (dfa, newstate, node)
self.stack.append((newdfa, 0, newnode))
def pop(self):
"""Pop a nonterminal. (Internal)"""
popdfa, popstate, popnode = self.stack.pop()
newnode = self.convert(self.grammar, popnode)
if newnode is not None:
if self.stack:
dfa, state, node = self.stack[-1]
node.children.append(newnode)
else:
self.rootnode = newnode
#self.rootnode.used_names = self.used_names | unknown | codeparrot/codeparrot-clean | ||
//===--- ArrayCallKind.h -------------------------------------- -*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2025 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef ARRAY_CALL_KIND_H
#define ARRAY_CALL_KIND_H
/// The kind of array operation identified by looking at the semantics attribute
/// of the called function.
enum class ArrayCallKind {
kNone = 0,
kArrayPropsIsNativeTypeChecked,
kCheckSubscript,
kCheckIndex,
kGetCount,
kGetCapacity,
kGetElement,
kGetElementAddress,
kMakeMutable,
kEndMutation,
kMutateUnknown,
kReserveCapacityForAppend,
kWithUnsafeMutableBufferPointer,
kAppendContentsOf,
kAppendElement,
// The following two semantic function kinds return the result @owned
// instead of operating on self passed as parameter. If you are adding
// a function, and it has a self parameter, make sure that it is defined
// before this comment.
kArrayInit,
kArrayInitEmpty,
kArrayUninitialized,
kArrayUninitializedIntrinsic,
kArrayFinalizeIntrinsic
};
#endif | c | github | https://github.com/apple/swift | include/swift/SILOptimizer/Analysis/ArrayCallKind.h |
import yaml, canonical
def test_canonical_scanner(canonical_filename, verbose=False):
data = open(canonical_filename, 'rb').read()
tokens = list(yaml.canonical_scan(data))
assert tokens, tokens
if verbose:
for token in tokens:
print token
test_canonical_scanner.unittest = ['.canonical']
def test_canonical_parser(canonical_filename, verbose=False):
data = open(canonical_filename, 'rb').read()
events = list(yaml.canonical_parse(data))
assert events, events
if verbose:
for event in events:
print event
test_canonical_parser.unittest = ['.canonical']
def test_canonical_error(data_filename, canonical_filename, verbose=False):
data = open(data_filename, 'rb').read()
try:
output = list(yaml.canonical_load_all(data))
except yaml.YAMLError, exc:
if verbose:
print exc
else:
raise AssertionError("expected an exception")
test_canonical_error.unittest = ['.data', '.canonical']
test_canonical_error.skip = ['.empty']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals()) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado #
# (pacoqueen@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## consulta_ventas_solo_tickets.py - sum((PVP - IVA) * porcentaje_tarifa)
###################################################################
## NOTAS:
## - Solo cuenta lineas de venta, no servicios (que además no se
## pueden vender por TPV, ergo no tienen ticket).
## - Las LDVs de tickets facturados se ignoran.
###################################################################
## Changelog:
##
###################################################################
## Consulta tanto por ticket como agrupada por tipo de material (familia).
###################################################################
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, sqlobject
import sys, os
try:
import pclases
except ImportError:
sys.path.append(os.path.join('..', 'framework'))
import pclases
import mx
try:
import geninformes
except ImportError:
sys.path.append(os.path.join('..', 'informes'))
import geninformes
try:
from treeview2pdf import treeview2pdf
except ImportError:
sys.path.append(os.path.join("..", "informes"))
from treeview2pdf import treeview2pdf
try:
from treeview2csv import treeview2csv
except ImportError:
sys.path.append(os.path.join("..", "informes"))
from treeview2pdf import treeview2pdf
from informes import abrir_pdf, abrir_csv
import ventana_progreso
class ConsultaBeneficioSoloTickets(Ventana):
def __init__(self, objeto = None, usuario = None):
self.usuario = usuario
Ventana.__init__(self, 'consulta_ventas_ticket.glade', objeto)
connections = {'b_salir/clicked': self.salir,
'b_buscar/clicked': self.buscar,
'b_imprimir/clicked': self.imprimir,
'b_exportar/clicked': self.exportar,
'b_fecha_inicio/clicked': self.set_inicio,
'b_fecha_fin/clicked': self.set_fin}
self.add_connections(connections)
cols = (('Fecha', 'gobject.TYPE_STRING', False, True, False, None),
('Ticket', 'gobject.TYPE_STRING',False,True,True,None),
('Imp. total', 'gobject.TYPE_STRING',False,True,False,None),
('Imp. (s/IVA)','gobject.TYPE_STRING',False,True,False,None),
('Ben. sobre tarifa', 'gobject.TYPE_STRING',
False, True, False, None),
('ID','gobject.TYPE_STRING', False, False, False, None))
utils.preparar_treeview(self.wids['tv_datos'], cols)
for col in self.wids['tv_datos'].get_columns()[2:]:
for cell in col.get_cell_renderers():
cell.set_property("xalign", 1.0)
col.set_alignment(0.5)
self.wids['tv_datos'].connect("row-activated", self.abrir_producto)
self.fin = mx.DateTime.today()
#self.inicio = mx.DateTime.DateTimeFrom(day = 1, month = self.fin.month, year = self.fin.year)
self.inicio = self.fin
self.wids['e_fechafin'].set_text(utils.str_fecha(self.fin))
self.wids['e_fechainicio'].set_text(utils.str_fecha(self.inicio))
self.wids['hbox1'].set_property("visible", False)
self.wids['hbox6'].set_property("visible", False)
gtk.main()
def abrir_producto(self, tv, path, vc):
"""
Abre el producto al que se le ha hecho doble clic en una ventana nueva.
"""
model = tv.get_model()
tipo_e_id = model[path][-1]
if "LDV" in tipo_e_id:
tipo, id = tipo_e_id.split(':')
ldv = pclases.LineaDeVenta.get(id)
producto = ldv.producto
if isinstance(producto, pclases.ProductoVenta):
if producto.es_rollo():
import productos_de_venta_rollos
ventana_producto = productos_de_venta_rollos.ProductosDeVentaRollos(producto, usuario = self.usuario)
elif producto.es_bala() or producto.es_bigbag():
import productos_de_venta_balas
ventana_producto = productos_de_venta_balas.ProductosDeVentaBalas(producto, usuario = self.usuario)
elif isinstance(producto, pclases.ProductoCompra):
import productos_compra
ventana_producto = productos_compra.ProductosCompra(producto, usuario = self.usuario)
def chequear_cambios(self):
pass
def rellenar_tabla_por_ticket(self, resultados):
"""
Rellena el model con los items de la consulta
"""
model = self.wids['tv_datos'].get_model()
model.clear()
totfact = totsiniva = totbeneficio = totbeneficio_cobro = 0.0
self.wids['tv_datos'].freeze_child_notify()
self.wids['tv_datos'].set_model(None)
totcobrado = totpendiente = 0.0
total_costo = total_costo_cobrado = 0.0
tratados = {}
for material in resultados:
for fecha in resultados[material]:
if fecha:
str_fecha = utils.str_fecha(fecha)
else:
str_fecha = ""
for ldv in resultados[material][fecha]:
# Para que coincidan los totales con la suma de las
# columnas y por coherencia con todas las cifras de la
# ventana, es necesario redondear a 2 decimales.
subtotal = round(ldv.get_subtotal(iva = True), 2)
subtotal_siva = round(ldv.get_subtotal(iva = False), 2)
beneficio = round(ldv.calcular_beneficio(), 2)
costo = round(ldv.calcular_precio_costo()*ldv.cantidad, 2)
fac_alb_tic = "Ticket %d" % ldv.ticket.numticket
cobrado = subtotal
pendiente = 0.0
beneficio_cobro = beneficio
costo_cobrado = costo
# Los tickets se asume que se cobran siempre, por
# tanto el costo de los productos sobre lo cobrado
# es del 100%.
desc_producto = utils.wrap(ldv.producto.descripcion, 40)
try:
beneficio_costo = 100.0 * beneficio / costo
except ZeroDivisionError:
beneficio_costo = 0.0
if ldv.ticket not in tratados:
padre_ticket = model.append(None,
(str_fecha,
ldv.ticket.numticket,
"0",
"0",
"0",
""))
tratados[ldv.ticket] = padre_ticket
else:
padre_ticket = tratados[ldv.ticket]
model.append(padre_ticket, (desc_producto,
fac_alb_tic,
utils.float2str(subtotal),
utils.float2str(subtotal_siva),
"%s (%s%%)" % (
utils.float2str(beneficio),
utils.float2str(
beneficio_costo)),
"LDV:%d" % ldv.id))
# Actualizo totales en memoria y en nodos padre TreeView
totfact += subtotal
totsiniva += subtotal_siva
totbeneficio += beneficio
totbeneficio_cobro += beneficio_cobro
totcobrado += cobrado
totpendiente += pendiente
total_costo += costo
total_costo_cobrado += costo_cobrado
model[padre_ticket][2] = utils.float2str(
utils._float(model[padre_ticket][2])
+ subtotal)
model[padre_ticket][3] = utils.float2str(
utils._float(model[padre_ticket][3])
+ subtotal_siva)
model[padre_ticket][4] = utils.float2str(
utils._float(model[padre_ticket][4])
+ beneficio)
self.rellenar_totales(totfact, totsiniva, totbeneficio,
totcobrado, totpendiente, totbeneficio_cobro,
total_costo, total_costo_cobrado)
self.wids['tv_datos'].set_model(model)
self.wids['tv_datos'].thaw_child_notify()
def rellenar_tabla(self, resultados):
"""
Rellena el model con los items de la consulta
"""
model = self.wids['tv_datos'].get_model()
model.clear()
totfact = totsiniva = totbeneficio = totbeneficio_cobro = 0.0
self.wids['tv_datos'].freeze_child_notify()
self.wids['tv_datos'].set_model(None)
totcobrado = totpendiente = 0.0
total_costo = total_costo_cobrado = 0.0
for material in resultados:
if material != None:
nombre_mat = material.descripcion
else:
nombre_mat = ""
padre_mat = model.append(None, (nombre_mat,
"",
"0",
"0",
"0",
"M:%d" % (material
and material.id
or -1)))
for fecha in resultados[material]:
if fecha:
str_fecha = utils.str_fecha(fecha)
else:
str_fecha = ""
padre_fec = model.append(padre_mat, (str_fecha,
"",
"0",
"0",
"0",
""))
for ldv in resultados[material][fecha]:
# Para que coincidan los totales con la suma de las
# columnas y por coherencia con todas las cifras de la
# ventana, es necesario redondear a 2 decimales.
subtotal = round(ldv.get_subtotal(iva = True), 2)
subtotal_siva = round(ldv.get_subtotal(iva = False), 2)
beneficio = round(ldv.calcular_beneficio(), 2)
costo = round(ldv.calcular_precio_costo()*ldv.cantidad, 2)
if ldv.facturaVenta:
fac_alb_tic = ldv.facturaVenta.numfactura
cobradofra = ldv.facturaVenta.calcular_cobrado()
pendientefra = ldv.facturaVenta.calcular_pendiente_cobro()
try:
fraccion = cobradofra / (cobradofra + pendientefra)
except ZeroDivisionError:
fraccion = 1.0
cobrado = subtotal * fraccion
pendiente = subtotal - cobrado
beneficio_cobro = beneficio * fraccion
costo_cobrado = costo * fraccion
elif ldv.albaranSalida:
fac_alb_tic = ldv.albaranSalida.numalbaran
cobrado = 0.0
pendiente = subtotal
beneficio_cobro = 0.0
costo_cobrado = 0.0
elif ldv.ticket:
fac_alb_tic = "Ticket %d" % ldv.ticket.numticket
cobrado = subtotal
pendiente = 0.0
beneficio_cobro = beneficio
costo_cobrado = costo
# Los tickets se asume que se cobran siempre, por
# tanto el costo de los productos sobre lo cobrado
# es del 100%.
else:
fac_alb_tic = ""
cobrado = pendiente = beneficio_cobro = 0.0
costo_cobrado = 0.0
desc_producto = utils.wrap(ldv.producto.descripcion, 40)
try:
beneficio_costo = 100.0 * beneficio / costo
except ZeroDivisionError:
beneficio_costo = 0.0
model.append(padre_fec, (desc_producto,
fac_alb_tic,
utils.float2str(subtotal),
utils.float2str(subtotal_siva),
"%s (%s%%)" % (
utils.float2str(beneficio),
utils.float2str(
beneficio_costo)),
"LDV:%d" % ldv.id))
# Actualizo totales en memoria y en nodos padre TreeView
totfact += subtotal
totsiniva += subtotal_siva
totbeneficio += beneficio
totbeneficio_cobro += beneficio_cobro
totcobrado += cobrado
totpendiente += pendiente
total_costo += costo
total_costo_cobrado += costo_cobrado
model[padre_fec][2] = utils.float2str(
utils._float(model[padre_fec][2])
+ subtotal)
model[padre_fec][3] = utils.float2str(
utils._float(model[padre_fec][3])
+ subtotal_siva)
model[padre_fec][4] = utils.float2str(
utils._float(model[padre_fec][4])
+ beneficio)
model[padre_mat][2] = utils.float2str(
utils._float(model[padre_mat][2])
+ subtotal)
model[padre_mat][3] = utils.float2str(
utils._float(model[padre_mat][3])
+ subtotal_siva)
model[padre_mat][4] = utils.float2str(
utils._float(model[padre_mat][4])
+ beneficio)
self.rellenar_totales(totfact, totsiniva, totbeneficio,
totcobrado, totpendiente, totbeneficio_cobro,
total_costo, total_costo_cobrado)
self.wids['tv_datos'].set_model(model)
self.wids['tv_datos'].thaw_child_notify()
def rellenar_tabla_por_proveedor(self, resultados):
    """
    Fills the TreeView model with the query results, grouped by supplier.

    `resultados` is the nested dict built by add_ldv_a_diccionario_resultados:
    {material: {fecha: [ldv, ...]}}. One parent row is created per supplier
    (or "Sin proveedor") and one child row per sale line; totals are
    accumulated both in memory and in the parent rows.
    """
    model = self.wids['tv_datos'].get_model()
    model.clear()
    totfact = totsiniva = totbeneficio = totbeneficio_cobro = 0.0
    # Detach the model while filling it to avoid per-row redraws.
    self.wids['tv_datos'].freeze_child_notify()
    self.wids['tv_datos'].set_model(None)
    totcobrado = totpendiente = 0.0
    total_costo = total_costo_cobrado = 0.0
    padres = {}  # supplier -> parent TreeIter
    for material in resultados:
        for fecha in resultados[material]:
            for ldv in resultados[material][fecha]:
                # Round to 2 decimals so totals match the sum of the
                # columns and stay consistent with the rest of the window.
                subtotal = round(ldv.get_subtotal(iva = True), 2)
                subtotal_siva = round(ldv.get_subtotal(iva = False), 2)
                beneficio = round(ldv.calcular_beneficio(), 2)
                costo = round(ldv.calcular_precio_costo()*ldv.cantidad, 2)
                if ldv.facturaVenta:
                    fac_alb_tic = ldv.facturaVenta.numfactura
                    cobradofra = ldv.facturaVenta.calcular_cobrado()
                    pendientefra = ldv.facturaVenta.calcular_pendiente_cobro()
                    try:
                        # Fraction of the invoice that has been collected.
                        fraccion = cobradofra / (cobradofra + pendientefra)
                    except ZeroDivisionError:
                        # Invoice with nothing charged nor pending: treat
                        # it as fully collected.
                        fraccion = 1.0
                    cobrado = subtotal * fraccion
                    pendiente = subtotal - cobrado
                    beneficio_cobro = beneficio * fraccion
                    costo_cobrado = costo * fraccion
                elif ldv.albaranSalida:
                    # Delivery note without invoice: nothing collected yet.
                    fac_alb_tic = ldv.albaranSalida.numalbaran
                    cobrado = 0.0
                    pendiente = subtotal
                    beneficio_cobro = 0.0
                    costo_cobrado = 0.0
                elif ldv.ticket:
                    fac_alb_tic = "Ticket %d" % ldv.ticket.numticket
                    cobrado = subtotal
                    pendiente = 0.0
                    beneficio_cobro = beneficio
                    costo_cobrado = costo
                    # Tickets are assumed to always be paid, so the cost of
                    # the products over what was collected is 100%.
                else:
                    # Loose sale line not linked to any document.
                    fac_alb_tic = ""
                    cobrado = pendiente = beneficio_cobro = 0.0
                    costo_cobrado = 0.0
                desc_producto = utils.wrap(ldv.producto.descripcion, 40)
                try:
                    beneficio_costo = 100.0 * beneficio / costo
                except ZeroDivisionError:
                    beneficio_costo = 0.0
                proveedor = ldv.get_proveedor()
                try:
                    padre = padres[proveedor]
                except KeyError:
                    # First line for this supplier: create the parent row
                    # with zeroed totals (updated below).
                    padre = padres[proveedor] = model.append(None,
                        (proveedor and proveedor.nombre
                            or "Sin proveedor",
                         "-",
                         utils.float2str(0.0),
                         utils.float2str(0.0),
                         utils.float2str(0.0),
                         proveedor and proveedor.get_puid() or ""
                        ))
                model.append(padre, (desc_producto,
                                     fac_alb_tic,
                                     utils.float2str(subtotal),
                                     utils.float2str(subtotal_siva),
                                     "%s (%s%%)" % (
                                        utils.float2str(beneficio),
                                        utils.float2str(
                                            beneficio_costo)),
                                     ldv.get_puid()
                                    ))
                # Update totals in memory and in the parent TreeView rows.
                totfact += subtotal
                totsiniva += subtotal_siva
                totbeneficio += beneficio
                totbeneficio_cobro += beneficio_cobro
                totcobrado += cobrado
                totpendiente += pendiente
                total_costo += costo
                total_costo_cobrado += costo_cobrado
                model[padre][2] = utils.float2str(
                    utils._float(model[padre][2])
                    + subtotal)
                model[padre][3] = utils.float2str(
                    utils._float(model[padre][3])
                    + subtotal_siva)
                model[padre][4] = utils.float2str(
                    utils._float(model[padre][4])
                    + beneficio)
    self.rellenar_totales(totfact, totsiniva, totbeneficio,
                          totcobrado, totpendiente, totbeneficio_cobro,
                          total_costo, total_costo_cobrado)
    # Reattach the model and resume notifications.
    self.wids['tv_datos'].set_model(model)
    self.wids['tv_datos'].thaw_child_notify()
def rellenar_totales(self,
                     total_facturado,
                     total_siniva,
                     total_beneficio,
                     total_cobrado,
                     total_pendiente_de_cobro,
                     total_beneficio_de_lo_cobrado,
                     total_costo,
                     total_costo_cobrado):
    """
    Writes the totals into the window's entry widgets.

    Percentages are computed both over sales and over default (cost)
    price; only the cost-based percentages are currently displayed (the
    commented-out lines below show the previous, longer format).
    """
    self.wids['e_total'].set_text(utils.float2str(total_facturado))
    self.wids['e_siniva'].set_text(utils.float2str(total_siniva))
    try:
        # Profit as % of net sales. Computed but not shown in the current
        # label format (see commented-out set_text below).
        beneficio = total_beneficio * 100.0 / total_siniva
    except ZeroDivisionError:
        beneficio = 0
    try:
        beneficio_sobre_costo = total_beneficio * 100.0 / total_costo
    except ZeroDivisionError:
        beneficio_sobre_costo = 0
    try:
        # Profit as % of collected amount; also unused by the current label.
        beneficio_cobro = (total_beneficio_de_lo_cobrado * 100.0
                           / total_cobrado)
    except ZeroDivisionError:
        beneficio_cobro = 0
    try:
        beneficio_cobro_sobre_costo = (100 *
            total_beneficio_de_lo_cobrado / total_costo_cobrado)
    except ZeroDivisionError:
        beneficio_cobro_sobre_costo = 0
    #self.wids['e_beneficio'].set_text("%s (%s%% de las ventas; %s%% sobre precio defecto)" % (
    self.wids['e_beneficio'].set_text("%s (%s%% sobre precio defecto)" % (
        utils.float2str(total_beneficio),
        utils.float2str(beneficio_sobre_costo, 2, autodec = True)))
    #self.wids['e_beneficio_cobro'].set_text("%s (%s%% de lo cobrado; %s%% sobre precio defecto en cobros)" % (
    self.wids['e_beneficio_cobro'].set_text("%s (%s%% sobre precio defecto en cobros)" % (
        utils.float2str(total_beneficio_de_lo_cobrado),
        utils.float2str(beneficio_cobro_sobre_costo, 2, autodec = True)))
    self.wids['e_cobrado'].set_text(utils.float2str(total_cobrado))
    self.wids['e_pendiente'].set_text(
        utils.float2str(total_pendiente_de_cobro))
def set_inicio(self, boton):
    """Asks for the start date with a calendar dialog and stores it."""
    temp = utils.mostrar_calendario(fecha_defecto = self.inicio, padre = self.wids['ventana'])
    self.inicio = mx.DateTime.DateTimeFrom(day = temp[0], month = temp[1], year = temp[2])
    self.wids['e_fechainicio'].set_text(utils.str_fecha(self.inicio))
    # NOTE: picking a new start date also resets the end date to it, and
    # the end-date entry is refreshed accordingly.
    self.fin = self.inicio
    self.wids['e_fechafin'].set_text(utils.str_fecha(self.fin))
def set_fin(self, boton):
    """Asks for the end date with a calendar dialog and stores it.

    `mostrar_calendario` returns a (day, month, year) tuple which is
    converted to an mx.DateTime before being kept in self.fin.
    (A dead `self.fin = temp` assignment — immediately overwritten by the
    DateTimeFrom conversion — was removed.)
    """
    temp = utils.mostrar_calendario(fecha_defecto = self.fin, padre = self.wids['ventana'])
    self.fin = mx.DateTime.DateTimeFrom(day = temp[0], month = temp[1], year = temp[2])
    self.wids['e_fechafin'].set_text(utils.str_fecha(self.fin))
def buscar(self, boton):
    """
    Given the start and end dates, finds every sale line (LDV) belonging
    to tickets that have neither an invoice nor a delivery note.
    NOTE: services are NOT included.

    Results are grouped into self.resultados and pushed to the table
    using whichever grouping radio button is active.
    """
    vpro = ventana_progreso.VentanaProgreso(padre = self.wids['ventana'])
    vpro.mostrar()
    inicio = self.inicio
    fin = self.fin
    # Short aliases for the ORM classes used in the query.
    LDV = pclases.LineaDeVenta
    FV = pclases.FacturaVenta
    AS = pclases.AlbaranSalida
    T = pclases.Ticket
    # Ticket lines with no delivery note and no invoice, within
    # [inicio, fin] (fin is inclusive: the upper bound is fin + 1 day).
    ldvst = LDV.select(pclases.AND(LDV.q.ticketID == T.q.id,
                                   LDV.q.albaranSalidaID == None,
                                   LDV.q.facturaVentaID == None,
                                   T.q.fechahora >= inicio,
                                   T.q.fechahora < fin + mx.DateTime.oneDay))
    self.resultados = {}
    act = 0.0; tot = ldvst.count()
    for ldv in ldvst:
        vpro.set_valor(act/tot, "Calculando beneficio tickets...")
        add_ldv_a_diccionario_resultados(ldv, self.resultados)
        act += 1
    vpro.set_valor(1.0, "Calculando totales...")
    # Dispatch on the active grouping mode.
    if self.wids['rb_por_familia'].get_active():
        self.rellenar_tabla(self.resultados)
    elif self.wids['rb_por_ticket'].get_active():
        self.rellenar_tabla_por_ticket(self.resultados)
    elif self.wids['rb_por_proveedor'].get_active():
        self.rellenar_tabla_por_proveedor(self.resultados)
    vpro.ocultar()
def imprimir(self, boton):
    """
    Prepares the print preview of the report.

    Asks the user whether to print the fully expanded (itemized) view or
    a collapsed summary; in the summary case the tree is flattened to a
    list view and columns 2-4 get totalized.
    """
    resp = utils.dialogo(titulo = "¿IMPRIMIR DESGLOSE?",
                         texto = "Puede imprimir un resumen o todo el "
                                 "contenido de la consulta\n"
                                 "¿Desea imprimir toda la información "
                                 "desglosada?",
                         padre = self.wids['ventana'])
    if resp:
        # Itemized: expand everything before rendering.
        tv = self.wids['tv_datos']
        tv.expand_all()
        while gtk.events_pending(): gtk.main_iteration(False)
        colstotales = []
    else:
        # Summary: collapse and flatten to a plain list view.
        tv = self.wids['tv_datos']
        tv.collapse_all()
        while gtk.events_pending(): gtk.main_iteration(False)
        from consulta_ventas_por_producto import convertir_a_listview
        tv = convertir_a_listview(tv)
        colstotales = [2, 3, 4]
    strfecha = "De %s a %s" % (self.wids['e_fechainicio'].get_text(),
                               self.wids['e_fechafin'].get_text())
    abrir_pdf(treeview2pdf(tv,
                           titulo = "Beneficio sobre tarifa (tickets)",
                           fecha = strfecha,
                           numcols_a_totalizar = colstotales))
def exportar(self, boton):
    """
    Exports the data TreeView to CSV and opens the resulting file.
    """
    abrir_csv(treeview2csv(self.wids['tv_datos']))
def add_ldv_a_diccionario_resultados(ldv, r):
    """Groups the sale line `ldv` into the nested results dict `r`.

    The dict has the shape {material: {fecha: [ldv, ...]}} where material
    is the purchase product's material type (or None) and fecha is taken
    from the invoice, the delivery note or the ticket — in that order of
    preference — or None when the line is attached to none of them.
    """
    material = ldv.productoCompra.tipoDeMaterial if ldv.productoCompra else None
    if ldv.facturaVenta:
        fecha = ldv.facturaVenta.fecha
    elif ldv.albaranSalida:
        fecha = ldv.albaranSalida.fecha
    elif ldv.ticket:
        fecha = utils.abs_mxfecha(ldv.ticket.fechahora)
    else:
        fecha = None
    r.setdefault(material, {}).setdefault(fecha, []).append(ldv)
if __name__ == '__main__':
    # Standalone launch of the window for manual testing.
    t = ConsultaBeneficioSoloTickets()
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import urllib
import urllib2
class RestClient:
    """Minimal REST helper over urllib2 (Python 2).

    get/post return the response body as a string and let errors
    propagate; put/delete return the HTTP status code, or 410 when the
    request fails at the urllib2 level.
    """

    def get(self, url, headers):
        """GETs `url` with `headers`; returns the response body."""
        req = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(req)
        return response.read()

    def post(self, url, para, headers):
        """POSTs the already-encoded payload `para` to `url`; returns the body."""
        req = urllib2.Request(url, para, headers)
        response = urllib2.urlopen(req)
        return response.read()

    def put(self, url, headers):
        """PUTs to `url`; returns the status code, or 410 on request failure."""
        req = urllib2.Request(url, headers=headers)
        req.get_method = lambda: 'PUT'
        try:
            response = urllib2.urlopen(req)
            return response.getcode()
        except urllib2.URLError:
            # Was a bare `except:` which also swallowed KeyboardInterrupt,
            # SystemExit and programming errors. URLError covers HTTPError
            # too, so the best-effort "return 410" contract is preserved.
            return 410

    def delete(self, url, headers):
        """DELETEs `url`; returns the status code, or 410 on request failure."""
        req = urllib2.Request(url, headers=headers)
        req.get_method = lambda: 'DELETE'
        try:
            response = urllib2.urlopen(req)
            return response.getcode()
        except urllib2.URLError:
            # Same narrowing as in put() — see comment there.
            return 410
if __name__== '__main__':
client = RestClient()
url = "https://myservername.cn/api/v1/instances"
headers= {'Authorization':' Bearer 15fd7abctsh88j6o82j}
para = {"service_name":"my_service01"}
print client.post(url,json.dumps(para),headers)
print client.get(url,headers)
print client.put(url,headers)
print client.delete(url,headers) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import api, fields, models
from openerp.exceptions import UserError
class PersonManagement(models.Model):
    """Person management record (model `myo.person.mng`).

    Holds identification and family-relation data for a person, with a
    computed age and a uniqueness constraint on the person code.
    """
    _name = 'myo.person.mng'

    name = fields.Char('Name', required=True)
    alias = fields.Char('Alias', help='Common name that the Person is referred.')
    code = fields.Char(string='Person Code', required=False)
    notes = fields.Text(string='Notes')

    date_inclusion = fields.Datetime("Inclusion Date", required=False, readonly=False,
                                     default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    batch_name = fields.Char('Batch Name', required=False)
    country_id_2 = fields.Many2one('res.country', 'Nationality')
    birthday = fields.Date("Date of Birth")
    # Age in whole years, derived from birthday (see _compute_age).
    age = fields.Char(
        string='Age',
        compute='_compute_age',
        store=True
    )
    estimated_age = fields.Char(string='Estimated Age', required=False)
    spouse_name = fields.Char('Spouse Name')
    spouse_id = fields.Many2one('myo.person', 'Spouse', ondelete='restrict')
    father_name = fields.Char('Father Name')
    father_id = fields.Many2one('myo.person', 'Father', ondelete='restrict')
    mother_name = fields.Char('Mother Name')
    mother_id = fields.Many2one('myo.person', 'Mother', ondelete='restrict')
    responsible_name = fields.Char('Responsible Name')
    responsible_id = fields.Many2one('myo.person', 'Responsible', ondelete='restrict')
    identification_id = fields.Char('Person ID')
    otherid = fields.Char('Other ID')
    gender = fields.Selection(
        [('M', 'Male'),
         ('F', 'Female')
         ], 'Gender'
    )
    marital = fields.Selection(
        [('single', 'Single'),
         ('married', 'Married'),
         ('widower', 'Widower'),
         ('divorced', 'Divorced'),
         ], 'Marital Status'
    )
    active = fields.Boolean('Active',
                            help="If unchecked, it will allow you to hide the person without removing it.",
                            default=1)
    person_id = fields.Many2one('myo.person', 'Person')

    _order = 'name'

    _sql_constraints = [
        ('code_uniq',
         'UNIQUE(code)',
         u'Error! The Person Code must be unique!'
         )
    ]

    @api.multi
    @api.constrains('birthday')
    def _check_birthday(self):
        """Rejects dates of birth in the future.

        Fixed: records without a birthday are now skipped — previously an
        unset (falsy) birthday was compared directly against today's date
        string.
        """
        for person in self:
            if person.birthday and person.birthday > fields.Date.today():
                raise UserError(u'Error! Date of Birth must be in the past!')

    @api.one
    @api.depends('birthday')
    def _compute_age(self):
        """Computes `age` in whole years from `birthday` (ISO date string)."""
        now = datetime.now()
        if self.birthday:
            dob = datetime.strptime(self.birthday, '%Y-%m-%d')
            delta = relativedelta(now, dob)
            # self.age = str(delta.years) + "y " + str(delta.months) + "m " + str(delta.days) + "d"
            self.age = str(delta.years)
        else:
            self.age = "No Date of Birth!"
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_json_output."""
import re
def normalize(obj):
  """Normalize output object.

  Args:
    obj: Google Test's JSON output object to normalize.

  Returns:
    Normalized output without any references to transient information that may
    change from run to run.
  """

  def _scrub_location(value):
    """Replaces '<path>/<file>:<line>' prefixes and stack traces with '*'."""
    value = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value)
    return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', value)

  def _normalize(key, value):
    if key == 'time':
      return re.sub(r'^\d+(\.\d+)?s$', '*', value)
    elif key == 'timestamp':
      return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ$', '*', value)
    elif key in ('failure', 'message'):
      # Both fields carry file:line prefixes and stack traces; the two
      # previously-duplicated branches are merged into one helper.
      return _scrub_location(value)
    elif key == 'file':
      return re.sub(r'^.*[/\\](.*)', '\\1', value)
    else:
      return normalize(value)

  if isinstance(obj, dict):
    return {k: _normalize(k, v) for k, v in obj.items()}
  if isinstance(obj, list):
    return [normalize(x) for x in obj]
  else:
    return obj
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import SQLITE_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.sqlite.enumeration import Enumeration
from plugins.dbms.sqlite.filesystem import Filesystem
from plugins.dbms.sqlite.fingerprint import Fingerprint
from plugins.dbms.sqlite.syntax import Syntax
from plugins.dbms.sqlite.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class SQLiteMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
    """
    This class defines SQLite methods
    """

    def __init__(self):
        # Databases that must be skipped during enumeration.
        self.excludeDbsList = SQLITE_SYSTEM_DBS
        # Each mixin is initialized explicitly, in declaration order.
        Syntax.__init__(self)
        Fingerprint.__init__(self)
        Enumeration.__init__(self)
        Filesystem.__init__(self)
        Miscellaneous.__init__(self)
        Takeover.__init__(self)

# Register the SQLite-specific string escaper in the global unescaper map.
unescaper[DBMS.SQLITE] = Syntax.escape
# Copyright (C) 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import functools
import os
from ycmd.tests.test_utils import ClearCompletionsCache, SetUpApp
shared_app = None
def PathToTestFile( *args ):
  """Returns the absolute path of a file under this package's testdata
  directory, joining any extra path components passed in."""
  return os.path.join( os.path.dirname( os.path.abspath( __file__ ) ),
                       'testdata', *args )
def setUpPackage():
  """Initializes the ycmd server as a WebTest application that will be shared
  by all tests using the SharedYcmd decorator in this package. Additional
  configuration that is common to these tests, like starting a semantic
  subserver, should be done here."""
  global shared_app

  shared_app = SetUpApp()
def SharedYcmd( test ):
  """Decorator for tests in this package: passes the package-wide shared ycmd
  application as the test's first argument and clears the completions cache
  before each run.

  Do NOT attach it to test generators but directly to the yielded tests."""
  @functools.wraps( test )
  def RunWithSharedApp( *args, **kwargs ):
    ClearCompletionsCache()
    return test( shared_app, *args, **kwargs )
  return RunWithSharedApp
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 13:06:14 2013
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.stats.power import TTestPower, TTestIndPower, tt_solve_power
if __name__ == '__main__':
    # Demo/smoke script: for each power solver, compute one parameter from
    # the other three and print "calculated, desired" pairs so the
    # roundtrips can be eyeballed.
    effect_size, alpha, power = 0.5, 0.05, 0.8

    # One-sample t-test, class interface.
    ttest_pow = TTestPower()
    print('\nroundtrip - root with respect to all variables')
    print('\n calculated, desired')

    nobs_p = ttest_pow.solve_power(effect_size=effect_size, nobs=None, alpha=alpha, power=power)
    print('nobs ', nobs_p)
    print('effect', ttest_pow.solve_power(effect_size=None, nobs=nobs_p, alpha=alpha, power=power), effect_size)

    print('alpha ', ttest_pow.solve_power(effect_size=effect_size, nobs=nobs_p, alpha=None, power=power), alpha)
    print('power ', ttest_pow.solve_power(effect_size=effect_size, nobs=nobs_p, alpha=alpha, power=None), power)

    # Same roundtrips through the function interface.
    print('\nroundtrip - root with respect to all variables')
    print('\n calculated, desired')

    print('nobs ', tt_solve_power(effect_size=effect_size, nobs=None, alpha=alpha, power=power), nobs_p)
    print('effect', tt_solve_power(effect_size=None, nobs=nobs_p, alpha=alpha, power=power), effect_size)
    print('alpha ', tt_solve_power(effect_size=effect_size, nobs=nobs_p, alpha=None, power=power), alpha)
    print('power ', tt_solve_power(effect_size=effect_size, nobs=nobs_p, alpha=alpha, power=None), power)

    # One-sided alternative.
    print('\none sided')
    nobs_p1 = tt_solve_power(effect_size=effect_size, nobs=None, alpha=alpha, power=power, alternative='larger')
    print('nobs ', nobs_p1)
    print('effect', tt_solve_power(effect_size=None, nobs=nobs_p1, alpha=alpha, power=power, alternative='larger'), effect_size)
    print('alpha ', tt_solve_power(effect_size=effect_size, nobs=nobs_p1, alpha=None, power=power, alternative='larger'), alpha)
    print('power ', tt_solve_power(effect_size=effect_size, nobs=nobs_p1, alpha=alpha, power=None, alternative='larger'), power)

    #start_ttp = dict(effect_size=0.01, nobs1=10., alpha=0.15, power=0.6)

    # Two-sample (independent) t-test roundtrips, including the ratio of
    # the two sample sizes.
    ttind_solve_power = TTestIndPower().solve_power

    print('\nroundtrip - root with respect to all variables')
    print('\n calculated, desired')

    nobs_p2 = ttind_solve_power(effect_size=effect_size, nobs1=None, alpha=alpha, power=power)
    print('nobs ', nobs_p2)
    print('effect', ttind_solve_power(effect_size=None, nobs1=nobs_p2, alpha=alpha, power=power), effect_size)
    print('alpha ', ttind_solve_power(effect_size=effect_size, nobs1=nobs_p2, alpha=None, power=power), alpha)
    print('power ', ttind_solve_power(effect_size=effect_size, nobs1=nobs_p2, alpha=alpha, power=None), power)
    print('ratio ', ttind_solve_power(effect_size=effect_size, nobs1=nobs_p2, alpha=alpha, power=power, ratio=None), 1)

    # Sanity check: lower requested power needs ratio < 1, higher > 1.
    print('\ncheck ratio')
    print('smaller power', ttind_solve_power(effect_size=effect_size, nobs1=nobs_p2, alpha=alpha, power=0.7, ratio=None), '< 1')
    print('larger power ', ttind_solve_power(effect_size=effect_size, nobs1=nobs_p2, alpha=alpha, power=0.9, ratio=None), '> 1')
# -*- coding: utf-8 -*-
"""
/***************************************************************************
vfkPluginDialog
A QGIS plugin
Plugin umoznujici praci s daty katastru nemovitosti
-------------------
begin : 2015-06-11
git sha : $Format:%H$
copyright : (C) 2015 by Stepan Bambula
email : stepan.bambula@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from abc import ABCMeta, abstractmethod
class TPair(object):
    """Simple mutable pair with named `first`/`second` slots (both default
    to an empty unicode string)."""

    def __init__(self, first=u'', second=u''):
        self.first, self.second = first, second
class VfkDocument:
    """Abstract base for VFK report/document writers.

    Concrete subclasses render the same logical document structure
    (headings, itemized lists, tables, links, paragraphs) to a specific
    output format; every abstract method below is a formatting hook.
    """
    # NOTE(review): `__metaclass__` is the Python 2 way to attach ABCMeta;
    # under Python 3 this assignment would not enforce abstractness.
    __metaclass__ = ABCMeta

    def __init__(self):
        pass

    # --- document frame ---------------------------------------------------
    @abstractmethod
    def header(self):
        pass

    @abstractmethod
    def footer(self):
        pass

    # --- headings ---------------------------------------------------------
    @abstractmethod
    def heading1(self, text):
        pass

    @abstractmethod
    def heading2(self, text):
        pass

    @abstractmethod
    def heading3(self, text):
        pass

    # --- itemized lists ---------------------------------------------------
    @abstractmethod
    def beginItemize(self):
        pass

    @abstractmethod
    def endItemize(self):
        pass

    @abstractmethod
    def beginItem(self):
        pass

    @abstractmethod
    def endItem(self):
        pass

    @abstractmethod
    def item(self, text):
        pass

    # --- tables -----------------------------------------------------------
    @abstractmethod
    def beginTable(self):
        pass

    @abstractmethod
    def endTable(self):
        pass

    @abstractmethod
    def tableHeader(self, columns):
        pass

    @abstractmethod
    def tableRow(self, columns):
        pass

    @abstractmethod
    def tableRowOneColumnSpan(self, text):
        pass

    # --- inline elements --------------------------------------------------
    @abstractmethod
    def link(self, href, text):
        pass

    @abstractmethod
    def superScript(self, text):
        pass

    @abstractmethod
    def newLine(self):
        pass

    # --- convenience whole-structure emitters -----------------------------
    @abstractmethod
    def keyValueTable(self, content):
        pass

    @abstractmethod
    def paragraph(self, text):
        pass

    @abstractmethod
    def table(self, content, header):
        pass

    @abstractmethod
    def text(self, text):
        pass

    # --- table bookkeeping used by writers --------------------------------
    @abstractmethod
    def discardLastBeginTable(self):
        pass

    @abstractmethod
    def isLastTableEmpty(self):
        pass
#!/usr/bin/env python
import os
import platform
import hyperspeed.utils
import shutil
from distutils.spawn import find_executable
import hyperspeed
import hyperspeed.utils
from hyperspeed import mistika
# Template for a freedesktop.org .desktop launcher file. The %s slots are
# filled by desktop_link() as (executable, icon path, display name,
# working directory, terminal flag); %%f stays as the literal %f field code.
desktop_template = '''[Desktop Entry]
Categories=Multimedia;Mistika;
Exec=%s %%f
Icon=%s
MimeType=
Name=%s
Path=%s
StartupNotify=true
Terminal=%s
TerminalOptions=
Type=Application
Version=1.0
X-DBUS-ServiceName=
X-DBUS-StartupType=
X-KDE-SubstituteUID=false
X-KDE-Username=
'''
def get_desktop_links():
    """Returns the real paths of the executables launched by the items on
    the user's Desktop (macOS .app bundles, Linux .desktop entries, or
    plain links/files)."""
    tools = []
    desktop_folder_path = os.path.expanduser('~/Desktop/')
    for basename in os.listdir(desktop_folder_path):
        abs_path = os.path.join(desktop_folder_path, basename)
        if platform.system() == 'Darwin':
            # .app bundle: the binary lives in Contents/MacOS/<name>.
            darwin_executable_path = os.path.join(abs_path, 'Contents/MacOS', os.path.splitext(basename)[0])
            if os.path.isfile(darwin_executable_path):
                tools.append(os.path.realpath(darwin_executable_path))
        elif basename.endswith('.desktop'):
            # Parse the Exec= line, dropping %-style field codes.
            for line in open(abs_path):
                if line.lower().startswith('exec='):
                    executable = line.split('=', 1)[1].split('%')[0].strip()
                    tools.append(os.path.realpath(executable))
        else:
            # Plain file or symlink on the Desktop.
            tools.append(os.path.realpath(abs_path))
    return tools
def get_mistika_links():
    """Returns the real, normalized paths of the executables registered in
    Mistika's external-tools file (one "<alias> <path>" entry per line)."""
    registered = []
    if not os.path.isfile(mistika.tools_path):
        return registered
    for entry in open(mistika.tools_path):
        fields = entry.strip().split()
        # Lines without at least an alias and a path are ignored.
        if len(fields) < 2:
            continue
        registered.append(os.path.realpath(os.path.normpath(fields[1])))
    return registered
def desktop_link(alias, file_path, activated=True, icon_path=False):
    """Creates (activated=True) or removes (activated=False) a Desktop
    launcher for `file_path`, named `alias`.

    On macOS an .app wrapper is created via hyperspeed.utils.mac_app_link;
    on Linux a .desktop file is written from desktop_template. Returns
    False on write failure, True otherwise.
    """
    file_path = os.path.normpath(file_path)
    file_folder = os.path.dirname(file_path)
    if not icon_path:
        # Prefer an icon.png next to the executable, else the bundled one.
        if 'icon.png' in os.listdir(file_folder):
            icon_path = os.path.join(file_folder, 'icon.png')
        else:
            icon_path = 'res/img/hyperspeed_1024px.png'
    icon_path = os.path.abspath(icon_path)
    stored = False
    desktop_folder_path = os.path.expanduser('~/Desktop/')
    # Scan existing Desktop items: note an already-existing launcher for
    # this executable (stored=True) or delete it when deactivating.
    for basename in os.listdir(desktop_folder_path):
        abs_path = os.path.join(desktop_folder_path, basename)
        real_path = os.path.normpath(os.path.realpath(abs_path))
        if platform.system() == 'Darwin':
            darwin_executable_path = os.path.join(abs_path, 'Contents/MacOS', os.path.splitext(basename)[0])
            if os.path.isfile(darwin_executable_path) and os.path.realpath(darwin_executable_path) == file_path:
                if activated:
                    stored = True
                    break
                else:
                    print 'Removing app:', abs_path
                    try:
                        shutil.rmtree(abs_path)
                    except shutil.Error as e:
                        print 'Could not remove app:', e
        else:  # Linux
            if os.path.islink(abs_path) and real_path == os.path.realpath(file_path):
                if activated:
                    stored = True
                    break
                else:
                    print 'Removing link:', abs_path
                    os.remove(abs_path)
            elif basename.endswith('.desktop'):
                # Match .desktop entries by their Exec= target.
                for line in open(abs_path):
                    if line.lower().startswith('exec='):
                        executable = line.split('=', 1)[1].split('%')[0].strip()
                        if os.path.realpath(executable) == os.path.realpath(file_path):
                            if activated:
                                stored = True
                                break
                            else:
                                print 'Removing desktop entry:', abs_path
                                os.remove(abs_path)
    if activated and not stored:
        # No existing launcher found: create one.
        desktop_file_path = os.path.join(desktop_folder_path, alias)
        if platform.system() == 'Darwin':
            print 'Creating desktop entry:', desktop_file_path
            hyperspeed.utils.mac_app_link(
                file_path,
                desktop_file_path,
                icon_path=icon_path
            )
        else:
            desktop_file_path += '.desktop'
            print 'Creating desktop entry:', desktop_file_path
            terminal = False
            desktop_file = desktop_template % (file_path, icon_path, alias, os.path.dirname(file_path), terminal)
            try:
                open(desktop_file_path, 'w').write(desktop_file)
            except IOError as e:
                print e
                return False
    return True
def mistika_link(alias, file_path, activated=True):
    """Adds (activated=True) or removes (activated=False) the entry for
    `file_path` in Mistika's external-tools file, rewriting the whole file.

    Entries whose executable no longer resolves/exists are dropped while
    rewriting. Returns False if the tools file does not exist, True after
    a rewrite.
    """
    if not os.path.exists(mistika.tools_path):
        return False
    file_path = os.path.normpath(file_path)
    new_config = ''
    stored = False
    for line in open(mistika.tools_path):
        if len(line.strip().split()) < 2:
            continue
        line_alias, line_path = line.strip().split()[:2]
        line_path = os.path.normpath(line_path)
        if os.path.realpath(file_path) == os.path.realpath(line_path):
            # Entry for the tool being (de)registered.
            if activated:
                new_config += '%s %s %%a\n' % (alias, file_path)
                stored = True
            else:
                # Deactivating: skip the line, i.e. remove the entry.
                continue
        else:
            # Keep other entries only if their executable still exists.
            line_path = find_executable(line_path)
            if line_path == None or not os.path.exists(line_path):
                continue
            new_config += line
    if activated and not stored:
        # Tool was not present yet: append it.
        new_config += '%s %s %%a\n' % (alias, file_path)
    print '\nNew config:'
    print new_config
    open(mistika.tools_path, 'w').write(new_config)
    return True
from setuptools import setup, find_packages
import platform
with open('README.rst') as f:
    readme = f.read()

# substance/_version.py defines __version__; exec'ing it avoids importing
# the package (and pulling its dependencies) at install time.
with open('substance/_version.py') as versionFile:
    exec(versionFile.read())

install_requires = [
    'setuptools>=1.1.3',
    'PyYAML',
    'tabulate',
    'paramiko>=2.4.1',
    'netaddr',
    'requests',
    'tinydb',
    'python_hosts==0.3.3',
    'jinja2'
]

setup(name='substance',
      version=__version__,
      author='Turbulent',
      author_email='oss@turbulent.ca',
      url='https://substance.readthedocs.io/',
      license='Apache License 2.0',
      long_description=readme,
      description='Substance - Local dockerized development environment',
      install_requires=install_requires,
      # Platform-specific filesystem watchers.
      extras_require={
          ':sys.platform == "darwin"': ['macfsevents'],
          ':sys.platform == "linux"': ['watchdog']
      },
      python_requires='>=3',
      packages=find_packages(),
      package_data={'substance': ['support/*']},
      test_suite='tests',
      zip_safe=False,
      include_package_data=True,
      # Two CLI entry points: the main tool and the subenv helper.
      entry_points={
          'console_scripts': [
              'substance = substance.cli:cli',
              'subenv = substance.subenv.cli:cli'
          ],
      })
# -*- coding: utf-8 -*-
#
# pyechonest documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 30 15:51:03 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0,os.path.abspath("../../pyechonest"))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'pyechonest'
copyright = u'2013, The Echo Nest'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '9.0.0'
# The full version, including alpha/beta/rc tags.
release = '9.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['themes/']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '200x160_lt.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
    "index": "index.html",
}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyechonestdoc'
# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pyechonest.tex', u'pyechonest Documentation',
     u'The Echo Nest', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyechonest', u'pyechonest Documentation',
     [u'The Echo Nest'], 1)
]
# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'pyechonest'
epub_author = u'The Echo Nest'
epub_publisher = u'The Echo Nest'
epub_copyright = u'2012, The Echo Nest'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy positional intersphinx form; newer Sphinx
# releases prefer named keys like {'python': (url, None)} — confirm the Sphinx
# version pinned for these docs before changing it.
intersphinx_mapping = {'http://docs.python.org/': None}
# don't document the properties!
def maybe_skip_member(app, what, name, obj, skip, options):
    """autodoc-skip-member callback: keep modules intact, but on everything
    else document only plain methods (properties and other attributes are
    skipped).

    Returns True to skip the member, False to document it.
    """
    # Local import: ``inspect`` is not imported anywhere in this part of the
    # file, so the callback would otherwise raise NameError the first time
    # autodoc fires it.
    import inspect
    if what == 'module':
        return False
    return not inspect.ismethod(obj)
def setup(app):
    # Sphinx extension entry point: wire the member filter into autodoc.
    app.connect('autodoc-skip-member', maybe_skip_member)
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import time
import uuid
import pytest
from google.cloud import storage
# Seconds to let a submitted training job run before checking for artifacts.
WAIT_TIME = 300
# Bucket/project for the example-zoo CI environment; both must be present in
# the environment (a missing variable raises KeyError at import time).
ARTIFACTS_BUCKET = os.environ['EXAMPLE_ZOO_ARTIFACTS_BUCKET']
PROJECT_ID = os.environ['EXAMPLE_ZOO_PROJECT_ID']
# One submission script per tested Python runtime (2.7 and 3.5).
SUBMIT_SCRIPTS = ['submit_27.sh', 'submit_35.sh']
@pytest.fixture(scope='session')
def gcs_bucket_prefix():
    """Session-scoped fixture yielding ``(bucket, prefix)`` for test artifacts.

    Creates a unique prefix inside the shared artifacts bucket and deletes
    everything under it on teardown.
    """
    # Create a temporary prefix for storing the artifacts.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(ARTIFACTS_BUCKET)
    prefix = os.path.join('example_zoo_artifacts', str(uuid.uuid4()))
    yield (bucket, prefix)
    # Clean up after sleeping two more minutes, giving in-flight writes from
    # the (possibly still shutting down) training job time to land.
    time.sleep(120)
    for blob in bucket.list_blobs(prefix=prefix):
        blob.delete()
@pytest.mark.parametrize('submit_script', SUBMIT_SCRIPTS)
def test_mnist(gcs_bucket_prefix, submit_script):
    """Submit an AI Platform training job, wait, then check that a
    ``saved_model.pb`` artifact appeared under the session's bucket prefix.
    The job is cancelled before the artifact check so it stops billing.
    """
    bucket, prefix = gcs_bucket_prefix
    subprocess_env = os.environ.copy()
    subprocess_env['EXAMPLE_ZOO_ARTIFACTS_BUCKET'] = 'gs://{}/{}'.format(
        os.environ['EXAMPLE_ZOO_ARTIFACTS_BUCKET'], prefix)
    out = subprocess.check_output(['bash', submit_script], env=subprocess_env)
    out_str = out.decode('ascii')
    assert 'QUEUED' in out_str, 'Job submission failed: {}'.format(out_str)

    # Get jobId so we can cancel the job easily.  Use re.search, not
    # re.match: re.match anchors at the start of the string, so any line
    # gcloud prints before "jobId: ..." would previously make this blow up
    # with an AttributeError on None instead of a useful assertion.
    match = re.search(r'jobId: (.+)\b', out_str)
    assert match, 'No jobId found in submission output: {}'.format(out_str)
    job_id = match.group(1)

    time.sleep(WAIT_TIME)

    # Cancel the job.
    subprocess.check_call(['gcloud', 'ai-platform', 'jobs', 'cancel', job_id,
                           '--project', PROJECT_ID])

    blob_names = [blob.name for blob in bucket.list_blobs(prefix=prefix)]
    out_str = ' '.join(blob_names)
    assert 'saved_model.pb' in out_str, (
        'Artifact "saved_model.pb" not found in bucket {} with prefix {} '
        'after {} seconds.'.format(bucket, prefix, WAIT_TIME))
import os
import sys
def load_setup_modules(client_dir):
    """Import and return the ``setup_modules`` module found in *client_dir*.

    The directory is pushed onto the front of ``sys.path`` only for the
    duration of the import and is always popped again, even when the import
    fails.
    """
    sys.path.insert(0, client_dir)
    try:
        import setup_modules
    finally:
        sys.path.pop(0)
    return setup_modules
# Locate the autotest client library and run its module setup.  Preference
# order:
#   1. an installed ``autotest`` package,
#   2. a source checkout three directory levels above this file,
#   3. a checkout pointed at by $AUTOTEST_PATH (exit with an error otherwise).
dirname = os.path.dirname(sys.modules[__name__].__file__)
virt_test_dir = os.path.abspath(os.path.join(dirname, ".."))
sys.path.insert(0, virt_test_dir)

try:
    import autotest.client.setup_modules as setup_modules
    client_dir = os.path.dirname(setup_modules.__file__)
    sm = setup_modules
except ImportError:
    try:
        client_dir = os.path.abspath(os.path.join(dirname, "..", "..", ".."))
        sm = load_setup_modules(client_dir)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest net actually wanted here.
        try:
            client_dir = os.path.join(os.environ['AUTOTEST_PATH'], 'client')
        except KeyError:
            print("Environment variable $AUTOTEST_PATH not set. "
                  "please set it to a path containing an autotest checkout")
            print("Or install the autotest-framework package for your distro")
            sys.exit(1)
        if not os.path.isdir(client_dir):
            print('Autotest client library directory was not found at: "%s"' %
                  client_dir)
            print('Please check if the environment variable "$AUTOTEST_PATH" '
                  'points to a valid location')
            sys.exit(1)
        sm = load_setup_modules(client_dir)

sm.setup(base_path=client_dir, root_module_name="autotest.client")
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Release metadata consumed by Ansible's module/doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# YAML-in-string module documentation rendered by ansible-doc; keep it valid
# YAML.  (Fixed the "appication/text" typo in the mime_map example.)
DOCUMENTATION = '''
---
module: s3_sync
short_description: Efficiently upload multiple files to S3
description:
- The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing, inclusions/exclusions, mime types, expiration mapping, recursion, and smart directory mapping.
version_added: "2.3"
options:
  mode:
    description:
    - sync direction.
    required: true
    default: 'push'
    choices: [ push ]
  file_change_strategy:
    description:
    - Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched- they are fully skipped or fully uploaded.
    - date_size will upload if file sizes don't match or if local file modified date is newer than s3's version
    - checksum will compare etag values based on s3's implementation of chunked md5s.
    - force will always upload all files.
    required: false
    default: 'date_size'
    choices: [ force, checksum, date_size ]
  bucket:
    description:
    - Bucket name.
    required: true
  key_prefix:
    description:
    - In addition to file path, prepend s3 path with this prefix. Module will add slash at end of prefix if necessary.
    required: false
  file_root:
    description:
    - File/directory path for synchronization. This is a local path.
    - This root path is scrubbed from the key name, so subdirectories will remain as keys.
    required: true
  permission:
    description:
    - Canned ACL to apply to synced files.
    - Changing this ACL only changes newly synced files, it does not trigger a full reupload.
    required: false
    choices: [ '', private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control ]
  mime_map:
    description:
    - 'Dict entry from extension to MIME type. This will override any default/sniffed MIME type. For example C({".txt": "application/text", ".yml": "application/text"})'
    required: false
  include:
    description:
    - Shell pattern-style file matching.
    - Used before exclude to determine eligible files (for instance, only "*.gif")
    - For multiple patterns, comma-separate them.
    required: false
    default: "*"
  exclude:
    description:
    - Shell pattern-style file matching.
    - Used after include to remove files (for instance, skip "*.txt")
    - For multiple patterns, comma-separate them.
    required: false
    default: ".*"

author: tedder
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: basic upload
s3_sync:
bucket: tedder
file_root: roles/s3/files/
- name: all the options
s3_sync:
bucket: tedder
file_root: roles/s3/files
mime_map:
.yml: application/text
.json: application/text
key_prefix: config_files/web
file_change_strategy: force
permission: public-read
include: "*"
exclude: "*.txt,.*"
'''
RETURN = '''
filelist_initial:
description: file listing (dicts) from inital globbing
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"modified_epoch": 1477416706
}]
filelist_local_etag:
description: file listing (dicts) including calculated local etag
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706,
"s3_path": "s3sync/policy.json"
}]
filelist_s3:
description: file listing (dicts) including information about previously-uploaded versions
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706,
"s3_path": "s3sync/policy.json"
}]
filelist_typed:
description: file listing (dicts) with calculated or overridden mime types
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477416706
}]
filelist_actionable:
description: file listing (dicts) of files that will be uploaded after the strategy decision
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"mime_type": "application/json",
"modified_epoch": 1477931256,
"s3_path": "s3sync/policy.json",
"whysize": "151 / 151",
"whytime": "1477931256 / 1477929260"
}]
uploaded:
description: file listing (dicts) of files that were actually uploaded
returned: always
type: list
sample: [{
"bytes": 151,
"chopped_path": "policy.json",
"fullpath": "roles/cf/files/policy.json",
"s3_path": "s3sync/policy.json",
"whysize": "151 / 151",
"whytime": "1477931637 / 1477931489"
}]
'''
import os
import stat as osstat   # os.stat constants
import mimetypes
import datetime
from dateutil import tz
import hashlib
import fnmatch

# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec

# import a class, otherwise we'll use a fully qualified path
#from ansible.module_utils.ec2 import AWSRetry
import ansible.module_utils.ec2

# boto3/botocore are optional at import time; main() fails the module
# cleanly via fail_json() when they are missing.
try:
    import botocore
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
def boto_exception(err):
    '''generic error message handler'''
    # boto exceptions expose different attributes depending on version and
    # error class; try the richest one first and fall back progressively.
    if hasattr(err, 'error_message'):
        return err.error_message
    if hasattr(err, 'message'):
        return str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
    return '%s: %s' % (Exception, err)
# the following function, calculate_multipart_etag, is from tlastowka
# on github and is used under its (compatible) GPL license. So this
# license applies to the following function.
# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
#
# calculate_multipart_etag Copyright (C) 2015
# Tony Lastowka <tlastowka at gmail dot com>
# https://github.com/tlastowka
#
#
# calculate_multipart_etag is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# calculate_multipart_etag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024


def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
    """
    calculates a multipart upload etag for amazon s3

    Arguments:

    source_path -- The file to calculate the etag for
    chunk_size -- The chunk size to calculate for.
    """
    chunk_digests = []
    with open(source_path, 'rb') as source:
        for chunk in iter(lambda: source.read(chunk_size), b''):
            chunk_digests.append(hashlib.md5(chunk))

    if len(chunk_digests) == 1:
        # Single chunk: plain quoted md5, exactly like a single-part upload.
        return '"{}"'.format(chunk_digests[0].hexdigest())

    # Multipart: md5 over the concatenated per-chunk digests, with the part
    # count appended after a dash.
    combined = hashlib.md5(b"".join(d.digest() for d in chunk_digests))
    return '"{}-{}"'.format(combined.hexdigest(), len(chunk_digests))
def gather_files(fileroot, include=None, exclude=None):
    """Walk *fileroot* recursively and describe every eligible file.

    ``include`` and ``exclude`` are comma-separated shell glob patterns
    matched against the bare file name; include is applied first, then
    exclude removes matches.  Returns a list of dicts with keys
    ``fullpath``, ``chopped_path`` (relative to fileroot),
    ``modified_epoch`` and ``bytes``.
    """
    gathered = []
    for dirpath, _dirnames, filenames in os.walk(fileroot):
        for filename in filenames:
            if include and not any(fnmatch.fnmatch(filename, pattern)
                                   for pattern in include.split(',')):
                # Not on the include list, so we don't want it.
                continue
            if exclude and any(fnmatch.fnmatch(filename, pattern)
                               for pattern in exclude.split(',')):
                # Explicitly excluded, even if previously included.
                continue
            fullpath = os.path.join(dirpath, filename)
            fstat = os.stat(fullpath)
            gathered.append({
                'fullpath': fullpath,
                'chopped_path': os.path.relpath(fullpath, start=fileroot),
                'modified_epoch': fstat[osstat.ST_MTIME],
                'bytes': fstat[osstat.ST_SIZE],
            })
    return gathered
def calculate_s3_path(filelist, key_prefix=''):
    """Return copies of the entries in *filelist*, each with an added
    ``s3_path`` key (``key_prefix`` joined to the entry's chopped_path).
    The input dicts are never mutated.
    """
    return [dict(entry,
                 s3_path=os.path.join(key_prefix, entry['chopped_path']))
            for entry in filelist]
def calculate_local_etag(filelist, key_prefix=''):
    '''Really, "calculate md5", but since AWS uses their own format, we'll just call
    it a "local etag". TODO optimization: only calculate if remote key exists.'''
    # key_prefix is unused but kept so the signature matches the sibling
    # calculate_* helpers.  Input dicts are copied, not mutated.
    return [dict(entry,
                 local_etag=calculate_multipart_etag(entry['fullpath']))
            for entry in filelist]
def determine_mimetypes(filelist, override_map):
    """Annotate each file entry with a ``mime_type`` (and, when sniffed, an
    ``encoding``).

    A truthy entry in *override_map* keyed by the file extension (including
    the dot, e.g. ``'.yml'``) wins over sniffing; anything that cannot be
    identified falls back to application/octet-stream.
    """
    annotated = []
    for entry in filelist:
        item = dict(entry)
        # reminder: file extension is '.txt', not 'txt'.
        extension = os.path.splitext(entry['fullpath'])[1]
        forced = override_map.get(extension) if override_map else None
        if forced:
            item['mime_type'] = forced
        else:
            item['mime_type'], item['encoding'] = mimetypes.guess_type(
                entry['fullpath'], strict=False)
        if not item['mime_type']:
            # guess_type may return None; octet-stream is a safe default.
            item['mime_type'] = 'application/octet-stream'
        annotated.append(item)
    return annotated
def head_s3(s3, bucket, s3keys):
    """Annotate each entry with the S3 HEAD response under ``s3_head`` when
    the key exists remotely; keys that 404 are passed through un-annotated.

    Input dicts are copied, never mutated.  Any client error other than a
    404 is re-raised unchanged (previously it was wrapped in a bare
    ``Exception(err)``, which destroyed the exception type and traceback).
    """
    retkeys = []
    for entry in s3keys:
        retentry = entry.copy()
        try:
            retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
        except botocore.exceptions.ClientError as err:
            status = ''
            if hasattr(err, 'response'):
                status = str(err.response.get('ResponseMetadata', {})
                             .get('HTTPStatusCode', ''))
            if status != '404':
                # Preserve the original exception for the caller.
                raise
            # 404: key simply doesn't exist yet; leave 's3_head' unset.
        retkeys.append(retentry)
    return retkeys
def filter_list(s3, bucket, s3filelist, strategy):
    """Return the subset of *s3filelist* that actually needs uploading.

    Strategies:
      force     -- keep everything.
      checksum  -- skip entries whose remote ETag equals the local etag.
      date_size -- skip entries that are both the same size AND not newer
                   locally than the remote copy.
    Note: entries in *s3filelist* are tagged in place with '_strategy' (and
    'whytime'/'whysize'/'why' for date_size).
    """
    keeplist = list(s3filelist)
    for e in keeplist:
        e['_strategy'] = strategy

    # init/fetch info from S3 if we're going to use it for comparisons
    if not strategy == 'force':
        keeplist = head_s3(s3, bucket, s3filelist)

    # now actually run the strategies
    if strategy == 'checksum':
        for entry in keeplist:
            if entry.get('s3_head'):
                # since we have a remote s3 object, compare the values.
                if entry['s3_head']['ETag'] == entry['local_etag']:
                    # files match, so skip the entry
                    entry['skip_flag'] = True
                # else: etags differ, keep the entry.
            # else: no remote object / no etag, keep the entry.
    elif strategy == 'date_size':
        for entry in keeplist:
            if entry.get('s3_head'):
                local_modified_epoch = entry['modified_epoch']
                local_size = entry['bytes']

                # py2's datetime doesn't have a timestamp() field, so we have
                # to revert to something more awkward.
                remote_modified_datetime = entry['s3_head']['LastModified']
                delta = (remote_modified_datetime -
                         datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
                remote_modified_epoch = delta.seconds + (delta.days * 86400)

                remote_size = entry['s3_head']['ContentLength']

                entry['whytime'] = '{} / {}'.format(local_modified_epoch, remote_modified_epoch)
                entry['whysize'] = '{} / {}'.format(local_size, remote_size)

                # Skip only when the file is BOTH not newer locally AND the
                # same size.  The documented contract is "upload if sizes
                # don't match OR the local copy is newer"; the previous `or`
                # here wrongly skipped files that were resized-but-not-newer
                # or same-size-but-newer.
                if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
                    entry['skip_flag'] = True
            else:
                entry['why'] = "no s3_head"
    # else: probably 'force'. Basically we don't skip with any other strategy.

    # prune 'please skip' entries, if any.
    return [x for x in keeplist if not x.get('skip_flag')]
def upload_files(s3, bucket, filelist, params):
    """Upload every entry of *filelist* to *bucket*; returns the entries
    that were sent.

    The Content-Type comes from each entry's 'mime_type'; an optional
    canned ACL comes from params['permission'].
    """
    uploaded = []
    for entry in filelist:
        extra = {'ContentType': entry['mime_type']}
        acl = params.get('permission')
        if acl:
            extra['ACL'] = acl
        s3.upload_file(entry['fullpath'], bucket, entry['s3_path'],
                       ExtraArgs=extra, Callback=None, Config=None)
        uploaded.append(entry)
    return uploaded
def main():
    """Ansible entry point: validate arguments, connect to S3, run the
    push-mode sync pipeline and report results via exit_json/fail_json."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        mode = dict(choices=['push'], default='push'),
        file_change_strategy = dict(choices=['force','date_size','checksum'], default='date_size'),
        bucket = dict(required=True),
        key_prefix = dict(required=False, default=''),
        file_root = dict(required=True, type='path'),
        permission = dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
        retries = dict(required=False),
        mime_map = dict(required=False, type='dict'),
        exclude = dict(required=False, default=".*"),
        include = dict(required=False, default="*"),
        # future options: cache_control (string or map, perhaps), encoding, metadata, storage_class, retries
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    result = {}
    mode = module.params['mode']

    try:
        region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
        s3 = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        # Result is discarded; this call just surfaces bad credentials early.
        s3.list_buckets()
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))

    if mode == 'push':
        try:
            # Pipeline: glob -> mime sniff -> s3 key mapping -> local etag ->
            # change-strategy filter -> upload.  Each stage's output is kept
            # in `result` for the RETURN documentation above.
            result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
            result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
            result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
            result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
            result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
            result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)

            # mark changed if we actually upload something.
            if result.get('uploads') and len(result.get('uploads')):
                result['changed'] = True
            #result.update(filelist=actionable_filelist)
        except Exception as err:
            error_msg = boto_exception(err)
            import traceback  # traces get swallowed by Ansible.
            module.fail_json(msg=error_msg, traceback=traceback.format_exc().splitlines())

    module.exit_json(**result)


if __name__ == '__main__':
    main()
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008, 2009 Intel Corporation
* Authors: Andi Kleen, Fengguang Wu
*
* High level machine check handler. Handles pages reported by the
* hardware as being corrupted usually due to a multi-bit ECC memory or cache
* failure.
*
* In addition there is a "soft offline" entry point that allows stop using
* not-yet-corrupted-by-suspicious pages without killing anything.
*
* Handles page cache pages in various states. The tricky part
* here is that we can access any page asynchronously in respect to
* other VM users, because memory failures could happen anytime and
* anywhere. This could violate some of their assumptions. This is why
* this code has to be extremely careful. Generally it tries to use
* normal locking rules, as in get the standard locks, even if that means
* the error handling takes potentially a long time.
*
* It can be very tempting to add handling for obscure cases here.
* In general any code for handling new cases should only be added iff:
* - You know how to test it.
* - You have a test that can be added to mce-test
* https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
* - The case actually shows up as a frequent (top 10) page state in
* tools/mm/page-types when running a real workload.
*
* There are several operations here with exponential complexity because
* of unsuitable VM data structures. For example the operation to map back
* from RMAP chains to processes has to walk the complete process list and
* has non linear complexity with the number. But since memory corruptions
* are rare we hope to get away with this. This avoids impacting the core
* VM.
*/
#define pr_fmt(fmt) "Memory failure: " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memory-failure.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/slab.h>
#include <linux/leafops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/sysctl.h>
#define CREATE_TRACE_POINTS
#include <trace/events/memory-failure.h>
#include "swap.h"
#include "internal.h"
/* Sysctl knobs; exposed through the ctl_table declared further down. */
static int sysctl_memory_failure_early_kill __read_mostly;
static int sysctl_memory_failure_recovery __read_mostly = 1;
static int sysctl_enable_soft_offline __read_mostly = 1;

/* Global count of currently poisoned pages (see num_poisoned_pages_*). */
atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

/* Set once a real hardware-reported failure is seen (users of this flag
 * are outside this chunk). */
static bool hw_memory_failure __read_mostly = false;

/* NOTE(review): mf_mutex presumably serializes the memory-failure handling
 * paths — its lock sites are outside this chunk; verify there. */
static DEFINE_MUTEX(mf_mutex);
/* Account one more poisoned page, both globally and per memory block. */
void num_poisoned_pages_inc(unsigned long pfn)
{
	atomic_long_inc(&num_poisoned_pages);
	memblk_nr_poison_inc(pfn);
}
/*
 * Subtract @i from the poisoned-page counters.  A pfn of -1UL means "no
 * specific memory block": only the global counter is adjusted then.
 */
void num_poisoned_pages_sub(unsigned long pfn, long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
	if (pfn != -1UL)
		memblk_nr_poison_sub(pfn, i);
}
/**
 * MF_ATTR_RO - Create sysfs entry for each memory failure statistics.
 * @_name: name of the file in the per NUMA sysfs directory.
 */
#define MF_ATTR_RO(_name)					\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct memory_failure_stats *mf_stats =			\
		&NODE_DATA(dev->id)->mf_stats;			\
	return sysfs_emit(buf, "%lu\n", mf_stats->_name);	\
}								\
static DEVICE_ATTR_RO(_name)

/* One read-only sysfs file per counter in struct memory_failure_stats. */
MF_ATTR_RO(total);
MF_ATTR_RO(ignored);
MF_ATTR_RO(failed);
MF_ATTR_RO(delayed);
MF_ATTR_RO(recovered);

static struct attribute *memory_failure_attr[] = {
	&dev_attr_total.attr,
	&dev_attr_ignored.attr,
	&dev_attr_failed.attr,
	&dev_attr_delayed.attr,
	&dev_attr_recovered.attr,
	NULL,
};

/* Grouped under the "memory_failure" directory of the node device. */
const struct attribute_group memory_failure_attr_group = {
	.name = "memory_failure",
	.attrs = memory_failure_attr,
};
/* Sysctl table for the three boolean (0/1) knobs declared above; the
 * registration site is outside this chunk. */
static const struct ctl_table memory_failure_table[] = {
	{
		.procname = "memory_failure_early_kill",
		.data = &sysctl_memory_failure_early_kill,
		.maxlen = sizeof(sysctl_memory_failure_early_kill),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "memory_failure_recovery",
		.data = &sysctl_memory_failure_recovery,
		.maxlen = sizeof(sysctl_memory_failure_recovery),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "enable_soft_offline",
		.data = &sysctl_enable_soft_offline,
		.maxlen = sizeof(sysctl_enable_soft_offline),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	}
};

/* NOTE(review): interval tree of pfn ranges plus its lock; the producers
 * and consumers are outside this chunk — see the rest of the file. */
static struct rb_root_cached pfn_space_itree = RB_ROOT_CACHED;

static DEFINE_MUTEX(pfn_space_lock);
/*
 * Return values:
 *   1: the page is dissolved (if needed) and taken off from buddy,
 *   0: the page is dissolved (if needed) and not taken off from buddy,
 *   < 0: failed to dissolve.
 */
static int __page_handle_poison(struct page *page)
{
	int ret;

	/*
	 * zone_pcp_disable() can't be used here. It will
	 * hold pcp_batch_high_lock and dissolve_free_hugetlb_folio() might hold
	 * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
	 * optimization is enabled. This will break current lock dependency
	 * chain and leads to deadlock.
	 * Disabling pcp before dissolving the page was a deterministic
	 * approach because we made sure that those pages cannot end up in any
	 * PCP list. Draining PCP lists expels those pages to the buddy system,
	 * but nothing guarantees that those pages do not get back to a PCP
	 * queue if we need to refill those.
	 */
	ret = dissolve_free_hugetlb_folio(page_folio(page));
	if (!ret) {
		/* Flush per-cpu lists so the page is findable in buddy. */
		drain_all_pages(page_zone(page));
		ret = take_page_off_buddy(page);
	}

	return ret;
}
/*
 * Mark @page HWPoison, optionally first isolating it from the hugetlb /
 * buddy free paths.  Returns true when the page ends up poisoned (with an
 * extra reference taken), false when it could not be isolated.
 */
static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since
		 * dissolve_free_hugetlb_folio() returns 0 for non-hugetlb folios as well.
		 */
		if (__page_handle_poison(page) <= 0)
			/*
			 * We could fail to take off the target page from buddy
			 * for example due to racy page allocation, but that's
			 * acceptable because soft-offlined page is not broken
			 * and if someone really want to use it, they should
			 * take it.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	/* Take our own reference on the now-poisoned page. */
	page_ref_inc(page);
	num_poisoned_pages_inc(page_to_pfn(page));

	return true;
}
/* RCU-protected hook consulted by hwpoison_filter() below; presumably
 * registered by test/injection tooling — callers are outside this chunk. */
static hwpoison_filter_func_t __rcu *hwpoison_filter_func __read_mostly;

/* Publish @filter so subsequent hwpoison_filter() calls see it. */
void hwpoison_filter_register(hwpoison_filter_func_t *filter)
{
	rcu_assign_pointer(hwpoison_filter_func, filter);
}
EXPORT_SYMBOL_GPL(hwpoison_filter_register);

/* Clear the hook and wait for any in-flight hwpoison_filter() readers. */
void hwpoison_filter_unregister(void)
{
	RCU_INIT_POINTER(hwpoison_filter_func, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(hwpoison_filter_unregister);
/*
 * Run the registered filter (if any) on @p under the RCU read lock.
 * Returns 0 when no filter is registered; otherwise the filter's verdict.
 */
static int hwpoison_filter(struct page *p)
{
	int ret = 0;
	hwpoison_filter_func_t *filter;

	rcu_read_lock();
	filter = rcu_dereference(hwpoison_filter_func);
	if (filter)
		ret = filter(p);
	rcu_read_unlock();

	return ret;
}
/*
* Kill all processes that have a poisoned page mapped and then isolate
* the page.
*
* General strategy:
* Find all processes having the page mapped and kill them.
* But we keep a page reference around so that the page is not
* actually freed yet.
* Then stash the page away
*
* There's no convenient way to get back to mapped processes
* from the VMAs. So do a brute-force search over all
* running processes.
*
* Remember that machine checks are not common (or rather
* if they are common you have other problems), so this shouldn't
* be a performance issue.
*
* Also there are some races possible while we get from the
* error detection to actually handle it.
*/
/* One process to signal for a poisoned mapping; consumed by kill_proc(). */
struct to_kill {
	struct list_head nd;		/* link in the caller's kill list */
	struct task_struct *tsk;	/* task to send SIGBUS to */
	unsigned long addr;		/* user address reported as si_addr */
	short size_shift;		/* log2 granule, reported as si_addr_lsb */
};
/*
* Send all the processes who have the page mapped a signal.
* ``action optional'' if they are not immediately affected by the error
* ``action required'' if error happened in current execution context
*/
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;	/* siginfo si_addr_lsb payload */
	int ret = 0;

	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
			pfn, t->comm, task_pid_nr(t));

	/* Action-required in the faulting thread itself must be forced. */
	if ((flags & MF_ACTION_REQUIRED) && (t == current))
		ret = force_sig_mceerr(BUS_MCEERR_AR,
				 (void __user *)tk->addr, addr_lsb);
	else
		/*
		 * Signal other processes sharing the page if they have
		 * PF_MCE_EARLY set.
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);
	if (ret < 0)
		pr_info("Error sending signal to %s:%d: %d\n",
			t->comm, task_pid_nr(t), ret);
	return ret;
}
/*
 * Unknown page type encountered: try to turn the folio into PageLRU by
 * draining the per-CPU LRU caches. Hugetlb folios are left alone, and so
 * are slab folios — shrinking slab caches here would need a lightweight
 * range-based shrinker that does not exist yet.
 */
void shake_folio(struct folio *folio)
{
	if (folio_test_hugetlb(folio) || folio_test_slab(folio))
		return;

	lru_add_drain_all();
}
EXPORT_SYMBOL_GPL(shake_folio);
/* Raw-page convenience wrapper around shake_folio(). */
static void shake_page(struct page *page)
{
	shake_folio(page_folio(page));
}
/*
 * Walk @vma's page tables to find the mapping level of @address.
 * Returns the page-size shift of the present mapping (PAGE_SHIFT,
 * PMD_SHIFT or PUD_SHIFT) or 0 when no present mapping is found.
 */
static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
		unsigned long address)
{
	unsigned long ret = 0;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	/* -EFAULT means the caller failed to resolve a user address. */
	VM_BUG_ON_VMA(address == -EFAULT, vma);
	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_trans_huge(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_trans_huge(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent))
		ret = PAGE_SHIFT;
	pte_unmap(pte);
	return ret;
}
/*
* Failure handling: if we can't find or can't kill a process there's
* not much we can do. We just print a message and ignore otherwise.
*/
/*
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
*/
static void __add_to_kill(struct task_struct *tsk, const struct page *p,
			  struct vm_area_struct *vma, struct list_head *to_kill,
			  unsigned long addr)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Out of memory while machine check handling\n");
		return;
	}

	tk->addr = addr;
	/* ZONE_DEVICE pages need a page-table walk to learn the mapping size. */
	if (is_zone_device_page(p))
		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
	else
		tk->size_shift = folio_shift(page_folio(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * so "tk->size_shift == 0" effectively checks no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * to a process' address space, it's possible not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		/* ZONE_DEVICE page not mapped in this vma: drop the entry. */
		kfree(tk);
		return;
	}

	get_task_struct(tsk);	/* dropped by kill_procs() */
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}
/*
 * Queue @tsk for later killing, unless no usable user address was found
 * for the page in @vma (callers pass -EFAULT in that case).
 */
static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
				  struct vm_area_struct *vma, struct list_head *to_kill,
				  unsigned long addr)
{
	if (addr != -EFAULT)
		__add_to_kill(tsk, p, vma, to_kill, addr);
}
#ifdef CONFIG_KSM
/* Return true when @tsk is already queued on @to_kill. */
static bool task_in_to_kill_list(struct list_head *to_kill,
				 struct task_struct *tsk)
{
	struct to_kill *tk;

	/* Pure lookup — nothing is removed, so no _safe iteration needed. */
	list_for_each_entry(tk, to_kill, nd) {
		if (tk->tsk == tsk)
			return true;
	}

	return false;
}
/* KSM variant: queue @tsk at most once per to-kill list. */
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long addr)
{
	if (task_in_to_kill_list(to_kill, tsk))
		return;

	__add_to_kill(tsk, p, vma, to_kill, addr);
}
#endif
/*
* Kill the processes that have been collected earlier.
*
* Only do anything when FORCEKILL is set, otherwise just free the
* list (this is used for clean pages which do not need killing)
*/
static void kill_procs(struct list_head *to_kill, int forcekill,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	/* _safe iteration: every entry is unlinked and freed in the loop. */
	list_for_each_entry_safe(tk, next, to_kill, nd) {
		if (forcekill) {
			/* No user address found: SIGKILL is the only safe option. */
			if (tk->addr == -EFAULT) {
				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, task_pid_nr(tk->tsk));
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}
			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, task_pid_nr(tk->tsk));
		}
		list_del(&tk->nd);
		put_task_struct(tk->tsk);	/* pairs with get in __add_to_kill() */
		kfree(tk);
	}
}
/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Returns the first thread whose flags
 * select early kill — PF_MCE_EARLY when the thread opted in via
 * PF_MCE_PROCESS, otherwise the global sysctl decides — or NULL.
 *
 * The caller already holds the RCU read lock, so no rcu_read_lock()
 * is taken here.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		bool early = (t->flags & PF_MCE_PROCESS) ?
				!!(t->flags & PF_MCE_EARLY) :
				!!sysctl_memory_failure_early_kill;

		if (early)
			return t;
	}

	return NULL;
}
/*
* Determine whether a given process is "early kill" process which expects
* to be signaled when some page under the process is hwpoisoned.
* Return task_struct of the dedicated thread (main thread unless explicitly
* specified) if the process is "early kill" and otherwise returns NULL.
*
* Note that the above is true for Action Optional case. For Action Required
* case, it's only meaningful to the current thread which need to be signaled
* with SIGBUS, this error is Action Optional for other non current
* processes sharing the same error page,if the process is "early kill", the
* task_struct of the dedicated thread will also be returned.
*/
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
{
	if (!tsk->mm)
		return NULL;	/* no user address space, nothing to signal */
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}
/*
* Collect processes when the error hit an anonymous page.
*/
static void collect_procs_anon(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = folio_lock_anon_vma_read(folio, NULL);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_pgoff(folio, page);
	/* RCU protects the brute-force walk over all processes. */
	rcu_read_lock();
	for_each_process(tsk) {
		struct vm_area_struct *vma;
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);
		unsigned long addr;

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (vma->vm_mm != t->mm)
				continue;
			/* -EFAULT results are filtered out by the helper below. */
			addr = page_mapped_in_vma(page, vma);
			add_to_kill_anon_file(t, page, vma, to_kill, addr);
		}
	}
	rcu_read_unlock();
	anon_vma_unlock_read(av);
}
/*
* Collect processes when the error hit a file mapped page.
*/
static void collect_procs_file(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = folio->mapping;
	pgoff_t pgoff;

	/* i_mmap lock stabilizes the interval tree; RCU covers the task walk. */
	i_mmap_lock_read(mapping);
	rcu_read_lock();
	pgoff = page_pgoff(folio, page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);
		unsigned long addr;

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm != t->mm)
				continue;
			addr = page_address_in_vma(folio, page, vma);
			add_to_kill_anon_file(t, page, vma, to_kill, addr);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}
#ifdef CONFIG_FS_DAX
/* fsdax: derive the user address from @pgoff within @vma, then queue @tsk. */
static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
			      struct vm_area_struct *vma,
			      struct list_head *to_kill, pgoff_t pgoff)
{
	unsigned long addr = vma_address(vma, pgoff, 1);

	__add_to_kill(tsk, p, vma, to_kill, addr);
}
/*
* Collect processes when the error hit a fsdax page.
*/
static void collect_procs_fsdax(const struct page *page,
		struct address_space *mapping, pgoff_t pgoff,
		struct list_head *to_kill, bool pre_remove)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	/* i_mmap lock stabilizes the interval tree; RCU covers the task walk. */
	i_mmap_lock_read(mapping);
	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *t = tsk;

		/*
		 * Search for all tasks while MF_MEM_PRE_REMOVE is set, because
		 * the current may not be the one accessing the fsdax page.
		 * Otherwise, search for the current task.
		 */
		if (!pre_remove)
			t = task_early_kill(tsk, true);
		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			if (vma->vm_mm == t->mm)
				add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */
/*
 * Collect the processes who have the corrupted page mapped to kill,
 * dispatching to the KSM, anonymous or file-backed collector as
 * appropriate. A folio with no mapping is no longer mapped: nothing to do.
 */
static void collect_procs(const struct folio *folio, const struct page *page,
		struct list_head *tokill, int force_early)
{
	if (!folio->mapping)
		return;

	if (unlikely(folio_test_ksm(folio))) {
		collect_procs_ksm(folio, page, tokill, force_early);
		return;
	}

	if (folio_test_anon(folio))
		collect_procs_anon(folio, page, tokill, force_early);
	else
		collect_procs_file(folio, page, tokill, force_early);
}
/* Private state threaded through the hwpoison page-table walk. */
struct hwpoison_walk {
	struct to_kill tk;	/* filled in when the poisoned pfn is located */
	unsigned long pfn;	/* pfn being searched for */
	int flags;		/* memory-failure MF_* flags */
};
/* Record the faulting address and mapping-size shift for later kill_proc(). */
static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}
/*
 * If @pte maps @poisoned_pfn — either as a present mapping or as a
 * hwpoison software-leaf entry — record the exact faulting user address
 * in @tk and return 1; otherwise return 0. @shift is the page size shift
 * of this page-table level, used to mask the pfn to the entry's base.
 */
static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				  unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;
	unsigned long hwpoison_vaddr;
	unsigned long mask;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		const softleaf_t entry = softleaf_from_pte(pte);

		/* Only hwpoison marker entries carry a pfn here. */
		if (softleaf_is_hwpoison(entry))
			pfn = softleaf_to_pfn(entry);
	}

	mask = ~((1UL << (shift - PAGE_SHIFT)) - 1);
	if (!pfn || pfn != (poisoned_pfn & mask))
		return 0;

	/* Offset within the (possibly huge) mapping to the exact page. */
	hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT);
	set_to_kill(tk, hwpoison_vaddr, shift);
	return 1;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Check whether the huge pmd at @pmdp covers hwp->pfn; if so, record the
 * faulting address (at PAGE_SHIFT granularity) in hwp->tk and return 1.
 */
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwpoison_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
/* !CONFIG_TRANSPARENT_HUGEPAGE: there are no huge pmd mappings to match. */
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwpoison_walk *hwp)
{
	return 0;
}
#endif
/*
 * pmd_entry callback: scan one pmd range for the poisoned pfn.
 * Returns 1 (stopping the walk) when found, 0 otherwise.
 */
static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwpoison_walk *hwp = walk->private;
	int ret = 0;
	pte_t *ptep, *mapped_pte;
	spinlock_t *ptl;

	/* Transparent huge pmd: handled as one entry under the pmd lock. */
	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
						addr, &ptl);
	if (!ptep)
		goto out;

	/* Check every pte in the range; stop at the first match. */
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
out:
	cond_resched();
	return ret;
}
#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct hwpoison_walk *hwp = walk->private;
struct hstate *h = hstate_vma(walk->vma);
spinlock_t *ptl;
pte_t pte;
int ret;
ptl = huge_pte_lock(h, walk->mm, ptep);
pte = huge_ptep_get(walk->mm, addr, ptep);
ret = check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
hwp->pfn, &hwp->tk);
spin_unlock(ptl);
return ret;
}
#else
#define hwpoison_hugetlb_range NULL
#endif
/* test_walk callback: returning 0 means never skip a vma. */
static int hwpoison_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	/* We also want to consider pages mapped into VM_PFNMAP. */
	return 0;
}
/* Page-table walk callbacks used by kill_accessing_process(). */
static const struct mm_walk_ops hwpoison_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
	.test_walk = hwpoison_test_walk,
	.walk_lock = PGWALK_RDLOCK,
};
/*
* Sends SIGBUS to the current process with error info.
*
* This function is intended to handle "Action Required" MCEs on already
* hardware poisoned pages. They could happen, for example, when
* memory_failure() failed to unmap the error page at the first call, or
* when multiple local machine checks happened on different CPUs.
*
* MCE handler currently has no easy access to the error virtual address,
* so this function walks page table to find it. The returned virtual address
* is proper in most cases, but it could be wrong when the application
* process has multiple entries mapping the error page.
*/
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwpoison_walk priv = {
		.pfn = pfn,
	};

	/* tk.addr/tk.size_shift are filled by the walk callbacks on a hit. */
	priv.tk.tsk = p;

	if (!p->mm)
		return -EFAULT;	/* no user address space to search */

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
			      (void *)&priv);
	/*
	 * ret = 1 when CMCI wins, regardless of whether try_to_unmap()
	 * succeeds or fails, then kill the process with SIGBUS.
	 * ret = 0 when poison page is a clean page and it's dropped, no
	 * SIGBUS is needed.
	 */
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	mmap_read_unlock(p->mm);

	return ret > 0 ? -EHWPOISON : 0;
}
/*
* MF_IGNORED - The m-f() handler marks the page as PG_hwpoisoned'ed.
* But it could not do more to isolate the page from being accessed again,
* nor does it kill the process. This is extremely rare and one of the
* potential causes is that the page state has been changed due to
* underlying race condition. This is the most severe outcomes.
*
* MF_FAILED - The m-f() handler marks the page as PG_hwpoisoned'ed.
* It should have killed the process, but it can't isolate the page,
* due to conditions such as extra pin, unmap failure, etc. Accessing
* the page again may trigger another MCE and the process will be killed
* by the m-f() handler immediately.
*
* MF_DELAYED - The m-f() handler marks the page as PG_hwpoisoned'ed.
* The page is unmapped, and is removed from the LRU or file mapping.
* An attempt to access the page again will trigger page fault and the
* PF handler will kill the process.
*
* MF_RECOVERED - The m-f() handler marks the page as PG_hwpoisoned'ed.
* The page has been completely isolated, that is, unmapped, taken out of
* the buddy system, or hole-punched out of the file mapping.
*/
/* Human-readable names for enum mf_result, used by action_result() logging. */
static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};
/* Human-readable names for enum mf_action_page_type (logging/tracing). */
static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_GET_HWPOISON]		= "get hwpoison page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_ALREADY_POISONED]	= "already poisoned page",
	[MF_MSG_PFN_MAP]		= "non struct page pfn",
	[MF_MSG_UNKNOWN]		= "unknown page",
};
/*
* XXX: It is possible that a page is isolated from LRU cache,
* and then kept in swap cache or failed to remove from page cache.
* The page count will stop it from being freed by unpoison.
* Stress tests should be aware of this memory leak problem.
*/
/*
 * Isolate @folio from the LRU and drop the isolation reference.
 * Returns 0 on success, -EIO when the folio could not be isolated.
 */
static int delete_from_lru_cache(struct folio *folio)
{
	if (folio_isolate_lru(folio)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the folio is unpoison-and-freed.
		 */
		folio_clear_active(folio);
		folio_clear_unevictable(folio);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(folio);

		/*
		 * drop the refcount elevated by folio_isolate_lru()
		 */
		folio_put(folio);
		return 0;
	}
	return -EIO;
}
/*
 * Punch the poisoned folio out of @mapping: via the filesystem's
 * ->error_remove_folio() when provided, otherwise by plain invalidation.
 * Returns MF_RECOVERED on success, MF_FAILED otherwise.
 */
static int truncate_error_folio(struct folio *folio, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_folio) {
		int err = mapping->a_ops->error_remove_folio(mapping, folio);

		if (err != 0)
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
		else if (!filemap_release_folio(folio, GFP_NOIO))
			pr_info("%#lx: failed to release buffers\n", pfn);
		else
			ret = MF_RECOVERED;
	} else {
		/*
		 * If the file system doesn't support it just invalidate
		 * This fails on dirty or anything with private pages
		 */
		if (mapping_evict_folio(mapping, folio))
			ret = MF_RECOVERED;
		else
			pr_info("%#lx: Failed to invalidate\n", pfn);
	}

	return ret;
}
/*
 * One recognizable page state and its handler. Judging by the
 * error_states[] entries below, mask selects the page-flag bits to test
 * and res is their required value — confirm against the matcher, which
 * lives outside this chunk.
 */
struct page_state {
	unsigned long mask;	/* page-flag bits to examine */
	unsigned long res;	/* expected value of those bits */
	enum mf_action_page_type type;
	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page_state *ps, struct page *p);
};
/*
 * Return true (and log) when @p is still referenced by someone other
 * than the memory-failure code's own reference, otherwise false.
 *
 * @extra_pins: one additional folio-sized reference is expected and
 * tolerated (e.g. the page stays in a cache by design).
 */
static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1 -
		    (extra_pins ? folio_nr_pages(page_folio(p)) : 0);

	if (count <= 0)
		return false;

	pr_err("%#lx: %s still referenced by %d users\n",
	       page_to_pfn(p), action_page_types[ps->type], count);
	return true;
}
/*
* Error hit kernel page.
* Do nothing, try to be lucky and not touch this instead. For a few cases we
* could be more sophisticated.
*/
static int me_kernel(struct page_state *ps, struct page *p)
{
	/* ->action() contract: the page must be unlocked before returning. */
	unlock_page(p);
	return MF_IGNORED;
}
/*
* Page in unknown state. Do nothing.
* This is a catch-all in case we fail to make sense of the page state.
*/
/* Catch-all handler: log and ignore; no safe recovery action is known. */
static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_IGNORED;
}
/*
* Clean (or cleaned) page cache page.
*/
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(folio);

	/*
	 * For anonymous folios the only reference left
	 * should be the one m_f() holds.
	 */
	if (folio_test_anon(folio)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch"
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = folio_mapping(folio);
	if (!mapping) {
		/* Folio has been torn down in the meantime */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * The shmem page is kept in page cache instead of truncating
	 * so is expected to have an extra refcount after error-handling.
	 */
	extra_pins = shmem_mapping(mapping);

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	folio_unlock(folio);	/* ->action() contract: return unlocked */

	return ret;
}
/*
* Dirty pagecache page
* Issues: when the error hit a hole page the error is not properly
* propagated.
*/
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	struct address_space *mapping = folio_mapping(folio);

	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 */
		mapping_set_error(mapping, -EIO);
	}

	/* After flagging the I/O error, proceed exactly as for a clean page. */
	return me_pagecache_clean(ps, p);
}
/*
* Clean and dirty swap cache.
*
* Dirty swap cache page is tricky to handle. The page could live both in page
* table and swap cache(ie. page is freshly swapped in). So it could be
* referenced concurrently by 2 types of PTEs:
* normal PTEs and swap PTEs. We try to handle them consistently by calling
* try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
* and then
* - clear dirty bit to prevent IO
* - remove from LRU
* - but keep in the swap cache, so that when we return to it on
* a later page fault, we know the application is accessing
* corrupted data and shall be killed (we installed simple
* interception code in do_swap_page to catch it).
*
* Clean swap cache pages can be directly isolated. A later page fault will
* bring in the known good data from disk.
*/
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;
	bool extra_pins = false;

	folio_clear_dirty(folio);	/* prevent writeback I/O of bad data */
	/* Trigger EIO in shmem: */
	folio_clear_uptodate(folio);

	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
	folio_unlock(folio);

	/* MF_DELAYED keeps the page in the swap cache: one expected extra ref. */
	if (ret == MF_DELAYED)
		extra_pins = true;

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}
/* Clean swap cache page: drop it; a later fault re-reads good data from swap. */
static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;

	swap_cache_del_folio(folio);

	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
	folio_unlock(folio);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}
/*
* Huge pages. Needs work.
* Issues:
* - Error on hugepage is contained in hugepage unit (not in raw page unit.)
* To narrow down kill region to one page, we need to break up pmd.
*/
static int me_huge_page(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int res;
	struct address_space *mapping;
	bool extra_pins = false;

	mapping = folio_mapping(folio);
	if (mapping) {
		res = truncate_error_folio(folio, page_to_pfn(p), mapping);
		/* The page is kept in page cache. */
		extra_pins = true;
		folio_unlock(folio);
	} else {
		folio_unlock(folio);

		/*
		 * migration entry prevents later access on error hugepage,
		 * so we can free and dissolve it into buddy to save healthy
		 * subpages.
		 */
		folio_put(folio);
		if (__page_handle_poison(p) > 0) {
			page_ref_inc(p);	/* re-take the poison holder's ref */
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
	}

	if (has_extra_refcount(ps, p, extra_pins))
		res = MF_FAILED;

	return res;
}
/*
* Various page states we can handle.
*
* A page state is defined by its current page->flags bits.
* The table matches them in order and calls the right handler.
*
* This is quite tricky because we can access page at any time
* in its live cycle, so all accesses have to be extremely careful.
*
* This is not complete. More states could be added.
* For any missing state don't attempt recovery.
*/
/* Shorthand masks for the page-flag bits tested in error_states[] below. */
#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define reserved	(1UL << PG_reserved)

/* Ordered state table: first matching {mask, res} entry wins. */
static struct page_state error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};
/* The shorthands are local to the table above. */
#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef reserved
/*
 * Bump the per-node memory_failure_stats counter matching @result for
 * the node owning @pfn, plus the node's total. Unknown nids or results
 * only warn (once) so a bad pfn cannot corrupt another node's counters.
 *
 * Fix: the original initialized nid to MAX_NUMNODES and mf_stats to NULL
 * only to overwrite both immediately — dead stores removed.
 */
static void update_per_node_mf_stats(unsigned long pfn,
				     enum mf_result result)
{
	struct memory_failure_stats *mf_stats;
	int nid = pfn_to_nid(pfn);

	if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) {
		WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
		return;
	}

	mf_stats = &NODE_DATA(nid)->mf_stats;
	switch (result) {
	case MF_IGNORED:
		++mf_stats->ignored;
		break;
	case MF_FAILED:
		++mf_stats->failed;
		break;
	case MF_DELAYED:
		++mf_stats->delayed;
		break;
	case MF_RECOVERED:
		++mf_stats->recovered;
		break;
	default:
		WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result);
		break;
	}
	++mf_stats->total;
}
/*
* "Dirty/Clean" indication is not 100% accurate due to the possibility of
* setting PG_dirty outside page lock. See also comment above set_page_dirty().
*/
/*
 * Emit the tracepoint and log line for one handling outcome and update
 * poison accounting. Returns 0 for recovered/delayed results, -EBUSY
 * for ignored/failed ones.
 */
static int action_result(unsigned long pfn, enum mf_action_page_type type,
			 enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	/* No poison accounting for already-poisoned or non-struct-page pfns. */
	if (type != MF_MSG_ALREADY_POISONED && type != MF_MSG_PFN_MAP) {
		num_poisoned_pages_inc(pfn);
		update_per_node_mf_stats(pfn, result);
	}

	pr_err("%#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}
/* Run the matched state handler and translate its result via action_result(). */
static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;

	/* page p should be unlocked after returning from ps->action().  */
	result = ps->action(ps, p);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return action_result(pfn, ps->type, result);
}
/* True when @page is hwpoisoned AND was taken off the buddy freelist. */
static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}
/* Mark @page (via page_private) as removed from buddy while poisoned. */
void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}
/* Clear the taken-off marker; only meaningful while the page is poisoned. */
void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}
/*
* Return true if a page type of a given page is supported by hwpoison
* mechanism (while handling could fail), otherwise false. This function
* does not return true for hugetlb or device memory pages, so it's assumed
* to be called only in the context where we never have such pages.
*/
static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
	if (PageSlab(page))
		return false;

	/* Soft offline could migrate movable_ops pages */
	if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page))
		return true;

	/* LRU and free buddy pages are the well-defined, handlable states. */
	return PageLRU(page) || is_free_buddy_page(page);
}
/*
 * Try to take a reference on the folio containing @page.
 * Returns 1 when a reference on the expected folio was taken, 0 when not
 * (refcount already zero, or the folio changed under us), -EBUSY for an
 * unhandlable page type; hugetlb results come from
 * get_hwpoison_hugetlb_folio() — see that helper for their meaning.
 */
static int __get_hwpoison_page(struct page *page, unsigned long flags)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false);
	if (hugetlb) {
		/* Make sure hugetlb demotion did not happen from under us. */
		if (folio == page_folio(page))
			return ret;
		if (ret > 0) {
			/* Demoted: drop the stale ref, retry on the new folio. */
			folio_put(folio);
			folio = page_folio(page);
		}
	}

	/*
	 * This check prevents from calling folio_try_get() for any
	 * unsupported type of folio in order to reduce the risk of unexpected
	 * races caused by taking a folio refcount.
	 */
	if (!HWPoisonHandlable(&folio->page, flags))
		return -EBUSY;

	if (folio_try_get(folio)) {
		if (folio == page_folio(page))
			return 1;

		/* @page moved to a different folio under us; give up this try. */
		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
		folio_put(folio);
	}

	return 0;
}
#define GET_PAGE_MAX_RETRY_NUM 3

/*
 * Grab a reference on @p for memory-failure handling, retrying a bounded
 * number of times (GET_PAGE_MAX_RETRY_NUM) around races with allocation,
 * free and page-type transitions, shaking the page between attempts.
 *
 * Fix: one retry check used the magic constant 3 instead of
 * GET_PAGE_MAX_RETRY_NUM like its siblings — made consistent (same value).
 *
 * Return: 1 when a usable reference was taken, 0 for a free buddy or
 * free hugetlb page, -EBUSY when races persisted past the retry budget,
 * -EIO for unhandlable pages.
 */
static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p, flags);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < GET_PAGE_MAX_RETRY_NUM)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < GET_PAGE_MAX_RETRY_NUM)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with (possibly temporary) unhandlable
			 * page, retry.
			 */
			if (pass++ < GET_PAGE_MAX_RETRY_NUM) {
				shake_page(p);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < GET_PAGE_MAX_RETRY_NUM) {
			put_page(p);
			shake_page(p);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	if (ret == -EIO)
		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));

	return ret;
}
/*
 * Take a reference on @page for unpoisoning. Returns 1 on success, 0 when
 * the refcount was already zero, -EHWPOISON for pages that were also taken
 * off the buddy freelist when poisoned (both operations must be undone);
 * hugetlb results come from get_hwpoison_hugetlb_folio().
 */
static int __get_unpoison_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true);
	if (hugetlb) {
		/* Make sure hugetlb demotion did not happen from under us. */
		if (folio == page_folio(page))
			return ret;
		if (ret > 0)
			folio_put(folio);	/* stale ref after demotion */
	}

	/*
	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
	 * but also isolated from buddy freelist, so need to identify the
	 * state and have to cancel both operations to unpoison.
	 */
	if (PageHWPoisonTakenOff(page))
		return -EHWPOISON;

	return get_page_unless_zero(page) ? 1 : 0;
}
/**
* get_hwpoison_page() - Get refcount for memory error handling
* @p: Raw error page (hit by memory error)
* @flags: Flags controlling behavior of error handling
*
* get_hwpoison_page() takes a page refcount of an error page to handle memory
* error on it, after checking that the error page is in a well-defined state
* (defined as a page-type we can successfully handle the memory error on it,
* such as LRU page and hugetlb page).
*
* Memory error handling could be triggered at any time on any type of page,
* so it's prone to race with typical memory management lifecycle (like
* allocation and free). So to avoid such races, get_hwpoison_page() takes
* extra care for the error page's state (as done in __get_hwpoison_page()),
* and has some retry logic in get_any_page().
*
* When called from unpoison_memory(), the caller should already ensure that
* the given page has PG_hwpoison. So it's never reused for other page
* allocations, and __get_unpoison_page() never races with them.
*
* Return: 0 on failure or free buddy (hugetlb) page,
* 1 on success for in-use pages in a well-defined state,
* -EIO for pages on which we can not handle memory errors,
* -EBUSY when get_hwpoison_page() has raced with page lifecycle
* operations like allocation and free,
* -EHWPOISON when the page is hwpoisoned and taken off from buddy.
*/
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	/* Keep the zone's per-cpu page lists disabled while taking the ref. */
	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}
/*
* The caller must guarantee the folio isn't large folio, except hugetlb.
* try_to_unmap() can't handle it.
*/
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
	struct address_space *mapping;

	if (folio_test_swapcache(folio)) {
		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
		ttu &= ~TTU_HWPOISON;	/* unmap to ordinary swap entries */
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = folio_mapping(folio);
	if (!must_kill && !folio_test_dirty(folio) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (folio_mkclean(folio)) {
			folio_set_dirty(folio);
		} else {
			/* Clean page: drop it quietly, no SIGBUS needed. */
			ttu &= ~TTU_HWPOISON;
			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
		/*
		 * For hugetlb folios in shared mappings, try_to_unmap
		 * could potentially call huge_pmd_unshare. Because of
		 * this, take semaphore in write mode here and set
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
		mapping = hugetlb_folio_mapping_lock_write(folio);
		if (!mapping) {
			pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
				folio_pfn(folio));
			return -EBUSY;
		}

		try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
		i_mmap_unlock_write(mapping);
	} else {
		try_to_unmap(folio, ttu);
	}

	/* -EBUSY: some mappings survived the unmap attempt. */
	return folio_mapped(folio) ? -EBUSY : 0;
}
/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 *
 * Returns true when all user mappings were removed (or none existed),
 * false when some mappings could not be torn down.
 */
static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
		unsigned long pfn, int flags)
{
	LIST_HEAD(tokill);
	bool unmap_success;
	int forcekill;
	bool mlocked = folio_test_mlocked(folio);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (folio_test_reserved(folio) || folio_test_slab(folio) ||
	    folio_test_pgtable(folio) || folio_test_offline(folio))
		return true;
	if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!folio_mapped(folio))
		return true;

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 */
	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);

	unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n",
		       pfn, folio_mapcount(folio));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_folio(folio);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier
	 * use a more force-full uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
		    !unmap_success;
	kill_procs(&tokill, forcekill, pfn, flags);

	return unmap_success;
}
/*
 * Match the page against the error_states table and run the corresponding
 * recovery action.  error_states is scanned linearly; it is expected to end
 * with a catch-all entry whose mask is 0 so the loops always terminate.
 */
static int identify_page_state(unsigned long pfn, struct page *p,
				unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags.f & ps->mask) == ps->res)
			break;

	/*
	 * Carry the current dirty bit over into the saved flags: unmapping
	 * may have transferred PTE dirty state to the page after the
	 * snapshot was taken.
	 */
	page_flags |= (p->flags.f & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}
/*
 * Split a transparent huge page down to @new_order under the page lock.
 *
 * When 'release' is 'false', it means that if thp split has failed,
 * there is still more to do, hence the page refcount we took earlier
 * is still needed.
 */
static int try_to_split_thp_page(struct page *page, unsigned int new_order,
		bool release)
{
	int err;

	lock_page(page);
	err = split_huge_page_to_order(page, new_order);
	unlock_page(page);

	/* On failure, drop our reference only if the caller is done with it. */
	if (err) {
		if (release)
			put_page(page);
	}
	return err;
}
/*
 * Tear down the file mappings covering the poisoned range and deliver
 * SIGBUS to the processes collected in @to_kill.  Used by the dev_pagemap
 * and fsdax error paths.
 */
static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
		struct address_space *mapping, pgoff_t index, int flags)
{
	struct to_kill *tk;
	unsigned long size = 0;

	/* Find the largest mapping size among all affected tasks. */
	list_for_each_entry(tk, to_kill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);

	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up device-dax
		 * mappings which are constant size. The actual size of the
		 * mapping being torn down is communicated in siginfo, see
		 * kill_proc()
		 */
		loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);

		unmap_mapping_range(mapping, start, size, 0);
	}

	kill_procs(to_kill, flags & MF_MUST_KILL, pfn, flags);
}
/*
 * Only dev_pagemap pages get here, such as fsdax when the filesystem
 * either do not claim or fails to claim a hwpoison event, or devdax.
 * The fsdax pages are initialized per base page, and the devdax pages
 * could be initialized either as base pages, or as compound pages with
 * vmemmap optimization enabled. Devdax is simplistic in its dealing with
 * hwpoison, such that, if a subpage of a compound page is poisoned,
 * simply mark the compound head page is by far sufficient.
 *
 * Returns 0 on success, -EBUSY if the dax entry could not be locked,
 * -EOPNOTSUPP when filtered, -ENXIO for unsupported pgmap types.
 */
static int mf_generic_kill_procs(unsigned long long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct folio *folio = pfn_folio(pfn);
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	int rc = 0;

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_folio(folio);
	if (!cookie)
		return -EBUSY;

	if (hwpoison_filter(&folio->page)) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_COHERENT:
		/*
		 * TODO: Handle device pages which may need coordination
		 * with device-side memory.
		 */
		rc = -ENXIO;
		goto unlock;
	default:
		break;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(&folio->page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(folio, &folio->page, &to_kill, true);

	unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
unlock:
	dax_unlock_folio(folio, cookie);
	return rc;
}
#ifdef CONFIG_FS_DAX
/**
 * mf_dax_kill_procs - Collect and kill processes who are using this file range
 * @mapping:	address_space of the file in use
 * @index:	start pgoff of the range within the file
 * @count:	length of the range, in unit of PAGE_SIZE
 * @mf_flags:	memory failure flags
 *
 * Walks the range page by page; each dax entry is locked while the users
 * of that page are collected and killed.  Holes (no backing page) are
 * skipped.
 *
 * Return: 0 on success, -EBUSY if a dax entry could not be locked.
 */
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		unsigned long count, int mf_flags)
{
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	struct page *page;
	size_t end = index + count;
	bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE;

	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;

	for (; index < end; index++) {
		page = NULL;
		cookie = dax_lock_mapping_entry(mapping, index, &page);
		if (!cookie)
			return -EBUSY;
		if (!page)
			goto unlock;

		if (!pre_remove)
			SetPageHWPoison(page);

		/*
		 * The pre_remove case is revoking access, the memory is still
		 * good and could theoretically be put back into service.
		 */
		collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove);
		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
				index, mf_flags);
unlock:
		dax_unlock_mapping_entry(mapping, index, cookie);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
#endif /* CONFIG_FS_DAX */
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Struct raw_hwp_page represents information about "raw error page",
 * constructing singly linked list from ->_hugetlb_hwpoison field of folio.
 */
struct raw_hwp_page {
	struct llist_node node;	/* link in the folio's raw hwp list */
	struct page *page;	/* the exact subpage hit by the error */
};

/* The llist head is overlaid on the folio's _hugetlb_hwpoison field. */
static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
{
	return (struct llist_head *)&folio->_hugetlb_hwpoison;
}
/*
 * Return true if @page itself (not merely its folio) is recorded as
 * hwpoisoned.  For hugetlb this means searching the folio's raw hwp list
 * for the exact subpage.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page)
{
	struct llist_head *raw_hwp_head;
	struct raw_hwp_page *p;
	struct folio *folio = page_folio(page);
	bool ret = false;

	if (!folio_test_hwpoison(folio))
		return false;

	if (!folio_test_hugetlb(folio))
		return PageHWPoison(page);

	/*
	 * When RawHwpUnreliable is set, kernel lost track of which subpages
	 * are HWPOISON. So return as if ALL subpages are HWPOISONed.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return true;

	/* mf_mutex protects the raw hwp list against concurrent updates. */
	mutex_lock(&mf_mutex);

	raw_hwp_head = raw_hwp_list_head(folio);
	llist_for_each_entry(p, raw_hwp_head->first, node) {
		if (page == p->page) {
			ret = true;
			break;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
/*
 * Drain and free the folio's raw hwp list.
 *
 * @move_flag: when true, transfer the poison marking to each recorded
 *             subpage (SetPageHWPoison); when false, the entries are being
 *             discarded, so decrement the poisoned-page accounting instead.
 *
 * Returns the number of entries that were on the list.
 */
static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	struct llist_node *head;
	struct raw_hwp_page *p, *next;
	unsigned long count = 0;

	head = llist_del_all(raw_hwp_list_head(folio));
	llist_for_each_entry_safe(p, next, head, node) {
		if (move_flag)
			SetPageHWPoison(p->page);
		else
			num_poisoned_pages_sub(page_to_pfn(p->page), 1);
		kfree(p);
		count++;
	}
	return count;
}
/* Return codes shared by hugetlb_update_hwpoison() and the hwpoison
 * get-page helpers below. */
#define MF_HUGETLB_FREED		0	/* freed hugepage */
#define MF_HUGETLB_IN_USED		1	/* in-use hugepage */
#define MF_HUGETLB_NON_HUGEPAGE		2	/* not a hugepage */
#define MF_HUGETLB_FOLIO_PRE_POISONED	3	/* folio already poisoned */
#define MF_HUGETLB_PAGE_PRE_POISONED	4	/* exact page already poisoned */
#define MF_HUGETLB_RETRY		5	/* hugepage is busy, retry */

/*
 * Set hugetlb folio as hwpoisoned, update folio private raw hwpoison list
 * to keep track of the poisoned pages.
 *
 * Returns 0 when the folio was newly poisoned, or one of the
 * MF_HUGETLB_*_PRE_POISONED codes when it (or the exact subpage) was
 * already poisoned.
 */
static int hugetlb_update_hwpoison(struct folio *folio, struct page *page)
{
	struct llist_head *head;
	struct raw_hwp_page *raw_hwp;
	struct raw_hwp_page *p;
	int ret = folio_test_set_hwpoison(folio) ? MF_HUGETLB_FOLIO_PRE_POISONED : 0;

	/*
	 * Once the hwpoison hugepage has lost reliable raw error info,
	 * there is little meaning to keep additional error info precisely,
	 * so skip to add additional raw error info.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return MF_HUGETLB_FOLIO_PRE_POISONED;
	head = raw_hwp_list_head(folio);
	llist_for_each_entry(p, head->first, node) {
		if (p->page == page)
			return MF_HUGETLB_PAGE_PRE_POISONED;
	}

	/* GFP_ATOMIC: may be called from contexts that cannot sleep. */
	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
	if (raw_hwp) {
		raw_hwp->page = page;
		llist_add(&raw_hwp->node, head);
	} else {
		/*
		 * Failed to save raw error info.  We no longer trace all
		 * hwpoisoned subpages, and we need refuse to free/dissolve
		 * this hwpoisoned hugepage.
		 */
		folio_set_hugetlb_raw_hwp_unreliable(folio);
		/*
		 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
		 * used any more, so free it.
		 */
		__folio_free_raw_hwp(folio, false);
	}
	return ret;
}
/*
 * Release the folio's raw hwp list, unless the folio is in a state where
 * the list must be kept (or doesn't make sense to touch).
 *
 * Returns the number of list entries released (0 when nothing was done).
 */
static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	/*
	 * A raw-hwp-unreliable hugepage must never be unpoisoned: by
	 * definition we no longer know which subpages carry errors.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return 0;

	/*
	 * When moving the poison markers onto subpages (i.e. the hugepage is
	 * about to be dissolved/freed), vmemmap-optimized hugepages cannot be
	 * handled: their tail struct pages do not exist.
	 */
	if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
		return 0;

	return __folio_free_raw_hwp(folio, move_flag);
}
/*
 * Clear the folio-level hwpoison marker and push the recorded poison down
 * onto the individual subpages.  Skipped entirely for folios whose raw hwp
 * tracking is unreliable or whose tail pages are vmemmap-optimized away.
 */
void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
	if (folio_test_hugetlb_raw_hwp_unreliable(folio) ||
	    folio_test_hugetlb_vmemmap_optimized(folio))
		return;

	folio_clear_hwpoison(folio);
	folio_free_raw_hwp(folio, true);
}
/*
 * Called from hugetlb code with hugetlb_lock held.
 *
 * Classifies the hugetlb page's state, records the poison, and pins the
 * folio when it is in use.  On the MF_HUGETLB_IN_USED path the folio's
 * refcount is elevated and hugetlb_migratable may be cleared (reported via
 * *migratable_cleared) so the caller can undo it if needed.
 *
 * Returns one of the MF_HUGETLB_* codes.
 */
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				 bool *migratable_cleared)
{
	struct page *page = pfn_to_page(pfn);
	struct folio *folio = page_folio(page);
	bool count_increased = false;
	int ret, rc;

	if (!folio_test_hugetlb(folio)) {
		ret = MF_HUGETLB_NON_HUGEPAGE;
		goto out;
	} else if (flags & MF_COUNT_INCREASED) {
		/* Caller already holds a reference (e.g. madvise path). */
		ret = MF_HUGETLB_IN_USED;
		count_increased = true;
	} else if (folio_test_hugetlb_freed(folio)) {
		ret = MF_HUGETLB_FREED;
	} else if (folio_test_hugetlb_migratable(folio)) {
		/* folio_try_get() failing means the page is being freed. */
		if (folio_try_get(folio)) {
			ret = MF_HUGETLB_IN_USED;
			count_increased = true;
		} else {
			ret = MF_HUGETLB_FREED;
		}
	} else {
		/* Transient state (allocation/demotion in flight): retry. */
		ret = MF_HUGETLB_RETRY;
		if (!(flags & MF_NO_RETRY))
			goto out;
	}

	rc = hugetlb_update_hwpoison(folio, page);
	if (rc >= MF_HUGETLB_FOLIO_PRE_POISONED) {
		/* Already poisoned: drop any reference we took and report. */
		ret = rc;
		goto out;
	}

	/*
	 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them
	 * from being migrated by memory hotremove.
	 */
	if (count_increased && folio_test_hugetlb_migratable(folio)) {
		folio_clear_hugetlb_migratable(folio);
		*migratable_cleared = true;
	}

	return ret;
out:
	if (count_increased)
		folio_put(folio);
	return ret;
}
/*
 * Taking refcount of hugetlb pages needs extra care about race conditions
 * with basic operations like hugepage allocation/free/demotion.
 * So some of prechecks for hwpoison (pinning, and testing/setting
 * PageHWPoison) should be done in single hugetlb_lock range.
 *
 * *hugetlb is set to 0 only when the pfn turns out not to be a hugetlb
 * page, telling the caller to fall back to the normal page path.
 *
 * Returns:
 *   0             - not hugetlb, or recovered
 *   -EBUSY        - not recovered
 *   -EOPNOTSUPP   - hwpoison_filter'ed
 *   -EHWPOISON    - folio or exact page already poisoned
 *   -EFAULT       - kill_accessing_process finds current->mm null
 */
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	int res, rv;
	struct page *p = pfn_to_page(pfn);
	struct folio *folio;
	unsigned long page_flags;
	bool migratable_cleared = false;

	*hugetlb = 1;
retry:
	res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
	switch (res) {
	case MF_HUGETLB_NON_HUGEPAGE: /* fallback to normal page handling */
		*hugetlb = 0;
		return 0;
	case MF_HUGETLB_RETRY:
		/* Retry exactly once for a transient state. */
		if (!(flags & MF_NO_RETRY)) {
			flags |= MF_NO_RETRY;
			goto retry;
		}
		return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
	case MF_HUGETLB_FOLIO_PRE_POISONED:
	case MF_HUGETLB_PAGE_PRE_POISONED:
		rv = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			rv = kill_accessing_process(current, pfn, flags);
		if (res == MF_HUGETLB_PAGE_PRE_POISONED)
			action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
		else
			action_result(pfn, MF_MSG_HUGE, MF_FAILED);
		return rv;
	default:
		WARN_ON((res != MF_HUGETLB_FREED) && (res != MF_HUGETLB_IN_USED));
		break;
	}

	folio = page_folio(p);
	folio_lock(folio);

	if (hwpoison_filter(p)) {
		/* Filtered out: roll back the poison state we installed. */
		folio_clear_hugetlb_hwpoison(folio);
		if (migratable_cleared)
			folio_set_hugetlb_migratable(folio);
		folio_unlock(folio);
		if (res == MF_HUGETLB_IN_USED)
			folio_put(folio);
		return -EOPNOTSUPP;
	}

	/*
	 * Handling free hugepage.  The possible race with hugepage allocation
	 * or demotion can be prevented by PageHWPoison flag.
	 */
	if (res == MF_HUGETLB_FREED) {
		folio_unlock(folio);
		if (__page_handle_poison(p) > 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
		return action_result(pfn, MF_MSG_FREE_HUGE, res);
	}

	/* Snapshot the flags before unmapping can change them. */
	page_flags = folio->flags.f;

	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
		folio_unlock(folio);
		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
	}

	return identify_page_state(pfn, p, page_flags);
}
#else
/* !CONFIG_HUGETLB_PAGE: no hugetlb pages exist, so never handle one. */
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	return 0;
}

/* !CONFIG_HUGETLB_PAGE: no raw hwp list to free. */
static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
{
	return 0;
}
#endif	/* CONFIG_HUGETLB_PAGE */
/*
 * Drop the extra page reference that callers such as madvise() take
 * before invoking memory_failure() (signalled via MF_COUNT_INCREASED).
 */
static void put_ref_page(unsigned long pfn, int flags)
{
	if (flags & MF_COUNT_INCREASED)
		put_page(pfn_to_page(pfn));
}
/*
 * Handle a memory error on a ZONE_DEVICE pfn.  Gives the owning driver a
 * chance to handle the failure first (pgmap->ops->memory_failure), falling
 * back to the generic dax kill path otherwise.  Consumes the pgmap
 * reference taken by the caller.
 */
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	int rc = -ENXIO;

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn))
		goto out;

	/*
	 * Call driver's implementation to handle the memory failure, otherwise
	 * fall back to generic handler.
	 */
	if (pgmap_has_memory_failure(pgmap)) {
		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
		/*
		 * Fall back to generic handler too if operation is not
		 * supported inside the driver/device/filesystem.
		 */
		if (rc != -EOPNOTSUPP)
			goto out;
	}

	rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
	/* drop pgmap ref acquired in caller */
	put_dev_pagemap(pgmap);
	if (rc != -EOPNOTSUPP)
		action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}
/*
 * The calling condition is as such: thp split failed, page might have
 * been RDMA pinned, not much can be done for recovery.
 * But a SIGBUS should be delivered with vaddr provided so that the user
 * application has a chance to recover. Also, application processes'
 * election for MCE early killed will be honored.
 */
static void kill_procs_now(struct page *p, unsigned long pfn, int flags,
		struct folio *folio)
{
	LIST_HEAD(tokill);

	/* collect_procs() walks rmap; hold the folio lock while doing so. */
	folio_lock(folio);
	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
	folio_unlock(folio);

	/* forcekill=true: the poison cannot be isolated, so SIGBUS now. */
	kill_procs(&tokill, true, pfn, flags);
}
/*
 * Register a driver-managed PFN range (memory not backed by struct page)
 * so memory_failure() can route errors in that range back to the owner.
 *
 * Returns -EINVAL if no pfn_to_vma_pgoff callback is provided, -EBUSY if
 * the range overlaps an already-registered one, 0 on success.
 */
int register_pfn_address_space(struct pfn_address_space *pfn_space)
{
	guard(mutex)(&pfn_space_lock);

	/* The callback is required to resolve a pfn to a vma page offset. */
	if (!pfn_space->pfn_to_vma_pgoff)
		return -EINVAL;

	/* Reject any overlap with an existing registration. */
	if (interval_tree_iter_first(&pfn_space_itree,
				     pfn_space->node.start,
				     pfn_space->node.last))
		return -EBUSY;

	interval_tree_insert(&pfn_space->node, &pfn_space_itree);
	return 0;
}
EXPORT_SYMBOL_GPL(register_pfn_address_space);
/*
 * Remove a previously registered PFN range.  Safe to call for a range that
 * was never (or already un-) registered: removal only happens when a node
 * overlapping the range is still in the tree.
 *
 * NOTE(review): the overlap lookup assumes registered ranges are disjoint
 * (enforced by register_pfn_address_space), so any hit must be this
 * node — confirm no other path inserts into pfn_space_itree.
 */
void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
{
	guard(mutex)(&pfn_space_lock);

	if (interval_tree_iter_first(&pfn_space_itree,
				     pfn_space->node.start,
				     pfn_space->node.last))
		interval_tree_remove(&pfn_space->node, &pfn_space_itree);
}
EXPORT_SYMBOL_GPL(unregister_pfn_address_space);
/*
 * Queue @tsk for a SIGBUS at the address corresponding to @pgoff in @vma.
 * Variant of add_to_kill() for errors on PFNs not backed by struct page,
 * hence the fixed PAGE_SHIFT size and pgoff-based address lookup.
 */
static void add_to_kill_pgoff(struct task_struct *tsk,
			      struct vm_area_struct *vma,
			      struct list_head *to_kill,
			      pgoff_t pgoff)
{
	struct to_kill *tk;

	/* GFP_ATOMIC: called under rcu_read_lock()/i_mmap_lock_read(). */
	tk = kmalloc(sizeof(*tk), GFP_ATOMIC);
	if (!tk) {
		pr_info("Unable to kill proc %d\n", tsk->pid);
		return;
	}

	/* Check for pgoff not backed by struct page */
	tk->addr = vma_address(vma, pgoff, 1);
	tk->size_shift = PAGE_SHIFT;

	/* Still queue the task even without an address: kill with addr 0. */
	if (tk->addr == -EFAULT)
		pr_info("Unable to find address %lx in %s\n",
			pgoff, tsk->comm);

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}
/*
 * Collect processes when the error hit a PFN not backed by struct page.
 *
 * Walks every task and every vma of pfn_space->mapping; the registered
 * pfn_to_vma_pgoff callback decides whether a given vma maps the failing
 * pfn and yields the corresponding page offset.
 */
static void collect_procs_pfn(struct pfn_address_space *pfn_space,
			      unsigned long pfn, struct list_head *to_kill)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = pfn_space->mapping;

	i_mmap_lock_read(mapping);
	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *t = tsk;

		/* Honor the task's early-kill election (always early here). */
		t = task_early_kill(tsk, true);
		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
			pgoff_t pgoff;

			if (vma->vm_mm == t->mm &&
			    !pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff))
				add_to_kill_pgoff(t, vma, to_kill, pgoff);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}
/**
 * memory_failure_pfn - Handle memory failure on a page not backed by
 * struct page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * Looks up which registered driver range(s) cover @pfn, collects every
 * process mapping it, and delivers SIGBUS.
 *
 * Return:
 *   0	   - success,
 *   -EBUSY  - Page PFN does not belong to any address space mapping.
 */
static int memory_failure_pfn(unsigned long pfn, int flags)
{
	struct interval_tree_node *node;
	LIST_HEAD(tokill);

	scoped_guard(mutex, &pfn_space_lock) {
		bool mf_handled = false;

		/*
		 * Modules registers with MM the address space mapping to
		 * the device memory they manage. Iterate to identify
		 * exactly which address space has mapped to this failing
		 * PFN.
		 */
		for (node = interval_tree_iter_first(&pfn_space_itree, pfn, pfn); node;
		     node = interval_tree_iter_next(node, pfn, pfn)) {
			struct pfn_address_space *pfn_space =
				container_of(node, struct pfn_address_space, node);

			collect_procs_pfn(pfn_space, pfn, &tokill);
			mf_handled = true;
		}

		/* No registered owner for this pfn: nothing we can do. */
		if (!mf_handled)
			return action_result(pfn, MF_MSG_PFN_MAP, MF_IGNORED);
	}

	/*
	 * Unlike System-RAM there is no possibility to swap in a different
	 * physical page at a given virtual address, so all userspace
	 * consumption of direct PFN memory necessitates SIGBUS (i.e.
	 * MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	kill_procs(&tokill, true, pfn, flags);

	return action_result(pfn, MF_MSG_PFN_MAP, MF_RECOVERED);
}
/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return:
 *   0             - success,
 *   -ENXIO        - memory not managed by the kernel
 *   -EOPNOTSUPP   - hwpoison_filter() filtered the error event,
 *   -EHWPOISON    - the page was already poisoned, potentially
 *                   kill process,
 *   other negative values - failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct folio *folio;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	/* mf_mutex serializes all memory-failure/unpoison operations. */
	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		/* Not online memory: try arch, pfn-map, and devmap paths. */
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (!pfn_valid(pfn) && !arch_is_platform_page(PFN_PHYS(pfn))) {
			/*
			 * The PFN is not backed by struct page.
			 */
			res = memory_failure_pfn(pfn, flags);
			goto unlock_mutex;
		}

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn);
			put_ref_page(pfn, flags);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	/* hugetlb pages need their own pinning/poisoning protocol. */
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		/* Already poisoned: optionally SIGBUS the current task. */
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
		goto unlock_mutex;
	}

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    check_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	res = get_hwpoison_page(p, flags);
	if (!res) {
		if (is_free_buddy_page(p)) {
			if (take_page_off_buddy(p)) {
				page_ref_inc(p);
				res = MF_RECOVERED;
			} else {
				/* We lost the race, try again */
				if (retry) {
					ClearPageHWPoison(p);
					retry = false;
					goto try_again;
				}
				res = MF_FAILED;
			}
			res = action_result(pfn, MF_MSG_BUDDY, res);
		} else {
			res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
		}
		goto unlock_mutex;
	} else if (res < 0) {
		res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
		goto unlock_mutex;
	}

	folio = page_folio(p);

	/* filter pages that are protected from hwpoison test by users */
	folio_lock(folio);
	if (hwpoison_filter(p)) {
		ClearPageHWPoison(p);
		folio_unlock(folio);
		folio_put(folio);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}
	folio_unlock(folio);

	if (folio_test_large(folio)) {
		/* Large folios must be split before we can handle them. */
		const int new_order = min_order_for_split(folio);
		int err;

		/*
		 * The flag must be set after the refcount is bumped
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED.  So here seems to be the best
		 * place.
		 *
		 * Don't need care about the above error handling paths for
		 * get_hwpoison_page() since they handle either free page
		 * or unhandlable page.  The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		folio_set_has_hwpoisoned(folio);
		err = try_to_split_thp_page(p, new_order, /* release= */ false);
		/*
		 * If splitting a folio to order-0 fails, kill the process.
		 * Split the folio regardless to minimize unusable pages.
		 * Because the memory failure code cannot handle large
		 * folios, this split is always treated as if it failed.
		 */
		if (err || new_order) {
			/* get folio again in case the original one is split */
			folio = page_folio(p);
			res = -EHWPOISON;
			kill_procs_now(p, pfn, flags, folio);
			put_page(p);
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_FAILED);
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
		/* Re-resolve: p may now belong to a different (split) folio. */
		folio = page_folio(p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_folio(folio);

	folio_lock(folio);

	/*
	 * We're only intended to deal with the non-Compound page here.
	 * The page cannot become compound pages again as folio has been
	 * splited and extra refcnt is held.
	 */
	WARN_ON(folio_test_large(folio));

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = folio->flags.f;

	/*
	 * __munlock_folio() may clear a writeback folio's LRU flag without
	 * the folio lock. We need to wait for writeback completion for this
	 * folio or it may trigger a vfs BUG while evicting inode.
	 */
	if (!folio_test_lru(folio) && !folio_test_writeback(folio))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	folio_wait_writeback(folio);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (folio_test_lru(folio) && !folio_test_swapcache(folio) &&
	    folio->mapping == NULL) {
		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		goto unlock_page;
	}

identify_page_state:
	/* identify_page_state() drops the folio lock via page_action(). */
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	folio_unlock(folio);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
/* Per-CPU deferred queue for errors reported from contexts (e.g. IRQ)
 * that cannot run memory_failure() directly. */
#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

/* One queued memory-failure event. */
struct memory_failure_entry {
	unsigned long pfn;	/* failing page frame number */
	int flags;		/* MF_* flags for the handler */
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	raw_spinlock_t lock;		/* protects fifo; raw: taken in IRQ context */
	struct work_struct work;	/* drains the fifo in process context */
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovering of error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	bool buffer_overflow;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry);
	if (!buffer_overflow)
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
	/* Report outside the lock: the event is dropped on overflow. */
	if (buffer_overflow)
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
/*
 * Work handler draining one CPU's memory-failure fifo, dispatching each
 * queued event to soft_offline_page() or memory_failure() as requested.
 */
static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		/* Only the kfifo access is under the lock; handlers may sleep. */
		raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}
/*
 * Boot-time setup: initialize the per-CPU deferred-work state used by
 * memory_failure_queue() and register the vm sysctl table.
 */
static int __init memory_failure_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct memory_failure_cpu *state =
			&per_cpu(memory_failure_cpu, cpu);

		raw_spin_lock_init(&state->lock);
		INIT_KFIFO(state->fifo);
		INIT_WORK(&state->work, memory_failure_work_func);
	}

	register_sysctl_init("vm", memory_failure_table);

	return 0;
}
core_initcall(memory_failure_init);
#undef pr_fmt
#define pr_fmt(fmt) "Unpoison: " fmt

/* Ratelimited pr_info helper for the unpoison path. */
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})
/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software-level, so it only works
 * for linux injected failures, not real hardware failures
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct folio *folio;
	struct page *p;
	int ret = -EBUSY, ghp;
	unsigned long count;
	bool huge = false;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	p = pfn_to_online_page(pfn);
	if (!p)
		return -EIO;

	folio = page_folio(p);

	mutex_lock(&mf_mutex);

	/* Never unpoison after a real hardware error has been seen. */
	if (hw_memory_failure) {
		unpoison_pr_info("%#lx: disabled after HW memory failure\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (is_huge_zero_folio(folio)) {
		unpoison_pr_info("%#lx: huge zero page is not supported\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("%#lx: page was already unpoisoned\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	/* A refcount > 1 means someone else still holds the poisoned page. */
	if (folio_ref_count(folio) > 1) {
		unpoison_pr_info("%#lx: someone grabs the hwpoison page\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_test_slab(folio) || folio_test_pgtable(folio) ||
	    folio_test_reserved(folio) || folio_test_offline(folio))
		goto unlock_mutex;

	if (folio_mapped(folio)) {
		unpoison_pr_info("%#lx: someone maps the hwpoison page\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_mapping(folio)) {
		unpoison_pr_info("%#lx: the hwpoison page has non-NULL mapping\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	ghp = get_hwpoison_page(p, MF_UNPOISON);
	if (!ghp) {
		/* Free (hugetlb) page: just clear the poison markers. */
		if (folio_test_hugetlb(folio)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0)
				goto unlock_mutex;
		}
		ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
	} else if (ghp < 0) {
		if (ghp == -EHWPOISON) {
			/* Page was taken off buddy at poison time: put it back. */
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else {
			ret = ghp;
			unpoison_pr_info("%#lx: failed to grab page\n",
					 pfn, &unpoison_rs);
		}
	} else {
		/* In-use page: we now hold an extra reference from ghp == 1. */
		if (folio_test_hugetlb(folio)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0) {
				folio_put(folio);
				goto unlock_mutex;
			}
		}

		/* First put drops our pin ... */
		folio_put(folio);
		if (TestClearPageHWPoison(p)) {
			/* ... second put drops the ref held by the poisoning. */
			folio_put(folio);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret) {
		/* Hugetlb accounting was already adjusted by folio_free_raw_hwp(). */
		if (!huge)
			num_poisoned_pages_sub(pfn, 1);
		unpoison_pr_info("%#lx: software-unpoisoned page\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
#undef pr_fmt
#define pr_fmt(fmt) "Soft offline: " fmt
/*
* soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
* If the page is a non-dirty unmapped page-cache page, it simply invalidates.
* If the page is mapped, it migrates the contents over.
*/
static int soft_offline_in_use_page(struct page *page)
{
long ret = 0;
unsigned long pfn = page_to_pfn(page);
struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
bool huge = folio_test_hugetlb(folio);
bool isolated;
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
.reason = MR_MEMORY_FAILURE,
};
if (!huge && folio_test_large(folio)) {
const int new_order = min_order_for_split(folio);
/*
* If new_order (target split order) is not 0, do not split the
* folio at all to retain the still accessible large folio.
* NOTE: if minimizing the number of soft offline pages is
* preferred, split it to non-zero new_order like it is done in
* memory_failure().
*/
if (new_order || try_to_split_thp_page(page, /* new_order= */ 0,
/* release= */ true)) {
pr_info("%#lx: thp split failed\n", pfn);
return -EBUSY;
}
folio = page_folio(page);
}
folio_lock(folio);
if (!huge)
folio_wait_writeback(folio);
if (PageHWPoison(page)) {
folio_unlock(folio);
folio_put(folio);
pr_info("%#lx: page already poisoned\n", pfn);
return 0;
}
if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
/*
* Try to invalidate first. This should work for
* non dirty unmapped page cache pages.
*/
ret = mapping_evict_folio(folio_mapping(folio), folio);
folio_unlock(folio);
if (ret) {
pr_info("%#lx: invalidated\n", pfn);
page_handle_poison(page, false, true);
return 0;
}
isolated = isolate_folio_to_list(folio, &pagelist);
/*
* If we succeed to isolate the folio, we grabbed another refcount on
* the folio, so we can safely drop the one we got from get_any_page().
* If we failed to isolate the folio, it means that we cannot go further
* and we will return an error, so drop the reference we got from
* get_any_page() as well.
*/
folio_put(folio);
if (isolated) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
bool release = !huge;
if (!page_handle_poison(page, huge, release))
ret = -EBUSY;
} else {
if (!list_empty(&pagelist))
putback_movable_pages(&pagelist);
pr_info("%#lx: %s migration failed %ld, type %pGp\n",
pfn, msg_page[huge], ret, &page->flags.f);
if (ret > 0)
ret = -EBUSY;
}
} else {
pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n",
pfn, msg_page[huge], page_count(page), &page->flags.f);
ret = -EBUSY;
}
return ret;
}
/**
* soft_offline_page - Soft offline a page.
* @pfn: pfn to soft-offline
* @flags: flags. Same as memory_failure().
*
* Returns 0 on success,
* -EOPNOTSUPP for hwpoison_filter() filtered the error event, or
* disabled by /proc/sys/vm/enable_soft_offline,
* < 0 otherwise negated errno.
*
* Soft offline a page, by migration or invalidation,
* without killing anything. This is for the case when
* a page is not corrupted yet (so it's still valid to access),
* but has had a number of corrected errors and is better taken
* out.
*
* The actual policy on when to do that is maintained by
* user space.
*
* This should never impact any application or cause data loss,
* however it might take some time.
*
* This is not a 100% solution for all memory, but tries to be
* ``good enough'' for the majority of memory.
*/
int soft_offline_page(unsigned long pfn, int flags)
{
int ret;
bool try_again = true;
struct page *page;
if (!pfn_valid(pfn)) {
WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
return -ENXIO;
}
/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
page = pfn_to_online_page(pfn);
if (!page) {
put_ref_page(pfn, flags);
return -EIO;
}
if (!sysctl_enable_soft_offline) {
pr_info_once("disabled by /proc/sys/vm/enable_soft_offline\n");
put_ref_page(pfn, flags);
return -EOPNOTSUPP;
}
mutex_lock(&mf_mutex);
if (PageHWPoison(page)) {
pr_info("%#lx: page already poisoned\n", pfn);
put_ref_page(pfn, flags);
mutex_unlock(&mf_mutex);
return 0;
}
retry:
get_online_mems();
ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
put_online_mems();
if (hwpoison_filter(page)) {
if (ret > 0)
put_page(page);
mutex_unlock(&mf_mutex);
return -EOPNOTSUPP;
}
if (ret > 0) {
ret = soft_offline_in_use_page(page);
} else if (ret == 0) {
if (!page_handle_poison(page, true, false)) {
if (try_again) {
try_again = false;
flags &= ~MF_COUNT_INCREASED;
goto retry;
}
ret = -EBUSY;
}
}
mutex_unlock(&mf_mutex);
return ret;
} | c | github | https://github.com/torvalds/linux | mm/memory-failure.c |
from __future__ import absolute_import
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
import burnman
import burnman.eos.birch_murnaghan as bm
import burnman.eos.birch_murnaghan_4th as bm4
import burnman.eos.mie_grueneisen_debye as mgd
import burnman.eos.slb as slb
import burnman.eos.vinet as vinet
import matplotlib.image as mpimg
def check_birch_murnaghan():
"""
Recreates Stixrude and Lithgow-Bertelloni (2005) Figure 1, bulk and shear modulus without thermal corrections
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.844e-6,
'K_0': 259.0e9,
'Kprime_0': 4.0,
'G_0': 175.0e9,
'Gprime_0': 1.7,
'molar_mass': .0,
}
test_mineral.set_method('bm3')
pressure = np.linspace(0., 140.e9, 100)
volume = np.empty_like(pressure)
bulk_modulus = np.empty_like(pressure)
shear_modulus = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm.volume(pressure[i], test_mineral.params)
bulk_modulus[i] = bm.bulk_modulus(volume[i], test_mineral.params)
shear_modulus[i] = bm.shear_modulus_third_order(
volume[i], test_mineral.params) # third order is used for the plot we are comparing against
# compare with figure 1
plt.plot(pressure / 1.e9, bulk_modulus /
1.e9, pressure / 1.e9, shear_modulus / 1.e9)
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig1.png')
plt.imshow(fig1, extent=[0, 140, 0, 800], aspect='auto')
plt.plot(pressure / 1.e9, bulk_modulus / 1.e9,
'g+', pressure / 1.e9, shear_modulus / 1.e9, 'g+')
plt.ylim(0, 800)
plt.xlim(0, 140)
plt.xlabel("Pressure (GPa)")
plt.ylabel("Modulus (GPa)")
plt.title(
"Comparing with Figure 1 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_birch_murnaghan_4th():
"""
Recreates the formulation of the 4th order Birch-Murnaghan EOS as in Ahmad and Alkammash, 2012; Figure 1.
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 10.e-6,
'K_0': 72.7e9,
'Kprime_0': 4.14,
'Kprime_prime_0': -0.0484e-9,
}
test_mineral.set_method('bm4')
pressure = np.linspace(0., 90.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm4.volume_fourth_order(
pressure[i], test_mineral.params) / test_mineral.params.get('V_0')
# compare with figure 1
plt.plot(pressure / 1.e9, volume)
fig1 = mpimg.imread('../../burnman/data/input_figures/Ahmad.png')
plt.imshow(fig1, extent=[0., 90., .65, 1.], aspect='auto')
plt.plot(pressure / 1.e9, volume, marker='o',
color='r', linestyle='', label='BM4')
plt.legend(loc='lower left')
plt.xlim(0., 90.)
plt.ylim(.65, 1.)
plt.xlabel("Volume/V0")
plt.ylabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Ahmad et al., (2012)")
plt.show()
def check_vinet():
"""
Recreates Dewaele et al., 2006, Figure 1, fitting a Vinet EOS to Fe data
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.75e-6,
'K_0': 163.4e9,
'Kprime_0': 5.38,
}
test_mineral.set_method('vinet')
pressure = np.linspace(17.7e9, 300.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = vinet.volume(pressure[i], test_mineral.params)
# compare with figure 1
plt.plot(pressure / 1.e9, volume / 6.02e-7)
fig1 = mpimg.imread('../../burnman/data/input_figures/Dewaele.png')
plt.imshow(fig1, extent=[0., 300., 6.8, 11.8], aspect='auto')
plt.plot(pressure / 1.e9, volume / 6.02e-7, marker='o',
color='r', linestyle='', label='Vinet Fit')
plt.legend(loc='lower left')
plt.xlim(0., 300.)
plt.ylim(6.8, 11.8)
plt.ylabel("Volume (Angstroms^3/atom")
plt.xlabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Dewaele et al., (2006)")
plt.show()
def check_mgd_shim_duffy_kenichi():
"""
Attemmpts to recreate Shim Duffy Kenichi (2002)
"""
plt.close()
# Create gold material from Table 1
gold = burnman.Mineral()
gold.params = {'name': 'gold',
'V_0': 10.22e-6,
'K_0': 167.0e9,
'Kprime_0': 5.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 1.0,
'Debye_0': 170.,
'grueneisen_0': 2.97, # this does better with gr = 2.93. Why?
'q_0': 1.0}
gold.set_method('mgd3')
# Total pressures, pulled from Table 2
ref_pressures = [
np.array([0., 3.55, 7.55, 12.06, 17.16, 22.91, 29.42, 36.77, 45.11, 54.56, 65.29, 77.50, 91.42, 107.32, 125.51, 146.38, 170.38, 198.07])]
ref_pressures.append(
np.array([4.99, 8.53, 12.53, 17.04, 22.13, 27.88, 34.38, 41.73, 50.06, 59.50, 70.22, 82.43, 96.33, 112.22, 130.40, 151.25, 175.24, 202.90]))
ref_pressures.append(
np.array([12.14, 15.69, 19.68, 24.19, 29.28, 35.03, 41.53, 48.88, 57.20, 66.64, 77.37, 89.57, 103.47, 119.35, 137.53, 158.38, 182.36, 210.02]))
ref_pressures.append(
np.array([19.30, 22.84, 26.84, 31.35, 36.44, 42.19, 48.68, 56.03, 64.35, 73.80, 84.52, 96.72, 110.62, 126.50, 144.68, 165.53, 189.51, 217.17]))
eos = mgd.MGD3()
pressures = np.empty_like(ref_pressures)
ref_dv = np.linspace(0.0, 0.34, len(pressures[0]))
ref_volumes = (1 - ref_dv) * gold.params['V_0']
T = np.array([300., 1000., 2000., 3000.])
for t in range(len(pressures)):
for i in range(len(pressures[t])):
pressures[t][i] = eos.pressure(T[t], ref_volumes[i], gold.params)
plt.plot(ref_dv, (pressures[t] / 1.e9 - ref_pressures[t]))
plt.ylim(-1, 1)
plt.ylabel("Difference in pressure (GPa)")
plt.xlabel("1-dV/V")
plt.title("Comparing with Shim, Duffy, and Kenichi (2002)")
plt.show()
def check_mgd_fei_mao_shu_hu():
"""
Benchmark agains Fei Mao Shu Hu (1991)
"""
mgfeo = burnman.Mineral()
mgfeo.params = {'name': 'MgFeO',
'V_0': 11.657e-6,
'K_0': 157.0e9,
'Kprime_0': 4.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 2.0,
'Debye_0': 500.,
'grueneisen_0': 1.50,
'q_0': 1.1}
mgfeo.set_method('mgd3')
# pulled from table 1
temperatures = np.array(
[300, 300, 483, 483, 483, 590, 593, 593, 593, 700, 600, 500, 650, 600,
600, 650, 700, 737, 727, 673, 600, 543, 565, 585, 600, 628, 654, 745, 768, 747, 726, 700, 676])
volumes = np.array(
[77.418, 72.327, 74.427, 73.655, 72.595, 74.1, 73.834, 73.101, 70.845, 73.024, 72.630, 68.644, 72.969, 72.324, 71.857,
72.128, 73.283, 73.337, 72.963, 71.969, 69.894, 67.430, 67.607, 67.737, 68.204, 68.518, 68.955, 70.777, 72.921, 72.476, 72.152, 71.858, 71.473])
# change from cubic angstroms per unit cell to cubic meters per mol of
# molecules.
volumes = volumes / 1.e30 * 6.022141e23 / 4.0
ref_pressures = np.array(
[0.0, 12.23, 7.77, 9.69, 12.54, 9.21, 9.90, 11.83, 18.35, 12.68, 13.15, 25.16, 12.53, 14.01, 15.34,
14.86, 11.99, 12.08, 13.03, 15.46, 21.44, 29.98, 29.41, 29.05, 27.36, 26.38, 24.97, 19.49, 13.39, 14.48, 15.27, 15.95, 16.94])
ref_pressures = ref_pressures
pressures = np.empty_like(volumes)
eos = mgd.MGD3()
for i in range(len(temperatures)):
pressures[i] = eos.pressure(temperatures[i], volumes[i], mgfeo.params)
plt.scatter(temperatures, (pressures / 1.e9 - ref_pressures))
plt.ylim(-1, 1)
plt.title("Comparing with Fei, Mao, Shu, and Hu (1991)")
plt.xlabel("Temperature (K) at various volumes")
plt.ylabel("Difference in total pressure (GPa)")
plt.show()
def check_slb_fig3():
"""
Benchmark grueneisen parameter against figure 3 of Stixrude and Lithgow-Bertelloni (2005b)
"""
perovskite = burnman.Mineral()
perovskite.params = {'name': 'perovksite',
'V_0': burnman.tools.molar_volume_from_unit_cell_volume(168.27, 4.),
'grueneisen_0': 1.63,
'q_0': 1.7}
volume = np.linspace(0.6, 1.0, 100)
grueneisen_slb = np.empty_like(volume)
grueneisen_mgd = np.empty_like(volume)
q_slb = np.empty_like(volume)
q_mgd = np.empty_like(volume)
slb_eos = slb.SLB2()
mgd_eos = mgd.MGD2()
# calculate its thermal properties
for i in range(len(volume)):
# call with dummy pressure and temperatures, they do not change it
grueneisen_slb[i] = slb_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
grueneisen_mgd[i] = mgd_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
q_slb[i] = slb_eos.volume_dependent_q(
1. / volume[i], perovskite.params)
q_mgd[i] = perovskite.params['q_0']
# compare with figure 7
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig3.png')
plt.imshow(fig1, extent=[0.6, 1.0, 0.35, 2.0], aspect='auto')
plt.plot(volume, grueneisen_slb, 'g+', volume, grueneisen_mgd, 'b+')
plt.plot(volume, q_slb, 'g+', volume, q_mgd, 'b+')
plt.xlim(0.6, 1.0)
plt.ylim(0.35, 2.0)
plt.ylabel("Grueneisen parameter")
plt.xlabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 3 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_slb_fig7_txt():
"""
Calculates all values for forsterite and benchmarks with values from Stixrude and Lithgow-Bertelloni (personal communication)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.603e-6,
'K_0': 127.955e9,
'Kprime_0': 4.232,
'G_0': 81.6e9,
'Gprime_0': 1.4,
'molar_mass': .140695,
'n': 7.0,
'Debye_0': 809.183,
'grueneisen_0': .993,
'q_0': 2.093,
'F_0': -1.1406e5,
'eta_s_0': 2.364}
forsterite.set_method('slb3')
data = np.loadtxt(
"../../burnman/data/input_minphys/slb_fig7.txt", skiprows=2)
temperature = np.array(data[:, 2])
pressure = np.array(data[:, 0])
rho = np.array(data[:, 3])
rho_comp = np.empty_like(rho)
Kt = np.array(data[:, 4])
Kt_comp = np.empty_like(Kt)
Ks = np.array(data[:, 5])
Ks_comp = np.empty_like(Ks)
G = np.array(data[:, 6])
G_comp = np.empty_like(G)
VB = np.array(data[:, 7])
VB_comp = np.empty_like(VB)
VS = np.array(data[:, 8])
VS_comp = np.empty_like(VS)
VP = np.array(data[:, 9])
VP_comp = np.empty_like(VP)
vol = np.array(data[:, 10])
vol_comp = np.empty_like(vol)
alpha = np.array(data[:, 11])
alpha_comp = np.empty_like(alpha)
Cp = np.array(data[:, 12])
Cp_comp = np.empty_like(Cp)
gr = np.array(data[:, 13])
gr_comp = np.empty_like(gr)
gibbs = np.array(data[:, 14])
gibbs_comp = np.empty_like(gibbs)
entropy = np.array(data[:, 15])
entropy_comp = np.empty_like(gibbs)
enthalpy = np.array(data[:, 16])
enthalpy_comp = np.empty_like(gibbs)
for i in range(len(temperature)):
forsterite.set_state(pressure[i], temperature[i])
rho_comp[i] = 100. * (forsterite.density / 1000. - rho[i]) / rho[i]
Kt_comp[i] = 100. * (
forsterite.isothermal_bulk_modulus / 1.e9 - Kt[i]) / Kt[i]
Ks_comp[i] = 100. * (
forsterite.adiabatic_bulk_modulus / 1.e9 - Ks[i]) / Ks[i]
G_comp[i] = 100. * (forsterite.shear_modulus / 1.e9 - G[i]) / G[i]
VB_comp[i] = 100. * (forsterite.v_phi / 1000. - VB[i]) / VB[i]
VS_comp[i] = 100. * (forsterite.v_s / 1000. - VS[i]) / VS[i]
VP_comp[i] = 100. * (forsterite.v_p / 1000. - VP[i]) / VP[i]
vol_comp[i] = 100. * (forsterite.molar_volume * 1.e6 - vol[i]) / vol[i]
alpha_comp[i] = 100. * (
forsterite.thermal_expansivity / 1.e-5 - alpha[i]) / (alpha[-1])
Cp_comp[i] = 100. * (forsterite.heat_capacity_p /
forsterite.params['molar_mass'] / 1000. - Cp[i]) / (Cp[-1])
gr_comp[i] = (forsterite.grueneisen_parameter - gr[i]) / gr[i]
gibbs_comp[i] = 100. * (
forsterite.molar_gibbs / 1.e6 - gibbs[i]) / gibbs[i]
entropy_comp[i] = 100. * (
forsterite.molar_entropy - entropy[i]) / (entropy[i] if entropy[i] != 0. else 1.)
enthalpy_comp[i] = 100. * (
forsterite.molar_enthalpy / 1.e6 - enthalpy[i]) / (enthalpy[i] if enthalpy[i] != 0. else 1.)
plt.plot(temperature, rho_comp, label=r'$\rho$')
plt.plot(temperature, Kt_comp, label=r'$K_S$')
plt.plot(temperature, Ks_comp, label=r'$K_T$')
plt.plot(temperature, G_comp, label=r'$G$')
plt.plot(temperature, VS_comp, label=r'$V_S$')
plt.plot(temperature, VP_comp, label=r'$V_P$')
plt.plot(temperature, VB_comp, label=r'$V_\phi$')
plt.plot(temperature, vol_comp, label=r'$V$')
plt.plot(temperature, alpha_comp, label=r'$\alpha$')
plt.plot(temperature, Cp_comp, label=r'$c_P$')
plt.plot(temperature, gr_comp, label=r'$\gamma$')
plt.plot(temperature, gibbs_comp, label=r'Gibbs')
plt.plot(temperature, enthalpy_comp, label=r'Enthalpy')
plt.plot(temperature, entropy_comp, label=r'Entropy')
plt.xlim([0, 2750])
plt.ylim([-0.001, 0.001])
plt.xticks([0, 800, 1600, 2200])
plt.xlabel("Temperature (K)")
plt.ylabel("Percent Difference from HeFESTo")
plt.legend(loc="center right")
# plt.savefig("output_figures/benchmark1.pdf")
plt.show()
def check_slb_fig7():
"""
Calculates all values for forsterite and benchmarks with figure 7 from Stixrude and Lithgow-Bertelloni (2005)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.60e-6,
'K_0': 128.0e9,
'Kprime_0': 4.2,
'G_0': 82.0e9,
'Gprime_0': 1.4,
'n': 7.0,
'molar_mass': .140695,
'Debye_0': 809.,
'grueneisen_0': .99,
'q_0': 2.1,
'eta_s_0': 2.4}
forsterite.set_method('slb3')
temperature = np.linspace(0., 2000., 200)
volume = np.empty_like(temperature)
bulk_modulus = np.empty_like(temperature)
shear_modulus = np.empty_like(temperature)
heat_capacity = np.empty_like(temperature)
pressure = 1.0e5
forsterite.set_state(pressure, 300.)
Ks_0 = forsterite.adiabatic_bulk_modulus
# calculate its thermal properties
for i in range(len(temperature)):
forsterite.set_state(pressure, temperature[i])
volume[i] = forsterite.molar_volume / forsterite.params['V_0']
bulk_modulus[i] = forsterite.adiabatic_bulk_modulus / Ks_0
shear_modulus[i] = forsterite.shear_modulus / forsterite.params['G_0']
heat_capacity[i] = forsterite.heat_capacity_p / forsterite.params['n']
# compare with figure 7
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_vol.png')
plt.imshow(fig1, extent=[0, 2200, 0.99, 1.08], aspect='auto')
plt.plot(temperature, volume, 'g+')
plt.ylim(0.99, 1.08)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_Cp.png')
plt.imshow(fig1, extent=[0, 2200, 0., 70.], aspect='auto')
plt.plot(temperature, heat_capacity, 'g+')
plt.ylim(0, 70)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Heat Capacity Cp")
plt.title(
"Comparing with adiabatic_bulk_modulus7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_K.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, bulk_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Bulk Modulus K/K0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_G.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, shear_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Shear Modulus G/G0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_averaging():
"""
Reproduce Figure 1a from Watt et. al. 1976 to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# MgO bulk and shear moduli taken from Landolt-Boernstein
# - Group III Condensed Matter Volume 41B, 1999, pp 1-3
K2 = 152. # Bulk modulus, GPa
G2 = 155. # Shear modulus, GPa
# AgCl bulk and shear moduli (estimated from plot)
G1 = G2 * 0.07
K1 = K2 * 0.27
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a1.png')
plt.imshow(fig, extent=[0, 1.0, 0.25, 1.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus / K2, 'g-')
plt.plot(volumes, r_bulk_modulus / K2, 'g-')
plt.plot(volumes, vrh_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsu_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsl_bulk_modulus / K2, 'g-')
plt.ylim(0.25, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a2.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 1.0], aspect='auto')
plt.plot(volumes, v_shear_modulus / G2, 'g-')
plt.plot(volumes, r_shear_modulus / G2, 'g-')
plt.plot(volumes, vrh_shear_modulus / G2, 'g-')
plt.plot(volumes, hsu_shear_modulus / G2, 'g-')
plt.plot(volumes, hsl_shear_modulus / G2, 'g-')
plt.ylim(0.0, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
# also check against some numerical values given in Berryman (1995) for
# porous glass
K = 46.3
G = 30.5
# the value for porosity=0.46 in the table appears to be a typo. Remove
# it here
porosity = np.array(
[0.0, 0.05, 0.11, 0.13, 0.25, 0.33, 0.36, 0.39, 0.44, 0.50, 0.70])
berryman_bulk_modulus = np.array(
[46.3, 41.6, 36.6, 35.1, 27.0, 22.5, 21.0, 19.6, 17.3, 14.8, 7.7]) # 15.5 probably a typo?
hsu_bulk_modulus_vals = np.empty_like(porosity)
for i in range(len(porosity)):
hsu_bulk_modulus_vals[i] = hashin_shtrikman_upper.average_bulk_moduli(
[porosity[i], 1.0 - porosity[i]], [0.0, K], [0.0, G])
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [0.0, K], [0.0, G])
fig = mpimg.imread('../../burnman/data/input_figures/berryman_fig4.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 50.0], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.scatter(porosity, hsu_bulk_modulus_vals, c='r')
plt.scatter(porosity, berryman_bulk_modulus, c='y')
plt.ylim(0.0, 50.0)
plt.xlim(0, 1.0)
plt.xlabel("Porosity")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 4 of Berryman (1995)")
plt.show()
def check_averaging_2():
"""
Reproduce Figure 1 from Hashin and Shtrikman (1963) to check the
Hashin-Shtrikman bounds for an elastic composite
"""
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# These values are from Hashin and Shtrikman (1963)
K1 = 25.0
K2 = 60.7
G1 = 11.5
G2 = 41.8
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig1_K.png')
plt.imshow(fig, extent=[0, 1.0, 1.1, K2 + 0.3], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.ylim(K1, K2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Hashin and Shtrikman (1963)")
plt.show()
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig2_G.png')
plt.imshow(fig, extent=[0, 1.0, 0.3, G2], aspect='auto')
plt.plot(volumes, hsu_shear_modulus, 'g-')
plt.plot(volumes, hsl_shear_modulus, 'g-')
plt.ylim(G1, G2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 2 of Hashin and Shtrikman (1963)")
plt.show()
def check_averaging_3():
"""
Reproduce Figure 3 from Avseth et al. (2010) to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
hs_av_bulk_modulus = np.empty_like(volumes)
hs_av_shear_modulus = np.empty_like(volumes)
# Quartz bulk and shear moduli
K2 = 37.
G2 = 45.
# Fluid bulk and shear moduli
G1 = 0.00001
K1 = 2.35
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hs_av_bulk_modulus[i] = 0.5 * hsl_bulk_modulus[
i] + 0.5 * hsu_bulk_modulus[i]
hs_av_shear_modulus[i] = 0.5 * hsl_shear_modulus[
i] + 0.5 * hsu_shear_modulus[i]
fig = mpimg.imread(
'../../burnman/data/input_figures/Avseth_et_al_2010_fig3_K.png')
plt.imshow(fig, extent=[0, 1.0, 0., 40.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus, 'g-')
plt.plot(volumes, r_bulk_modulus, 'g-')
plt.plot(volumes, vrh_bulk_modulus, 'g-')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.plot(volumes, hs_av_bulk_modulus, 'g-')
plt.ylim(0., 40.00)
plt.xlim(0., 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 3 of Avseth et al., 2010")
plt.show()
if __name__ == "__main__":
check_averaging()
check_averaging_2()
check_averaging_3()
check_birch_murnaghan()
check_birch_murnaghan_4th()
check_vinet()
check_slb_fig7()
check_slb_fig3()
check_mgd_shim_duffy_kenichi()
check_mgd_fei_mao_shu_hu()
check_slb_fig7_txt() | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
/**
* Represents a set of topic partitions, where each entry contains topic ID, topic name and partition number.
* Keeps in-memory references to provide easy access to this data in different forms.
* (ex. retrieve topic IDs only, topic names, partitions with topic names, partitions with topic IDs)
* Data is kept sorted by topic name and partition number, for improved logging.
*/
public class TopicIdPartitionSet {
/**
* TopicPartition comparator based on topic name and partition.
*/
static final Utils.TopicPartitionComparator TOPIC_PARTITION_COMPARATOR = new Utils.TopicPartitionComparator();
/**
* TopicIdPartition comparator based on topic name and partition.
* (Ignoring topic ID while sorting, as this is sorted mainly for logging purposes).
*/
static final Utils.TopicIdPartitionComparator TOPIC_ID_PARTITION_COMPARATOR = new Utils.TopicIdPartitionComparator();
private final SortedSet<TopicIdPartition> topicIdPartitions;
private final SortedSet<TopicPartition> topicPartitions;
private final Set<Uuid> topicIds;
private final SortedSet<String> topicNames;
public TopicIdPartitionSet() {
this.topicIdPartitions = new TreeSet<>(TOPIC_ID_PARTITION_COMPARATOR);
this.topicPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR);
this.topicIds = new HashSet<>();
this.topicNames = new TreeSet<>();
}
/**
* Add a single partition to the assignment, along with its topic ID and name.
* This will keep it, and also save references to the topic ID, topic name and partition.
* Visible for testing.
*/
void add(TopicIdPartition topicIdPartition) {
topicIdPartitions.add(topicIdPartition);
topicPartitions.add(topicIdPartition.topicPartition());
topicIds.add(topicIdPartition.topicId());
topicNames.add(topicIdPartition.topicPartition().topic());
}
/**
* Add a set of partitions to the assignment, along with the topic ID and name.
*/
public void addAll(Uuid topicId, String topicName, Set<Integer> partitions) {
partitions.forEach(tp -> add(new TopicIdPartition(topicId, tp, topicName)));
}
public boolean isEmpty() {
return this.topicIdPartitions.isEmpty();
}
public SortedSet<TopicPartition> topicPartitions() {
return Collections.unmodifiableSortedSet(topicPartitions);
}
public Set<Uuid> topicIds() {
return Collections.unmodifiableSet(topicIds);
}
public SortedSet<String> topicNames() {
return Collections.unmodifiableSortedSet(topicNames);
}
/**
* @return Map of partition numbers per topic ID, sorted by topic names (for improved logging).
*/
public Map<Uuid, SortedSet<Integer>> toTopicIdPartitionMap() {
Map<Uuid, SortedSet<Integer>> partitions = new HashMap<>();
topicIdPartitions.forEach(topicIdPartition -> {
Uuid topicId = topicIdPartition.topicId();
partitions.computeIfAbsent(topicId, k -> new TreeSet<>()).add(topicIdPartition.partition());
});
return partitions;
}
/**
* @return Set of topic partitions (with topic name and partition number)
*/
protected SortedSet<TopicPartition> toTopicNamePartitionSet() {
SortedSet<TopicPartition> result = new TreeSet<>(TOPIC_PARTITION_COMPARATOR);
topicIdPartitions.forEach(topicIdPartition -> result.add(topicIdPartition.topicPartition()));
return result;
}
@Override
public String toString() {
return this.topicIdPartitions.toString();
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSet.java |
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: CURLINFO_NAMELOOKUP_TIME
Section: 3
Source: libcurl
See-also:
- CURLINFO_NAMELOOKUP_TIME_T (3)
- curl_easy_getinfo (3)
- curl_easy_setopt (3)
Protocol:
- All
Added-in: 7.4.1
---
# NAME
CURLINFO_NAMELOOKUP_TIME - name lookup time
# SYNOPSIS
~~~c
#include <curl/curl.h>
CURLcode curl_easy_getinfo(CURL *handle, CURLINFO_NAMELOOKUP_TIME,
double *timep);
~~~
# DESCRIPTION
Pass a pointer to a double to receive the total time in seconds from the start
until the name resolving was completed.
When a redirect is followed, the time from each request is added together.
See also the TIMES overview in the curl_easy_getinfo(3) man page.
# %PROTOCOLS%
# EXAMPLE
~~~c
int main(void)
{
CURL *curl = curl_easy_init();
if(curl) {
CURLcode result;
double namelookup;
curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
result = curl_easy_perform(curl);
if(result == CURLE_OK) {
result = curl_easy_getinfo(curl, CURLINFO_NAMELOOKUP_TIME, &namelookup);
if(result == CURLE_OK) {
printf("Time: %.1f", namelookup);
}
}
/* always cleanup */
curl_easy_cleanup(curl);
}
}
~~~
# %AVAILABILITY%
# RETURN VALUE
curl_easy_getinfo(3) returns a CURLcode indicating success or error.
CURLE_OK (0) means everything was OK, non-zero means an error occurred, see
libcurl-errors(3). | unknown | github | https://github.com/curl/curl | docs/libcurl/opts/CURLINFO_NAMELOOKUP_TIME.md |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.